author    Jocelyn Turcotte <jocelyn.turcotte@digia.com>  2014-08-08 14:30:41 +0200
committer Jocelyn Turcotte <jocelyn.turcotte@digia.com>  2014-08-12 13:49:54 +0200
commit    ab0a50979b9eb4dfa3320eff7e187e41efedf7a9 (patch)
tree      498dfb8a97ff3361a9f7486863a52bb4e26bb898 /chromium/v8/src
parent    4ce69f7403811819800e7c5ae1318b2647e778d1 (diff)
download  qtwebengine-chromium-ab0a50979b9eb4dfa3320eff7e187e41efedf7a9.tar.gz
Update Chromium to beta version 37.0.2062.68
Change-Id: I188e3b5aff1bec75566014291b654eb19f5bc8ca
Reviewed-by: Andras Becsi <andras.becsi@digia.com>
Diffstat (limited to 'chromium/v8/src')
-rw-r--r--chromium/v8/src/DEPS6
-rw-r--r--chromium/v8/src/accessors.cc1378
-rw-r--r--chromium/v8/src/accessors.h165
-rw-r--r--chromium/v8/src/allocation-site-scopes.cc31
-rw-r--r--chromium/v8/src/allocation-site-scopes.h37
-rw-r--r--chromium/v8/src/allocation-tracker.cc233
-rw-r--r--chromium/v8/src/allocation-tracker.h89
-rw-r--r--chromium/v8/src/allocation.cc74
-rw-r--r--chromium/v8/src/allocation.h32
-rw-r--r--chromium/v8/src/api.cc3229
-rw-r--r--chromium/v8/src/api.h99
-rw-r--r--chromium/v8/src/apinatives.js59
-rw-r--r--chromium/v8/src/apiutils.h49
-rw-r--r--chromium/v8/src/arguments.cc37
-rw-r--r--chromium/v8/src/arguments.h59
-rw-r--r--chromium/v8/src/arm/OWNERS1
-rw-r--r--chromium/v8/src/arm/assembler-arm-inl.h157
-rw-r--r--chromium/v8/src/arm/assembler-arm.cc667
-rw-r--r--chromium/v8/src/arm/assembler-arm.h211
-rw-r--r--chromium/v8/src/arm/builtins-arm.cc419
-rw-r--r--chromium/v8/src/arm/code-stubs-arm.cc2763
-rw-r--r--chromium/v8/src/arm/code-stubs-arm.h158
-rw-r--r--chromium/v8/src/arm/codegen-arm.cc161
-rw-r--r--chromium/v8/src/arm/codegen-arm.h72
-rw-r--r--chromium/v8/src/arm/constants-arm.cc35
-rw-r--r--chromium/v8/src/arm/constants-arm.h35
-rw-r--r--chromium/v8/src/arm/cpu-arm.cc54
-rw-r--r--chromium/v8/src/arm/debug-arm.cc116
-rw-r--r--chromium/v8/src/arm/deoptimizer-arm.cc82
-rw-r--r--chromium/v8/src/arm/disasm-arm.cc227
-rw-r--r--chromium/v8/src/arm/frames-arm.cc58
-rw-r--r--chromium/v8/src/arm/frames-arm.h38
-rw-r--r--chromium/v8/src/arm/full-codegen-arm.cc1184
-rw-r--r--chromium/v8/src/arm/ic-arm.cc419
-rw-r--r--chromium/v8/src/arm/lithium-arm.cc1037
-rw-r--r--chromium/v8/src/arm/lithium-arm.h673
-rw-r--r--chromium/v8/src/arm/lithium-codegen-arm.cc1782
-rw-r--r--chromium/v8/src/arm/lithium-codegen-arm.h78
-rw-r--r--chromium/v8/src/arm/lithium-gap-resolver-arm.cc108
-rw-r--r--chromium/v8/src/arm/lithium-gap-resolver-arm.h35
-rw-r--r--chromium/v8/src/arm/macro-assembler-arm.cc769
-rw-r--r--chromium/v8/src/arm/macro-assembler-arm.h249
-rw-r--r--chromium/v8/src/arm/regexp-macro-assembler-arm.cc54
-rw-r--r--chromium/v8/src/arm/regexp-macro-assembler-arm.h33
-rw-r--r--chromium/v8/src/arm/simulator-arm.cc136
-rw-r--r--chromium/v8/src/arm/simulator-arm.h48
-rw-r--r--chromium/v8/src/arm/stub-cache-arm.cc1803
-rw-r--r--chromium/v8/src/arm64/OWNERS1
-rw-r--r--chromium/v8/src/arm64/assembler-arm64-inl.h1264
-rw-r--r--chromium/v8/src/arm64/assembler-arm64.cc2892
-rw-r--r--chromium/v8/src/arm64/assembler-arm64.h2256
-rw-r--r--chromium/v8/src/arm64/builtins-arm64.cc1565
-rw-r--r--chromium/v8/src/arm64/code-stubs-arm64.cc5555
-rw-r--r--chromium/v8/src/arm64/code-stubs-arm64.h478
-rw-r--r--chromium/v8/src/arm64/codegen-arm64.cc620
-rw-r--r--chromium/v8/src/arm64/codegen-arm64.h48
-rw-r--r--chromium/v8/src/arm64/constants-arm64.h1250
-rw-r--r--chromium/v8/src/arm64/cpu-arm64.cc123
-rw-r--r--chromium/v8/src/arm64/debug-arm64.cc357
-rw-r--r--chromium/v8/src/arm64/decoder-arm64-inl.h648
-rw-r--r--chromium/v8/src/arm64/decoder-arm64.cc86
-rw-r--r--chromium/v8/src/arm64/decoder-arm64.h187
-rw-r--r--chromium/v8/src/arm64/deoptimizer-arm64.cc385
-rw-r--r--chromium/v8/src/arm64/disasm-arm64.cc1832
-rw-r--r--chromium/v8/src/arm64/disasm-arm64.h92
-rw-r--r--chromium/v8/src/arm64/frames-arm64.cc42
-rw-r--r--chromium/v8/src/arm64/frames-arm64.h109
-rw-r--r--chromium/v8/src/arm64/full-codegen-arm64.cc4884
-rw-r--r--chromium/v8/src/arm64/ic-arm64.cc1387
-rw-r--r--chromium/v8/src/arm64/instructions-arm64.cc317
-rw-r--r--chromium/v8/src/arm64/instructions-arm64.h509
-rw-r--r--chromium/v8/src/arm64/instrument-arm64.cc595
-rw-r--r--chromium/v8/src/arm64/instrument-arm64.h84
-rw-r--r--chromium/v8/src/arm64/lithium-arm64.cc2725
-rw-r--r--chromium/v8/src/arm64/lithium-arm64.h3248
-rw-r--r--chromium/v8/src/arm64/lithium-codegen-arm64.cc6061
-rw-r--r--chromium/v8/src/arm64/lithium-codegen-arm64.h506
-rw-r--r--chromium/v8/src/arm64/lithium-gap-resolver-arm64.cc311
-rw-r--r--chromium/v8/src/arm64/lithium-gap-resolver-arm64.h67
-rw-r--r--chromium/v8/src/arm64/macro-assembler-arm64-inl.h1706
-rw-r--r--chromium/v8/src/arm64/macro-assembler-arm64.cc5303
-rw-r--r--chromium/v8/src/arm64/macro-assembler-arm64.h2327
-rw-r--r--chromium/v8/src/arm64/regexp-macro-assembler-arm64.cc1707
-rw-r--r--chromium/v8/src/arm64/regexp-macro-assembler-arm64.h292
-rw-r--r--chromium/v8/src/arm64/simulator-arm64.cc3736
-rw-r--r--chromium/v8/src/arm64/simulator-arm64.h837
-rw-r--r--chromium/v8/src/arm64/stub-cache-arm64.cc1477
-rw-r--r--chromium/v8/src/arm64/utils-arm64.cc89
-rw-r--r--chromium/v8/src/arm64/utils-arm64.h112
-rw-r--r--chromium/v8/src/array-iterator.js90
-rw-r--r--chromium/v8/src/array.js525
-rw-r--r--chromium/v8/src/arraybuffer.js44
-rw-r--r--chromium/v8/src/assembler.cc400
-rw-r--r--chromium/v8/src/assembler.h233
-rw-r--r--chromium/v8/src/assert-scope.cc21
-rw-r--r--chromium/v8/src/assert-scope.h169
-rw-r--r--chromium/v8/src/ast.cc322
-rw-r--r--chromium/v8/src/ast.h790
-rw-r--r--chromium/v8/src/atomicops_internals_arm_gcc.h145
-rw-r--r--chromium/v8/src/atomicops_internals_x86_macosx.h301
-rw-r--r--chromium/v8/src/base/DEPS4
-rw-r--r--chromium/v8/src/base/atomicops.h (renamed from chromium/v8/src/atomicops.h)85
-rw-r--r--chromium/v8/src/base/atomicops_internals_arm64_gcc.h316
-rw-r--r--chromium/v8/src/base/atomicops_internals_arm_gcc.h301
-rw-r--r--chromium/v8/src/base/atomicops_internals_atomicword_compat.h99
-rw-r--r--chromium/v8/src/base/atomicops_internals_mac.h204
-rw-r--r--chromium/v8/src/base/atomicops_internals_mips_gcc.h (renamed from chromium/v8/src/atomicops_internals_mips_gcc.h)45
-rw-r--r--chromium/v8/src/base/atomicops_internals_tsan.h (renamed from chromium/v8/src/atomicops_internals_tsan.h)239
-rw-r--r--chromium/v8/src/base/atomicops_internals_x86_gcc.cc (renamed from chromium/v8/src/atomicops_internals_x86_gcc.cc)54
-rw-r--r--chromium/v8/src/base/atomicops_internals_x86_gcc.h (renamed from chromium/v8/src/atomicops_internals_x86_gcc.h)45
-rw-r--r--chromium/v8/src/base/atomicops_internals_x86_msvc.h (renamed from chromium/v8/src/atomicops_internals_x86_msvc.h)63
-rw-r--r--chromium/v8/src/base/build_config.h120
-rw-r--r--chromium/v8/src/base/lazy-instance.h (renamed from chromium/v8/src/lazy-instance.h)51
-rw-r--r--chromium/v8/src/base/macros.h120
-rw-r--r--chromium/v8/src/base/once.cc53
-rw-r--r--chromium/v8/src/base/once.h (renamed from chromium/v8/src/once.h)41
-rw-r--r--chromium/v8/src/base/safe_conversions.h67
-rw-r--r--chromium/v8/src/base/safe_conversions_impl.h220
-rw-r--r--chromium/v8/src/base/safe_math.h276
-rw-r--r--chromium/v8/src/base/safe_math_impl.h531
-rw-r--r--chromium/v8/src/base/win32-headers.h (renamed from chromium/v8/src/win32-headers.h)34
-rw-r--r--chromium/v8/src/bignum-dtoa.cc42
-rw-r--r--chromium/v8/src/bignum-dtoa.h27
-rw-r--r--chromium/v8/src/bignum.cc35
-rw-r--r--chromium/v8/src/bignum.h27
-rw-r--r--chromium/v8/src/bootstrapper.cc1592
-rw-r--r--chromium/v8/src/bootstrapper.h61
-rw-r--r--chromium/v8/src/builtins.cc998
-rw-r--r--chromium/v8/src/builtins.h88
-rw-r--r--chromium/v8/src/bytecodes-irregexp.h27
-rw-r--r--chromium/v8/src/cached-powers.cc40
-rw-r--r--chromium/v8/src/cached-powers.h29
-rw-r--r--chromium/v8/src/char-predicates-inl.h29
-rw-r--r--chromium/v8/src/char-predicates.h50
-rw-r--r--chromium/v8/src/checks.cc97
-rw-r--r--chromium/v8/src/checks.h90
-rw-r--r--chromium/v8/src/circular-queue-inl.h41
-rw-r--r--chromium/v8/src/circular-queue.h32
-rw-r--r--chromium/v8/src/code-stubs-hydrogen.cc1066
-rw-r--r--chromium/v8/src/code-stubs.cc331
-rw-r--r--chromium/v8/src/code-stubs.h1204
-rw-r--r--chromium/v8/src/code.h33
-rw-r--r--chromium/v8/src/codegen.cc148
-rw-r--r--chromium/v8/src/codegen.h110
-rw-r--r--chromium/v8/src/collection-iterator.js162
-rw-r--r--chromium/v8/src/collection.js321
-rw-r--r--chromium/v8/src/compilation-cache.cc227
-rw-r--r--chromium/v8/src/compilation-cache.h97
-rw-r--r--chromium/v8/src/compiler-intrinsics.h27
-rw-r--r--chromium/v8/src/compiler.cc1443
-rw-r--r--chromium/v8/src/compiler.h296
-rw-r--r--chromium/v8/src/contexts.cc64
-rw-r--r--chromium/v8/src/contexts.h176
-rw-r--r--chromium/v8/src/conversions-inl.h51
-rw-r--r--chromium/v8/src/conversions.cc129
-rw-r--r--chromium/v8/src/conversions.h130
-rw-r--r--chromium/v8/src/counters.cc149
-rw-r--r--chromium/v8/src/counters.h437
-rw-r--r--chromium/v8/src/cpu-profiler-inl.h43
-rw-r--r--chromium/v8/src/cpu-profiler.cc136
-rw-r--r--chromium/v8/src/cpu-profiler.h56
-rw-r--r--chromium/v8/src/cpu.cc164
-rw-r--r--chromium/v8/src/cpu.h42
-rw-r--r--chromium/v8/src/d8-debug.cc247
-rw-r--r--chromium/v8/src/d8-debug.h143
-rw-r--r--chromium/v8/src/d8-posix.cc65
-rw-r--r--chromium/v8/src/d8-readline.cc41
-rw-r--r--chromium/v8/src/d8-windows.cc39
-rw-r--r--chromium/v8/src/d8.cc635
-rw-r--r--chromium/v8/src/d8.gyp9
-rw-r--r--chromium/v8/src/d8.h83
-rw-r--r--chromium/v8/src/d8.js239
-rw-r--r--chromium/v8/src/data-flow.cc33
-rw-r--r--chromium/v8/src/data-flow.h37
-rw-r--r--chromium/v8/src/date.cc48
-rw-r--r--chromium/v8/src/date.h48
-rw-r--r--chromium/v8/src/date.js52
-rw-r--r--chromium/v8/src/dateparser-inl.h29
-rw-r--r--chromium/v8/src/dateparser.cc35
-rw-r--r--chromium/v8/src/dateparser.h33
-rw-r--r--chromium/v8/src/debug-agent.cc503
-rw-r--r--chromium/v8/src/debug-agent.h118
-rw-r--r--chromium/v8/src/debug-debugger.js170
-rw-r--r--chromium/v8/src/debug.cc2178
-rw-r--r--chromium/v8/src/debug.h1039
-rw-r--r--chromium/v8/src/default-platform.cc56
-rw-r--r--chromium/v8/src/default-platform.h55
-rw-r--r--chromium/v8/src/deoptimizer.cc943
-rw-r--r--chromium/v8/src/deoptimizer.h214
-rw-r--r--chromium/v8/src/disasm.h27
-rw-r--r--chromium/v8/src/disassembler.cc85
-rw-r--r--chromium/v8/src/disassembler.h29
-rw-r--r--chromium/v8/src/diy-fp.cc35
-rw-r--r--chromium/v8/src/diy-fp.h27
-rw-r--r--chromium/v8/src/double.h29
-rw-r--r--chromium/v8/src/dtoa.cc43
-rw-r--r--chromium/v8/src/dtoa.h27
-rw-r--r--chromium/v8/src/effects.h59
-rw-r--r--chromium/v8/src/elements-kind.cc164
-rw-r--r--chromium/v8/src/elements-kind.h102
-rw-r--r--chromium/v8/src/elements.cc1464
-rw-r--r--chromium/v8/src/elements.h193
-rw-r--r--chromium/v8/src/execution.cc688
-rw-r--r--chromium/v8/src/execution.h221
-rw-r--r--chromium/v8/src/extensions/externalize-string-extension.cc47
-rw-r--r--chromium/v8/src/extensions/externalize-string-extension.h31
-rw-r--r--chromium/v8/src/extensions/free-buffer-extension.cc45
-rw-r--r--chromium/v8/src/extensions/free-buffer-extension.h34
-rw-r--r--chromium/v8/src/extensions/gc-extension.cc56
-rw-r--r--chromium/v8/src/extensions/gc-extension.h43
-rw-r--r--chromium/v8/src/extensions/statistics-extension.cc47
-rw-r--r--chromium/v8/src/extensions/statistics-extension.h31
-rw-r--r--chromium/v8/src/extensions/trigger-failure-extension.cc56
-rw-r--r--chromium/v8/src/extensions/trigger-failure-extension.h32
-rw-r--r--chromium/v8/src/factory.cc2147
-rw-r--r--chromium/v8/src/factory.h488
-rw-r--r--chromium/v8/src/fast-dtoa.cc47
-rw-r--r--chromium/v8/src/fast-dtoa.h27
-rw-r--r--chromium/v8/src/feedback-slots.h27
-rw-r--r--chromium/v8/src/field-index-inl.h98
-rw-r--r--chromium/v8/src/field-index.cc23
-rw-r--r--chromium/v8/src/field-index.h119
-rw-r--r--chromium/v8/src/fixed-dtoa.cc37
-rw-r--r--chromium/v8/src/fixed-dtoa.h27
-rw-r--r--chromium/v8/src/flag-definitions.h244
-rw-r--r--chromium/v8/src/flags.cc57
-rw-r--r--chromium/v8/src/flags.h35
-rw-r--r--chromium/v8/src/frames-inl.h50
-rw-r--r--chromium/v8/src/frames.cc124
-rw-r--r--chromium/v8/src/frames.h86
-rw-r--r--chromium/v8/src/full-codegen.cc215
-rw-r--r--chromium/v8/src/full-codegen.h125
-rw-r--r--chromium/v8/src/func-name-inferrer.cc55
-rw-r--r--chromium/v8/src/func-name-inferrer.h31
-rw-r--r--chromium/v8/src/gdb-jit.cc105
-rw-r--r--chromium/v8/src/gdb-jit.h39
-rw-r--r--chromium/v8/src/generator.js38
-rw-r--r--chromium/v8/src/global-handles.cc119
-rw-r--r--chromium/v8/src/global-handles.h56
-rw-r--r--chromium/v8/src/globals.h756
-rw-r--r--chromium/v8/src/handles-inl.h75
-rw-r--r--chromium/v8/src/handles.cc696
-rw-r--r--chromium/v8/src/handles.h203
-rw-r--r--chromium/v8/src/harmony-array.js87
-rw-r--r--chromium/v8/src/harmony-math.js238
-rw-r--r--chromium/v8/src/harmony-string.js69
-rw-r--r--chromium/v8/src/hashmap.h38
-rw-r--r--chromium/v8/src/heap-inl.h493
-rw-r--r--chromium/v8/src/heap-profiler.cc134
-rw-r--r--chromium/v8/src/heap-profiler.h57
-rw-r--r--chromium/v8/src/heap-snapshot-generator-inl.h34
-rw-r--r--chromium/v8/src/heap-snapshot-generator.cc914
-rw-r--r--chromium/v8/src/heap-snapshot-generator.h202
-rw-r--r--chromium/v8/src/heap.cc4429
-rw-r--r--chromium/v8/src/heap.h1605
-rw-r--r--chromium/v8/src/hydrogen-alias-analysis.h29
-rw-r--r--chromium/v8/src/hydrogen-bce.cc187
-rw-r--r--chromium/v8/src/hydrogen-bce.h29
-rw-r--r--chromium/v8/src/hydrogen-bch.cc31
-rw-r--r--chromium/v8/src/hydrogen-bch.h29
-rw-r--r--chromium/v8/src/hydrogen-canonicalize.cc31
-rw-r--r--chromium/v8/src/hydrogen-canonicalize.h29
-rw-r--r--chromium/v8/src/hydrogen-check-elimination.cc757
-rw-r--r--chromium/v8/src/hydrogen-check-elimination.h49
-rw-r--r--chromium/v8/src/hydrogen-dce.cc35
-rw-r--r--chromium/v8/src/hydrogen-dce.h29
-rw-r--r--chromium/v8/src/hydrogen-dehoist.cc34
-rw-r--r--chromium/v8/src/hydrogen-dehoist.h29
-rw-r--r--chromium/v8/src/hydrogen-environment-liveness.cc49
-rw-r--r--chromium/v8/src/hydrogen-environment-liveness.h32
-rw-r--r--chromium/v8/src/hydrogen-escape-analysis.cc59
-rw-r--r--chromium/v8/src/hydrogen-escape-analysis.h33
-rw-r--r--chromium/v8/src/hydrogen-flow-engine.h75
-rw-r--r--chromium/v8/src/hydrogen-gvn.cc638
-rw-r--r--chromium/v8/src/hydrogen-gvn.h143
-rw-r--r--chromium/v8/src/hydrogen-infer-representation.cc31
-rw-r--r--chromium/v8/src/hydrogen-infer-representation.h29
-rw-r--r--chromium/v8/src/hydrogen-infer-types.cc29
-rw-r--r--chromium/v8/src/hydrogen-infer-types.h29
-rw-r--r--chromium/v8/src/hydrogen-instructions.cc1456
-rw-r--r--chromium/v8/src/hydrogen-instructions.h2546
-rw-r--r--chromium/v8/src/hydrogen-load-elimination.cc177
-rw-r--r--chromium/v8/src/hydrogen-load-elimination.h29
-rw-r--r--chromium/v8/src/hydrogen-mark-deoptimize.cc29
-rw-r--r--chromium/v8/src/hydrogen-mark-deoptimize.h29
-rw-r--r--chromium/v8/src/hydrogen-mark-unreachable.cc29
-rw-r--r--chromium/v8/src/hydrogen-mark-unreachable.h29
-rw-r--r--chromium/v8/src/hydrogen-minus-zero.cc91
-rw-r--r--chromium/v8/src/hydrogen-minus-zero.h56
-rw-r--r--chromium/v8/src/hydrogen-osr.cc33
-rw-r--r--chromium/v8/src/hydrogen-osr.h33
-rw-r--r--chromium/v8/src/hydrogen-range-analysis.cc146
-rw-r--r--chromium/v8/src/hydrogen-range-analysis.h45
-rw-r--r--chromium/v8/src/hydrogen-redundant-phi.cc29
-rw-r--r--chromium/v8/src/hydrogen-redundant-phi.h29
-rw-r--r--chromium/v8/src/hydrogen-removable-simulates.cc246
-rw-r--r--chromium/v8/src/hydrogen-removable-simulates.h29
-rw-r--r--chromium/v8/src/hydrogen-representation-changes.cc80
-rw-r--r--chromium/v8/src/hydrogen-representation-changes.h29
-rw-r--r--chromium/v8/src/hydrogen-sce.cc33
-rw-r--r--chromium/v8/src/hydrogen-sce.h29
-rw-r--r--chromium/v8/src/hydrogen-store-elimination.cc121
-rw-r--r--chromium/v8/src/hydrogen-store-elimination.h34
-rw-r--r--chromium/v8/src/hydrogen-types.cc67
-rw-r--r--chromium/v8/src/hydrogen-types.h87
-rw-r--r--chromium/v8/src/hydrogen-uint32-analysis.cc64
-rw-r--r--chromium/v8/src/hydrogen-uint32-analysis.h29
-rw-r--r--chromium/v8/src/hydrogen.cc6099
-rw-r--r--chromium/v8/src/hydrogen.h752
-rw-r--r--chromium/v8/src/i18n.cc413
-rw-r--r--chromium/v8/src/i18n.h52
-rw-r--r--chromium/v8/src/i18n.js246
-rw-r--r--chromium/v8/src/ia32/assembler-ia32-inl.h95
-rw-r--r--chromium/v8/src/ia32/assembler-ia32.cc205
-rw-r--r--chromium/v8/src/ia32/assembler-ia32.h235
-rw-r--r--chromium/v8/src/ia32/builtins-ia32.cc380
-rw-r--r--chromium/v8/src/ia32/code-stubs-ia32.cc2816
-rw-r--r--chromium/v8/src/ia32/code-stubs-ia32.h152
-rw-r--r--chromium/v8/src/ia32/codegen-ia32.cc758
-rw-r--r--chromium/v8/src/ia32/codegen-ia32.h68
-rw-r--r--chromium/v8/src/ia32/cpu-ia32.cc45
-rw-r--r--chromium/v8/src/ia32/debug-ia32.cc132
-rw-r--r--chromium/v8/src/ia32/deoptimizer-ia32.cc133
-rw-r--r--chromium/v8/src/ia32/disasm-ia32.cc114
-rw-r--r--chromium/v8/src/ia32/frames-ia32.cc53
-rw-r--r--chromium/v8/src/ia32/frames-ia32.h33
-rw-r--r--chromium/v8/src/ia32/full-codegen-ia32.cc1080
-rw-r--r--chromium/v8/src/ia32/ic-ia32.cc451
-rw-r--r--chromium/v8/src/ia32/lithium-codegen-ia32.cc2790
-rw-r--r--chromium/v8/src/ia32/lithium-codegen-ia32.h169
-rw-r--r--chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc121
-rw-r--r--chromium/v8/src/ia32/lithium-gap-resolver-ia32.h31
-rw-r--r--chromium/v8/src/ia32/lithium-ia32.cc1072
-rw-r--r--chromium/v8/src/ia32/lithium-ia32.h731
-rw-r--r--chromium/v8/src/ia32/macro-assembler-ia32.cc765
-rw-r--r--chromium/v8/src/ia32/macro-assembler-ia32.h180
-rw-r--r--chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc52
-rw-r--r--chromium/v8/src/ia32/regexp-macro-assembler-ia32.h33
-rw-r--r--chromium/v8/src/ia32/simulator-ia32.cc27
-rw-r--r--chromium/v8/src/ia32/simulator-ia32.h32
-rw-r--r--chromium/v8/src/ia32/stub-cache-ia32.cc1927
-rw-r--r--chromium/v8/src/ic-inl.h122
-rw-r--r--chromium/v8/src/ic.cc2051
-rw-r--r--chromium/v8/src/ic.h656
-rw-r--r--chromium/v8/src/icu_util.cc91
-rw-r--r--chromium/v8/src/icu_util.h29
-rw-r--r--chromium/v8/src/incremental-marking-inl.h33
-rw-r--r--chromium/v8/src/incremental-marking.cc121
-rw-r--r--chromium/v8/src/incremental-marking.h39
-rw-r--r--chromium/v8/src/interface.cc42
-rw-r--r--chromium/v8/src/interface.h29
-rw-r--r--chromium/v8/src/interpreter-irregexp.cc65
-rw-r--r--chromium/v8/src/interpreter-irregexp.h27
-rw-r--r--chromium/v8/src/isolate-inl.h58
-rw-r--r--chromium/v8/src/isolate.cc978
-rw-r--r--chromium/v8/src/isolate.h568
-rw-r--r--chromium/v8/src/json-parser.h95
-rw-r--r--chromium/v8/src/json-stringifier.h288
-rw-r--r--chromium/v8/src/json.js51
-rw-r--r--chromium/v8/src/jsregexp-inl.h37
-rw-r--r--chromium/v8/src/jsregexp.cc226
-rw-r--r--chromium/v8/src/jsregexp.h74
-rw-r--r--chromium/v8/src/libplatform/DEPS6
-rw-r--r--chromium/v8/src/libplatform/default-platform.cc70
-rw-r--r--chromium/v8/src/libplatform/default-platform.h53
-rw-r--r--chromium/v8/src/libplatform/task-queue.cc57
-rw-r--r--chromium/v8/src/libplatform/task-queue.h47
-rw-r--r--chromium/v8/src/libplatform/worker-thread.cc31
-rw-r--r--chromium/v8/src/libplatform/worker-thread.h38
-rw-r--r--chromium/v8/src/list-inl.h34
-rw-r--r--chromium/v8/src/list.h47
-rw-r--r--chromium/v8/src/lithium-allocator-inl.h41
-rw-r--r--chromium/v8/src/lithium-allocator.cc68
-rw-r--r--chromium/v8/src/lithium-allocator.h39
-rw-r--r--chromium/v8/src/lithium-codegen.cc169
-rw-r--r--chromium/v8/src/lithium-codegen.h44
-rw-r--r--chromium/v8/src/lithium.cc267
-rw-r--r--chromium/v8/src/lithium.h256
-rw-r--r--chromium/v8/src/liveedit-debugger.js31
-rw-r--r--chromium/v8/src/liveedit.cc794
-rw-r--r--chromium/v8/src/liveedit.h283
-rw-r--r--chromium/v8/src/log-inl.h29
-rw-r--r--chromium/v8/src/log-utils.cc42
-rw-r--r--chromium/v8/src/log-utils.h37
-rw-r--r--chromium/v8/src/log.cc321
-rw-r--r--chromium/v8/src/log.h68
-rw-r--r--chromium/v8/src/lookup.cc200
-rw-r--r--chromium/v8/src/lookup.h183
-rw-r--r--chromium/v8/src/macro-assembler.h86
-rw-r--r--chromium/v8/src/macros.py29
-rw-r--r--chromium/v8/src/mark-compact-inl.h39
-rw-r--r--chromium/v8/src/mark-compact.cc861
-rw-r--r--chromium/v8/src/mark-compact.h131
-rw-r--r--chromium/v8/src/math.js113
-rw-r--r--chromium/v8/src/messages.cc79
-rw-r--r--chromium/v8/src/messages.h30
-rw-r--r--chromium/v8/src/messages.js175
-rw-r--r--chromium/v8/src/mips/OWNERS3
-rw-r--r--chromium/v8/src/mips/assembler-mips-inl.h75
-rw-r--r--chromium/v8/src/mips/assembler-mips.cc177
-rw-r--r--chromium/v8/src/mips/assembler-mips.h140
-rw-r--r--chromium/v8/src/mips/builtins-mips.cc409
-rw-r--r--chromium/v8/src/mips/code-stubs-mips.cc2721
-rw-r--r--chromium/v8/src/mips/code-stubs-mips.h162
-rw-r--r--chromium/v8/src/mips/codegen-mips.cc705
-rw-r--r--chromium/v8/src/mips/codegen-mips.h72
-rw-r--r--chromium/v8/src/mips/constants-mips.cc57
-rw-r--r--chromium/v8/src/mips/constants-mips.h65
-rw-r--r--chromium/v8/src/mips/cpu-mips.cc45
-rw-r--r--chromium/v8/src/mips/debug-mips.cc123
-rw-r--r--chromium/v8/src/mips/deoptimizer-mips.cc87
-rw-r--r--chromium/v8/src/mips/disasm-mips.cc84
-rw-r--r--chromium/v8/src/mips/frames-mips.cc54
-rw-r--r--chromium/v8/src/mips/frames-mips.h36
-rw-r--r--chromium/v8/src/mips/full-codegen-mips.cc1058
-rw-r--r--chromium/v8/src/mips/ic-mips.cc416
-rw-r--r--chromium/v8/src/mips/lithium-codegen-mips.cc1717
-rw-r--r--chromium/v8/src/mips/lithium-codegen-mips.h104
-rw-r--r--chromium/v8/src/mips/lithium-gap-resolver-mips.cc37
-rw-r--r--chromium/v8/src/mips/lithium-gap-resolver-mips.h31
-rw-r--r--chromium/v8/src/mips/lithium-mips.cc1000
-rw-r--r--chromium/v8/src/mips/lithium-mips.h686
-rw-r--r--chromium/v8/src/mips/macro-assembler-mips.cc768
-rw-r--r--chromium/v8/src/mips/macro-assembler-mips.h198
-rw-r--r--chromium/v8/src/mips/regexp-macro-assembler-mips.cc52
-rw-r--r--chromium/v8/src/mips/regexp-macro-assembler-mips.h37
-rw-r--r--chromium/v8/src/mips/simulator-mips.cc236
-rw-r--r--chromium/v8/src/mips/simulator-mips.h58
-rw-r--r--chromium/v8/src/mips/stub-cache-mips.cc1807
-rw-r--r--chromium/v8/src/mirror-debugger.js286
-rw-r--r--chromium/v8/src/misc-intrinsics.h31
-rw-r--r--chromium/v8/src/mksnapshot.cc493
-rw-r--r--chromium/v8/src/msan.h37
-rw-r--r--chromium/v8/src/natives.h27
-rw-r--r--chromium/v8/src/object-observe.js323
-rw-r--r--chromium/v8/src/objects-debug.cc307
-rw-r--r--chromium/v8/src/objects-inl.h2794
-rw-r--r--chromium/v8/src/objects-printer.cc425
-rw-r--r--chromium/v8/src/objects-visiting-inl.h153
-rw-r--r--chromium/v8/src/objects-visiting.cc338
-rw-r--r--chromium/v8/src/objects-visiting.h64
-rw-r--r--chromium/v8/src/objects.cc11095
-rw-r--r--chromium/v8/src/objects.h4437
-rw-r--r--chromium/v8/src/once.cc77
-rw-r--r--chromium/v8/src/optimizing-compiler-thread.cc133
-rw-r--r--chromium/v8/src/optimizing-compiler-thread.h70
-rw-r--r--chromium/v8/src/parser.cc3196
-rw-r--r--chromium/v8/src/parser.h631
-rw-r--r--chromium/v8/src/platform-cygwin.cc61
-rw-r--r--chromium/v8/src/platform-freebsd.cc58
-rw-r--r--chromium/v8/src/platform-linux.cc75
-rw-r--r--chromium/v8/src/platform-macos.cc58
-rw-r--r--chromium/v8/src/platform-openbsd.cc59
-rw-r--r--chromium/v8/src/platform-posix.cc260
-rw-r--r--chromium/v8/src/platform-qnx.cc373
-rw-r--r--chromium/v8/src/platform-solaris.cc91
-rw-r--r--chromium/v8/src/platform-win32.cc490
-rw-r--r--chromium/v8/src/platform.h182
-rw-r--r--chromium/v8/src/platform/condition-variable.cc41
-rw-r--r--chromium/v8/src/platform/condition-variable.h36
-rw-r--r--chromium/v8/src/platform/elapsed-timer.h31
-rw-r--r--chromium/v8/src/platform/mutex.cc35
-rw-r--r--chromium/v8/src/platform/mutex.h44
-rw-r--r--chromium/v8/src/platform/semaphore.cc37
-rw-r--r--chromium/v8/src/platform/semaphore.h35
-rw-r--r--chromium/v8/src/platform/socket.cc224
-rw-r--r--chromium/v8/src/platform/socket.h101
-rw-r--r--chromium/v8/src/platform/time.cc61
-rw-r--r--chromium/v8/src/platform/time.h31
-rw-r--r--chromium/v8/src/preparse-data-format.h37
-rw-r--r--chromium/v8/src/preparse-data.cc152
-rw-r--r--chromium/v8/src/preparse-data.h255
-rw-r--r--chromium/v8/src/preparser.cc1207
-rw-r--r--chromium/v8/src/preparser.h2275
-rw-r--r--chromium/v8/src/prettyprinter.cc73
-rw-r--r--chromium/v8/src/prettyprinter.h37
-rw-r--r--chromium/v8/src/profile-generator-inl.h31
-rw-r--r--chromium/v8/src/profile-generator.cc88
-rw-r--r--chromium/v8/src/profile-generator.h55
-rw-r--r--chromium/v8/src/promise.js540
-rw-r--r--chromium/v8/src/property-details-inl.h39
-rw-r--r--chromium/v8/src/property-details.h75
-rw-r--r--chromium/v8/src/property.cc71
-rw-r--r--chromium/v8/src/property.h407
-rw-r--r--chromium/v8/src/proxy.js30
-rw-r--r--chromium/v8/src/qnx-math.h19
-rw-r--r--chromium/v8/src/regexp-macro-assembler-irregexp-inl.h33
-rw-r--r--chromium/v8/src/regexp-macro-assembler-irregexp.cc45
-rw-r--r--chromium/v8/src/regexp-macro-assembler-irregexp.h29
-rw-r--r--chromium/v8/src/regexp-macro-assembler-tracer.cc46
-rw-r--r--chromium/v8/src/regexp-macro-assembler-tracer.h27
-rw-r--r--chromium/v8/src/regexp-macro-assembler.cc41
-rw-r--r--chromium/v8/src/regexp-macro-assembler.h31
-rw-r--r--chromium/v8/src/regexp-stack.cc46
-rw-r--r--chromium/v8/src/regexp-stack.h27
-rw-r--r--chromium/v8/src/regexp.js65
-rw-r--r--chromium/v8/src/rewriter.cc52
-rw-r--r--chromium/v8/src/rewriter.h27
-rw-r--r--chromium/v8/src/runtime-profiler.cc290
-rw-r--r--chromium/v8/src/runtime-profiler.h59
-rw-r--r--chromium/v8/src/runtime.cc7458
-rw-r--r--chromium/v8/src/runtime.h442
-rw-r--r--chromium/v8/src/runtime.js44
-rw-r--r--chromium/v8/src/safepoint-table.cc45
-rw-r--r--chromium/v8/src/safepoint-table.h35
-rw-r--r--chromium/v8/src/sampler.cc134
-rw-r--r--chromium/v8/src/sampler.h50
-rw-r--r--chromium/v8/src/scanner-character-streams.cc45
-rw-r--r--chromium/v8/src/scanner-character-streams.h30
-rw-r--r--chromium/v8/src/scanner.cc173
-rw-r--r--chromium/v8/src/scanner.h258
-rw-r--r--chromium/v8/src/scopeinfo.cc140
-rw-r--r--chromium/v8/src/scopeinfo.h41
-rw-r--r--chromium/v8/src/scopes.cc127
-rw-r--r--chromium/v8/src/scopes.h73
-rw-r--r--chromium/v8/src/serialize.cc307
-rw-r--r--chromium/v8/src/serialize.h83
-rw-r--r--chromium/v8/src/simulator.h39
-rw-r--r--chromium/v8/src/small-pointer-list.h33
-rw-r--r--chromium/v8/src/smart-pointers.h74
-rw-r--r--chromium/v8/src/snapshot-common.cc39
-rw-r--r--chromium/v8/src/snapshot-empty.cc31
-rw-r--r--chromium/v8/src/snapshot.h29
-rw-r--r--chromium/v8/src/spaces-inl.h58
-rw-r--r--chromium/v8/src/spaces.cc405
-rw-r--r--chromium/v8/src/spaces.h318
-rw-r--r--chromium/v8/src/splay-tree-inl.h29
-rw-r--r--chromium/v8/src/splay-tree.h29
-rw-r--r--chromium/v8/src/store-buffer-inl.h29
-rw-r--r--chromium/v8/src/store-buffer.cc187
-rw-r--r--chromium/v8/src/store-buffer.h44
-rw-r--r--chromium/v8/src/string-search.cc31
-rw-r--r--chromium/v8/src/string-search.h27
-rw-r--r--chromium/v8/src/string-stream.cc53
-rw-r--r--chromium/v8/src/string-stream.h59
-rw-r--r--chromium/v8/src/string.js244
-rw-r--r--chromium/v8/src/strtod.cc42
-rw-r--r--chromium/v8/src/strtod.h27
-rw-r--r--chromium/v8/src/stub-cache.cc1104
-rw-r--r--chromium/v8/src/stub-cache.h461
-rw-r--r--chromium/v8/src/sweeper-thread.cc82
-rw-r--r--chromium/v8/src/sweeper-thread.h47
-rw-r--r--chromium/v8/src/symbol.js137
-rw-r--r--chromium/v8/src/third_party/valgrind/valgrind.h44
-rw-r--r--chromium/v8/src/third_party/vtune/jitprofiling.cc40
-rw-r--r--chromium/v8/src/third_party/vtune/jitprofiling.h92
-rw-r--r--chromium/v8/src/third_party/vtune/v8-vtune.h22
-rw-r--r--chromium/v8/src/third_party/vtune/vtune-jit.cc15
-rw-r--r--chromium/v8/src/third_party/vtune/vtune-jit.h22
-rw-r--r--chromium/v8/src/token.cc33
-rw-r--r--chromium/v8/src/token.h31
-rw-r--r--chromium/v8/src/transitions-inl.h40
-rw-r--r--chromium/v8/src/transitions.cc168
-rw-r--r--chromium/v8/src/transitions.h79
-rw-r--r--chromium/v8/src/trig-table.h27
-rw-r--r--chromium/v8/src/type-info.cc352
-rw-r--r--chromium/v8/src/type-info.h270
-rw-r--r--chromium/v8/src/typedarray.js359
-rw-r--r--chromium/v8/src/types-inl.h336
-rw-r--r--chromium/v8/src/types.cc1143
-rw-r--r--chromium/v8/src/types.h1012
-rw-r--r--chromium/v8/src/typing.cc249
-rw-r--r--chromium/v8/src/typing.h65
-rw-r--r--chromium/v8/src/unbound-queue-inl.h45
-rw-r--r--chromium/v8/src/unbound-queue.h34
-rw-r--r--chromium/v8/src/unicode-inl.h57
-rw-r--r--chromium/v8/src/unicode.cc69
-rw-r--r--chromium/v8/src/unicode.h47
-rw-r--r--chromium/v8/src/unique.h103
-rw-r--r--chromium/v8/src/uri.h71
-rw-r--r--chromium/v8/src/uri.js697
-rw-r--r--chromium/v8/src/utils-inl.h29
-rw-r--r--chromium/v8/src/utils.cc365
-rw-r--r--chromium/v8/src/utils.h863
-rw-r--r--chromium/v8/src/utils/DEPS5
-rw-r--r--chromium/v8/src/utils/random-number-generator.cc64
-rw-r--r--chromium/v8/src/utils/random-number-generator.h29
-rw-r--r--chromium/v8/src/v8-counters.cc104
-rw-r--r--chromium/v8/src/v8-counters.h440
-rw-r--r--chromium/v8/src/v8.cc168
-rw-r--r--chromium/v8/src/v8.h68
-rw-r--r--chromium/v8/src/v8checks.h31
-rw-r--r--chromium/v8/src/v8conversions.cc132
-rw-r--r--chromium/v8/src/v8conversions.h95
-rw-r--r--chromium/v8/src/v8dll-main.cc31
-rw-r--r--chromium/v8/src/v8globals.h580
-rw-r--r--chromium/v8/src/v8memory.h27
-rw-r--r--chromium/v8/src/v8natives.js342
-rw-r--r--chromium/v8/src/v8threads.cc68
-rw-r--r--chromium/v8/src/v8threads.h27
-rw-r--r--chromium/v8/src/v8utils.cc276
-rw-r--r--chromium/v8/src/v8utils.h443
-rw-r--r--chromium/v8/src/variables.cc43
-rw-r--r--chromium/v8/src/variables.h41
-rw-r--r--chromium/v8/src/vector.h171
-rw-r--r--chromium/v8/src/version.cc32
-rw-r--r--chromium/v8/src/version.h27
-rw-r--r--chromium/v8/src/vm-state-inl.h36
-rw-r--r--chromium/v8/src/vm-state.h31
-rw-r--r--chromium/v8/src/weak_collection.js183
-rw-r--r--chromium/v8/src/win32-math.cc33
-rw-r--r--chromium/v8/src/win32-math.h27
-rw-r--r--chromium/v8/src/x64/assembler-x64-inl.h125
-rw-r--r--chromium/v8/src/x64/assembler-x64.cc668
-rw-r--r--chromium/v8/src/x64/assembler-x64.h887
-rw-r--r--chromium/v8/src/x64/builtins-x64.cc728
-rw-r--r--chromium/v8/src/x64/code-stubs-x64.cc3357
-rw-r--r--chromium/v8/src/x64/code-stubs-x64.h164
-rw-r--r--chromium/v8/src/x64/codegen-x64.cc261
-rw-r--r--chromium/v8/src/x64/codegen-x64.h67
-rw-r--r--chromium/v8/src/x64/cpu-x64.cc45
-rw-r--r--chromium/v8/src/x64/debug-x64.cc159
-rw-r--r--chromium/v8/src/x64/deoptimizer-x64.cc172
-rw-r--r--chromium/v8/src/x64/disasm-x64.cc113
-rw-r--r--chromium/v8/src/x64/frames-x64.cc53
-rw-r--r--chromium/v8/src/x64/frames-x64.h44
-rw-r--r--chromium/v8/src/x64/full-codegen-x64.cc1734
-rw-r--r--chromium/v8/src/x64/ic-x64.cc624
-rw-r--r--chromium/v8/src/x64/lithium-codegen-x64.cc2528
-rw-r--r--chromium/v8/src/x64/lithium-codegen-x64.h72
-rw-r--r--chromium/v8/src/x64/lithium-gap-resolver-x64.cc71
-rw-r--r--chromium/v8/src/x64/lithium-gap-resolver-x64.h31
-rw-r--r--chromium/v8/src/x64/lithium-x64.cc1122
-rw-r--r--chromium/v8/src/x64/lithium-x64.h726
-rw-r--r--chromium/v8/src/x64/macro-assembler-x64.cc2231
-rw-r--r--chromium/v8/src/x64/macro-assembler-x64.h269
-rw-r--r--chromium/v8/src/x64/regexp-macro-assembler-x64.cc402
-rw-r--r--chromium/v8/src/x64/regexp-macro-assembler-x64.h84
-rw-r--r--chromium/v8/src/x64/simulator-x64.cc27
-rw-r--r--chromium/v8/src/x64/simulator-x64.h32
-rw-r--r--chromium/v8/src/x64/stub-cache-x64.cc2052
-rw-r--r--chromium/v8/src/x87/OWNERS1
-rw-r--r--chromium/v8/src/x87/assembler-x87-inl.h561
-rw-r--r--chromium/v8/src/x87/assembler-x87.cc2028
-rw-r--r--chromium/v8/src/x87/assembler-x87.h1031
-rw-r--r--chromium/v8/src/x87/builtins-x87.cc1452
-rw-r--r--chromium/v8/src/x87/code-stubs-x87.cc4734
-rw-r--r--chromium/v8/src/x87/code-stubs-x87.h413
-rw-r--r--chromium/v8/src/x87/codegen-x87.cc632
-rw-r--r--chromium/v8/src/x87/codegen-x87.h33
-rw-r--r--chromium/v8/src/x87/cpu-x87.cc44
-rw-r--r--chromium/v8/src/x87/debug-x87.cc336
-rw-r--r--chromium/v8/src/x87/deoptimizer-x87.cc406
-rw-r--r--chromium/v8/src/x87/disasm-x87.cc1757
-rw-r--r--chromium/v8/src/x87/frames-x87.cc42
-rw-r--r--chromium/v8/src/x87/frames-x87.h125
-rw-r--r--chromium/v8/src/x87/full-codegen-x87.cc4791
-rw-r--r--chromium/v8/src/x87/ic-x87.cc1290
-rw-r--r--chromium/v8/src/x87/lithium-codegen-x87.cc5707
-rw-r--r--chromium/v8/src/x87/lithium-codegen-x87.h504
-rw-r--r--chromium/v8/src/x87/lithium-gap-resolver-x87.cc445
-rw-r--r--chromium/v8/src/x87/lithium-gap-resolver-x87.h87
-rw-r--r--chromium/v8/src/x87/lithium-x87.cc2660
-rw-r--r--chromium/v8/src/x87/lithium-x87.h2888
-rw-r--r--chromium/v8/src/x87/macro-assembler-x87.cc3301
-rw-r--r--chromium/v8/src/x87/macro-assembler-x87.h1090
-rw-r--r--chromium/v8/src/x87/regexp-macro-assembler-x87.cc1309
-rw-r--r--chromium/v8/src/x87/regexp-macro-assembler-x87.h200
-rw-r--r--chromium/v8/src/x87/simulator-x87.cc6
-rw-r--r--chromium/v8/src/x87/simulator-x87.h48
-rw-r--r--chromium/v8/src/x87/stub-cache-x87.cc1493
-rw-r--r--chromium/v8/src/zone-allocator.h67
-rw-r--r--chromium/v8/src/zone-containers.h23
-rw-r--r--chromium/v8/src/zone-inl.h65
-rw-r--r--chromium/v8/src/zone.cc37
-rw-r--r--chromium/v8/src/zone.h46
666 files changed, 185635 insertions, 102689 deletions
diff --git a/chromium/v8/src/DEPS b/chromium/v8/src/DEPS
new file mode 100644
index 00000000000..4196627416f
--- /dev/null
+++ b/chromium/v8/src/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+  "+src",
+
+  # TODO(jochen): Enable this.
+  #"-src/libplatform",
+]
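Context on the syntax (not part of the change itself): in Chromium's checkdeps convention, a "+path" rule permits #include directives for headers under that path, while a "-path" rule forbids them, so un-commenting "-src/libplatform" would ban direct includes of the platform layer from the rest of src/.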
diff --git a/chromium/v8/src/accessors.cc b/chromium/v8/src/accessors.cc
index 4da9dd44ffe..54bd241b8d6 100644
--- a/chromium/v8/src/accessors.cc
+++ b/chromium/v8/src/accessors.cc
@@ -1,88 +1,84 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "accessors.h"
-
-#include "contexts.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "factory.h"
-#include "frames-inl.h"
-#include "isolate.h"
-#include "list-inl.h"
-#include "property-details.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "src/accessors.h"
+
+#include "src/compiler.h"
+#include "src/contexts.h"
+#include "src/deoptimizer.h"
+#include "src/execution.h"
+#include "src/factory.h"
+#include "src/frames-inl.h"
+#include "src/isolate.h"
+#include "src/list-inl.h"
+#include "src/property-details.h"
+#include "src/api.h"
namespace v8 {
namespace internal {
-template <class C>
-static C* FindInstanceOf(Isolate* isolate, Object* obj) {
- for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) {
- if (Is<C>(cur)) return C::cast(cur);
- }
- return NULL;
+// We have a slight impedance mismatch between the external API and the way we
+// use callbacks internally: Externally, callbacks can only be used with
+// v8::Object, but internally we even have callbacks on entities which are
+// higher in the hierarchy, so we can only return i::Object here, not
+// i::JSObject.
+Handle<Object> GetThisFrom(const v8::PropertyCallbackInfo<v8::Value>& info) {
+ return Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
}
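The array and string length getters added below fetch their receiver through this helper; the Script accessors, whose receiver is always a JSValue wrapper, open info.This() directly instead.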
-// Entry point that never should be called.
-MaybeObject* Accessors::IllegalSetter(Isolate* isolate,
- JSObject*,
- Object*,
- void*) {
- UNREACHABLE();
- return NULL;
+Handle<AccessorInfo> Accessors::MakeAccessor(
+ Isolate* isolate,
+ Handle<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ PropertyAttributes attributes) {
+ Factory* factory = isolate->factory();
+ Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
+ info->set_property_attributes(attributes);
+ info->set_all_can_read(false);
+ info->set_all_can_write(false);
+ info->set_name(*name);
+ Handle<Object> get = v8::FromCData(isolate, getter);
+ Handle<Object> set = v8::FromCData(isolate, setter);
+ info->set_getter(*get);
+ info->set_setter(*set);
+ return info;
}
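A usage sketch of this factory (illustrative, but it mirrors Accessors::ArrayLengthInfo exactly as added later in this same diff -- only the local variable name is invented):

    Handle<AccessorInfo> length_accessor = Accessors::MakeAccessor(
        isolate,
        isolate->factory()->length_string(),
        &Accessors::ArrayLengthGetter,   // getter callback added below
        &Accessors::ArrayLengthSetter,   // setter callback added below
        attributes);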
-Object* Accessors::IllegalGetAccessor(Isolate* isolate,
- Object* object,
- void*) {
- UNREACHABLE();
- return object;
+Handle<ExecutableAccessorInfo> Accessors::CloneAccessor(
+ Isolate* isolate,
+ Handle<ExecutableAccessorInfo> accessor) {
+ Factory* factory = isolate->factory();
+ Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
+ info->set_name(accessor->name());
+ info->set_flag(accessor->flag());
+ info->set_expected_receiver_type(accessor->expected_receiver_type());
+ info->set_getter(accessor->getter());
+ info->set_setter(accessor->setter());
+ info->set_data(accessor->data());
+ return info;
}
-MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate,
- JSObject*,
- Object* value,
- void*) {
- // According to ECMA-262, section 8.6.2.2, page 28, setting
- // read-only properties must be silently ignored.
- return value;
+template <class C>
+static C* FindInstanceOf(Isolate* isolate, Object* obj) {
+ for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) {
+ if (Is<C>(cur)) return C::cast(cur);
+ }
+ return NULL;
}
static V8_INLINE bool CheckForName(Handle<String> name,
- String* property_name,
+ Handle<String> property_name,
int offset,
int* object_offset) {
- if (name->Equals(property_name)) {
+ if (String::Equals(name, property_name)) {
*object_offset = offset;
return true;
}
@@ -90,63 +86,68 @@ static V8_INLINE bool CheckForName(Handle<String> name,
}
-bool Accessors::IsJSObjectFieldAccessor(
- Handle<Map> map, Handle<String> name,
- int* object_offset) {
- Isolate* isolate = map->GetIsolate();
+// Returns true for properties that are accessors to object fields.
+// If true, *object_offset contains offset of object field.
+template <class T>
+bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
+ Handle<String> name,
+ int* object_offset) {
+ Isolate* isolate = name->GetIsolate();
+
+ if (type->Is(T::String())) {
+ return CheckForName(name, isolate->factory()->length_string(),
+ String::kLengthOffset, object_offset);
+ }
+
+ if (!type->IsClass()) return false;
+ Handle<Map> map = type->AsClass()->Map();
+
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
return
- CheckForName(name, isolate->heap()->length_string(),
+ CheckForName(name, isolate->factory()->length_string(),
JSArray::kLengthOffset, object_offset);
case JS_TYPED_ARRAY_TYPE:
return
- CheckForName(name, isolate->heap()->length_string(),
+ CheckForName(name, isolate->factory()->length_string(),
JSTypedArray::kLengthOffset, object_offset) ||
- CheckForName(name, isolate->heap()->byte_length_string(),
+ CheckForName(name, isolate->factory()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) ||
- CheckForName(name, isolate->heap()->byte_offset_string(),
- JSTypedArray::kByteOffsetOffset, object_offset) ||
- CheckForName(name, isolate->heap()->buffer_string(),
- JSTypedArray::kBufferOffset, object_offset);
+ CheckForName(name, isolate->factory()->byte_offset_string(),
+ JSTypedArray::kByteOffsetOffset, object_offset);
case JS_ARRAY_BUFFER_TYPE:
return
- CheckForName(name, isolate->heap()->byte_length_string(),
+ CheckForName(name, isolate->factory()->byte_length_string(),
JSArrayBuffer::kByteLengthOffset, object_offset);
case JS_DATA_VIEW_TYPE:
return
- CheckForName(name, isolate->heap()->byte_length_string(),
+ CheckForName(name, isolate->factory()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
- CheckForName(name, isolate->heap()->byte_offset_string(),
- JSDataView::kByteOffsetOffset, object_offset) ||
- CheckForName(name, isolate->heap()->buffer_string(),
- JSDataView::kBufferOffset, object_offset);
- default: {
- if (map->instance_type() < FIRST_NONSTRING_TYPE) {
- return
- CheckForName(name, isolate->heap()->length_string(),
- String::kLengthOffset, object_offset);
- }
+ CheckForName(name, isolate->factory()->byte_offset_string(),
+ JSDataView::kByteOffsetOffset, object_offset);
+ default:
return false;
- }
}
}
+template
+bool Accessors::IsJSObjectFieldAccessor<Type>(Type* type,
+ Handle<String> name,
+ int* object_offset);
+
+
+template
+bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type,
+ Handle<String> name,
+ int* object_offset);
+
+
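A hedged sketch of how a caller consumes this predicate (variable names are illustrative, not from the patch; V8's optimizing compiler uses it in essentially this way to inline loads of 'length'-style properties):

    int object_offset;
    if (Accessors::IsJSObjectFieldAccessor<HeapType>(type, name,
                                                     &object_offset)) {
      // 'name' is backed by a plain in-object field, so the property load
      // can be compiled as a read at 'object_offset' instead of invoking
      // the accessor callback.
    }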
//
// Accessors::ArrayLength
//
-MaybeObject* Accessors::ArrayGetLength(Isolate* isolate,
- Object* object,
- void*) {
- // Traverse the prototype chain until we reach an array.
- JSArray* holder = FindInstanceOf<JSArray>(isolate, object);
- return holder == NULL ? Smi::FromInt(0) : holder->length();
-}
-
-
// The helper function will 'flatten' Number objects.
Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
Handle<Object> value) {
@@ -163,114 +164,162 @@ Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
}
-MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
- JSObject* object_raw,
- Object* value_raw,
- void*) {
+void Accessors::ArrayLengthGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Handle<JSObject> object(object_raw, isolate);
- Handle<Object> value(value_raw, isolate);
+ Object* object = *GetThisFrom(info);
+ // Traverse the prototype chain until we reach an array.
+ JSArray* holder = FindInstanceOf<JSArray>(isolate, object);
+ Object* result;
+ if (holder != NULL) {
+ result = holder->length();
+ } else {
+ result = Smi::FromInt(0);
+ }
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
+}
+
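Note the shape of the getter above, which every accessor in this file now repeats: reinterpret_cast the public v8::Isolate back to i::Isolate, pin the heap with DisallowHeapAllocation while raw Object* pointers are live, open a HandleScope, read the underlying field, and hand the result back through Utils::ToLocal into info.GetReturnValue(). Only the field read in the middle differs from accessor to accessor.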
+void Accessors::ArrayLengthSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<void>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<JSObject> object = Handle<JSObject>::cast(
+ Utils::OpenHandle(*info.This()));
+ Handle<Object> value = Utils::OpenHandle(*val);
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
- isolate->factory()->length_string(), value, NONE);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ MaybeHandle<Object> maybe_result =
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ object, isolate->factory()->length_string(), value, NONE);
+ maybe_result.Check();
+ return;
}
value = FlattenNumber(isolate, value);
Handle<JSArray> array_handle = Handle<JSArray>::cast(object);
-
- bool has_exception;
- Handle<Object> uint32_v =
- Execution::ToUint32(isolate, value, &has_exception);
- if (has_exception) return Failure::Exception();
- Handle<Object> number_v =
- Execution::ToNumber(isolate, value, &has_exception);
- if (has_exception) return Failure::Exception();
+ MaybeHandle<Object> maybe;
+ Handle<Object> uint32_v;
+ maybe = Execution::ToUint32(isolate, value);
+ if (!maybe.ToHandle(&uint32_v)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+ Handle<Object> number_v;
+ maybe = Execution::ToNumber(isolate, value);
+ if (!maybe.ToHandle(&number_v)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
if (uint32_v->Number() == number_v->Number()) {
- return array_handle->SetElementsLength(*uint32_v);
+ maybe = JSArray::SetElementsLength(array_handle, uint32_v);
+ maybe.Check();
+ return;
}
- return isolate->Throw(
+
+ isolate->ScheduleThrow(
*isolate->factory()->NewRangeError("invalid_array_length",
HandleVector<Object>(NULL, 0)));
}
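The comparison above implements ES5.1 §15.4.5.1: assigning to an array's length must throw a RangeError whenever ToUint32(value) and ToNumber(value) disagree. For example, a.length = 3.5 yields 3 vs. 3.5, and a.length = 4294967296 yields 0 vs. 2^32, so both throw. The behavioral difference in this rewrite is that the error is now scheduled on the isolate (ScheduleThrow) rather than returned as a Failure, matching the callback API, which cannot propagate exceptions through its return value.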
-const AccessorDescriptor Accessors::ArrayLength = {
- ArrayGetLength,
- ArraySetLength,
- 0
-};
+Handle<AccessorInfo> Accessors::ArrayLengthInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate,
+ isolate->factory()->length_string(),
+ &ArrayLengthGetter,
+ &ArrayLengthSetter,
+ attributes);
+}
+
//
// Accessors::StringLength
//
+void Accessors::StringLengthGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* value = *GetThisFrom(info);
+ Object* result;
+ if (value->IsJSValue()) value = JSValue::cast(value)->value();
+ if (value->IsString()) {
+ result = Smi::FromInt(String::cast(value)->length());
+ } else {
+ // If object is not a string we return 0 to be compatible with WebKit.
+ // Note: Firefox returns the length of ToString(object).
+ result = Smi::FromInt(0);
+ }
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
+}
+
-MaybeObject* Accessors::StringGetLength(Isolate* isolate,
- Object* object,
- void*) {
- Object* value = object;
- if (object->IsJSValue()) value = JSValue::cast(object)->value();
- if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
- // If object is not a string we return 0 to be compatible with WebKit.
- // Note: Firefox returns the length of ToString(object).
- return Smi::FromInt(0);
+void Accessors::StringLengthSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
}
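The UNREACHABLE() body takes over the role of the removed IllegalSetter entry point: string 'length' is effectively read-only, so this setter callback exists only to satisfy MakeAccessor's signature and is never expected to run.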
-const AccessorDescriptor Accessors::StringLength = {
- StringGetLength,
- IllegalSetter,
- 0
-};
+Handle<AccessorInfo> Accessors::StringLengthInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate,
+ isolate->factory()->length_string(),
+ &StringLengthGetter,
+ &StringLengthSetter,
+ attributes);
+}
//
-// Accessors::ScriptSource
+// Accessors::ScriptColumnOffset
//
-MaybeObject* Accessors::ScriptGetSource(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->source();
+void Accessors::ScriptColumnOffsetGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* object = *Utils::OpenHandle(*info.This());
+ Object* res = Script::cast(JSValue::cast(object)->value())->column_offset();
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-const AccessorDescriptor Accessors::ScriptSource = {
- ScriptGetSource,
- IllegalSetter,
- 0
-};
-
-
-//
-// Accessors::ScriptName
-//
-
-
-MaybeObject* Accessors::ScriptGetName(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->name();
+void Accessors::ScriptColumnOffsetSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
}
-const AccessorDescriptor Accessors::ScriptName = {
- ScriptGetName,
- IllegalSetter,
- 0
-};
+Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("column_offset")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptColumnOffsetGetter,
+ &ScriptColumnOffsetSetter,
+ attributes);
+}
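The remaining Script accessors in this file -- id, name, source, line_offset, type, compilation_type, line_ends, context_data, eval_from_script -- repeat this same Getter/Setter/Info triple, differing only in the property name and the Script field they read.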
//
@@ -278,77 +327,143 @@ const AccessorDescriptor Accessors::ScriptName = {
//
-MaybeObject* Accessors::ScriptGetId(Isolate* isolate, Object* object, void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->id();
+void Accessors::ScriptIdGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* object = *Utils::OpenHandle(*info.This());
+ Object* id = Script::cast(JSValue::cast(object)->value())->id();
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate)));
}
-const AccessorDescriptor Accessors::ScriptId = {
- ScriptGetId,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptIdSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptIdInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("id")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptIdGetter,
+ &ScriptIdSetter,
+ attributes);
+}
//
-// Accessors::ScriptLineOffset
+// Accessors::ScriptName
//
-MaybeObject* Accessors::ScriptGetLineOffset(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->line_offset();
+void Accessors::ScriptNameGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* object = *Utils::OpenHandle(*info.This());
+ Object* source = Script::cast(JSValue::cast(object)->value())->name();
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
-const AccessorDescriptor Accessors::ScriptLineOffset = {
- ScriptGetLineOffset,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptNameSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptNameInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate,
+ isolate->factory()->name_string(),
+ &ScriptNameGetter,
+ &ScriptNameSetter,
+ attributes);
+}
//
-// Accessors::ScriptColumnOffset
+// Accessors::ScriptSource
//
-MaybeObject* Accessors::ScriptGetColumnOffset(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->column_offset();
+void Accessors::ScriptSourceGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* object = *Utils::OpenHandle(*info.This());
+ Object* source = Script::cast(JSValue::cast(object)->value())->source();
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
-const AccessorDescriptor Accessors::ScriptColumnOffset = {
- ScriptGetColumnOffset,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptSourceSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptSourceInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate,
+ isolate->factory()->source_string(),
+ &ScriptSourceGetter,
+ &ScriptSourceSetter,
+ attributes);
+}
//
-// Accessors::ScriptData
+// Accessors::ScriptLineOffset
//
-MaybeObject* Accessors::ScriptGetData(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->data();
+void Accessors::ScriptLineOffsetGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* object = *Utils::OpenHandle(*info.This());
+ Object* res = Script::cast(JSValue::cast(object)->value())->line_offset();
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-const AccessorDescriptor Accessors::ScriptData = {
- ScriptGetData,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptLineOffsetSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("line_offset")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptLineOffsetGetter,
+ &ScriptLineOffsetSetter,
+ attributes);
+}
//
@@ -356,19 +471,36 @@ const AccessorDescriptor Accessors::ScriptData = {
//
-MaybeObject* Accessors::ScriptGetType(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->type();
+void Accessors::ScriptTypeGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* object = *Utils::OpenHandle(*info.This());
+ Object* res = Script::cast(JSValue::cast(object)->value())->type();
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-const AccessorDescriptor Accessors::ScriptType = {
- ScriptGetType,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptTypeSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptTypeInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("type")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptTypeGetter,
+ &ScriptTypeSetter,
+ attributes);
+}
//
@@ -376,19 +508,37 @@ const AccessorDescriptor Accessors::ScriptType = {
//
-MaybeObject* Accessors::ScriptGetCompilationType(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Smi::FromInt(Script::cast(script)->compilation_type());
+void Accessors::ScriptCompilationTypeGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* object = *Utils::OpenHandle(*info.This());
+ Object* res = Smi::FromInt(
+ Script::cast(JSValue::cast(object)->value())->compilation_type());
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-const AccessorDescriptor Accessors::ScriptCompilationType = {
- ScriptGetCompilationType,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptCompilationTypeSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("compilation_type")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptCompilationTypeGetter,
+ &ScriptCompilationTypeSetter,
+ attributes);
+}
//
@@ -396,13 +546,15 @@ const AccessorDescriptor Accessors::ScriptCompilationType = {
//
-MaybeObject* Accessors::ScriptGetLineEnds(Isolate* isolate,
- Object* object,
- void*) {
- JSValue* wrapper = JSValue::cast(object);
+void Accessors::ScriptLineEndsGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Script> script(Script::cast(wrapper->value()), isolate);
- InitScriptLineEnds(script);
+ Handle<Object> object = Utils::OpenHandle(*info.This());
+ Handle<Script> script(
+ Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
+ Script::InitLineEnds(script);
ASSERT(script->line_ends()->IsFixedArray());
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
// We do not want anyone to modify this array from JS.
@@ -410,15 +562,28 @@ MaybeObject* Accessors::ScriptGetLineEnds(Isolate* isolate,
line_ends->map() == isolate->heap()->fixed_cow_array_map());
Handle<JSArray> js_array =
isolate->factory()->NewJSArrayWithElements(line_ends);
- return *js_array;
+ info.GetReturnValue().Set(Utils::ToLocal(js_array));
}
-const AccessorDescriptor Accessors::ScriptLineEnds = {
- ScriptGetLineEnds,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptLineEndsSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("line_ends")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptLineEndsGetter,
+ &ScriptLineEndsSetter,
+ attributes);
+}
//
@@ -426,19 +591,36 @@ const AccessorDescriptor Accessors::ScriptLineEnds = {
//
-MaybeObject* Accessors::ScriptGetContextData(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->context_data();
+void Accessors::ScriptContextDataGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* object = *Utils::OpenHandle(*info.This());
+ Object* res = Script::cast(JSValue::cast(object)->value())->context_data();
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
-const AccessorDescriptor Accessors::ScriptContextData = {
- ScriptGetContextData,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptContextDataSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("context_data")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptContextDataGetter,
+ &ScriptContextDataSetter,
+ attributes);
+}
//
@@ -446,28 +628,46 @@ const AccessorDescriptor Accessors::ScriptContextData = {
//
-MaybeObject* Accessors::ScriptGetEvalFromScript(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- if (!Script::cast(script)->eval_from_shared()->IsUndefined()) {
+void Accessors::ScriptEvalFromScriptGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<Object> object = Utils::OpenHandle(*info.This());
+ Handle<Script> script(
+ Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
+ Handle<Object> result = isolate->factory()->undefined_value();
+ if (!script->eval_from_shared()->IsUndefined()) {
Handle<SharedFunctionInfo> eval_from_shared(
- SharedFunctionInfo::cast(Script::cast(script)->eval_from_shared()));
-
+ SharedFunctionInfo::cast(script->eval_from_shared()));
if (eval_from_shared->script()->IsScript()) {
Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
- return *GetScriptWrapper(eval_from_script);
+ result = Script::GetWrapper(eval_from_script);
}
}
- return isolate->heap()->undefined_value();
+
+ info.GetReturnValue().Set(Utils::ToLocal(result));
}
-const AccessorDescriptor Accessors::ScriptEvalFromScript = {
- ScriptGetEvalFromScript,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptEvalFromScriptSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("eval_from_script")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptEvalFromScriptGetter,
+ &ScriptEvalFromScriptSetter,
+ attributes);
+}
//
@@ -475,32 +675,45 @@ const AccessorDescriptor Accessors::ScriptEvalFromScript = {
//
-MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Isolate* isolate,
- Object* object,
- void*) {
- Script* raw_script = Script::cast(JSValue::cast(object)->value());
+void Accessors::ScriptEvalFromScriptPositionGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Script> script(raw_script);
-
- // If this is not a script compiled through eval there is no eval position.
- if (script->compilation_type() != Script::COMPILATION_TYPE_EVAL) {
- return script->GetHeap()->undefined_value();
+ Handle<Object> object = Utils::OpenHandle(*info.This());
+ Handle<Script> script(
+ Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
+ Handle<Object> result = isolate->factory()->undefined_value();
+ if (script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
+ Handle<Code> code(SharedFunctionInfo::cast(
+ script->eval_from_shared())->code());
+ result = Handle<Object>(
+ Smi::FromInt(code->SourcePosition(code->instruction_start() +
+ script->eval_from_instructions_offset()->value())),
+ isolate);
}
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
- // Get the function from where eval was called and find the source position
- // from the instruction offset.
- Handle<Code> code(SharedFunctionInfo::cast(
- script->eval_from_shared())->code());
- return Smi::FromInt(code->SourcePosition(code->instruction_start() +
- script->eval_from_instructions_offset()->value()));
+void Accessors::ScriptEvalFromScriptPositionSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
}
-const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
- ScriptGetEvalFromScriptPosition,
- IllegalSetter,
- 0
-};
+Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("eval_from_script_position")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptEvalFromScriptPositionGetter,
+ &ScriptEvalFromScriptPositionSetter,
+ attributes);
+}
//
@@ -508,103 +721,96 @@ const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
//
-MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(
- Script::cast(script)->eval_from_shared()));
-
-
+void Accessors::ScriptEvalFromFunctionNameGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<Object> object = Utils::OpenHandle(*info.This());
+ Handle<Script> script(
+ Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
+ Handle<Object> result;
+ Handle<SharedFunctionInfo> shared(
+ SharedFunctionInfo::cast(script->eval_from_shared()));
// Find the name of the function calling eval.
if (!shared->name()->IsUndefined()) {
- return shared->name();
+ result = Handle<Object>(shared->name(), isolate);
} else {
- return shared->inferred_name();
+ result = Handle<Object>(shared->inferred_name(), isolate);
}
+ info.GetReturnValue().Set(Utils::ToLocal(result));
}
-const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
- ScriptGetEvalFromFunctionName,
- IllegalSetter,
- 0
-};
+void Accessors::ScriptEvalFromFunctionNameSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("eval_from_function_name")));
+ return MakeAccessor(isolate,
+ name,
+ &ScriptEvalFromFunctionNameGetter,
+ &ScriptEvalFromFunctionNameSetter,
+ attributes);
+}
//
// Accessors::FunctionPrototype
//
+static Handle<Object> GetFunctionPrototype(Isolate* isolate,
+ Handle<Object> receiver) {
+ Handle<JSFunction> function;
+ {
+ DisallowHeapAllocation no_allocation;
+ JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, *receiver);
+ if (function_raw == NULL) return isolate->factory()->undefined_value();
+ while (!function_raw->should_have_prototype()) {
+ function_raw = FindInstanceOf<JSFunction>(isolate,
+ function_raw->GetPrototype());
+ // There has to be one because we hit the getter.
+ ASSERT(function_raw != NULL);
+ }
+ function = Handle<JSFunction>(function_raw, isolate);
+ }
-Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(function->GetIsolate(),
- Accessors::FunctionGetPrototype(function->GetIsolate(),
- *function,
- NULL),
- Object);
-}
-
-
-Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype) {
- ASSERT(function->should_have_prototype());
- CALL_HEAP_FUNCTION(function->GetIsolate(),
- Accessors::FunctionSetPrototype(function->GetIsolate(),
- *function,
- *prototype,
- NULL),
- Object);
-}
-
-
-MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate,
- Object* object,
- void*) {
- JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
- if (function_raw == NULL) return isolate->heap()->undefined_value();
- while (!function_raw->should_have_prototype()) {
- function_raw = FindInstanceOf<JSFunction>(isolate,
- function_raw->GetPrototype());
- // There has to be one because we hit the getter.
- ASSERT(function_raw != NULL);
- }
-
- if (!function_raw->has_prototype()) {
- HandleScope scope(isolate);
- Handle<JSFunction> function(function_raw);
+ if (!function->has_prototype()) {
Handle<Object> proto = isolate->factory()->NewFunctionPrototype(function);
JSFunction::SetPrototype(function, proto);
- function_raw = *function;
}
- return function_raw->prototype();
+ return Handle<Object>(function->prototype(), isolate);
}
-MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
- JSObject* object_raw,
- Object* value_raw,
- void*) {
- JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object_raw);
- if (function_raw == NULL) return isolate->heap()->undefined_value();
+static Handle<Object> SetFunctionPrototype(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value) {
+ Handle<JSFunction> function;
+ {
+ DisallowHeapAllocation no_allocation;
+ JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, *receiver);
+ if (function_raw == NULL) return isolate->factory()->undefined_value();
+ function = Handle<JSFunction>(function_raw, isolate);
+ }
- HandleScope scope(isolate);
- Handle<JSFunction> function(function_raw, isolate);
- Handle<JSObject> object(object_raw, isolate);
- Handle<Object> value(value_raw, isolate);
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
- isolate->factory()->prototype_string(), value, NONE);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ MaybeHandle<Object> maybe_result =
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ receiver, isolate->factory()->prototype_string(), value, NONE);
+ return maybe_result.ToHandleChecked();
}
Handle<Object> old_value;
- bool is_observed =
- FLAG_harmony_observation &&
- *function == *object &&
- function->map()->is_observed();
+ bool is_observed = *function == *receiver && function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
old_value = handle(function->prototype(), isolate);
@@ -620,15 +826,56 @@ MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
function, "update", isolate->factory()->prototype_string(), old_value);
}
- return *function;
+ return function;
}
-const AccessorDescriptor Accessors::FunctionPrototype = {
- FunctionGetPrototype,
- FunctionSetPrototype,
- 0
-};
+Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
+ return GetFunctionPrototype(function->GetIsolate(), function);
+}
+
+
+Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
+ Handle<Object> prototype) {
+ ASSERT(function->should_have_prototype());
+ Isolate* isolate = function->GetIsolate();
+ return SetFunctionPrototype(isolate, function, prototype);
+}
+
+
+void Accessors::FunctionPrototypeGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<Object> object = GetThisFrom(info);
+ Handle<Object> result = GetFunctionPrototype(isolate, object);
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+
+void Accessors::FunctionPrototypeSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<void>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<JSObject> object =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+ Handle<Object> value = Utils::OpenHandle(*val);
+
+ SetFunctionPrototype(isolate, object, value);
+}
+
+
+Handle<AccessorInfo> Accessors::FunctionPrototypeInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate,
+ isolate->factory()->prototype_string(),
+ &FunctionPrototypeGetter,
+ &FunctionPrototypeSetter,
+ attributes);
+}
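Unlike the read-only Script accessors, FunctionPrototype keeps a live setter;
both callbacks now delegate to the shared GetFunctionPrototype and
SetFunctionPrototype helpers, and the handle-based entry points above expose
the same logic to VM-internal callers. A minimal usage sketch (the wrapper
functions are illustrative, not part of the patch):

    // First access lazily allocates the prototype through
    // NewFunctionPrototype, exactly as GetFunctionPrototype does above.
    static Handle<Object> ReadPrototype(Handle<JSFunction> function) {
      return Accessors::FunctionGetPrototype(function);
    }

    // Goes through SetFunctionPrototype, so an observed function map gets
    // the same "update" change record as a script assigning f.prototype.
    static Handle<Object> ReplacePrototype(Handle<JSFunction> function,
                                           Handle<Object> proto) {
      return Accessors::FunctionSetPrototype(function, proto);
    }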
//
@@ -636,31 +883,57 @@ const AccessorDescriptor Accessors::FunctionPrototype = {
//
-MaybeObject* Accessors::FunctionGetLength(Isolate* isolate,
- Object* object,
- void*) {
- JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
- if (function == NULL) return Smi::FromInt(0);
- // Check if already compiled.
- if (function->shared()->is_compiled()) {
- return Smi::FromInt(function->shared()->length());
- }
- // If the function isn't compiled yet, the length is not computed correctly
- // yet. Compile it now and return the right length.
+void Accessors::FunctionLengthGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSFunction> handle(function);
- if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
- return Smi::FromInt(handle->shared()->length());
+ Handle<Object> object = GetThisFrom(info);
+ MaybeHandle<JSFunction> maybe_function;
+
+ {
+ DisallowHeapAllocation no_allocation;
+ JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
+ if (function != NULL) maybe_function = Handle<JSFunction>(function);
+ }
+
+ int length = 0;
+ Handle<JSFunction> function;
+ if (maybe_function.ToHandle(&function)) {
+ if (function->shared()->is_compiled()) {
+ length = function->shared()->length();
+ } else {
+ // If the function isn't compiled yet, the length is not computed
+ // correctly yet. Compile it now and return the right length.
+ if (Compiler::EnsureCompiled(function, KEEP_EXCEPTION)) {
+ length = function->shared()->length();
+ }
+ if (isolate->has_pending_exception()) {
+ isolate->OptionalRescheduleException(false);
+ }
+ }
}
- return Failure::Exception();
+ Handle<Object> result(Smi::FromInt(length), isolate);
+ info.GetReturnValue().Set(Utils::ToLocal(result));
}
-const AccessorDescriptor Accessors::FunctionLength = {
- FunctionGetLength,
- ReadOnlySetAccessor,
- 0
-};
+void Accessors::FunctionLengthSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<void>& info) {
+ // Do nothing.
+}
+
+
+Handle<AccessorInfo> Accessors::FunctionLengthInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate,
+ isolate->factory()->length_string(),
+ &FunctionLengthGetter,
+ &FunctionLengthSetter,
+ attributes);
+}
//
@@ -668,21 +941,47 @@ const AccessorDescriptor Accessors::FunctionLength = {
//
-MaybeObject* Accessors::FunctionGetName(Isolate* isolate,
- Object* object,
- void*) {
- JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
- return holder == NULL
- ? isolate->heap()->undefined_value()
- : holder->shared()->name();
+void Accessors::FunctionNameGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<Object> object = GetThisFrom(info);
+ MaybeHandle<JSFunction> maybe_function;
+
+ {
+ DisallowHeapAllocation no_allocation;
+ JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
+ if (function != NULL) maybe_function = Handle<JSFunction>(function);
+ }
+
+ Handle<JSFunction> function;
+ Handle<Object> result;
+ if (maybe_function.ToHandle(&function)) {
+ result = Handle<Object>(function->shared()->name(), isolate);
+ } else {
+ result = isolate->factory()->undefined_value();
+ }
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+
+void Accessors::FunctionNameSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<void>& info) {
+ // Do nothing.
}
-const AccessorDescriptor Accessors::FunctionName = {
- FunctionGetName,
- ReadOnlySetAccessor,
- 0
-};
+Handle<AccessorInfo> Accessors::FunctionNameInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate,
+ isolate->factory()->name_string(),
+ &FunctionNameGetter,
+ &FunctionNameSetter,
+ attributes);
+}
//
@@ -690,113 +989,148 @@ const AccessorDescriptor Accessors::FunctionName = {
//
-Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(function->GetIsolate(),
- Accessors::FunctionGetArguments(function->GetIsolate(),
- *function,
- NULL),
- Object);
-}
-
-
-static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
+static Handle<Object> ArgumentsForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();
- Vector<SlotRef> args_slots =
- SlotRef::ComputeSlotMappingForArguments(
- frame,
- inlined_frame_index,
- inlined_function->shared()->formal_parameter_count());
- int args_count = args_slots.length();
+ SlotRefValueBuilder slot_refs(
+ frame,
+ inlined_frame_index,
+ inlined_function->shared()->formal_parameter_count());
+
+ int args_count = slot_refs.args_length();
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
+ slot_refs.Prepare(isolate);
for (int i = 0; i < args_count; ++i) {
- Handle<Object> value = args_slots[i].GetValue(isolate);
+ Handle<Object> value = slot_refs.GetNext(isolate, 0);
array->set(i, *value);
}
+ slot_refs.Finish(isolate);
arguments->set_elements(*array);
- args_slots.Dispose();
// Return the freshly allocated arguments object.
- return *arguments;
+ return arguments;
}
-MaybeObject* Accessors::FunctionGetArguments(Isolate* isolate,
- Object* object,
- void*) {
- HandleScope scope(isolate);
- JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
- if (holder == NULL) return isolate->heap()->undefined_value();
- Handle<JSFunction> function(holder, isolate);
+static int FindFunctionInFrame(JavaScriptFrame* frame,
+ Handle<JSFunction> function) {
+ DisallowHeapAllocation no_allocation;
+ List<JSFunction*> functions(2);
+ frame->GetFunctions(&functions);
+ for (int i = functions.length() - 1; i >= 0; i--) {
+ if (functions[i] == *function) return i;
+ }
+ return -1;
+}
+
+
+Handle<Object> GetFunctionArguments(Isolate* isolate,
+ Handle<JSFunction> function) {
+ if (function->shared()->native()) return isolate->factory()->null_value();
- if (function->shared()->native()) return isolate->heap()->null_value();
// Find the top invocation of the function by traversing frames.
- List<JSFunction*> functions(2);
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
- frame->GetFunctions(&functions);
- for (int i = functions.length() - 1; i >= 0; i--) {
- // Skip all frames that aren't invocations of the given function.
- if (functions[i] != *function) continue;
-
- if (i > 0) {
- // The function in question was inlined. Inlined functions have the
- // correct number of arguments and no allocated arguments object, so
- // we can construct a fresh one by interpreting the function's
- // deoptimization input data.
- return ConstructArgumentsObjectForInlinedFunction(frame, function, i);
- }
+ int function_index = FindFunctionInFrame(frame, function);
+ if (function_index < 0) continue;
+
+ if (function_index > 0) {
+ // The function in question was inlined. Inlined functions have the
+ // correct number of arguments and no allocated arguments object, so
+ // we can construct a fresh one by interpreting the function's
+ // deoptimization input data.
+ return ArgumentsForInlinedFunction(frame, function, function_index);
+ }
- if (!frame->is_optimized()) {
- // If there is an arguments variable in the stack, we return that.
- Handle<ScopeInfo> scope_info(function->shared()->scope_info());
- int index = scope_info->StackSlotIndex(
- isolate->heap()->arguments_string());
- if (index >= 0) {
- Handle<Object> arguments(frame->GetExpression(index), isolate);
- if (!arguments->IsArgumentsMarker()) return *arguments;
- }
+ if (!frame->is_optimized()) {
+ // If there is an arguments variable in the stack, we return that.
+ Handle<ScopeInfo> scope_info(function->shared()->scope_info());
+ int index = scope_info->StackSlotIndex(
+ isolate->heap()->arguments_string());
+ if (index >= 0) {
+ Handle<Object> arguments(frame->GetExpression(index), isolate);
+ if (!arguments->IsArgumentsMarker()) return arguments;
}
-
- // If there is no arguments variable in the stack or we have an
- // optimized frame, we find the frame that holds the actual arguments
- // passed to the function.
- it.AdvanceToArgumentsFrame();
- frame = it.frame();
-
- // Get the number of arguments and construct an arguments object
- // mirror for the right frame.
- const int length = frame->ComputeParametersCount();
- Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
- function, length);
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
-
- // Copy the parameters to the arguments object.
- ASSERT(array->length() == length);
- for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
- arguments->set_elements(*array);
-
- // Return the freshly allocated arguments object.
- return *arguments;
}
- functions.Rewind(0);
+
+ // If there is no arguments variable in the stack or we have an
+ // optimized frame, we find the frame that holds the actual arguments
+ // passed to the function.
+ it.AdvanceToArgumentsFrame();
+ frame = it.frame();
+
+ // Get the number of arguments and construct an arguments object
+ // mirror for the right frame.
+ const int length = frame->ComputeParametersCount();
+ Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
+ function, length);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+
+ // Copy the parameters to the arguments object.
+ ASSERT(array->length() == length);
+ for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+ arguments->set_elements(*array);
+
+ // Return the freshly allocated arguments object.
+ return arguments;
}
// No frame corresponding to the given function found. Return null.
- return isolate->heap()->null_value();
+ return isolate->factory()->null_value();
}
-const AccessorDescriptor Accessors::FunctionArguments = {
- FunctionGetArguments,
- ReadOnlySetAccessor,
- 0
-};
+Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
+ return GetFunctionArguments(function->GetIsolate(), function);
+}
+
+
+void Accessors::FunctionArgumentsGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<Object> object = GetThisFrom(info);
+ MaybeHandle<JSFunction> maybe_function;
+
+ {
+ DisallowHeapAllocation no_allocation;
+ JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
+ if (function != NULL) maybe_function = Handle<JSFunction>(function);
+ }
+
+ Handle<JSFunction> function;
+ Handle<Object> result;
+ if (maybe_function.ToHandle(&function)) {
+ result = GetFunctionArguments(isolate, function);
+ } else {
+ result = isolate->factory()->undefined_value();
+ }
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+
+void Accessors::FunctionArgumentsSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<void>& info) {
+ // Do nothing.
+}
+
+
+Handle<AccessorInfo> Accessors::FunctionArgumentsInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate,
+ isolate->factory()->arguments_string(),
+ &FunctionArgumentsGetter,
+ &FunctionArgumentsSetter,
+ attributes);
+}
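The rewritten getter factors the frame walk into GetFunctionArguments; the
sketch below (an illustrative wrapper, not part of the patch) summarizes its
contract as implemented above:

    // Returns a fresh arguments object when `function` has a live frame,
    // rebuilt from deoptimization data when the invocation was inlined,
    // and null when the function is native or not on the stack. The
    // property getter additionally maps a non-function receiver to
    // undefined.
    static Handle<Object> ArgumentsOrNull(Handle<JSFunction> function) {
      return Accessors::FunctionGetArguments(function);
    }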
//
@@ -804,22 +1138,33 @@ const AccessorDescriptor Accessors::FunctionArguments = {
//
+static inline bool AllowAccessToFunction(Context* current_context,
+ JSFunction* function) {
+ return current_context->HasSameSecurityTokenAs(function->context());
+}
+
+
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
- : frame_iterator_(isolate),
+ : isolate_(isolate),
+ frame_iterator_(isolate),
functions_(2),
index_(0) {
GetFunctions();
}
JSFunction* next() {
- if (functions_.length() == 0) return NULL;
- JSFunction* next_function = functions_[index_];
- index_--;
- if (index_ < 0) {
- GetFunctions();
+ while (true) {
+ if (functions_.length() == 0) return NULL;
+ JSFunction* next_function = functions_[index_];
+ index_--;
+ if (index_ < 0) {
+ GetFunctions();
+ }
+ // Skip functions from other origins.
+ if (!AllowAccessToFunction(isolate_->context(), next_function)) continue;
+ return next_function;
}
- return next_function;
}
  // Iterate through functions until the first occurrence of 'function'.
@@ -844,35 +1189,30 @@ class FrameFunctionIterator {
frame_iterator_.Advance();
index_ = functions_.length() - 1;
}
+ Isolate* isolate_;
JavaScriptFrameIterator frame_iterator_;
List<JSFunction*> functions_;
int index_;
};
-MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate,
- Object* object,
- void*) {
- HandleScope scope(isolate);
+MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
+ Handle<JSFunction> function) {
DisallowHeapAllocation no_allocation;
- JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
- if (holder == NULL) return isolate->heap()->undefined_value();
- if (holder->shared()->native()) return isolate->heap()->null_value();
- Handle<JSFunction> function(holder, isolate);
-
FrameFunctionIterator it(isolate, no_allocation);
-
+ if (function->shared()->native()) {
+ return MaybeHandle<JSFunction>();
+ }
// Find the function from the frames.
if (!it.Find(*function)) {
// No frame corresponding to the given function found. Return null.
- return isolate->heap()->null_value();
+ return MaybeHandle<JSFunction>();
}
-
// Find previously called non-toplevel function.
JSFunction* caller;
do {
caller = it.next();
- if (caller == NULL) return isolate->heap()->null_value();
+ if (caller == NULL) return MaybeHandle<JSFunction>();
} while (caller->shared()->is_toplevel());
// If caller is a built-in function and caller's caller is also built-in,
@@ -889,24 +1229,68 @@ MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate,
// allows us to make bound functions use the strict function map
// and its associated throwing caller and arguments.
if (caller->shared()->bound()) {
- return isolate->heap()->null_value();
+ return MaybeHandle<JSFunction>();
}
- // Censor if the caller is not a classic mode function.
+ // Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
- if (!caller->shared()->is_classic_mode()) {
- return isolate->heap()->null_value();
+ if (caller->shared()->strict_mode() == STRICT) {
+ return MaybeHandle<JSFunction>();
+ }
+ // Don't return caller from another security context.
+ if (!AllowAccessToFunction(isolate->context(), caller)) {
+ return MaybeHandle<JSFunction>();
+ }
+ return Handle<JSFunction>(caller);
+}
+
+
+void Accessors::FunctionCallerGetter(
+ v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<Object> object = GetThisFrom(info);
+ MaybeHandle<JSFunction> maybe_function;
+ {
+ DisallowHeapAllocation no_allocation;
+ JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
+ if (function != NULL) maybe_function = Handle<JSFunction>(function);
+ }
+ Handle<JSFunction> function;
+ Handle<Object> result;
+ if (maybe_function.ToHandle(&function)) {
+ MaybeHandle<JSFunction> maybe_caller;
+ maybe_caller = FindCaller(isolate, function);
+ Handle<JSFunction> caller;
+ if (maybe_caller.ToHandle(&caller)) {
+ result = caller;
+ } else {
+ result = isolate->factory()->null_value();
+ }
+ } else {
+ result = isolate->factory()->undefined_value();
}
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
- return caller;
+void Accessors::FunctionCallerSetter(
+ v8::Local<v8::String> name,
+ v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<void>& info) {
+ // Do nothing.
}
-const AccessorDescriptor Accessors::FunctionCaller = {
- FunctionGetCaller,
- ReadOnlySetAccessor,
- 0
-};
+Handle<AccessorInfo> Accessors::FunctionCallerInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate,
+ isolate->factory()->caller_string(),
+ &FunctionCallerGetter,
+ &FunctionCallerSetter,
+ attributes);
+}
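With FindCaller returning a MaybeHandle, the getter's three outcomes are now
explicit. Summarized for illustration (not patch content):

    // FunctionCallerGetter result, as implemented above:
    //   - receiver does not wrap a JSFunction: undefined.
    //   - function is native, has no frame on the stack, or the caller is
    //     bound, strict mode, or from another security context: null.
    //   - otherwise: the calling JSFunction.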
//
diff --git a/chromium/v8/src/accessors.h b/chromium/v8/src/accessors.h
index 723abd253a2..41993ea1c70 100644
--- a/chromium/v8/src/accessors.h
+++ b/chromium/v8/src/accessors.h
@@ -1,78 +1,64 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ACCESSORS_H_
#define V8_ACCESSORS_H_
-#include "allocation.h"
-#include "v8globals.h"
+#include "src/allocation.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
-#define ACCESSOR_DESCRIPTOR_LIST(V) \
- V(FunctionPrototype) \
- V(FunctionLength) \
- V(FunctionName) \
+#define ACCESSOR_INFO_LIST(V) \
+ V(ArrayLength) \
V(FunctionArguments) \
V(FunctionCaller) \
- V(ArrayLength) \
- V(StringLength) \
- V(ScriptSource) \
- V(ScriptName) \
- V(ScriptId) \
- V(ScriptLineOffset) \
+ V(FunctionName) \
+ V(FunctionLength) \
+ V(FunctionPrototype) \
V(ScriptColumnOffset) \
- V(ScriptData) \
- V(ScriptType) \
V(ScriptCompilationType) \
- V(ScriptLineEnds) \
V(ScriptContextData) \
V(ScriptEvalFromScript) \
V(ScriptEvalFromScriptPosition) \
- V(ScriptEvalFromFunctionName)
+ V(ScriptEvalFromFunctionName) \
+ V(ScriptId) \
+ V(ScriptLineEnds) \
+ V(ScriptLineOffset) \
+ V(ScriptName) \
+ V(ScriptSource) \
+ V(ScriptType) \
+ V(StringLength)
// Accessors contains all predefined proxy accessors.
class Accessors : public AllStatic {
public:
// Accessor descriptors.
-#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
- static const AccessorDescriptor name;
- ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
-#undef ACCESSOR_DESCRIPTOR_DECLARATION
+#define ACCESSOR_INFO_DECLARATION(name) \
+ static void name##Getter( \
+ v8::Local<v8::String> name, \
+ const v8::PropertyCallbackInfo<v8::Value>& info); \
+ static void name##Setter( \
+ v8::Local<v8::String> name, \
+ v8::Local<v8::Value> value, \
+ const v8::PropertyCallbackInfo<void>& info); \
+ static Handle<AccessorInfo> name##Info( \
+ Isolate* isolate, \
+ PropertyAttributes attributes);
+ ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
+#undef ACCESSOR_INFO_DECLARATION
enum DescriptorId {
-#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
- k##name,
- ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
-#undef ACCESSOR_DESCRIPTOR_DECLARATION
+#define ACCESSOR_INFO_DECLARATION(name) \
+ k##name##Getter, \
+ k##name##Setter,
+ ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
+#undef ACCESSOR_INFO_DECLARATION
descriptorCount
};
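Each name in ACCESSOR_INFO_LIST thus expands to a getter/setter pair plus an
AccessorInfo factory, and to two DescriptorId entries. For illustration,
V(FunctionName) yields:

    static void FunctionNameGetter(
        v8::Local<v8::String> name,
        const v8::PropertyCallbackInfo<v8::Value>& info);
    static void FunctionNameSetter(
        v8::Local<v8::String> name,
        v8::Local<v8::Value> value,
        const v8::PropertyCallbackInfo<void>& info);
    static Handle<AccessorInfo> FunctionNameInfo(
        Isolate* isolate,
        PropertyAttributes attributes);
    // ...plus kFunctionNameGetter and kFunctionNameSetter in DescriptorId.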
@@ -88,77 +74,26 @@ class Accessors : public AllStatic {
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
- static bool IsJSObjectFieldAccessor(
- Handle<Map> map, Handle<String> name,
- int* object_offset);
+ template <class T>
+ static bool IsJSObjectFieldAccessor(typename T::TypeHandle type,
+ Handle<String> name,
+ int* object_offset);
+ static Handle<AccessorInfo> MakeAccessor(
+ Isolate* isolate,
+ Handle<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ PropertyAttributes attributes);
- private:
- // Accessor functions only used through the descriptor.
- static MaybeObject* FunctionSetPrototype(Isolate* isolate,
- JSObject* object,
- Object*,
- void*);
- static MaybeObject* FunctionGetPrototype(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* FunctionGetLength(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* FunctionGetName(Isolate* isolate, Object* object, void*);
- static MaybeObject* FunctionGetArguments(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* FunctionGetCaller(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* ArraySetLength(Isolate* isolate,
- JSObject* object,
- Object*,
- void*);
- static MaybeObject* ArrayGetLength(Isolate* isolate, Object* object, void*);
- static MaybeObject* StringGetLength(Isolate* isolate, Object* object, void*);
- static MaybeObject* ScriptGetName(Isolate* isolate, Object* object, void*);
- static MaybeObject* ScriptGetId(Isolate* isolate, Object* object, void*);
- static MaybeObject* ScriptGetSource(Isolate* isolate, Object* object, void*);
- static MaybeObject* ScriptGetLineOffset(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* ScriptGetColumnOffset(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* ScriptGetData(Isolate* isolate, Object* object, void*);
- static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*);
- static MaybeObject* ScriptGetCompilationType(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* ScriptGetLineEnds(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* ScriptGetContextData(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* ScriptGetEvalFromScript(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* ScriptGetEvalFromScriptPosition(Isolate* isolate,
- Object* object,
- void*);
- static MaybeObject* ScriptGetEvalFromFunctionName(Isolate* isolate,
- Object* object,
- void*);
+ static Handle<ExecutableAccessorInfo> CloneAccessor(
+ Isolate* isolate,
+ Handle<ExecutableAccessorInfo> accessor);
+
+ private:
// Helper functions.
static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
- static MaybeObject* IllegalSetter(Isolate* isolate,
- JSObject*,
- Object*,
- void*);
- static Object* IllegalGetAccessor(Isolate* isolate, Object* object, void*);
- static MaybeObject* ReadOnlySetAccessor(Isolate* isolate,
- JSObject*,
- Object* value,
- void*);
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/allocation-site-scopes.cc b/chromium/v8/src/allocation-site-scopes.cc
index bbfb39b122c..805ad7bbee8 100644
--- a/chromium/v8/src/allocation-site-scopes.cc
+++ b/chromium/v8/src/allocation-site-scopes.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "allocation-site-scopes.h"
+#include "src/allocation-site-scopes.h"
namespace v8 {
namespace internal {
@@ -62,7 +39,7 @@ Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
void AllocationSiteCreationContext::ExitScope(
Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
- if (!object.is_null() && !object->IsFailure()) {
+ if (!object.is_null()) {
bool top_level = !scope_site.is_null() &&
top().is_identical_to(scope_site);
diff --git a/chromium/v8/src/allocation-site-scopes.h b/chromium/v8/src/allocation-site-scopes.h
index a195b27d85a..7adf0284167 100644
--- a/chromium/v8/src/allocation-site-scopes.h
+++ b/chromium/v8/src/allocation-site-scopes.h
@@ -1,37 +1,14 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ALLOCATION_SITE_SCOPES_H_
#define V8_ALLOCATION_SITE_SCOPES_H_
-#include "ast.h"
-#include "handles.h"
-#include "objects.h"
-#include "zone.h"
+#include "src/ast.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -43,7 +20,7 @@ class AllocationSiteContext {
public:
explicit AllocationSiteContext(Isolate* isolate) {
isolate_ = isolate;
- };
+ }
Handle<AllocationSite> top() { return top_; }
Handle<AllocationSite> current() { return current_; }
diff --git a/chromium/v8/src/allocation-tracker.cc b/chromium/v8/src/allocation-tracker.cc
index 8044cef3c81..f6dc5abff78 100644
--- a/chromium/v8/src/allocation-tracker.cc
+++ b/chromium/v8/src/allocation-tracker.cc
@@ -1,44 +1,21 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "allocation-tracker.h"
-
-#include "heap-snapshot-generator.h"
-#include "frames-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/allocation-tracker.h"
+
+#include "src/heap-snapshot-generator.h"
+#include "src/frames-inl.h"
namespace v8 {
namespace internal {
AllocationTraceNode::AllocationTraceNode(
- AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
+ AllocationTraceTree* tree, unsigned function_info_index)
: tree_(tree),
- function_id_(shared_function_info_id),
+ function_info_index_(function_info_index),
total_size_(0),
allocation_count_(0),
id_(tree->next_node_id()) {
@@ -46,22 +23,25 @@ AllocationTraceNode::AllocationTraceNode(
AllocationTraceNode::~AllocationTraceNode() {
+ for (int i = 0; i < children_.length(); i++) delete children_[i];
}
-AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
+AllocationTraceNode* AllocationTraceNode::FindChild(
+ unsigned function_info_index) {
for (int i = 0; i < children_.length(); i++) {
AllocationTraceNode* node = children_[i];
- if (node->function_id() == id) return node;
+ if (node->function_info_index() == function_info_index) return node;
}
return NULL;
}
-AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) {
- AllocationTraceNode* child = FindChild(id);
+AllocationTraceNode* AllocationTraceNode::FindOrAddChild(
+ unsigned function_info_index) {
+ AllocationTraceNode* child = FindChild(function_info_index);
if (child == NULL) {
- child = new AllocationTraceNode(tree_, id);
+ child = new AllocationTraceNode(tree_, function_info_index);
children_.Add(child);
}
return child;
@@ -77,17 +57,11 @@ void AllocationTraceNode::AddAllocation(unsigned size) {
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
if (tracker != NULL) {
- const char* name = "<unknown function>";
- if (function_id_ != 0) {
- AllocationTracker::FunctionInfo* info =
- tracker->GetFunctionInfo(function_id_);
- if (info != NULL) {
- name = info->name;
- }
- }
- OS::Print("%s #%u", name, id_);
+ AllocationTracker::FunctionInfo* info =
+ tracker->function_info_list()[function_info_index_];
+ OS::Print("%s #%u", info->name, id_);
} else {
- OS::Print("%u #%u", function_id_, id_);
+ OS::Print("%u #%u", function_info_index_, id_);
}
OS::Print("\n");
indent += 2;
@@ -108,9 +82,9 @@ AllocationTraceTree::~AllocationTraceTree() {
AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
- const Vector<SnapshotObjectId>& path) {
+ const Vector<unsigned>& path) {
AllocationTraceNode* node = root();
- for (SnapshotObjectId* entry = path.start() + path.length() - 1;
+ for (unsigned* entry = path.start() + path.length() - 1;
entry != path.start() - 1;
--entry) {
node = node->FindOrAddChild(*entry);
@@ -125,6 +99,7 @@ void AllocationTraceTree::Print(AllocationTracker* tracker) {
root()->Print(0, tracker);
}
+
void AllocationTracker::DeleteUnresolvedLocation(
UnresolvedLocation** location) {
delete *location;
@@ -133,6 +108,7 @@ void AllocationTracker::DeleteUnresolvedLocation(
AllocationTracker::FunctionInfo::FunctionInfo()
: name(""),
+ function_id(0),
script_name(""),
script_id(0),
line(-1),
@@ -140,8 +116,80 @@ AllocationTracker::FunctionInfo::FunctionInfo()
}
-static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
+void AddressToTraceMap::AddRange(Address start, int size,
+ unsigned trace_node_id) {
+ Address end = start + size;
+ RemoveRange(start, end);
+
+ RangeStack new_range(start, trace_node_id);
+ ranges_.insert(RangeMap::value_type(end, new_range));
+}
+
+
+unsigned AddressToTraceMap::GetTraceNodeId(Address addr) {
+ RangeMap::const_iterator it = ranges_.upper_bound(addr);
+ if (it == ranges_.end()) return 0;
+ if (it->second.start <= addr) {
+ return it->second.trace_node_id;
+ }
+ return 0;
+}
+
+
+void AddressToTraceMap::MoveObject(Address from, Address to, int size) {
+ unsigned trace_node_id = GetTraceNodeId(from);
+ if (trace_node_id == 0) return;
+ RemoveRange(from, from + size);
+ AddRange(to, size, trace_node_id);
+}
+
+
+void AddressToTraceMap::Clear() {
+ ranges_.clear();
+}
+
+
+void AddressToTraceMap::Print() {
+ PrintF("[AddressToTraceMap (%" V8PRIuPTR "): \n", ranges_.size());
+ for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
+ PrintF("[%p - %p] => %u\n", it->second.start, it->first,
+ it->second.trace_node_id);
+ }
+ PrintF("]\n");
+}
+
+
+void AddressToTraceMap::RemoveRange(Address start, Address end) {
+ RangeMap::iterator it = ranges_.upper_bound(start);
+ if (it == ranges_.end()) return;
+
+ RangeStack prev_range(0, 0);
+
+ RangeMap::iterator to_remove_begin = it;
+ if (it->second.start < start) {
+ prev_range = it->second;
+ }
+ do {
+ if (it->first > end) {
+ if (it->second.start < end) {
+ it->second.start = end;
+ }
+ break;
+ }
+ ++it;
+  } while (it != ranges_.end());
+
+ ranges_.erase(to_remove_begin, it);
+
+ if (prev_range.start != 0) {
+ ranges_.insert(RangeMap::value_type(start, prev_range));
+ }
+}
+
+
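ranges_ keys each range by its exclusive end address, so GetTraceNodeId can
find a candidate with a single upper_bound lookup, and RemoveRange trims a
range that straddles the removed interval's end in place while re-inserting
the head of a range that straddles its start under a new end key. A worked
example with hypothetical addresses:

    AddressToTraceMap map;
    map.AddRange(reinterpret_cast<Address>(0x100), 0x30, 7);
    // ranges_: { 0x130 -> {start: 0x100, id: 7} }, i.e. [0x100,0x130) -> 7.
    map.AddRange(reinterpret_cast<Address>(0x110), 0x10, 9);
    // AddRange first ran RemoveRange(0x110, 0x120), splitting the old range
    // into [0x100,0x110) -> 7 and [0x120,0x130) -> 7, then inserted
    // [0x110,0x120) -> 9.
    unsigned id = map.GetTraceNodeId(reinterpret_cast<Address>(0x125));
    // id == 7: 0x125 falls in the tail piece [0x120,0x130).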
+void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) {
+ delete *info;
}
@@ -149,12 +197,17 @@ AllocationTracker::AllocationTracker(
HeapObjectsMap* ids, StringsStorage* names)
: ids_(ids),
names_(names),
- id_to_function_info_(AddressesMatch) {
+ id_to_function_info_index_(HashMap::PointersMatch),
+ info_index_for_other_state_(0) {
+ FunctionInfo* info = new FunctionInfo();
+ info->name = "(root)";
+ function_info_list_.Add(info);
}
AllocationTracker::~AllocationTracker() {
unresolved_locations_.Iterate(DeleteUnresolvedLocation);
+ function_info_list_.Iterate(&DeleteFunctionInfo);
}
@@ -187,13 +240,20 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
SharedFunctionInfo* shared = frame->function()->shared();
SnapshotObjectId id = ids_->FindOrAddEntry(
shared->address(), shared->Size(), false);
- allocation_trace_buffer_[length++] = id;
- AddFunctionInfo(shared, id);
+ allocation_trace_buffer_[length++] = AddFunctionInfo(shared, id);
it.Advance();
}
+ if (length == 0) {
+ unsigned index = functionInfoIndexForVMState(isolate->current_vm_state());
+ if (index != 0) {
+ allocation_trace_buffer_[length++] = index;
+ }
+ }
AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
- Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
+ Vector<unsigned>(allocation_trace_buffer_, length));
top_node->AddAllocation(size);
+
+ address_to_trace_.AddRange(addr, size, top_node->id());
}
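The trace buffer is filled innermost frame first while walking the stack,
which is why AddPathFromEnd (above) iterates from the last entry: the
outermost function lands directly under the root. A minimal sketch with
hypothetical function_info_list_ indices:

    unsigned path[] = { 3 /* inner */, 2 /* middle */, 1 /* outer */ };
    AllocationTraceNode* top =
        trace_tree_.AddPathFromEnd(Vector<unsigned>(path, 3));
    // The tree now holds root -> 1 -> 2 -> 3, and top is the node for 3.
    top->AddAllocation(64);  // Attribute a 64-byte allocation to this path.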
@@ -203,24 +263,14 @@ static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
}
-AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
- SnapshotObjectId id) {
- HashMap::Entry* entry = id_to_function_info_.Lookup(
- reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
- if (entry == NULL) {
- return NULL;
- }
- return reinterpret_cast<FunctionInfo*>(entry->value);
-}
-
-
-void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
- SnapshotObjectId id) {
- HashMap::Entry* entry = id_to_function_info_.Lookup(
+unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_index_.Lookup(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
if (entry->value == NULL) {
FunctionInfo* info = new FunctionInfo();
info->name = names_->GetFunctionName(shared->DebugName());
+ info->function_id = id;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
if (script->name()->IsName()) {
@@ -235,8 +285,22 @@ void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
shared->start_position(),
info));
}
- entry->value = info;
+ entry->value = reinterpret_cast<void*>(function_info_list_.length());
+ function_info_list_.Add(info);
+ }
+ return static_cast<unsigned>(reinterpret_cast<intptr_t>((entry->value)));
+}
+
+
+unsigned AllocationTracker::functionInfoIndexForVMState(StateTag state) {
+ if (state != OTHER) return 0;
+ if (info_index_for_other_state_ == 0) {
+ FunctionInfo* info = new FunctionInfo();
+ info->name = "(V8 API)";
+ info_index_for_other_state_ = function_info_list_.length();
+ function_info_list_.Add(info);
}
+ return info_index_for_other_state_;
}
@@ -246,34 +310,33 @@ AllocationTracker::UnresolvedLocation::UnresolvedLocation(
info_(info) {
script_ = Handle<Script>::cast(
script->GetIsolate()->global_handles()->Create(script));
- GlobalHandles::MakeWeak(
- reinterpret_cast<Object**>(script_.location()),
- this, &HandleWeakScript);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
+ this,
+ &HandleWeakScript);
}
AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
if (!script_.is_null()) {
- script_->GetIsolate()->global_handles()->Destroy(
- reinterpret_cast<Object**>(script_.location()));
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(script_.location()));
}
}
void AllocationTracker::UnresolvedLocation::Resolve() {
if (script_.is_null()) return;
- info_->line = GetScriptLineNumber(script_, start_position_);
- info_->column = GetScriptColumnNumber(script_, start_position_);
+ HandleScope scope(script_->GetIsolate());
+ info_->line = Script::GetLineNumber(script_, start_position_);
+ info_->column = Script::GetColumnNumber(script_, start_position_);
}
void AllocationTracker::UnresolvedLocation::HandleWeakScript(
- v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data) {
- UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
- location->script_ = Handle<Script>::null();
- obj->Reset();
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ UnresolvedLocation* loc =
+ reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
+ loc->script_ = Handle<Script>::null();
}
diff --git a/chromium/v8/src/allocation-tracker.h b/chromium/v8/src/allocation-tracker.h
index 6844716a148..f3788b91a66 100644
--- a/chromium/v8/src/allocation-tracker.h
+++ b/chromium/v8/src/allocation-tracker.h
@@ -1,33 +1,12 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ALLOCATION_TRACKER_H_
#define V8_ALLOCATION_TRACKER_H_
+#include <map>
+
namespace v8 {
namespace internal {
@@ -38,13 +17,13 @@ class AllocationTraceTree;
class AllocationTraceNode {
public:
AllocationTraceNode(AllocationTraceTree* tree,
- SnapshotObjectId shared_function_info_id);
+ unsigned function_info_index);
~AllocationTraceNode();
- AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id);
- AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id);
+ AllocationTraceNode* FindChild(unsigned function_info_index);
+ AllocationTraceNode* FindOrAddChild(unsigned function_info_index);
void AddAllocation(unsigned size);
- SnapshotObjectId function_id() const { return function_id_; }
+ unsigned function_info_index() const { return function_info_index_; }
unsigned allocation_size() const { return total_size_; }
unsigned allocation_count() const { return allocation_count_; }
unsigned id() const { return id_; }
@@ -54,7 +33,7 @@ class AllocationTraceNode {
private:
AllocationTraceTree* tree_;
- SnapshotObjectId function_id_;
+ unsigned function_info_index_;
unsigned total_size_;
unsigned allocation_count_;
unsigned id_;
@@ -68,7 +47,7 @@ class AllocationTraceTree {
public:
AllocationTraceTree();
~AllocationTraceTree();
- AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path);
+ AllocationTraceNode* AddPathFromEnd(const Vector<unsigned>& path);
AllocationTraceNode* root() { return &root_; }
unsigned next_node_id() { return next_node_id_++; }
void Print(AllocationTracker* tracker);
@@ -81,11 +60,36 @@ class AllocationTraceTree {
};
+class AddressToTraceMap {
+ public:
+ void AddRange(Address addr, int size, unsigned node_id);
+ unsigned GetTraceNodeId(Address addr);
+ void MoveObject(Address from, Address to, int size);
+ void Clear();
+ size_t size() { return ranges_.size(); }
+ void Print();
+
+ private:
+ struct RangeStack {
+ RangeStack(Address start, unsigned node_id)
+ : start(start), trace_node_id(node_id) {}
+ Address start;
+ unsigned trace_node_id;
+ };
+ // [start, end) -> trace
+ typedef std::map<Address, RangeStack> RangeMap;
+
+ void RemoveRange(Address start, Address end);
+
+ RangeMap ranges_;
+};
+
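
The "[start, end) -> trace" comment says AddressToTraceMap stores half-open address ranges. A standalone sketch (not V8 code) of how such a std::map can answer point queries, under the assumption that ranges are keyed by their exclusive end address so a single upper_bound lands on the covering range:

    #include <stdint.h>
    #include <map>

    typedef uintptr_t Address;
    struct RangeStack { Address start; unsigned trace_node_id; };
    typedef std::map<Address, RangeStack> RangeMap;  // key: exclusive end

    unsigned GetTraceNodeId(const RangeMap& ranges, Address addr) {
      // First range whose end is strictly greater than addr...
      RangeMap::const_iterator it = ranges.upper_bound(addr);
      // ...covers addr only if addr lies at or past its start.
      if (it == ranges.end() || addr < it->second.start) return 0;
      return it->second.trace_node_id;
    }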
class AllocationTracker {
public:
struct FunctionInfo {
FunctionInfo();
const char* name;
+ SnapshotObjectId function_id;
const char* script_name;
int script_id;
int line;
@@ -99,11 +103,15 @@ class AllocationTracker {
void AllocationEvent(Address addr, int size);
AllocationTraceTree* trace_tree() { return &trace_tree_; }
- HashMap* id_to_function_info() { return &id_to_function_info_; }
- FunctionInfo* GetFunctionInfo(SnapshotObjectId id);
+ const List<FunctionInfo*>& function_info_list() const {
+ return function_info_list_;
+ }
+ AddressToTraceMap* address_to_trace() { return &address_to_trace_; }
private:
- void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+ unsigned AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+ static void DeleteFunctionInfo(FunctionInfo** info);
+ unsigned functionInfoIndexForVMState(StateTag state);
class UnresolvedLocation {
public:
@@ -112,9 +120,9 @@ class AllocationTracker {
void Resolve();
private:
- static void HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data);
+ static void HandleWeakScript(
+ const v8::WeakCallbackData<v8::Value, void>& data);
+
Handle<Script> script_;
int start_position_;
FunctionInfo* info_;
@@ -125,9 +133,12 @@ class AllocationTracker {
HeapObjectsMap* ids_;
StringsStorage* names_;
AllocationTraceTree trace_tree_;
- SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength];
- HashMap id_to_function_info_;
+ unsigned allocation_trace_buffer_[kMaxAllocationTraceLength];
+ List<FunctionInfo*> function_info_list_;
+ HashMap id_to_function_info_index_;
List<UnresolvedLocation*> unresolved_locations_;
+ unsigned info_index_for_other_state_;
+ AddressToTraceMap address_to_trace_;
DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
};
diff --git a/chromium/v8/src/allocation.cc b/chromium/v8/src/allocation.cc
index 69edf6906cf..98c9be22e02 100644
--- a/chromium/v8/src/allocation.cc
+++ b/chromium/v8/src/allocation.cc
@@ -1,36 +1,17 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "allocation.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/allocation.h"
#include <stdlib.h> // For free, malloc.
-#include "checks.h"
-#include "platform.h"
-#include "utils.h"
+#include "src/checks.h"
+#include "src/platform.h"
+#include "src/utils.h"
+
+#if V8_LIBC_BIONIC
+#include <malloc.h> // NOLINT
+#endif
namespace v8 {
namespace internal {
@@ -85,7 +66,7 @@ void AllStatic::operator delete(void* p) {
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
- OS::MemCopy(result, str, length);
+ MemCopy(result, str, length);
result[length] = '\0';
return result;
}
@@ -95,9 +76,38 @@ char* StrNDup(const char* str, int n) {
int length = StrLength(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
- OS::MemCopy(result, str, length);
+ MemCopy(result, str, length);
result[length] = '\0';
return result;
}
+
+void* AlignedAlloc(size_t size, size_t alignment) {
+ ASSERT(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*)); // NOLINT
+ void* ptr;
+#if V8_OS_WIN
+ ptr = _aligned_malloc(size, alignment);
+#elif V8_LIBC_BIONIC
+ // posix_memalign is not exposed in some Android versions, so we fall back to
+ // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
+ ptr = memalign(alignment, size);
+#else
+ if (posix_memalign(&ptr, alignment, size)) ptr = NULL;
+#endif
+ if (ptr == NULL) FatalProcessOutOfMemory("AlignedAlloc");
+ return ptr;
+}
+
+
+void AlignedFree(void *ptr) {
+#if V8_OS_WIN
+ _aligned_free(ptr);
+#elif V8_LIBC_BIONIC
+ // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
+ free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
} } // namespace v8::internal
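
A hypothetical caller of the two new helpers, to make the contract above concrete: the alignment must be a power of two no smaller than V8_ALIGNOF(void*), failure is fatal rather than reported (AlignedAlloc never returns NULL), and the pointer must be released with AlignedFree so the _aligned_malloc / memalign / posix_memalign pairing stays consistent per platform:

    // Sketch only: a page-sized buffer on a 64-byte boundary.
    void* block = v8::internal::AlignedAlloc(4096, 64);
    // ... use block ...
    v8::internal::AlignedFree(block);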
diff --git a/chromium/v8/src/allocation.h b/chromium/v8/src/allocation.h
index 03cc8f5e73f..2fea7b2826e 100644
--- a/chromium/v8/src/allocation.h
+++ b/chromium/v8/src/allocation.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ALLOCATION_H_
#define V8_ALLOCATION_H_
-#include "globals.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -109,6 +86,9 @@ class FreeStoreAllocationPolicy {
};
+void* AlignedAlloc(size_t size, size_t alignment);
+void AlignedFree(void *ptr);
+
} } // namespace v8::internal
#endif // V8_ALLOCATION_H_
diff --git a/chromium/v8/src/api.cc b/chromium/v8/src/api.cc
index d7c76d5a13d..bf54d0b259b 100644
--- a/chromium/v8/src/api.cc
+++ b/chromium/v8/src/api.cc
@@ -1,71 +1,52 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "api.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api.h"
#include <string.h> // For memcpy, strlen.
+#ifdef V8_USE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif // V8_USE_ADDRESS_SANITIZER
#include <cmath> // For isnan.
-#include "../include/v8-debug.h"
-#include "../include/v8-profiler.h"
-#include "../include/v8-testing.h"
-#include "assert-scope.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "compiler.h"
-#include "conversions-inl.h"
-#include "counters.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "heap-profiler.h"
-#include "heap-snapshot-generator-inl.h"
-#include "icu_util.h"
-#include "json-parser.h"
-#include "messages.h"
+#include "include/v8-debug.h"
+#include "include/v8-profiler.h"
+#include "include/v8-testing.h"
+#include "src/assert-scope.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/compiler.h"
+#include "src/conversions-inl.h"
+#include "src/counters.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/execution.h"
+#include "src/global-handles.h"
+#include "src/heap-profiler.h"
+#include "src/heap-snapshot-generator-inl.h"
+#include "src/icu_util.h"
+#include "src/json-parser.h"
+#include "src/messages.h"
#ifdef COMPRESS_STARTUP_DATA_BZ2
-#include "natives.h"
+#include "src/natives.h"
#endif
-#include "parser.h"
-#include "platform.h"
-#include "platform/time.h"
-#include "profile-generator-inl.h"
-#include "property-details.h"
-#include "property.h"
-#include "runtime.h"
-#include "runtime-profiler.h"
-#include "scanner-character-streams.h"
-#include "snapshot.h"
-#include "unicode-inl.h"
-#include "utils/random-number-generator.h"
-#include "v8threads.h"
-#include "version.h"
-#include "vm-state-inl.h"
+#include "src/parser.h"
+#include "src/platform.h"
+#include "src/platform/time.h"
+#include "src/profile-generator-inl.h"
+#include "src/property-details.h"
+#include "src/property.h"
+#include "src/runtime.h"
+#include "src/runtime-profiler.h"
+#include "src/scanner-character-streams.h"
+#include "src/simulator.h"
+#include "src/snapshot.h"
+#include "src/unicode-inl.h"
+#include "src/utils/random-number-generator.h"
+#include "src/v8threads.h"
+#include "src/version.h"
+#include "src/vm-state-inl.h"
#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
@@ -95,11 +76,6 @@ namespace v8 {
(isolate)->handle_scope_implementer(); \
handle_scope_implementer->DecrementCallDepth(); \
if (has_pending_exception) { \
- if (handle_scope_implementer->CallDepthIsZero() && \
- (isolate)->is_out_of_memory()) { \
- if (!(isolate)->ignore_out_of_memory()) \
- i::V8::FatalProcessOutOfMemory(NULL); \
- } \
bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero(); \
(isolate)->OptionalRescheduleException(call_depth_is_zero); \
do_callback \
@@ -111,47 +87,16 @@ namespace v8 {
#define EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, value) \
EXCEPTION_BAILOUT_CHECK_GENERIC( \
- isolate, value, i::V8::FireCallCompletedCallback(isolate);)
+ isolate, value, isolate->FireCallCompletedCallback();)
#define EXCEPTION_BAILOUT_CHECK(isolate, value) \
EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, ;)
-#define API_ENTRY_CHECK(isolate, msg) \
- do { \
- if (v8::Locker::IsActive()) { \
- ApiCheck(isolate->thread_manager()->IsLockedByCurrentThread(), \
- msg, \
- "Entering the V8 API without proper locking in place"); \
- } \
- } while (false)
-
-
// --- E x c e p t i o n B e h a v i o r ---
-static void DefaultFatalErrorHandler(const char* location,
- const char* message) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->IsInitialized()) {
- i::VMState<i::OTHER> state(isolate);
- API_Fatal(location, message);
- } else {
- API_Fatal(location, message);
- }
-}
-
-
-static FatalErrorCallback GetFatalErrorHandler() {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->exception_behavior() == NULL) {
- isolate->set_exception_behavior(DefaultFatalErrorHandler);
- }
- return isolate->exception_behavior();
-}
-
-
void i::FatalProcessOutOfMemory(const char* location) {
i::V8::FatalProcessOutOfMemory(location, false);
}
@@ -221,21 +166,23 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
// HeapIterator here without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
}
- isolate->SignalFatalError();
- FatalErrorCallback callback = GetFatalErrorHandler();
- const char* message = "Allocation failed - process out of memory";
- callback(location, message);
- // If the callback returns, we stop execution.
+ Utils::ApiCheck(false, location, "Allocation failed - process out of memory");
+ // If the fatal error handler returns, we stop execution.
FATAL("API fatal error handler returned after process out of memory");
}
-bool Utils::ReportApiFailure(const char* location, const char* message) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, message);
+void Utils::ReportApiFailure(const char* location, const char* message) {
i::Isolate* isolate = i::Isolate::Current();
+ FatalErrorCallback callback = isolate->exception_behavior();
+ if (callback == NULL) {
+ i::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n",
+ location, message);
+ i::OS::Abort();
+ } else {
+ callback(location, message);
+ }
isolate->SignalFatalError();
- return false;
}
@@ -245,20 +192,6 @@ bool V8::IsDead() {
}
-static inline bool ApiCheck(bool condition,
- const char* location,
- const char* message) {
- return condition ? true : Utils::ReportApiFailure(location, message);
-}
-
-
-static bool ReportEmptyHandle(const char* location) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, "Reading from empty handle");
- return true;
-}
-
-
static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
if (!isolate->IsInitialized()) return false;
if (isolate->has_scheduled_exception()) {
@@ -269,16 +202,6 @@ static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
}
-static inline bool EmptyCheck(const char* location, v8::Handle<v8::Data> obj) {
- return obj.IsEmpty() ? ReportEmptyHandle(location) : false;
-}
-
-
-static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
- return (obj == 0) ? ReportEmptyHandle(location) : false;
-}
-
-
// --- S t a t i c s ---
@@ -295,29 +218,10 @@ static bool InitializeHelper(i::Isolate* isolate) {
static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
const char* location) {
- if (isolate != NULL) {
- if (isolate->IsInitialized()) return true;
- }
- ASSERT(isolate == i::Isolate::Current());
- return ApiCheck(InitializeHelper(isolate), location, "Error initializing V8");
-}
-
-
-// Some initializing API functions are called early and may be
-// called on a thread different from static initializer thread.
-// If Isolate API is used, Isolate::Enter() will initialize TLS so
-// Isolate::Current() works. If it's a legacy case, then the thread
-// may not have TLS initialized yet. However, in initializing APIs it
-// may be too early to call EnsureInitialized() - some pre-init
-// parameters still have to be configured.
-static inline i::Isolate* EnterIsolateIfNeeded() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate != NULL)
- return isolate;
-
- i::Isolate::EnterDefaultIsolate();
- isolate = i::Isolate::Current();
- return isolate;
+ return (isolate != NULL && isolate->IsInitialized()) ||
+ Utils::ApiCheck(InitializeHelper(isolate),
+ location,
+ "Error initializing V8");
}
@@ -450,14 +354,14 @@ void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
void V8::SetFatalErrorHandler(FatalErrorCallback that) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
isolate->set_exception_behavior(that);
}
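
With the old default handler gone, an embedder that wants to intercept fatal errors installs its own; otherwise the new ReportApiFailure path prints to stderr and aborts. A sketch using the unchanged public callback type (OnFatal is illustrative):

    #include <stdio.h>

    void OnFatal(const char* location, const char* message) {
      fprintf(stderr, "V8 fatal: %s: %s\n", location, message);
      // If this returns, V8 still stops execution, per the comment above.
    }

    v8::V8::SetFatalErrorHandler(OnFatal);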
void V8::SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
isolate->set_allow_code_gen_callback(callback);
}
@@ -472,11 +376,6 @@ void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
}
-v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
- return v8::Isolate::GetCurrent()->ThrowException(value);
-}
-
-
RegisteredExtension* RegisteredExtension::first_extension_ = NULL;
@@ -523,52 +422,17 @@ Extension::Extension(const char* name,
}
-v8::Handle<Primitive> Undefined() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Undefined()")) {
- return v8::Handle<v8::Primitive>();
- }
- return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
-}
-
-
-v8::Handle<Primitive> Null() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Null()")) {
- return v8::Handle<v8::Primitive>();
- }
- return ToApiHandle<Primitive>(isolate->factory()->null_value());
-}
-
-
-v8::Handle<Boolean> True() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::True()")) {
- return v8::Handle<Boolean>();
- }
- return ToApiHandle<Boolean>(isolate->factory()->true_value());
-}
-
-
-v8::Handle<Boolean> False() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::False()")) {
- return v8::Handle<Boolean>();
- }
- return ToApiHandle<Boolean>(isolate->factory()->false_value());
-}
-
-
ResourceConstraints::ResourceConstraints()
- : max_young_space_size_(0),
- max_old_space_size_(0),
- max_executable_size_(0),
- stack_limit_(NULL),
- max_available_threads_(0) { }
+ : max_semi_space_size_(0),
+ max_old_space_size_(0),
+ max_executable_size_(0),
+ stack_limit_(NULL),
+ max_available_threads_(0),
+ code_range_size_(0) { }
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
+ uint64_t virtual_memory_limit,
uint32_t number_of_processors) {
- const int lump_of_memory = (i::kPointerSize / 4) * i::MB;
#if V8_OS_ANDROID
// Android has higher physical memory requirements before raising the maximum
// heap size limits since it has no swap space.
@@ -581,47 +445,51 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
const uint64_t high_limit = 1ul * i::GB;
#endif
- // The young_space_size should be a power of 2 and old_generation_size should
- // be a multiple of Page::kPageSize.
if (physical_memory <= low_limit) {
- set_max_young_space_size(2 * lump_of_memory);
- set_max_old_space_size(128 * lump_of_memory);
- set_max_executable_size(96 * lump_of_memory);
+ set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeLowMemoryDevice);
+ set_max_old_space_size(i::Heap::kMaxOldSpaceSizeLowMemoryDevice);
+ set_max_executable_size(i::Heap::kMaxExecutableSizeLowMemoryDevice);
} else if (physical_memory <= medium_limit) {
- set_max_young_space_size(8 * lump_of_memory);
- set_max_old_space_size(256 * lump_of_memory);
- set_max_executable_size(192 * lump_of_memory);
+ set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeMediumMemoryDevice);
+ set_max_old_space_size(i::Heap::kMaxOldSpaceSizeMediumMemoryDevice);
+ set_max_executable_size(i::Heap::kMaxExecutableSizeMediumMemoryDevice);
} else if (physical_memory <= high_limit) {
- set_max_young_space_size(16 * lump_of_memory);
- set_max_old_space_size(512 * lump_of_memory);
- set_max_executable_size(256 * lump_of_memory);
+ set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHighMemoryDevice);
+ set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHighMemoryDevice);
+ set_max_executable_size(i::Heap::kMaxExecutableSizeHighMemoryDevice);
} else {
- set_max_young_space_size(16 * lump_of_memory);
- set_max_old_space_size(700 * lump_of_memory);
- set_max_executable_size(256 * lump_of_memory);
+ set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHugeMemoryDevice);
+ set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHugeMemoryDevice);
+ set_max_executable_size(i::Heap::kMaxExecutableSizeHugeMemoryDevice);
}
set_max_available_threads(i::Max(i::Min(number_of_processors, 4u), 1u));
-}
-
-void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory) {
- ConfigureDefaults(physical_memory, i::CPU::NumberOfProcessorsOnline());
+ if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
+ // Reserve no more than 1/8 of the memory for the code range, but at most
+ // kMaximalCodeRangeSize.
+ set_code_range_size(
+ i::Min(i::kMaximalCodeRangeSize / i::MB,
+ static_cast<size_t>((virtual_memory_limit >> 3) / i::MB)));
+ }
}
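
The new clause reserves an eighth of the virtual address budget for the code range, capped at kMaximalCodeRangeSize (whose value is platform-dependent and not shown in this patch). The same arithmetic, standalone, with values in MB:

    #include <stddef.h>
    #include <stdint.h>

    size_t CodeRangeSizeMB(uint64_t virtual_memory_limit, size_t cap_mb) {
      size_t eighth_mb =
          static_cast<size_t>((virtual_memory_limit >> 3) / (1024 * 1024));
      return eighth_mb < cap_mb ? eighth_mb : cap_mb;  // i::Min equivalent
    }
    // e.g. a 4 GiB limit yields 512 MB before the cap applies.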
bool SetResourceConstraints(Isolate* v8_isolate,
ResourceConstraints* constraints) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- int young_space_size = constraints->max_young_space_size();
- int old_gen_size = constraints->max_old_space_size();
+ int semi_space_size = constraints->max_semi_space_size();
+ int old_space_size = constraints->max_old_space_size();
int max_executable_size = constraints->max_executable_size();
- if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
+ size_t code_range_size = constraints->code_range_size();
+ if (semi_space_size != 0 || old_space_size != 0 ||
+ max_executable_size != 0 || code_range_size != 0) {
// After initialization it's too late to change Heap constraints.
ASSERT(!isolate->IsInitialized());
- bool result = isolate->heap()->ConfigureHeap(young_space_size / 2,
- old_gen_size,
- max_executable_size);
+ bool result = isolate->heap()->ConfigureHeap(semi_space_size,
+ old_space_size,
+ max_executable_size,
+ code_range_size);
if (!result) return false;
}
if (constraints->stack_limit() != NULL) {
@@ -638,7 +506,7 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
LOG_API(isolate, "Persistent::New");
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef DEBUG
- (*obj)->Verify();
+ (*obj)->ObjectVerify();
#endif // DEBUG
return result.location();
}
@@ -647,7 +515,7 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
i::Object** V8::CopyPersistent(i::Object** obj) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
#ifdef DEBUG
- (*obj)->Verify();
+ (*obj)->ObjectVerify();
#endif // DEBUG
return result.location();
}
@@ -655,17 +523,13 @@ i::Object** V8::CopyPersistent(i::Object** obj) {
void V8::MakeWeak(i::Object** object,
void* parameters,
- WeakCallback weak_callback,
- RevivableCallback weak_reference_callback) {
- i::GlobalHandles::MakeWeak(object,
- parameters,
- weak_callback,
- weak_reference_callback);
+ WeakCallback weak_callback) {
+ i::GlobalHandles::MakeWeak(object, parameters, weak_callback);
}
-void V8::ClearWeak(i::Object** obj) {
- i::GlobalHandles::ClearWeakness(obj);
+void* V8::ClearWeak(i::Object** obj) {
+ return i::GlobalHandles::ClearWeakness(obj);
}
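
ClearWeak now hands back the parameter that was registered with MakeWeak, so the caller can reclaim it. At the public-API level this surfaces as the templated ClearWeak on persistent handles in this V8 version; a hedged sketch pairing with the SetWeak example earlier:

    int* payload = handle.ClearWeak<int>();  // weakness cancelled
    delete payload;                          // payload returned to the embedder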
@@ -697,35 +561,29 @@ HandleScope::HandleScope(Isolate* isolate) {
void HandleScope::Initialize(Isolate* isolate) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- API_ENTRY_CHECK(internal_isolate, "HandleScope::HandleScope");
- v8::ImplementationUtilities::HandleScopeData* current =
- internal_isolate->handle_scope_data();
+ // We do not want to check the correct usage of the Locker class all over the
+ // place, so we do it only here: Without a HandleScope, an embedder can do
+ // almost nothing, so it is enough to check in this central place.
+ Utils::ApiCheck(!v8::Locker::IsActive() ||
+ internal_isolate->thread_manager()->IsLockedByCurrentThread(),
+ "HandleScope::HandleScope",
+ "Entering the V8 API without proper locking in place");
+ i::HandleScopeData* current = internal_isolate->handle_scope_data();
isolate_ = internal_isolate;
prev_next_ = current->next;
prev_limit_ = current->limit;
- is_closed_ = false;
current->level++;
}
HandleScope::~HandleScope() {
- if (!is_closed_) {
- Leave();
- }
-}
-
-
-void HandleScope::Leave() {
- return i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
+ i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
}
-int HandleScope::NumberOfHandles() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "HandleScope::NumberOfHandles")) {
- return 0;
- }
- return i::HandleScope::NumberOfHandles(isolate);
+int HandleScope::NumberOfHandles(Isolate* isolate) {
+ return i::HandleScope::NumberOfHandles(
+ reinterpret_cast<i::Isolate*>(isolate));
}
@@ -749,11 +607,12 @@ EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
- ApiCheck(*escape_slot_ == isolate_->heap()->the_hole_value(),
- "EscapeableHandleScope::Escape",
- "Escape value set twice");
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(GetIsolate())->heap();
+ Utils::ApiCheck(*escape_slot_ == heap->the_hole_value(),
+ "EscapeableHandleScope::Escape",
+ "Escape value set twice");
if (escape_value == NULL) {
- *escape_slot_ = isolate_->heap()->undefined_value();
+ *escape_slot_ = heap->undefined_value();
return NULL;
}
*escape_slot_ = *escape_value;
@@ -765,38 +624,37 @@ void Context::Enter() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
ENTER_V8(isolate);
- isolate->handle_scope_implementer()->EnterContext(env);
- isolate->handle_scope_implementer()->SaveContext(isolate->context());
+ i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ impl->EnterContext(env);
+ impl->SaveContext(isolate->context());
isolate->set_context(*env);
}
void Context::Exit() {
- // TODO(dcarney): fix this once chrome is fixed.
- i::Isolate* isolate = i::Isolate::Current();
- i::Handle<i::Context> context = i::Handle<i::Context>::null();
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* isolate = env->GetIsolate();
ENTER_V8(isolate);
- if (!ApiCheck(isolate->handle_scope_implementer()->LeaveContext(context),
- "v8::Context::Exit()",
- "Cannot exit non-entered context")) {
+ i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ if (!Utils::ApiCheck(impl->LastEnteredContextWas(env),
+ "v8::Context::Exit()",
+ "Cannot exit non-entered context")) {
return;
}
- // Content of 'last_context' could be NULL.
- i::Context* last_context =
- isolate->handle_scope_implementer()->RestoreContext();
- isolate->set_context(last_context);
+ impl->LeaveContext();
+ isolate->set_context(impl->RestoreContext());
}
static void* DecodeSmiToAligned(i::Object* value, const char* location) {
- ApiCheck(value->IsSmi(), location, "Not a Smi");
+ Utils::ApiCheck(value->IsSmi(), location, "Not a Smi");
return reinterpret_cast<void*>(value);
}
static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
i::Smi* smi = reinterpret_cast<i::Smi*>(value);
- ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
+ Utils::ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
return smi;
}
@@ -807,17 +665,18 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
bool ok =
- ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
- ApiCheck(index >= 0, location, "Negative index");
+ Utils::ApiCheck(env->IsNativeContext(),
+ location,
+ "Not a native context") &&
+ Utils::ApiCheck(index >= 0, location, "Negative index");
if (!ok) return i::Handle<i::FixedArray>();
i::Handle<i::FixedArray> data(env->embedder_data());
if (index < data->length()) return data;
- if (!can_grow) {
- Utils::ReportApiFailure(location, "Index too large");
+ if (!Utils::ApiCheck(can_grow, location, "Index too large")) {
return i::Handle<i::FixedArray>();
}
int new_size = i::Max(index, data->length() << 1) + 1;
- data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size);
+ data = i::FixedArray::CopySize(data, new_size);
env->set_embedder_data(*data);
return data;
}
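
For reference, the growth step above at least doubles the embedder-data array: with data->length() == 3 and index == 5 it allocates i::Max(5, 3 << 1) + 1 == 7 slots, so writes at steadily increasing indices stay amortized O(1).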
@@ -859,32 +718,6 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
}
-i::Object** v8::HandleScope::RawClose(i::Object** value) {
- if (!ApiCheck(!is_closed_,
- "v8::HandleScope::Close()",
- "Local scope has already been closed")) {
- return 0;
- }
- LOG_API(isolate_, "CloseHandleScope");
-
- // Read the result before popping the handle block.
- i::Object* result = NULL;
- if (value != NULL) {
- result = *value;
- }
- is_closed_ = true;
- Leave();
-
- if (value == NULL) {
- return NULL;
- }
-
- // Allocate a new handle on the previous handle block.
- i::Handle<i::Object> handle(result, isolate_);
- return handle.location();
-}
-
-
// --- N e a n d e r ---
@@ -893,8 +726,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
// objects. To remind you about this there is no HandleScope in the
// NeanderObject constructor. When you add one to the site calling the
// constructor you should check that you ensured the VM was not dead first.
-NeanderObject::NeanderObject(int size) {
- i::Isolate* isolate = i::Isolate::Current();
+NeanderObject::NeanderObject(v8::internal::Isolate* isolate, int size) {
EnsureInitializedForIsolate(isolate, "v8::Nowhere");
ENTER_V8(isolate);
value_ = isolate->factory()->NewNeanderObject();
@@ -908,7 +740,7 @@ int NeanderObject::size() {
}
-NeanderArray::NeanderArray() : obj_(2) {
+NeanderArray::NeanderArray(v8::internal::Isolate* isolate) : obj_(isolate, 2) {
obj_.set(0, i::Smi::FromInt(0));
}
@@ -965,11 +797,11 @@ static void TemplateSet(i::Isolate* isolate,
v8::Handle<v8::Data>* data) {
i::Handle<i::Object> list(Utils::OpenHandle(templ)->property_list(), isolate);
if (list->IsUndefined()) {
- list = NeanderArray().value();
+ list = NeanderArray(isolate).value();
Utils::OpenHandle(templ)->set_property_list(*list);
}
NeanderArray array(list);
- array.add(Utils::OpenHandle(*v8::Integer::New(length)));
+ array.add(isolate->factory()->NewNumberFromInt(length));
for (int i = 0; i < length; i++) {
i::Handle<i::Object> value = data[i].IsEmpty() ?
i::Handle<i::Object>(isolate->factory()->undefined_value()) :
@@ -986,10 +818,11 @@ void Template::Set(v8::Handle<String> name,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
const int kSize = 3;
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Handle<v8::Data> data[kSize] = {
- name,
- value,
- v8::Integer::New(attribute)};
+ name,
+ value,
+ v8::Integer::New(v8_isolate, attribute)};
TemplateSet(isolate, this, kSize, data);
}
@@ -1006,31 +839,33 @@ void Template::SetAccessorProperty(
ASSERT(!getter.IsEmpty() || !setter.IsEmpty());
i::HandleScope scope(isolate);
const int kSize = 5;
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Handle<v8::Data> data[kSize] = {
- name,
- getter,
- setter,
- v8::Integer::New(attribute),
- v8::Integer::New(access_control)};
+ name,
+ getter,
+ setter,
+ v8::Integer::New(v8_isolate, attribute),
+ v8::Integer::New(v8_isolate, access_control)};
TemplateSet(isolate, this, kSize, data);
}
// --- F u n c t i o n T e m p l a t e ---
static void InitializeFunctionTemplate(
- i::Handle<i::FunctionTemplateInfo> info) {
+ i::Handle<i::FunctionTemplateInfo> info) {
info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
info->set_flag(0);
}
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(i_isolate);
i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
- isolate);
+ i_isolate);
if (result->IsUndefined()) {
- result = Utils::OpenHandle(*ObjectTemplate::New());
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+ result = Utils::OpenHandle(*ObjectTemplate::New(isolate));
Utils::OpenHandle(this)->set_prototype_template(*result);
}
return ToApiHandle<ObjectTemplate>(result);
@@ -1092,14 +927,6 @@ Local<FunctionTemplate> FunctionTemplate::New(
}
-Local<FunctionTemplate> FunctionTemplate::New(
- FunctionCallback callback,
- v8::Handle<Value> data,
- v8::Handle<Signature> signature,
- int length) {
- return New(Isolate::GetCurrent(), callback, data, signature, length);
-}
-
Local<Signature> Signature::New(Isolate* isolate,
Handle<FunctionTemplate> receiver, int argc,
Handle<FunctionTemplate> argv[]) {
@@ -1124,22 +951,9 @@ Local<Signature> Signature::New(Isolate* isolate,
}
-Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
- int argc, Handle<FunctionTemplate> argv[]) {
- return New(Isolate::GetCurrent(), receiver, argc, argv);
-}
-
-
Local<AccessorSignature> AccessorSignature::New(
- Isolate* isolate,
- Handle<FunctionTemplate> receiver) {
- return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
-}
-
-
-// While this is just a cast, it's lame not to use an Isolate parameter.
-Local<AccessorSignature> AccessorSignature::New(
- Handle<FunctionTemplate> receiver) {
+ Isolate* isolate,
+ Handle<FunctionTemplate> receiver) {
return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
}
@@ -1154,7 +968,7 @@ static Local<Operation> NewDescriptor(
i::Handle<i::DeclaredAccessorDescriptor>();
if (previous_descriptor != NULL) {
previous = Utils::OpenHandle(
- static_cast<DeclaredAccessorDescriptor*>(previous_descriptor));
+ static_cast<DeclaredAccessorDescriptor*>(previous_descriptor));
}
i::Handle<i::DeclaredAccessorDescriptor> descriptor =
i::DeclaredAccessorDescriptor::Create(internal_isolate, data, previous);
@@ -1163,7 +977,7 @@ static Local<Operation> NewDescriptor(
Local<RawOperationDescriptor>
- ObjectOperationDescriptor::NewInternalFieldDereference(
+ObjectOperationDescriptor::NewInternalFieldDereference(
Isolate* isolate,
int internal_field) {
i::DeclaredAccessorDescriptorData data;
@@ -1298,9 +1112,9 @@ int TypeSwitch::match(v8::Handle<Value> value) {
}
-#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
- i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
- (obj)->setter(*foreign); \
+#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
+ i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
+ (obj)->setter(*foreign); \
} while (false)
@@ -1331,7 +1145,6 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
obj->set_name(*Utils::OpenHandle(*name));
if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
- if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
if (!signature.IsEmpty()) {
obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
@@ -1342,13 +1155,13 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
template<typename Getter, typename Setter>
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<String> name,
- Getter getter,
- Setter setter,
- v8::Handle<Value> data,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
+ v8::Handle<String> name,
+ Getter getter,
+ Setter setter,
+ v8::Handle<Value> data,
+ v8::AccessControl settings,
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::ExecutableAccessorInfo> obj =
isolate->factory()->NewExecutableAccessorInfo();
@@ -1363,13 +1176,13 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<String> name,
- v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
- void* setter_ignored,
- void* data_ignored,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
+ v8::Handle<String> name,
+ v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
+ void* setter_ignored,
+ void* data_ignored,
+ v8::AccessControl settings,
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
if (descriptor.IsEmpty()) return i::Handle<i::DeclaredAccessorInfo>();
i::Handle<i::DeclaredAccessorInfo> obj =
@@ -1380,11 +1193,14 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
+ i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this, true);
+ if (!Utils::ApiCheck(!handle.is_null(),
+ "v8::FunctionTemplate::InstanceTemplate()",
+ "Reading from empty handle")) {
return Local<ObjectTemplate>();
+ }
+ i::Isolate* isolate = handle->GetIsolate();
ENTER_V8(isolate);
- i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this);
if (handle->instance_template()->IsUndefined()) {
Local<ObjectTemplate> templ =
ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle));
@@ -1445,8 +1261,8 @@ Local<ObjectTemplate> ObjectTemplate::New() {
Local<ObjectTemplate> ObjectTemplate::New(
- i::Isolate* isolate,
- v8::Handle<FunctionTemplate> constructor) {
+ i::Isolate* isolate,
+ v8::Handle<FunctionTemplate> constructor) {
EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
LOG_API(isolate, "ObjectTemplate::New");
ENTER_V8(isolate);
@@ -1465,13 +1281,15 @@ Local<ObjectTemplate> ObjectTemplate::New(
// Ensure that the object template has a constructor. If no
// constructor is available we create one.
static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
+ i::Isolate* isolate,
ObjectTemplate* object_template) {
i::Object* obj = Utils::OpenHandle(object_template)->constructor();
if (!obj ->IsUndefined()) {
i::FunctionTemplateInfo* info = i::FunctionTemplateInfo::cast(obj);
- return i::Handle<i::FunctionTemplateInfo>(info, info->GetIsolate());
+ return i::Handle<i::FunctionTemplateInfo>(info, isolate);
}
- Local<FunctionTemplate> templ = FunctionTemplate::New();
+ Local<FunctionTemplate> templ =
+ FunctionTemplate::New(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
constructor->set_instance_template(*Utils::OpenHandle(object_template));
Utils::OpenHandle(object_template)->set_constructor(*constructor);
@@ -1482,9 +1300,10 @@ static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
static inline void AddPropertyToTemplate(
i::Handle<i::TemplateInfo> info,
i::Handle<i::AccessorInfo> obj) {
- i::Handle<i::Object> list(info->property_accessors(), info->GetIsolate());
+ i::Isolate* isolate = info->GetIsolate();
+ i::Handle<i::Object> list(info->property_accessors(), isolate);
if (list->IsUndefined()) {
- list = NeanderArray().value();
+ list = NeanderArray(isolate).value();
info->set_property_accessors(*list);
}
NeanderArray array(list);
@@ -1493,6 +1312,7 @@ static inline void AddPropertyToTemplate(
static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ i::Isolate* isolate,
Template* template_obj) {
return Utils::OpenHandle(template_obj);
}
@@ -1500,8 +1320,9 @@ static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
// TODO(dcarney): remove this with ObjectTemplate::SetAccessor
static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ i::Isolate* isolate,
ObjectTemplate* object_template) {
- EnsureConstructor(object_template);
+ EnsureConstructor(isolate, object_template);
return Utils::OpenHandle(object_template);
}
@@ -1522,7 +1343,7 @@ static bool TemplateSetAccessor(
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
name, getter, setter, data, settings, attribute, signature);
if (obj.is_null()) return false;
- i::Handle<i::TemplateInfo> info = GetTemplateInfo(template_obj);
+ i::Handle<i::TemplateInfo> info = GetTemplateInfo(isolate, template_obj);
AddPropertyToTemplate(info, obj);
return true;
}
@@ -1574,7 +1395,7 @@ void ObjectTemplate::SetNamedPropertyHandler(
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
@@ -1601,7 +1422,7 @@ void ObjectTemplate::MarkAsUndetectable() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
@@ -1610,14 +1431,14 @@ void ObjectTemplate::MarkAsUndetectable() {
void ObjectTemplate::SetAccessCheckCallbacks(
- NamedSecurityCallback named_callback,
- IndexedSecurityCallback indexed_callback,
- Handle<Value> data,
- bool turned_on_by_default) {
+ NamedSecurityCallback named_callback,
+ IndexedSecurityCallback indexed_callback,
+ Handle<Value> data,
+ bool turned_on_by_default) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::Handle<i::Struct> struct_info =
isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
@@ -1641,16 +1462,16 @@ void ObjectTemplate::SetAccessCheckCallbacks(
void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter,
- IndexedPropertyQueryCallback query,
- IndexedPropertyDeleterCallback remover,
- IndexedPropertyEnumeratorCallback enumerator,
- Handle<Value> data) {
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter,
+ IndexedPropertyQueryCallback query,
+ IndexedPropertyDeleterCallback remover,
+ IndexedPropertyEnumeratorCallback enumerator,
+ Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
@@ -1678,7 +1499,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
@@ -1702,9 +1523,9 @@ int ObjectTemplate::InternalFieldCount() {
void ObjectTemplate::SetInternalFieldCount(int value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (!ApiCheck(i::Smi::IsValid(value),
- "v8::ObjectTemplate::SetInternalFieldCount()",
- "Invalid internal field count")) {
+ if (!Utils::ApiCheck(i::Smi::IsValid(value),
+ "v8::ObjectTemplate::SetInternalFieldCount()",
+ "Invalid internal field count")) {
return;
}
ENTER_V8(isolate);
@@ -1712,291 +1533,258 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
// The internal field count is set by the constructor function's
// construct code, so we ensure that there is a constructor
// function to do the setting.
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
}
Utils::OpenHandle(this)->set_internal_field_count(i::Smi::FromInt(value));
}
-// --- S c r i p t D a t a ---
+// --- S c r i p t s ---
-ScriptData* ScriptData::PreCompile(v8::Isolate* isolate,
- const char* input,
- int length) {
- i::Utf8ToUtf16CharacterStream stream(
- reinterpret_cast<const unsigned char*>(input), length);
- return i::PreParserApi::PreParse(
- reinterpret_cast<i::Isolate*>(isolate), &stream);
-}
+// Internally, UnboundScript is a SharedFunctionInfo, and Script is a
+// JSFunction.
-
-ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Isolate* isolate = str->GetIsolate();
- if (str->IsExternalTwoByteString()) {
- i::ExternalTwoByteStringUtf16CharacterStream stream(
- i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
- return i::PreParserApi::PreParse(isolate, &stream);
- } else {
- i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
- return i::PreParserApi::PreParse(isolate, &stream);
- }
-}
+ScriptCompiler::CachedData::CachedData(const uint8_t* data_, int length_,
+ BufferPolicy buffer_policy_)
+ : data(data_), length(length_), buffer_policy(buffer_policy_) {}
-ScriptData* ScriptData::New(const char* data, int length) {
- // Return an empty ScriptData if the length is obviously invalid.
- if (length % sizeof(unsigned) != 0) {
- return new i::ScriptDataImpl();
+ScriptCompiler::CachedData::~CachedData() {
+ if (buffer_policy == BufferOwned) {
+ delete[] data;
}
-
- // Copy the data to ensure it is properly aligned.
- int deserialized_data_length = length / sizeof(unsigned);
- // If aligned, don't create a copy of the data.
- if (reinterpret_cast<intptr_t>(data) % sizeof(unsigned) == 0) {
- return new i::ScriptDataImpl(data, length);
- }
- // Copy the data to align it.
- unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
- i::CopyBytes(reinterpret_cast<char*>(deserialized_data),
- data, static_cast<size_t>(length));
-
- return new i::ScriptDataImpl(
- i::Vector<unsigned>(deserialized_data, deserialized_data_length));
}
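
CachedData now carries an explicit BufferPolicy, making ownership of the bytes a stated contract: BufferOwned lets the destructor above free them, BufferNotOwned leaves them to the embedder. A hedged sketch of producing cache data on a first compile and consuming it on a later one (error handling omitted; source_string is illustrative):

    // First compile: ask V8 to produce cacheable data.
    v8::ScriptCompiler::Source producing(source_string);
    v8::ScriptCompiler::CompileUnbound(isolate, &producing,
                                       v8::ScriptCompiler::kProduceDataToCache);
    const v8::ScriptCompiler::CachedData* cache = producing.GetCachedData();

    // Later compile: hand the bytes back; BufferNotOwned (the default)
    // keeps ownership on our side.
    v8::ScriptCompiler::Source consuming(
        source_string,
        new v8::ScriptCompiler::CachedData(cache->data, cache->length));
    v8::ScriptCompiler::CompileUnbound(isolate, &consuming);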
-// --- S c r i p t ---
-
-
-Local<Script> Script::New(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Isolate* isolate = str->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
- LOG_API(isolate, "Script::New");
- ENTER_V8(isolate);
- i::SharedFunctionInfo* raw_result = NULL;
- { i::HandleScope scope(isolate);
- i::Handle<i::Object> name_obj;
- int line_offset = 0;
- int column_offset = 0;
- bool is_shared_cross_origin = false;
- if (origin != NULL) {
- if (!origin->ResourceName().IsEmpty()) {
- name_obj = Utils::OpenHandle(*origin->ResourceName());
- }
- if (!origin->ResourceLineOffset().IsEmpty()) {
- line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
- }
- if (!origin->ResourceColumnOffset().IsEmpty()) {
- column_offset =
- static_cast<int>(origin->ResourceColumnOffset()->Value());
- }
- if (!origin->ResourceIsSharedCrossOrigin().IsEmpty()) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- is_shared_cross_origin =
- origin->ResourceIsSharedCrossOrigin() == v8::True(v8_isolate);
- }
- }
- EXCEPTION_PREAMBLE(isolate);
- i::ScriptDataImpl* pre_data_impl =
- static_cast<i::ScriptDataImpl*>(pre_data);
- // We assert that the pre-data is sane, even though we can actually
- // handle it if it turns out not to be in release mode.
- ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
- // If the pre-data isn't sane we simply ignore it
- if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
- pre_data_impl = NULL;
- }
- i::Handle<i::SharedFunctionInfo> result =
- i::Compiler::Compile(str,
- name_obj,
- line_offset,
- column_offset,
- is_shared_cross_origin,
- isolate->global_context(),
- NULL,
- pre_data_impl,
- Utils::OpenHandle(*script_data, true),
- i::NOT_NATIVES_CODE);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
- raw_result = *result;
- }
- i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
- return ToApiHandle<Script>(result);
+Local<Script> UnboundScript::BindToCurrentContext() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::SharedFunctionInfo>
+ function_info(i::SharedFunctionInfo::cast(*obj), obj->GetIsolate());
+ i::Handle<i::JSFunction> function =
+ obj->GetIsolate()->factory()->NewFunctionFromSharedFunctionInfo(
+ function_info, obj->GetIsolate()->global_context());
+ return ToApiHandle<Script>(function);
}
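
Per the comment above, an UnboundScript wraps a SharedFunctionInfo, and binding it manufactures a JSFunction against the current global context. The embedder-visible flow is compile once, bind per context (a sketch; checks omitted):

    v8::ScriptCompiler::Source source(source_string);
    v8::Local<v8::UnboundScript> unbound =
        v8::ScriptCompiler::CompileUnbound(isolate, &source);

    // Each entered context gets its own bound Script (a JSFunction inside).
    v8::Local<v8::Script> script = unbound->BindToCurrentContext();
    v8::Local<v8::Value> result = script->Run();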
-Local<Script> Script::New(v8::Handle<String> source,
- v8::Handle<Value> file_name) {
- ScriptOrigin origin(file_name);
- return New(source, &origin);
+int UnboundScript::GetId() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::UnboundScript::GetId()", return -1);
+ LOG_API(isolate, "v8::UnboundScript::GetId");
+ {
+ i::HandleScope scope(isolate);
+ i::Handle<i::SharedFunctionInfo> function_info(
+ i::SharedFunctionInfo::cast(*obj));
+ i::Handle<i::Script> script(i::Script::cast(function_info->script()));
+ return script->id()->value();
+ }
}
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Isolate* isolate = str->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
- LOG_API(isolate, "Script::Compile");
- ENTER_V8(isolate);
- Local<Script> generic = New(source, origin, pre_data, script_data);
- if (generic.IsEmpty())
- return generic;
- i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
- i::Handle<i::SharedFunctionInfo> function =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- i::Handle<i::JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function,
- isolate->global_context());
- return ToApiHandle<Script>(result);
+int UnboundScript::GetLineNumber(int code_pos) {
+ i::Handle<i::SharedFunctionInfo> obj =
+ i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::UnboundScript::GetLineNumber()", return -1);
+ LOG_API(isolate, "UnboundScript::GetLineNumber");
+ if (obj->script()->IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(obj->script()));
+ return i::Script::GetLineNumber(script, code_pos);
+ } else {
+ return -1;
+ }
}
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::Handle<Value> file_name,
- v8::Handle<String> script_data) {
- ScriptOrigin origin(file_name);
- return Compile(source, &origin, 0, script_data);
+Handle<Value> UnboundScript::GetScriptName() {
+ i::Handle<i::SharedFunctionInfo> obj =
+ i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::UnboundScript::GetName()",
+ return Handle<String>());
+ LOG_API(isolate, "UnboundScript::GetName");
+ if (obj->script()->IsScript()) {
+ i::Object* name = i::Script::cast(obj->script())->name();
+ return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
+ } else {
+ return Handle<String>();
+ }
}
Local<Value> Script::Run() {
- // If execution is terminating, Compile(script)->Run() requires this check.
- if (this == NULL) return Local<Value>();
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
+ i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
+ // If execution is terminating, Compile(..)->Run() requires this
+ // check.
+ if (obj.is_null()) return Local<Value>();
+ i::Isolate* isolate = i::Handle<i::HeapObject>::cast(obj)->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
LOG_API(isolate, "Script::Run");
ENTER_V8(isolate);
i::Logger::TimerEventScope timer_scope(
isolate, i::Logger::TimerEventScope::v8_execute);
- i::Object* raw_result = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::JSFunction> fun;
- if (obj->IsSharedFunctionInfo()) {
- i::Handle<i::SharedFunctionInfo>
- function_info(i::SharedFunctionInfo::cast(*obj), isolate);
- fun = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function_info, isolate->global_context());
- } else {
- fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
- }
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> receiver(
- isolate->context()->global_proxy(), isolate);
- i::Handle<i::Object> result = i::Execution::Call(
- isolate, fun, receiver, 0, NULL, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
- raw_result = *result;
- }
- i::Handle<i::Object> result(raw_result, isolate);
- return Utils::ToLocal(result);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> receiver(
+ isolate->context()->global_proxy(), isolate);
+ i::Handle<i::Object> result;
+ has_pending_exception = !i::Execution::Call(
+ isolate, fun, receiver, 0, NULL).ToHandle(&result);
+ EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
+ return Utils::ToLocal(scope.CloseAndEscape(result));
}
-static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
- i::Handle<i::Object> obj = Utils::OpenHandle(script);
- i::Handle<i::SharedFunctionInfo> result;
- if (obj->IsSharedFunctionInfo()) {
- result =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- } else {
- result =
- i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared());
- }
- return result;
+Local<UnboundScript> Script::GetUnboundScript() {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return ToApiHandle<UnboundScript>(
+ i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared()));
}
-Local<Value> Script::Id() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>());
- LOG_API(isolate, "Script::Id");
- i::Object* raw_id = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- i::Handle<i::Object> id(script->id(), isolate);
- raw_id = *id;
+Local<UnboundScript> ScriptCompiler::CompileUnbound(
+ Isolate* v8_isolate,
+ Source* source,
+ CompileOptions options) {
+ i::ScriptData* script_data_impl = NULL;
+ i::CachedDataMode cached_data_mode = i::NO_CACHED_DATA;
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileUnbound()",
+ return Local<UnboundScript>());
+ if (options & kProduceDataToCache) {
+ cached_data_mode = i::PRODUCE_CACHED_DATA;
+ ASSERT(source->cached_data == NULL);
+ if (source->cached_data) {
+ // Asked to produce cached data even though some is already present:
+ // fail the compilation.
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(
+ "invalid_cached_data", isolate->factory()->NewJSArray(0));
+ isolate->Throw(*result);
+ isolate->ReportPendingMessages();
+ has_pending_exception = true;
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>());
+ }
+ } else if (source->cached_data) {
+ cached_data_mode = i::CONSUME_CACHED_DATA;
+ // ScriptData takes care of aligning the data in case it is not
+ // aligned correctly.
+ script_data_impl = i::ScriptData::New(
+ reinterpret_cast<const char*>(source->cached_data->data),
+ source->cached_data->length);
+ // If the cached data is not valid, fail the compilation.
+ if (script_data_impl == NULL || !script_data_impl->SanityCheck()) {
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(
+ "invalid_cached_data", isolate->factory()->NewJSArray(0));
+ isolate->Throw(*result);
+ isolate->ReportPendingMessages();
+ delete script_data_impl;
+ has_pending_exception = true;
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>());
+ }
}
- i::Handle<i::Object> id(raw_id, isolate);
- return Utils::ToLocal(id);
-}
-
-int Script::GetId() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::Id()", return -1);
- LOG_API(isolate, "Script::Id");
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- return script->id()->value();
+ i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
+ LOG_API(isolate, "ScriptCompiler::CompileUnbound");
+ ENTER_V8(isolate);
+ i::SharedFunctionInfo* raw_result = NULL;
+ { i::HandleScope scope(isolate);
+ i::Handle<i::Object> name_obj;
+ int line_offset = 0;
+ int column_offset = 0;
+ bool is_shared_cross_origin = false;
+ if (!source->resource_name.IsEmpty()) {
+ name_obj = Utils::OpenHandle(*(source->resource_name));
+ }
+ if (!source->resource_line_offset.IsEmpty()) {
+ line_offset = static_cast<int>(source->resource_line_offset->Value());
+ }
+ if (!source->resource_column_offset.IsEmpty()) {
+ column_offset =
+ static_cast<int>(source->resource_column_offset->Value());
+ }
+ if (!source->resource_is_shared_cross_origin.IsEmpty()) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ is_shared_cross_origin =
+ source->resource_is_shared_cross_origin == v8::True(v8_isolate);
+ }
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::SharedFunctionInfo> result =
+ i::Compiler::CompileScript(str,
+ name_obj,
+ line_offset,
+ column_offset,
+ is_shared_cross_origin,
+ isolate->global_context(),
+ NULL,
+ &script_data_impl,
+ cached_data_mode,
+ i::NOT_NATIVES_CODE);
+ has_pending_exception = result.is_null();
+ if (has_pending_exception && cached_data_mode == i::CONSUME_CACHED_DATA) {
+ // This case won't happen during normal operation: it means the first
+ // compilation succeeded and produced cached data, but a second
+ // compilation of the same source code failed.
+ delete script_data_impl;
+ script_data_impl = NULL;
+ }
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>());
+ raw_result = *result;
+ if ((options & kProduceDataToCache) && script_data_impl != NULL) {
+ // script_data_impl now contains the data that was generated. source
+ // takes ownership of it.
+ source->cached_data = new CachedData(
+ reinterpret_cast<const uint8_t*>(script_data_impl->Data()),
+ script_data_impl->Length(), CachedData::BufferOwned);
+ script_data_impl->owns_store_ = false;
+ }
+ delete script_data_impl;
}
+ i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
+ return ToApiHandle<UnboundScript>(result);
}
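// A sketch of the cached-data round trip from the embedder side, using the
// public API added here; source_string is a placeholder Local<String>, and
// the second run assumes the embedder persisted a copy of the produced data:
//
//   v8::ScriptCompiler::Source first_run(source_string);
//   v8::ScriptCompiler::CompileUnbound(
//       isolate, &first_run, v8::ScriptCompiler::kProduceDataToCache);
//   const v8::ScriptCompiler::CachedData* data = first_run.GetCachedData();
//   // Later: pass a copy of data into a new Source; CompileUnbound then
//   // takes the CONSUME_CACHED_DATA path above, and rejects the blob via
//   // the SanityCheck() branch if it is corrupt or mismatched.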
-int Script::GetLineNumber(int code_pos) {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
- LOG_API(isolate, "Script::GetLineNumber");
- if (obj->IsScript()) {
- i::Handle<i::Script> script = i::Handle<i::Script>(i::Script::cast(*obj));
- return i::GetScriptLineNumber(script, code_pos);
- } else {
- return -1;
- }
+Local<Script> ScriptCompiler::Compile(
+ Isolate* v8_isolate,
+ Source* source,
+ CompileOptions options) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()", return Local<Script>());
+ LOG_API(isolate, "ScriptCompiler::Compile()");
+ ENTER_V8(isolate);
+ Local<UnboundScript> generic = CompileUnbound(v8_isolate, source, options);
+ if (generic.IsEmpty()) return Local<Script>();
+ return generic->BindToCurrentContext();
}
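// CompileUnbound() + BindToCurrentContext() is the new split between
// compiling and binding; Compile() above is the one-step convenience. A
// hedged sketch of reusing one compilation across two contexts (context_a
// and context_b are assumed, already-created contexts):
//
//   v8::Local<v8::UnboundScript> unbound =
//       v8::ScriptCompiler::CompileUnbound(isolate, &source);
//   { v8::Context::Scope scope(context_a);
//     unbound->BindToCurrentContext()->Run(); }
//   { v8::Context::Scope scope(context_b);
//     unbound->BindToCurrentContext()->Run(); }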
-Handle<Value> Script::GetScriptName() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::GetName()", return Handle<String>());
- LOG_API(isolate, "Script::GetName");
- if (obj->IsScript()) {
- i::Object* name = i::Script::cast(*obj)->name();
- return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
- } else {
- return Handle<String>();
+Local<Script> Script::Compile(v8::Handle<String> source,
+ v8::ScriptOrigin* origin) {
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ if (origin) {
+ ScriptCompiler::Source script_source(source, *origin);
+ return ScriptCompiler::Compile(
+ reinterpret_cast<v8::Isolate*>(str->GetIsolate()),
+ &script_source);
}
+ ScriptCompiler::Source script_source(source);
+ return ScriptCompiler::Compile(
+ reinterpret_cast<v8::Isolate*>(str->GetIsolate()),
+ &script_source);
}
-void Script::SetData(v8::Handle<String> data) {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::SetData()", return);
- LOG_API(isolate, "Script::SetData");
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- script->set_data(*raw_data);
- }
+Local<Script> Script::Compile(v8::Handle<String> source,
+ v8::Handle<String> file_name) {
+ ScriptOrigin origin(file_name);
+ return Compile(source, &origin);
}
@@ -2005,13 +1793,26 @@ void Script::SetData(v8::Handle<String> data) {
v8::TryCatch::TryCatch()
: isolate_(i::Isolate::Current()),
- next_(isolate_->try_catch_handler_address()),
+ next_(isolate_->try_catch_handler()),
is_verbose_(false),
can_continue_(true),
capture_message_(true),
rethrow_(false),
has_terminated_(false) {
Reset();
+ js_stack_comparable_address_ = this;
+#ifdef V8_USE_ADDRESS_SANITIZER
+ void* asan_fake_stack_handle = __asan_get_current_fake_stack();
+ if (asan_fake_stack_handle != NULL) {
+ js_stack_comparable_address_ = __asan_addr_is_in_fake_stack(
+ asan_fake_stack_handle, js_stack_comparable_address_, NULL, NULL);
+ CHECK(js_stack_comparable_address_ != NULL);
+ }
+#endif
+ // Special handling for simulators which have a separate JS stack.
+ js_stack_comparable_address_ = reinterpret_cast<void*>(
+ v8::internal::SimulatorStack::RegisterCTryCatch(
+ reinterpret_cast<uintptr_t>(js_stack_comparable_address_)));
isolate_->RegisterTryCatchHandler(this);
}
@@ -2031,10 +1832,12 @@ v8::TryCatch::~TryCatch() {
isolate_->RestorePendingMessageFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
+ v8::internal::SimulatorStack::UnregisterCTryCatch();
reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
ASSERT(!isolate_->thread_local_top()->rethrowing_message_);
} else {
isolate_->UnregisterTryCatchHandler(this);
+ v8::internal::SimulatorStack::UnregisterCTryCatch();
}
}
@@ -2082,8 +1885,10 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
i::Handle<i::String> name = isolate_->factory()->stack_string();
if (!i::JSReceiver::HasProperty(obj, name)) return v8::Local<Value>();
- i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name);
- if (value.is_null()) return v8::Local<Value>();
+ i::Handle<i::Object> value;
+ if (!i::Object::GetProperty(obj, name).ToHandle(&value)) {
+ return v8::Local<Value>();
+ }
return v8::Utils::ToLocal(scope.CloseAndEscape(value));
} else {
return v8::Local<Value>();
@@ -2155,21 +1960,6 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
}
-v8::Handle<Value> Message::GetScriptData() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- // Return this.script.data.
- i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
- isolate));
- i::Handle<i::Object> data(i::Script::cast(script->value())->data(), isolate);
- return scope.Escape(Utils::ToLocal(data));
-}
-
-
v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
@@ -2184,33 +1974,28 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
}
-static i::Handle<i::Object> CallV8HeapFunction(const char* name,
- i::Handle<i::Object> recv,
- int argc,
- i::Handle<i::Object> argv[],
- bool* has_pending_exception) {
+MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
+ const char* name,
+ i::Handle<i::Object> recv,
+ int argc,
+ i::Handle<i::Object> argv[]) {
i::Isolate* isolate = i::Isolate::Current();
- i::Handle<i::String> fmt_str =
- isolate->factory()->InternalizeUtf8String(name);
- i::Object* object_fun =
- isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
- i::Handle<i::JSFunction> fun =
- i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
- i::Handle<i::Object> value = i::Execution::Call(
- isolate, fun, recv, argc, argv, has_pending_exception);
- return value;
+ i::Handle<i::Object> object_fun =
+ i::Object::GetProperty(
+ isolate, isolate->js_builtins_object(), name).ToHandleChecked();
+ i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(object_fun);
+ return i::Execution::Call(isolate, fun, recv, argc, argv);
}
-static i::Handle<i::Object> CallV8HeapFunction(const char* name,
- i::Handle<i::Object> data,
- bool* has_pending_exception) {
+MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
+ const char* name,
+ i::Handle<i::Object> data) {
i::Handle<i::Object> argv[] = { data };
return CallV8HeapFunction(name,
i::Isolate::Current()->js_builtins_object(),
ARRAY_SIZE(argv),
- argv,
- has_pending_exception);
+ argv);
}
@@ -2221,9 +2006,9 @@ int Message::GetLineNumber() const {
i::HandleScope scope(isolate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber",
- Utils::OpenHandle(this),
- &has_pending_exception);
+ i::Handle<i::Object> result;
+ has_pending_exception = !CallV8HeapFunction(
+ "GetLineNumber", Utils::OpenHandle(this)).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
return static_cast<int>(result->Number());
}
@@ -2255,10 +2040,9 @@ int Message::GetStartColumn() const {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
- "GetPositionInLine",
- data_obj,
- &has_pending_exception);
+ i::Handle<i::Object> start_col_obj;
+ has_pending_exception = !CallV8HeapFunction(
+ "GetPositionInLine", data_obj).ToHandle(&start_col_obj);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
return static_cast<int>(start_col_obj->Number());
}
@@ -2270,10 +2054,9 @@ int Message::GetEndColumn() const {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
- "GetPositionInLine",
- data_obj,
- &has_pending_exception);
+ i::Handle<i::Object> start_col_obj;
+ has_pending_exception = !CallV8HeapFunction(
+ "GetPositionInLine", data_obj).ToHandle(&start_col_obj);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(data_obj);
@@ -2302,9 +2085,9 @@ Local<String> Message::GetSourceLine() const {
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
- Utils::OpenHandle(this),
- &has_pending_exception);
+ i::Handle<i::Object> result;
+ has_pending_exception = !CallV8HeapFunction(
+ "GetSourceLine", Utils::OpenHandle(this)).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>());
if (result->IsString()) {
return scope.Escape(Utils::ToLocal(i::Handle<i::String>::cast(result)));
@@ -2321,11 +2104,6 @@ void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
}
-void Message::PrintCurrentStackTrace(FILE* out) {
- PrintCurrentStackTrace(Isolate::GetCurrent(), out);
-}
-
-
// --- S t a c k T r a c e ---
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
@@ -2333,9 +2111,10 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- i::Object* raw_object = self->GetElementNoExceptionThrown(isolate, index);
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
- return scope.Escape(Utils::StackFrameToLocal(obj));
+ i::Handle<i::Object> obj =
+ i::Object::GetElement(isolate, self, index).ToHandleChecked();
+ i::Handle<i::JSObject> jsobj = i::Handle<i::JSObject>::cast(obj);
+ return scope.Escape(Utils::StackFrameToLocal(jsobj));
}
@@ -2359,18 +2138,15 @@ Local<StackTrace> StackTrace::CurrentStackTrace(
StackTraceOptions options) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
+ // TODO(dcarney): remove when ScriptDebugServer is fixed.
+ options = static_cast<StackTraceOptions>(
+ static_cast<int>(options) | kExposeFramesAcrossSecurityOrigins);
i::Handle<i::JSArray> stackTrace =
i_isolate->CaptureCurrentStackTrace(frame_limit, options);
return Utils::StackTraceToLocal(stackTrace);
}
-Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
- StackTraceOptions options) {
- return CurrentStackTrace(Isolate::GetCurrent(), frame_limit, options);
-}
-
-
// --- S t a c k F r a m e ---
int StackFrame::GetLineNumber() const {
@@ -2378,7 +2154,8 @@ int StackFrame::GetLineNumber() const {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> line = GetProperty(self, "lineNumber");
+ i::Handle<i::Object> line = i::Object::GetProperty(
+ isolate, self, "lineNumber").ToHandleChecked();
if (!line->IsSmi()) {
return Message::kNoLineNumberInfo;
}
@@ -2391,7 +2168,8 @@ int StackFrame::GetColumn() const {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> column = GetProperty(self, "column");
+ i::Handle<i::Object> column = i::Object::GetProperty(
+ isolate, self, "column").ToHandleChecked();
if (!column->IsSmi()) {
return Message::kNoColumnInfo;
}
@@ -2404,7 +2182,8 @@ int StackFrame::GetScriptId() const {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> scriptId = GetProperty(self, "scriptId");
+ i::Handle<i::Object> scriptId = i::Object::GetProperty(
+ isolate, self, "scriptId").ToHandleChecked();
if (!scriptId->IsSmi()) {
return Message::kNoScriptIdInfo;
}
@@ -2417,7 +2196,8 @@ Local<String> StackFrame::GetScriptName() const {
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name = GetProperty(self, "scriptName");
+ i::Handle<i::Object> name = i::Object::GetProperty(
+ isolate, self, "scriptName").ToHandleChecked();
if (!name->IsString()) {
return Local<String>();
}
@@ -2430,7 +2210,8 @@ Local<String> StackFrame::GetScriptNameOrSourceURL() const {
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name = GetProperty(self, "scriptNameOrSourceURL");
+ i::Handle<i::Object> name = i::Object::GetProperty(
+ isolate, self, "scriptNameOrSourceURL").ToHandleChecked();
if (!name->IsString()) {
return Local<String>();
}
@@ -2443,7 +2224,8 @@ Local<String> StackFrame::GetFunctionName() const {
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name = GetProperty(self, "functionName");
+ i::Handle<i::Object> name = i::Object::GetProperty(
+ isolate, self, "functionName").ToHandleChecked();
if (!name->IsString()) {
return Local<String>();
}
@@ -2456,7 +2238,8 @@ bool StackFrame::IsEval() const {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> is_eval = GetProperty(self, "isEval");
+ i::Handle<i::Object> is_eval = i::Object::GetProperty(
+ isolate, self, "isEval").ToHandleChecked();
return is_eval->IsTrue();
}
@@ -2466,7 +2249,8 @@ bool StackFrame::IsConstructor() const {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> is_constructor = GetProperty(self, "isConstructor");
+ i::Handle<i::Object> is_constructor = i::Object::GetProperty(
+ isolate, self, "isConstructor").ToHandleChecked();
return is_constructor->IsTrue();
}
@@ -2474,20 +2258,18 @@ bool StackFrame::IsConstructor() const {
// --- J S O N ---
Local<Value> JSON::Parse(Local<String> json_string) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> string = Utils::OpenHandle(*json_string);
+ i::Isolate* isolate = string->GetIsolate();
EnsureInitializedForIsolate(isolate, "v8::JSON::Parse");
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::String> source = i::Handle<i::String>(
- FlattenGetString(Utils::OpenHandle(*json_string)));
+ i::Handle<i::String> source = i::String::Flatten(string);
EXCEPTION_PREAMBLE(isolate);
+ i::MaybeHandle<i::Object> maybe_result =
+ source->IsSeqOneByteString() ? i::JsonParser<true>::Parse(source)
+ : i::JsonParser<false>::Parse(source);
i::Handle<i::Object> result;
- if (source->IsSeqOneByteString()) {
- result = i::JsonParser<true>::Parse(source);
- } else {
- result = i::JsonParser<false>::Parse(source);
- }
- has_pending_exception = result.is_null();
+ has_pending_exception = !maybe_result.ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(
i::Handle<i::Object>::cast(scope.CloseAndEscape(result)));
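// The embedder-facing call is unchanged by this refactor; only the internal
// JsonParser plumbing moved to MaybeHandle. Minimal usage sketch:
//
//   v8::Local<v8::Value> parsed = v8::JSON::Parse(
//       v8::String::NewFromUtf8(isolate, "{\"answer\": 42}"));
//   // parsed is an empty handle if the input was not valid JSON.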
@@ -2557,26 +2339,14 @@ bool Value::IsTypedArray() const {
}
-#define TYPED_ARRAY_LIST(F) \
-F(Uint8Array, kExternalUnsignedByteArray) \
-F(Int8Array, kExternalByteArray) \
-F(Uint16Array, kExternalUnsignedShortArray) \
-F(Int16Array, kExternalShortArray) \
-F(Uint32Array, kExternalUnsignedIntArray) \
-F(Int32Array, kExternalIntArray) \
-F(Float32Array, kExternalFloatArray) \
-F(Float64Array, kExternalDoubleArray) \
-F(Uint8ClampedArray, kExternalPixelArray)
-
-
-#define VALUE_IS_TYPED_ARRAY(TypedArray, type_const) \
- bool Value::Is##TypedArray() const { \
- i::Handle<i::Object> obj = Utils::OpenHandle(this); \
- if (!obj->IsJSTypedArray()) return false; \
- return i::JSTypedArray::cast(*obj)->type() == type_const; \
+#define VALUE_IS_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
+ bool Value::Is##Type##Array() const { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(this); \
+ return obj->IsJSTypedArray() && \
+ i::JSTypedArray::cast(*obj)->type() == kExternal##Type##Array; \
}
-TYPED_ARRAY_LIST(VALUE_IS_TYPED_ARRAY)
+TYPED_ARRAYS(VALUE_IS_TYPED_ARRAY)
#undef VALUE_IS_TYPED_ARRAY
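// TYPED_ARRAYS is V8's X-macro over the nine typed-array kinds; each row
// supplies (Type, type, TYPE, ctype, size). For illustration, the
// (Uint8, uint8, UINT8, uint8_t, 1) row expands VALUE_IS_TYPED_ARRAY to:
//
//   bool Value::IsUint8Array() const {
//     i::Handle<i::Object> obj = Utils::OpenHandle(this);
//     return obj->IsJSTypedArray() &&
//            i::JSTypedArray::cast(*obj)->type() == kExternalUint8Array;
//   }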
@@ -2610,13 +2380,7 @@ bool Value::IsInt32() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return true;
if (obj->IsNumber()) {
- double value = obj->Number();
- static const i::DoubleRepresentation minus_zero(-0.0);
- i::DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
- return false;
- }
- return i::FastI2D(i::FastD2I(value)) == value;
+ return i::IsInt32Double(obj->Number());
}
return false;
}
@@ -2627,72 +2391,65 @@ bool Value::IsUint32() const {
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
double value = obj->Number();
- static const i::DoubleRepresentation minus_zero(-0.0);
- i::DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
- return false;
- }
- return i::FastUI2D(i::FastD2UI(value)) == value;
+ return !i::IsMinusZero(value) &&
+ value >= 0 &&
+ value <= i::kMaxUInt32 &&
+ value == i::FastUI2D(i::FastD2UI(value));
}
return false;
}
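// Both predicates must reject -0.0, which compares equal to 0.0 as a double
// but is not a valid int32/uint32. A worked illustration in plain C++
// (std::signbit is from <cmath>):
//
//   double v = -0.0;
//   bool negative_zero = std::signbit(v) && v == 0.0;  // true
//   bool round_trips = static_cast<double>(
//       static_cast<uint32_t>(v)) == v;                // also true
//   // The cast round trip alone would wrongly accept -0.0, hence the
//   // explicit IsMinusZero() check above.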
bool Value::IsDate() const {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (!obj->IsHeapObject()) return false;
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
return obj->HasSpecificClassOf(isolate->heap()->Date_string());
}
bool Value::IsStringObject() const {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (!obj->IsHeapObject()) return false;
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
return obj->HasSpecificClassOf(isolate->heap()->String_string());
}
bool Value::IsSymbolObject() const {
- // TODO(svenpanne): these and other test functions should be written such
- // that they do not use Isolate::Current().
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (!obj->IsHeapObject()) return false;
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
return obj->HasSpecificClassOf(isolate->heap()->Symbol_string());
}
bool Value::IsNumberObject() const {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (!obj->IsHeapObject()) return false;
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
return obj->HasSpecificClassOf(isolate->heap()->Number_string());
}
-static i::Object* LookupBuiltin(i::Isolate* isolate,
- const char* builtin_name) {
- i::Handle<i::String> string =
- isolate->factory()->InternalizeUtf8String(builtin_name);
- i::Handle<i::JSBuiltinsObject> builtins = isolate->js_builtins_object();
- return builtins->GetPropertyNoExceptionThrown(*string);
-}
-
-
static bool CheckConstructor(i::Isolate* isolate,
i::Handle<i::JSObject> obj,
const char* class_name) {
- i::Object* constr = obj->map()->constructor();
+ i::Handle<i::Object> constr(obj->map()->constructor(), isolate);
if (!constr->IsJSFunction()) return false;
- i::JSFunction* func = i::JSFunction::cast(constr);
- return func->shared()->native() &&
- constr == LookupBuiltin(isolate, class_name);
+ i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(constr);
+ return func->shared()->native() && constr.is_identical_to(
+ i::Object::GetProperty(isolate,
+ isolate->js_builtins_object(),
+ class_name).ToHandleChecked());
}
bool Value::IsNativeError() const {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsJSObject()) {
i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
+ i::Isolate* isolate = js_obj->GetIsolate();
return CheckConstructor(isolate, js_obj, "$Error") ||
CheckConstructor(isolate, js_obj, "$EvalError") ||
CheckConstructor(isolate, js_obj, "$RangeError") ||
@@ -2707,8 +2464,9 @@ bool Value::IsNativeError() const {
bool Value::IsBooleanObject() const {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (!obj->IsHeapObject()) return false;
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
return obj->HasSpecificClassOf(isolate->heap()->Boolean_string());
}
@@ -2729,7 +2487,8 @@ Local<String> Value::ToString() const {
LOG_API(isolate, "ToString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToString(isolate, obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToString(
+ isolate, obj).ToHandle(&str);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
return ToApiHandle<String>(str);
@@ -2746,7 +2505,8 @@ Local<String> Value::ToDetailString() const {
LOG_API(isolate, "ToDetailString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToDetailString(isolate, obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToDetailString(
+ isolate, obj).ToHandle(&str);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
return ToApiHandle<String>(str);
@@ -2763,7 +2523,8 @@ Local<v8::Object> Value::ToObject() const {
LOG_API(isolate, "ToObject");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- val = i::Execution::ToObject(isolate, obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToObject(
+ isolate, obj).ToHandle(&val);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
}
return ToApiHandle<Object>(val);
@@ -2791,11 +2552,12 @@ Local<Number> Value::ToNumber() const {
if (obj->IsNumber()) {
num = obj;
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "ToNumber");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(isolate, obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToNumber(
+ isolate, obj).ToHandle(&num);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>());
}
return ToApiHandle<Number>(num);
@@ -2808,11 +2570,12 @@ Local<Integer> Value::ToInteger() const {
if (obj->IsSmi()) {
num = obj;
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "ToInteger");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(isolate, obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToInteger(
+ isolate, obj).ToHandle(&num);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>());
}
return ToApiHandle<Integer>(num);
@@ -2821,172 +2584,199 @@ Local<Integer> Value::ToInteger() const {
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- ApiCheck(isolate != NULL && isolate->IsInitialized() && !isolate->IsDead(),
- "v8::internal::Internals::CheckInitialized()",
- "Isolate is not initialized or V8 has died");
+ Utils::ApiCheck(isolate != NULL &&
+ isolate->IsInitialized() &&
+ !isolate->IsDead(),
+ "v8::internal::Internals::CheckInitialized()",
+ "Isolate is not initialized or V8 has died");
}
void External::CheckCast(v8::Value* that) {
- ApiCheck(Utils::OpenHandle(that)->IsExternal(),
- "v8::External::Cast()",
- "Could not convert to external");
+ Utils::ApiCheck(Utils::OpenHandle(that)->IsExternal(),
+ "v8::External::Cast()",
+ "Could not convert to external");
}
void v8::Object::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSObject(),
- "v8::Object::Cast()",
- "Could not convert to object");
+ Utils::ApiCheck(obj->IsJSObject(),
+ "v8::Object::Cast()",
+ "Could not convert to object");
}
void v8::Function::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSFunction(),
- "v8::Function::Cast()",
- "Could not convert to function");
+ Utils::ApiCheck(obj->IsJSFunction(),
+ "v8::Function::Cast()",
+ "Could not convert to function");
}
void v8::String::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsString(),
- "v8::String::Cast()",
- "Could not convert to string");
+ Utils::ApiCheck(obj->IsString(),
+ "v8::String::Cast()",
+ "Could not convert to string");
}
void v8::Symbol::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsSymbol(),
- "v8::Symbol::Cast()",
- "Could not convert to symbol");
+ Utils::ApiCheck(obj->IsSymbol(),
+ "v8::Symbol::Cast()",
+ "Could not convert to symbol");
}
void v8::Number::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsNumber(),
- "v8::Number::Cast()",
- "Could not convert to number");
+ Utils::ApiCheck(obj->IsNumber(),
+ "v8::Number::Cast()",
+ "Could not convert to number");
}
void v8::Integer::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsNumber(),
- "v8::Integer::Cast()",
- "Could not convert to number");
+ Utils::ApiCheck(obj->IsNumber(),
+ "v8::Integer::Cast()",
+ "Could not convert to number");
}
void v8::Array::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSArray(),
- "v8::Array::Cast()",
- "Could not convert to array");
+ Utils::ApiCheck(obj->IsJSArray(),
+ "v8::Array::Cast()",
+ "Could not convert to array");
+}
+
+
+void v8::Promise::CheckCast(Value* that) {
+ Utils::ApiCheck(that->IsPromise(),
+ "v8::Promise::Cast()",
+ "Could not convert to promise");
+}
+
+
+void v8::Promise::Resolver::CheckCast(Value* that) {
+ Utils::ApiCheck(that->IsPromise(),
+ "v8::Promise::Resolver::Cast()",
+ "Could not convert to promise resolver");
}
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSArrayBuffer(),
- "v8::ArrayBuffer::Cast()",
- "Could not convert to ArrayBuffer");
+ Utils::ApiCheck(obj->IsJSArrayBuffer(),
+ "v8::ArrayBuffer::Cast()",
+ "Could not convert to ArrayBuffer");
}
void v8::ArrayBufferView::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSArrayBufferView(),
- "v8::ArrayBufferView::Cast()",
- "Could not convert to ArrayBufferView");
+ Utils::ApiCheck(obj->IsJSArrayBufferView(),
+ "v8::ArrayBufferView::Cast()",
+ "Could not convert to ArrayBufferView");
}
void v8::TypedArray::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSTypedArray(),
- "v8::TypedArray::Cast()",
- "Could not convert to TypedArray");
+ Utils::ApiCheck(obj->IsJSTypedArray(),
+ "v8::TypedArray::Cast()",
+ "Could not convert to TypedArray");
}
-#define CHECK_TYPED_ARRAY_CAST(ApiClass, typeConst) \
- void v8::ApiClass::CheckCast(Value* that) { \
- i::Handle<i::Object> obj = Utils::OpenHandle(that); \
- ApiCheck(obj->IsJSTypedArray() && \
- i::JSTypedArray::cast(*obj)->type() == typeConst, \
- "v8::" #ApiClass "::Cast()", \
- "Could not convert to " #ApiClass); \
+#define CHECK_TYPED_ARRAY_CAST(Type, typeName, TYPE, ctype, size) \
+ void v8::Type##Array::CheckCast(Value* that) { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(that); \
+ Utils::ApiCheck(obj->IsJSTypedArray() && \
+ i::JSTypedArray::cast(*obj)->type() == \
+ kExternal##Type##Array, \
+ "v8::" #Type "Array::Cast()", \
+ "Could not convert to " #Type "Array"); \
}
-TYPED_ARRAY_LIST(CHECK_TYPED_ARRAY_CAST)
+TYPED_ARRAYS(CHECK_TYPED_ARRAY_CAST)
#undef CHECK_TYPED_ARRAY_CAST
void v8::DataView::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSDataView(),
- "v8::DataView::Cast()",
- "Could not convert to DataView");
+ Utils::ApiCheck(obj->IsJSDataView(),
+ "v8::DataView::Cast()",
+ "Could not convert to DataView");
}
void v8::Date::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()),
- "v8::Date::Cast()",
- "Could not convert to date");
+ i::Isolate* isolate = NULL;
+ if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
+ Utils::ApiCheck(isolate != NULL &&
+ obj->HasSpecificClassOf(isolate->heap()->Date_string()),
+ "v8::Date::Cast()",
+ "Could not convert to date");
}
void v8::StringObject::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()),
- "v8::StringObject::Cast()",
- "Could not convert to StringObject");
+ i::Isolate* isolate = NULL;
+ if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
+ Utils::ApiCheck(isolate != NULL &&
+ obj->HasSpecificClassOf(isolate->heap()->String_string()),
+ "v8::StringObject::Cast()",
+ "Could not convert to StringObject");
}
void v8::SymbolObject::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
- "v8::SymbolObject::Cast()",
- "Could not convert to SymbolObject");
+ i::Isolate* isolate = NULL;
+ if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
+ Utils::ApiCheck(isolate != NULL &&
+ obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
+ "v8::SymbolObject::Cast()",
+ "Could not convert to SymbolObject");
}
void v8::NumberObject::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()),
- "v8::NumberObject::Cast()",
- "Could not convert to NumberObject");
+ i::Isolate* isolate = NULL;
+ if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
+ Utils::ApiCheck(isolate != NULL &&
+ obj->HasSpecificClassOf(isolate->heap()->Number_string()),
+ "v8::NumberObject::Cast()",
+ "Could not convert to NumberObject");
}
void v8::BooleanObject::CheckCast(v8::Value* that) {
- i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
- "v8::BooleanObject::Cast()",
- "Could not convert to BooleanObject");
+ i::Isolate* isolate = NULL;
+ if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
+ Utils::ApiCheck(isolate != NULL &&
+ obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
+ "v8::BooleanObject::Cast()",
+ "Could not convert to BooleanObject");
}
void v8::RegExp::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSRegExp(),
- "v8::RegExp::Cast()",
- "Could not convert to regular expression");
+ Utils::ApiCheck(obj->IsJSRegExp(),
+ "v8::RegExp::Cast()",
+ "Could not convert to regular expression");
}
@@ -3001,11 +2791,12 @@ double Value::NumberValue() const {
if (obj->IsNumber()) {
num = obj;
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "NumberValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(isolate, obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToNumber(
+ isolate, obj).ToHandle(&num);
EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value());
}
return num->Number();
@@ -3018,11 +2809,12 @@ int64_t Value::IntegerValue() const {
if (obj->IsNumber()) {
num = obj;
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "IntegerValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(isolate, obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToInteger(
+ isolate, obj).ToHandle(&num);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
}
if (num->IsSmi()) {
@@ -3039,11 +2831,11 @@ Local<Int32> Value::ToInt32() const {
if (obj->IsSmi()) {
num = obj;
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "ToInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInt32(isolate, obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToInt32(isolate, obj).ToHandle(&num);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>());
}
return ToApiHandle<Int32>(num);
@@ -3056,11 +2848,12 @@ Local<Uint32> Value::ToUint32() const {
if (obj->IsSmi()) {
num = obj;
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "ToUInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToUint32(isolate, obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToUint32(
+ isolate, obj).ToHandle(&num);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
}
return ToApiHandle<Uint32>(num);
@@ -3073,12 +2866,13 @@ Local<Uint32> Value::ToArrayIndex() const {
if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj);
return Local<Uint32>();
}
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "ToArrayIndex");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> string_obj =
- i::Execution::ToString(isolate, obj, &has_pending_exception);
+ i::Handle<i::Object> string_obj;
+ has_pending_exception = !i::Execution::ToString(
+ isolate, obj).ToHandle(&string_obj);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
uint32_t index;
@@ -3100,12 +2894,12 @@ int32_t Value::Int32Value() const {
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "Int32Value (slow)");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> num =
- i::Execution::ToInt32(isolate, obj, &has_pending_exception);
+ i::Handle<i::Object> num;
+ has_pending_exception = !i::Execution::ToInt32(isolate, obj).ToHandle(&num);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
@@ -3118,13 +2912,14 @@ int32_t Value::Int32Value() const {
bool Value::Equals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (EmptyCheck("v8::Value::Equals()", this) ||
- EmptyCheck("v8::Value::Equals()", that)) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
+ if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
+ "v8::Value::Equals()",
+ "Reading from empty handle")) {
return false;
}
LOG_API(isolate, "Equals");
ENTER_V8(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
// If both obj and other are JSObjects, we'd better compare by identity
// immediately when going into JS builtin. The reason is Invoke
@@ -3134,9 +2929,9 @@ bool Value::Equals(Handle<Value> that) const {
}
i::Handle<i::Object> args[] = { other };
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result =
- CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args,
- &has_pending_exception);
+ i::Handle<i::Object> result;
+ has_pending_exception = !CallV8HeapFunction(
+ "EQUALS", obj, ARRAY_SIZE(args), args).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return *result == i::Smi::FromInt(i::EQUAL);
}
@@ -3144,12 +2939,13 @@ bool Value::Equals(Handle<Value> that) const {
bool Value::StrictEquals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (EmptyCheck("v8::Value::StrictEquals()", this) ||
- EmptyCheck("v8::Value::StrictEquals()", that)) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
+ if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
+ "v8::Value::StrictEquals()",
+ "Reading from empty handle")) {
return false;
}
LOG_API(isolate, "StrictEquals");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
// Must check HeapNumber first, since NaN !== NaN.
if (obj->IsHeapNumber()) {
@@ -3164,7 +2960,8 @@ bool Value::StrictEquals(Handle<Value> that) const {
return other->IsNumber() && obj->Number() == other->Number();
} else if (obj->IsString()) {
return other->IsString() &&
- i::String::cast(*obj)->Equals(i::String::cast(*other));
+ i::String::Equals(i::Handle<i::String>::cast(obj),
+ i::Handle<i::String>::cast(other));
} else if (obj->IsUndefined() || obj->IsUndetectableObject()) {
return other->IsUndefined() || other->IsUndetectableObject();
} else {
@@ -3174,13 +2971,12 @@ bool Value::StrictEquals(Handle<Value> that) const {
bool Value::SameValue(Handle<Value> that) const {
- i::Isolate* isolate = i::Isolate::Current();
- if (EmptyCheck("v8::Value::SameValue()", this) ||
- EmptyCheck("v8::Value::SameValue()", that)) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
+ if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
+ "v8::Value::SameValue()",
+ "Reading from empty handle")) {
return false;
}
- LOG_API(isolate, "SameValue");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
return obj->SameValue(*other);
}
@@ -3191,12 +2987,13 @@ uint32_t Value::Uint32Value() const {
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "Uint32Value");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> num =
- i::Execution::ToUint32(isolate, obj, &has_pending_exception);
+ i::Handle<i::Object> num;
+ has_pending_exception = !i::Execution::ToUint32(
+ isolate, obj).ToHandle(&num);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
@@ -3217,14 +3014,13 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::Runtime::SetObjectProperty(
+ has_pending_exception = i::Runtime::SetObjectProperty(
isolate,
self,
key_obj,
value_obj,
static_cast<PropertyAttributes>(attribs),
- i::kNonStrictMode);
- has_pending_exception = obj.is_null();
+ i::SLOPPY).is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
}
@@ -3238,13 +3034,8 @@ bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::JSObject::SetElement(
- self,
- index,
- value_obj,
- NONE,
- i::kNonStrictMode);
- has_pending_exception = obj.is_null();
+ has_pending_exception = i::JSObject::SetElement(
+ self, index, value_obj, NONE, i::SLOPPY).is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
}
@@ -3261,20 +3052,19 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::ForceSetProperty(
+ has_pending_exception = i::Runtime::ForceSetObjectProperty(
self,
key_obj,
value_obj,
- static_cast<PropertyAttributes>(attribs));
- has_pending_exception = obj.is_null();
+ static_cast<PropertyAttributes>(attribs)).is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
}
bool v8::Object::SetPrivate(v8::Handle<Private> key, v8::Handle<Value> value) {
- return Set(v8::Handle<Value>(reinterpret_cast<Value*>(*key)),
- value, DontEnum);
+ return ForceSet(v8::Handle<Value>(reinterpret_cast<Value*>(*key)),
+ value, DontEnum);
}
@@ -3295,8 +3085,9 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) {
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
- has_pending_exception = obj.is_null();
+ i::Handle<i::Object> obj;
+ has_pending_exception = !i::Runtime::DeleteObjectProperty(
+ isolate, self, key_obj, i::JSReceiver::FORCE_DELETION).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return obj->IsTrue();
}
@@ -3309,8 +3100,9 @@ Local<Value> v8::Object::Get(v8::Handle<Value> key) {
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetProperty(isolate, self, key_obj);
- has_pending_exception = result.is_null();
+ i::Handle<i::Object> result;
+ has_pending_exception =
+ !i::Runtime::GetObjectProperty(isolate, self, key_obj).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
}
@@ -3322,8 +3114,9 @@ Local<Value> v8::Object::Get(uint32_t index) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::Object::GetElement(isolate, self, index);
- has_pending_exception = result.is_null();
+ i::Handle<i::Object> result;
+ has_pending_exception =
+ !i::Object::GetElement(isolate, self, index).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
}
@@ -3336,7 +3129,7 @@ Local<Value> v8::Object::GetPrivate(v8::Handle<Private> key) {
PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPropertyAttribute()",
+ ON_BAILOUT(isolate, "v8::Object::GetPropertyAttributes()",
return static_cast<PropertyAttribute>(NONE));
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -3344,11 +3137,13 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
if (!key_obj->IsName()) {
EXCEPTION_PREAMBLE(isolate);
- key_obj = i::Execution::ToString(isolate, key_obj, &has_pending_exception);
+ has_pending_exception = !i::Execution::ToString(
+ isolate, key_obj).ToHandle(&key_obj);
EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
}
i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
- PropertyAttributes result = self->GetPropertyAttribute(*key_name);
+ PropertyAttributes result =
+ i::JSReceiver::GetPropertyAttributes(self, key_name);
if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
return static_cast<PropertyAttribute>(result);
}
@@ -3356,8 +3151,7 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
Local<Value> v8::Object::GetPrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPrototype()",
- return Local<v8::Value>());
+ ON_BAILOUT(isolate, "v8::Object::GetPrototype()", return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> result(self->GetPrototype(isolate), isolate);
@@ -3375,7 +3169,8 @@ bool v8::Object::SetPrototype(Handle<Value> value) {
// to propagate outside.
TryCatch try_catch;
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::JSObject::SetPrototype(self, value_obj);
+ i::MaybeHandle<i::Object> result = i::JSObject::SetPrototype(
+ self, value_obj);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
@@ -3407,10 +3202,11 @@ Local<Array> v8::Object::GetPropertyNames() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- bool threw = false;
- i::Handle<i::FixedArray> value =
- i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS, &threw);
- if (threw) return Local<v8::Array>();
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::FixedArray> value;
+ has_pending_exception = !i::JSReceiver::GetKeys(
+ self, i::JSReceiver::INCLUDE_PROTOS).ToHandle(&value);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Array>());
// Because we use caching to speed up enumeration, it is important
// never to change the result of the basic enumeration function, so
// we clone the result.
@@ -3428,10 +3224,11 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- bool threw = false;
- i::Handle<i::FixedArray> value =
- i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY, &threw);
- if (threw) return Local<v8::Array>();
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::FixedArray> value;
+ has_pending_exception = !i::JSReceiver::GetKeys(
+ self, i::JSReceiver::OWN_ONLY).ToHandle(&value);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Array>());
// Because we use caching to speed up enumeration, it is important
// never to change the result of the basic enumeration function, so
// we clone the result.
@@ -3477,7 +3274,7 @@ Local<String> v8::Object::ObjectProtoToString() {
// Write prefix.
char* ptr = buf.start();
- i::OS::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
+ i::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
ptr += prefix_len;
// Write real content.
@@ -3485,7 +3282,7 @@ Local<String> v8::Object::ObjectProtoToString() {
ptr += str_len;
// Write postfix.
- i::OS::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
+ i::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
// Copy the buffer into a heap-allocated string and return it.
Local<String> result = v8::String::NewFromUtf8(
@@ -3526,8 +3323,9 @@ bool v8::Object::Delete(v8::Handle<Value> key) {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::DeleteProperty(self, key_obj);
- has_pending_exception = obj.is_null();
+ i::Handle<i::Object> obj;
+ has_pending_exception = !i::Runtime::DeleteObjectProperty(
+ isolate, self, key_obj, i::JSReceiver::NORMAL_DELETION).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return obj->IsTrue();
}
@@ -3545,14 +3343,17 @@ bool v8::Object::Has(v8::Handle<Value> key) {
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::HasProperty(self, key_obj);
- has_pending_exception = obj.is_null();
+ i::Handle<i::Object> obj;
+ has_pending_exception = !i::Runtime::HasObjectProperty(
+ isolate, self, key_obj).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return obj->IsTrue();
}
bool v8::Object::HasPrivate(v8::Handle<Private> key) {
+ // TODO(rossberg): this should use HasOwnProperty, but we'd need to
+ // generalise that to a (not yet existent) Name argument first.
return Has(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
}
@@ -3564,7 +3365,13 @@ bool v8::Object::Delete(uint32_t index) {
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::JSReceiver::DeleteElement(self, index)->IsTrue();
+
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> obj;
+ has_pending_exception =
+ !i::JSReceiver::DeleteElement(self, index).ToHandle(&obj);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
+ return obj->IsTrue();
}
@@ -3593,9 +3400,12 @@ static inline bool ObjectSetAccessor(Object* obj,
name, getter, setter, data, settings, attributes, signature);
if (info.is_null()) return false;
bool fast = Utils::OpenHandle(obj)->HasFastProperties();
- i::Handle<i::Object> result =
- i::JSObject::SetAccessor(Utils::OpenHandle(obj), info);
- if (result.is_null() || result->IsUndefined()) return false;
+ i::Handle<i::Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, result,
+ i::JSObject::SetAccessor(Utils::OpenHandle(obj), info),
+ false);
+ if (result->IsUndefined()) return false;
if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(obj), 0);
return true;
}
@@ -3622,11 +3432,32 @@ bool Object::SetDeclaredAccessor(Local<String> name,
}
+void Object::SetAccessorProperty(Local<String> name,
+ Local<Function> getter,
+ Handle<Function> setter,
+ PropertyAttribute attribute,
+ AccessControl settings) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::SetAccessorProperty()", return);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
+ i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
+ if (setter_i.is_null()) setter_i = isolate->factory()->null_value();
+ i::JSObject::DefineAccessor(v8::Utils::OpenHandle(this),
+ v8::Utils::OpenHandle(*name),
+ getter_i,
+ setter_i,
+ static_cast<PropertyAttributes>(attribute),
+ settings);
+}
+
+
bool v8::Object::HasOwnProperty(Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
return false);
- return i::JSReceiver::HasLocalProperty(
+ return i::JSReceiver::HasOwnProperty(
Utils::OpenHandle(this), Utils::OpenHandle(*key));
}
@@ -3687,11 +3518,11 @@ static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
// If the property being looked up is a callback, it can throw
// an exception.
EXCEPTION_PREAMBLE(isolate);
- PropertyAttributes ignored;
- i::Handle<i::Object> result =
- i::Object::GetProperty(receiver, receiver, lookup, name,
- &ignored);
- has_pending_exception = result.is_null();
+ i::LookupIterator it(
+ receiver, name, i::Handle<i::JSReceiver>(lookup->holder(), isolate),
+ i::LookupIterator::SKIP_INTERCEPTOR);
+ i::Handle<i::Object> result;
+ has_pending_exception = !i::Object::GetProperty(&it).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
@@ -3699,7 +3530,7 @@ static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
- Handle<String> key) {
+ Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate,
"v8::Object::GetRealNamedPropertyInPrototypeChain()",
@@ -3708,7 +3539,7 @@ Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup(isolate);
- self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
+ self_obj->LookupRealNamedPropertyInPrototypes(key_obj, &lookup);
return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
}
@@ -3721,7 +3552,7 @@ Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup(isolate);
- self_obj->LookupRealNamedProperty(*key_obj, &lookup);
+ self_obj->LookupRealNamedProperty(key_obj, &lookup);
return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
}
@@ -3740,8 +3571,7 @@ void v8::Object::TurnOnAccessCheck() {
// as optimized code does not always handle access checks.
i::Deoptimizer::DeoptimizeGlobalObject(*obj);
- i::Handle<i::Map> new_map =
- isolate->factory()->CopyMap(i::Handle<i::Map>(obj->map()));
+ i::Handle<i::Map> new_map = i::Map::Copy(i::Handle<i::Map>(obj->map()));
new_map->set_is_access_check_needed(true);
obj->set_map(*new_map);
}
@@ -3758,35 +3588,20 @@ Local<v8::Object> v8::Object::Clone() {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSObject> result = i::JSObject::Copy(self);
+ i::Handle<i::JSObject> result = isolate->factory()->CopyJSObject(self);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
}
-static i::Context* GetCreationContext(i::JSObject* object) {
- i::Object* constructor = object->map()->constructor();
- i::JSFunction* function;
- if (!constructor->IsJSFunction()) {
- // Functions have null as a constructor,
- // but any JSFunction knows its context immediately.
- ASSERT(object->IsJSFunction());
- function = i::JSFunction::cast(object);
- } else {
- function = i::JSFunction::cast(constructor);
- }
- return function->context()->native_context();
-}
-
-
Local<v8::Context> v8::Object::CreationContext() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate,
"v8::Object::CreationContext()", return Local<v8::Context>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Context* context = GetCreationContext(*self);
+ i::Context* context = self->GetCreationContext();
return Utils::ToLocal(i::Handle<i::Context>(context));
}
@@ -3797,8 +3612,7 @@ int v8::Object::GetIdentityHash() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::Handle<i::Smi>::cast(
- i::JSReceiver::GetOrCreateIdentityHash(self))->value();
+ return i::JSReceiver::GetOrCreateIdentityHash(self)->value();
}
@@ -3829,7 +3643,7 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
- i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate);
+ i::Handle<i::Object> result(self->GetHiddenProperty(key_string), isolate);
if (result->IsTheHole()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -3854,33 +3668,12 @@ namespace {
static i::ElementsKind GetElementsKindFromExternalArrayType(
ExternalArrayType array_type) {
switch (array_type) {
- case kExternalByteArray:
- return i::EXTERNAL_BYTE_ELEMENTS;
- break;
- case kExternalUnsignedByteArray:
- return i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
- break;
- case kExternalShortArray:
- return i::EXTERNAL_SHORT_ELEMENTS;
- break;
- case kExternalUnsignedShortArray:
- return i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
- break;
- case kExternalIntArray:
- return i::EXTERNAL_INT_ELEMENTS;
- break;
- case kExternalUnsignedIntArray:
- return i::EXTERNAL_UNSIGNED_INT_ELEMENTS;
- break;
- case kExternalFloatArray:
- return i::EXTERNAL_FLOAT_ELEMENTS;
- break;
- case kExternalDoubleArray:
- return i::EXTERNAL_DOUBLE_ELEMENTS;
- break;
- case kExternalPixelArray:
- return i::EXTERNAL_PIXEL_ELEMENTS;
- break;
+#define ARRAY_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return i::EXTERNAL_##TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(ARRAY_TYPE_TO_ELEMENTS_KIND)
+#undef ARRAY_TYPE_TO_ELEMENTS_KIND
}
UNREACHABLE();
return i::DICTIONARY_ELEMENTS;
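
A minimal sketch of the X-macro pattern the new code relies on (the two list entries below are illustrative; the real five-parameter TYPED_ARRAYS list lives in v8's objects.h):

    // Stand-in for v8's TYPED_ARRAYS list: each V(...) row carries
    // (Type, type, TYPE, ctype, size).
    #define DEMO_TYPED_ARRAYS(V)            \
      V(Uint8, uint8, UINT8, uint8_t, 1)    \
      V(Int32, int32, INT32, int32_t, 4)

    #define ARRAY_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
      case kExternal##Type##Array:                                     \
        return i::EXTERNAL_##TYPE##_ELEMENTS;

    // DEMO_TYPED_ARRAYS(ARRAY_TYPE_TO_ELEMENTS_KIND) expands to:
    //   case kExternalUint8Array: return i::EXTERNAL_UINT8_ELEMENTS;
    //   case kExternalInt32Array: return i::EXTERNAL_INT32_ELEMENTS;
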
@@ -3896,12 +3689,11 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
isolate->factory()->NewExternalArray(length, array_type, data);
i::Handle<i::Map> external_array_map =
- isolate->factory()->GetElementsTransitionMap(
+ i::JSObject::GetElementsTransitionMap(
object,
GetElementsKindFromExternalArrayType(array_type));
- object->set_map(*external_array_map);
- object->set_elements(*array);
+ i::JSObject::SetMapAndElements(object, external_array_map, array);
}
} // namespace
@@ -3912,18 +3704,19 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- if (!ApiCheck(length >= 0 && length <= i::ExternalPixelArray::kMaxLength,
- "v8::Object::SetIndexedPropertiesToPixelData()",
- "length exceeds max acceptable value")) {
+ if (!Utils::ApiCheck(length >= 0 &&
+ length <= i::ExternalUint8ClampedArray::kMaxLength,
+ "v8::Object::SetIndexedPropertiesToPixelData()",
+ "length exceeds max acceptable value")) {
return;
}
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!ApiCheck(!self->IsJSArray(),
- "v8::Object::SetIndexedPropertiesToPixelData()",
- "JSArray is not supported")) {
+ if (!Utils::ApiCheck(!self->IsJSArray(),
+ "v8::Object::SetIndexedPropertiesToPixelData()",
+ "JSArray is not supported")) {
return;
}
- PrepareExternalArrayElements(self, data, kExternalPixelArray, length);
+ PrepareExternalArrayElements(self, data, kExternalUint8ClampedArray, length);
}
@@ -3931,7 +3724,7 @@ bool v8::Object::HasIndexedPropertiesInPixelData() {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
ON_BAILOUT(self->GetIsolate(), "v8::HasIndexedPropertiesInPixelData()",
return false);
- return self->HasExternalPixelElements();
+ return self->HasExternalUint8ClampedElements();
}
@@ -3939,9 +3732,9 @@ uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelData()",
return NULL);
- if (self->HasExternalPixelElements()) {
- return i::ExternalPixelArray::cast(self->elements())->
- external_pixel_pointer();
+ if (self->HasExternalUint8ClampedElements()) {
+ return i::ExternalUint8ClampedArray::cast(self->elements())->
+ external_uint8_clamped_pointer();
} else {
return NULL;
}
@@ -3952,8 +3745,8 @@ int v8::Object::GetIndexedPropertiesPixelDataLength() {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelDataLength()",
return -1);
- if (self->HasExternalPixelElements()) {
- return i::ExternalPixelArray::cast(self->elements())->length();
+ if (self->HasExternalUint8ClampedElements()) {
+ return i::ExternalUint8ClampedArray::cast(self->elements())->length();
} else {
return -1;
}
@@ -3968,15 +3761,15 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- if (!ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
- "v8::Object::SetIndexedPropertiesToExternalArrayData()",
- "length exceeds max acceptable value")) {
+ if (!Utils::ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
+ "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+ "length exceeds max acceptable value")) {
return;
}
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!ApiCheck(!self->IsJSArray(),
- "v8::Object::SetIndexedPropertiesToExternalArrayData()",
- "JSArray is not supported")) {
+ if (!Utils::ApiCheck(!self->IsJSArray(),
+ "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+ "JSArray is not supported")) {
return;
}
PrepareExternalArrayElements(self, data, array_type, length);
@@ -4011,24 +3804,11 @@ ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
"v8::GetIndexedPropertiesExternalArrayDataType()",
return static_cast<ExternalArrayType>(-1));
switch (self->elements()->map()->instance_type()) {
- case i::EXTERNAL_BYTE_ARRAY_TYPE:
- return kExternalByteArray;
- case i::EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return kExternalUnsignedByteArray;
- case i::EXTERNAL_SHORT_ARRAY_TYPE:
- return kExternalShortArray;
- case i::EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return kExternalUnsignedShortArray;
- case i::EXTERNAL_INT_ARRAY_TYPE:
- return kExternalIntArray;
- case i::EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return kExternalUnsignedIntArray;
- case i::EXTERNAL_FLOAT_ARRAY_TYPE:
- return kExternalFloatArray;
- case i::EXTERNAL_DOUBLE_ARRAY_TYPE:
- return kExternalDoubleArray;
- case i::EXTERNAL_PIXEL_ARRAY_TYPE:
- return kExternalPixelArray;
+#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
+ case i::EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ return kExternal##Type##Array;
+ TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
+#undef INSTANCE_TYPE_TO_ARRAY_TYPE
default:
return static_cast<ExternalArrayType>(-1);
}
@@ -4078,15 +3858,17 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Value> recv,
fun = i::Handle<i::JSFunction>::cast(obj);
} else {
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate = i::Execution::TryGetFunctionDelegate(
- isolate, obj, &has_pending_exception);
+ i::Handle<i::Object> delegate;
+ has_pending_exception = !i::Execution::TryGetFunctionDelegate(
+ isolate, obj).ToHandle(&delegate);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
fun = i::Handle<i::JSFunction>::cast(delegate);
recv_obj = obj;
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned = i::Execution::Call(
- isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
+ i::Handle<i::Object> returned;
+ has_pending_exception = !i::Execution::Call(
+ isolate, fun, recv_obj, argc, args, true).ToHandle(&returned);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
return Utils::ToLocal(scope.CloseAndEscape(returned));
}
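
Throughout these hunks the old out-parameter style (passing &has_pending_exception into Execution) is replaced by the MaybeHandle protocol. A simplified stand-in for the contract (the real i::MaybeHandle wraps a handle location, not a raw pointer; the names here are illustrative):

    template <typename T>
    class MaybeHandleSketch {
     public:
      MaybeHandleSketch() : value_(NULL) {}          // empty: the call threw
      explicit MaybeHandleSketch(T* value) : value_(value) {}
      // Mirrors i::MaybeHandle<T>::ToHandle(): returns false when empty, so
      // call sites can write:
      //   has_pending_exception = !i::Execution::Call(...).ToHandle(&result);
      bool ToHandle(T** out) const {
        if (value_ == NULL) return false;
        *out = value_;
        return true;
      }
      bool is_null() const { return value_ == NULL; }
     private:
      T* value_;
    };
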
@@ -4108,21 +3890,24 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
if (obj->IsJSFunction()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::New(fun, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned;
+ has_pending_exception = !i::Execution::New(
+ fun, argc, args).ToHandle(&returned);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
return Utils::ToLocal(scope.CloseAndEscape(
i::Handle<i::JSObject>::cast(returned)));
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate = i::Execution::TryGetConstructorDelegate(
- isolate, obj, &has_pending_exception);
+ i::Handle<i::Object> delegate;
+ has_pending_exception = !i::Execution::TryGetConstructorDelegate(
+ isolate, obj).ToHandle(&delegate);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
if (!delegate->IsUndefined()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned = i::Execution::Call(
- isolate, fun, obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned;
+ has_pending_exception = !i::Execution::Call(
+ isolate, fun, obj, argc, args).ToHandle(&returned);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
ASSERT(!delegate->IsUndefined());
return Utils::ToLocal(scope.CloseAndEscape(returned));
@@ -4140,7 +3925,7 @@ Local<Function> Function::New(Isolate* v8_isolate,
ENTER_V8(isolate);
return FunctionTemplateNew(
isolate, callback, data, Local<Signature>(), length, true)->
- GetFunction();
+ GetFunction();
}
@@ -4163,8 +3948,9 @@ Local<v8::Object> Function::NewInstance(int argc,
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::New(function, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned;
+ has_pending_exception = !i::Execution::New(
+ function, argc, args).ToHandle(&returned);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
return scope.Escape(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
}
@@ -4178,21 +3964,17 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Value> recv, int argc,
ENTER_V8(isolate);
i::Logger::TimerEventScope timer_scope(
isolate, i::Logger::TimerEventScope::v8_execute);
- i::Object* raw_result = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
- i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned = i::Execution::Call(
- isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
- raw_result = *returned;
- }
- i::Handle<i::Object> result(raw_result, isolate);
- return Utils::ToLocal(result);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+ i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
+ STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> returned;
+ has_pending_exception = !i::Execution::Call(
+ isolate, fun, recv_obj, argc, args, true).ToHandle(&returned);
+ EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
+ return Utils::ToLocal(scope.CloseAndEscape(returned));
}
@@ -4223,14 +4005,14 @@ Handle<Value> Function::GetDisplayName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Function::GetDisplayName()",
return ToApiHandle<Primitive>(
- isolate->factory()->undefined_value()));
+ isolate->factory()->undefined_value()));
ENTER_V8(isolate);
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
i::Handle<i::String> property_name =
isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("displayName"));
i::LookupResult lookup(isolate);
- func->LookupRealNamedProperty(*property_name, &lookup);
+ func->LookupRealNamedProperty(property_name, &lookup);
if (lookup.IsFound()) {
i::Object* value = lookup.GetLazyValue();
if (value && value->IsString()) {
@@ -4246,11 +4028,12 @@ ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script);
+ i::Handle<i::Object> scriptName = i::Script::GetNameOrSourceURL(script);
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(func->GetIsolate());
v8::ScriptOrigin origin(
- Utils::ToLocal(scriptName),
- v8::Integer::New(script->line_offset()->value()),
- v8::Integer::New(script->column_offset()->value()));
+ Utils::ToLocal(scriptName),
+ v8::Integer::New(isolate, script->line_offset()->value()),
+ v8::Integer::New(isolate, script->column_offset()->value()));
return origin;
}
return v8::ScriptOrigin(Handle<Value>());
@@ -4264,7 +4047,7 @@ int Function::GetScriptLineNumber() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return i::GetScriptLineNumber(script, func->shared()->start_position());
+ return i::Script::GetLineNumber(script, func->shared()->start_position());
}
return kLineOffsetNotFound;
}
@@ -4274,7 +4057,7 @@ int Function::GetScriptColumnNumber() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return i::GetScriptColumnNumber(script, func->shared()->start_position());
+ return i::Script::GetColumnNumber(script, func->shared()->start_position());
}
return kLineOffsetNotFound;
}
@@ -4286,22 +4069,27 @@ bool Function::IsBuiltin() const {
}
-Handle<Value> Function::GetScriptId() const {
+int Function::ScriptId() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- i::Isolate* isolate = func->GetIsolate();
if (!func->shared()->script()->IsScript()) {
- return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ return v8::UnboundScript::kNoScriptId;
}
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->id(), isolate));
+ return script->id()->value();
}
-int Function::ScriptId() const {
+Local<v8::Value> Function::GetBoundFunction() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (!func->shared()->script()->IsScript()) return v8::Script::kNoScriptId;
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return script->id()->value();
+ if (!func->shared()->bound()) {
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(func->GetIsolate()));
+ }
+ i::Handle<i::FixedArray> bound_args = i::Handle<i::FixedArray>(
+ i::FixedArray::cast(func->function_bindings()));
+ i::Handle<i::Object> original(
+ bound_args->get(i::JSFunction::kBoundFunctionIndex),
+ func->GetIsolate());
+ return Utils::ToLocal(i::Handle<i::JSFunction>::cast(original));
}
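
A usage sketch for the new GetBoundFunction() (assumes an isolate with an entered context; the evaluated source is illustrative):

    // Compile and run "(function f() {}).bind({})", then recover f
    // through the new accessor.
    v8::Local<v8::Value> BoundTargetDemo(v8::Isolate* isolate) {
      v8::Local<v8::Script> script = v8::Script::Compile(
          v8::String::NewFromUtf8(isolate, "(function f() {}).bind({})"));
      v8::Local<v8::Function> bound =
          v8::Local<v8::Function>::Cast(script->Run());
      // For the bound function this yields the original f; for an
      // ordinary function GetBoundFunction() returns undefined.
      return bound->GetBoundFunction();
    }
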
@@ -4334,7 +4122,7 @@ static inline bool Unaligned(const uint16_t* chars) {
static inline const uint16_t* Align(const uint16_t* chars) {
return reinterpret_cast<uint16_t*>(
- reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
+ reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
}
class ContainsOnlyOneByteHelper {
@@ -4354,7 +4142,7 @@ class ContainsOnlyOneByteHelper {
// Align to uintptr_t.
const uint16_t* end = chars + length;
while (Unaligned(chars) && chars != end) {
- acc |= *chars++;
+ acc |= *chars++;
}
// Read word aligned in blocks,
// checking the return value at the end of each block.
@@ -4458,8 +4246,8 @@ class Utf8LengthHelper : public i::AllStatic {
class Visitor {
public:
inline explicit Visitor()
- : utf8_length_(0),
- state_(kInitialState) {}
+ : utf8_length_(0),
+ state_(kInitialState) {}
void VisitOneByteString(const uint8_t* chars, int length) {
int utf8_length = 0;
@@ -4532,7 +4320,7 @@ class Utf8LengthHelper : public i::AllStatic {
if (!(*state & kRightmostEdgeIsCalculated)) {
ASSERT(!(*state & kRightmostEdgeIsSurrogate));
*state |= (kRightmostEdgeIsCalculated
- | (edge_surrogate ? kRightmostEdgeIsSurrogate : 0));
+ | (edge_surrogate ? kRightmostEdgeIsSurrogate : 0));
} else if (edge_surrogate && StartsWithSurrogate(*state)) {
*length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
}
@@ -4638,28 +4426,35 @@ int String::Utf8Length() const {
class Utf8WriterVisitor {
public:
Utf8WriterVisitor(
- char* buffer, int capacity, bool skip_capacity_check)
+ char* buffer,
+ int capacity,
+ bool skip_capacity_check,
+ bool replace_invalid_utf8)
: early_termination_(false),
last_character_(unibrow::Utf16::kNoPreviousCharacter),
buffer_(buffer),
start_(buffer),
capacity_(capacity),
skip_capacity_check_(capacity == -1 || skip_capacity_check),
+ replace_invalid_utf8_(replace_invalid_utf8),
utf16_chars_read_(0) {
}
static int WriteEndCharacter(uint16_t character,
int last_character,
int remaining,
- char* const buffer) {
+ char* const buffer,
+ bool replace_invalid_utf8) {
using namespace unibrow;
ASSERT(remaining > 0);
// We can't use a local buffer here because Encode needs to modify
// previous characters in the stream. We know, however, that
// exactly one character will be advanced.
- if (Utf16::IsTrailSurrogate(character) &&
- Utf16::IsLeadSurrogate(last_character)) {
- int written = Utf8::Encode(buffer, character, last_character);
+ if (Utf16::IsSurrogatePair(last_character, character)) {
+ int written = Utf8::Encode(buffer,
+ character,
+ last_character,
+ replace_invalid_utf8);
ASSERT(written == 1);
return written;
}
@@ -4668,7 +4463,8 @@ class Utf8WriterVisitor {
// Can't encode using last_character as gcc has array bounds issues.
int written = Utf8::Encode(temp_buffer,
character,
- Utf16::kNoPreviousCharacter);
+ Utf16::kNoPreviousCharacter,
+ replace_invalid_utf8);
// Won't fit.
if (written > remaining) return 0;
// Copy over the character from temp_buffer.
@@ -4678,6 +4474,16 @@ class Utf8WriterVisitor {
return written;
}
+ // Visit writes out a group of code units (chars) of a v8::String to the
+ // internal buffer_. This is done in two phases. The first phase calculates a
+ // pessimistic estimate (writable_length) of how many code units can be safely
+ // written without exceeding the buffer capacity and without writing the last
+ // code unit (it could be a lead surrogate). The estimated number of code
+ // units is then written out in one go, and the reported byte usage is used
+ // to correct the estimate. This is repeated until the estimate becomes <= 0
+ // or all code units have been written out. The second phase writes out code
+ // units until the buffer capacity is reached, would be exceeded by the next
+ // unit, or all units have been written out.
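+ // Worked example (illustrative): with 10 bytes of capacity remaining and
+ // a worst case of unibrow::Utf8::kMax16BitCodeUnitSize == 3 bytes per
+ // UTF-16 code unit, the first phase would write at most 10 / 3 == 3 code
+ // units before re-checking against the bytes actually consumed.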
template<typename Char>
void Visit(const Char* chars, const int length) {
using namespace unibrow;
@@ -4715,7 +4521,10 @@ class Utf8WriterVisitor {
} else {
for (; i < fast_length; i++) {
uint16_t character = *chars++;
- buffer += Utf8::Encode(buffer, character, last_character);
+ buffer += Utf8::Encode(buffer,
+ character,
+ last_character,
+ replace_invalid_utf8_);
last_character = character;
ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
}
@@ -4735,10 +4544,17 @@ class Utf8WriterVisitor {
ASSERT(remaining_capacity >= 0);
for (; i < length && remaining_capacity > 0; i++) {
uint16_t character = *chars++;
+ // remaining_capacity is <= 3 bytes at this point, so we do not write out
+ // an unmatched lead surrogate.
+ if (replace_invalid_utf8_ && Utf16::IsLeadSurrogate(character)) {
+ early_termination_ = true;
+ break;
+ }
int written = WriteEndCharacter(character,
last_character,
remaining_capacity,
- buffer);
+ buffer,
+ replace_invalid_utf8_);
if (written == 0) {
early_termination_ = true;
break;
@@ -4786,14 +4602,15 @@ class Utf8WriterVisitor {
char* const start_;
int capacity_;
bool const skip_capacity_check_;
+ bool const replace_invalid_utf8_;
int utf16_chars_read_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8WriterVisitor);
};
static bool RecursivelySerializeToUtf8(i::String* current,
- Utf8WriterVisitor* writer,
- int recursion_budget) {
+ Utf8WriterVisitor* writer,
+ int recursion_budget) {
while (!writer->IsDone()) {
i::ConsString* cons_string = i::String::VisitFlat(writer, current);
if (cons_string == NULL) return true; // Leaf node.
@@ -4820,13 +4637,15 @@ int String::WriteUtf8(char* buffer,
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
if (options & HINT_MANY_WRITES_EXPECTED) {
- FlattenString(str); // Flatten the string for efficiency.
+ str = i::String::Flatten(str); // Flatten the string for efficiency.
}
const int string_length = str->length();
bool write_null = !(options & NO_NULL_TERMINATION);
+ bool replace_invalid_utf8 = (options & REPLACE_INVALID_UTF8);
+ int max16BitCodeUnitSize = unibrow::Utf8::kMax16BitCodeUnitSize;
// First check if we can just write the string without checking capacity.
- if (capacity == -1 || capacity / 3 >= string_length) {
- Utf8WriterVisitor writer(buffer, capacity, true);
+ if (capacity == -1 || capacity / max16BitCodeUnitSize >= string_length) {
+ Utf8WriterVisitor writer(buffer, capacity, true, replace_invalid_utf8);
const int kMaxRecursion = 100;
bool success = RecursivelySerializeToUtf8(*str, &writer, kMaxRecursion);
if (success) return writer.CompleteWrite(write_null, nchars_ref);
@@ -4853,8 +4672,8 @@ int String::WriteUtf8(char* buffer,
}
}
  // Recursive slow path can potentially be unreasonably slow. Flatten.
- str = FlattenGetString(str);
- Utf8WriterVisitor writer(buffer, capacity, false);
+ str = i::String::Flatten(str);
+ Utf8WriterVisitor writer(buffer, capacity, false, replace_invalid_utf8);
i::String::VisitFlat(&writer, *str);
return writer.CompleteWrite(write_null, nchars_ref);
}
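
Caller-side sketch of the new REPLACE_INVALID_UTF8 option threaded through above; with it, unpaired surrogates are written as the replacement character instead of invalid byte sequences (the buffer sizing below is the straightforward approach, not the only one):

    #include <cstdio>
    #include <vector>
    #include <v8.h>

    void WriteUtf8Demo(v8::Handle<v8::String> str) {
      std::vector<char> buf(str->Utf8Length() + 1);  // +1 for the NUL
      int nchars = 0;
      str->WriteUtf8(&buf[0], static_cast<int>(buf.size()), &nchars,
                     v8::String::REPLACE_INVALID_UTF8);
      std::printf("%d UTF-16 units -> %s\n", nchars, &buf[0]);
    }
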
@@ -4875,7 +4694,7 @@ static inline int WriteHelper(const String* string,
if (options & String::HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringCharacterStream or Get(i) to access the characters.
- FlattenString(str);
+ str = i::String::Flatten(str);
}
int end = start + length;
if ((length == -1) || (length > str->length() - start) )
@@ -4951,14 +4770,14 @@ void v8::String::VerifyExternalStringResourceBase(
} else {
expected = NULL;
expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
- : TWO_BYTE_ENCODING;
+ : TWO_BYTE_ENCODING;
}
CHECK_EQ(expected, value);
CHECK_EQ(expectedEncoding, encoding);
}
const v8::String::ExternalAsciiStringResource*
- v8::String::GetExternalAsciiStringResource() const {
+v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (i::StringShape(*str).IsExternalAscii()) {
const void* resource =
@@ -5033,9 +4852,9 @@ int v8::Object::InternalFieldCount() {
static bool InternalFieldOK(i::Handle<i::JSObject> obj,
int index,
const char* location) {
- return ApiCheck(index < obj->GetInternalFieldCount(),
- location,
- "Internal field out of bounds");
+ return Utils::ApiCheck(index < obj->GetInternalFieldCount(),
+ location,
+ "Internal field out of bounds");
}
@@ -5119,7 +4938,7 @@ void v8::V8::SetEntropySource(EntropySource entropy_source) {
void v8::V8::SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver return_address_resolver) {
+ ReturnAddressLocationResolver return_address_resolver) {
i::V8::SetReturnAddressLocationResolver(return_address_resolver);
}
@@ -5157,21 +4976,15 @@ void v8::V8::SetJitCodeEventHandler(
void v8::V8::SetArrayBufferAllocator(
ArrayBuffer::Allocator* allocator) {
- if (!ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
- "v8::V8::SetArrayBufferAllocator",
- "ArrayBufferAllocator might only be set once"))
+ if (!Utils::ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
+ "v8::V8::SetArrayBufferAllocator",
+ "ArrayBufferAllocator might only be set once"))
return;
i::V8::SetArrayBufferAllocator(allocator);
}
bool v8::V8::Dispose() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
- "v8::V8::Dispose()",
- "Use v8::Isolate::Dispose() for a non-default isolate.")) {
- return false;
- }
i::V8::TearDown();
return true;
}
@@ -5252,8 +5065,8 @@ int v8::V8::ContextDisposedNotification() {
}
-bool v8::V8::InitializeICU() {
- return i::InitializeICU();
+bool v8::V8::InitializeICU(const char* icu_data_file) {
+ return i::InitializeICU(icu_data_file);
}
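
Embedder-side sketch of the new overload (the file name is the conventional ICU data file, but any path the embedder ships works; NULL keeps the previous built-in behavior):

    bool icu_ok = v8::V8::InitializeICU("./icudtl.dat");
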
@@ -5278,11 +5091,12 @@ static i::Handle<i::Context> CreateEnvironment(
if (!global_template.IsEmpty()) {
// Make sure that the global_template has a constructor.
- global_constructor = EnsureConstructor(*global_template);
+ global_constructor = EnsureConstructor(isolate, *global_template);
// Create a fresh template for the global proxy object.
- proxy_template = ObjectTemplate::New();
- proxy_constructor = EnsureConstructor(*proxy_template);
+ proxy_template = ObjectTemplate::New(
+ reinterpret_cast<v8::Isolate*>(isolate));
+ proxy_constructor = EnsureConstructor(isolate, *proxy_template);
// Set the global template to be the prototype template of
// global proxy template.
@@ -5318,7 +5132,6 @@ static i::Handle<i::Context> CreateEnvironment(
global_constructor->set_needs_access_check(
proxy_constructor->needs_access_check());
}
- isolate->runtime_profiler()->Reset();
}
// Leave V8.
@@ -5335,6 +5148,8 @@ Local<Context> v8::Context::New(
LOG_API(isolate, "Context::New");
ON_BAILOUT(isolate, "v8::Context::New()", return Local<Context>());
i::HandleScope scope(isolate);
+ ExtensionConfiguration no_extensions;
+ if (extensions == NULL) extensions = &no_extensions;
i::Handle<i::Context> env =
CreateEnvironment(isolate, extensions, global_template, global_object);
if (env.is_null()) return Local<Context>();
@@ -5368,44 +5183,12 @@ Handle<Value> v8::Context::GetSecurityToken() {
}
-bool Context::HasOutOfMemoryException() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- return env->has_out_of_memory();
-}
-
-
-bool Context::InContext() {
- return i::Isolate::Current()->context() != NULL;
-}
-
-
v8::Isolate* Context::GetIsolate() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
return reinterpret_cast<Isolate*>(env->GetIsolate());
}
-v8::Local<v8::Context> Context::GetEntered() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
- return Local<Context>();
- }
- return reinterpret_cast<Isolate*>(isolate)->GetEnteredContext();
-}
-
-
-v8::Local<v8::Context> Context::GetCurrent() {
- i::Isolate* isolate = i::Isolate::Current();
- return reinterpret_cast<Isolate*>(isolate)->GetCurrentContext();
-}
-
-
-v8::Local<v8::Context> Context::GetCalling() {
- i::Isolate* isolate = i::Isolate::Current();
- return reinterpret_cast<Isolate*>(isolate)->GetCallingContext();
-}
-
-
v8::Local<v8::Object> Context::Global() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -5414,7 +5197,7 @@ v8::Local<v8::Object> Context::Global() {
  // but can't presently as calls to GetPrototype will return the wrong result.
if (i::Handle<i::JSGlobalProxy>::cast(
global)->IsDetachedFrom(context->global_object())) {
- global = i::Handle<i::Object>(context->global_object(), isolate);
+ global = i::Handle<i::Object>(context->global_object(), isolate);
}
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
@@ -5458,9 +5241,9 @@ Local<v8::Object> ObjectTemplate::NewInstance() {
LOG_API(isolate, "ObjectTemplate::NewInstance");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj =
- i::Execution::InstantiateObject(Utils::OpenHandle(this),
- &has_pending_exception);
+ i::Handle<i::Object> obj;
+ has_pending_exception = !i::Execution::InstantiateObject(
+ Utils::OpenHandle(this)).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
}
@@ -5473,9 +5256,9 @@ Local<v8::Function> FunctionTemplate::GetFunction() {
LOG_API(isolate, "FunctionTemplate::GetFunction");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj =
- i::Execution::InstantiateFunction(Utils::OpenHandle(this),
- &has_pending_exception);
+ i::Handle<i::Object> obj;
+ has_pending_exception = !i::Execution::InstantiateFunction(
+ Utils::OpenHandle(this)).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Function>());
return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
}
@@ -5500,26 +5283,11 @@ Local<External> v8::External::New(Isolate* isolate, void* value) {
}
-Local<External> v8::External::New(void* value) {
- return v8::External::New(Isolate::GetCurrent(), value);
-}
-
-
void* External::Value() const {
return ExternalValue(*Utils::OpenHandle(this));
}
-Local<String> v8::String::Empty() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
- return v8::Local<String>();
- }
- LOG_API(isolate, "String::Empty()");
- return Utils::ToLocal(isolate->factory()->empty_string());
-}
-
-
// anonymous namespace for string creation helper functions
namespace {
@@ -5541,19 +5309,21 @@ inline int StringLength(const uint16_t* string) {
}
-inline i::Handle<i::String> NewString(i::Factory* factory,
- String::NewStringType type,
- i::Vector<const char> string) {
- if (type ==String::kInternalizedString) {
+MUST_USE_RESULT
+inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
+ String::NewStringType type,
+ i::Vector<const char> string) {
+ if (type == String::kInternalizedString) {
return factory->InternalizeUtf8String(string);
}
return factory->NewStringFromUtf8(string);
}
-inline i::Handle<i::String> NewString(i::Factory* factory,
- String::NewStringType type,
- i::Vector<const uint8_t> string) {
+MUST_USE_RESULT
+inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
+ String::NewStringType type,
+ i::Vector<const uint8_t> string) {
if (type == String::kInternalizedString) {
return factory->InternalizeOneByteString(string);
}
@@ -5561,9 +5331,10 @@ inline i::Handle<i::String> NewString(i::Factory* factory,
}
-inline i::Handle<i::String> NewString(i::Factory* factory,
- String::NewStringType type,
- i::Vector<const uint16_t> string) {
+MUST_USE_RESULT
+inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
+ String::NewStringType type,
+ i::Vector<const uint16_t> string) {
if (type == String::kInternalizedString) {
return factory->InternalizeTwoByteString(string);
}
@@ -5582,12 +5353,15 @@ inline Local<String> NewString(Isolate* v8_isolate,
EnsureInitializedForIsolate(isolate, location);
LOG_API(isolate, env);
if (length == 0 && type != String::kUndetectableString) {
- return String::Empty();
+ return String::Empty(v8_isolate);
}
ENTER_V8(isolate);
if (length == -1) length = StringLength(data);
+ // We do not expect this to fail. Change this if it does.
i::Handle<i::String> result = NewString(
- isolate->factory(), type, i::Vector<const Char>(data, length));
+ isolate->factory(),
+ type,
+ i::Vector<const Char>(data, length)).ToHandleChecked();
if (type == String::kUndetectableString) {
result->MarkAsUndetectable();
}
@@ -5643,48 +5417,34 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
LOG_API(isolate, "String::New(char)");
ENTER_V8(isolate);
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
- i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
- right_string);
+ // We do not expect this to fail. Change this if it does.
+ i::Handle<i::String> result = isolate->factory()->NewConsString(
+ left_string, right_string).ToHandleChecked();
return Utils::ToLocal(result);
}
-i::Handle<i::String> NewExternalStringHandle(i::Isolate* isolate,
- v8::String::ExternalStringResource* resource) {
- i::Handle<i::String> result =
- isolate->factory()->NewExternalStringFromTwoByte(resource);
- return result;
-}
-
-
-i::Handle<i::String> NewExternalAsciiStringHandle(i::Isolate* isolate,
- v8::String::ExternalAsciiStringResource* resource) {
- i::Handle<i::String> result =
- isolate->factory()->NewExternalStringFromAscii(resource);
- return result;
+static i::Handle<i::String> NewExternalStringHandle(
+ i::Isolate* isolate,
+ v8::String::ExternalStringResource* resource) {
+ // We do not expect this to fail. Change this if it does.
+ return isolate->factory()->NewExternalStringFromTwoByte(
+ resource).ToHandleChecked();
}
-bool RedirectToExternalString(i::Isolate* isolate,
- i::Handle<i::String> parent,
- i::Handle<i::String> external) {
- if (parent->IsConsString()) {
- i::Handle<i::ConsString> cons = i::Handle<i::ConsString>::cast(parent);
- cons->set_first(*external);
- cons->set_second(isolate->heap()->empty_string());
- } else {
- ASSERT(parent->IsSlicedString());
- i::Handle<i::SlicedString> slice = i::Handle<i::SlicedString>::cast(parent);
- slice->set_parent(*external);
- slice->set_offset(0);
- }
- return true;
+static i::Handle<i::String> NewExternalAsciiStringHandle(
+ i::Isolate* isolate,
+ v8::String::ExternalAsciiStringResource* resource) {
+ // We do not expect this to fail. Change this if it does.
+ return isolate->factory()->NewExternalStringFromAscii(
+ resource).ToHandleChecked();
}
Local<String> v8::String::NewExternal(
- Isolate* isolate,
- v8::String::ExternalStringResource* resource) {
+ Isolate* isolate,
+ v8::String::ExternalStringResource* resource) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
LOG_API(i_isolate, "String::NewExternal");
@@ -5696,12 +5456,6 @@ Local<String> v8::String::NewExternal(
}
-Local<String> v8::String::NewExternal(
- v8::String::ExternalStringResource* resource) {
- return NewExternal(Isolate::GetCurrent(), resource);
-}
-
-
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
@@ -5717,30 +5471,18 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
}
CHECK(resource && resource->data());
- bool result;
- i::Handle<i::String> external;
- if (isolate->heap()->old_pointer_space()->Contains(*obj)) {
- // We do not allow external strings in the old pointer space. Instead of
- // converting the string in-place, we keep the cons/sliced string and
- // point it to a newly-allocated external string.
- external = NewExternalStringHandle(isolate, resource);
- result = RedirectToExternalString(isolate, obj, external);
- } else {
- result = obj->MakeExternal(resource);
- external = obj;
- }
-
- ASSERT(external->IsExternalString());
- if (result && !external->IsInternalizedString()) {
- isolate->heap()->external_string_table()->AddString(*external);
+ bool result = obj->MakeExternal(resource);
+ if (result) {
+ ASSERT(obj->IsExternalString());
+ isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
}
Local<String> v8::String::NewExternal(
- Isolate* isolate,
- v8::String::ExternalAsciiStringResource* resource) {
+ Isolate* isolate,
+ v8::String::ExternalAsciiStringResource* resource) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
LOG_API(i_isolate, "String::NewExternal");
@@ -5753,12 +5495,6 @@ Local<String> v8::String::NewExternal(
}
-Local<String> v8::String::NewExternal(
- v8::String::ExternalAsciiStringResource* resource) {
- return NewExternal(Isolate::GetCurrent(), resource);
-}
-
-
bool v8::String::MakeExternal(
v8::String::ExternalAsciiStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
@@ -5775,22 +5511,10 @@ bool v8::String::MakeExternal(
}
CHECK(resource && resource->data());
- bool result;
- i::Handle<i::String> external;
- if (isolate->heap()->old_pointer_space()->Contains(*obj)) {
- // We do not allow external strings in the old pointer space. Instead of
- // converting the string in-place, we keep the cons/sliced string and
- // point it to a newly-allocated external string.
- external = NewExternalAsciiStringHandle(isolate, resource);
- result = RedirectToExternalString(isolate, obj, external);
- } else {
- result = obj->MakeExternal(resource);
- external = obj;
- }
-
- ASSERT(external->IsExternalString());
- if (result && !external->IsInternalizedString()) {
- isolate->heap()->external_string_table()->AddString(*external);
+ bool result = obj->MakeExternal(resource);
+ if (result) {
+ ASSERT(obj->IsExternalString());
+ isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
}
@@ -5825,32 +5549,23 @@ Local<v8::Object> v8::Object::New(Isolate* isolate) {
}
-Local<v8::Object> v8::Object::New() {
- return New(Isolate::GetCurrent());
-}
-
-
Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::NumberObject::New()");
LOG_API(i_isolate, "NumberObject::New");
ENTER_V8(i_isolate);
i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
- i::Handle<i::Object> obj = i_isolate->factory()->ToObject(number);
+ i::Handle<i::Object> obj =
+ i::Object::ToObject(i_isolate, number).ToHandleChecked();
return Utils::ToLocal(obj);
}
-Local<v8::Value> v8::NumberObject::New(double value) {
- return New(Isolate::GetCurrent(), value);
-}
-
-
double v8::NumberObject::ValueOf() const {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "NumberObject::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ i::Isolate* isolate = jsvalue->GetIsolate();
+ LOG_API(isolate, "NumberObject::NumberValue");
return jsvalue->value()->Number();
}
@@ -5864,36 +5579,38 @@ Local<v8::Value> v8::BooleanObject::New(bool value) {
? isolate->heap()->true_value()
: isolate->heap()->false_value(),
isolate);
- i::Handle<i::Object> obj = isolate->factory()->ToObject(boolean);
+ i::Handle<i::Object> obj =
+ i::Object::ToObject(isolate, boolean).ToHandleChecked();
return Utils::ToLocal(obj);
}
bool v8::BooleanObject::ValueOf() const {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "BooleanObject::BooleanValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ i::Isolate* isolate = jsvalue->GetIsolate();
+ LOG_API(isolate, "BooleanObject::BooleanValue");
return jsvalue->value()->IsTrue();
}
Local<v8::Value> v8::StringObject::New(Handle<String> value) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> string = Utils::OpenHandle(*value);
+ i::Isolate* isolate = string->GetIsolate();
EnsureInitializedForIsolate(isolate, "v8::StringObject::New()");
LOG_API(isolate, "StringObject::New");
ENTER_V8(isolate);
i::Handle<i::Object> obj =
- isolate->factory()->ToObject(Utils::OpenHandle(*value));
+ i::Object::ToObject(isolate, string).ToHandleChecked();
return Utils::ToLocal(obj);
}
Local<v8::String> v8::StringObject::ValueOf() const {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "StringObject::StringValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ i::Isolate* isolate = jsvalue->GetIsolate();
+ LOG_API(isolate, "StringObject::StringValue");
return Utils::ToLocal(
i::Handle<i::String>(i::String::cast(jsvalue->value())));
}
@@ -5904,17 +5621,17 @@ Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
EnsureInitializedForIsolate(i_isolate, "v8::SymbolObject::New()");
LOG_API(i_isolate, "SymbolObject::New");
ENTER_V8(i_isolate);
- i::Handle<i::Object> obj =
- i_isolate->factory()->ToObject(Utils::OpenHandle(*value));
+ i::Handle<i::Object> obj = i::Object::ToObject(
+ i_isolate, Utils::OpenHandle(*value)).ToHandleChecked();
return Utils::ToLocal(obj);
}
Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "SymbolObject::SymbolValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ i::Isolate* isolate = jsvalue->GetIsolate();
+ LOG_API(isolate, "SymbolObject::SymbolValue");
return Utils::ToLocal(
i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value())));
}
@@ -5930,29 +5647,26 @@ Local<v8::Value> v8::Date::New(Isolate* isolate, double time) {
}
ENTER_V8(i_isolate);
EXCEPTION_PREAMBLE(i_isolate);
- i::Handle<i::Object> obj =
- i::Execution::NewDate(i_isolate, time, &has_pending_exception);
+ i::Handle<i::Object> obj;
+ has_pending_exception = !i::Execution::NewDate(
+ i_isolate, time).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(i_isolate, Local<v8::Value>());
return Utils::ToLocal(obj);
}
-Local<v8::Value> v8::Date::New(double time) {
- return New(Isolate::GetCurrent(), time);
-}
-
-
double v8::Date::ValueOf() const {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "Date::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
+ i::Isolate* isolate = jsdate->GetIsolate();
+ LOG_API(isolate, "Date::NumberValue");
return jsdate->value()->Number();
}
void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (!i_isolate->IsInitialized()) return;
ON_BAILOUT(i_isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
return);
LOG_API(i_isolate, "Date::DateTimeConfigurationChangeNotification");
@@ -5960,35 +5674,18 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
i_isolate->date_cache()->ResetDateCache();
- i::HandleScope scope(i_isolate);
- // Get the function ResetDateCache (defined in date.js).
- i::Handle<i::String> func_name_str =
- i_isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("ResetDateCache"));
- i::MaybeObject* result =
- i_isolate->js_builtins_object()->GetProperty(*func_name_str);
- i::Object* object_func;
- if (!result->ToObject(&object_func)) {
+ if (!i_isolate->eternal_handles()->Exists(
+ i::EternalHandles::DATE_CACHE_VERSION)) {
return;
}
-
- if (object_func->IsJSFunction()) {
- i::Handle<i::JSFunction> func =
- i::Handle<i::JSFunction>(i::JSFunction::cast(object_func));
-
- // Call ResetDateCache(0 but expect no exceptions:
- bool caught_exception = false;
- i::Execution::TryCall(func,
- i_isolate->js_builtins_object(),
- 0,
- NULL,
- &caught_exception);
- }
-}
-
-
-void v8::Date::DateTimeConfigurationChangeNotification() {
- DateTimeConfigurationChangeNotification(Isolate::GetCurrent());
+ i::Handle<i::FixedArray> date_cache_version =
+ i::Handle<i::FixedArray>::cast(i_isolate->eternal_handles()->GetSingleton(
+ i::EternalHandles::DATE_CACHE_VERSION));
+ ASSERT_EQ(1, date_cache_version->length());
+ CHECK(date_cache_version->get(0)->IsSmi());
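+ // Bumping this counter is observed on the JS side (date.js), which is
+ // expected to refresh its caches lazily, replacing the old path of
+ // calling back into ResetDateCache from here.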
+ date_cache_version->set(
+ 0,
+ i::Smi::FromInt(i::Smi::cast(date_cache_version->get(0))->value() + 1));
}
@@ -6012,10 +5709,10 @@ Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
LOG_API(isolate, "RegExp::New");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSRegExp> obj = i::Execution::NewJSRegExp(
+ i::Handle<i::JSRegExp> obj;
+ has_pending_exception = !i::Execution::NewJSRegExp(
Utils::OpenHandle(*pattern),
- RegExpFlagsToString(flags),
- &has_pending_exception);
+ RegExpFlagsToString(flags)).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::RegExp>());
return Utils::ToLocal(i::Handle<i::JSRegExp>::cast(obj));
}
@@ -6028,8 +5725,8 @@ Local<v8::String> v8::RegExp::GetSource() const {
// Assert that the static flags cast in GetFlags is valid.
-#define REGEXP_FLAG_ASSERT_EQ(api_flag, internal_flag) \
- STATIC_ASSERT(static_cast<int>(v8::RegExp::api_flag) == \
+#define REGEXP_FLAG_ASSERT_EQ(api_flag, internal_flag) \
+ STATIC_ASSERT(static_cast<int>(v8::RegExp::api_flag) == \
static_cast<int>(i::JSRegExp::internal_flag))
REGEXP_FLAG_ASSERT_EQ(kNone, NONE);
REGEXP_FLAG_ASSERT_EQ(kGlobal, GLOBAL);
@@ -6057,11 +5754,6 @@ Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
}
-Local<v8::Array> v8::Array::New(int length) {
- return New(Isolate::GetCurrent(), length);
-}
-
-
uint32_t v8::Array::Length() const {
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
i::Object* length = obj->length();
@@ -6088,13 +5780,156 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
EXCEPTION_PREAMBLE(isolate);
ENTER_V8(isolate);
- i::Handle<i::JSObject> result = i::JSObject::Copy(paragon_handle);
+ i::Handle<i::JSObject> result =
+ isolate->factory()->CopyJSObject(paragon_handle);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
}
+bool Value::IsPromise() const {
+ i::Handle<i::Object> val = Utils::OpenHandle(this);
+ if (!val->IsJSObject()) return false;
+ i::Handle<i::JSObject> obj = i::Handle<i::JSObject>::cast(val);
+ i::Isolate* isolate = obj->GetIsolate();
+ LOG_API(isolate, "IsPromise");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { obj };
+ i::Handle<i::Object> b;
+ has_pending_exception = !i::Execution::Call(
+ isolate,
+ handle(
+ isolate->context()->global_object()->native_context()->is_promise()),
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(argv), argv,
+ false).ToHandle(&b);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
+ return b->BooleanValue();
+}
+
+
+Local<Promise::Resolver> Promise::Resolver::New(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ LOG_API(isolate, "Promise::Resolver::New");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> result;
+ has_pending_exception = !i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_create()),
+ isolate->factory()->undefined_value(),
+ 0, NULL,
+ false).ToHandle(&result);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise::Resolver>());
+ return Local<Promise::Resolver>::Cast(Utils::ToLocal(result));
+}
+
+
+Local<Promise> Promise::Resolver::GetPromise() {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ return Local<Promise>::Cast(Utils::ToLocal(promise));
+}
+
+
+void Promise::Resolver::Resolve(Handle<Value> value) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Resolver::Resolve");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
+ has_pending_exception = i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_resolve()),
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(argv), argv,
+ false).is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
+}
+
+
+void Promise::Resolver::Reject(Handle<Value> value) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Resolver::Reject");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
+ has_pending_exception = i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_reject()),
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(argv), argv,
+ false).is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
+}
+
+
+Local<Promise> Promise::Chain(Handle<Function> handler) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Chain");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
+ i::Handle<i::Object> result;
+ has_pending_exception = !i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_chain()),
+ promise,
+ ARRAY_SIZE(argv), argv,
+ false).ToHandle(&result);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
+ return Local<Promise>::Cast(Utils::ToLocal(result));
+}
+
+
+Local<Promise> Promise::Catch(Handle<Function> handler) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Catch");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
+ i::Handle<i::Object> result;
+ has_pending_exception = !i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_catch()),
+ promise,
+ ARRAY_SIZE(argv), argv,
+ false).ToHandle(&result);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
+ return Local<Promise>::Cast(Utils::ToLocal(result));
+}
+
+
+Local<Promise> Promise::Then(Handle<Function> handler) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Then");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
+ i::Handle<i::Object> result;
+ has_pending_exception = !i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_then()),
+ promise,
+ ARRAY_SIZE(argv), argv,
+ false).ToHandle(&result);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
+ return Local<Promise>::Cast(Utils::ToLocal(result));
+}
+
+
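Taken together, the new Resolver/Chain/Catch/Then entry points let an embedder create and react to promises without evaluating script. A minimal sketch (assumes an entered context; on_value is any Local<Function> taking one argument):

    void PromiseDemo(v8::Isolate* isolate,
                     v8::Local<v8::Function> on_value) {
      v8::Local<v8::Promise::Resolver> resolver =
          v8::Promise::Resolver::New(isolate);
      v8::Local<v8::Promise> promise = resolver->GetPromise();
      promise = promise->Then(on_value);  // queue a fulfillment reaction
      resolver->Resolve(v8::Integer::New(isolate, 42));
      // The reaction runs when pending microtasks are processed, e.g. as
      // part of the embedder's message loop.
    }
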
bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
@@ -6102,9 +5937,9 @@ bool v8::ArrayBuffer::IsExternal() const {
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
- ApiCheck(!obj->is_external(),
- "v8::ArrayBuffer::Externalize",
- "ArrayBuffer already externalized");
+ Utils::ApiCheck(!obj->is_external(),
+ "v8::ArrayBuffer::Externalize",
+ "ArrayBuffer already externalized");
obj->set_is_external(true);
size_t byte_length = static_cast<size_t>(obj->byte_length()->Number());
Contents contents;
@@ -6117,25 +5952,12 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
void v8::ArrayBuffer::Neuter() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- ApiCheck(obj->is_external(),
- "v8::ArrayBuffer::Neuter",
- "Only externalized ArrayBuffers can be neutered");
+ Utils::ApiCheck(obj->is_external(),
+ "v8::ArrayBuffer::Neuter",
+ "Only externalized ArrayBuffers can be neutered");
LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
ENTER_V8(isolate);
-
- for (i::Handle<i::Object> view_obj(obj->weak_first_view(), isolate);
- !view_obj->IsUndefined();) {
- i::Handle<i::JSArrayBufferView> view(i::JSArrayBufferView::cast(*view_obj));
- if (view->IsJSTypedArray()) {
- i::JSTypedArray::cast(*view)->Neuter();
- } else if (view->IsJSDataView()) {
- i::JSDataView::cast(*view)->Neuter();
- } else {
- UNREACHABLE();
- }
- view_obj = i::handle(view->weak_next(), isolate);
- }
- obj->Neuter();
+ i::Runtime::NeuterArrayBuffer(obj);
}
@@ -6157,11 +5979,6 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
}
-Local<ArrayBuffer> v8::ArrayBuffer::New(size_t byte_length) {
- return New(Isolate::GetCurrent(), byte_length);
-}
-
-
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -6175,15 +5992,17 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
}
-Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
- return New(Isolate::GetCurrent(), data, byte_length);
-}
-
-
Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- ASSERT(obj->buffer()->IsJSArrayBuffer());
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+ i::Handle<i::JSArrayBuffer> buffer;
+ if (obj->IsJSDataView()) {
+ i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj));
+ ASSERT(data_view->buffer()->IsJSArrayBuffer());
+ buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()));
+ } else {
+ ASSERT(obj->IsJSTypedArray());
+ buffer = i::JSTypedArray::cast(*obj)->GetBuffer();
+ }
return Utils::ToLocal(buffer);
}
@@ -6213,7 +6032,7 @@ static inline void SetupArrayBufferView(
size_t byte_offset,
size_t byte_length) {
ASSERT(byte_offset + byte_length <=
- static_cast<size_t>(buffer->byte_length()->Number()));
+ static_cast<size_t>(buffer->byte_length()->Number()));
obj->set_buffer(*buffer);
@@ -6221,11 +6040,11 @@ static inline void SetupArrayBufferView(
buffer->set_weak_first_view(*obj);
i::Handle<i::Object> byte_offset_object =
- isolate->factory()->NewNumberFromSize(byte_offset);
+ isolate->factory()->NewNumberFromSize(byte_offset);
obj->set_byte_offset(*byte_offset_object);
i::Handle<i::Object> byte_length_object =
- isolate->factory()->NewNumberFromSize(byte_length);
+ isolate->factory()->NewNumberFromSize(byte_length);
obj->set_byte_length(*byte_length_object);
}
@@ -6242,113 +6061,150 @@ i::Handle<i::JSTypedArray> NewTypedArray(
ASSERT(byte_offset % sizeof(ElementType) == 0);
CHECK(length <= (std::numeric_limits<size_t>::max() / sizeof(ElementType)));
+ CHECK(length <= static_cast<size_t>(i::Smi::kMaxValue));
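+ // The Smi bound matters because length is narrowed to an int for
+ // NewExternalArray below and ends up in Smi-tagged length fields.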
size_t byte_length = length * sizeof(ElementType);
SetupArrayBufferView(
isolate, obj, buffer, byte_offset, byte_length);
i::Handle<i::Object> length_object =
- isolate->factory()->NewNumberFromSize(length);
+ isolate->factory()->NewNumberFromSize(length);
obj->set_length(*length_object);
i::Handle<i::ExternalArray> elements =
isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- obj->set_elements(*elements);
+ i::Handle<i::Map> map =
+ i::JSObject::GetElementsTransitionMap(obj, elements_kind);
+ i::JSObject::SetMapAndElements(obj, map, elements);
return obj;
}
-#define TYPED_ARRAY_NEW(TypedArray, element_type, array_type, elements_kind) \
- Local<TypedArray> TypedArray::New(Handle<ArrayBuffer> array_buffer, \
+#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size) \
+ Local<Type##Array> Type##Array::New(Handle<ArrayBuffer> array_buffer, \
size_t byte_offset, size_t length) { \
- i::Isolate* isolate = i::Isolate::Current(); \
+ i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \
EnsureInitializedForIsolate(isolate, \
- "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)"); \
LOG_API(isolate, \
- "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)"); \
ENTER_V8(isolate); \
+ if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue), \
+ "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)", \
+ "length exceeds max allowed value")) { \
+ return Local<Type##Array>(); \
+ } \
i::Handle<i::JSTypedArray> obj = \
- NewTypedArray<element_type, array_type, elements_kind>( \
+ NewTypedArray<ctype, v8::kExternal##Type##Array, \
+ i::EXTERNAL_##TYPE##_ELEMENTS>( \
isolate, array_buffer, byte_offset, length); \
- return Utils::ToLocal##TypedArray(obj); \
- }
-
-
-TYPED_ARRAY_NEW(Uint8Array, uint8_t, kExternalUnsignedByteArray,
- i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS)
-TYPED_ARRAY_NEW(Uint8ClampedArray, uint8_t, kExternalPixelArray,
- i::EXTERNAL_PIXEL_ELEMENTS)
-TYPED_ARRAY_NEW(Int8Array, int8_t, kExternalByteArray,
- i::EXTERNAL_BYTE_ELEMENTS)
-TYPED_ARRAY_NEW(Uint16Array, uint16_t, kExternalUnsignedShortArray,
- i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS)
-TYPED_ARRAY_NEW(Int16Array, int16_t, kExternalShortArray,
- i::EXTERNAL_SHORT_ELEMENTS)
-TYPED_ARRAY_NEW(Uint32Array, uint32_t, kExternalUnsignedIntArray,
- i::EXTERNAL_UNSIGNED_INT_ELEMENTS)
-TYPED_ARRAY_NEW(Int32Array, int32_t, kExternalIntArray,
- i::EXTERNAL_INT_ELEMENTS)
-TYPED_ARRAY_NEW(Float32Array, float, kExternalFloatArray,
- i::EXTERNAL_FLOAT_ELEMENTS)
-TYPED_ARRAY_NEW(Float64Array, double, kExternalDoubleArray,
- i::EXTERNAL_DOUBLE_ELEMENTS)
+ return Utils::ToLocal##Type##Array(obj); \
+ }
+
+TYPED_ARRAYS(TYPED_ARRAY_NEW)
#undef TYPED_ARRAY_NEW
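The TYPED_ARRAYS(TYPED_ARRAY_NEW) instantiation above is the X-macro pattern: one list macro applies TYPED_ARRAY_NEW once per typed-array kind, replacing the nine hand-written invocations deleted here. A minimal self-contained sketch of the pattern itself (COLORS and the names below are hypothetical, not from V8):

#include <cstdio>

// List macro: one V(...) tuple per entry, analogous to TYPED_ARRAYS.
#define COLORS(V) V(Red, 0xff0000) V(Green, 0x00ff00) V(Blue, 0x0000ff)

// Per-entry macro: emits one constant per tuple, analogous to
// TYPED_ARRAY_NEW emitting one New() function per typed-array kind.
#define DECLARE_COLOR(Name, value) static const int k##Name = value;
COLORS(DECLARE_COLOR)
#undef DECLARE_COLOR

int main() {
  printf("%06x\n", kGreen);  // prints 00ff00
  return 0;
}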
Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t byte_length) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
+ i::Isolate* isolate = buffer->GetIsolate();
EnsureInitializedForIsolate(
isolate, "v8::DataView::New(void*, size_t, size_t)");
LOG_API(isolate, "v8::DataView::New(void*, size_t, size_t)");
ENTER_V8(isolate);
i::Handle<i::JSDataView> obj = isolate->factory()->NewJSDataView();
- i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
SetupArrayBufferView(
isolate, obj, buffer, byte_offset, byte_length);
return Utils::ToLocal(obj);
}
-Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
+Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
LOG_API(i_isolate, "Symbol::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
- if (data != NULL) {
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- result->set_name(*name);
- }
+ if (!name.IsEmpty()) result->set_name(*Utils::OpenHandle(*name));
return Utils::ToLocal(result);
}
-Local<Private> v8::Private::New(
- Isolate* isolate, const char* data, int length) {
+Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
+ i::Handle<i::String> part = i_isolate->factory()->for_string();
+ i::Handle<i::JSObject> symbols =
+ i::Handle<i::JSObject>::cast(
+ i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
+ i::Handle<i::Object> symbol =
+ i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked();
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = i_isolate->factory()->NewSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
+ i::JSObject::SetProperty(
+ symbols, i_name, symbol, NONE, i::STRICT).Assert();
+ }
+ return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+}
+
+
+Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
+ i::Handle<i::String> part = i_isolate->factory()->for_api_string();
+ i::Handle<i::JSObject> symbols =
+ i::Handle<i::JSObject>::cast(
+ i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
+ i::Handle<i::Object> symbol =
+ i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked();
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = i_isolate->factory()->NewSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
+ i::JSObject::SetProperty(
+ symbols, i_name, symbol, NONE, i::STRICT).Assert();
+ }
+ return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+}
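Symbol::For and Symbol::ForApi add per-isolate, name-keyed symbol registries; repeated lookups with an equal name yield the same symbol, and ForApi consults a separate registry reserved for embedder use. A usage sketch, assuming an entered isolate (RegistryExample is an illustrative name, not V8 API):

#include <v8.h>

void RegistryExample(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::String> key = v8::String::NewFromUtf8(isolate, "my-key");
  v8::Local<v8::Symbol> a = v8::Symbol::For(isolate, key);
  v8::Local<v8::Symbol> b = v8::Symbol::For(isolate, key);
  // a and b denote the same symbol: the first call created it and
  // stored it in the registry, the second call found it there.
}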
+
+
+Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Private::New()");
LOG_API(i_isolate, "Private::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
- if (data != NULL) {
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- symbol->set_name(*name);
- }
+ if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
Local<Symbol> result = Utils::ToLocal(symbol);
return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
}
-Local<Number> v8::Number::New(double value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Number::New()");
- return Number::New(reinterpret_cast<Isolate*>(isolate), value);
+Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
+ i::Handle<i::String> part = i_isolate->factory()->private_api_string();
+ i::Handle<i::JSObject> privates =
+ i::Handle<i::JSObject>::cast(
+ i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
+ i::Handle<i::Object> symbol =
+ i::Object::GetPropertyOrElement(privates, i_name).ToHandleChecked();
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = i_isolate->factory()->NewPrivateSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
+ i::JSObject::SetProperty(
+ privates, i_name, symbol, NONE, i::STRICT).Assert();
+ }
+ Local<Symbol> result = Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+ return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
}
@@ -6365,30 +6221,6 @@ Local<Number> v8::Number::New(Isolate* isolate, double value) {
}
-Local<Integer> v8::Integer::New(int32_t value) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
- return v8::Integer::New(reinterpret_cast<Isolate*>(isolate), value);
-}
-
-
-Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Integer::NewFromUnsigned()");
- return Integer::NewFromUnsigned(reinterpret_cast<Isolate*>(isolate), value);
-}
-
-
-Local<Integer> v8::Integer::New(int32_t value, Isolate* isolate) {
- return Integer::New(isolate, value);
-}
-
-
-Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
- return Integer::NewFromUnsigned(isolate, value);
-}
-
-
Local<Integer> v8::Integer::New(Isolate* isolate, int32_t value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(internal_isolate->IsInitialized());
@@ -6407,7 +6239,7 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
ASSERT(internal_isolate->IsInitialized());
bool fits_into_int32_t = (value & (1 << 31)) == 0;
if (fits_into_int32_t) {
- return Integer::New(static_cast<int32_t>(value), isolate);
+ return Integer::New(isolate, static_cast<int32_t>(value));
}
ENTER_V8(internal_isolate);
i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
@@ -6415,23 +6247,6 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
}
-#ifdef DEBUG
-v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) {
- disallow_heap_allocation_ = new i::DisallowHeapAllocation();
-}
-
-
-v8::AssertNoGCScope::~AssertNoGCScope() {
- delete static_cast<i::DisallowHeapAllocation*>(disallow_heap_allocation_);
-}
-#endif
-
-
-void V8::IgnoreOutOfMemoryException() {
- EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
-}
-
-
bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
@@ -6439,10 +6254,10 @@ bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
NeanderArray listeners(isolate->factory()->message_listeners());
- NeanderObject obj(2);
+ NeanderObject obj(isolate, 2);
obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
obj.set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
- : *Utils::OpenHandle(*data));
+ : *Utils::OpenHandle(*data));
listeners.add(obj.value());
return true;
}
@@ -6468,9 +6283,9 @@ void V8::RemoveMessageListeners(MessageCallback that) {
void V8::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options) {
i::Isolate::Current()->SetCaptureStackTraceForUncaughtExceptions(
capture,
frame_limit,
@@ -6479,13 +6294,17 @@ void V8::SetCaptureStackTraceForUncaughtExceptions(
void V8::SetCounterFunction(CounterLookupCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ // TODO(svenpanne) The Isolate should really be a parameter.
+ if (isolate == NULL) return;
isolate->stats_table()->SetCounterFunction(callback);
}
void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ // TODO(svenpanne) The Isolate should really be a parameter.
+ if (isolate == NULL) return;
isolate->stats_table()->SetCreateHistogramFunction(callback);
isolate->InitializeLoggingAndCounters();
isolate->counters()->ResetHistograms();
@@ -6493,32 +6312,23 @@ void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ // TODO(svenpanne) The Isolate should really be a parameter.
+ if (isolate == NULL) return;
isolate->stats_table()->
SetAddHistogramSampleFunction(callback);
}
void V8::SetFailedAccessCheckCallbackFunction(
- FailedAccessCheckCallback callback) {
+ FailedAccessCheckCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
isolate->SetFailedAccessCheckCallback(callback);
}
-int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
- int64_t change_in_bytes) {
- i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
- return heap->AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
-}
-
-
-int64_t V8::AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized()) {
- return 0;
- }
- Isolate* isolate_ext = reinterpret_cast<Isolate*>(isolate);
- return isolate_ext->AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
+void Isolate::CollectAllGarbage(const char* gc_reason) {
+ reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
+ i::Heap::kNoGCFlags, gc_reason);
}
@@ -6684,24 +6494,9 @@ void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
}
-void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
- if (callback == NULL) return;
- i::V8::AddCallCompletedCallback(callback);
-}
-
-
-void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
- i::V8::RemoveCallCompletedCallback(callback);
-}
-
-
void V8::TerminateExecution(Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->TerminateExecution();
- } else {
- i::Isolate::GetDefaultIsolateStackGuard()->TerminateExecution();
- }
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->stack_guard()->RequestTerminateExecution();
}
@@ -6714,7 +6509,39 @@ bool V8::IsExecutionTerminating(Isolate* isolate) {
void V8::CancelTerminateExecution(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i_isolate->stack_guard()->CancelTerminateExecution();
+ i_isolate->stack_guard()->ClearTerminateExecution();
+ i_isolate->CancelTerminateExecution();
+}
+
+
+void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
+ i_isolate->set_api_interrupt_callback(callback);
+ i_isolate->set_api_interrupt_callback_data(data);
+ i_isolate->stack_guard()->RequestApiInterrupt();
+}
+
+
+void Isolate::ClearInterrupt() {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
+ i_isolate->stack_guard()->ClearApiInterrupt();
+ i_isolate->set_api_interrupt_callback(NULL);
+ i_isolate->set_api_interrupt_callback_data(NULL);
+}
+
+
+void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
+ CHECK(i::FLAG_expose_gc);
+ if (type == kMinorGarbageCollection) {
+ reinterpret_cast<i::Isolate*>(this)->heap()->CollectGarbage(
+ i::NEW_SPACE, "Isolate::RequestGarbageCollection",
+ kGCCallbackFlagForced);
+ } else {
+ ASSERT_EQ(kFullGarbageCollection, type);
+ reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask,
+ "Isolate::RequestGarbageCollection", kGCCallbackFlagForced);
+ }
}
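RequestInterrupt schedules a callback that the VM invokes at its next interrupt check, which is how an embedder can poke a busy isolate from another thread. A sketch (OnInterrupt and WatchScript are illustrative names):

#include <v8.h>

// Matches v8::InterruptCallback: void (*)(Isolate*, void*).
static void OnInterrupt(v8::Isolate* isolate, void* data) {
  *static_cast<bool*>(data) = true;  // the VM reached a safe point
}

void WatchScript(v8::Isolate* isolate, bool* observed) {
  isolate->RequestInterrupt(OnInterrupt, observed);
  // If the request becomes obsolete before it fires:
  isolate->ClearInterrupt();
}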
@@ -6732,9 +6559,9 @@ Isolate* Isolate::New() {
void Isolate::Dispose() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- if (!ApiCheck(!isolate->IsInUse(),
- "v8::Isolate::Dispose()",
- "Disposing the isolate that is entered by a thread.")) {
+ if (!Utils::ApiCheck(!isolate->IsInUse(),
+ "v8::Isolate::Dispose()",
+ "Disposing the isolate that is entered by a thread.")) {
return;
}
isolate->TearDown();
@@ -6753,6 +6580,59 @@ void Isolate::Exit() {
}
+Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
+ Isolate* isolate,
+ Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
+ : on_failure_(on_failure) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (on_failure_ == CRASH_ON_FAILURE) {
+ internal_ = reinterpret_cast<void*>(
+ new i::DisallowJavascriptExecution(i_isolate));
+ } else {
+ ASSERT_EQ(THROW_ON_FAILURE, on_failure);
+ internal_ = reinterpret_cast<void*>(
+ new i::ThrowOnJavascriptExecution(i_isolate));
+ }
+}
+
+
+Isolate::DisallowJavascriptExecutionScope::~DisallowJavascriptExecutionScope() {
+ if (on_failure_ == CRASH_ON_FAILURE) {
+ delete reinterpret_cast<i::DisallowJavascriptExecution*>(internal_);
+ } else {
+ delete reinterpret_cast<i::ThrowOnJavascriptExecution*>(internal_);
+ }
+}
+
+
+Isolate::AllowJavascriptExecutionScope::AllowJavascriptExecutionScope(
+ Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_assert_ = reinterpret_cast<void*>(
+ new i::AllowJavascriptExecution(i_isolate));
+ internal_throws_ = reinterpret_cast<void*>(
+ new i::NoThrowOnJavascriptExecution(i_isolate));
+}
+
+
+Isolate::AllowJavascriptExecutionScope::~AllowJavascriptExecutionScope() {
+ delete reinterpret_cast<i::AllowJavascriptExecution*>(internal_assert_);
+ delete reinterpret_cast<i::NoThrowOnJavascriptExecution*>(internal_throws_);
+}
+
+
+Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope(
+ Isolate* isolate)
+ : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
+ isolate_->handle_scope_implementer()->IncrementCallDepth();
+}
+
+
+Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
+ isolate_->handle_scope_implementer()->DecrementCallDepth();
+}
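These scopes wrap the internal per-thread assert scopes so embedders can fence off regions where script must not run. A sketch, assuming an entered isolate:

#include <v8.h>

void NoScriptRegion(v8::Isolate* isolate) {
  // Throw a JS exception instead of crashing if script runs here.
  v8::Isolate::DisallowJavascriptExecutionScope no_js(
      isolate,
      v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
  // ... embedder work that must not re-enter JS ...
  {
    // A nested allow-scope re-enables execution for a sub-region.
    v8::Isolate::AllowJavascriptExecutionScope allow_js(isolate);
    // ... controlled re-entry ...
  }
}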
+
+
void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (!isolate->IsInitialized()) {
@@ -6773,28 +6653,85 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
}
-String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
- : str_(NULL), length_(0) {
- i::Isolate* isolate = i::Isolate::Current();
- if (obj.IsEmpty()) return;
- ENTER_V8(isolate);
+void Isolate::SetEventLogger(LogEventCallback that) {
+ // Do not overwrite the event logger if we want to log explicitly.
+ if (i::FLAG_log_timer_events) return;
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_event_logger(that);
+}
+
+
+void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
+ if (callback == NULL) return;
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->AddCallCompletedCallback(callback);
+}
+
+
+void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->RemoveCallCompletedCallback(callback);
+}
+
+
+void Isolate::RunMicrotasks() {
+ reinterpret_cast<i::Isolate*>(this)->RunMicrotasks();
+}
+
+
+void Isolate::EnqueueMicrotask(Handle<Function> microtask) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->EnqueueMicrotask(Utils::OpenHandle(*microtask));
+}
+
+
+void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString();
- if (str.IsEmpty()) return;
- i::Handle<i::String> i_str = Utils::OpenHandle(*str);
- length_ = v8::Utf8Length(*i_str, isolate);
- str_ = i::NewArray<char>(length_ + 1);
- str->WriteUtf8(str_);
+ i::Handle<i::CallHandlerInfo> callback_info =
+ i::Handle<i::CallHandlerInfo>::cast(
+ isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE));
+ SET_FIELD_WRAPPED(callback_info, set_callback, microtask);
+ SET_FIELD_WRAPPED(callback_info, set_data, data);
+ isolate->EnqueueMicrotask(callback_info);
}
-String::Utf8Value::~Utf8Value() {
- i::DeleteArray(str_);
+void Isolate::SetAutorunMicrotasks(bool autorun) {
+ reinterpret_cast<i::Isolate*>(this)->set_autorun_microtasks(autorun);
+}
+
+
+bool Isolate::WillAutorunMicrotasks() const {
+ return reinterpret_cast<const i::Isolate*>(this)->autorun_microtasks();
+}
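Together these form the embedder-controlled microtask queue: disable autorun, enqueue work (a JS function, or a C callback wrapped in a CallHandlerInfo as above), and drain the queue explicitly. A sketch with an illustrative C callback:

#include <v8.h>

// Matches v8::MicrotaskCallback: void (*)(void*).
static void MyTask(void* data) {
  ++*static_cast<int*>(data);  // runs when the queue is drained
}

void QueueWork(v8::Isolate* isolate, int* counter) {
  isolate->SetAutorunMicrotasks(false);  // take over queue control
  isolate->EnqueueMicrotask(MyTask, counter);
  // ... later, at a point of the embedder's choosing:
  isolate->RunMicrotasks();              // invokes MyTask(counter)
}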
+
+
+void Isolate::SetCounterFunction(CounterLookupCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->stats_table()->SetCounterFunction(callback);
+ isolate->InitializeLoggingAndCounters();
+ isolate->counters()->ResetCounters();
}
-String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
+void Isolate::SetCreateHistogramFunction(CreateHistogramCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->stats_table()->SetCreateHistogramFunction(callback);
+ isolate->InitializeLoggingAndCounters();
+ isolate->counters()->ResetHistograms();
+}
+
+
+void Isolate::SetAddHistogramSampleFunction(
+ AddHistogramSampleCallback callback) {
+ reinterpret_cast<i::Isolate*>(this)
+ ->stats_table()
+ ->SetAddHistogramSampleFunction(callback);
+}
+
+
+String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
if (obj.IsEmpty()) return;
@@ -6803,14 +6740,14 @@ String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
TryCatch try_catch;
Handle<String> str = obj->ToString();
if (str.IsEmpty()) return;
- length_ = str->Utf8Length();
+ i::Handle<i::String> i_str = Utils::OpenHandle(*str);
+ length_ = v8::Utf8Length(*i_str, isolate);
str_ = i::NewArray<char>(length_ + 1);
str->WriteUtf8(str_);
- ASSERT(i::String::NonAsciiStart(str_, length_) >= length_);
}
-String::AsciiValue::~AsciiValue() {
+String::Utf8Value::~Utf8Value() {
i::DeleteArray(str_);
}
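AsciiValue is gone and Utf8Value now measures via the internal Utf8Length helper, but the embedder-facing contract is unchanged: a NUL-terminated UTF-8 copy, or NULL if conversion failed. Typical usage:

#include <cstdio>
#include <v8.h>

void PrintValue(v8::Handle<v8::Value> value) {
  v8::String::Utf8Value utf8(value);   // converts via ToString()
  if (*utf8 != NULL) printf("%s\n", *utf8);
}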
@@ -6923,72 +6860,44 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
// --- D e b u g S u p p o r t ---
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
+bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener2()");
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener2()", return false);
+ EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
+ ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
if (that != NULL) {
foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
}
- isolate->debugger()->SetEventListener(foreign,
- Utils::OpenHandle(*data, true));
- return true;
-}
-
-
-bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
- Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8(isolate);
- isolate->debugger()->SetEventListener(Utils::OpenHandle(*that),
- Utils::OpenHandle(*data, true));
+ isolate->debug()->SetEventListener(foreign,
+ Utils::OpenHandle(*data, true));
return true;
}
void Debug::DebugBreak(Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->DebugBreak();
- } else {
- i::Isolate::GetDefaultIsolateStackGuard()->DebugBreak();
- }
+ reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->RequestDebugBreak();
}
void Debug::CancelDebugBreak(Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->stack_guard()->Continue(i::DEBUGBREAK);
- } else {
- i::Isolate::GetDefaultIsolateStackGuard()->Continue(i::DEBUGBREAK);
- }
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->stack_guard()->ClearDebugBreak();
}
-void Debug::DebugBreakForCommand(ClientData* data, Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debugger()->EnqueueDebugCommand(data);
- } else {
- i::Isolate::GetDefaultIsolateDebugger()->EnqueueDebugCommand(data);
- }
+void Debug::DebugBreakForCommand(Isolate* isolate, ClientData* data) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debug()->EnqueueDebugCommand(data);
}
-void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
+void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
ENTER_V8(isolate);
- isolate->debugger()->SetMessageHandler(handler);
+ isolate->debug()->SetMessageHandler(handler);
}
@@ -6997,64 +6906,28 @@ void Debug::SendCommand(Isolate* isolate,
int length,
ClientData* client_data) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debugger()->ProcessCommand(
+ internal_isolate->debug()->EnqueueCommandMessage(
i::Vector<const uint16_t>(command, length), client_data);
}
-void Debug::SendCommand(const uint16_t* command, int length,
- ClientData* client_data,
- Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debugger()->ProcessCommand(
- i::Vector<const uint16_t>(command, length), client_data);
- } else {
- i::Isolate::GetDefaultIsolateDebugger()->ProcessCommand(
- i::Vector<const uint16_t>(command, length), client_data);
- }
-}
-
-
-void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
- int period) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetHostDispatchHandler");
- ENTER_V8(isolate);
- isolate->debugger()->SetHostDispatchHandler(
- handler, i::TimeDelta::FromMilliseconds(period));
-}
-
-
-void Debug::SetDebugMessageDispatchHandler(
- DebugMessageDispatchHandler handler, bool provide_locker) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate,
- "v8::Debug::SetDebugMessageDispatchHandler");
- ENTER_V8(isolate);
- isolate->debugger()->SetDebugMessageDispatchHandler(
- handler, provide_locker);
-}
-
-
Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
v8::Handle<v8::Value> data) {
i::Isolate* isolate = i::Isolate::Current();
if (!isolate->IsInitialized()) return Local<Value>();
ON_BAILOUT(isolate, "v8::Debug::Call()", return Local<Value>());
ENTER_V8(isolate);
- i::Handle<i::Object> result;
+ i::MaybeHandle<i::Object> maybe_result;
EXCEPTION_PREAMBLE(isolate);
if (data.IsEmpty()) {
- result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
- isolate->factory()->undefined_value(),
- &has_pending_exception);
+ maybe_result = isolate->debug()->Call(
+ Utils::OpenHandle(*fun), isolate->factory()->undefined_value());
} else {
- result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
- Utils::OpenHandle(*data),
- &has_pending_exception);
+ maybe_result = isolate->debug()->Call(
+ Utils::OpenHandle(*fun), Utils::OpenHandle(*data));
}
+ i::Handle<i::Object> result;
+ has_pending_exception = !maybe_result.ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
}
@@ -7067,36 +6940,30 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
ENTER_V8(isolate);
v8::EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Debug* isolate_debug = isolate->debug();
- isolate_debug->Load();
- i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
- i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("MakeMirror"));
- i::Handle<i::Object> fun_obj = i::GetProperty(isolate, debug, name);
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
- v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
- const int kArgc = 1;
- v8::Handle<v8::Value> argv[kArgc] = { obj };
EXCEPTION_PREAMBLE(isolate);
- v8::Local<v8::Value> result =
- v8_fun->Call(Utils::ToLocal(debug), kArgc, argv);
+ has_pending_exception = !isolate_debug->Load();
+ v8::Local<v8::Value> result;
+ if (!has_pending_exception) {
+ i::Handle<i::JSObject> debug(
+ isolate_debug->debug_context()->global_object());
+ i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("MakeMirror"));
+ i::Handle<i::Object> fun_obj =
+ i::Object::GetProperty(debug, name).ToHandleChecked();
+ i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
+ v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
+ const int kArgc = 1;
+ v8::Handle<v8::Value> argv[kArgc] = { obj };
+ result = v8_fun->Call(Utils::ToLocal(debug), kArgc, argv);
+ has_pending_exception = result.IsEmpty();
+ }
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return scope.Escape(result);
}
-bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
- return i::Isolate::Current()->debugger()->StartAgent(name, port,
- wait_for_connection);
-}
-
-
-void Debug::DisableAgent() {
- return i::Isolate::Current()->debugger()->StopAgent();
-}
-
-
void Debug::ProcessDebugMessages() {
- i::Execution::ProcessDebugMessages(i::Isolate::Current(), true);
+ i::Isolate::Current()->debug()->ProcessDebugMessages(true);
}
@@ -7104,37 +6971,30 @@ Local<Context> Debug::GetDebugContext() {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()");
ENTER_V8(isolate);
- return Utils::ToLocal(i::Isolate::Current()->debugger()->GetDebugContext());
+ return Utils::ToLocal(i::Isolate::Current()->debug()->GetDebugContext());
}
-void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) {
- // If no isolate is supplied, use the default isolate.
- i::Debugger* debugger;
- if (isolate != NULL) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- debugger = internal_isolate->debugger();
- } else {
- debugger = i::Isolate::GetDefaultIsolateDebugger();
- }
- debugger->set_live_edit_enabled(enable);
+void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debug()->set_live_edit_enabled(enable);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
Handle<String> CpuProfileNode::GetFunctionName() const {
i::Isolate* isolate = i::Isolate::Current();
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
+ i::Handle<i::String> name =
+ isolate->factory()->InternalizeUtf8String(entry->name());
if (!entry->has_name_prefix()) {
- return ToApiHandle<String>(
- isolate->factory()->InternalizeUtf8String(entry->name()));
+ return ToApiHandle<String>(name);
} else {
- return ToApiHandle<String>(isolate->factory()->NewConsString(
+ // We do not expect this to fail. Change this if it does.
+ i::Handle<i::String> cons = isolate->factory()->NewConsString(
isolate->factory()->InternalizeUtf8String(entry->name_prefix()),
- isolate->factory()->InternalizeUtf8String(entry->name())));
+ name).ToHandleChecked();
+ return ToApiHandle<String>(cons);
}
}
@@ -7203,15 +7063,6 @@ void CpuProfile::Delete() {
i::CpuProfiler* profiler = isolate->cpu_profiler();
ASSERT(profiler != NULL);
profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
- if (profiler->GetProfilesCount() == 0) {
- // If this was the last profile, clean up all accessory data as well.
- profiler->DeleteAllProfiles();
- }
-}
-
-
-unsigned CpuProfile::GetUid() const {
- return reinterpret_cast<const i::CpuProfile*>(this)->uid();
}
@@ -7235,15 +7086,21 @@ const CpuProfileNode* CpuProfile::GetSample(int index) const {
}
+int64_t CpuProfile::GetSampleTimestamp(int index) const {
+ const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+ return (profile->sample_timestamp(index) - i::TimeTicks()).InMicroseconds();
+}
+
+
int64_t CpuProfile::GetStartTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return (profile->start_time() - i::Time::UnixEpoch()).InMicroseconds();
+ return (profile->start_time() - i::TimeTicks()).InMicroseconds();
}
int64_t CpuProfile::GetEndTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return (profile->end_time() - i::Time::UnixEpoch()).InMicroseconds();
+ return (profile->end_time() - i::TimeTicks()).InMicroseconds();
}
@@ -7252,11 +7109,6 @@ int CpuProfile::GetSamplesCount() const {
}
-int CpuProfiler::GetProfileCount() {
- return reinterpret_cast<i::CpuProfiler*>(this)->GetProfilesCount();
-}
-
-
void CpuProfiler::SetSamplingInterval(int us) {
ASSERT(us >= 0);
return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
@@ -7264,27 +7116,26 @@ void CpuProfiler::SetSamplingInterval(int us) {
}
-const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
- return reinterpret_cast<const CpuProfile*>(
- reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(index));
+void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) {
+ reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+ *Utils::OpenHandle(*title), record_samples);
}
void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
- reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
- *Utils::OpenHandle(*title), record_samples);
+ StartProfiling(title, record_samples);
}
-const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
- return reinterpret_cast<const CpuProfile*>(
+CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
+ return reinterpret_cast<CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
*Utils::OpenHandle(*title)));
}
-void CpuProfiler::DeleteAllCpuProfiles() {
- reinterpret_cast<i::CpuProfiler*>(this)->DeleteAllProfiles();
+const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
+ return StopProfiling(title);
}
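StartProfiling/StopProfiling are the new names; the old *CpuProfiling entry points survive only as the one-line compatibility wrappers above. A sketch of a single profiling session (ProfileOnce is an illustrative name):

#include <v8.h>

void ProfileOnce(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  v8::Local<v8::String> title = v8::String::NewFromUtf8(isolate, "startup");
  profiler->StartProfiling(title, true /* record_samples */);
  // ... run the code of interest ...
  v8::CpuProfile* profile = profiler->StopProfiling(title);
  if (profile != NULL) profile->Delete();  // per-profile cleanup only now
}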
@@ -7320,11 +7171,11 @@ Handle<Value> HeapGraphEdge::GetName() const {
case i::HeapGraphEdge::kInternal:
case i::HeapGraphEdge::kProperty:
case i::HeapGraphEdge::kShortcut:
+ case i::HeapGraphEdge::kWeak:
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(edge->name()));
case i::HeapGraphEdge::kElement:
case i::HeapGraphEdge::kHidden:
- case i::HeapGraphEdge::kWeak:
return ToApiHandle<Number>(
isolate->factory()->NewNumberFromInt(edge->index()));
default: UNREACHABLE();
@@ -7369,6 +7220,13 @@ SnapshotObjectId HeapGraphNode::GetId() const {
int HeapGraphNode::GetSelfSize() const {
+ size_t size = ToInternal(this)->self_size();
+ CHECK(size <= static_cast<size_t>(internal::kMaxInt));
+ return static_cast<int>(size);
+}
+
+
+size_t HeapGraphNode::GetShallowSize() const {
return ToInternal(this)->self_size();
}
@@ -7384,15 +7242,6 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
}
-v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
- i::Isolate* isolate = i::Isolate::Current();
- i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
- return !object.is_null() ?
- ToApiHandle<Value>(object) :
- ToApiHandle<Value>(isolate->factory()->undefined_value());
-}
-
-
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
return const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
@@ -7451,15 +7300,12 @@ SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
- ApiCheck(format == kJSON,
- "v8::HeapSnapshot::Serialize",
- "Unknown serialization format");
- ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
- "v8::HeapSnapshot::Serialize",
- "Unsupported output encoding");
- ApiCheck(stream->GetChunkSize() > 0,
- "v8::HeapSnapshot::Serialize",
- "Invalid stream chunk size");
+ Utils::ApiCheck(format == kJSON,
+ "v8::HeapSnapshot::Serialize",
+ "Unknown serialization format");
+ Utils::ApiCheck(stream->GetChunkSize() > 0,
+ "v8::HeapSnapshot::Serialize",
+ "Invalid stream chunk size");
i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
serializer.Serialize(stream);
}
@@ -7482,6 +7328,19 @@ SnapshotObjectId HeapProfiler::GetObjectId(Handle<Value> value) {
}
+Handle<Value> HeapProfiler::FindObjectById(SnapshotObjectId id) {
+ i::Handle<i::Object> obj =
+ reinterpret_cast<i::HeapProfiler*>(this)->FindHeapObjectById(id);
+ if (obj.is_null()) return Local<Value>();
+ return Utils::ToLocal(obj);
+}
+
+
+void HeapProfiler::ClearObjectIds() {
+ reinterpret_cast<i::HeapProfiler*>(this)->ClearHeapObjectMap();
+}
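FindObjectById is the inverse of GetObjectId, and ClearObjectIds drops the id-to-object map once the embedder is done correlating snapshots with live values. A sketch (IdRoundTrip is an illustrative name):

#include <v8.h>

void IdRoundTrip(v8::Isolate* isolate, v8::Handle<v8::Value> value) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  v8::SnapshotObjectId id = profiler->GetObjectId(value);
  v8::Handle<v8::Value> again = profiler->FindObjectById(id);
  // 'again' is empty if the object was collected in the meantime.
  profiler->ClearObjectIds();  // release the id <-> object mapping
}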
+
+
const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
Handle<String> title,
ActivityControl* control,
@@ -7532,16 +7391,6 @@ void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
}
-void HeapProfiler::StartRecordingHeapAllocations() {
- reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking(true);
-}
-
-
-void HeapProfiler::StopRecordingHeapAllocations() {
- reinterpret_cast<i::HeapProfiler*>(this)->StopHeapObjectsTracking();
-}
-
-
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
@@ -7620,10 +7469,9 @@ void HandleScopeImplementer::FreeThreadResources() {
char* HandleScopeImplementer::ArchiveThread(char* storage) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
handle_scope_data_ = *current;
- OS::MemCopy(storage, this, sizeof(*this));
+ MemCopy(storage, this, sizeof(*this));
ResetAfterArchive();
current->Initialize();
@@ -7638,7 +7486,7 @@ int HandleScopeImplementer::ArchiveSpacePerThread() {
char* HandleScopeImplementer::RestoreThread(char* storage) {
- OS::MemCopy(this, storage, sizeof(*this));
+ MemCopy(this, storage, sizeof(*this));
*isolate_->handle_scope_data() = handle_scope_data_;
return storage + ArchiveSpacePerThread();
}
@@ -7682,8 +7530,7 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
handle_scope_data_ = *current;
IterateThis(v);
}
diff --git a/chromium/v8/src/api.h b/chromium/v8/src/api.h
index 5f19380e65b..7a688caeee0 100644
--- a/chromium/v8/src/api.h
+++ b/chromium/v8/src/api.h
@@ -1,41 +1,17 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_API_H_
#define V8_API_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "../include/v8-testing.h"
-#include "apiutils.h"
-#include "contexts.h"
-#include "factory.h"
-#include "isolate.h"
-#include "list-inl.h"
+#include "include/v8-testing.h"
+#include "src/contexts.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/list-inl.h"
namespace v8 {
@@ -56,7 +32,7 @@ class Consts {
// env-independent JSObjects used by the api.
class NeanderObject {
public:
- explicit NeanderObject(int size);
+ explicit NeanderObject(v8::internal::Isolate* isolate, int size);
explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
explicit inline NeanderObject(v8::internal::Object* obj);
inline v8::internal::Object* get(int index);
@@ -72,7 +48,7 @@ class NeanderObject {
// array abstraction built on neander-objects.
class NeanderArray {
public:
- NeanderArray();
+ explicit NeanderArray(v8::internal::Isolate* isolate);
explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
inline v8::internal::Handle<v8::internal::JSObject> value() {
return obj_.value();
@@ -184,11 +160,12 @@ class RegisteredExtension {
V(DataView, JSDataView) \
V(String, String) \
V(Symbol, Symbol) \
- V(Script, Object) \
+ V(Script, JSFunction) \
+ V(UnboundScript, SharedFunctionInfo) \
V(Function, JSFunction) \
- V(Message, JSObject) \
+ V(Message, JSMessageObject) \
V(Context, Context) \
- V(External, Foreign) \
+ V(External, Object) \
V(StackTrace, JSArray) \
V(StackFrame, JSObject) \
V(DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
@@ -196,7 +173,12 @@ class RegisteredExtension {
class Utils {
public:
- static bool ReportApiFailure(const char* location, const char* message);
+ static inline bool ApiCheck(bool condition,
+ const char* location,
+ const char* message) {
+ if (!condition) Utils::ReportApiFailure(location, message);
+ return condition;
+ }
static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
@@ -303,6 +285,9 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
static inline v8::internal::Handle<To> OpenHandle(v8::Local<From> handle) {
return OpenHandle(*handle);
}
+
+ private:
+ static void ReportApiFailure(const char* location, const char* message);
};
@@ -337,11 +322,11 @@ inline v8::Local<T> ToApiHandle(
}
-#define MAKE_TO_LOCAL_TYPED_ARRAY(TypedArray, typeConst) \
- Local<v8::TypedArray> Utils::ToLocal##TypedArray( \
+#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
+ Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
- ASSERT(obj->type() == typeConst); \
- return Convert<v8::internal::JSTypedArray, v8::TypedArray>(obj); \
+ ASSERT(obj->type() == kExternal##Type##Array); \
+ return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
}
@@ -358,15 +343,7 @@ MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Uint8ClampedArray, kExternalPixelArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Int8Array, kExternalByteArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Uint16Array, kExternalUnsignedShortArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Int16Array, kExternalShortArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Uint32Array, kExternalUnsignedIntArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Int32Array, kExternalIntArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Float32Array, kExternalFloatArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Float64Array, kExternalDoubleArray)
+TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
@@ -393,8 +370,8 @@ MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
const v8::From* that, bool allow_empty_handle) { \
EXTRA_CHECK(allow_empty_handle || that != NULL); \
EXTRA_CHECK(that == NULL || \
- !(*reinterpret_cast<v8::internal::To**>( \
- const_cast<v8::From*>(that)))->IsFailure()); \
+ (*reinterpret_cast<v8::internal::Object**>( \
+ const_cast<v8::From*>(that)))->Is##To()); \
return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
}
@@ -543,7 +520,8 @@ class HandleScopeImplementer {
inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Handle<Context> context);
- inline bool LeaveContext(Handle<Context> context);
+ inline void LeaveContext();
+ inline bool LastEnteredContextWas(Handle<Context> context);
// Returns the last entered context or an empty handle if no
// contexts have been entered.
@@ -599,7 +577,7 @@ class HandleScopeImplementer {
int call_depth_;
Object** last_handle_before_deferred_block_;
// This is only used for threading support.
- v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
+ HandleScopeData handle_scope_data_;
void IterateThis(ObjectVisitor* v);
char* RestoreThreadHelper(char* from);
@@ -635,12 +613,13 @@ void HandleScopeImplementer::EnterContext(Handle<Context> context) {
}
-bool HandleScopeImplementer::LeaveContext(Handle<Context> context) {
- if (entered_contexts_.is_empty()) return false;
- // TODO(dcarney): figure out what's wrong here
- // if (entered_contexts_.last() != *context) return false;
+void HandleScopeImplementer::LeaveContext() {
entered_contexts_.RemoveLast();
- return true;
+}
+
+
+bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
+ return !entered_contexts_.is_empty() && entered_contexts_.last() == *context;
}
diff --git a/chromium/v8/src/apinatives.js b/chromium/v8/src/apinatives.js
index 6431901bf23..d4835affe99 100644
--- a/chromium/v8/src/apinatives.js
+++ b/chromium/v8/src/apinatives.js
@@ -1,29 +1,8 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
// This file contains infrastructure used by the API. See
// v8natives.js for an explanation of how these files are processed and
@@ -71,31 +50,25 @@ function InstantiateFunction(data, name) {
(serialNumber in cache) && (cache[serialNumber] != kUninitialized);
if (!isFunctionCached) {
try {
- var fun = %CreateApiFunction(data);
- if (name) %FunctionSetName(fun, name);
var flags = %GetTemplateField(data, kApiFlagOffset);
- var doNotCache = flags & (1 << kDoNotCacheBit);
- if (!doNotCache) cache[serialNumber] = fun;
- if (flags & (1 << kRemovePrototypeBit)) {
- %FunctionRemovePrototype(fun);
- } else {
- var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
- // Note: Do not directly use an object template as a condition, our
- // internal ToBoolean doesn't handle that!
- fun.prototype = typeof prototype === 'undefined' ?
- {} : Instantiate(prototype);
- if (flags & (1 << kReadOnlyPrototypeBit)) {
- %FunctionSetReadOnlyPrototype(fun);
- }
- %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
+ var prototype;
+ if (!(flags & (1 << kRemovePrototypeBit))) {
+ var template = %GetTemplateField(data, kApiPrototypeTemplateOffset);
+ prototype = typeof template === 'undefined'
+ ? {} : Instantiate(template);
+
var parent = %GetTemplateField(data, kApiParentTemplateOffset);
// Note: Do not directly use a function template as a condition; our
// internal ToBoolean doesn't handle that!
- if (!(typeof parent === 'undefined')) {
+ if (typeof parent !== 'undefined') {
var parent_fun = Instantiate(parent);
- %SetPrototype(fun.prototype, parent_fun.prototype);
+ %SetPrototype(prototype, parent_fun.prototype);
}
}
+ var fun = %CreateApiFunction(data, prototype);
+ if (name) %FunctionSetName(fun, name);
+ var doNotCache = flags & (1 << kDoNotCacheBit);
+ if (!doNotCache) cache[serialNumber] = fun;
ConfigureTemplateInstance(fun, data);
if (doNotCache) return fun;
} catch (e) {
diff --git a/chromium/v8/src/apiutils.h b/chromium/v8/src/apiutils.h
deleted file mode 100644
index 07655856499..00000000000
--- a/chromium/v8/src/apiutils.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_APIUTILS_H_
-#define V8_APIUTILS_H_
-
-namespace v8 {
-class ImplementationUtilities {
- public:
- static int GetNameCount(ExtensionConfiguration* that) {
- return that->name_count_;
- }
-
- static const char** GetNames(ExtensionConfiguration* that) {
- return that->names_;
- }
-
- // Introduce an alias for the handle scope data to allow non-friends
- // to access the HandleScope data.
- typedef v8::HandleScope::Data HandleScopeData;
-};
-
-} // namespace v8
-
-#endif // V8_APIUTILS_H_
diff --git a/chromium/v8/src/arguments.cc b/chromium/v8/src/arguments.cc
index 205da7c68a6..f4550ae9a97 100644
--- a/chromium/v8/src/arguments.cc
+++ b/chromium/v8/src/arguments.cc
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "arguments.h"
-
-#include "vm-state-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "src/arguments.h"
+
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/arguments.h b/chromium/v8/src/arguments.h
index b7137c3175a..320b6ad6d7d 100644
--- a/chromium/v8/src/arguments.h
+++ b/chromium/v8/src/arguments.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARGUMENTS_H_
#define V8_ARGUMENTS_H_
-#include "allocation.h"
+#include "src/allocation.h"
namespace v8 {
namespace internal {
@@ -44,6 +21,9 @@ namespace internal {
// Object* Runtime_function(Arguments args) {
// ... use args[i] here ...
// }
+//
+// Note that length_ (whose value is in the integer range) is defined
+// as intptr_t to provide endian-neutrality on 64-bit archs.
class Arguments BASE_EMBEDDED {
public:
@@ -73,12 +53,12 @@ class Arguments BASE_EMBEDDED {
}
// Get the total number of arguments including the receiver.
- int length() const { return length_; }
+ int length() const { return static_cast<int>(length_); }
Object** arguments() { return arguments_; }
private:
- int length_;
+ intptr_t length_;
Object** arguments_;
};
@@ -299,18 +279,23 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
#endif
-#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
-Type Name(int args_length, Object** args_object, Isolate* isolate)
+#define DECLARE_RUNTIME_FUNCTION(Name) \
+Object* Name(int args_length, Object** args_object, Isolate* isolate)
-#define RUNTIME_FUNCTION(Type, Name) \
-static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
-Type Name(int args_length, Object** args_object, Isolate* isolate) { \
- CLOBBER_DOUBLE_REGISTERS(); \
- Arguments args(args_length, args_object); \
- return __RT_impl_##Name(args, isolate); \
-} \
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
+static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
+Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ CLOBBER_DOUBLE_REGISTERS(); \
+ Arguments args(args_length, args_object); \
+ return __RT_impl_##Name(args, isolate); \
+} \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
+
+#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
+#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
+ RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
+
#define RUNTIME_ARGUMENTS(isolate, args) \
args.length(), args.arguments(), isolate
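With Object* folded into RUNTIME_FUNCTION (and a _RETURN_PAIR variant for ObjectPair results), a runtime function body now reads as below. The function name and the argument-conversion macro follow the conventions of runtime.cc; this is an illustrative sketch, not part of the patch:

// Expands to an Object*-returning trampoline plus an inlined
// __RT_impl_Runtime_Example(Arguments, Isolate*) implementation.
RUNTIME_FUNCTION(Runtime_Example) {
  HandleScope scope(isolate);
  ASSERT(args.length() == 1);
  CONVERT_ARG_HANDLE_CHECKED(JSObject, receiver, 0);
  return *receiver;  // unwrap the handle to a raw Object*
}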
diff --git a/chromium/v8/src/arm/OWNERS b/chromium/v8/src/arm/OWNERS
new file mode 100644
index 00000000000..906a5ce6418
--- /dev/null
+++ b/chromium/v8/src/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/chromium/v8/src/arm/assembler-arm-inl.h b/chromium/v8/src/arm/assembler-arm-inl.h
index 3399958ee3d..d09e700e82e 100644
--- a/chromium/v8/src/arm/assembler-arm-inl.h
+++ b/chromium/v8/src/arm/assembler-arm-inl.h
@@ -37,16 +37,19 @@
#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
#define V8_ARM_ASSEMBLER_ARM_INL_H_
-#include "arm/assembler-arm.h"
+#include "src/arm/assembler-arm.h"
-#include "cpu.h"
-#include "debug.h"
+#include "src/cpu.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
+bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
+
+
int Register::NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
@@ -88,7 +91,7 @@ DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
}
-void RelocInfo::apply(intptr_t delta) {
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
@@ -101,7 +104,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -109,7 +112,28 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_pointer_address_at(pc_);
+ if (FLAG_enable_ool_constant_pool ||
+ Assembler::IsMovW(Memory::int32_at(pc_))) {
+ // We return the PC for the ool constant pool since this function is used
+ // by the serializer and expects the address to reside within the code object.
+ return reinterpret_cast<Address>(pc_);
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ ASSERT(IsInConstantPool());
+ if (FLAG_enable_ool_constant_pool) {
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_constant_pool_address_at(pc_,
+ host_->constant_pool());
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
}
@@ -118,10 +142,13 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@@ -131,22 +158,26 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
+ Assembler::target_address_at(pc_, host_)));
}
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -157,7 +188,7 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -168,9 +199,11 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
void RelocInfo::set_target_runtime_entry(Address target,
- WriteBarrierMode mode) {
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsRuntimeEntry(rmode_));
- if (target_address() != target) set_target_address(target, mode);
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
@@ -187,11 +220,13 @@ Cell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -200,7 +235,7 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
}
-static const int kNoCodeAgeSequenceLength = 3;
+static const int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
@@ -212,15 +247,16 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
+ Memory::Address_at(pc_ +
+ (kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
}
-void RelocInfo::set_code_age_stub(Code* stub) {
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
+ Memory::Address_at(pc_ +
+ (kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
stub->instruction_start();
}
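
The two formulations address the same byte; only the constant's unit changed from instructions to bytes (kInstrSize is 4 on ARM). A quick standalone check:

    #include <cassert>

    int main() {
      const int kInstrSize = 4;
      const int old_constant = 3;               // sequence length in instructions
      const int new_constant = 3 * kInstrSize;  // sequence length in bytes
      assert(kInstrSize * (old_constant - 1) ==
             new_constant - kInstrSize);        // both give an 8-byte offset
      return 0;
    }
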
@@ -268,7 +304,7 @@ void RelocInfo::WipeOut() {
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, NULL);
+ Assembler::set_target_address_at(pc_, host_, NULL);
}
@@ -301,14 +337,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
@@ -328,14 +362,12 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
@@ -402,7 +434,18 @@ Address Assembler::target_pointer_address_at(Address pc) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool) {
+ ASSERT(constant_pool != NULL);
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Instr instr = Memory::int32_at(pc);
+ return reinterpret_cast<Address>(constant_pool) +
+ GetLdrRegisterImmediateOffset(instr);
+}
+
+
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
@@ -410,9 +453,14 @@ Address Assembler::target_address_at(Address pc) {
return reinterpret_cast<Address>(
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool));
+ } else {
+ ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(target_pointer_address_at(pc));
}
- ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- return Memory::Address_at(target_pointer_address_at(pc));
}
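
A standalone sketch of the movw/movt decode path above, reassembling a 32-bit target from the two 16-bit instruction immediates (the imm4:imm12 extraction mirrors ImmedMovwMovtValue; the instruction words are hand-assembled ARMv7 encodings):

    #include <cassert>
    #include <cstdint>

    static uint32_t ImmedMovwMovtValue(uint32_t instr) {
      // imm4 (bits 19:16) and imm12 (bits 11:0) form the 16-bit immediate.
      return ((instr >> 4) & 0xf000u) | (instr & 0x0fffu);
    }

    int main() {
      uint32_t movw = 0xe30b0eefu;  // movw r0, #0xbeef
      uint32_t movt = 0xe34d0eadu;  // movt r0, #0xdead
      uint32_t target =
          (ImmedMovwMovtValue(movt) << 16) | ImmedMovwMovtValue(movw);
      assert(target == 0xdeadbeefu);
      return 0;
    }
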
@@ -430,7 +478,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr)) {
+ if (IsLdrPcImmediateOffset(candidate_instr) |
+ IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
}
candidate = pc - 3 * Assembler::kInstrSize;
@@ -441,7 +490,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
Address Assembler::return_address_from_call_start(Address pc) {
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+ if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
+ IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
return pc + kInstrSize * 2;
} else {
ASSERT(IsMovW(Memory::int32_at(pc)));
@@ -452,8 +502,12 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+ Address constant_pool_entry, Code* code, Address target) {
+ if (FLAG_enable_ool_constant_pool) {
+ set_target_address_at(constant_pool_entry, code, target);
+ } else {
+ Memory::Address_at(constant_pool_entry) = target;
+ }
}
@@ -463,22 +517,31 @@ static Instr EncodeMovwImmediate(uint32_t immediate) {
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate) {
+ instruction &= ~EncodeMovwImmediate(0xffff);
+ return instruction | EncodeMovwImmediate(immediate);
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
uint32_t immediate = reinterpret_cast<uint32_t>(target);
- uint32_t intermediate = instr_ptr[0];
- intermediate &= ~EncodeMovwImmediate(0xFFFF);
- intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
- instr_ptr[0] = intermediate;
- intermediate = instr_ptr[1];
- intermediate &= ~EncodeMovwImmediate(0xFFFF);
- intermediate |= EncodeMovwImmediate(immediate >> 16);
- instr_ptr[1] = intermediate;
+ instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF);
+ instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16);
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- CPU::FlushICache(pc, 2 * kInstrSize);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc, 2 * kInstrSize);
+ }
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool)) = target;
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(target_pointer_address_at(pc)) = target;
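
The PatchMovwImmediate helper factors out the clear-then-splice step previously written out twice. A standalone sketch with the same field layout (imm4 in bits 19:16, imm12 in bits 11:0):

    #include <cassert>
    #include <cstdint>

    static uint32_t EncodeMovwImmediate(uint32_t imm) {
      assert(imm < 0x10000u);
      return ((imm & 0xf000u) << 4) | (imm & 0x0fffu);
    }

    static uint32_t PatchMovwImmediate(uint32_t instruction, uint32_t imm) {
      instruction &= ~EncodeMovwImmediate(0xffffu);   // clear the old immediate
      return instruction | EncodeMovwImmediate(imm);  // splice in the new one
    }

    int main() {
      uint32_t movw = 0xe3000000u;  // movw r0, #0
      uint32_t patched = PatchMovwImmediate(movw, 0xbeefu);
      assert(PatchMovwImmediate(patched, 0) == movw);  // round-trips cleanly
      return 0;
    }
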
diff --git a/chromium/v8/src/arm/assembler-arm.cc b/chromium/v8/src/arm/assembler-arm.cc
index 4171f7dcdf8..68738a7f3bb 100644
--- a/chromium/v8/src/arm/assembler-arm.cc
+++ b/chromium/v8/src/arm/assembler-arm.cc
@@ -34,32 +34,17 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm-inl.h"
-#include "macro-assembler.h"
-#include "serialize.h"
+#include "src/arm/assembler-arm-inl.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
-unsigned CpuFeatures::cross_compile_ = 0;
-unsigned CpuFeatures::cache_line_size_ = 64;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
@@ -67,19 +52,16 @@ ExternalReference ExternalReference::cpu_features() {
static unsigned CpuFeaturesImpliedByCompiler() {
unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
- if (FLAG_enable_armv7) {
- answer |= 1u << ARMv7;
- }
+ if (FLAG_enable_armv7) answer |= 1u << ARMv7;
#endif // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
- if (FLAG_enable_vfp3) {
- answer |= 1u << VFP3 | 1u << ARMv7;
- }
+ if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
#endif // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
- if (FLAG_enable_32dregs) {
- answer |= 1u << VFP32DREGS;
- }
+ if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
+#endif // CAN_USE_VFP32DREGS
+#ifdef CAN_USE_NEON
+ if (FLAG_enable_neon) answer |= 1u << NEON;
-#endif // CAN_USE_VFP32DREGS
+#endif // CAN_USE_NEON
if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
answer |= 1u << UNALIGNED_ACCESSES;
@@ -89,122 +71,58 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
-const char* DwVfpRegister::AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code())
- index += kNumReservedRegisters;
-
- return VFPRegisters::Name(index, true);
-}
-
-
-void CpuFeatures::Probe() {
- uint64_t standard_features = static_cast<unsigned>(
- OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
- ASSERT(supported_ == 0 || supported_ == standard_features);
-#ifdef DEBUG
- initialized_ = true;
-#endif
-
- // Get the features implied by the OS and the compiler settings. This is the
- // minimal set of features which is also alowed for generated code in the
- // snapshot.
- supported_ |= standard_features;
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= CpuFeaturesImpliedByCompiler();
+ cache_line_size_ = 64;
- if (Serializer::enabled()) {
- // No probing for features if we might serialize (generate snapshot).
- printf(" ");
- PrintFeatures();
- return;
- }
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
#ifndef __arm__
- // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
- // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
- if (FLAG_enable_vfp3) {
- supported_ |=
- static_cast<uint64_t>(1) << VFP3 |
- static_cast<uint64_t>(1) << ARMv7;
- }
- if (FLAG_enable_neon) {
- supported_ |= 1u << NEON;
- }
- // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
+ // For the simulator build, use whatever the flags specify.
if (FLAG_enable_armv7) {
- supported_ |= static_cast<uint64_t>(1) << ARMv7;
- }
-
- if (FLAG_enable_sudiv) {
- supported_ |= static_cast<uint64_t>(1) << SUDIV;
- }
-
- if (FLAG_enable_movw_movt) {
- supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
- }
-
- if (FLAG_enable_32dregs) {
- supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
- }
-
- if (FLAG_enable_unaligned_accesses) {
- supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
+ supported_ |= 1u << ARMv7;
+ if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
+ if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
+ if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
+ if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
}
+ if (FLAG_enable_mls) supported_ |= 1u << MLS;
+ if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
#else // __arm__
- // Probe for additional features not already known to be available.
+ // Probe for additional features at runtime.
CPU cpu;
- if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
+ if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
- found_by_runtime_probing_only_ |=
- static_cast<uint64_t>(1) << VFP3 |
- static_cast<uint64_t>(1) << ARMv7;
+ supported_ |= 1u << VFP3 | 1u << ARMv7;
}
- if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
- found_by_runtime_probing_only_ |= 1u << NEON;
- }
-
- if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
- found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
- }
+ if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
+ if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
+ if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;
- if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
- found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
- }
-
- if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
- && cpu.architecture() >= 7) {
- found_by_runtime_probing_only_ |=
- static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
- }
-
- // Use movw/movt for QUALCOMM ARMv7 cores.
- if (cpu.implementer() == CPU::QUALCOMM &&
- cpu.architecture() >= 7 &&
- FLAG_enable_movw_movt) {
- found_by_runtime_probing_only_ |=
- static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
+ if (cpu.architecture() >= 7) {
+ if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
+ if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
+ // Use movw/movt for QUALCOMM ARMv7 cores.
+ if (FLAG_enable_movw_movt && cpu.implementer() == CPU::QUALCOMM) {
+ supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ }
}
// ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
- if (cpu.implementer() == CPU::ARM &&
- (cpu.part() == CPU::ARM_CORTEX_A5 ||
- cpu.part() == CPU::ARM_CORTEX_A9)) {
+ if (cpu.implementer() == CPU::ARM && (cpu.part() == CPU::ARM_CORTEX_A5 ||
+ cpu.part() == CPU::ARM_CORTEX_A9)) {
cache_line_size_ = 32;
}
- if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
- found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
- }
-
- supported_ |= found_by_runtime_probing_only_;
+ if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
#endif
- // Assert that VFP3 implies ARMv7.
ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
}
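
A minimal sketch of the supported_ bitmask idiom ProbeImpl now populates directly (the enum numbering is illustrative, not V8's):

    #include <cassert>

    enum CpuFeature { VFP3, ARMv7, SUDIV, NEON };

    static unsigned supported_ = 0;

    static bool IsSupported(CpuFeature f) {
      return (supported_ & (1u << f)) != 0;
    }

    int main() {
      // Runtime probe path: detecting VFP3 also sets ARMv7, since VFPv3
      // implies ARMv7 (ARM DDI 0406B, page A1-6).
      supported_ |= 1u << VFP3 | 1u << ARMv7;
      assert(!IsSupported(VFP3) || IsSupported(ARMv7));  // the invariant above
      assert(!IsSupported(NEON));
      return 0;
    }
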
@@ -287,16 +205,38 @@ void CpuFeatures::PrintFeatures() {
// -----------------------------------------------------------------------------
+// Implementation of DwVfpRegister
+
+const char* DwVfpRegister::AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
+ if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
+ return VFPRegisters::Name(index, true);
+}
+
+
+// -----------------------------------------------------------------------------
// Implementation of RelocInfo
const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on ARM means that it is a movw/movt instruction. We don't
- // generate those yet.
- return false;
+ // The deserializer needs to know whether a pointer is specially coded.  Being
+ // specially coded on ARM means that it is a movw/movt instruction, or is an
+ // out of line constant pool entry.  These only occur if
+ // FLAG_enable_ool_constant_pool is true.
+ return FLAG_enable_ool_constant_pool;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ if (FLAG_enable_ool_constant_pool) {
+ return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
+ } else {
+ return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
+ }
}
@@ -344,12 +284,17 @@ Operand::Operand(Handle<Object> handle) {
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+
rm_ = rm;
rs_ = no_reg;
shift_op_ = shift_op;
shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
+
+ if ((shift_op == ROR) && (shift_imm == 0)) {
+ // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
+ // RRX as ROR #0 (see below).
+ shift_op = LSL;
+ } else if (shift_op == RRX) {
// encoded as ROR with shift_imm == 0
ASSERT(shift_imm == 0);
shift_op_ = ROR;
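
A standalone sketch of the encoding wrinkle handled here: in ARM data-processing operands RRX is encoded as ROR with a zero immediate, so a genuine rotate-by-zero must be rewritten as LSL #0 (a plain register operand):

    #include <cassert>

    enum ShiftOp { LSL = 0, LSR = 1, ASR = 2, ROR = 3 };  // real field values

    struct Shift { ShiftOp op; int imm; };

    static Shift Normalize(ShiftOp op, int imm) {
      if (op == ROR && imm == 0) return {LSL, 0};  // ROR #0 would read as RRX
      return {op, imm & 31};
    }

    int main() {
      Shift s = Normalize(ROR, 0);
      assert(s.op == LSL && s.imm == 0);
      return 0;
    }
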
@@ -475,9 +420,15 @@ const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+// ldr rd, [pp, #offset]
+const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
+// vldr dd, [pp, #offset]
+const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
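
Worked values for the new pp-relative load matcher (pp is r8 here): the mask keeps the opcode class, the B/W/L bits and Rn, so the condition, Rd, the U (add/subtract) bit and the offset are don't-cares.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kLdrPpMask = 0x0f7f0000u;     // 15*B24 | 7*B20 | 15*B16
      const uint32_t kLdrPpPattern = 0x05180000u;  // 5*B24 | L | r8 * B16
      uint32_t pp_load = 0xe598500cu;              // ldr r5, [r8, #12]
      assert((pp_load & kLdrPpMask) == kLdrPpPattern);
      uint32_t pc_load = 0xe59f500cu;              // ldr r5, [pc, #12]
      assert((pc_load & kLdrPpMask) != kLdrPpPattern);  // Rn differs
      return 0;
    }
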
@@ -515,6 +466,7 @@ const Instr kLdrStrOffsetMask = 0x00000fff;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
+ constant_pool_builder_(),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_32_bit_reloc_info_ = 0;
@@ -525,6 +477,8 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
+ constant_pool_available_ = !FLAG_enable_ool_constant_pool;
+ constant_pool_full_ = false;
ClearRecordedAstId();
}
@@ -535,11 +489,12 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_pending_32_bit_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
-
+ if (!FLAG_enable_ool_constant_pool) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ }
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -722,6 +677,13 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
}
+bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // ldr<cond> <Rd>, [pp +/- offset_12].
+ return (instr & kLdrPpMask) == kLdrPpPattern;
+}
+
+
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// vldr<cond> <Dd>, [pc +/- offset_10].
@@ -729,6 +691,13 @@ bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
}
+bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // vldr<cond> <Dd>, [pp +/- offset_10].
+ return (instr & kVldrDPpMask) == kVldrDPpPattern;
+}
+
+
bool Assembler::IsTstImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | TST | S);
@@ -777,7 +746,7 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
// same position.
-int Assembler::target_at(int pos) {
+int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
if (is_uint24(instr)) {
// Emitted link to a label, not part of a branch.
@@ -1040,13 +1009,8 @@ static bool fits_shifter(uint32_t imm32,
// encoded.
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif // def DEBUG
if (assembler != NULL && assembler->predictable_code_size()) return true;
- return Serializer::enabled();
+ return assembler->serializer_enabled();
} else if (RelocInfo::IsNone(rmode_)) {
return false;
}
@@ -1054,14 +1018,24 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
}
-static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
- if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
+static bool use_mov_immediate_load(const Operand& x,
+ const Assembler* assembler) {
+ if (assembler != NULL && !assembler->can_use_constant_pool()) {
+ // If there is no constant pool available, we must use a mov immediate.
+ // TODO(rmcilroy): enable ARMv6 support.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
return true;
- }
- if (x.must_output_reloc_info(assembler)) {
+ } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ (assembler == NULL || !assembler->predictable_code_size())) {
+ // Prefer movw / movt to constant pool if it is more efficient on the CPU.
+ return true;
+ } else if (x.must_output_reloc_info(assembler)) {
+ // Prefer constant pool if data is likely to be patched.
return false;
+ } else {
+ // Otherwise, use immediate load if movw / movt is available.
+ return CpuFeatures::IsSupported(ARMv7);
}
- return CpuFeatures::IsSupported(ARMv7);
}
@@ -1075,7 +1049,7 @@ bool Operand::is_single_instruction(const Assembler* assembler,
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- return !use_movw_movt(*this, assembler);
+ return !use_mov_immediate_load(*this, assembler);
} else {
// If this is not a mov or mvn instruction there will always be an additional
// instruction - either mov or ldr. The mov might actually be two
@@ -1091,26 +1065,34 @@ bool Operand::is_single_instruction(const Assembler* assembler,
}
-void Assembler::move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x) {
- if (rd.code() != pc.code() && s == LeaveCC) {
- if (use_movw_movt(x, this)) {
- if (x.must_output_reloc_info(this)) {
- RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
- // Make sure the movw/movt doesn't get separated.
- BlockConstPoolFor(2);
- }
- emit(cond | 0x30*B20 | rd.code()*B12 |
- EncodeMovwImmediate(x.imm32_ & 0xffff));
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- return;
- }
+void Assembler::move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond) {
+ RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
+ if (x.must_output_reloc_info(this)) {
+ RecordRelocInfo(rinfo);
}
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(rd, MemOperand(pc, 0), cond);
+ if (use_mov_immediate_load(x, this)) {
+ Register target = rd.code() == pc.code() ? ip : rd;
+ // TODO(rmcilroy): add ARMv6 support for immediate loads.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ if (!FLAG_enable_ool_constant_pool &&
+ x.must_output_reloc_info(this)) {
+ // Make sure the movw/movt doesn't get separated.
+ BlockConstPoolFor(2);
+ }
+ emit(cond | 0x30*B20 | target.code()*B12 |
+ EncodeMovwImmediate(x.imm32_ & 0xffff));
+ movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ if (target.code() != rd.code()) {
+ mov(rd, target, LeaveCC, cond);
+ }
+ } else {
+ ASSERT(can_use_constant_pool());
+ ConstantPoolAddEntry(rinfo);
+ ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+ }
}
@@ -1133,20 +1115,9 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- move_32_bit_immediate(cond, rd, LeaveCC, x);
+ move_32_bit_immediate(rd, x, cond);
} else {
- if ((instr & kMovMvnMask) == kMovMvnPattern) {
- // Moves need to use a constant pool entry.
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(ip, MemOperand(pc, 0), cond);
- } else if (x.must_output_reloc_info(this)) {
- // Otherwise, use most efficient form of fetching from constant pool.
- move_32_bit_immediate(cond, ip, LeaveCC, x);
- } else {
- // If this is not a mov or mvn instruction we may still be able to
- // avoid a constant pool entry by using mvn or movw.
- mov(ip, x, LeaveCC, cond);
- }
+ mov(ip, x, LeaveCC, cond);
addrmod1(instr, rn, rd, Operand(ip));
}
return;
@@ -1512,6 +1483,7 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ ASSERT(IsEnabled(MLS));
emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1748,7 +1720,9 @@ void Assembler::uxtb(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
- ASSERT(src.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@@ -1770,7 +1744,9 @@ void Assembler::uxtab(Register dst,
(src2.shift_imm_ == 8) ||
(src2.shift_imm_ == 16) ||
(src2.shift_imm_ == 24));
- ASSERT(src2.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src2.shift_op() == ROR) ||
+ ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}
@@ -1790,7 +1766,9 @@ void Assembler::uxtb16(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
- ASSERT(src.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@@ -1814,8 +1792,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
if (src.must_output_reloc_info(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
+ move_32_bit_immediate(ip, src);
msr(fields, Operand(ip), cond);
return;
}
@@ -2350,7 +2327,7 @@ void Assembler::vstm(BlockAddrMode am,
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
- OS::MemCopy(&i, &d, 8);
+ memcpy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
@@ -2422,7 +2399,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm) {
+ } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@@ -2438,8 +2415,9 @@ void Assembler::vmov(const DwVfpRegister dst,
// The code could also randomize the order of values, though
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
- RecordRelocInfo(imm);
- vldr(dst, MemOperand(pc, 0));
+ RelocInfo rinfo(pc_, imm);
+ ConstantPoolAddEntry(rinfo);
+ vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@@ -2776,8 +2754,9 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
ASSERT(CpuFeatures::IsSupported(VFP3));
int vd, d;
dst.split_code(&vd, &d);
- int i = ((32 - fraction_bits) >> 4) & 1;
- int imm4 = (32 - fraction_bits) & 0xf;
+ int imm5 = 32 - fraction_bits;
+ int i = imm5 & 1;
+ int imm4 = (imm5 >> 1) & 0xf;
emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
}
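
This is a genuine encoding fix. The VCVT fixed-point immediate is imm5 = 32 - fraction_bits, stored with its low bit in the i field and its upper four bits in imm4 (imm5 = imm4:i); the old code split the bits the other way around. A worked check:

    #include <cassert>

    int main() {
      const int fraction_bits = 5;
      const int imm5 = 32 - fraction_bits;  // 27 = 0b11011
      // Correct split (this patch): imm5 = imm4:i.
      const int i = imm5 & 1;               // 1
      const int imm4 = (imm5 >> 1) & 0xf;   // 0b1101 = 13
      assert(((imm4 << 1) | i) == imm5);
      // The old split put the wrong bits in each field; read back as
      // imm4:i the hardware would see 0b1011:1 = 23, i.e. the wrong fbits.
      const int old_i = (imm5 >> 4) & 1;
      const int old_imm4 = imm5 & 0xf;
      assert(((old_imm4 << 1) | old_i) == 23);
      return 0;
    }
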
@@ -3079,6 +3058,11 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
}
+bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
+ return is_uint12(abs(imm32));
+}
+
+
// Debugging.
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
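
What the new ImmediateFitsAddrMode2Instruction predicate (added above) expresses: addressing mode 2 (ldr/str) offsets are a 12-bit magnitude with a separate add/subtract bit, so anything in [-4095, 4095] encodes. A standalone restatement:

    #include <cassert>
    #include <cstdlib>

    static bool is_uint12(int x) { return x >= 0 && x < (1 << 12); }

    static bool ImmediateFitsAddrMode2Instruction(int imm32) {
      return is_uint12(std::abs(imm32));
    }

    int main() {
      assert(ImmediateFitsAddrMode2Instruction(4095));
      assert(ImmediateFitsAddrMode2Instruction(-4095));
      assert(!ImmediateFitsAddrMode2Instruction(4096));
      return 0;
    }
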
@@ -3105,9 +3089,7 @@ void Assembler::RecordComment(const char* msg) {
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
// code.
-#ifdef ENABLE_DEBUGGER_SUPPORT
RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
-#endif
}
@@ -3134,9 +3116,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- OS::MemMove(desc.buffer, buffer_, desc.instr_size);
- OS::MemMove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ MemMove(desc.buffer, buffer_, desc.instr_size);
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
@@ -3164,6 +3146,7 @@ void Assembler::GrowBuffer() {
ASSERT(rinfo.rmode() == RelocInfo::NONE64);
rinfo.set_pc(rinfo.pc() + pc_delta);
}
+ constant_pool_builder_.Relocate(pc_delta);
}
@@ -3199,41 +3182,23 @@ void Assembler::emit_code_stub_address(Code* stub) {
}
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
- UseConstantPoolMode mode) {
- // We do not try to reuse pool constants.
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL) ||
- mode == DONT_USE_CONSTANT_POOL) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode)
- || mode == DONT_USE_CONSTANT_POOL);
- // These modes do not need an entry in the constant pool.
- } else {
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
- }
+ RecordRelocInfo(rinfo);
+}
+
+
+void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
+ if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
+ if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(rinfo.pc(),
+ rinfo.rmode(),
RecordedAstId().ToInt(),
NULL);
ClearRecordedAstId();
@@ -3245,34 +3210,38 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
-void Assembler::RecordRelocInfo(double data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, data);
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
-}
-
-
-void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
- if (rinfo.rmode() == RelocInfo::NONE64) {
- ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
- if (num_pending_64_bit_reloc_info_ == 0) {
- first_const_pool_64_use_ = pc_offset();
- }
- pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool_builder_.AddEntry(this, rinfo);
} else {
- ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
- if (num_pending_32_bit_reloc_info_ == 0) {
- first_const_pool_32_use_ = pc_offset();
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+ if (num_pending_64_bit_reloc_info_ == 0) {
+ first_const_pool_64_use_ = pc_offset();
+ }
+ pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+ } else {
+ ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+ if (num_pending_32_bit_reloc_info_ == 0) {
+ first_const_pool_32_use_ = pc_offset();
+ }
+ pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
- pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
}
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
}
void Assembler::BlockConstPoolFor(int instructions) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// Max pool start (if we need a jump and an alignment).
@@ -3294,6 +3263,13 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
// Some short sequences of instructions mustn't be broken up by constant pool
// emission; such sequences are protected by calls to BlockConstPoolFor and
// BlockConstPoolScope.
@@ -3446,7 +3422,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// data
bool found = false;
- if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
+ if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
for (int j = 0; j < i; j++) {
RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
@@ -3491,6 +3467,207 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ if (!FLAG_enable_ool_constant_pool) {
+ return isolate->factory()->empty_constant_pool_array();
+ }
+ return constant_pool_builder_.New(isolate);
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ constant_pool_builder_.Populate(this, constant_pool);
+}
+
+
+ConstantPoolBuilder::ConstantPoolBuilder()
+ : entries_(),
+ merged_indexes_(),
+ count_of_64bit_(0),
+ count_of_code_ptr_(0),
+ count_of_heap_ptr_(0),
+ count_of_32bit_(0) { }
+
+
+bool ConstantPoolBuilder::IsEmpty() {
+ return entries_.size() == 0;
+}
+
+
+bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
+ return rmode == RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
+ return RelocInfo::IsCodeTarget(rmode);
+}
+
+
+bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
+ return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
+}
+
+
+void ConstantPoolBuilder::AddEntry(Assembler* assm,
+ const RelocInfo& rinfo) {
+ RelocInfo::Mode rmode = rinfo.rmode();
+ ASSERT(rmode != RelocInfo::COMMENT &&
+ rmode != RelocInfo::POSITION &&
+ rmode != RelocInfo::STATEMENT_POSITION &&
+ rmode != RelocInfo::CONST_POOL);
+
+ // Try to merge entries which won't be patched.
+ int merged_index = -1;
+ if (RelocInfo::IsNone(rmode) ||
+ (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
+ size_t i;
+ std::vector<RelocInfo>::const_iterator it;
+ for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
+ if (RelocInfo::IsEqual(rinfo, *it)) {
+ merged_index = i;
+ break;
+ }
+ }
+ }
+
+ entries_.push_back(rinfo);
+ merged_indexes_.push_back(merged_index);
+
+ if (merged_index == -1) {
+ // Not merged, so update the appropriate count.
+ if (Is64BitEntry(rmode)) {
+ count_of_64bit_++;
+ } else if (Is32BitEntry(rmode)) {
+ count_of_32bit_++;
+ } else if (IsCodePtrEntry(rmode)) {
+ count_of_code_ptr_++;
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ count_of_heap_ptr_++;
+ }
+ }
+
+ // Check if we still have room for another entry given ARM's ldr and vldr
+ // immediate offset range.
+ // TODO(rmcilroy): Avoid creating a new object here when we support
+ // extended constant pools.
+ ConstantPoolArray::NumberOfEntries total(count_of_64bit_,
+ count_of_code_ptr_,
+ count_of_heap_ptr_,
+ count_of_32bit_);
+ ConstantPoolArray::NumberOfEntries int64_counts(count_of_64bit_, 0, 0, 0);
+ if (!(is_uint12(ConstantPoolArray::SizeFor(total)) &&
+ is_uint10(ConstantPoolArray::SizeFor(int64_counts)))) {
+ assm->set_constant_pool_full();
+ }
+}
+
+
+void ConstantPoolBuilder::Relocate(int pc_delta) {
+ for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
+ rinfo != entries_.end(); rinfo++) {
+ ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
+ rinfo->set_pc(rinfo->pc() + pc_delta);
+ }
+}
+
+
+Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
+ if (IsEmpty()) {
+ return isolate->factory()->empty_constant_pool_array();
+ } else {
+ ConstantPoolArray::NumberOfEntries small(count_of_64bit_,
+ count_of_code_ptr_,
+ count_of_heap_ptr_,
+ count_of_32bit_);
+ return isolate->factory()->NewConstantPoolArray(small);
+ }
+}
+
+
+void ConstantPoolBuilder::Populate(Assembler* assm,
+ ConstantPoolArray* constant_pool) {
+ ASSERT(count_of_64bit_ == constant_pool->number_of_entries(
+ ConstantPoolArray::INT64, ConstantPoolArray::SMALL_SECTION));
+ ASSERT(count_of_code_ptr_ == constant_pool->number_of_entries(
+ ConstantPoolArray::CODE_PTR, ConstantPoolArray::SMALL_SECTION));
+ ASSERT(count_of_heap_ptr_ == constant_pool->number_of_entries(
+ ConstantPoolArray::HEAP_PTR, ConstantPoolArray::SMALL_SECTION));
+ ASSERT(count_of_32bit_ == constant_pool->number_of_entries(
+ ConstantPoolArray::INT32, ConstantPoolArray::SMALL_SECTION));
+ ASSERT(entries_.size() == merged_indexes_.size());
+
+ int index_64bit = 0;
+ int index_code_ptr = count_of_64bit_;
+ int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
+ int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
+
+ size_t i;
+ std::vector<RelocInfo>::const_iterator rinfo;
+ for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
+ RelocInfo::Mode rmode = rinfo->rmode();
+
+ // Update constant pool if necessary and get the entry's offset.
+ int offset;
+ if (merged_indexes_[i] == -1) {
+ if (Is64BitEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
+ constant_pool->set(index_64bit++, rinfo->data64());
+ } else if (Is32BitEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
+ constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
+ } else if (IsCodePtrEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_code_ptr++,
+ reinterpret_cast<Address>(rinfo->data()));
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_heap_ptr++,
+ reinterpret_cast<Object *>(rinfo->data()));
+ }
+ merged_indexes_[i] = offset; // Stash offset for merged entries.
+ } else {
+ size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
+ ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
+ offset = merged_indexes_[merged_index];
+ }
+
+ // Patch vldr/ldr instruction with correct offset.
+ Instr instr = assm->instr_at(rinfo->pc());
+ if (Is64BitEntry(rmode)) {
+ // Instruction to patch must be 'vldr dd, [pp, #0]'.
+ ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
+ Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint10(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
+ } else {
+ // Instruction to patch must be 'ldr rd, [pp, #0]'.
+ ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
+ Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint12(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+ }
+ }
+
+ ASSERT((index_64bit == count_of_64bit_) &&
+ (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
+ (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
+ (index_32bit == (index_heap_ptr + count_of_32bit_)));
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
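
Back-of-envelope for the capacity check at the end of ConstantPoolBuilder::AddEntry above: ldr reaches 4095 bytes from pp while vldr only reaches 1020, so the whole pool must fit in a uint12 and the 64-bit (vldr-loaded) section, laid out first, in a uint10. A sketch with illustrative entry sizes (the real ConstantPoolArray::SizeFor also accounts for a small header):

    #include <cassert>

    static bool is_uint12(int x) { return x >= 0 && x < (1 << 12); }
    static bool is_uint10(int x) { return x >= 0 && x < (1 << 10); }

    int main() {
      int count_of_64bit = 100, count_of_ptr = 200, count_of_32bit = 150;
      int size_64bit = count_of_64bit * 8;  // doubles come first in the pool
      int total = size_64bit + (count_of_ptr + count_of_32bit) * 4;
      // 800 bytes of doubles and 2200 bytes overall: still room for more.
      assert(is_uint10(size_64bit) && is_uint12(total));
      return 0;
    }
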
diff --git a/chromium/v8/src/arm/assembler-arm.h b/chromium/v8/src/arm/assembler-arm.h
index 84bc8794e81..812f58f467b 100644
--- a/chromium/v8/src/arm/assembler-arm.h
+++ b/chromium/v8/src/arm/assembler-arm.h
@@ -39,81 +39,17 @@
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
+
#include <stdio.h>
-#include "assembler.h"
-#include "constants-arm.h"
-#include "serialize.h"
+#include <vector>
+
+#include "src/assembler.h"
+#include "src/arm/constants-arm.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a CpuFeatureScope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Display target use when compiling.
- static void PrintTarget();
-
- // Display features.
- static void PrintFeatures();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, supported_);
- }
-
- static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, found_by_runtime_probing_only_);
- }
-
- static bool IsSafeForSnapshot(CpuFeature f) {
- return Check(f, cross_compile_) ||
- (IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
- }
-
- static unsigned cache_line_size() { return cache_line_size_; }
-
- static bool VerifyCrossCompiling() {
- return cross_compile_ == 0;
- }
-
- static bool VerifyCrossCompiling(CpuFeature f) {
- unsigned mask = flag2set(f);
- return cross_compile_ == 0 ||
- (cross_compile_ & mask) == mask;
- }
-
- private:
- static bool Check(CpuFeature f, unsigned set) {
- return (set & flag2set(f)) != 0;
- }
-
- static unsigned flag2set(CpuFeature f) {
- return 1u << f;
- }
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_only_;
- static unsigned cache_line_size_;
-
- static unsigned cross_compile_;
-
- friend class ExternalReference;
- friend class PlatformFeatureScope;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -376,8 +312,9 @@ struct QwNeonRegister {
}
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
+ int encoded_code = code_ << 1;
+ *m = (encoded_code & 0x10) >> 4;
+ *vm = encoded_code & 0x0F;
}
int code_;
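
Worked values for the corrected Q-register split: NEON Q registers alias the D-register file, so the register code must be doubled (q8 is d16/d17) before being split into the M:Vm fields. A standalone check:

    #include <cassert>

    static void split_code(int q_code, int* vm, int* m) {
      int encoded = q_code << 1;   // q8 lives at d16
      *m = (encoded & 0x10) >> 4;
      *vm = encoded & 0x0f;
    }

    int main() {
      int vm, m;
      split_code(8, &vm, &m);      // q8 -> d16 -> M:Vm = 1:0000
      assert(m == 1 && vm == 0);
      split_code(3, &vm, &m);      // q3 -> d6  -> M:Vm = 0:0110
      assert(m == 0 && vm == 6);
      return 0;
    }
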
@@ -586,7 +523,8 @@ class Operand BASE_EMBEDDED {
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
- bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
+ bool is_single_instruction(const Assembler* assembler,
+ Instr instr = 0) const;
bool must_output_reloc_info(const Assembler* assembler) const;
inline int32_t immediate() const {
@@ -702,9 +640,44 @@ class NeonListOperand BASE_EMBEDDED {
NeonListType type_;
};
+
+// Class used to build a constant pool.
+class ConstantPoolBuilder BASE_EMBEDDED {
+ public:
+ explicit ConstantPoolBuilder();
+ void AddEntry(Assembler* assm, const RelocInfo& rinfo);
+ void Relocate(int pc_delta);
+ bool IsEmpty();
+ Handle<ConstantPoolArray> New(Isolate* isolate);
+ void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
+
+ inline int count_of_64bit() const { return count_of_64bit_; }
+ inline int count_of_code_ptr() const { return count_of_code_ptr_; }
+ inline int count_of_heap_ptr() const { return count_of_heap_ptr_; }
+ inline int count_of_32bit() const { return count_of_32bit_; }
+
+ private:
+ bool Is64BitEntry(RelocInfo::Mode rmode);
+ bool Is32BitEntry(RelocInfo::Mode rmode);
+ bool IsCodePtrEntry(RelocInfo::Mode rmode);
+ bool IsHeapPtrEntry(RelocInfo::Mode rmode);
+
+ // TODO(rmcilroy): This should ideally be a ZoneList, however that would mean
+ // RelocInfo would need to subclass ZoneObject which it currently doesn't.
+ std::vector<RelocInfo> entries_;
+ std::vector<int> merged_indexes_;
+ int count_of_64bit_;
+ int count_of_code_ptr_;
+ int count_of_heap_ptr_;
+ int count_of_32bit_;
+};
+
+
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
+extern const Instr kLdrPpMask;
+extern const Instr kLdrPpPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kBlxIp;
@@ -780,9 +753,31 @@ class Assembler : public AssemblerBase {
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc, or the object in a mov.
+ INLINE(static Address target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool));
+
// Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool));
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED));
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -795,7 +790,7 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Address target);
+ Address constant_pool_entry, Code* code, Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
@@ -1292,12 +1287,6 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
- static bool use_immediate_embedded_pointer_loads(
- const Assembler* assembler) {
- return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- (assembler == NULL || !assembler->predictable_code_size());
- }
-
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@@ -1311,6 +1300,9 @@ class Assembler : public AssemblerBase {
// Check whether an immediate fits an addressing mode 1 instruction.
bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+ // Check whether an immediate fits an addressing mode 2 instruction.
+ bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
+
// Class for scoping postponing the constant pool generation.
class BlockConstPoolScope {
public:
@@ -1361,9 +1353,9 @@ class Assembler : public AssemblerBase {
// function, compiled with and without debugger support (see for example
// Debug::PrepareForBreakPoints()).
// Compiling functions with debugger support generates additional code
- // (Debug::GenerateSlot()). This may affect the emission of the constant
- // pools and cause the version of the code with debugger support to have
- // constant pools generated in different places.
+ // (DebugCodegen::GenerateSlot()). This may affect the emission of the
+ // constant pools and cause the version of the code with debugger support to
+ // have constant pools generated in different places.
// Recording the position and size of emitted constant pools allows to
// correctly compute the offset mappings between the different versions of a
// function in all situations.
@@ -1398,6 +1390,8 @@ class Assembler : public AssemblerBase {
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
+ static bool IsLdrPpImmediateOffset(Instr instr);
+ static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
@@ -1443,6 +1437,20 @@ class Assembler : public AssemblerBase {
// Check if it is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ bool can_use_constant_pool() const {
+ return is_constant_pool_available() && !constant_pool_full_;
+ }
+
+ void set_constant_pool_full() {
+ constant_pool_full_ = true;
+ }
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1496,6 +1504,14 @@ class Assembler : public AssemblerBase {
(pc_offset() < no_const_pool_before_);
}
+ bool is_constant_pool_available() const {
+ return constant_pool_available_;
+ }
+
+ void set_constant_pool_available(bool available) {
+ constant_pool_available_ = available;
+ }
+
private:
int next_buffer_check_; // pc offset of next buffer check
@@ -1553,19 +1569,27 @@ class Assembler : public AssemblerBase {
// Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
+ ConstantPoolBuilder constant_pool_builder_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Indicates whether the constant pool can be accessed, which is only possible
+ // if the pp register points to the current code object's constant pool.
+ bool constant_pool_available_;
+ // Indicates whether the constant pool is too full to accept new entries due
+ // to the ldr instruction's limited immediate offset range.
+ bool constant_pool_full_;
+
// Code emission
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
// 32-bit immediate values
- void move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x);
+ void move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond = al);
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
@@ -1585,14 +1609,15 @@ class Assembler : public AssemblerBase {
};
// Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
- UseConstantPoolMode mode = USE_CONSTANT_POOL);
- void RecordRelocInfo(double data);
- void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void RecordRelocInfo(const RelocInfo& rinfo);
+ void ConstantPoolAddEntry(const RelocInfo& rinfo);
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
+ friend class FrameAndConstantPoolScope;
+ friend class ConstantPoolUnavailableScope;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
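
The two flags introduced above gate every out-of-line constant pool access: an entry may only be emitted while the pool is still reachable through pp and has not outgrown the ldr immediate offset range. A minimal sketch of that gating, assuming hypothetical helper names (only the accessors themselves come from the header):

// Sketch only; AddPoolEntry/EmitMovwMovtImmediate are hypothetical helpers.
void EmitPoolConstant(Assembler* assm, const RelocInfo& rinfo) {
  if (assm->can_use_constant_pool()) {
    // pp points at the current code object's pool and ldr can still
    // reach a new entry with its immediate offset.
    AddPoolEntry(assm, rinfo);
  } else {
    // Pool unavailable or full: fall back to a movw/movt immediate load.
    EmitMovwMovtImmediate(assm, rinfo);
  }
}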
diff --git a/chromium/v8/src/arm/builtins-arm.cc b/chromium/v8/src/arm/builtins-arm.cc
index bef4bc3c465..36898487c7a 100644
--- a/chromium/v8/src/arm/builtins-arm.cc
+++ b/chromium/v8/src/arm/builtins-arm.cc
@@ -1,39 +1,17 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -154,10 +132,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// Tail call a stub.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -261,7 +236,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
@@ -281,7 +256,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
@@ -289,17 +264,15 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
- // Push call kind information and function as parameter to the runtime call.
- __ Push(r5, r1);
+ // Push function as parameter to the runtime call.
+ __ Push(r1);
__ CallRuntime(function_id, 1);
- // Restore call kind information.
- __ pop(r5);
// Restore receiver.
__ pop(r1);
}
@@ -313,7 +286,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r0);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -324,40 +303,38 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r0);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
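
GenerateTailCallToReturnedCode folds two steps into one add: clearing the heap-object tag from the Code pointer in r0 and skipping its header to reach the first instruction. A worked instance, with an assumed header size (kHeapObjectTag really is V8's low tag bit; the header size here is illustrative, not Code::kHeaderSize):

#include <cstdint>
// kAssumedCodeHeaderSize stands in for Code::kHeaderSize in this example.
constexpr uintptr_t kAssumedCodeHeaderSize = 0x80;
constexpr uintptr_t kHeapObjectTag = 1;
uintptr_t EntryFromTaggedCode(uintptr_t tagged_code) {
  // Mirrors: add r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)
  return tagged_code + (kAssumedCodeHeaderSize - kHeapObjectTag);
  // e.g. 0x2a0481 -> 0x2a0500: tag stripped, header skipped, ready to Jump.
}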
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- r2 : allocation site or undefined
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
Isolate* isolate = masm->isolate();
// Enter a construct frame.
{
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(r2, r3);
+ __ push(r2);
+ }
// Preserve the two incoming parameters on the stack.
__ SmiTag(r0);
@@ -369,14 +346,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ mov(r2, Operand(debug_step_in_fp));
__ ldr(r2, MemOperand(r2));
__ tst(r2, r2);
__ b(ne, &rt_call);
-#endif
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
@@ -393,22 +368,24 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
- if (count_constructions) {
+ if (!is_api_function) {
Label allocate;
+ MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ ldr(r4, bit_field3);
+ __ DecodeField<Map::ConstructionCount>(r3, r4);
+ __ cmp(r3, Operand(JSFunction::kNoSlackTracking));
+ __ b(eq, &allocate);
// Decrease generous allocation count.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
- __ ldrb(r4, constructor_count);
- __ sub(r4, r4, Operand(1), SetCC);
- __ strb(r4, constructor_count);
+ __ sub(r4, r4, Operand(1 << Map::ConstructionCount::kShift));
+ __ str(r4, bit_field3);
+ __ cmp(r3, Operand(JSFunction::kFinishSlackTracking));
__ b(ne, &allocate);
__ push(r1);
__ Push(r2, r1); // r1 = constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ pop(r2);
__ pop(r1);
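
The rewritten slack-tracking check reads the construction count straight out of the map's bit_field3 instead of a byte in the SharedFunctionInfo, so the decrement becomes a subtraction of 1 << Map::ConstructionCount::kShift on the whole word. A self-contained sketch of that bit-field update (shift, width and the kNoSlackTracking value are assumed stand-ins for the real constants):

#include <cstdint>
// Assumed placements; the real ones come from Map::ConstructionCount
// and JSFunction::kNoSlackTracking.
constexpr uint32_t kShift = 24, kSize = 3;
constexpr uint32_t kNoSlackTracking = 0;
uint32_t DecrementConstructionCount(uint32_t bit_field3) {
  uint32_t count = (bit_field3 >> kShift) & ((1u << kSize) - 1);
  if (count == kNoSlackTracking) return bit_field3;  // tracking is over
  // Mirrors: __ sub(r4, r4, Operand(1 << Map::ConstructionCount::kShift))
  return bit_field3 - (1u << kShift);
}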
@@ -420,13 +397,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r2: initial map
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
__ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
- // r3: object size
+ // r3: object size (not including memento if create_memento)
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
@@ -440,12 +421,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with the appropriate filler.
// r1: constructor function
// r2: initial map
- // r3: object size (in words)
+ // r3: object size (in words, including memento if create_memento)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ ldr(ip, FieldMemOperand(r2, Map::kBitField3Offset));
+ __ DecodeField<Map::ConstructionCount>(ip);
+ __ cmp(ip, Operand(JSFunction::kNoSlackTracking));
+ __ b(eq, &no_inobject_slack_tracking);
+
+ // Allocate object with a slack.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
@@ -459,9 +450,29 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+
+ if (create_memento) {
+ __ sub(ip, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ add(r0, r4, Operand(ip, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+
+ // Fill in memento fields.
+ // r5: points to the allocated but uninitialized memento.
+ __ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ // Load the AllocationSite
+ __ ldr(r6, MemOperand(sp, 2 * kPointerSize));
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ } else {
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
}
- __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- __ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -559,13 +570,47 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// r1: constructor function
__ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+ __ push(r2);
+ }
+
__ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(r4, r0);
+ // If we ended up using the runtime and we want a memento, then the
+ // runtime call already created it for us, so we must not increment the
+ // memento create count here.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
+
+ if (create_memento) {
+ __ ldr(r2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ cmp(r2, r5);
+ __ b(eq, &count_incremented);
+ // r2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ ldr(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ add(r3, r3, Operand(Smi::FromInt(1)));
+ __ str(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
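
Per the ASSERT_EQs above, the memento is just two pointer-size words appended directly after the object, and every memento minted from a real AllocationSite bumps that site's pretenure create count, whether the object was allocated inline or by the runtime. As a layout sketch (not the V8 class itself):

// Assumed layout, matching kMapOffset == 0 and
// kAllocationSiteOffset == kPointerSize asserted above.
struct AllocationMementoLayout {
  void* map;   // filled with the kAllocationMementoMap root
  void* site;  // the AllocationSite whose create count gets incremented
};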
+
__ push(r4);
__ push(r4);
@@ -608,17 +653,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -670,13 +712,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
@@ -744,15 +781,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r0, Operand(r3));
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
__ CallStub(&stub);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Exit the JS frame and remove the parameters (except function), and
// return.
@@ -774,19 +808,36 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push function as parameter to the runtime call.
+ __ Push(r1);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+ // Restore receiver.
+ __ pop(r1);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
}
@@ -803,7 +854,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r2);
+ __ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
@@ -836,18 +887,18 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r2);
+ __ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
masm->isolate()), 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ PushFixedFrame(r1);
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Jump to point after the code-age stub.
- __ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
+ __ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
__ mov(pc, r0);
}
@@ -860,14 +911,14 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@@ -889,11 +940,11 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> r6.
@@ -937,19 +988,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sub(r1, r1, r2);
- __ SmiTag(r1);
-
- // Pass both function and pc offset as arguments.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
__ push(r0);
- __ push(r1);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
// If the code object is null, just return to the unoptimized code.
@@ -962,20 +1004,26 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
- // Load the OSR entrypoint offset from the deoptimization data.
- // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ if (FLAG_enable_ool_constant_pool) {
+ __ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
+ }
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ add(r0, r0, Operand::SmiUntag(r1));
- __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
- // And "return" to the OSR entry point of the function.
- __ Ret();
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r0, r0, Operand::SmiUntag(r1));
+ __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+ }
}
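
The OSR entry address is therefore the code object plus header plus the Smi-encoded pc offset stored in the deoptimization data, all computed while the constant pool is explicitly marked unavailable (pp is only reloaded when out-of-line pools are enabled). A sketch of the arithmetic, with an assumed header size:

#include <cstdint>
// kAssumedCodeHeaderSize stands in for Code::kHeaderSize.
constexpr intptr_t kAssumedCodeHeaderSize = 0x80;
constexpr intptr_t kHeapObjectTag = 1;
uintptr_t OsrEntry(uintptr_t tagged_code, intptr_t osr_offset_smi) {
  intptr_t osr_offset = osr_offset_smi >> 1;  // SmiUntag on 32-bit ARM
  // Mirrors: add r0, r0, SmiUntag(r1); add lr, r0, kHeaderSize - tag; Ret
  return tagged_code + osr_offset + kAssumedCodeHeaderSize - kHeapObjectTag;
}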
@@ -986,8 +1034,8 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1038,7 +1086,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
@@ -1061,7 +1109,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r0);
__ push(r0);
@@ -1080,15 +1128,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(r4, Operand::Zero());
__ jmp(&patch_receiver);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
__ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -1148,18 +1190,17 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ b(eq, &function);
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r4, Operand(1));
__ b(ne, &non_proxy);
__ push(r1); // re-add proxy object as additional argument
__ add(r0, r0, Operand(1));
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
@@ -1174,16 +1215,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(r2);
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r2, r0); // Check formal and actual parameter counts.
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET,
ne);
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(0);
- __ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(r3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
@@ -1197,7 +1236,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kFunctionOffset = 4 * kPointerSize;
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@@ -1220,7 +1259,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ Push(r1, r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
// End of stack check.
// Push current limit and index.
@@ -1255,7 +1294,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
@@ -1277,13 +1316,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ b(&push_receiver);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+ __ ldr(r0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -1319,27 +1353,25 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ cmp(r0, r1);
__ b(ne, &loop);
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(r0);
__ SmiUntag(r0);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
__ push(r1); // add function proxy as last argument
__ add(r0, r0, Operand(1));
__ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1350,10 +1382,32 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- r0 : actual number of arguments
+ // -- r1 : function (passed through to callee)
+ // -- r2 : expected number of arguments
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+ // Make r5 the space we have left. The stack might already have overflowed
+ // here, which will cause r5 to become negative.
+ __ sub(r5, sp, r5);
+ // Check if the arguments will overflow the stack.
+ __ cmp(r5, Operand(r2, LSL, kPointerSizeLog2));
+ __ b(le, stack_overflow); // Signed comparison.
+}
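
The check is a single signed comparison: the headroom below sp must exceed the space the expected arguments will consume, and if the stack has already run past the real limit the subtraction goes negative, so the signed le branch still catches it. The same test in plain C++ (a sketch, not V8 code):

#include <cstdint>
bool AdaptorFrameWouldOverflow(uintptr_t sp, uintptr_t real_limit,
                               uint32_t expected_args) {
  // Mirrors: sub r5, sp, r5; cmp r5, Operand(r2, LSL, kPointerSizeLog2); ble
  intptr_t headroom = static_cast<intptr_t>(sp - real_limit);
  return headroom <= static_cast<intptr_t>(expected_args * sizeof(void*));
}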
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
+ (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
__ add(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1367,8 +1421,14 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// then tear down the parameters.
__ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize)));
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
+
+ if (FLAG_enable_ool_constant_pool) {
+ __ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ __ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+ } else {
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ }
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}
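
With out-of-line constant pools the adaptor frame gains one extra saved register, which is why teardown can no longer simply mov sp into fp: it has to step down to the pp slot first. The resulting layout, as a hedged reconstruction from the push order above:

// Adaptor frame after EnterArgumentsAdaptorFrame, highest address first:
//   saved lr
//   saved fp                  <- fp
//   saved pp                  (only if FLAG_enable_ool_constant_pool)
//   ARGUMENTS_ADAPTOR marker  (Smi)
//   function (r1)
//   argc as Smi (r0)          <- sp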
@@ -1379,13 +1439,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r0 : actual number of arguments
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
- // -- r3 : code entry to call
- // -- r5 : call kind information
// -----------------------------------
+ Label stack_overflow;
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
Label invoke, dont_adapt_arguments;
Label enough, too_few;
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r0, r2);
__ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -1481,6 +1542,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ Jump(r3);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bkpt(0);
+ }
}
diff --git a/chromium/v8/src/arm/code-stubs-arm.cc b/chromium/v8/src/arm/code-stubs-arm.cc
index cc2dbdcdee0..033413ba775 100644
--- a/chromium/v8/src/arm/code-stubs-arm.cc
+++ b/chromium/v8/src/arm/code-stubs-arm.cc
@@ -1,56 +1,40 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r2 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
@@ -60,50 +44,51 @@ void ToNumberStub::InitializeInterfaceDescriptor(
void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r3, r2, r1 };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->register_param_representations_ = representations;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r3, r2, r1, r0 };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { r2, r3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r1, r0 };
descriptor->register_param_count_ = 2;
@@ -114,7 +99,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r1, r0 };
descriptor->register_param_count_ = 2;
@@ -124,8 +108,27 @@ void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
}
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2, r1, r0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
@@ -135,7 +138,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r1 };
descriptor->register_param_count_ = 1;
@@ -144,21 +146,25 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
-void KeyedArrayCallStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void StringLengthStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { r0, r2 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
- descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r2, r1, r0 };
descriptor->register_param_count_ = 3;
@@ -169,7 +175,6 @@ void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0, r1 };
descriptor->register_param_count_ = 2;
@@ -181,7 +186,6 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
@@ -189,30 +193,17 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r1, r0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// r0 -- number of arguments
// r1 -- function
- // r2 -- type info cell with elements kind
+ // r2 -- allocation site with elements kind
static Register registers_variable_args[] = { r1, r2, r0 };
static Register registers_no_args[] = { r1, r2 };
@@ -225,17 +216,21 @@ static void InitializeArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = r0;
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
@@ -253,38 +248,38 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = r0;
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeArrayConstructorDescriptor(descriptor, 0);
}
void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeArrayConstructorDescriptor(descriptor, 1);
}
void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeArrayConstructorDescriptor(descriptor, -1);
}
void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
@@ -292,33 +287,29 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
}
void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r1, r2, r0 };
descriptor->register_param_count_ = 3;
@@ -329,7 +320,6 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0, r3, r1, r2 };
descriptor->register_param_count_ = 4;
@@ -339,14 +329,129 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
-void NewStringAddStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2, r1, r0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r1, r0 };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ static PlatformCallInterfaceDescriptor default_descriptor =
+ PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ static PlatformCallInterfaceDescriptor noInlineDescriptor =
+ PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { r1, // JSFunction
+ cp, // context
+ r0, // actual number of arguments
+ r2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ r2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ r2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ r0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { r0, // callee
+ r4, // call_data
+ r2, // holder
+ r1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
}
@@ -369,14 +474,13 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ASSERT(descriptor->register_param_count_ == 0 ||
r0.is(descriptor->register_params_[param_count - 1]));
// Push arguments
@@ -391,110 +495,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(r1, Operand(Smi::FromInt(0)));
- __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ pop();
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(r3, &after_sentinel);
- if (FLAG_debug_code) {
- __ cmp(r3, Operand::Zero());
- __ Assert(eq, kExpected0AsASmiSentinel);
- }
- __ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
- __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
- __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
- __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -502,11 +502,13 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public PlatformCodeStub {
public:
- ConvertToDoubleStub(Register result_reg_1,
+ ConvertToDoubleStub(Isolate* isolate,
+ Register result_reg_1,
Register result_reg_2,
Register source_reg,
Register scratch_reg)
- : result1_(result_reg_1),
+ : PlatformCodeStub(isolate),
+ result1_(result_reg_1),
result2_(result_reg_2),
source_(source_reg),
zeros_(scratch_reg) { }
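
To make the header comment concrete: the Smi 5 becomes 5.0 = 1.01b * 2^2, so the biased exponent is 1023 + 2 = 0x401 and the two result registers end up holding:

#include <cstdint>
// 5.0 in IEEE 754 binary64, split across the two 32-bit result registers.
uint32_t hi = 0x40140000;  // sign 0 | exponent 0x401 << 20 | top 20 fraction bits
uint32_t lo = 0x00000000;  // remaining 32 fraction bits
// Together: 0x4014000000000000 == 5.0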
@@ -592,20 +594,20 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
+ ASSERT(is_truncating());
int double_offset = offset();
// Account for saved regs if input is sp.
- if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+ if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
- // Immediate values for this stub fit in instructions, so it's safe to use ip.
- Register scratch = ip;
+ Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
Register scratch_low =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
Register scratch_high =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
LowDwVfpRegister double_scratch = kScratchDoubleReg;
- __ Push(scratch_high, scratch_low);
+ __ Push(scratch_high, scratch_low, scratch);
if (!skip_fastpath()) {
// Load double input.
@@ -688,17 +690,17 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ bind(&done);
- __ Pop(scratch_high, scratch_low);
+ __ Pop(scratch_high, scratch_low, scratch);
__ Ret();
}
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
- WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode(isolate);
- stub2.GetCode(isolate);
+ WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
+ WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
+ stub1.GetCode();
+ stub2.GetCode();
}
@@ -1093,7 +1095,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
- Isolate* isolate = masm->isolate();
__ bind(&lhs_not_nan);
Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
@@ -1156,7 +1157,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
+ r3);
if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
lhs,
@@ -1220,9 +1222,9 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
__ RestoreFPRegs(sp, scratch);
@@ -1231,231 +1233,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in d2, double result goes
- // into d2.
- // Tagged case: tagged input on top of stack and in r0,
- // tagged result (heap number) goes into r0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = r9;
- Register scratch1 = no_reg; // will be r4
- const Register cache_entry = r0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (tagged) {
- // Argument is a number and is on stack and in r0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(r0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ SmiToDouble(d7, r0);
- __ vmov(r2, r3, d7);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(r2, r3, d0);
- } else {
- // Input is untagged double in d2. Output goes to d2.
- __ vmov(r2, r3, d2);
- }
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- Isolate* isolate = masm->isolate();
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(isolate);
- __ mov(cache_entry, Operand(cache_array));
- // cache_entry points to cache array.
- int cache_array_index
- = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
- __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(cache_entry, Operand::Zero());
- __ b(eq, &invalid_cache);
-
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ cmp(r3, r5, eq);
- __ b(ne, &calculate);
-
- scratch1 = r4; // Start of scratch1 range.
-
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into r0.
- __ pop();
- __ mov(r0, Operand(r6));
- } else {
- // Load result into d2.
- __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- }
- __ Ret();
-
- __ bind(&calculate);
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
- if (tagged) {
- __ bind(&invalid_cache);
- ExternalReference runtime_function =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime_function, 1, 1);
- } else {
- Label no_update;
- Label skip_cache;
-
- // Call C function to calculate the result and update the cache.
- // r0: precalculated cache entry address.
- // r2 and r3: parts of the double value.
- // Store r0, r2 and r3 on stack for later before calling C function.
- __ Push(r3, r2, cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ Pop(r3, r2, cache_entry);
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
- __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
- __ Ret();
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
- __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
- __ bind(&no_update);
-
- // We return the value in d2 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-}
-
-
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- Isolate* isolate = masm->isolate();
-
- __ push(lr);
- __ PrepareCallCFunction(0, 1, scratch);
- if (masm->use_eabi_hardfloat()) {
- __ vmov(d0, d2);
- } else {
- __ vmov(r0, r1, d2);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(lr);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
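
For reference, the cache hash the removed TranscendentalCacheStub computed with eor/ASR can be written in standalone C++. A sketch, assuming >> on int32_t is an arithmetic shift (true on mainstream compilers, guaranteed since C++20), matching the ASR operands above:

    #include <cstdint>

    // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h &= cache_size - 1;
    // cache_size must be a power of two, as the removed ASSERT required.
    std::uint32_t TranscendentalHash(std::uint32_t lo, std::uint32_t hi,
                                     std::uint32_t cache_size) {
      std::int32_t h = static_cast<std::int32_t>(lo ^ hi);
      h ^= h >> 16;  // arithmetic shifts, as in the ARM code
      h ^= h >> 8;
      return static_cast<std::uint32_t>(h) & (cache_size - 1);
    }
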
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = r1;
const Register exponent = r2;
@@ -1565,13 +1342,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
@@ -1618,11 +1395,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ vcvt_f64_s32(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1639,13 +1416,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
@@ -1667,79 +1444,63 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
- CEntryStub save_doubles(1, mode);
- StoreBufferOverflowStub stub(mode);
+ CEntryStub save_doubles(isolate, 1, mode);
+ StoreBufferOverflowStub stub(isolate, mode);
// These stubs might already be in the snapshot; detect that and don't
// regenerate, since doing so would mess up the code stub initialization
// state.
Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
- save_doubles_code = *save_doubles.GetCode(isolate);
+ if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+ save_doubles_code = *save_doubles.GetCode();
}
Code* store_buffer_overflow_code;
- if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
- store_buffer_overflow_code = *stub.GetCode(isolate);
+ if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
+ store_buffer_overflow_code = *stub.GetCode();
}
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, value, Operand(0xf));
- __ cmp(scratch, Operand(0xf));
- __ b(eq, oom_label);
-}
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function.
+ // r0: number of arguments including receiver
+ // r1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // r0: result parameter for PerformGC, if any
- // r4: number of arguments including receiver (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to the first argument (C callee-saved)
- Isolate* isolate = masm->isolate();
-
- if (do_gc) {
- // Passing r0.
- __ PrepareCallCFunction(2, 0, r1);
- __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ CallCFunction(ExternalReference::perform_gc_function(isolate),
- 2, 0);
- }
+ __ mov(r5, Operand(r1));
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ mov(r0, Operand(scope_depth));
- __ ldr(r1, MemOperand(r0));
- __ add(r1, r1, Operand(1));
- __ str(r1, MemOperand(r0));
- }
+ // Compute the argv pointer in a callee-saved register.
+ __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r1, r1, Operand(kPointerSize));
- // Call C built-in.
- // r0 = argc, r1 = argv
- __ mov(r0, Operand(r4));
- __ mov(r1, Operand(r6));
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles_);
+
+ // Store a copy of argc in callee-saved registers for later.
+ __ mov(r4, Operand(r0));
+
+ // r0, r4: number of arguments including receiver (C callee-saved)
+ // r1: pointer to the first argument (C callee-saved)
+ // r5: pointer to builtin function (C callee-saved)
+
+ // Result returned in r0 or r0+r1 by default.
#if V8_HOST_ARCH_ARM
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
@@ -1757,7 +1518,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
#endif
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+ // Call C built-in.
+ // r0 = argc, r1 = argv
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -1769,162 +1532,74 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
{
// Prevent literal pool emission before return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
- masm->add(lr, pc, Operand(4));
+ __ add(lr, pc, Operand(4));
__ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
+ __ Call(r5);
}
__ VFPEnsureFPSCRState(r2);
- if (always_allocate) {
- // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
- // though (contain the result).
- __ mov(r2, Operand(scope_depth));
- __ ldr(r3, MemOperand(r2));
- __ sub(r3, r3, Operand(1));
- __ str(r3, MemOperand(r2));
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &okay);
+ __ stop("The hole escaped");
+ __ bind(&okay);
}
- // check for failure result
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- // Lower 2 bits of r2 are 0 iff r0 has failure tag.
- __ add(r2, r0, Operand(1));
- __ tst(r2, Operand(kFailureTagMask));
- __ b(eq, &failure_returned);
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ CompareRoot(r0, Heap::kExceptionRootIndex);
+ __ b(eq, &exception_returned);
+
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception; otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ mov(r2, Operand(pending_exception_address));
+ __ ldr(r2, MemOperand(r2));
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ // Cannot use Check() here, as it attempts to generate a call into the runtime.
+ __ b(eq, &okay);
+ __ stop("Unexpected pending exception");
+ __ bind(&okay);
+ }
// Exit C frame and return.
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- // Callee-saved register r4 still holds argc.
+ // Callee-saved register r4 still holds argc.
__ LeaveExitFrame(save_doubles_, r4, true);
__ mov(pc, lr);
- // check if we should retry or throw exception
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ b(eq, &retry);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
+ // Handling of exception.
+ __ bind(&exception_returned);
// Retrieve the pending exception.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ ldr(r0, MemOperand(ip));
-
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
+ __ mov(r2, Operand(pending_exception_address));
+ __ ldr(r0, MemOperand(r2));
// Clear the pending exception.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r3, MemOperand(ip));
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ str(r3, MemOperand(r2));
// Special handling of termination exceptions, which are uncatchable
// by JavaScript code.
- __ cmp(r0, Operand(isolate->factory()->termination_exception()));
- __ b(eq, throw_termination_exception);
+ Label throw_termination_exception;
+ __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
+ __ b(eq, &throw_termination_exception);
// Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
-}
-
-
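
The failure-tag test that the removed GenerateCore used relies on a small bit trick worth spelling out: with kFailureTag == 3 and kFailureTagMask == 3 (per the old STATIC_ASSERTs), a tagged failure word ends in binary 11, so adding 1 clears its low two bits. A standalone sketch of the removed check:

    #include <cstdint>

    // "Lower 2 bits of (word + 1) are 0 iff word carries the failure tag."
    bool HasFailureTag(std::uint32_t word) {
      return ((word + 1) & 3u) == 0;  // ...11 + 1 -> ...00
    }
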
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // r0: number of arguments including receiver
- // r1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Result returned in r0 or r0+r1 by default.
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Compute the argv pointer in a callee-saved register.
- __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ sub(r6, r6, Operand(kPointerSize));
-
- // Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
-
- // Set up argc and the builtin function in callee-saved registers.
- __ mov(r4, Operand(r0));
- __ mov(r5, Operand(r1));
-
- // r4: number of arguments (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to first argument (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE32));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, r0, ip, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r0, MemOperand(r2));
- // Fall through to the next label.
+ __ Throw(r0);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(r0);
-
- __ bind(&throw_normal_exception);
- __ Throw(r0);
}
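
One detail of the rewritten CEntryStub::Generate worth spelling out is the argv computation: with argc words on the stack (receiver included), the first argument sits one word below sp + argc * kPointerSize. In plain C++ pointer arithmetic (illustrative only, not V8 code):

    #include <cstdint>

    // r1 = sp + (r0 << kPointerSizeLog2) - kPointerSize, as computed above.
    std::uintptr_t* ComputeArgv(std::uintptr_t* sp, int argc) {
      return sp + argc - 1;  // pointer arithmetic scales by the word size
    }
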
@@ -1967,22 +1642,26 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r2: receiver
// r3: argc
// r4: argv
- Isolate* isolate = masm->isolate();
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r8, Operand(Smi::FromInt(marker)));
+ if (FLAG_enable_ool_constant_pool) {
+ __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
+ }
+ __ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+ Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ ldr(r5, MemOperand(r5));
__ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- __ Push(ip, r8, r6, r5);
+ __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
+ (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
+ ip.bit());
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ ldr(r6, MemOperand(r5));
__ cmp(r6, Operand::Zero());
@@ -2012,10 +1691,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// fp will be invalid because the PushTryHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
}
__ str(r0, MemOperand(ip));
- __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ LoadRoot(r0, Heap::kExceptionRootIndex);
__ b(&exit);
// Invoke: Link this frame into the handler chain. There's only one
@@ -2029,9 +1708,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ mov(r5, Operand(isolate->factory()->the_hole_value()));
+ __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
__ str(r5, MemOperand(ip));
// Invoke the function by calling through JS entry trampoline builtin.
@@ -2046,23 +1725,17 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r4: argv
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
+ isolate());
__ mov(ip, Operand(construct_entry));
} else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
__ mov(ip, Operand(entry));
}
__ ldr(ip, MemOperand(ip)); // deref address
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc. We block literal pool
- // emission for the same reason.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
+ // Branch and link to JSEntryTrampoline.
+ __ Call(ip);
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -2081,7 +1754,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore the top frame descriptors from the stack.
__ pop(r3);
__ mov(ip,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+ Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ str(r3, MemOperand(ip));
// Reset the stack to the callee saved registers.
@@ -2107,8 +1780,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// * function: r1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register r4.
+// In this case the offset to the inline site to patch is passed in r5.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
@@ -2167,14 +1839,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
- // The offset was stored in r4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ LoadFromSafepointRegisterSlot(scratch, r4);
- __ sub(inline_site, lr, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
+ // The offset was stored in r5
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register offset = r5;
+ __ sub(inline_site, lr, offset);
+ // Get the map location in r5 and patch it.
+ __ GetRelocatedValueLocation(inline_site, offset);
+ __ ldr(offset, MemOperand(offset));
+ __ str(map, FieldMemOperand(offset, Cell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -2241,7 +1913,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
// Null is not instance of anything.
- __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
+ __ cmp(scratch, Operand(isolate()->factory()->null_value()));
__ b(ne, &object_not_null);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -2267,7 +1939,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r0, r1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
@@ -2288,7 +1960,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
+ __ cmp(r0, Operand(isolate()->factory()->prototype_string()));
__ b(ne, &miss);
receiver = r1;
} else {
@@ -2309,108 +1981,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
-
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -----------------------------------
- __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r2;
- value = r0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : key
- // -----------------------------------
- receiver = r1;
- value = r0;
- }
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
- __ b(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
Register InstanceofStub::left() { return r0; }
@@ -2468,7 +2038,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
@@ -2488,11 +2058,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ str(r3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
@@ -2546,7 +2116,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ add(r9, r9, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
+ __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
@@ -2555,7 +2125,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r2 = argument count (tagged)
// Get the arguments boilerplate from the current native context into r4.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
@@ -2591,7 +2161,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
// it will point to the backing store.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
// r0 = address of new object (tagged)
@@ -2606,7 +2176,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(r3, r4, LeaveCC, eq);
__ b(eq, &skip_parameter_map);
- __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
__ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
__ add(r6, r1, Operand(Smi::FromInt(2)));
__ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
@@ -2636,7 +2206,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r1 = mapping index (tagged)
// r3 = address of backing store (tagged)
// r4 = address of parameter map (tagged), which is also the address of new
- // object + Heap::kArgumentsObjectSize (tagged)
+ // object + Heap::kSloppyArgumentsObjectSize (tagged)
// r0 = temporary scratch (a.o., for address calculation)
// r5 = the hole value
__ jmp(&parameters_test);
@@ -2654,7 +2224,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ b(ne, &parameters_loop);
// Restore r0 = new object (tagged)
- __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+ __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
__ bind(&skip_parameter_map);
// r0 = address of new object (tagged)
@@ -2692,7 +2262,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
@@ -2727,7 +2297,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ b(eq, &add_arguments_object);
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ Allocate(r1, r0, r2, r3, &runtime,
@@ -2737,7 +2307,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ ldr(r4, MemOperand(r4, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
@@ -2758,7 +2328,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
@@ -2786,7 +2356,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
}
@@ -2795,7 +2365,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2820,11 +2390,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Register last_match_info_elements = no_reg; // will be r6;
// Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
__ cmp(r0, Operand::Zero());
@@ -2928,8 +2497,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
// The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ b(ne, &external_string); // Go to (7).
// (5) Sequential string. Load regexp code according to encoding.
@@ -2966,7 +2535,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 9;
@@ -2977,7 +2546,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Arguments are before that on the stack or in registers.
// Argument 9 (sp[20]): Pass current isolate address.
- __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
__ str(r0, MemOperand(sp, 5 * kPointerSize));
// Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
@@ -2999,7 +2568,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 5 (sp[4]): static offsets vector buffer.
__ mov(r0,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
+ Operand(ExternalReference::address_of_static_offsets_vector(
+ isolate())));
__ str(r0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data and
@@ -3030,7 +2600,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Locate the code entry and call it.
__ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, r6);
__ LeaveExitFrame(false, no_reg, true);
@@ -3057,9 +2627,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(isolate->factory()->the_hole_value()));
+ __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
__ ldr(r0, MemOperand(r2, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
@@ -3079,7 +2649,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure and exception return null.
- __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
+ __ mov(r0, Operand(isolate()->factory()->null_value()));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -3141,7 +2711,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
+ ExternalReference::address_of_static_offsets_vector(isolate());
__ mov(r2, Operand(address_of_static_offsets_vector));
// r1: number of capture registers
@@ -3170,7 +2740,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3213,290 +2783,255 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- Factory* factory = masm->isolate()->factory();
-
- __ ldr(r1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(r1, &slowcase);
- __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
- __ b(hi, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ SmiUntag(r5, r1);
- __ add(r2, r5, Operand(objects_size));
- __ Allocate(
- r2, // In: Size, in words.
- r0, // Out: Start of allocation (tagged).
- r3, // Scratch register.
- r4, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // r0: Start of allocated area, object-tagged.
- // r1: Number of elements in array, as smi.
- // r5: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(factory->empty_fixed_array()));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ ldr(r1, MemOperand(sp, kPointerSize * 0));
- __ ldr(r2, MemOperand(sp, kPointerSize * 1));
- __ ldr(r6, MemOperand(sp, kPointerSize * 2));
- __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
- __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
- __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // r0: JSArray, tagged.
- // r3: FixedArray, tagged.
- // r5: Number of elements in array, untagged.
-
- // Set map.
- __ mov(r2, Operand(factory->fixed_array_map()));
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ SmiTag(r6, r5);
- __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with undefined.
- // r0: JSArray, tagged.
- // r2: undefined.
- // r3: Start of elements in FixedArray.
- // r5: Number of elements to fill.
- Label loop;
- __ cmp(r5, Operand::Zero());
- __ bind(&loop);
- __ b(le, &done); // Jump if r5 is negative or zero.
- __ sub(r5, r5, Operand(1), SetCC);
- __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
- __ jmp(&loop);
-
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
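
The removed fast path sized its single allocation as the untagged element count plus the combined object headers. A sketch of that arithmetic, with the layout constants passed as parameters since their real values come from V8's object definitions:

    // objects_size = (JSRegExpResult::kSize + FixedArray::kHeaderSize) /
    //                kPointerSize; total words = untagged length + objects_size.
    int AllocationSizeInWords(int num_elements, int regexp_result_size,
                              int fixed_array_header_size, int pointer_size) {
      int objects_size =
          (regexp_result_size + fixed_array_header_size) / pointer_size;
      return num_elements + objects_size;  // r2 = r5 + objects_size above
    }
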
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : Feedback vector
+ // r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
+ // Load the cache state into r4.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmp(r3, r1);
+ __ cmp(r4, r1);
__ b(eq, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in ecx.
- __ ldr(r5, FieldMemOperand(r3, 0));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &miss);
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the
+ // megamorphic sentinel, then we have in the slot either some other
+ // function or an AllocationSite. Do a map check on the object in r4.
+ __ ldr(r5, FieldMemOperand(r4, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &megamorphic);
- __ jmp(&done);
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &megamorphic);
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object, so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
+ // An uninitialized cache is patched with the function.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &not_array_function);
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &not_array_function);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(r0);
- __ Push(r2, r1, r0);
+ // The target function is the Array constructor. Create an
+ // AllocationSite if we don't already have it, and store it in the
+ // slot.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(r0);
+ __ Push(r3, r2, r1, r0);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ __ CallStub(&create_stub);
+
+ __ Pop(r3, r2, r1, r0);
+ __ SmiUntag(r0);
+ }
+ __ b(&done);
- __ Pop(r2, r1, r0);
- __ SmiUntag(r0);
+ __ bind(&not_array_function);
}
- __ b(&done);
- __ bind(&not_array_function);
- __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r1, MemOperand(r4, 0));
+
+ __ Push(r4, r2, r1);
+ __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(r4, r2, r1);
__ bind(&done);
}
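
Abstracting away the assembly, GenerateRecordCallTarget implements a three-state machine per feedback slot. A standalone sketch with an illustrative encoding (not V8's actual sentinel representation):

    #include <cstdint>

    // Illustrative encoding only: 0 = uninitialized sentinel, 1 = megamorphic
    // sentinel, any other value = a recorded target (or AllocationSite).
    using Slot = std::uintptr_t;
    constexpr Slot kUninitialized = 0;
    constexpr Slot kMegamorphic = 1;

    Slot UpdateCallTarget(Slot current, Slot target) {
      if (current == target || current == kMegamorphic)
        return current;        // monomorphic hit, or already generic
      if (current == kUninitialized)
        return target;         // first target seen: go monomorphic
      return kMegamorphic;     // second distinct target: go generic
    }
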
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // r1 : the function to call
- // r2 : cache cell for call target
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(ne, &call);
- // Patch the receiver on the stack with the global receiver object.
- __ ldr(r3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
- }
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, cont);
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &non_function);
- // Get the map of the function object.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- }
+ // Do not transform the receiver for natives (compiler hints already in r4).
+ __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, cont);
+}
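
EmitContinueIfStrictOrNative tests flags in a Smi-tagged compiler-hints word, which is why each bit index is offset by kSmiTagSize (1 on ARM). A sketch of the predicate, with the bit positions passed in because the real values live in SharedFunctionInfo:

    #include <cstdint>

    // True if the strict-mode or the native flag is set in a Smi-tagged
    // hints word (kSmiTagSize == 1 assumed, as on ARM).
    bool IsStrictOrNative(std::uint32_t smi_tagged_hints,
                          int strict_bit, int native_bit) {
      std::uint32_t mask =
          (1u << (strict_bit + 1)) | (1u << (native_bit + 1));
      return (smi_tagged_hints & mask) != 0;
    }
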
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(eq, &call_as_function);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
- }
+static void EmitSlowCase(MacroAssembler* masm,
+ int argc,
+ Label* non_function) {
// Check for function proxy.
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function);
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(ne, non_function);
__ push(r1); // put proxy as additional argument
- __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ mov(r0, Operand(argc + 1, RelocInfo::NONE32));
__ mov(r2, Operand::Zero());
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_METHOD);
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
- __ bind(&non_function);
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Set up the number of arguments.
+ __ bind(non_function);
+ __ str(r1, MemOperand(sp, argc * kPointerSize));
+ __ mov(r0, Operand(argc)); // Set up the number of arguments.
__ mov(r2, Operand::Zero());
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(r5, CALL_AS_METHOD);
+ __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(r1);
+ }
+ __ str(r0, MemOperand(sp, argc * kPointerSize));
+ __ jmp(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // r1 : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function is really a JavaScript function.
+ // r1: pushed function (to be verified)
+ __ JumpIfSmi(r1, &non_function);
+
+ // Go to the slow case if we do not have a function.
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ b(ne, &slow);
+ }
+
+ // Fast-case: Invoke the function now.
+ // r1: pushed function
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ ldr(r3, MemOperand(sp, argc * kPointerSize));
+
+ if (needs_checks) {
+ __ JumpIfSmi(r3, &wrap);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into r2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by r3 + 1.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into r2, or undefined.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(eq, &feedback_register_initialized);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(r2, r5);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = r3;
+ Register jmp_reg = r4;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3504,25 +3039,162 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
- // r3: object type
+ // r4: object type
Label do_call;
__ bind(&slow);
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
__ bind(&non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
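
A note on the Operand::PointerOffsetFromSmiKey idiom used above to index the feedback vector: on 32-bit ARM a Smi is the integer value shifted left by one, so a single further shift turns the Smi-tagged slot index into a byte offset. A self-contained sketch, assuming the 32-bit Smi layout of this era:

#include <cassert>
#include <cstdint>

int32_t PointerOffsetFromSmiKey(int32_t smi_key) {
  const int kSmiTagSize = 1;       // assumed: 31-bit Smis, tag bit is 0
  const int kPointerSizeLog2 = 2;  // assumed: 4-byte pointers
  return smi_key << (kPointerSizeLog2 - kSmiTagSize);
}

int main() {
  int32_t index = 3;
  int32_t smi = index << 1;  // Smi-tag the slot index
  assert(PointerOffsetFromSmiKey(smi) == index * 4);  // byte offset into vector
  return 0;
}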
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(vector, FieldMemOperand(vector,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(vector, FieldMemOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
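
EmitLoadTypeFeedbackVector is three dependent loads: the JSFunction out of the frame, its SharedFunctionInfo, and finally the feedback vector. A hypothetical plain-C++ rendering, with simplified stand-ins for the real heap layouts:

struct FeedbackVector;
struct SharedFunctionInfo { FeedbackVector* feedback_vector; };
struct JSFunction { SharedFunctionInfo* shared; };

FeedbackVector* LoadTypeFeedbackVector(JSFunction* function_from_frame) {
  // The frame load supplies the function; these are the two dependent field
  // loads matching the FieldMemOperand chain above.
  return function_from_frame->shared->feedback_vector;
}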
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // r1 - function
+ // r3 - slot id
+ Label miss;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, r2);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &miss);
+
+ __ mov(r0, Operand(arg_count()));
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r2, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ // Verify that r2 contains an AllocationSite
+ __ AssertUndefinedOrAllocationSite(r2, r4);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+  // The slow case; we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unexpected code address");
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // r1 - function
+ // r3 - slot id (Smi)
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, r2);
+
+ // The checks. First, does r1 match the recorded monomorphic target?
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ __ cmp(r1, r4);
+ __ b(ne, &extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ // Compute the receiver in sloppy mode.
+ __ ldr(r3, MemOperand(sp, argc * kPointerSize));
+
+ __ JumpIfSmi(r3, &wrap);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, &wrap);
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ CompareRoot(r4, Heap::kMegamorphicSymbolRootIndex);
+ __ b(eq, &slow_start);
+ __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
+ __ b(eq, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ __ jmp(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Miss);
+
+  // The slow case.
+ __ bind(&slow_start);
+ // Check that the function is really a JavaScript function.
+ // r1: pushed function (to be verified)
+ __ JumpIfSmi(r1, &non_function);
+
+  // Go to the slow case if we do not have a function.
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ b(ne, &slow);
+ __ jmp(&have_js_function);
+}
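
The extra_checks_or_miss path is a small state machine over the feedback slot: a cached monomorphic target, the uninitialized sentinel, the megamorphic sentinel, or some other target, which forces a transition to megamorphic (unless --trace-ic is on, in which case the transition goes through the miss handler so it can be logged). A schematic of that dispatch, with plain pointers standing in for the root symbols; illustrative only:

enum class Action { kInvokeDirect, kGenericCall, kMiss };

Action DispatchCallIC(void** slot, void* callee,
                      void* uninitialized_sentinel, void* megamorphic_sentinel,
                      bool trace_ic) {
  if (*slot == callee) return Action::kInvokeDirect;          // monomorphic hit
  if (*slot == megamorphic_sentinel) return Action::kGenericCall;
  if (*slot == uninitialized_sentinel) return Action::kMiss;  // record target
  if (trace_ic) return Action::kMiss;                         // log transition
  *slot = megamorphic_sentinel;  // different target seen: go megamorphic
  return Action::kGenericCall;
}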
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ ldr(r4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(r4, r1, r2, r3);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+    // Move the result to r1 and exit the internal frame.
+ __ mov(r1, r0);
+ }
+}
+
+
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -3583,7 +3255,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -3605,7 +3277,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3654,320 +3326,48 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand::Zero());
- }
- __ b(eq, &done);
-
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex));
-  // Perform the sub between the load and the dependent store to give the
-  // load time to complete.
- __ sub(count, count, Operand(1), SetCC);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
-  // Loop unless this was the last iteration.
- __ b(gt, &loop);
-
- __ bind(&done);
-}
-
-
enum CopyCharactersFlags {
COPY_ASCII = 1,
DEST_ALWAYS_ALIGNED = 2
};
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ String::Encoding encoding) {
+ if (FLAG_debug_code) {
+ // Check that destination is word aligned.
__ tst(dest, Operand(kPointerAlignmentMask));
__ Check(eq, kDestinationOfCopyNotAligned);
}
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
// Assumes word reads and writes are little endian.
// Nothing to do for zero characters.
Label done;
- if (!ascii) {
+ if (encoding == String::TWO_BYTE_ENCODING) {
__ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand::Zero());
- }
- __ b(eq, &done);
-
- // Assume that you cannot read (or write) unaligned.
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ cmp(count, Operand(8));
- __ add(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ b(lt, &byte_loop);
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
- Label dest_aligned;
- __ b(eq, &dest_aligned);
- __ cmp(scratch4, Operand(2));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
- __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
- __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
- __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
- __ bind(&dest_aligned);
}
- Label simple_loop;
+ Register limit = count; // Read until dest equals this.
+ __ add(limit, dest, Operand(count));
- __ sub(scratch4, dest, Operand(src));
- __ and_(scratch4, scratch4, Operand(0x03), SetCC);
- __ b(eq, &simple_loop);
- // Shift register is number of bits in a source word that
- // must be combined with bits in the next source word in order
- // to create a destination word.
-
- // Complex loop for src/dst that are not aligned the same way.
- {
- Label loop;
- __ mov(scratch4, Operand(scratch4, LSL, 3));
- Register left_shift = scratch4;
- __ and_(src, src, Operand(~3)); // Round down to load previous word.
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
-    // Store the "shift" most significant bits of scratch in the least
-    // significant bits (i.e., shift down by (32-shift)).
- __ rsb(scratch2, left_shift, Operand(32));
- Register right_shift = scratch2;
- __ mov(scratch1, Operand(scratch1, LSR, right_shift));
-
- __ bind(&loop);
- __ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- __ mov(scratch1, Operand(scratch3, LSR, right_shift));
- // Loop if four or more bytes left to copy.
- __ sub(scratch3, limit, Operand(dest));
- __ sub(scratch3, scratch3, Operand(4), SetCC);
- __ b(ge, &loop);
- }
- // There is now between zero and three bytes left to copy (negative that
- // number is in scratch3), and between one and three bytes already read into
- // scratch1 (eight times that number in scratch4). We may have read past
- // the end of the string, but because objects are aligned, we have not read
- // past the end of the object.
- // Find the minimum of remaining characters to move and preloaded characters
- // and write those as bytes.
- __ add(scratch3, scratch3, Operand(4), SetCC);
- __ b(eq, &done);
- __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
- // Move minimum of bytes read and bytes left to copy to scratch4.
- __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch3) characters already read into
- // scratch ready to write.
- __ cmp(scratch3, Operand(2));
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
- // Copy any remaining bytes.
- __ b(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dst, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- __ sub(scratch3, limit, Operand(dest));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
-    // Compare to 8, not 4, because we do the subtraction before increasing
-    // dest.
- __ cmp(scratch3, Operand(8));
- __ b(ge, &loop);
- }
-
- // Copy bytes from src to dst until dst hits limit.
- __ bind(&byte_loop);
+ Label loop_entry, loop;
+ // Copy bytes from src to dest until dest hits limit.
+ __ b(&loop_entry);
+ __ bind(&loop);
+  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ __ strb(scratch, MemOperand(dest, 1, PostIndex));
+ __ bind(&loop_entry);
__ cmp(dest, Operand(limit));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
- __ b(ge, &done);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ b(&byte_loop);
+ __ b(lt, &loop);
__ bind(&done);
}
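
The rewritten GenerateCopyCharacters drops the word-at-a-time fast path entirely in favor of one byte loop; for two-byte strings the character count is doubled up front, so limit is always a byte bound. A plain-C++ model of the new loop, assuming little-endian layout as the stub itself notes:

#include <cstdint>

void CopyCharacters(uint8_t* dest, const uint8_t* src,
                    int count, bool two_byte) {
  int bytes = two_byte ? count * 2 : count;  // add(count, count, count)
  const uint8_t* limit = dest + bytes;       // add(limit, dest, count)
  while (dest < limit) {                     // cmp(dest, limit); b(lt, &loop)
    *dest++ = *src++;                        // ldrb/strb with PostIndex
  }
}

The simplification presumably trades peak throughput for far less code, which fits these stubs: they copy short substrings, where the old alignment bookkeeping cost more than it saved.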
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
-  // Make sure that both characters are not digits, as such strings have a
-  // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ sub(scratch, c1, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
- __ b(hi, &not_array_index);
- __ sub(scratch, c2, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-
-  // If the check failed, combine both characters into a single halfword.
-  // This is required by the contract of the method: the code at the
-  // not_found branch expects this combination in the c1 register.
- __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
- __ b(ls, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load string table
- // Load address of first element of the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, 1));
- __ sub(mask, mask, Operand(1));
-
- // Calculate untagged address of the first element of the string table.
- Register first_string_table_element = string_table;
- __ add(first_string_table_element, string_table,
- Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_string_table_element: address of the first element of
- // the string table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the string table.
- const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- if (i > 0) {
- __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ and_(candidate, candidate, Operand(mask));
-
-    // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ ldr(candidate,
- MemOperand(first_string_table_element,
- candidate,
- LSL,
- kPointerSizeLog2));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
- __ b(ne, &is_string);
-
- __ cmp(undefined, candidate);
- __ b(eq, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(ip, candidate);
- __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole);
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(2)));
- __ b(ne, &next_probe[i]);
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
- __ cmp(chars, scratch);
- __ b(eq, &found_in_string_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ Move(r0, result);
-}
-
-
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
@@ -4052,10 +3452,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Make sure first argument is a string.
__ ldr(r0, MemOperand(sp, kStringOffset));
- // Do a JumpIfSmi, but fold its jump into the subsequent string test.
- __ SmiTst(r0);
- Condition is_string = masm->IsObjectStringType(r0, r1, ne);
- ASSERT(is_string == eq);
+ __ JumpIfSmi(r0, &runtime);
+ Condition is_string = masm->IsObjectStringType(r0, r1);
__ b(NegateCondition(is_string), &runtime);
Label single_char;
@@ -4159,7 +3557,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Handle external string.
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(r1, Operand(kShortExternalStringTag));
__ b(ne, &runtime);
__ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
@@ -4190,8 +3588,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r2: result string length
// r5: first character of substring to copy
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
- COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharacters(
+ masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
__ jmp(&return_r0);
// Allocate and copy the resulting two-byte string.
@@ -4209,18 +3607,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r2: result length.
// r5: first character of substring to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharacters(
+ masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
__ bind(&return_r0);
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
__ Drop(3);
__ Ret();
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// r0: original string
@@ -4348,7 +3746,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
// Stack frame on entry.
// sp[0]: right string
@@ -4378,363 +3776,38 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfEitherSmi(r0, r1, &call_runtime);
- // Load instance types.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ tst(r4, Operand(kIsNotStringMask));
- __ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &call_runtime);
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
-
- // Both arguments are strings.
- // r0: first string
- // r1: second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
- __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
- __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- STATIC_ASSERT(kSmiTag == 0);
- // Else test if second string is empty.
- __ cmp(r3, Operand(Smi::FromInt(0)), ne);
- __ b(ne, &strings_not_empty); // If either string was empty, return r0.
-
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&strings_not_empty);
- }
-
- __ SmiUntag(r2);
- __ SmiUntag(r3);
- // Both strings are non-empty.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ add(r6, r2, Operand(r3));
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return a string here.
- __ cmp(r6, Operand(2));
- __ b(ne, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&make_two_character_string);
-  // The resulting string has length 2, and the first characters of the two
-  // strings are combined into a single halfword in the r2 register.
-  // So we can fill the resulting string without two loops, using a single
-  // halfword store instruction (which assumes that the processor is in
-  // little-endian mode).
- __ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(r6, Operand(ConsString::kMinLength));
- __ b(lt, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r4, Operand(kStringEncodingMask));
- __ tst(r5, Operand(kStringEncodingMask), ne);
- __ b(eq, &non_ascii);
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ mov(r4, Operand(high_promotion_mode));
- __ ldr(r4, MemOperand(r4, 0));
- __ cmp(r4, Operand::Zero());
- __ b(eq, &skip_write_barrier);
-
- __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
- __ RecordWriteField(r3,
- ConsString::kFirstOffset,
- r0,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
- __ RecordWriteField(r3,
- ConsString::kSecondOffset,
- r1,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
-
- __ bind(&skip_write_barrier);
- __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
-
- __ bind(&after_writing);
-
- __ mov(r0, Operand(r3));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one byte characters.
- // r4: first instance type.
- // r5: second instance type.
- __ tst(r4, Operand(kOneByteDataHintMask));
- __ tst(r5, Operand(kOneByteDataHintMask), ne);
- __ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
- __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
- __ b(eq, &ascii_data);
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r6: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
-
- // Check whether both strings have same encoding
- __ eor(ip, r4, Operand(r5));
- ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
- __ tst(ip, Operand(kStringEncodingMask));
- __ b(ne, &call_runtime);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r6,
- r0,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &first_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r4, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r1,
- r1,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &second_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r5, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r6: first character of first string
- // r1: first character of second string
- // r2: length of first string.
- // r3: length of second string.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r5, Operand(kStringEncodingMask));
- __ b(eq, &non_ascii_string_add_flat_result);
-
- __ add(r2, r2, Operand(r3));
- __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
- __ sub(r2, r2, Operand(r3));
- __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r6: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r5: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
- // r5: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_string_add_flat_result);
- __ add(r2, r2, Operand(r3));
- __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
- __ sub(r2, r2, Operand(r3));
- __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r6: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r5: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
- // r5: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : left
+ // -- r0 : right
+ // -- lr : return address
+ // -----------------------------------
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ // Load r2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ Move(r2, handle(isolate()->heap()->undefined_value()));
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, kExpectedAllocationSite);
+ __ push(r2);
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
+ __ cmp(r2, ip);
+ __ pop(r2);
+ __ Assert(eq, kExpectedAllocationSite);
}
-}
-
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(r0);
- __ push(r1);
-}
-
-
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(r1);
- __ pop(r0);
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
- __ b(lt, &done);
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
- __ mov(arg, scratch1);
- __ str(arg, MemOperand(sp, stack_offset));
- __ bind(&done);
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
}
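
The undefined placeholder above works because of stub-template patching: the code object is generated once with undefined embedded as a constant, and GetCodeCopyFromTemplate later rewrites that embedded pointer in each copy to point at the real AllocationSite. A conceptual sketch of the mechanism; the offset lookup and types are illustrative, not the real RelocInfo machinery:

#include <cstring>
#include <vector>

// Rewrite an embedded object pointer inside a copied code blob.
void PatchEmbeddedPointer(std::vector<unsigned char>* code_copy,
                          std::size_t constant_offset, void* allocation_site) {
  std::memcpy(code_copy->data() + constant_offset,
              &allocation_site, sizeof(allocation_site));
}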
@@ -4811,9 +3884,9 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
CompareIC::GENERIC);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
@@ -4984,7 +4057,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -5036,9 +4109,9 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
@@ -5068,7 +4141,7 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
intptr_t code =
- reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ reinterpret_cast<intptr_t>(GetCode().location());
__ Move(ip, target);
__ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
__ blx(lr); // Call the stub.
@@ -5144,7 +4217,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ stm(db_w, sp, spill_mask);
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ mov(r1, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
__ cmp(r0, Operand::Zero());
__ ldm(ia_w, sp, spill_mask);
@@ -5220,7 +4293,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ Move(r0, elements);
__ Move(r1, name);
}
- NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
__ cmp(r0, Operand::Zero());
__ mov(scratch2, Operand(r2));
@@ -5324,16 +4397,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
// Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return true; // VFP2 is a base requirement for V8
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
@@ -5403,7 +4471,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -5416,13 +4484,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -5433,21 +4501,12 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
__ Move(r1, address);
- __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5601,8 +4660,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ ldr(r1, MemOperand(fp, parameter_count_offset));
@@ -5616,28 +4675,11 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
-void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ mov(r1, r0);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ ldr(r0, MemOperand(fp, parameter_count_offset));
- // The parameter count above includes the receiver for the arguments passed to
- // the deoptimization handler. Subtract the receiver for the parameter count
- // for the call.
- __ sub(r0, r0, Operand(1));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ParameterCount argument_count(r0);
- __ InvokeFunction(
- r1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
- ProfileEntryHookStub stub;
+ ProfileEntryHookStub stub(masm->isolate());
+ int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize;
+ PredictableCodeSizeScope predictable(masm, code_size);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
@@ -5683,18 +4725,18 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
#if V8_HOST_ARCH_ARM
int32_t entry_hook =
- reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
+ reinterpret_cast<int32_t>(isolate()->function_entry_hook());
__ mov(ip, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
// It additionally takes an isolate as a third parameter
- __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(ip, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
- masm->isolate())));
+ isolate())));
#endif
__ Call(ip);
@@ -5712,21 +4754,16 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
- mode);
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
- __ b(ne, &next);
- T stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq);
}
// If we reached this point there is a problem.
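
Note the shape change in the dispatch loop: instead of a compare, a branch around, a tail call, and a bound label per elements kind, TailCallStub now takes a condition code and each jump is predicated on eq directly, saving a branch per candidate. In C terms the dispatch reduces to something like:

// Conceptual rendering; kinds[] stands in for the fast-elements-kind sequence.
int SelectArrayStub(int requested_kind, const int* kinds, int count) {
  for (int i = 0; i < count; ++i) {
    if (kinds[i] == requested_kind) return i;  // __ TailCallStub(&stub, eq)
  }
  return -1;  // unreachable: the stub aborts past the loop
}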
@@ -5739,7 +4776,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // r2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// r0 - number of arguments
// r1 - constructor?
@@ -5767,48 +4804,43 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ add(r3, r3, Operand(1));
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
if (FLAG_debug_code) {
- __ ldr(r5, FieldMemOperand(r5, 0));
+ __ ldr(r5, FieldMemOperand(r2, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSiteInCell);
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ __ Assert(eq, kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r3
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ ldr(r4, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ str(r4, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
- __ b(ne, &next);
- ArraySingleArgumentConstructorStub stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq);
}
// If we reached this point there is a problem.
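
The transition-info update above leans on Smi arithmetic: the elements kind occupies the low bits of a Smi-tagged field, and every packed kind has a holey successor, so adding Smi(kFastElementsKindPackedToHoley) bumps packed to holey in place without the add carrying into the upper bits. A sketch under those assumptions (the delta of 1 is inferred from the kind ordering, not quoted from this diff):

#include <cstdint>

int32_t BumpPackedToHoley(int32_t transition_info_smi) {
  const int kFastElementsKindPackedToHoley = 1;  // assumed packed->holey delta
  return transition_info_smi + (kFastElementsKindPackedToHoley << 1);  // Smi add
}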
@@ -5821,20 +4853,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
}
}
}
@@ -5855,12 +4882,12 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate);
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
}
}
@@ -5897,46 +4924,33 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
// -- r1 : constructor
- // -- r2 : type info cell
+ // -- r2 : AllocationSite or undefined
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ tst(r3, Operand(kSmiTagMask));
+ __ tst(r4, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r4, r4, r5, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(eq, &okay_here);
- __ ldr(r3, FieldMemOperand(r2, 0));
- __ cmp(r3, Operand(cell_map));
- __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
- __ bind(&okay_here);
+ // We should either have undefined in r2 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(r2, r4);
}
Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
-
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ ldr(r4, FieldMemOperand(r3, 0));
- __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &no_info);
- __ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -5949,37 +4963,27 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
+ __ cmp(r0, Operand(1));
- __ tst(r0, r0);
- __ b(ne, &not_zero_case);
- InternalArrayNoArgumentConstructorStub stub0(kind);
- __ TailCallStub(&stub0);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0, lo);
- __ bind(&not_zero_case);
- __ cmp(r0, Operand(1));
- __ b(gt, &not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN, hi);
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument
__ ldr(r3, MemOperand(sp, 0));
__ cmp(r3, Operand::Zero());
- __ b(eq, &normal_sequence);
InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey, ne);
}
- __ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(kind);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
-
- __ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
- __ TailCallStub(&stubN);
}
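
The rewritten GenerateCase folds the old zero/one/many label structure into a single cmp against 1 followed by predicated tail calls (lo, hi, ne). Its decision table, rendered as C++ for clarity; the names are illustrative:

enum class Stub {
  kNoArgument, kSingleArgument, kSingleArgumentHoley, kNArguments
};

Stub SelectInternalArrayStub(int argc, int first_arg, bool packed_kind) {
  if (argc < 1) return Stub::kNoArgument;  // TailCallStub(&stub0, lo)
  if (argc > 1) return Stub::kNArguments;  // TailCallStub(&stubN, hi)
  if (packed_kind && first_arg != 0)       // a nonzero length needs holes
    return Stub::kSingleArgumentHoley;     // TailCallStub(&stub1_holey, ne)
  return Stub::kSingleArgument;
}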
@@ -6010,7 +5014,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// but the following bit field extraction takes care of that anyway.
__ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ __ DecodeField<Map::ElementsKindBits>(r3);
if (FLAG_debug_code) {
Label done;
@@ -6032,6 +5036,151 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : callee
+ // -- r4 : call_data
+ // -- r2 : holder
+ // -- r1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+  //  -- sp[(argc - 1) * 4] : first argument
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ Register callee = r0;
+ Register call_data = r4;
+ Register holder = r2;
+ Register api_function_address = r1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // context save
+ __ push(context);
+ // load context from callee
+ __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ // isolate
+ __ mov(scratch,
+ Operand(ExternalReference::isolate_address(isolate())));
+ __ push(scratch);
+ // holder
+ __ push(holder);
+
+ // Prepare arguments.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
+ // r0 = FunctionCallbackInfo&
+  // The arguments structure is after the return address.
+ __ add(r0, sp, Operand(1 * kPointerSize));
+ // FunctionCallbackInfo::implicit_args_
+ __ str(scratch, MemOperand(r0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ mov(ip, Operand(argc));
+ __ str(ip, MemOperand(r0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ mov(ip, Operand::Zero());
+ __ str(ip, MemOperand(r0, 3 * kPointerSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first js argument
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ return_value_operand,
+ &context_restore_operand);
+}
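
The four words written through r0 above form the FunctionCallbackInfo block that the C++ API callback reads. A sketch of that layout as a struct; the field names are assumed from the order of the stores, not taken from the real v8 internal definition:

#include <cstdint>

struct FunctionCallbackInfoModel {
  void** implicit_args;       // scratch: base of the 7 FCA slots pushed above
  void** values;              // implicit_args + (kArgsLength - 1 + argc) words
  int32_t length;             // argc
  int32_t is_construct_call;  // always 0 for these stubs
};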
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- r2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = r2;
+
+ __ mov(r0, sp); // r0 = Handle<Name>
+ __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // r1 (internal::Object** args_) as the data.
+ __ str(r1, MemOperand(sp, 1 * kPointerSize));
+ __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
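A minimal sketch of the C++ getter this stub dispatches to (getter name and
return value assumed; signature per the public API of this release). r0 points
at the Handle<Name> on sp[0] and r1 at the AccessorInfo built above it:

    static void MyGetter(v8::Local<v8::String> property,
                         const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(42);
    }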
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/arm/code-stubs-arm.h b/chromium/v8/src/arm/code-stubs-arm.h
index e4006861df0..5dde3372dd1 100644
--- a/chromium/v8/src/arm/code-stubs-arm.h
+++ b/chromium/v8/src/arm/code-stubs-arm.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
-#include "ic-inl.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
@@ -37,34 +14,10 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) {}
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
@@ -81,49 +34,17 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
- bool ascii);
+ String::Encoding encoding);
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- int flags);
-
-
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
@@ -142,35 +63,9 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
-
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm);
-
- const StringAddFlags flags_;
-};
-
-
class SubStringStub: public PlatformCodeStub {
public:
- SubStringStub() {}
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() { return SubString; }
@@ -183,7 +78,7 @@ class SubStringStub: public PlatformCodeStub {
class StringCompareStub: public PlatformCodeStub {
public:
- StringCompareStub() { }
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
// Compares two flat ASCII strings and returns result in r0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -223,10 +118,12 @@ class StringCompareStub: public PlatformCodeStub {
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
- WriteInt32ToHeapNumberStub(Register the_int,
+ WriteInt32ToHeapNumberStub(Isolate* isolate,
+ Register the_int,
Register the_heap_number,
Register scratch)
- : the_int_(the_int),
+ : PlatformCodeStub(isolate),
+ the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch) { }
@@ -256,12 +153,14 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
class RecordWriteStub: public PlatformCodeStub {
public:
- RecordWriteStub(Register object,
+ RecordWriteStub(Isolate* isolate,
+ Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
- : object_(object),
+ : PlatformCodeStub(isolate),
+ object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
@@ -403,7 +302,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
@@ -442,7 +341,7 @@ class RecordWriteStub: public PlatformCodeStub {
// moved by GC
class DirectCEntryStub: public PlatformCodeStub {
public:
- DirectCEntryStub() {}
+ explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
@@ -458,7 +357,8 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+ NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+ : PlatformCodeStub(isolate), mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -504,6 +404,18 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
};
+struct PlatformCallInterfaceDescriptor {
+ explicit PlatformCallInterfaceDescriptor(
+ TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) { }
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
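A minimal usage sketch, not from the patch (the enum value is assumed to come
from assembler-arm.h). The descriptor is a plain value type that tells ARM
code generation how a call's target address may be stored:

    PlatformCallInterfaceDescriptor descriptor(CAN_INLINE_TARGET_ADDRESS);
    TargetAddressStorageMode mode = descriptor.storage_mode();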
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/chromium/v8/src/arm/codegen-arm.cc b/chromium/v8/src/arm/codegen-arm.cc
index 238d34ed27e..7835a6bd080 100644
--- a/chromium/v8/src/arm/codegen-arm.cc
+++ b/chromium/v8/src/arm/codegen-arm.cc
@@ -1,54 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "codegen.h"
-#include "macro-assembler.h"
-#include "simulator-arm.h"
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/arm/simulator-arm.h"
namespace v8 {
namespace internal {
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- return NULL;
-}
-
-
#define __ masm.
@@ -62,10 +27,10 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -113,14 +78,11 @@ UnaryMathFunction CreateExpFunction() {
}
#if defined(V8_HOST_ARCH_ARM)
-OS::MemCopyUint8Function CreateMemCopyUint8Function(
- OS::MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
- if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
- return stub;
- }
+ if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
@@ -265,20 +227,18 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
+ return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
// Convert 8 to 16. The number of characters to copy must be at least 8.
-OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- OS::MemCopyUint16Uint8Function stub) {
+MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
+ MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
- if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
- return stub;
- }
+ if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
@@ -354,18 +314,38 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
+ return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif
-#undef __
+UnaryMathFunction CreateSqrtFunction() {
+#if defined(USE_SIMULATOR)
+ return &std::sqrt;
+#else
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::sqrt;
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-UnaryMathFunction CreateSqrtFunction() {
- return &sqrt;
+ __ MovFromFloatParameter(d0);
+ __ vsqrt(d0, d0);
+ __ MovToFloatResult(d0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
}
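A usage sketch (call site assumed, not from the patch): the returned pointer
behaves like a plain C function of type double(double). On hardware it runs
the short vsqrt stub generated above; under the simulator it is simply
&std::sqrt:

    UnaryMathFunction fast_sqrt = CreateSqrtFunction();
    double root = fast_sqrt(2.0);  // ~1.4142135623730951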
+#undef __
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -728,7 +708,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(result, Operand(kShortExternalStringMask));
__ b(ne, call_runtime);
__ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
@@ -836,47 +816,51 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
+#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
+#endif
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- PredictableCodeSizeScope scope(patcher.masm(), *length);
- patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- patcher.masm()->nop(ip.code());
- patcher.masm()->add(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- initialized = true;
- }
- return byte_sequence;
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  // Since the patcher is a large object, allocate it dynamically when needed,
+  // to avoid overflowing the stack in stress conditions.
+  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+  // the process, before the ARM simulator's ICache is set up.
+ SmartPointer<CodePatcher> patcher(
+ new CodePatcher(young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
+ PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+ patcher->masm()->PushFixedFrame(r1);
+ patcher->masm()->nop(ip.code());
+ patcher->masm()->add(
+ fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
+ if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+ sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
@@ -887,10 +871,9 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
- CopyBytes(sequence, young_sequence, young_length);
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
diff --git a/chromium/v8/src/arm/codegen-arm.h b/chromium/v8/src/arm/codegen-arm.h
index ecbe64cbad3..9ec09583d97 100644
--- a/chromium/v8/src/arm/codegen-arm.h
+++ b/chromium/v8/src/arm/codegen-arm.h
@@ -1,83 +1,19 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-#include "ast.h"
-#include "ic-inl.h"
+#include "src/ast.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- explicit CodeGenerator(Isolate* isolate) {
- InitializeAstVisitor(isolate);
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
diff --git a/chromium/v8/src/arm/constants-arm.cc b/chromium/v8/src/arm/constants-arm.cc
index 7d59a84b1d3..d00a09f568c 100644
--- a/chromium/v8/src/arm/constants-arm.cc
+++ b/chromium/v8/src/arm/constants-arm.cc
@@ -1,35 +1,12 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "constants-arm.h"
+#include "src/arm/constants-arm.h"
namespace v8 {
@@ -51,7 +28,7 @@ double Instruction::DoubleImmedVmov() const {
uint64_t imm = high16 << 48;
double d;
- OS::MemCopy(&d, &imm, 8);
+ memcpy(&d, &imm, 8);
return d;
}
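A standalone sketch of the same decode (hypothetical helper, not from the
patch). VFP vmov immediates only populate the top 16 bits of the double, so
the 64-bit pattern is the high half shifted into place:

    #include <cstdint>
    #include <cstring>
    double DecodeVmovImmediate(uint64_t high16) {
      uint64_t imm = high16 << 48;
      double d;
      memcpy(&d, &imm, sizeof d);  // type-pun without breaking strict aliasing
      return d;
    }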
diff --git a/chromium/v8/src/arm/constants-arm.h b/chromium/v8/src/arm/constants-arm.h
index 78bb66c49fe..0847ea17cac 100644
--- a/chromium/v8/src/arm/constants-arm.h
+++ b/chromium/v8/src/arm/constants-arm.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
@@ -112,8 +89,8 @@ inline Condition NegateCondition(Condition cond) {
}
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cond) {
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cond) {
switch (cond) {
case lo:
return hi;
@@ -133,7 +110,7 @@ inline Condition ReverseCondition(Condition cond) {
return ge;
default:
return cond;
- };
+ }
}
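A worked example (the branch calls mirror usage seen elsewhere in this patch):
commuting pairs an operand swap with the matching condition, so both sequences
branch under exactly the same inputs. For unsigned comparisons,
(a lo b) == (b hi a):

    __ cmp(r0, r1); __ b(lo, &less);                    // branch if r0 < r1
    __ cmp(r1, r0); __ b(CommuteCondition(lo), &less);  // same test, via hi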
@@ -343,7 +320,7 @@ enum NeonSize {
Neon8 = 0x0,
Neon16 = 0x1,
Neon32 = 0x2,
- Neon64 = 0x4
+ Neon64 = 0x3
};
// -----------------------------------------------------------------------------
diff --git a/chromium/v8/src/arm/cpu-arm.cc b/chromium/v8/src/arm/cpu-arm.cc
index cf531e1292b..4ff82a78d42 100644
--- a/chromium/v8/src/arm/cpu-arm.cc
+++ b/chromium/v8/src/arm/cpu-arm.cc
@@ -1,69 +1,43 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// CPU-specific code for ARM that is independent of the OS goes here.
#ifdef __arm__
+#ifdef __QNXNTO__
+#include <sys/mman.h> // for cache flushing.
+#undef MAP_TYPE
+#else
#include <sys/syscall.h> // for cache flushing.
#endif
+#endif
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "cpu.h"
-#include "macro-assembler.h"
-#include "simulator.h" // for cache flushing.
+#include "src/cpu.h"
+#include "src/macro-assembler.h"
+#include "src/simulator.h" // for cache flushing.
namespace v8 {
namespace internal {
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(VFP3);
-}
-
-
void CPU::FlushICache(void* start, size_t size) {
  // Nothing to do when flushing zero instructions.
if (size == 0) {
return;
}
-#if defined (USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
  // Not generating ARM instructions for C code. This means that we are
  // building an ARM emulator-based target. We should notify the simulator
  // that the ICache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+#elif V8_OS_QNX
+ msync(start, size, MS_SYNC | MS_INVALIDATE_ICACHE);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
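A sketch of the QNX path above (msync() and its flags come from <sys/mman.h>;
buffer and size are assumed): one call both flushes the data cache and
invalidates the instruction cache for freshly written code:

    msync(buffer, size, MS_SYNC | MS_INVALIDATE_ICACHE);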
diff --git a/chromium/v8/src/arm/debug-arm.cc b/chromium/v8/src/arm/debug-arm.cc
index efd11069b32..e5460f54903 100644
--- a/chromium/v8/src/arm/debug-arm.cc
+++ b/chromium/v8/src/arm/debug-arm.cc
@@ -1,41 +1,17 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "codegen.h"
-#include "debug.h"
+#include "src/codegen.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_DEBUGGER_SUPPORT
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
@@ -56,7 +32,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(
- debug_info_->GetIsolate()->debug()->debug_break_return()->entry());
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry());
patcher.masm()->bkpt(0);
}
@@ -97,7 +73,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(
- debug_info_->GetIsolate()->debug()->debug_break_slot()->entry());
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry());
}
@@ -107,8 +83,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
Assembler::kDebugBreakSlotInstructions);
}
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
#define __ ACCESS_MASM(masm)
@@ -117,7 +91,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@@ -146,7 +120,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ mov(r0, Operand::Zero()); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
- CEntryStub ceb(1);
+ CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
@@ -172,14 +146,24 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
+ ExternalReference::debug_after_break_target_address(masm->isolate());
__ mov(ip, Operand(after_break_target));
__ ldr(ip, MemOperand(ip));
__ Jump(ip);
}
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- r1 : function
+ // -- r3 : slot in feedback array (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r3.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
// ----------- S t a t e -------------
// -- r2 : name
@@ -193,7 +177,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc).
// ----------- S t a t e -------------
// -- r0 : value
@@ -207,7 +191,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -216,7 +200,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -226,7 +210,7 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- r0 : value
@@ -235,16 +219,7 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC call (from ic-arm.cc)
- // ----------- S t a t e -------------
- // -- r2 : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r2.bit(), 0);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
@@ -252,7 +227,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
@@ -261,17 +236,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm.cc).
- // ----------- S t a t e -------------
- // -- r1 : function
- // -- r2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
@@ -281,18 +246,20 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
}
-void Debug::GenerateSlot(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
  // Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
Assembler::BlockConstPoolScope block_const_pool(masm);
@@ -307,29 +274,26 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
}
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0);
}
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
}
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
}
-const bool Debug::kFrameDropperSupported = false;
-
-#undef __
-
+const bool LiveEdit::kFrameDropperSupported = false;
-#endif // ENABLE_DEBUGGER_SUPPORT
+#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/arm/deoptimizer-arm.cc b/chromium/v8/src/arm/deoptimizer-arm.cc
index 6031499dbd1..128819630b6 100644
--- a/chromium/v8/src/arm/deoptimizer-arm.cc
+++ b/chromium/v8/src/arm/deoptimizer-arm.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
@@ -50,13 +27,36 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->bkpt(0);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->bkpt(0);
+ }
+ }
+
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
@@ -64,7 +64,8 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes =
- MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
+ MacroAssembler::CallSizeNotPredictableCodeSize(isolate,
+ deopt_entry,
RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
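A worked instance of the size bookkeeping above (the 12-byte call sequence is
an assumption for illustration, e.g. a movw/movt/blx triple): with ARM's
4-byte kInstrSize the patcher reserves exactly three instruction slots:

    int call_size_in_bytes = 12;
    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;  // 3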
@@ -127,11 +128,6 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
-Code* Deoptimizer::NotifyStubFailureBuiltin() {
- return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
-}
-
-
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -350,6 +346,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/arm/disasm-arm.cc b/chromium/v8/src/arm/disasm-arm.cc
index 49e4126b326..48f77b4bb27 100644
--- a/chromium/v8/src/arm/disasm-arm.cc
+++ b/chromium/v8/src/arm/disasm-arm.cc
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// A Disassembler object is used to disassemble a block of code instruction by
// instruction. The default implementation of the NameConverter object can be
@@ -51,14 +28,14 @@
#include <stdarg.h>
#include <string.h>
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "constants-arm.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "platform.h"
+#include "src/arm/constants-arm.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/platform.h"
namespace v8 {
@@ -230,15 +207,15 @@ void Decoder::PrintShiftRm(Instruction* instr) {
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[shift_index],
- shift_amount);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[shift_index],
+ shift_amount);
} else {
// by register
int rs = instr->RsValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s ", shift_names[shift_index]);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s ", shift_names[shift_index]);
PrintRegister(rs);
}
}
@@ -250,8 +227,7 @@ void Decoder::PrintShiftImm(Instruction* instr) {
int rotate = instr->RotateValue() * 2;
int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%d", imm);
}
@@ -259,10 +235,10 @@ void Decoder::PrintShiftImm(Instruction* instr) {
void Decoder::PrintShiftSat(Instruction* instr) {
int shift = instr->Bits(11, 7);
if (shift > 0) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[instr->Bit(6) * 2],
- instr->Bits(11, 7));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[instr->Bit(6) * 2],
+ instr->Bits(11, 7));
}
}
@@ -306,14 +282,14 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
return;
default:
if (svc >= kStopCode) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d - 0x%x",
- svc & kStopCodeMask,
- svc & kStopCodeMask);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d - 0x%x",
+ svc & kStopCodeMask,
+ svc & kStopCodeMask);
} else {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- svc);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ svc);
}
return;
}
@@ -422,35 +398,35 @@ int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
void Decoder::FormatNeonList(int Vd, int type) {
if (type == nlt_1) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "{d%d}", Vd);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d}", Vd);
} else if (type == nlt_2) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "{d%d, d%d}", Vd, Vd + 1);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d}", Vd, Vd + 1);
} else if (type == nlt_3) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
} else if (type == nlt_4) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
}
}
void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "[r%d", Rn);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "[r%d", Rn);
if (align != 0) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ":%d", (1 << align) << 6);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ":%d", (1 << align) << 6);
}
if (Rm == 15) {
Print("]");
} else if (Rm == 13) {
Print("]!");
} else {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "], r%d", Rm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "], r%d", Rm);
}
}
@@ -460,8 +436,7 @@ void Decoder::PrintMovwMovt(Instruction* instr) {
int imm = instr->ImmedMovwMovtValue();
int rd = instr->RdValue();
PrintRegister(rd);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", #%d", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", imm);
}
@@ -493,8 +468,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 'd': { // 'd: vmov double immediate.
double d = instr->DoubleImmedVmov();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%g", d);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%g", d);
return 1;
}
case 'f': { // 'f: bitfield instructions - v7 and above.
@@ -507,8 +481,8 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
ASSERT(width > 0);
}
ASSERT((width + lsbit) <= 32);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d, #%d", lsbit, width);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d, #%d", lsbit, width);
return 1;
}
case 'h': { // 'h: halfword operation for extra loads and stores
@@ -528,9 +502,9 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width + lsb) <= 32);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- instr->Bits(width + lsb - 1, lsb));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ instr->Bits(width + lsb - 1, lsb));
return 8;
}
case 'l': { // 'l: branch and link
@@ -567,31 +541,30 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
ASSERT(STRING_STARTS_WITH(format, "msg"));
byte* str =
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s", converter_.NameInCode(str));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s", converter_.NameInCode(str));
return 3;
}
case 'o': {
if ((format[3] == '1') && (format[4] == '2')) {
// 'off12: 12-bit offset for load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off12"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Offset12Value());
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", instr->Offset12Value());
return 5;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- (instr->Bits(19, 8) << 4) +
- instr->Bits(3, 0));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ (instr->Bits(19, 8) << 4) +
+ instr->Bits(3, 0));
return 15;
}
// 'off8: 8-bit offset for extra load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off8"));
int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", offs8);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs8);
return 4;
}
case 'p': { // 'pu: P and U bits for load and store instructions
@@ -642,11 +615,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
case 't': { // 'target: target of branch instructions
ASSERT(STRING_STARTS_WITH(format, "target"));
int off = (instr->SImmed24Value() << 2) + 8;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%+d -> %s",
- off,
- converter_.NameOfAddress(
- reinterpret_cast<byte*>(instr) + off));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%+d -> %s",
+ off,
+ converter_.NameOfAddress(
+ reinterpret_cast<byte*>(instr) + off));
return 6;
}
case 'u': { // 'u: signed or unsigned multiplies
@@ -1061,7 +1034,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtb16'cond 'rd, 'rm, ror #0");
+ Format(instr, "uxtb16'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
@@ -1085,7 +1058,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtb'cond 'rd, 'rm, ror #0");
+ Format(instr, "uxtb'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
@@ -1100,7 +1073,7 @@ void Decoder::DecodeType3(Instruction* instr) {
} else {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #0");
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
@@ -1207,14 +1180,14 @@ int Decoder::DecodeType7(Instruction* instr) {
Format(instr, "stop'cond 'svc");
// Also print the stop message. Its address is encoded
// in the following 4 bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "\n %p %08x stop message: %s",
- reinterpret_cast<int32_t*>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "\n %p %08x stop message: %s",
+ reinterpret_cast<void*>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<uint32_t*>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<char**>(instr
+ + Instruction::kInstrSize));
// We have decoded 2 * Instruction::kInstrSize bytes.
return 2 * Instruction::kInstrSize;
} else {
@@ -1272,10 +1245,10 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
} else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
(instr->Bit(8) == 1)) {
// vcvt.f64.s32 Dd, Dd, #<fbits>
- int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+ int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", #%d", fraction_bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", fraction_bits);
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
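
The fraction_bits fix above matches the ARM encoding of VCVT between floating point and fixed point: the immediate is imm4:i, i.e. bits 3:0 shifted left once ORed with bit 5, and fbits is 32 minus that value; the removed line assembled the field in the opposite order. A worked check (hedged sketch, not decoder code):

    #include <cassert>
    #include <cstdio>

    // fbits for vcvt.f64.s32 with #<fbits>: the immediate is imm4:i,
    // i.e. instruction bits 3:0 shifted left once, ORed with bit 5,
    // subtracted from the operand size (32).
    static int FractionBits(int imm4, int i) {
      return 32 - ((imm4 << 1) | i);
    }

    int main() {
      assert(FractionBits(0xF, 1) == 1);   // maximum immediate -> fbits = 1
      assert(FractionBits(0x0, 0) == 32);  // zero immediate -> fbits = 32
      std::printf("vcvt.f64.s32 d0, d0, #%d\n", FractionBits(0x8, 0));  // 16
      return 0;
    }
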
@@ -1566,11 +1539,12 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
} else {
Unknown(instr);
}
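
The vmovl change above reflects NEON register aliasing: the destination is a quad register, and q<n> overlays d<2n>/d<2n+1>, so the encoded D:Vd names a d-register pair whose low bit must be clear; the printable Q index is (D << 3) | (Vd >> 1), and an odd Vd now falls through to Unknown. A small sketch of that decode (assumed helper, for illustration):

    #include <cassert>
    #include <cstdio>

    // Decodes the destination Q register of vmovl from the D bit
    // (bit 22) and the 4-bit Vd field. D:Vd names the pair d<2n>,
    // d<2n+1>, so a valid encoding has Vd's low bit clear; -1 flags
    // the bad case the decoder now reports as Unknown.
    static int QRegisterIndex(int d_bit, int vd_field) {
      if ((vd_field & 1) != 0) return -1;  // odd pair start: undefined
      return (d_bit << 3) | (vd_field >> 1);
    }

    int main() {
      assert(QRegisterIndex(0, 0x0) == 0);   // d0/d1   -> q0
      assert(QRegisterIndex(1, 0x6) == 11);  // d22/d23 -> q11
      assert(QRegisterIndex(0, 0x5) == -1);  // odd Vd: not a Q register
      std::printf("q%d\n", QRegisterIndex(1, 0x6));
      return 0;
    }
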
@@ -1579,11 +1553,12 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
} else {
Unknown(instr);
}
@@ -1597,8 +1572,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "vst1.%d ", (1 << size) << 3);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vst1.%d ", (1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
FormatNeonMemory(Rn, align, Rm);
@@ -1610,8 +1585,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "vld1.%d ", (1 << size) << 3);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vld1.%d ", (1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
FormatNeonMemory(Rn, align, Rm);
@@ -1625,14 +1600,14 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Rn = instr->Bits(19, 16);
int offset = instr->Bits(11, 0);
if (offset == 0) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [r%d]", Rn);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d]", Rn);
} else if (instr->Bit(23) == 0) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [r%d, #-%d]", Rn, offset);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #-%d]", Rn, offset);
} else {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [r%d, #+%d]", Rn, offset);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #+%d]", Rn, offset);
}
} else {
Unknown(instr);
@@ -1666,26 +1641,26 @@ int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
DecodeSpecialCondition(instr);
return Instruction::kInstrSize;
}
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "constant pool begin (length %d)",
- DecodeConstantPoolLength(instruction_bits));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "constant pool begin (length %d)",
+ DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
} else if (instruction_bits == kCodeAgeJumpInstruction) {
    // The code age prologue has a constant immediately following the jump
// instruction.
Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
DecodeType2(instr);
- OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- " (0x%08x)", target->InstructionBits());
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ " (0x%08x)", target->InstructionBits());
return 2 * Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
@@ -1737,7 +1712,7 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
diff --git a/chromium/v8/src/arm/frames-arm.cc b/chromium/v8/src/arm/frames-arm.cc
index b2071807d25..6051e021ab6 100644
--- a/chromium/v8/src/arm/frames-arm.cc
+++ b/chromium/v8/src/arm/frames-arm.cc
@@ -1,40 +1,17 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "assembler.h"
-#include "assembler-arm.h"
-#include "assembler-arm-inl.h"
-#include "frames.h"
-#include "macro-assembler.h"
-#include "macro-assembler-arm.h"
+#include "src/assembler.h"
+#include "src/arm/assembler-arm.h"
+#include "src/arm/assembler-arm-inl.h"
+#include "src/frames.h"
+#include "src/macro-assembler.h"
+#include "src/arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@@ -42,10 +19,25 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ return pp;
+}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ return pp;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ const int offset = ExitFrameConstants::kConstantPoolOffset;
+ return Memory::Object_at(fp() + offset);
+}
} } // namespace v8::internal
diff --git a/chromium/v8/src/arm/frames-arm.h b/chromium/v8/src/arm/frames-arm.h
index e6ecda1fb53..ce65e887f84 100644
--- a/chromium/v8/src/arm/frames-arm.h
+++ b/chromium/v8/src/arm/frames-arm.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_FRAMES_ARM_H_
#define V8_ARM_FRAMES_ARM_H_
@@ -52,8 +29,6 @@ const RegList kJSCallerSaved =
const int kNumJSCallerSaved = 4;
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
// Return the code of the n-th caller-saved register available to JavaScript
// e.g. JSCallerSavedReg(0) returns r0.code() == 0
int JSCallerSavedCode(int n);
@@ -109,8 +84,13 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kFrameSize = FLAG_enable_ool_constant_pool ?
+ 3 * kPointerSize : 2 * kPointerSize;
+
+ static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
+ -3 * kPointerSize : 0;
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
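
With FLAG_enable_ool_constant_pool the ARM exit frame gains one slot: the constant pool pointer is spilled below the code object, so the fp-relative offsets become -3/-2/-1 words and kFrameSize grows from 2 to 3 pointers. A sketch of the offset arithmetic (kPointerSize taken as 4 for 32-bit ARM):

    #include <cstdio>

    // Frame-pointer-relative slots of the ARM exit frame, mirroring
    // the constants above. With an out-of-line constant pool the pool
    // pointer occupies one extra slot below the code object; without
    // it, kConstantPoolOffset is unused (0).
    static const int kPointerSize = 4;

    static void PrintLayout(bool ool_constant_pool) {
      int constant_pool = ool_constant_pool ? -3 * kPointerSize : 0;
      int code = -2 * kPointerSize;
      int sp_slot = -1 * kPointerSize;
      std::printf("frame size %d: pool@%d code@%d sp@%d\n",
                  (ool_constant_pool ? 3 : 2) * kPointerSize,
                  constant_pool, code, sp_slot);
    }

    int main() {
      PrintLayout(false);  // classic 2-slot exit frame
      PrintLayout(true);   // ool constant pool adds one slot
      return 0;
    }
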
diff --git a/chromium/v8/src/arm/full-codegen-arm.cc b/chromium/v8/src/arm/full-codegen-arm.cc
index 55088033f04..8b079eddd95 100644
--- a/chromium/v8/src/arm/full-codegen-arm.cc
+++ b/chromium/v8/src/arm/full-codegen-arm.cc
@@ -1,46 +1,23 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
-#include "arm/code-stubs-arm.h"
-#include "arm/macro-assembler-arm.h"
+#include "src/arm/code-stubs-arm.h"
+#include "src/arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@@ -119,6 +96,7 @@ class JumpPatchSite BASE_EMBEDDED {
// The live registers are:
// o r1: the JS function object being called (i.e., ourselves)
// o cp: our context
+// o pp: our caller's constant pool pointer (if FLAG_enable_ool_constant_pool)
// o fp: our caller's frame pointer
// o sp: stack pointer
// o lr: return address
@@ -129,6 +107,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -143,15 +122,22 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
- __ cmp(r5, Operand::Zero());
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset), ne);
+ __ ldr(r2, MemOperand(sp, receiver_offset));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &ok);
+
+ __ ldr(r2, GlobalObjectOperand());
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+
+ __ str(r2, MemOperand(sp, receiver_offset));
+
+ __ bind(&ok);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -160,7 +146,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(BUILD_FUNCTION_FRAME);
+ __ Prologue(info->IsCodePreAgingActive());
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -168,20 +154,34 @@ void FullCodeGenerator::Generate() {
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- // Emit a loop to initialize stack cells for locals when optimizing for
- // size. Otherwise, unroll the loop for maximum performance.
+ if (locals_count >= 128) {
+ Label ok;
+ __ sub(r9, sp, Operand(locals_count * kPointerSize));
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ __ cmp(r9, Operand(r2));
+ __ b(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
- if (FLAG_optimize_for_size && locals_count > 4) {
- Label loop;
- __ mov(r2, Operand(locals_count));
- __ bind(&loop);
- __ sub(r2, r2, Operand(1), SetCC);
- __ push(r9);
- __ b(&loop, ne);
- } else {
- for (int i = 0; i < locals_count; i++) {
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(r2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
__ push(r9);
}
+ // Continue loop if not done.
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(&loop_header, ne);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ push(r9);
}
}
}
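
The rewritten locals prologue above replaces the old unroll-or-loop choice with a hybrid: push kMaxPushes slots per loop iteration (4 when optimizing for size, 32 otherwise) and emit the remainder straight-line, after a stack-limit check once locals_count reaches 128. The quotient/remainder split, modeled in plain C++:

    #include <cassert>

    // Split of locals_count into full loop iterations of kMaxPushes
    // pushes plus a straight-line remainder, as the rewritten
    // prologue emits it.
    static int TotalPushes(int locals_count, bool optimize_for_size) {
      int kMaxPushes = optimize_for_size ? 4 : 32;
      int pushes = 0;
      if (locals_count >= kMaxPushes) {
        int loop_iterations = locals_count / kMaxPushes;
        pushes += loop_iterations * kMaxPushes;  // body unrolled kMaxPushes wide
      }
      pushes += locals_count % kMaxPushes;       // remainder emitted inline
      return pushes;
    }

    int main() {
      assert(TotalPushes(0, false) == 0);
      assert(TotalPushes(100, false) == 100);  // 3 iterations * 32 + 4 inline
      assert(TotalPushes(100, true) == 100);   // 25 iterations * 4 + 0 inline
      return 0;
    }
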
@@ -193,20 +193,25 @@ void FullCodeGenerator::Generate() {
if (heap_slots > 0) {
// Argument to NewContext is the function, which is still in r1.
Comment cmnt(masm_, "[ Allocate context");
- __ push(r1);
+ bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(r1);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ push(r1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Context is returned in r0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, r0);
+ __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -221,8 +226,15 @@ void FullCodeGenerator::Generate() {
__ str(r0, target);
// Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
}
@@ -250,14 +262,14 @@ void FullCodeGenerator::Generate() {
    // The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(type);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, r0, r1, r2);
@@ -281,7 +293,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -294,8 +306,10 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(masm_,
+ masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+ __ Call(stack_check, RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -334,11 +348,7 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- if (isolate()->IsDebuggerActive()) {
+ if (info_->is_debug()) {
// Detect debug break requests as soon as possible.
reset_value = FLAG_interrupt_budget >> 4;
}
@@ -355,13 +365,10 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
__ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
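
Back-edge weighting above is now unconditional (FLAG_weighted_back_edges is gone): the interrupt weight is the code distance back to the loop header divided by kCodeSizeMultiplier, clamped to [1, kMaxBackEdgeWeight]. A worked clamp; the 149/127 constants below are placeholders, not necessarily the platform values:

    #include <algorithm>
    #include <cassert>

    // Clamped back-edge weight: distance in code bytes scaled down by
    // the per-platform kCodeSizeMultiplier, never below 1 and never
    // above the cap.
    static int BackEdgeWeight(int distance, int multiplier, int max_weight) {
      return std::min(max_weight, std::max(1, distance / multiplier));
    }

    int main() {
      assert(BackEdgeWeight(0, 149, 127) == 1);         // tiny loop: floor of 1
      assert(BackEdgeWeight(1490, 149, 127) == 10);     // mid-size loop
      assert(BackEdgeWeight(100000, 149, 127) == 127);  // huge loop: capped
      return 0;
    }
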
@@ -394,53 +401,41 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ b(pl, &ok);
- __ push(r0);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(r2);
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(r0);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ b(pl, &ok);
+ __ push(r0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(r0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
+ __ bind(&check_exit_codesize);
#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
- masm_->mov(sp, fp);
- int no_frame_start = masm_->pc_offset();
- masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
+ int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
@@ -676,7 +671,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
+ CallIC(ic, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -797,7 +792,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -846,7 +841,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
__ Push(cp, r2, r1, r0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -902,7 +897,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -974,7 +969,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ mov(r1, Operand(pairs));
__ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -982,7 +977,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -1037,9 +1032,19 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
+ Label skip;
+ __ b(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
__ cmp(r0, Operand::Zero());
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -1072,6 +1077,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1161,13 +1167,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(r1, cell);
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
+ __ Move(r1, FeedbackVector());
+ __ mov(r2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
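
The for-in hunk above moves type feedback from a dedicated Cell to a slot of the function's feedback vector: the slow path stamps the megamorphic sentinel into the slot returned by stmt->ForInFeedbackSlot(), addressed via FixedArray::OffsetOfElementAt. A sketch of that offset computation (the 8-byte header below is an assumption for illustration, not a V8 constant):

    #include <cassert>

    // Byte offset of element i in a FixedArray-like layout: a fixed
    // header followed by pointer-size slots. kHeaderSize = 8 is an
    // assumed value (map + length words) for this sketch.
    static int OffsetOfElementAt(int index) {
      const int kHeaderSize = 8;
      const int kPointerSize = 4;
      return kHeaderSize + index * kPointerSize;
    }

    int main() {
      assert(OffsetOfElementAt(0) == 8);
      assert(OffsetOfElementAt(3) == 20);  // slot written for feedback slot 3
      return 0;
    }
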
@@ -1260,8 +1262,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Iteration loop_statement(this, stmt);
increment_loop_depth();
- // var iterator = iterable[@@iterator]()
- VisitForAccumulatorValue(stmt->assign_iterator());
+ // var iterable = subject
+ VisitForAccumulatorValue(stmt->assign_iterable());
// As with for-in, skip the loop if the iterator is null or undefined.
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -1269,16 +1271,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, loop_statement.break_label());
- // Convert the iterator to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &done_convert);
- __ bind(&convert);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- __ push(r0);
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
// Loop entry.
__ bind(loop_statement.continue_label());
@@ -1325,7 +1319,9 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
__ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1333,7 +1329,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, r0, r1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(r0);
}
@@ -1355,7 +1351,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1368,7 +1364,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1395,11 +1391,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
}
@@ -1412,7 +1407,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1450,17 +1445,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1477,13 +1471,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallLoadIC(CONTEXTUAL);
context()->Plug(r0);
break;
}
@@ -1491,9 +1484,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1525,7 +1517,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1533,18 +1525,18 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Let and const need a read barrier.
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&done);
} else {
        // Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
context()->Plug(r0);
@@ -1556,15 +1548,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(r0);
}
@@ -1597,7 +1589,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ mov(r2, Operand(expr->pattern()));
__ mov(r1, Operand(expr->flags()));
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(r5, r0);
__ bind(&materialized);
@@ -1609,7 +1601,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ Push(r5, r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(r5);
__ bind(&allocated);
@@ -1649,14 +1641,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
- FastCloneShallowObjectStub stub(properties_count);
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
@@ -1692,10 +1683,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->value()));
__ ldr(r1, MemOperand(sp));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1784,8 +1772,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
// If the only customer of allocation sites is transitioning, then
// we can turn it off if we don't have anywhere else to transition to.
@@ -1796,31 +1783,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_elements));
- if (has_fast_elements && constant_elements_values->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode,
- length);
- __ CallStub(&stub);
- __ IncrementCounter(
- isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
__ mov(r0, Operand(Smi::FromInt(flags)));
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-
- if (has_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
@@ -1852,7 +1820,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
__ mov(r3, Operand(Smi::FromInt(i)));
- StoreArrayLiteralElementStub stub;
+ StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1869,13 +1837,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2014,7 +1978,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ cmp(sp, r1);
__ b(eq, &post_runtime);
__ push(r0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ pop(result_register());
@@ -2053,9 +2017,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
- __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(r3, r0); // iter, exception
+ __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(r2, r3, r0); // "throw", iter, except
__ jmp(&l_call);
// try { received = %yield result }
@@ -2080,31 +2044,38 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, cp);
__ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(r0); // result
+ __ pop(r0); // result
EmitReturnSequence();
- __ bind(&l_resume); // received in r0
+ __ bind(&l_resume); // received in r0
__ PopTryHandler();
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
- __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(r3, r0); // iter, received
+ __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(r2, r3, r0); // "next", iter, received
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(r1, r0);
+ __ str(r1, MemOperand(sp, 2 * kPointerSize));
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
// if (!result.done) goto l_try;
__ bind(&l_loop);
__ push(r0); // save result
__ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in r0
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in r0
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ cmp(r0, Operand(0));
@@ -2113,8 +2084,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(r0); // result
__ LoadRoot(r2, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in r0
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in r0
context()->DropAndPlug(2, r0); // drop iter and g
break;
}
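
The l_call rewrite above drops the removed keyed-call stub: the code now keeps [name, iter, arg] on the operand stack, loads iter[name] through the KeyedLoadIC, overwrites the name slot with the callee, and invokes CallFunctionStub with the iterator as receiver. A plain C++ model of that stack shuffle (illustrative only):

    #include <cassert>
    #include <string>
    #include <vector>

    // Models the operand stack across the rewritten l_call: slots are
    // [name, iter, arg] (arg on top); the keyed load of iter[name] is
    // written back over the name slot, so CallFunctionStub sees
    // [callee, iter, arg] with iter as the receiver.
    int main() {
      std::vector<std::string> stack = {"\"next\"", "iter", "received"};
      std::string key = stack[0];       // r0 <- MemOperand(sp, 2 * kPointerSize)
      std::string receiver = stack[1];  // r1 <- MemOperand(sp, kPointerSize)
      std::string callee = receiver + "[" + key + "]";  // KeyedLoadIC result
      stack[0] = callee;                // str r1, MemOperand(sp, 2 * kPointerSize)
      assert(stack[0] == "iter[\"next\"]");
      return 0;
    }
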
@@ -2126,19 +2096,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in r0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. r1
- // will hold the generator object until the activation has been resumed.
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // r1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ pop(r1);
// Check generator state.
- Label wrong_state, done;
+ Label wrong_state, closed_state, done;
__ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
__ cmp(r3, Operand(Smi::FromInt(0)));
- __ b(le, &wrong_state);
+ __ b(eq, &closed_state);
+ __ b(lt, &wrong_state);
// Load suspended function and context.
__ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
@@ -2169,11 +2141,12 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ bind(&resume_frame);
// lr = return address.
// fp = caller's frame pointer.
+ // pp = caller's constant pool (if FLAG_enable_ool_constant_pool),
// cp = callee's context,
// r4 = callee's JS function.
- __ Push(lr, fp, cp, r4);
+ __ PushFixedFrame(r4);
// Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Load the operand stack size.
__ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
@@ -2187,12 +2160,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ cmp(r3, Operand(0));
__ b(ne, &slow_resume);
__ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r2);
- __ add(r3, r3, r2);
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ Jump(r3);
+
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ if (FLAG_enable_ool_constant_pool) {
+ // Load the new code object's constant pool pointer.
+ __ ldr(pp,
+ MemOperand(r3, Code::kConstantPoolOffset - Code::kHeaderSize));
+ }
+
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r2);
+ __ add(r3, r3, r2);
+ __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ Jump(r3);
+ }
__ bind(&slow_resume);
}
@@ -2208,14 +2190,29 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
ASSERT(!result_register().is(r1));
__ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ push(r2);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(r0);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(r1);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
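
Per the resume hunks above, resuming a closed generator is no longer an error: next() materializes a completed iterator result and throw(v) rethrows v; only a running generator (negative continuation) still raises the state error. A behavioral model of the three-way dispatch (sketch, not the generated code):

    #include <cassert>
    #include <stdexcept>

    // Models the dispatch on the continuation value in the rewritten
    // EmitGeneratorResume: > 0 suspended (resume), == 0 closed,
    // < 0 executing (kGeneratorClosed == 0 after this change).
    struct IteratorResult { bool done; };

    static IteratorResult ResumeNext(int continuation) {
      if (continuation < 0) throw std::runtime_error("generator state error");
      if (continuation == 0) return IteratorResult{true};  // closed: done result
      return IteratorResult{false};                        // suspended: resume
    }

    int main() {
      assert(ResumeNext(4).done == false);  // suspended generator resumes
      assert(ResumeNext(0).done == true);   // closed generator: completed result
      try {
        ResumeNext(-1);                     // executing generator: state error
        return 1;
      } catch (const std::runtime_error&) {
      }
      return 0;
    }
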
@@ -2226,14 +2223,14 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
- Handle<Map> map(isolate()->native_context()->generator_result_map());
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
__ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2263,8 +2260,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->value()));
// Call load IC. It has arguments receiver and property name r0 and r2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
@@ -2272,7 +2268,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2298,9 +2294,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2375,22 +2370,16 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- BinaryOpICStub stub(op, mode);
+ BinaryOpICStub stub(isolate(), op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2416,10 +2405,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2428,7 +2414,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
__ Pop(r0, r2); // r0 = restored value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2439,48 +2425,59 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ mov(r3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ push(r0); // Value.
+ __ mov(r1, Operand(name));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallStoreIC();
- } else if (op == Token::INIT_CONST) {
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ ldr(r1, StackOperand(var));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
- __ str(result_register(), StackOperand(var));
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r2, location);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2490,23 +2487,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ b(ne, &assign);
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2514,21 +2507,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ str(r0, location);
- if (var->IsContextSlot()) {
- __ mov(r3, r0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2539,17 +2518,14 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->IsLiteral());
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2563,10 +2539,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ Pop(r2, r1); // r1 = key.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2593,74 +2569,73 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
ic_total_count_++;
// All calls must have a predictable size in full-codegen code to ensure that
// the debugger can patch them correctly.
- __ Call(code, rmode, ast_id, al, NEVER_INLINE_TARGET_ADDRESS);
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
+ NEVER_INLINE_TARGET_ADDRESS);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
}
- __ mov(r2, Operand(name));
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ ldr(r0, MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ str(r0, MemOperand(sp, kPointerSize));
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(r0);
+
+ EmitCall(expr, call_type);
}
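The three-instruction shuffle in the METHOD branch leaves the loaded callee under a duplicated receiver, which is the layout EmitCall later reads the function from. Sketched with descriptive labels:

    // Stack before:  sp[0] = receiver
    // Stack after:   sp[0] = receiver (copy), sp[kPointerSize] = callee
    __ ldr(ip, MemOperand(sp, 0));             // ip = receiver
    __ push(ip);                               // duplicate it on top
    __ str(r0, MemOperand(sp, kPointerSize));  // old slot now holds the callee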
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(r1);
- __ push(r0);
- __ push(r1);
+ Expression* callee = expr->expression();
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0); // Drop the key still on the stack.
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ ldr(r1, MemOperand(sp, 0));
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ str(r0, MemOperand(sp, kPointerSize));
+
+ EmitCall(expr, CallIC::METHOD);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2668,20 +2643,17 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
VisitForStackValue(args->at(i));
}
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(r2, Operand(cell));
-
- CallFunctionStub stub(arg_count, flags);
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
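With the vector-based CallIC there is no per-site feedback Cell to allocate; the call site only materializes the slot index and the callee, and the IC finds its feedback through the shared vector. The register contract as this code sets it up (derived from the two instructions above, not from the stub's definition):

    __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));  // r3 = slot
    __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));   // r1 = callee
    CallIC(ic);  // No TypeFeedbackId: the vector slot carries the feedback.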
@@ -2701,15 +2673,15 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
- // r2: the language mode.
- __ mov(r2, Operand(Smi::FromInt(language_mode())));
+ // r2: strict mode.
+ __ mov(r2, Operand(Smi::FromInt(strict_mode())));
  // r1: the start position of the scope the call resides in.
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2722,12 +2694,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2757,20 +2728,19 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ ldr(r0, GlobalObjectOperand());
- __ push(r0);
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2785,7 +2755,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ASSERT(!context_register().is(r2));
__ mov(r2, Operand(proxy->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ Push(r0, r1); // Function, receiver.
// If fast case code has been generated, emit code to push the
@@ -2799,37 +2769,34 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(r0);
// The receiver is implicitly the global receiver. Indicate this
  // by passing undefined to the call function stub.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ push(r1);
__ bind(&call);
}
// The receiver is either the global receiver or an object found
- // by LoadContextSlot. That object could be the hole if the
- // receiver is implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
+ // by LoadContextSlot.
+ EmitCall(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithIC(expr, property->key());
+ EmitKeyedCallWithLoadIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ push(r1);
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCall(expr);
}
#ifdef DEBUG
@@ -2866,14 +2833,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(r2, Operand(cell));
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
}
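When FLAG_pretenuring_call_new is on, each construct site owns a pair of adjacent vector slots, which is what the assertion above encodes. A hypothetical layout for a site whose CallNewFeedbackSlot() is n:

    // FeedbackVector()[n]     : call-target feedback for the construct call
    // FeedbackVector()[n + 1] : AllocationSite consulted for pretenuring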
@@ -3047,7 +3017,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
// Calculate the end of the descriptor array.
__ mov(r2, r4);
- __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
// Loop through all the keys in the descriptor array. If one of these is the
// string "valueOf" the result is false.
@@ -3196,14 +3166,11 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
// Check the marker in the calling frame.
- __ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -3246,7 +3213,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -3254,7 +3221,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
- Label exit;
+
// Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -3262,13 +3229,11 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &exit);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset), eq);
- __ bind(&exit);
context()->Plug(r0);
}
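This hunk, like the EmitIsConstructCall one above and the EmitValueOf one below, folds a compare/branch/load/label sequence into a single conditionally executed load, a rewrite ARM's predicated instructions make possible. The general shape, as a sketch with placeholder registers:

    // Before:                            // After:
    //   __ cmp(rX, Operand(expected));   //   __ cmp(rX, Operand(expected));
    //   __ b(ne, &skip);                 //   __ ldr(rY, mem, eq);
    //   __ ldr(rY, mem);
    //   __ bind(&skip);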
@@ -3334,31 +3299,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- SubStringStub stub;
+ SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -3371,7 +3314,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
+ RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
@@ -3393,8 +3336,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
__ JumpIfSmi(r0, &done);
// If the object is not a value type, return the object.
__ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
- __ b(ne, &done);
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset), eq);
__ bind(&done);
context()->Plug(r0);
@@ -3442,7 +3384,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(r0);
}
@@ -3463,9 +3405,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
if (FLAG_debug_code) {
__ SmiTst(value);
- __ ThrowIf(ne, kNonSmiValue);
+ __ Check(eq, kNonSmiValue);
__ SmiTst(index);
- __ ThrowIf(ne, kNonSmiIndex);
+ __ Check(eq, kNonSmiIndex);
__ SmiUntag(index, index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
__ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
@@ -3496,9 +3438,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
if (FLAG_debug_code) {
__ SmiTst(value);
- __ ThrowIf(ne, kNonSmiValue);
+ __ Check(eq, kNonSmiValue);
__ SmiTst(index);
- __ ThrowIf(ne, kNonSmiIndex);
+ __ Check(eq, kNonSmiIndex);
__ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
@@ -3522,7 +3464,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub(MathPowStub::ON_STACK);
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -3562,7 +3504,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
// Load the argument into r0 and call the stub.
VisitForAccumulatorValue(args->at(0));
- NumberToStringStub stub;
+ NumberToStringStub stub(isolate());
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -3681,21 +3623,12 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- if (FLAG_new_string_add) {
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(r1);
- NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- } else {
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
- }
+ __ pop(r1);
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
context()->Plug(r0);
}
@@ -3706,34 +3639,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
+ StringCompareStub stub(isolate());
__ CallStub(&stub);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
@@ -3753,8 +3664,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
// InvokeFunction requires the function in r1. Move it in there.
__ mov(r1, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, count, CALL_FUNCTION, NullCallWrapper());
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
@@ -3768,12 +3678,14 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
+ RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ pop(r1);
+ __ pop(r2);
__ CallStub(&stub);
context()->Plug(r0);
}
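The stub now takes its operands in registers instead of on the stack; the pops above establish the mapping (inferred from this call site only):

    // r2 = args->at(0)   (pushed first, popped last)
    // r1 = args->at(1)
    // r0 = args->at(2)   (left in the accumulator, never pushed)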
@@ -3806,7 +3718,6 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Label done, not_found;
- // tmp now holds finger offset as a smi.
__ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
// r2 now holds finger offset as a smi.
__ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -3822,50 +3733,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = r0;
- Register left = r1;
- Register tmp = r2;
- Register tmp2 = r3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
- Label done, fail, ok;
- __ cmp(left, Operand(right));
- __ b(eq, &ok);
- // Fail if either is a non-HeapObject.
- __ and_(tmp, left, Operand(right));
- __ JumpIfSmi(tmp, &fail);
- __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
- __ b(ne, &fail);
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ cmp(tmp, Operand(tmp2));
- __ b(ne, &fail);
- __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ cmp(tmp, tmp2);
- __ b(eq, &ok);
- __ bind(&fail);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
__ bind(&done);
-
context()->Plug(r0);
}
@@ -4140,8 +4010,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4149,34 +4019,49 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
+ // Push the builtins object as the receiver.
__ ldr(r0, GlobalObjectOperand());
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
__ push(r0);
- }
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
+ // Load the function from the receiver.
__ mov(r2, Operand(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ str(r0, MemOperand(sp, kPointerSize));
+
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, r0);
} else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+ context()->Plug(r0);
}
- context()->Plug(r0);
}
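For is_jsruntime() calls the dedicated call IC is gone: the function is fetched off the builtins object with a plain LoadIC and then invoked through CallFunctionStub like any other call. The stack this path builds before the stub runs, with slot labels that are descriptive only:

    // sp[(arg_count + 1) * kPointerSize] : target function
    // sp[arg_count * kPointerSize]       : builtins object (the receiver)
    // sp[0 .. (arg_count - 1) * kPointerSize] : arguments, last one on top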
@@ -4190,9 +4075,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
+ __ mov(r1, Operand(Smi::FromInt(strict_mode())));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -4200,11 +4083,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ mov(r0, Operand(Smi::FromInt(SLOPPY)));
__ Push(r2, r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -4218,7 +4101,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(r0);
}
} else {
@@ -4293,16 +4176,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4384,7 +4262,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call);
__ bind(&slow);
}
- ToNumberStub convert_stub;
+ ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
// Save result for postfix expressions.
@@ -4415,10 +4293,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4447,10 +4323,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4463,10 +4336,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_PROPERTY: {
__ Pop(r2, r1); // r1 = key. r2 = receiver.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4486,16 +4359,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4505,7 +4378,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ mov(r0, Operand(proxy->name()));
__ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4532,13 +4405,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(r0, if_true);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
+ } else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(r0, if_false);
// Check for undetectable objects => false.
__ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
@@ -4546,20 +4420,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ } else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ } else if (String::Equals(check, factory->boolean_string())) {
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
__ b(eq, if_true);
__ CompareRoot(r0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
+ String::Equals(check, factory->null_string())) {
__ CompareRoot(r0, Heap::kNullValueRootIndex);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ } else if (String::Equals(check, factory->undefined_string())) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
@@ -4569,14 +4443,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(ne, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
+ } else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(r0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
__ b(eq, if_true);
__ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
+ } else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
__ CompareRoot(r0, Heap::kNullValueRootIndex);
@@ -4629,7 +4503,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
@@ -4657,7 +4531,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand::Zero());
@@ -4692,7 +4566,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ cmp(r0, Operand(0));
Split(ne, if_true, if_false, fall_through);
}
@@ -4773,7 +4647,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
- __ ldr(r1, MemOperand(ip));
+ STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
+ __ ldrb(r1, MemOperand(ip));
__ SmiTag(r1);
__ push(r1);
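The flag behind address_of_has_pending_message is a C++ bool, which the STATIC_ASSERT pins at one byte; the previous word-wide ldr also read the three bytes after it. ExitFinallyBlock below gets the mirror-image strb fix. The corrected access in isolation:

    __ mov(ip, Operand(has_pending_message));
    __ ldrb(r1, MemOperand(ip));  // load exactly the one byte the bool occupies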
@@ -4799,7 +4674,8 @@ void FullCodeGenerator::ExitFinallyBlock() {
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
- __ str(r1, MemOperand(ip));
+ STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
+ __ strb(r1, MemOperand(ip));
__ pop(r1);
ExternalReference pending_message_obj =
@@ -4848,7 +4724,18 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+static Address GetInterruptImmediateLoadAddress(Address pc) {
+ Address load_address = pc - 2 * Assembler::kInstrSize;
+ if (!FLAG_enable_ool_constant_pool) {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
+ } else if (Assembler::IsMovT(Memory::int32_at(load_address))) {
+ load_address -= Assembler::kInstrSize;
+ ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
+ } else {
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
+ }
+ return load_address;
+}
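The helper's three branches correspond to the three shapes the immediate load can take once out-of-line constant pools and movw/movt loads exist; sketched from the assertions above and the comments in PatchAt below:

    // ldr ip, [pc, #offset]   ; in-line constant pool (ool pool disabled)
    // ldr ip, [pp, #offset]   ; out-of-line constant pool
    // movw ip, #imm_low       ; immediate pair (two instructions)
    // movt ip, #imm_high
    // ...in every case followed by:  blx ip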
void BackEdgeTable::PatchAt(Code* unoptimized_code,
@@ -4856,37 +4743,42 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
BackEdgeState target_state,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
- Address branch_address = pc - 3 * kInstrSize;
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - kInstrSize;
CodePatcher patcher(branch_address, 1);
-
switch (target_state) {
case INTERRUPT:
+ {
// <decrement profiling counter>
- // 2a 00 00 01 bpl ok
- // e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
- // e1 2f ff 3c blx ip
+ // bpl ok
+ // ; load interrupt stub address into ip - either of:
+ // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
+ // | movt ip, <immed high>
+ // blx ip
// ok-label
- patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions.
- ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address));
+
+      // Calculate the branch offset to the ok-label - this is the difference
+      // between the branch address and |pc| (which points at <blx ip>) plus
+      // one instruction.
+ int branch_offset = pc + kInstrSize - branch_address;
+ patcher.masm()->b(branch_offset, pl);
break;
+ }
case ON_STACK_REPLACEMENT:
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
+ // mov r0, r0 (NOP)
+ // ; load on-stack replacement address into ip - either of:
+ // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
+ // | movt ip, <immed high>
+ // blx ip
// ok-label
patcher.masm()->nop();
break;
}
- Address pc_immediate_load_address = pc - 2 * kInstrSize;
// Replace the call address.
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_immediate_load_address) & 0xfff;
- Address interrupt_address_pointer = pc + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
+ Assembler::set_target_address_at(pc_immediate_load_address, unoptimized_code,
+ replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_immediate_load_address, replacement_code);
@@ -4900,34 +4792,26 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
- Address branch_address = pc - 3 * kInstrSize;
- Address pc_immediate_load_address = pc - 2 * kInstrSize;
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_immediate_load_address) & 0xfff;
- Address interrupt_address_pointer = pc + interrupt_address_offset;
-
- if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) {
- ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->InterruptCheck()->entry()));
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_immediate_load_address)));
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - kInstrSize;
+ Address interrupt_address = Assembler::target_address_at(
+ pc_immediate_load_address, unoptimized_code);
+
+ if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
+ ASSERT(interrupt_address ==
+ isolate->builtins()->InterruptCheck()->entry());
return INTERRUPT;
}
ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_immediate_load_address)));
- if (Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->OnStackReplacement()->entry())) {
+ if (interrupt_address ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
- ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry()));
+ ASSERT(interrupt_address ==
+ isolate->builtins()->OsrAfterStackCheck()->entry());
return OSR_AFTER_STACK_CHECK;
}
diff --git a/chromium/v8/src/arm/ic-arm.cc b/chromium/v8/src/arm/ic-arm.cc
index ea247b37639..1028f8fd2b8 100644
--- a/chromium/v8/src/arm/ic-arm.cc
+++ b/chromium/v8/src/arm/ic-arm.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "assembler-arm.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "disasm.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
+#include "src/arm/assembler-arm.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/disasm.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -104,7 +81,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
}
-// Helper function used from LoadIC/CallIC GenerateNormal.
+// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
@@ -333,320 +310,6 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r1 : receiver
- // -- r2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(r1, &number);
- __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
- __ b(ne, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
- __ b(hs, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &boolean);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // r1: function
-
- // Check that the value isn't a smi.
- __ JumpIfSmi(r1, miss);
-
- // Check that the value is a JSFunction.
- __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- GenerateNameDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
-
- // r0: elements
- // Search the dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
-
- GenerateFunctionTailCall(masm, argc, &miss, r4);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4);
- }
-
- // Get the receiver of the function from the stack.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
-
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ JumpIfSmi(r2, &invoke);
- __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
- __ b(eq, &global);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(r2, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3);
-
- __ bind(&do_call);
- // receiver in r1 is not used after this point.
- // r2: key
- // r1: function
- GenerateFunctionTailCall(masm, argc, &slow_call, r0);
-
- __ bind(&check_number_dictionary);
- // r2: key
- // r3: elements map
- // r4: elements
- // Check whether the elements is a number dictionary.
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow_load);
- __ SmiUntag(r0, r2);
- // r0: untagged index
- __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r2, r1); // save the key and the receiver
- __ push(r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- }
- __ mov(r1, r0);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, r2, r0, r3, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(r3, r2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(r2, &miss);
- __ IsObjectNameType(r2, r0, &miss);
-
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -655,9 +318,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -672,14 +333,18 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- lr : return address
// -- r0 : receiver
// -----------------------------------
- Label miss;
+ Label miss, slow;
GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
// r1: elements
- GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
+ GenerateDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4);
__ Ret();
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
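Splitting &slow out of &miss changes the failure policy: a receiver that passes the shape check but misses in its property dictionary now takes the generic runtime path instead of retraining the IC. The resulting control flow, as a sketch:

    // dictionary probe hit           -> Ret()
    // probe failed (&slow)           -> GenerateRuntimeGetProperty (no repatch)
    // receiver check failed (&miss)  -> GenerateMiss (IC may be repatched)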
@@ -743,7 +408,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ b(ne, slow_case);
// Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
@@ -805,7 +470,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -831,7 +496,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -860,33 +525,6 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Load receiver.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
- __ ldr(r1, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
- __ ldr(r1, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, r3);
- __ b(eq, &slow);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
@@ -1219,7 +857,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1403,7 +1041,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1502,8 +1140,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1512,9 +1149,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
@@ -1565,7 +1200,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
diff --git a/chromium/v8/src/arm/lithium-arm.cc b/chromium/v8/src/arm/lithium-arm.cc
index 0b7a02eb664..93dc830dab3 100644
--- a/chromium/v8/src/arm/lithium-arm.cc
+++ b/chromium/v8/src/arm/lithium-arm.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-allocator-inl.h"
-#include "arm/lithium-arm.h"
-#include "arm/lithium-codegen-arm.h"
-#include "hydrogen-osr.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/lithium-allocator-inl.h"
+#include "src/arm/lithium-arm.h"
+#include "src/arm/lithium-codegen-arm.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -256,7 +233,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -277,7 +254,18 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -302,28 +290,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[r2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -360,7 +326,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -371,7 +337,7 @@ void LLoadKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
@@ -383,7 +349,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
+ stream->Add(" + %d] <-", base_offset());
} else {
stream->Add("] <- ");
}
@@ -563,8 +529,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -572,40 +537,35 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
+ LTemplateResultInstruction<1>* instr, int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
+ LTemplateResultInstruction<1>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -640,6 +600,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
!hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
+      // We can't tell statically whether the environment will actually be
+      // needed, so mark it as used to be on the safe side.
+ instr->environment()->set_has_been_used();
}
return instr;
@@ -666,6 +628,19 @@ LUnallocated* LChunkBuilder::TempRegister() {
}
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
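// TempDoubleRegister mirrors TempRegister above: rather than pinning a
// specific VFP register with FixedTemp(dN), it gives the register allocator
// an unallocated operand with a MUST_HAVE_DOUBLE_REGISTER policy and a fresh
// virtual register, aborting (and falling back to vreg 0) only when the
// allocator has run out of virtual registers.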
+
+
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
ASSERT(operand->HasFixedPolicy());
@@ -848,176 +823,108 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
chunk_->AddInstruction(dummy, current_block_);
}
} else {
- instr = current->CompileToLithium(this);
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
}
argument_count_ += current->argument_delta();
ASSERT(argument_count_ >= 0);
if (instr != NULL) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(current);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
-  // register constraint. In this case, the register allocator won't see an
-  // interference between the split child and the use-at-start (it would if
-  // it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- ASSERT(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- if (instr->IsCall()) {
- HValue* hydrogen_value_for_lazy_bailout = current;
- LInstruction* instruction_needing_environment = NULL;
- if (current->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(current->next());
- instruction_needing_environment = instr;
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
- }
+ AddInstruction(instr, current);
}
+
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
}
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
}
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
- }
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
}
+ ASSERT(fixed == 0 || used_at_start == 0);
}
+#endif
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
}
-
- return result;
}
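// For calls, AddInstruction appends an LLazyBailout right after the call
// instruction. When the call has observable side effects, the HSimulate
// following it replays the environment first, so a lazy deopt resumes
// execution after the side effect instead of repeating it.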
@@ -1027,22 +934,21 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
HValue* value = instr->value();
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
ToBooleanStub::Types expected = instr->expected_input_types();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
- !expected.IsGeneric()) {
- return AssignEnvironment(result);
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
- return result;
+ return branch;
}
@@ -1113,9 +1019,13 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
}
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- LOperand* argument = Use(instr->argument());
- return new(zone()) LPushArgument(argument);
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = Use(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
}
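// A single HPushArguments now expands into one LPushArgument per operand,
// which is why the builder emits them itself through AddInstruction and
// returns NULL rather than a lithium instruction of its own.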
@@ -1154,33 +1064,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), r1);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+ return MarkAsCall(DefineFixed(result, r0), instr);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, r0), instr);
}
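// LCallWithDescriptor is the generic replacement for the specialized call
// instructions deleted further down (LCallKeyed, LCallNamed, LCallGlobal,
// LCallKnownGlobal): operand 0 is the code target, and every remaining
// operand is forced into the fixed register that the CallInterfaceDescriptor
// assigns to that parameter slot.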
@@ -1198,12 +1113,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1220,7 +1133,7 @@ LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
- LOperand* temp = FixedTemp(d3);
+ LOperand* temp = TempDoubleRegister();
LMathRound* result = new(zone()) LMathRound(input, temp);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1232,36 +1145,26 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
? NULL
: UseFixed(instr->context(), cp);
LOperand* input = UseRegister(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LMathAbs(context, input));
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr);
}
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
}
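// A reference sketch of the Math.clz32 semantics being lowered here (plain
// C++; the name is illustrative, not part of this patch):
//
//   int Clz32(uint32_t x) {
//     int n = 0;
//     while (n < 32 && (x & 0x80000000u) == 0) { x <<= 1; ++n; }
//     return n;  // 32 for x == 0
//   }
//
// On ARM this reduces to a single clz instruction, which is presumably why
// the lowering needs no temps, fixed registers, or environment.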
@@ -1271,7 +1174,7 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
+ LOperand* double_temp = TempDoubleRegister();
LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
return DefineAsRegister(result);
}
@@ -1291,32 +1194,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(
- DefineFixed(new(zone()) LCallKeyed(context, key), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
@@ -1337,9 +1214,7 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
LCallFunction* call = new(zone()) LCallFunction(context, function);
- LInstruction* result = DefineFixed(call, r0);
- if (instr->IsTailCall()) return result;
- return MarkAsCall(result, instr);
+ return MarkAsCall(DefineFixed(call, r0), instr);
}
@@ -1384,21 +1259,73 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
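// A minimal sketch of the exactness test this path depends on (assuming
// two's-complement int32_t and a positive power-of-two divisor; the name is
// illustrative, not from this patch):
//
//   bool IsExactDiv(int32_t dividend, int32_t divisor) {
//     return (dividend & (divisor - 1)) == 0;  // nonzero low bits => deopt
//   }
//
// The AssignEnvironment conditions above are exactly the cases where the
// quotient can misbehave: a -0 result (negative divisor), kMinInt / -1
// overflow, and non-truncating uses of an inexact result.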
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp =
+ CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ (!CpuFeatures::IsSupported(SUDIV) ||
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
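// SUDIV is ARM's optional integer-divide extension. With it, sdiv computes
// the quotient directly; without it, the quotient is presumably derived via
// VFP double arithmetic, which is what the extra double temp is reserved
// for.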
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
- return AssignEnvironment(DefineAsRegister(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
- LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineAsRegister(div));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1407,115 +1334,121 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true;
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
-
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
-
- return false;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (CpuFeatures::IsSupported(SUDIV)) {
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
}
+ return result;
+}
- if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- int32_t int32_val = constant_val->Integer32Value();
- if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) ||
- CpuFeatures::IsSupported(SUDIV)) {
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
}
+ return result;
+}
- return NULL;
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp =
+ CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+ LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
- ? UseRegister(right)
- : UseOrConstant(right);
- LOperand* remainder = TempRegister();
- ASSERT(CpuFeatures::IsSupported(SUDIV) ||
- (right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())));
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp =
+ CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+ LOperand* temp2 =
+ CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor, temp, temp2));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
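// With SUDIV available, the remainder falls out of an sdiv+mls pair; a C++
// sketch of what that pair computes (names illustrative):
//
//   int32_t ModViaDiv(int32_t n, int32_t d) {
//     int32_t q = n / d;   // sdiv
//     return n - q * d;    // mls: multiply-and-subtract
//   }
//
// Without SUDIV the quotient again goes through VFP doubles, hence the two
// double temps (previously the fixed pair d10/d11).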
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseConstant(right));
- LInstruction* result = DefineAsRegister(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- } else if (CpuFeatures::IsSupported(SUDIV)) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right),
- FixedTemp(d10),
- FixedTemp(d11));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1770,8 +1703,6 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1787,8 +1718,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* value = UseRegister(instr->value());
LOperand* scratch = TempRegister();
return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
@@ -1877,19 +1806,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
LDateField* result =
@@ -1917,9 +1833,16 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = UseRegister(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseRegisterOrConstantAtStart(instr->length())
+ : UseRegisterAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
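// Bounds checks the optimizer proved redundant disappear entirely in release
// builds (the early return NULL). Under --debug-code they are still emitted
// as a sanity check, but without a deopt environment.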
@@ -1937,13 +1860,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -1960,20 +1876,21 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ HValue* val = instr->value();
if (from.IsSmi()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
from = Representation::Tagged();
}
if (from.IsTagged()) {
if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineSameAsFirst(new(zone()) LDummyUse(value));
@@ -1981,78 +1898,71 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = NULL;
- LInstruction* res = NULL;
- HValue* val = instr->value();
if (val->type().IsSmi() || val->representation().IsSmi()) {
- value = UseRegisterAtStart(val);
- res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
- value = UseRegister(val);
+ LOperand* value = UseRegister(val);
LOperand* temp1 = TempRegister();
- LOperand* temp2 = FixedTemp(d11);
- res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
- temp1,
- temp2));
- res = AssignEnvironment(res);
+ LOperand* temp2 = TempDoubleRegister();
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
}
- return res;
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
-
- // Make sure that the temp and result_temp registers are
- // different.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- Define(result, result_temp);
- return AssignPointerMap(result);
+ return AssignPointerMap(Define(result, result_temp));
} else if (to.IsSmi()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return AssignEnvironment(
DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- LDoubleToI* res = new(zone()) LDoubleToI(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegisterAtStart(val);
return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
}
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
- : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value())));
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
} else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
}
}
}
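// The rewritten DoChange keys the smi cases off HValue::kCanOverflow instead
// of range analysis: an int32->smi or int32->tagged conversion only needs an
// environment (or the heap-number allocation path) when the value can
// actually overflow smi range.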
@@ -2063,7 +1973,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2087,15 +2001,12 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) {
- value = UseRegisterAtStart(instr->value());
- if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
- }
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (!instr->CanOmitMapChecks()) {
- AssignEnvironment(result);
- if (instr->has_migration_target()) return AssignPointerMap(result);
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
}
return result;
}
@@ -2113,12 +2024,27 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
ASSERT(input_rep.IsSmiOrTagged());
    // The double temp is now allocated by the register allocator via
    // TempDoubleRegister() instead of pinning a fixed VFP register.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
+ LClampTToUint8* result =
+ new(zone()) LClampTToUint8(reg, TempDoubleRegister());
return AssignEnvironment(DefineAsRegister(result));
}
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
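// LDoubleBits and LConstructDouble expose the raw high/low words of a
// double to hydrogen; on ARM both are likely just vmov register moves, which
// would explain the plain register-in/register-out lowering.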
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2175,21 +2101,14 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(), r1);
- LOperand* value = UseFixed(instr->value(), r0);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2204,7 +2123,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
value = UseRegister(instr->value());
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2235,20 +2157,13 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LInstruction* result = NULL;
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2256,25 +2171,28 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
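// The deopt condition mirrors the codegen split named in the comments above:
// uint32 loads from external or fixed typed arrays can yield values outside
// int32 range unless all uses treat them as uint32, while ordinary fixed
// arrays only deopt when a hole check is required.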
@@ -2290,7 +2208,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2319,16 +2237,17 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
- (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
- ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
- (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
@@ -2349,17 +2268,18 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
return result;
} else {
+ LOperand* object = UseFixed(instr->object(), r0);
LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL);
- return AssignPointerMap(result);
+ return MarkAsCall(result, instr);
}
}
@@ -2392,11 +2312,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ if (needs_write_barrier || instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2405,14 +2323,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2428,12 +2339,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = FLAG_new_string_add
- ? UseFixed(instr->left(), r1)
- : UseRegisterAtStart(instr->left());
- LOperand* right = FLAG_new_string_add
- ? UseFixed(instr->right(), r0)
- : UseRegisterAtStart(instr->right());
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
return MarkAsCall(
DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
instr);
@@ -2446,7 +2353,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
@@ -2502,7 +2409,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ info()->code_stub()->GetInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
@@ -2575,9 +2482,6 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
return new(zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
}
@@ -2609,13 +2513,13 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2669,9 +2573,26 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/arm/lithium-arm.h b/chromium/v8/src/arm/lithium-arm.h
index 4f420a3d262..e3858936075 100644
--- a/chromium/v8/src/arm/lithium-arm.h
+++ b/chromium/v8/src/arm/lithium-arm.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_LITHIUM_ARM_H_
#define V8_ARM_LITHIUM_ARM_H_
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
+#include "src/hydrogen.h"
+#include "src/lithium-allocator.h"
+#include "src/lithium.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -44,6 +21,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
+ V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -52,12 +30,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -83,24 +58,28 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
- V(ElementsKind) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -109,7 +88,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -119,7 +97,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
@@ -131,17 +108,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -151,7 +127,6 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
@@ -165,8 +140,8 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
+ V(StoreFrameContext) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -179,16 +154,13 @@ class LCodeGen;
V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
@@ -269,7 +241,9 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
@@ -306,10 +280,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -321,10 +293,20 @@ class LTemplateInstruction : public LInstruction {
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
private:
+ // Iterator support.
virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
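
This refactoring splits the result container and the virtual input iteration out of LTemplateInstruction so that an instruction with a descriptor-defined argument count (LCallWithDescriptor, later in this file) can reuse them with a growable operand list. A simplified sketch of the pattern, with stand-in types:

    #include <array>
    #include <vector>

    struct LOperand;  // stand-in for the real operand type

    template <int R> struct ResultInstr {          // results + iteration only
      std::array<LOperand*, R> results_;
      virtual int InputCount() = 0;
      virtual LOperand* InputAt(int i) = 0;
      virtual ~ResultInstr() {}
    };

    template <int R, int I, int T>                 // fixed-arity instructions
    struct FixedInstr : ResultInstr<R> {
      std::array<LOperand*, I> inputs_;
      std::array<LOperand*, T> temps_;
      int InputCount() override { return I; }
      LOperand* InputAt(int i) override { return inputs_[i]; }
    };

    struct VarArityCall : ResultInstr<1> {         // like LCallWithDescriptor
      std::vector<LOperand*> inputs_;
      int InputCount() override { return static_cast<int>(inputs_.size()); }
      LOperand* InputAt(int i) override { return inputs_[i]; }
    };
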
@@ -443,6 +425,7 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -491,10 +474,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -559,6 +538,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
@@ -623,12 +603,45 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL,
- LOperand* temp2 = NULL) {
+ LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
@@ -645,38 +658,111 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
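
These FlooringDiv* instructions exist because HMathFloorOfDiv computes Math.floor(a / b), which differs from C-style truncating division when the operands have opposite signs. A scalar sketch of the distinction:

    #include <cstdint>

    inline int32_t FlooringDiv(int32_t a, int32_t b) {
      int32_t q = a / b;                              // truncates toward zero
      if ((a % b) != 0 && ((a < 0) != (b < 0))) --q;  // e.g. -7/2: -3 -> -4
      return q;
    }
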
@@ -816,39 +902,15 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathSin(LOperand* value) {
+ explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathTan(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
@@ -1336,34 +1398,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
@@ -1419,20 +1453,6 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
- LThrow(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
@@ -1604,20 +1624,6 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
@@ -1633,12 +1639,18 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
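
The rename from additional_index to base_offset switches the accessor from element-index units to a byte offset, which composes directly into the address computation. An illustrative sketch (the function and parameter names are assumptions, not V8's):

    #include <cstdint>

    inline uint8_t* KeyedAddress(uint8_t* elements, uint32_t base_offset,
                                 uint32_t key, int element_size_shift) {
      return elements + base_offset + (key << element_size_shift);
    }
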
@@ -1698,28 +1710,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1781,15 +1771,15 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
- temps_[0] = code_object;
+ inputs_[1] = code_object;
}
LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return temps_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
@@ -1828,18 +1818,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1853,95 +1831,73 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- LOperand* context() { return inputs_[0]; }
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
+ LOperand* function() { return inputs_[0]; }
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
+ LOperand* target() const { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
-
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNamed(LOperand* context) {
+ LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1963,35 +1919,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -2041,7 +1968,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2063,19 +1990,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -2088,38 +2002,33 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
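
NumberTagI and NumberTagU grow from zero to two temps here, presumably scratch registers for the inline HeapNumber allocation taken when the value does not fit in a Smi. The 31-bit range check deciding between the two paths, as a sketch under the 32-bit assumption:

    #include <cstdint>

    inline bool FitsInSmi(int32_t v) {
      return v >= -(1 << 30) && v < (1 << 30);
    }
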
@@ -2204,6 +2113,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2254,7 +2164,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
@@ -2279,7 +2188,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2292,6 +2201,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2310,7 +2225,7 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
return hydrogen()->NeedsCanonicalization();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
@@ -2336,7 +2251,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2463,7 +2378,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMaps(LOperand* value) {
+ explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
}
@@ -2537,6 +2452,33 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
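
Judging by the operand names, DoubleBits and ConstructDouble expose the two 32-bit halves of a double and rebuild one from them (useful for %_DoubleHi/%_DoubleLo-style intrinsics). A portable sketch of the computation:

    #include <cstdint>
    #include <cstring>

    inline uint64_t DoubleToBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);   // bit-cast without aliasing UB
      return bits;
    }

    inline double MakeDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }
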
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2729,6 +2671,35 @@ class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
@@ -2740,26 +2711,24 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition) { }
+ allocator_(allocator) { }
+
+ Isolate* isolate() const { return graph_->isolate(); }
// Build the sequence for the graph.
LPlatformChunk* Build();
- LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
-
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2770,18 +2739,24 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoRSub(HSub* instr);
static bool HasMagicNumberForDivisor(int32_t divisor);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2794,7 +2769,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2844,31 +2818,26 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LUnallocated* TempDoubleRegister();
MUST_USE_RESULT LOperand* FixedTemp(Register reg);
MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
@@ -2882,11 +2851,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
@@ -2898,14 +2864,11 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
- int position_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.cc b/chromium/v8/src/arm/lithium-codegen-arm.cc
index 0a3f043bc76..e98fcf4c087 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.cc
+++ b/chromium/v8/src/arm/lithium-codegen-arm.cc
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arm/lithium-codegen-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-#include "hydrogen-osr.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/code-stubs.h"
+#include "src/stub-cache.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -84,17 +61,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
- info()->CommitDependencies(code);
-}
-
-
-void LCodeGen::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
}
@@ -145,24 +113,38 @@ bool LCodeGen::GeneratePrologue() {
// r1: Callee's JS function.
// cp: Callee's context.
+ // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- __ cmp(r5, Operand::Zero());
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset), ne);
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
+ Label ok;
+ int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+ __ ldr(r2, MemOperand(sp, receiver_offset));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &ok);
+
+ __ ldr(r2, GlobalObjectOperand());
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+
+ __ str(r2, MemOperand(sp, receiver_offset));
+
+ __ bind(&ok);
}
}
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
- __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
}
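
The rewritten check above implements the sloppy-mode calling convention: an undefined receiver is replaced by the global proxy before optimized code runs. The same rule in scalar form, with stand-in types rather than V8's:

    struct Value { bool is_undefined; };

    inline Value* FixupReceiver(Value* receiver, Value* global_proxy) {
      return receiver->is_undefined ? global_proxy : receiver;
    }
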
@@ -197,18 +179,22 @@ bool LCodeGen::GeneratePrologue() {
int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is in r1.
- __ push(r1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ push(r1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ mov(cp, r0);
+ __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -222,13 +208,20 @@ bool LCodeGen::GeneratePrologue() {
MemOperand target = ContextOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier. This clobbers r3 and r0.
- __ RecordWriteContextSlot(
- cp,
- target.offset(),
- r0,
- r3,
- GetLinkRegisterState(),
- kSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp,
+ target.offset(),
+ r0,
+ r3,
+ GetLinkRegisterState(),
+ kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
Comment(";;; End allocate local context");
@@ -260,6 +253,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -274,7 +270,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -287,7 +284,7 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(!frame_is_built_);
ASSERT(info()->IsStub());
frame_is_built_ = true;
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ PushFixedFrame();
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
@@ -298,7 +295,7 @@ bool LCodeGen::GenerateDeferredCode() {
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
__ pop(ip);
- __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ PopFixedFrame();
frame_is_built_ = false;
}
__ jmp(code->exit());
@@ -349,7 +346,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
__ b(&needs_frame);
} else {
__ bind(&needs_frame);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ PushFixedFrame();
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
@@ -423,7 +420,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
__ Move(scratch, literal);
}
return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
__ ldr(scratch, ToMemOperand(op));
return scratch;
}
@@ -459,7 +456,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (r.IsTagged()) {
Abort(kUnsupportedTaggedImmediate);
}
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
// TODO(regis): Why is vldr not taking a MemOperand?
// __ vldr(dbl_scratch, ToMemOperand(op));
MemOperand mem_op = ToMemOperand(op);
@@ -679,10 +676,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -705,6 +698,16 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
+int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
+ int size = masm()->CallSize(code, mode);
+ if (code->kind() == Code::BINARY_OP_IC ||
+ code->kind() == Code::COMPARE_IC) {
+ size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
+ }
+ return size;
+}
+
+
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
@@ -718,7 +721,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
@@ -775,6 +777,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -894,46 +897,24 @@ void LCodeGen::DeoptimizeIf(Condition condition,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1016,6 +997,10 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
+ if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
+ // Register pp always contains a pointer to the constant pool.
+ safepoint.DefinePointerRegister(pp, zone());
+ }
}
@@ -1104,31 +1089,19 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- __ ldr(r0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -1142,36 +1115,70 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- Register result_reg = ToRegister(instr->result());
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ cmp(dividend, Operand::Zero());
+ __ b(pl, &dividend_is_not_negative);
+ // Note that this is correct even for kMinInt operands.
+ __ rsb(dividend, dividend, Operand::Zero());
+ __ and_(dividend, dividend, Operand(mask));
+ __ rsb(dividend, dividend, Operand::Zero(), SetCC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ b(&done);
+ }
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
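
The masking trick above in scalar form; the mask expression -(divisor + 1) (equivalently ~divisor) is chosen so it stays correct for divisor == kMinInt, where plain negation would overflow. Unsigned arithmetic keeps the sketch well defined even for n == INT32_MIN:

    #include <cstdint>

    inline int32_t ModByPowerOf2(int32_t n, int32_t divisor) {
      uint32_t mask = divisor < 0 ? ~static_cast<uint32_t>(divisor)
                                  : static_cast<uint32_t>(divisor) - 1;
      if (n >= 0) return static_cast<int32_t>(n & mask);
      uint32_t r = (0u - static_cast<uint32_t>(n)) & mask;  // negate, mask...
      return -static_cast<int32_t>(r);                      // ...negate back
    }
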
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ cmp(left_reg, Operand::Zero());
- __ b(pl, &left_is_not_negative);
- __ rsb(result_reg, left_reg, Operand::Zero());
- __ and_(result_reg, result_reg, Operand(divisor - 1));
- __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
- }
- __ b(&done);
- }
- __ bind(&left_is_not_negative);
- __ and_(result_reg, left_reg, Operand(divisor - 1));
- __ bind(&done);
- } else if (CpuFeatures::IsSupported(SUDIV)) {
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ mov(ip, Operand(Abs(divisor)));
+ __ smull(result, ip, result, ip);
+ __ sub(result, dividend, result, SetCC);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ b(ne, &remainder_not_zero);
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(lt, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
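
DoModByConstI computes the remainder as n - TruncatingDiv(n, |d|) * |d|, and TruncatingDiv is the Hacker's Delight magic-number scheme that the deleted EmitSignedIntegerDivisionByConstant spelled out by hand. A worked sketch for divisor 7 (magic value 0x92492493, shift 2, taken from the Hacker's Delight tables):

    #include <cstdint>

    inline int32_t DivBy7(int32_t n) {
      int64_t m = static_cast<int32_t>(0x92492493UL);   // negative magic value
      int32_t q = static_cast<int32_t>((m * n) >> 32);  // high word of product
      q += n;                                           // correction: magic < 0
      q >>= 2;                                          // shift
      q += static_cast<int32_t>(static_cast<uint32_t>(n) >> 31);  // negative n
      return q;
    }

    inline int32_t ModBy7(int32_t n) { return n - DivBy7(n) * 7; }
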
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
Register left_reg = ToRegister(instr->left());
@@ -1181,14 +1188,14 @@ void LCodeGen::DoModI(LModI* instr) {
Label done;
// Check for x % 0, sdiv might signal an exception. We have to deopt in this
// case because we can't return a NaN.
- if (right->CanBeZero()) {
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
// want. We have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
__ cmp(left_reg, Operand(kMinInt));
__ b(ne, &no_overflow_possible);
@@ -1208,12 +1215,10 @@ void LCodeGen::DoModI(LModI* instr) {
// mls r3, r3, r2, r1
__ sdiv(result_reg, left_reg, right_reg);
- __ mls(result_reg, result_reg, right_reg, left_reg);
+ __ Mls(result_reg, result_reg, right_reg, left_reg);
// If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
@@ -1240,7 +1245,7 @@ void LCodeGen::DoModI(LModI* instr) {
Label done;
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
- if (right->CanBeZero()) {
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
@@ -1269,9 +1274,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ sub(result_reg, left_reg, scratch, SetCC);
// If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
@@ -1281,217 +1284,138 @@ void LCodeGen::DoModI(LModI* instr) {
}
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, ip));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
- uint32_t divisor_abs = abs(divisor);
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ tst(dividend, Operand(mask));
+ DeoptimizeIf(ne, instr->environment());
+ }
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ rsb(result, dividend, Operand(0));
+ return;
+ }
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ mov(result, dividend);
+ } else if (shift == 1) {
+ __ add(result, dividend, Operand(dividend, LSR, 31));
+ } else {
+ __ mov(result, Operand(dividend, ASR, 31));
+ __ add(result, dividend, Operand(result, LSR, 32 - shift));
+ }
+ if (shift > 0) __ mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+}
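// Sketch of the bias trick emitted above (illustrative C++, assuming the
// usual arithmetic right shift on negative int32 values): an arithmetic
// shift floors, so for negative dividends the code first adds 2^shift - 1,
// taken from the sign-extension bits, to make the shift truncate instead.
#include <cstdint>
static int32_t TruncatingDivByPowerOf2(int32_t dividend, int shift) {
  // shift in [1, 31]; sign is 0 for dividend >= 0, all ones otherwise.
  uint32_t sign = static_cast<uint32_t>(dividend >> 31);
  int32_t biased = dividend + static_cast<int32_t>(sign >> (32 - shift));
  return biased >> shift;  // now rounds toward zero, like sdiv
}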
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ rsb(result, dividend, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, environment);
- }
- // Compute the remainder.
- __ mov(remainder, Operand::Zero());
- return;
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ mov(scratch, Operand(dividend, ASR, power - 1));
- }
- __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
- __ mov(result, Operand(scratch, ASR, power));
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ rsb(result, result, Operand::Zero());
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sub(remainder, dividend, Operand(result, LSL, power));
- } else {
- __ add(remainder, dividend, Operand(result, LSL, power));
- }
- return;
- } else {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division by a power
- // of two.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ mov(ip, Operand(M));
- __ smull(ip, scratch, dividend, ip);
- if (M < 0) {
- __ add(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ mov(scratch, Operand(scratch, ASR, s));
- }
- __ add(result, scratch, Operand(dividend, LSR, 31));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
- // Compute the remainder.
- __ mov(ip, Operand(divisor));
- // This sequence could be replaced with 'mls' when
- // it gets implemented.
- __ mul(scratch, result, ip);
- __ sub(remainder, dividend, scratch);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
}
-}
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
-void LCodeGen::DoDivI(LDivI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- const Register dividend = ToRegister(instr->left());
- const Register result = ToRegister(instr->result());
- int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
-
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- __ sub(result, dividend, Operand::Zero(), SetCC);
- __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- __ mov(result, Operand(result, ASR, power));
- if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
- return; // Don't fall through to "__ rsb" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ tst(dividend, Operand(test_value));
- DeoptimizeIf(ne, instr->environment());
- __ mov(result, Operand(dividend, ASR, power));
- if (divisor < 0) __ rsb(result, result, Operand(0));
- }
- } else {
- if (divisor < 0) {
- __ rsb(result, dividend, Operand(0));
- } else {
- __ Move(result, dividend);
- }
- }
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
- return;
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(ip, Operand(divisor));
+ __ smull(scratch0(), ip, result, ip);
+ __ sub(scratch0(), scratch0(), dividend, SetCC);
+ DeoptimizeIf(ne, instr->environment());
}
+}
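// A plausible model of the TruncatingDiv helper used above (an assumption
// about its contract, not V8's actual implementation): multiply by a
// precomputed magic constant and shift, per Hacker's Delight. For divisor 5:
#include <cstdint>
static int32_t TruncatingDivBy5(int32_t n) {
  int64_t product = static_cast<int64_t>(n) * 0x66666667;  // magic for 5
  int32_t quotient = static_cast<int32_t>(product >> 33);  // 32 + shift(1)
  return quotient + static_cast<int32_t>(static_cast<uint32_t>(n) >> 31);
}
// The smull/sub/DeoptimizeIf tail above then re-multiplies quotient by the
// divisor and deopts unless it equals the dividend, i.e. unless the division
// was exact.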
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
- const Register result = ToRegister(instr->result());
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
// Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(divisor, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive;
- if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
// Do the test only if it hadn't be done above.
- __ cmp(right, Operand::Zero());
+ __ cmp(divisor, Operand::Zero());
}
__ b(pl, &positive);
- __ cmp(left, Operand::Zero());
+ __ cmp(dividend, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
__ bind(&positive);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ (!CpuFeatures::IsSupported(SUDIV) ||
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ // We don't need to check for overflow when truncating with sdiv
+ // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
+ __ cmp(dividend, Operand(kMinInt));
+ __ cmp(divisor, Operand(-1), eq);
DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
}
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
- __ sdiv(result, left, right);
-
- if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- // Compute remainder and deopt if it's not zero.
- const Register remainder = scratch0();
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr->environment());
- }
+ __ sdiv(result, dividend, divisor);
} else {
- const DoubleRegister vleft = ToDoubleRegister(instr->temp());
- const DoubleRegister vright = double_scratch0();
- __ vmov(double_scratch0().low(), left);
+ DoubleRegister vleft = ToDoubleRegister(instr->temp());
+ DoubleRegister vright = double_scratch0();
+ __ vmov(double_scratch0().low(), dividend);
__ vcvt_f64_s32(vleft, double_scratch0().low());
- __ vmov(double_scratch0().low(), right);
+ __ vmov(double_scratch0().low(), divisor);
__ vcvt_f64_s32(vright, double_scratch0().low());
__ vdiv(vleft, vleft, vright); // vleft now contains the result.
__ vcvt_s32_f64(double_scratch0().low(), vleft);
__ vmov(result, double_scratch0().low());
+ }
- if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- // Deopt if exact conversion to integer was not possible.
- // Use vright as scratch register.
- __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
- __ VFPCompareAndSetFlags(vleft, double_scratch0());
- DeoptimizeIf(ne, instr->environment());
- }
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = scratch0();
+ __ Mls(remainder, result, divisor, dividend);
+ __ cmp(remainder, Operand::Zero());
+ DeoptimizeIf(ne, instr->environment());
}
}
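// The Mls macro above computes remainder = dividend - quotient * divisor;
// presumably it expands to a single mls on cores that have it and to
// mul + sub on older ones (an assumption; hence the capitalized macro rather
// than a bare __ mls). As plain arithmetic:
#include <cstdint>
static int32_t Remainder(int32_t dividend, int32_t quotient, int32_t divisor) {
  return dividend - quotient * divisor;  // zero iff the division was exact
}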
@@ -1520,74 +1444,156 @@ void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
- if (!CpuFeatures::IsSupported(SUDIV)) {
-    // If the CPU doesn't support the sdiv instruction, we only optimize when
-    // we have magic numbers for the divisor. The standard integer division
-    // routine is usually slower than transitioning to VFP.
- ASSERT(instr->right()->IsConstantOperand());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
- if (divisor < 0) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
- __ cmp(remainder, Operand::Zero());
- __ teq(remainder, Operand(divisor), ne);
- __ sub(result, result, Operand(1), LeaveCC, mi);
- } else {
- CpuFeatureScope scope(masm(), SUDIV);
- const Register right = ToRegister(instr->right());
+ // If the divisor is 1, return the dividend.
+ if (divisor == 1) {
+ __ Move(result, dividend);
+ return;
+ }
- // Check for x / 0.
- __ cmp(right, Operand::Zero());
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ mov(result, Operand(dividend, ASR, shift));
+ return;
+ }
+
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ rsb(result, dividend, Operand::Zero(), SetCC);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr->environment());
+ }
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
- DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ DeoptimizeIf(vs, instr->environment());
}
+ return;
+ }
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ mov(result, Operand(result, ASR, shift));
+ return;
+ }
+
+ __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
+ __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
+}
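// Sketch of the negative-power-of-two path above, assuming the dividend is
// not kMinInt (that case is caught via the overflow flag and the vs/vc
// conditional moves): negate, then let the arithmetic shift do the flooring.
#include <cstdint>
static int32_t FlooringDivByNegPowerOf2(int32_t dividend, int shift) {
  int32_t negated = -dividend;  // the rsb above
  return negated >> shift;      // arithmetic shift floors toward -infinity
}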
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Operand::Zero());
+ __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ sub(result, result, Operand(1));
+ __ bind(&done);
+}
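// Model of the adjustment above: when the dividend and divisor have opposite
// signs, flooring division equals truncating division of (dividend +/- 1)
// minus one. Illustrative only; assumes divisor != 0 and no int32 overflow.
#include <cstdint>
static int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
  if (dividend == 0 || (dividend < 0) == (divisor < 0))
    return dividend / divisor;  // truncation already matches flooring
  return (dividend + (divisor > 0 ? 1 : -1)) / divisor - 1;
}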
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register left = ToRegister(instr->dividend());
+ Register right = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(right, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive;
+ if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
+      // Do the test only if it hasn't been done above.
__ cmp(right, Operand::Zero());
- __ cmp(left, Operand::Zero(), mi);
- // "right" can't be null because the code would have already been
- // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
- // In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
}
+ __ b(pl, &positive);
+ __ cmp(left, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&positive);
+ }
- Label done;
- __ sdiv(result, left, right);
- // If both operands have the same sign then we are done.
- __ eor(remainder, left, Operand(right), SetCC);
- __ b(pl, &done);
-
- // Check if the result needs to be corrected.
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- __ sub(result, result, Operand(1), LeaveCC, ne);
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ (!CpuFeatures::IsSupported(SUDIV) ||
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ // We don't need to check for overflow when truncating with sdiv
+ // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
+ __ cmp(left, Operand(kMinInt));
+ __ cmp(right, Operand(-1), eq);
+ DeoptimizeIf(eq, instr->environment());
+ }
- __ bind(&done);
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm(), SUDIV);
+ __ sdiv(result, left, right);
+ } else {
+ DoubleRegister vleft = ToDoubleRegister(instr->temp());
+ DoubleRegister vright = double_scratch0();
+ __ vmov(double_scratch0().low(), left);
+ __ vcvt_f64_s32(vleft, double_scratch0().low());
+ __ vmov(double_scratch0().low(), right);
+ __ vcvt_f64_s32(vright, double_scratch0().low());
+ __ vdiv(vleft, vleft, vright); // vleft now contains the result.
+ __ vcvt_s32_f64(double_scratch0().low(), vleft);
+ __ vmov(result, double_scratch0().low());
}
+
+ Label done;
+ Register remainder = scratch0();
+ __ Mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ __ b(eq, &done);
+ __ eor(remainder, remainder, Operand(right));
+ __ add(result, result, Operand(remainder, ASR, 31));
+ __ bind(&done);
}
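// The branch-free tail above: after a truncating division, the quotient is
// one too large for flooring exactly when the remainder is nonzero and has
// the opposite sign from the divisor, in which case (remainder ^ divisor) is
// negative and its arithmetic shift by 31 yields -1. Sketch:
#include <cstdint>
static int32_t FloorFromTruncating(int32_t q, int32_t rem, int32_t divisor) {
  if (rem == 0) return q;              // the early b(eq, &done) above
  return q + ((rem ^ divisor) >> 31);  // adds -1 iff the signs differ
}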
@@ -1706,7 +1712,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister(instr->result());
Operand right(no_reg);
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, ip));
} else {
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
@@ -1829,7 +1835,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -1850,7 +1856,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -1888,9 +1894,9 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value(isolate());
+ Handle<Object> object = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
- __ Move(ToRegister(instr->result()), value);
+ __ Move(ToRegister(instr->result()), object);
}
@@ -1901,43 +1907,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ SmiTst(input);
- __ Move(result, input, eq);
- __ b(eq, &done);
- }
-
- // If the object is not a value type, return the object.
- __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
- __ Move(result, input, ne);
- __ b(ne, &done);
- __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -2053,17 +2022,6 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->value()));
- ASSERT(ToRegister(instr->context()).is(cp));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-}
-
-
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
@@ -2071,7 +2029,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -2171,12 +2129,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
break;
case Token::MOD: {
__ PrepareCallCFunction(0, 2, scratch0());
- __ SetCallCDoubleArguments(left, right);
+ __ MovToFloatParameters(left, right);
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
- __ GetCFunctionDoubleResult(result);
+ __ MovFromFloatResult(result);
break;
}
default:
@@ -2192,11 +2150,11 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2415,7 +2373,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- Condition cond = TokenToCondition(instr->op(), false);
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op(), is_unsigned);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
@@ -2447,8 +2408,8 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
} else {
__ cmp(ToRegister(right), Operand(value));
}
- // We transposed the operands. Reverse the condition.
- cond = ReverseCondition(cond);
+ // We commuted the operands, so commute the condition.
+ cond = CommuteCondition(cond);
} else {
__ cmp(ToRegister(left), ToRegister(right));
}
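// Commuting is not negating: swapping the cmp operands flips lt/gt and
// le/ge but leaves eq and ne alone, whereas NegateCondition would map lt to
// ge. A model of the distinction (illustrative enum, not V8's Condition):
enum Cond { kEq, kNe, kLt, kLe, kGt, kGe };
static Cond Commute(Cond c) {
  switch (c) {
    case kLt: return kGt;
    case kGt: return kLt;
    case kLe: return kGe;
    case kGe: return kLe;
    default:  return c;  // eq/ne are symmetric under an operand swap
  }
}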
@@ -2571,7 +2532,7 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register temp1 = ToRegister(instr->temp());
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond =
EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
@@ -2591,7 +2552,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
__ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -2660,7 +2621,7 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
@@ -2787,8 +2748,8 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ cmp(r0, Operand::Zero());
__ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
@@ -2820,9 +2781,6 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
- ASSERT(object.is(r0));
- ASSERT(result.is(r0));
-
// A Smi is not instance of anything.
__ JumpIfSmi(object, &false_result);
@@ -2880,9 +2838,6 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check) {
- Register result = ToRegister(instr->result());
- ASSERT(result.is(r0));
-
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
@@ -2890,42 +2845,37 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
flags | InstanceofStub::kCallSiteInlineCheck);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
+ InstanceofStub stub(isolate(), flags);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- // Get the temp register reserved by the instruction. This needs to be r4 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
- ASSERT(temp.is(r4));
__ Move(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 5;
+ static const int kAdditionalDelta = 4;
   // Make sure that code size is predictable, since we use specific constant
   // offsets in the code to find embedded values.
- PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
- __ mov(temp, Operand(delta * kPointerSize));
+ // r5 is used to communicate the offset to the location of the map check.
+ __ mov(r5, Operand(delta * kPointerSize));
// The mov above can generate one or two instructions. The delta was computed
// for two instructions, so we need to pad here in case of one instruction.
if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
__ nop();
}
- __ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(isolate()),
+ CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
+ // Put the result value (r0) into the result register slot and
// restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
+ __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
}
@@ -2963,9 +2913,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
- __ mov(sp, fp);
- no_frame_start = masm_->pc_offset();
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
}
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
@@ -3006,10 +2954,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3038,19 +2985,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->global_object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3089,7 +3023,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ str(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
@@ -3140,7 +3074,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3195,15 +3129,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->object());
- __ ldr(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
@@ -3257,53 +3182,65 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+ int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(double_scratch0().low(), scratch0(), additional_offset);
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ vldr(double_scratch0().low(), scratch0(), base_offset);
__ vcvt_f64_f32(result, double_scratch0().low());
   } else { // Loading doubles, not floats.
- __ vldr(result, scratch0(), additional_offset);
+ __ vldr(result, scratch0(), base_offset);
}
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
+ element_size_shift, shift_size, base_offset);
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ ldrb(result, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ ldrh(result, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ ldr(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
DeoptimizeIf(cs, instr->environment());
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -3311,7 +3248,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3328,15 +3265,13 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int base_offset =
- FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- (instr->additional_index() << element_size_shift);
+ int base_offset = instr->base_offset();
if (key_is_constant) {
int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- base_offset += constant_key << element_size_shift;
+ base_offset += constant_key * kDoubleSize;
}
__ add(scratch, elements, Operand(base_offset));
@@ -3362,12 +3297,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Register store_base = scratch;
- int offset = 0;
+ int offset = instr->base_offset();
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ offset += ToInteger32(const_operand) * kPointerSize;
store_base = elements;
} else {
Register key = ToRegister(instr->key());
@@ -3380,9 +3314,8 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
- __ ldr(result, FieldMemOperand(store_base, offset));
+ __ ldr(result, MemOperand(store_base, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -3399,7 +3332,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3415,19 +3348,12 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
int constant_key,
int element_size,
int shift_size,
- int additional_index,
- int additional_offset) {
- if (additional_index != 0 && !key_is_constant) {
- additional_index *= 1 << (element_size - shift_size);
- __ add(scratch0(), key, Operand(additional_index));
- }
-
+ int base_offset) {
if (key_is_constant) {
- return MemOperand(base,
- (constant_key << element_size) + additional_offset);
+ return MemOperand(base, (constant_key << element_size) + base_offset);
}
- if (additional_index == 0) {
+ if (base_offset == 0) {
if (shift_size >= 0) {
return MemOperand(base, key, LSL, shift_size);
} else {
@@ -3437,10 +3363,12 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
if (shift_size >= 0) {
- return MemOperand(base, scratch0(), LSL, shift_size);
+ __ add(scratch0(), base, Operand(key, LSL, shift_size));
+ return MemOperand(scratch0(), base_offset);
} else {
ASSERT_EQ(-1, shift_size);
- return MemOperand(base, scratch0(), LSR, 1);
+ __ add(scratch0(), base, Operand(key, ASR, 1));
+ return MemOperand(scratch0(), base_offset);
}
}
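// Effective address computed above, as plain arithmetic. A shift_size of -1
// occurs when the key is a smi (the integer value shifted left by one), so
// an ASR by 1 recovers the untagged index. Sketch with illustrative types:
#include <cstdint>
static uintptr_t KeyedAddress(uintptr_t base, intptr_t key, int shift_size,
                              int base_offset) {
  intptr_t scaled = (shift_size >= 0) ? (key << shift_size) : (key >> 1);
  return base + scaled + base_offset;
}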
@@ -3509,19 +3437,21 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// passed unchanged to builtins and strict-mode functions.
Label global_object, result_in_receiver;
- // Do not transform the receiver to object for strict mode
- // functions.
- __ ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(scratch,
- Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
- __ b(ne, &result_in_receiver);
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ __ tst(scratch, Operand(mask));
+ __ b(ne, &result_in_receiver);
- // Do not transform the receiver to object for builtins.
- __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &result_in_receiver);
+ // Do not transform the receiver to object for builtins.
+ __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &result_in_receiver);
+ }
// Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
@@ -3536,14 +3466,15 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
DeoptimizeIf(lt, instr->environment());
- __ b(&result_in_receiver);
+ __ b(&result_in_receiver);
__ bind(&global_object);
-
- __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ ldr(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ ldr(result,
+ ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
__ ldr(result,
- FieldMemOperand(result, JSGlobalObject::kGlobalReceiverOffset));
+ FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+
if (result.is(receiver)) {
__ bind(&result_in_receiver);
} else {
@@ -3599,8 +3530,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3638,14 +3568,6 @@ void LCodeGen::DoContext(LContext* instr) {
}
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
__ push(cp); // The context is the first argument.
@@ -3653,21 +3575,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global_object());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3675,7 +3583,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
R1State r1_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3699,7 +3606,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ SetCallKind(r5, call_kind);
__ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ Call(ip);
@@ -3709,23 +3615,11 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- R1_UNINITIALIZED);
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
ASSERT(instr->context() != NULL);
ASSERT(ToRegister(instr->context()).is(cp));
@@ -3772,7 +3666,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
@@ -3943,7 +3837,7 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
@@ -3953,14 +3847,14 @@ void LCodeGen::DoPower(LPower* instr) {
__ cmp(r6, Operand(ip));
DeoptimizeIf(ne, instr->environment());
__ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
@@ -3981,46 +3875,18 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, Operand::Zero());
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, Operand::Zero());
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
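// Math.log now goes through the C runtime instead of the transcendental
// cache stub. MovToFloatParameter/MovFromFloatResult presumably hide the ABI
// difference between hard-float (argument stays in d0) and soft-float
// (argument passed in r0/r1) targets; that reading of the helpers is an
// assumption. The call itself is morally:
#include <cmath>
static double MathLogModel(double x) { return std::log(x); }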
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, Operand::Zero());
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, Operand::Zero());
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ clz(result, input);
}
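// Math.clz32 maps directly onto the ARM clz instruction, which defines
// clz(0) == 32, so no special casing is needed in the generated code. A
// portable model (the zero guard is required only because __builtin_clz(0)
// is undefined in C++):
#include <cstdint>
static int32_t Clz32(uint32_t x) {
  return x == 0 ? 32 : __builtin_clz(x);
}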
@@ -4034,79 +3900,69 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
R1_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ PlatformCallInterfaceDescriptor* call_descriptor =
+ instr->descriptor()->platform_specific_descriptor();
+ __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
+ call_descriptor->storage_mode());
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ mov(r0, Operand(instr->arity()));
+ }
+ // Change context.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
+ // Load the code entry address
+ __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ mov(sp, fp);
- __ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
- } else {
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- R1_UNINITIALIZED);
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4117,10 +3973,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ mov(r0, Operand(instr->arity()));
// No cell in r2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ mov(r2, Operand(undefined_value));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4130,17 +3985,16 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
- __ mov(r2, Operand(instr->hydrogen()->property_cell()));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4152,19 +4006,20 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ b(eq, &packed_case);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -4211,46 +4066,38 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
return;
}
- Handle<Map> transition = instr->transition();
+ __ AssertNotSmi(object);
- if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- Register value = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ SmiTst(value);
- DeoptimizeIf(eq, instr->environment());
- }
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- ASSERT(transition.is_null());
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
DwVfpRegister value = ToDoubleRegister(instr->value());
__ vstr(value, FieldMemOperand(object, offset));
return;
}
- if (!transition.is_null()) {
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
+ AddDeprecationDependency(transition);
__ mov(scratch, Operand(transition));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
// Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- scratch,
- temp,
- GetLinkRegisterState(),
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteForMap(object,
+ scratch,
+ temp,
+ GetLinkRegisterState(),
+ kSaveFPRegs);
}
}
// Do the store.
Register value = ToRegister(instr->value());
- ASSERT(!object.is(value));
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -4263,7 +4110,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -4279,7 +4127,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
}
@@ -4292,45 +4141,34 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
-void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+ if (instr->index()->IsConstantOperand()) {
+ Operand index = ToOperand(instr->index());
+ Register length = ToRegister(instr->length());
+ __ cmp(length, index);
+ cc = CommuteCondition(cc);
+ } else {
+ Register index = ToRegister(instr->index());
+ Operand length = ToOperand(instr->length());
+ __ cmp(index, length);
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
Label done;
- __ b(NegateCondition(condition), &done);
+ __ b(NegateCondition(cc), &done);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(condition, check->environment());
+ DeoptimizeIf(cc, instr->environment());
}
}
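// With a constant index the operands are swapped (cmp length, index), so the
// condition must be commuted to keep the same predicate. In index/length
// terms the deopt predicate is (illustrative model):
#include <cstdint>
static bool BoundsCheckFails(uint32_t index, uint32_t length,
                             bool allow_equality) {
  // hi/hs are unsigned comparisons; allow_equality permits index == length.
  return allow_equality ? (index > length) : (index >= length);
}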
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsSmi()) {
- __ mov(ip, Operand(Smi::FromInt(constant_index)));
- } else {
- __ mov(ip, Operand(constant_index));
- }
- __ cmp(ip, ToRegister(instr->length()));
- } else {
- __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
- }
- Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
- ApplyCheckIf(condition, instr);
-}
-
-
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
@@ -4348,10 +4186,12 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+ int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4364,34 +4204,44 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else {
__ add(address, external_pointer, Operand(key, LSL, shift_size));
}
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), address, additional_offset);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, address, additional_offset);
+ __ vstr(double_scratch0().low(), address, base_offset);
+ } else { // Storing doubles, not floats.
+ __ vstr(value, address, base_offset);
}
} else {
Register value(ToRegister(instr->value()));
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
+ base_offset);
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
__ strb(value, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ strh(value, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ str(value, mem_operand);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4399,7 +4249,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4413,6 +4263,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Register scratch = scratch0();
DwVfpRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
+ int base_offset = instr->base_offset();
// Calculate the effective address of the slot in the array to store the
// double value.
@@ -4423,13 +4274,11 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Abort(kArrayIndexConstantValueTooBig);
}
__ add(scratch, elements,
- Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand((constant_key << element_size_shift) + base_offset));
} else {
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- __ add(scratch, elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, elements, Operand(base_offset));
__ add(scratch, scratch,
Operand(ToRegister(instr->key()), LSL, shift_size));
}
@@ -4442,10 +4291,9 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ Assert(ne, kDefaultNaNModeNotSet);
}
__ VFPCanonicalizeNaN(double_scratch, value);
- __ vstr(double_scratch, scratch,
- instr->additional_index() << element_size_shift);
+ __ vstr(double_scratch, scratch, 0);
} else {
- __ vstr(value, scratch, instr->additional_index() << element_size_shift);
+ __ vstr(value, scratch, 0);
}
}
@@ -4457,14 +4305,13 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
: no_reg;
Register scratch = scratch0();
Register store_base = scratch;
- int offset = 0;
+ int offset = instr->base_offset();
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ offset += ToInteger32(const_operand) * kPointerSize;
store_base = elements;
} else {
// Even though the HLoadKeyed instruction forces the input
@@ -4476,30 +4323,30 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
- __ str(value, FieldMemOperand(store_base, offset));
+ __ str(value, MemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ add(key, store_base, Operand(offset - kHeapObjectTag));
+ __ add(key, store_base, Operand(offset));
__ RecordWrite(elements,
key,
value,
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ check_needed,
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
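
With the base_offset scheme above, the element displacement already folds in the array header and the heap-object tag, which is why the store switches from FieldMemOperand to a plain MemOperand. A hedged model of the constant-key arithmetic, using the usual ARM32 constants rather than values taken from this diff:

  const int kPointerSize = 4;
  const int kHeapObjectTag = 1;                    // tagged heap pointers
  const int kFixedArrayHeader = 2 * kPointerSize;  // map word + length word
  // Equivalent of FieldMemOperand(base, off): MemOperand(base, off - tag).
  int ElementOffset(int key) {
    int base_offset = kFixedArrayHeader - kHeapObjectTag;  // untagged base
    return base_offset + key * kPointerSize;
  }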
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: typed elements, fast double elements, then fast elements.
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4515,7 +4362,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
@@ -4541,18 +4388,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ mov(new_map_reg, Operand(to_map));
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- scratch, GetLinkRegisterState(), kDontSaveFPRegs);
+ __ RecordWriteForMap(object_reg,
+ new_map_reg,
+ scratch,
+ GetLinkRegisterState(),
+ kDontSaveFPRegs);
} else {
ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(object_reg.is(r0));
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
- __ Move(r0, object_reg);
__ Move(r1, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
__ bind(&not_applicable);
}
@@ -4570,18 +4421,12 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
- if (FLAG_new_string_add) {
- ASSERT(ToRegister(instr->left()).is(r1));
- ASSERT(ToRegister(instr->right()).is(r0));
- NewStringAddStub stub(instr->hydrogen()->flags(),
- isolate()->heap()->GetPretenureMode());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
+ ASSERT(ToRegister(instr->left()).is(r1));
+ ASSERT(ToRegister(instr->right()).is(r0));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4633,7 +4478,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
instr->context());
__ AssertSmi(r0);
__ SmiUntag(r0);
@@ -4708,20 +4553,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- ASSERT(output->IsRegister());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ SmiTag(ToRegister(output), ToRegister(input));
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4732,27 +4563,17 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ tst(ToRegister(input), Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment());
- }
- __ SmiTag(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4775,9 +4596,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4795,18 +4618,19 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
LowDwVfpRegister dbl_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4823,38 +4647,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
}
if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, r5);
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
__ b(&done);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, Operand::Zero());
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ mov(ip, Operand::Zero());
- __ StoreToSafepointRegisterSlot(ip, dst);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Move(dst, r0);
- __ sub(dst, dst, Operand(kHeapObjectTag));
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
+ __ StoreToSafepointRegisterSlot(r0, dst);
+ }
  // Done. Store the double in dbl_scratch into the value field of the
  // allocated heap number.
__ bind(&done);
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
__ add(dst, dst, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(dst, dst);
}
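
The TODO aside, the shape of the DONT_TAG_RESULT path above is: the allocator returns an untagged pointer, the double is stored at a raw offset, and the tag is added exactly once at the end. A simulated, self-contained sketch (a plain buffer in place of the heap, usual 32-bit offsets assumed):

  #include <cstdint>
  #include <cstring>
  const uintptr_t kHeapObjectTag = 1;
  const size_t kValueOffset = 4;  // double value field, past the map word
  uintptr_t StoreAndTag(uint8_t* raw, double value) {
    std::memcpy(raw + kValueOffset, &value, sizeof value);     // untagged vstr
    return reinterpret_cast<uintptr_t>(raw) + kHeapObjectTag;  // add dst, #1
  }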
@@ -4903,11 +4729,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ sub(r0, r0, Operand(kHeapObjectTag));
@@ -4916,8 +4742,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ tst(input, Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTag(output, input, SetCC);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ SmiTag(output, input);
+ }
}
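
The merged DoSmiTag above now covers both paths that the deleted DoInteger32ToSmi and DoUint32ToSmi used to handle. The two deopt conditions, restated portably under the assumption of 31-bit smis on 32-bit ARM:

  #include <cstdint>
  // A uint32 fits in a smi only if its top two bits are clear (v < 2^30),
  // hence the tst against 0xc0000000. A signed int32 overflows the one-bit
  // tag shift exactly when bits 30 and 31 differ, which "lsl #1" with SetCC
  // reports via the V flag.
  bool Uint32FitsInSmi(uint32_t v) { return (v & 0xc0000000u) == 0; }
  bool Int32FitsInSmi(int32_t v) {
    return v >= -(1 << 30) && v <= (1 << 30) - 1;
  }
  int32_t SmiTag(int32_t v) {  // a smi's low tag bit is 0
    return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
  }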
@@ -5181,7 +5020,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
DeoptimizeIf(eq, instr->environment());
@@ -5254,7 +5093,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(object);
__ mov(cp, Operand::Zero());
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(r0, scratch0());
@@ -5282,7 +5121,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register object_;
};
- if (instr->hydrogen()->CanOmitMapChecks()) return;
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
Register map_reg = scratch0();
LOperand* input = instr->value();
@@ -5292,22 +5138,22 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- for (int i = 0; i < map_set.size() - 1; i++) {
- Handle<Map> map = map_set.at(i).handle();
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
__ CompareMap(map_reg, map, &success);
__ b(eq, &success);
}
- Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
__ CompareMap(map_reg, map, &success);
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
DeoptimizeIf(ne, instr->environment());
@@ -5367,6 +5213,26 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DwVfpRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ VmovHigh(result_reg, value_reg);
+ } else {
+ __ VmovLow(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+ __ VmovHigh(result_reg, hi_reg);
+ __ VmovLow(result_reg, lo_reg);
+}
+
+
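
The two new instructions have straightforward portable equivalents, assuming IEEE-754 doubles: VmovHigh reads the word holding the sign, exponent, and top of the mantissa; VmovLow reads the low mantissa word; and DoConstructDouble glues them back together:

  #include <cstdint>
  #include <cstring>
  uint32_t DoubleHighBits(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return static_cast<uint32_t>(bits >> 32);
  }
  double ConstructDouble(uint32_t hi, uint32_t lo) {
    uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
  }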
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5457,7 +5323,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Push(Smi::FromInt(size));
+ if (size >= 0 && size <= Smi::kMaxValue) {
+ __ Push(Smi::FromInt(size));
+ } else {
+ // We should never get here at runtime => abort
+ __ stop("invalid allocation size");
+ return;
+ }
}
int flags = AllocateDoubleAlignFlag::encode(
@@ -5475,7 +5347,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -5509,7 +5381,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ mov(r4, Operand(instr->hydrogen()->pattern()));
__ mov(r3, Operand(instr->hydrogen()->flags()));
__ Push(r6, r5, r4, r3);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
__ bind(&materialized);
@@ -5522,7 +5394,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ Push(r1, r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(r1);
__ bind(&allocated);
@@ -5537,16 +5409,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
__ mov(r1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, r2, r1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5577,13 +5450,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Handle<String> type_name) {
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label);
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->string_string())) {
+ } else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
__ b(ge, false_label);
@@ -5591,22 +5465,23 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->symbol_string())) {
+ } else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->boolean_string())) {
+ } else if (String::Equals(type_name, factory->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ b(eq, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->undefined_string())) {
+ } else if (String::Equals(type_name, factory->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ b(eq, true_label);
__ JumpIfSmi(input, false_label);
@@ -5616,7 +5491,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = ne;
- } else if (type_name->Equals(heap()->function_string())) {
+ } else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
Register type_reg = scratch;
__ JumpIfSmi(input, false_label);
@@ -5625,7 +5500,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->object_string())) {
+ } else if (String::Equals(type_name, factory->object_string())) {
Register map = scratch;
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
@@ -5664,39 +5539,37 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
__ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
__ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
__ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);
// Check the marker in the calling frame.
- __ bind(&check_frame_marker);
__ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
__ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block literal pool emission for duration of padding.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block literal pool emission for duration of padding.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
}
}
+ last_lazy_deopt_pc_ = masm()->pc_offset();
}
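
Behavioral note on the rewrite above: last_lazy_deopt_pc_ is now updated inside EnsureSpaceForLazyDeopt itself, which is what lets the call sites below drop their explicit updates. The padding arithmetic in miniature (kInstrSize is 4 on ARM):

  // Pad with nops until space_needed bytes separate the patch site from the
  // previous lazy-deopt point; the gap is a multiple of kInstrSize.
  int PaddingBytes(int current_pc, int last_lazy_deopt_pc, int space_needed) {
    int gap = last_lazy_deopt_pc + space_needed - current_pc;
    return gap > 0 ? gap : 0;
  }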
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5733,7 +5606,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -5765,17 +5638,13 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &done);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(masm(),
+ CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
ASSERT(instr->context()->IsRegister());
ASSERT(ToRegister(instr->context()).is(cp));
- CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
+ CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -5785,7 +5654,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5879,13 +5747,61 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object);
+ __ Push(index);
+ __ mov(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, result, object, index);
+
Label out_of_object, done;
+
+ __ tst(index, Operand(Smi::FromInt(1)));
+ __ b(ne, deferred->entry());
+ __ mov(index, Operand(index, ASR, 1));
+
__ cmp(index, Operand::Zero());
__ b(lt, &out_of_object);
@@ -5901,10 +5817,26 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
__ ldr(result, FieldMemOperand(scratch,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
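
The new fast/slow split keys off the encoding of the incoming field index: the lowest payload bit flags a mutable-double field (routed to the deferred runtime call above), and the remaining bits are the signed index, non-negative for in-object fields and negative for the out-of-object properties array. Decoded on the untagged smi payload, roughly (the exact encoding is defined by the hydrogen lowering, not shown in this diff):

  #include <cstdint>
  bool IsMutableDoubleField(int32_t payload) { return (payload & 1) != 0; }
  int32_t DecodeFieldIndex(int32_t payload) { return payload >> 1; }
  bool IsInObjectField(int32_t index) { return index >= 0; }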
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.h b/chromium/v8/src/arm/lithium-codegen-arm.h
index de27a36fdcb..b20b3f28f8b 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.h
+++ b/chromium/v8/src/arm/lithium-codegen-arm.h
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
-#include "arm/lithium-arm.h"
+#include "src/arm/lithium-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "deoptimizer.h"
-#include "lithium-codegen.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "v8utils.h"
+#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -126,9 +103,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -139,6 +118,10 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -150,8 +133,7 @@ class LCodeGen: public LCodeGenBase {
int constant_key,
int element_size,
int shift_size,
- int additional_index,
- int additional_offset);
+ int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -162,9 +144,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
@@ -182,8 +162,6 @@ class LCodeGen: public LCodeGenBase {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(BailoutReason reason);
-
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
@@ -205,6 +183,8 @@ class LCodeGen: public LCodeGenBase {
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
+ int CallCodeSize(Handle<Code> code, RelocInfo::Mode mode);
+
void CallCode(
Handle<Code> code,
RelocInfo::Mode mode,
@@ -247,7 +227,6 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
R1State r1_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -259,7 +238,6 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition condition, LEnvironment* environment);
- void ApplyCheckIf(Condition condition, LBoundsCheck* check);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
@@ -268,7 +246,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -350,17 +327,6 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
-
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
diff --git a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc b/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
index 0c6b2adadfd..8ca235a7e34 100644
--- a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -1,43 +1,31 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arm/lithium-gap-resolver-arm.h"
-#include "arm/lithium-codegen-arm.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/arm/lithium-codegen-arm.h"
namespace v8 {
namespace internal {
-static const Register kSavedValueRegister = { 9 };
+// We use the root register to spill a value while breaking a cycle in parallel
+// moves. We don't need access to roots while resolving the move list, and
+// using the root register has two advantages:
+// - It is not in crankshaft allocatable registers list, so it can't interfere
+// with any of the moves we are resolving.
+// - We don't need to push it on the stack, as we can reload it with its value
+// once we have resolved a cycle.
+#define kSavedValueRegister kRootRegister
+
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
- saved_destination_(NULL) { }
+ saved_destination_(NULL), need_to_restore_root_(false) { }
+
+
+#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Resolve(LParallelMove* parallel_move) {
@@ -67,6 +55,12 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) {
}
}
+ if (need_to_restore_root_) {
+ ASSERT(kSavedValueRegister.is(kRootRegister));
+ __ InitializeRootRegister();
+ need_to_restore_root_ = false;
+ }
+
moves_.Rewind(0);
}
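
To see why a spill location is needed at all: a cyclic parallel move such as (r0, r1) <- (r1, r0) has no safe emission order, so one edge is broken by parking a value in kRootRegister, and the register is re-initialized once per Resolve. A minimal model of BreakCycle/RestoreValue with one spare slot:

  void ResolveTwoCycle(int& a, int& b, int& spare /* the root register */) {
    spare = a;  // BreakCycle: save the source of the cycle root
    a = b;      // the remaining move of the cycle can now be emitted
    b = spare;  // RestoreValue: write the saved value to the destination
    // ...after which the caller restores the spare's real value
    // (InitializeRootRegister above).
  }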
@@ -155,20 +149,21 @@ void LGapResolver::Verify() {
#endif
}
-#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
+ // We save the source of that move in a register and remember its
+ // destination. The move is then marked as resolved, so the cycle is
+ // broken and the remaining moves can be performed.
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
+ need_to_restore_root_ = true;
__ mov(kSavedValueRegister, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
+ need_to_restore_root_ = true;
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
@@ -186,7 +181,6 @@ void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
- // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
} else if (saved_destination_->IsStackSlot()) {
@@ -226,20 +220,15 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- if (!destination_operand.OffsetIsUint12Encodable()) {
- // ip is overwritten while saving the value to the destination.
- // Therefore we can't use ip. It is OK if the read from the source
- // destroys ip, since that happens before the value is read.
- __ vldr(kScratchDoubleReg.low(), source_operand);
- __ vstr(kScratchDoubleReg.low(), destination_operand);
- } else {
- __ ldr(ip, source_operand);
- __ str(ip, destination_operand);
- }
+ if (!destination_operand.OffsetIsUint12Encodable()) {
+ // ip is overwritten while saving the value to the destination.
+ // Therefore we can't use ip. It is OK if the read from the source
+ // destroys ip, since that happens before the value is read.
+ __ vldr(kScratchDoubleReg.low(), source_operand);
+ __ vstr(kScratchDoubleReg.low(), destination_operand);
} else {
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
+ __ ldr(ip, source_operand);
+ __ str(ip, destination_operand);
}
}
@@ -261,14 +250,14 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ need_to_restore_root_ = true;
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ mov(kSavedValueRegister,
Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ Move(kSavedValueRegister,
- cgen_->ToHandle(constant_source));
+ __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
}
__ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
}
@@ -290,16 +279,11 @@ void LGapResolver::EmitMove(int index) {
ASSERT(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
- // kSavedDoubleValueRegister was used to break the cycle,
- // but kSavedValueRegister is free.
- MemOperand source_high_operand =
- cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
- __ ldr(kSavedValueRegister, source_high_operand);
- __ str(kSavedValueRegister, destination_high_operand);
+ // kScratchDoubleReg was used to break the cycle.
+ __ vstm(db_w, sp, kScratchDoubleReg, kScratchDoubleReg);
+ __ vldr(kScratchDoubleReg, source_operand);
+ __ vstr(kScratchDoubleReg, destination_operand);
+ __ vldm(ia_w, sp, kScratchDoubleReg, kScratchDoubleReg);
} else {
__ vldr(kScratchDoubleReg, source_operand);
__ vstr(kScratchDoubleReg, destination_operand);
diff --git a/chromium/v8/src/arm/lithium-gap-resolver-arm.h b/chromium/v8/src/arm/lithium-gap-resolver-arm.h
index 044c2864a42..909ea643980 100644
--- a/chromium/v8/src/arm/lithium-gap-resolver-arm.h
+++ b/chromium/v8/src/arm/lithium-gap-resolver-arm.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium.h"
+#include "src/lithium.h"
namespace v8 {
namespace internal {
@@ -76,6 +53,10 @@ class LGapResolver V8_FINAL BASE_EMBEDDED {
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
+
+ // We use the root register as a scratch in a few places. When that happens,
+ // this flag is set to indicate that it needs to be restored.
+ bool need_to_restore_root_;
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/arm/macro-assembler-arm.cc b/chromium/v8/src/arm/macro-assembler-arm.cc
index 5f6076b41df..c34a7f75b68 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.cc
+++ b/chromium/v8/src/arm/macro-assembler-arm.cc
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "isolate-inl.h"
-#include "runtime.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
@@ -59,8 +36,8 @@ void MacroAssembler::Jump(Register target, Condition cond) {
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
- mov(ip, Operand(target, rmode));
- bx(ip, cond);
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ mov(pc, Operand(target, rmode), LeaveCC, cond);
}
@@ -107,8 +84,16 @@ int MacroAssembler::CallSize(
}
-int MacroAssembler::CallSizeNotPredictableCodeSize(
- Address target, RelocInfo::Mode rmode, Condition cond) {
+int MacroAssembler::CallStubSize(
+ CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
+ return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+}
+
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
+ Address target,
+ RelocInfo::Mode rmode,
+ Condition cond) {
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
intptr_t immediate = reinterpret_cast<intptr_t>(target);
@@ -133,6 +118,12 @@ void MacroAssembler::Call(Address target,
set_predictable_code_size(true);
}
+#ifdef DEBUG
+ // Compute the expected size before generating code, so that both computations
+ // make the same assumptions about constant pool availability (e.g., whether
+ // the constant pool is full or not).
+ int expected_size = CallSize(target, rmode, cond);
+#endif
+
// Call sequence on V7 or later may be :
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@@ -153,7 +144,7 @@ void MacroAssembler::Call(Address target,
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
- ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+ ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
@@ -263,6 +254,19 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
}
+void MacroAssembler::Mls(Register dst, Register src1, Register src2,
+ Register srcA, Condition cond) {
+ if (CpuFeatures::IsSupported(MLS)) {
+ CpuFeatureScope scope(this, MLS);
+ mls(dst, src1, src2, srcA, cond);
+ } else {
+ ASSERT(!srcA.is(ip));  // mul below clobbers ip before srcA is read.
+ mul(ip, src1, src2, LeaveCC, cond);
+ sub(dst, srcA, ip, LeaveCC, cond);
+ }
+}
+
+
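
Both paths compute dst = srcA - src1 * src2; the fallback materializes the product in ip first, which is why srcA must not alias ip. The computation, spelled out:

  #include <cstdint>
  int32_t MlsModel(int32_t src1, int32_t src2, int32_t srcA) {
    return srcA - src1 * src2;  // multiply-and-subtract: one instruction on
                                // MLS-capable cores, mul + sub otherwise
  }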
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.is_reg() &&
@@ -410,6 +414,11 @@ void MacroAssembler::Store(Register src,
} else if (r.IsInteger16() || r.IsUInteger16()) {
strh(src, dst);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
str(src, dst);
}
}
@@ -457,7 +466,8 @@ void MacroAssembler::RecordWriteField(
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -486,7 +496,8 @@ void MacroAssembler::RecordWriteField(
lr_status,
save_fp,
remembered_set_action,
- OMIT_SMI_CHECK);
+ OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
bind(&done);
@@ -499,22 +510,100 @@ void MacroAssembler::RecordWriteField(
}
+// Will clobber 4 registers: object, map, dst, ip. The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode) {
+ if (emit_debug_code()) {
+ ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ cmp(dst, Operand(isolate()->factory()->meta_map()));
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ if (emit_debug_code()) {
+ ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
+ cmp(ip, map);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ Label done;
+
+ // A single check of the map page's interesting flag suffices, since it is
+ // only set during incremental collection, in which case the from-object's
+ // page flag is guaranteed to be set as well. This optimization relies on
+ // the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+
+ add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+ b(eq, &ok);
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ push(lr);
+ }
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ pop(lr);
+ }
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
+ mov(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+ }
+}
+
+
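
The fast path above hinges on a memory-chunk invariant: a page's flag word lives at a fixed offset from its aligned base, so one mask-and-load answers whether the page is interesting to the incremental marker. A hedged model (kPageSize and the flags offset are assumptions for illustration, not values from this diff):

  #include <cstdint>
  const uintptr_t kPageSize = uintptr_t(1) << 20;  // assumed chunk alignment
  const uintptr_t kFlagsOffset = 0;                // assumed slot in the header
  bool PageFlagSet(uintptr_t addr, uint32_t mask) {
    uintptr_t chunk = addr & ~(kPageSize - 1);     // chunk base
    uint32_t flags = *reinterpret_cast<uint32_t*>(chunk + kFlagsOffset);
    return (flags & mask) != 0;
  }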
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ ASSERT(!object.is(value));
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
// TODO(mstarzinger): Dynamic counter missing.
@@ -527,11 +616,13 @@ void MacroAssembler::RecordWrite(Register object,
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ }
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -542,7 +633,8 @@ void MacroAssembler::RecordWrite(Register object,
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
@@ -591,7 +683,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
push(lr);
StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
+ StoreBufferOverflowStub(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(lr);
bind(&done);
@@ -601,6 +693,26 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
+void MacroAssembler::PushFixedFrame(Register marker_reg) {
+ ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
+ stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
+ cp.bit() |
+ (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+ fp.bit() |
+ lr.bit());
+}
+
+
+void MacroAssembler::PopFixedFrame(Register marker_reg) {
+ ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
+ ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
+ cp.bit() |
+ (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+ fp.bit() |
+ lr.bit());
+}
+
+
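
PushFixedFrame and PopFixedFrame assemble an stm/ldm register list: a 16-bit
mask with bit n set for register rn. Since stm stores the lowest-numbered
register at the lowest address, the ASSERT that the marker's code is below
cp's guarantees the marker slot lands below the context slot in the frame. A
sketch of the mask construction; the register code numbers are assumptions
for this V8 era (cp=r7, pp=r8, fp=r11, lr=r14):

    #include <cstdint>

    constexpr uint16_t Bit(int code) { return static_cast<uint16_t>(1u << code); }

    constexpr int kCp = 7, kPp = 8, kFp = 11, kLr = 14;  // assumed codes

    constexpr uint16_t FixedFrameList(bool ool_constant_pool, int marker = -1) {
      return (marker >= 0 ? Bit(marker) : 0) | Bit(kCp) |
             (ool_constant_pool ? Bit(kPp) : 0) | Bit(kFp) | Bit(kLr);
    }
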
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0:
@@ -623,7 +735,7 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
// Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
+ ASSERT(!serializer_enabled());
PushSafepointRegisters();
// Only save allocatable registers.
ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
@@ -637,7 +749,7 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
void MacroAssembler::PopSafepointRegistersAndDoubles() {
// Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
+ ASSERT(!serializer_enabled());
// Only save allocatable registers.
ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
@@ -679,7 +791,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
+ ASSERT(!serializer_enabled());
// General purpose registers are pushed last on the stack.
int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -762,6 +874,14 @@ void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
// If needed, restore wanted bits of FPSCR.
Label fpscr_done;
vmrs(scratch);
+ if (emit_debug_code()) {
+ Label rounding_mode_correct;
+ tst(scratch, Operand(kVFPRoundingModeMask));
+ b(eq, &rounding_mode_correct);
+ // Don't call Assert here, since Runtime_Abort could re-enter here.
+ stop("Default rounding mode not set");
+ bind(&rounding_mode_correct);
+ }
tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
b(ne, &fpscr_done);
orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
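
The new debug check asserts that the FPSCR rounding-mode field is zero, i.e.
round-to-nearest, which the generated code assumes as the default. The bit
test, modelled standalone (the RMode field sits in FPSCR bits [23:22]):

    #include <cstdint>

    constexpr uint32_t kVFPRoundingModeMask = 3u << 22;  // FPSCR RMode bits

    bool DefaultRoundingModeSet(uint32_t fpscr) {
      return (fpscr & kVFPRoundingModeMask) == 0;  // 0b00 == round-to-nearest
    }
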
@@ -815,11 +935,11 @@ void MacroAssembler::Vmov(const DwVfpRegister dst,
const Register scratch) {
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
+ DoubleRepresentation value_rep(imm);
// Handle special values first.
- if (value.bits == zero.bits) {
+ if (value_rep == zero) {
vmov(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits) {
+ } else if (value_rep == minus_zero) {
vneg(dst, kDoubleRegZero);
} else {
vmov(dst, imm, scratch);
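
The rewritten Vmov compares whole DoubleRepresentation values instead of raw
.bits fields; either way, the point is that 0.0 and -0.0 compare equal under
==, so the -0.0 special case needs a bit-pattern comparison. A standalone
illustration:

    #include <cstdint>
    #include <cstring>

    uint64_t BitsOf(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // well-defined type pun
      return bits;
    }

    bool IsMinusZero(double d) {
      // -0.0 == 0.0 is true, but the sign bit differs in the representation.
      return BitsOf(d) == BitsOf(-0.0);
    }
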
@@ -867,36 +987,60 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
- stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- } else {
- PredictableCodeSizeScope predictible_code_size_scope(
- this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+void MacroAssembler::LoadConstantPoolPointerRegister() {
+ if (FLAG_enable_ool_constant_pool) {
+ int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
+ pc_offset() - Instruction::kPCReadOffset;
+ ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
+ ldr(pp, MemOperand(pc, constant_pool_offset));
+ }
+}
+
+
+void MacroAssembler::StubPrologue() {
+ PushFixedFrame();
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ if (FLAG_enable_ool_constant_pool) {
+ LoadConstantPoolPointerRegister();
+ set_constant_pool_available(true);
+ }
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ { PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
- if (isolate()->IsCodePreAgingActive()) {
+ if (code_pre_aging) {
// Pre-age the code.
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
add(r0, pc, Operand(-8));
ldr(pc, MemOperand(pc, -4));
emit_code_stub_address(stub);
} else {
- stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ PushFixedFrame(r1);
nop(ip.code());
// Adjust FP to point to saved FP.
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
+ if (FLAG_enable_ool_constant_pool) {
+ LoadConstantPoolPointerRegister();
+ set_constant_pool_available(true);
+ }
}
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool) {
// r0-r3: preserved
- stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ PushFixedFrame();
+ if (FLAG_enable_ool_constant_pool && load_constant_pool) {
+ LoadConstantPoolPointerRegister();
+ }
mov(ip, Operand(Smi::FromInt(type)));
push(ip);
mov(ip, Operand(CodeObject()));
@@ -907,15 +1051,25 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
}
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+int MacroAssembler::LeaveFrame(StackFrame::Type type) {
// r0: preserved
// r1: preserved
// r2: preserved
// Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer and return address.
- mov(sp, fp);
- ldm(ia_w, sp, fp.bit() | lr.bit());
+ // the caller frame pointer, return address and constant pool pointer
+ // (if FLAG_enable_ool_constant_pool).
+ int frame_ends;
+ if (FLAG_enable_ool_constant_pool) {
+ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ frame_ends = pc_offset();
+ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+ } else {
+ mov(sp, fp);
+ frame_ends = pc_offset();
+ ldm(ia_w, sp, fp.bit() | lr.bit());
+ }
+ return frame_ends;
}
@@ -927,11 +1081,14 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
Push(lr, fp);
mov(fp, Operand(sp)); // Set up new frame pointer.
// Reserve room for saved entry sp and code object.
- sub(sp, sp, Operand(2 * kPointerSize));
+ sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
if (emit_debug_code()) {
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
+ if (FLAG_enable_ool_constant_pool) {
+ str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ }
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -945,8 +1102,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
if (save_doubles) {
SaveFPRegs(sp, ip);
// Note that d0 will be accessible at
- // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
+ // fp - ExitFrameConstants::kFrameSize -
+ // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
+ // since the sp slot, code slot and constant pool slot (if
+ // FLAG_enable_ool_constant_pool) were pushed after the fp.
}
// Reserve place for the return address and stack space and align the frame
@@ -999,10 +1158,12 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
bool restore_context) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
- const int offset = 2 * kPointerSize;
+ const int offset = ExitFrameConstants::kFrameSize;
sub(r3, fp,
Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
RestoreFPRegs(r3, ip);
@@ -1013,7 +1174,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
-
// Restore current context from top and clear it in debug mode.
if (restore_context) {
mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
@@ -1025,6 +1185,9 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
#endif
// Tear down the exit frame, pop the arguments, and return.
+ if (FLAG_enable_ool_constant_pool) {
+ ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ }
mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
if (argument_count.is_valid()) {
@@ -1033,7 +1196,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
-void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@@ -1042,17 +1205,9 @@ void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
}
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be r5 to
- // follow the calling convention which requires the call type to be
- // in r5.
- ASSERT(dst.is(r5));
- if (call_kind == CALL_AS_FUNCTION) {
- mov(dst, Operand(Smi::FromInt(1)));
- } else {
- mov(dst, Operand(Smi::FromInt(0)));
- }
+// On ARM this is just a synonym to make the purpose clear.
+void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
+ MovFromFloatResult(dst);
}
@@ -1063,8 +1218,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label regular_invoke;
@@ -1074,7 +1228,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// r0: actual arguments count
// r1: function (passed through to callee)
// r2: expected arguments count
- // r3: callee code entry
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
@@ -1122,14 +1275,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(r5, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
b(done);
}
} else {
- SetCallKind(r5, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
@@ -1141,8 +1292,7 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -1150,16 +1300,14 @@ void MacroAssembler::InvokeCode(Register code,
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(r5, call_kind);
Call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, call_kind);
Jump(code);
}
@@ -1170,41 +1318,10 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(r5, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(r5, call_kind);
- Jump(code, rmode);
- }
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -1224,7 +1341,7 @@ void MacroAssembler::InvokeFunction(Register fun,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
@@ -1232,8 +1349,7 @@ void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -1247,7 +1363,7 @@ void MacroAssembler::InvokeFunction(Register function,
// allow recompilation to take effect without changing any of the
// call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(r3, expected, actual, flag, call_wrapper);
}
@@ -1255,10 +1371,9 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
Move(r1, function);
- InvokeFunction(r1, expected, actual, flag, call_wrapper, call_kind);
+ InvokeFunction(r1, expected, actual, flag, call_wrapper);
}
@@ -1304,15 +1419,13 @@ void MacroAssembler::IsObjectNameType(Register object,
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
mov(r0, Operand::Zero());
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
+ CEntryStub ces(isolate(), 1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
@@ -1365,6 +1478,11 @@ void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r0 = exception, r1 = code object, r2 = state.
+
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ if (FLAG_enable_ool_constant_pool) {
+ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool.
+ }
ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
@@ -1639,7 +1757,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1961,34 +2079,12 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
- mov(scratch1, Operand(high_promotion_mode));
- ldr(scratch1, MemOperand(scratch1, 0));
- cmp(scratch1, Operand::Zero());
- b(eq, &allocate_new_space);
-
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- jmp(&install_map);
-
- bind(&allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- flags);
-
- bind(&install_map);
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -2283,12 +2379,12 @@ void MacroAssembler::CallStub(CodeStub* stub,
TypeFeedbackId ast_id,
Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
@@ -2298,10 +2394,8 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(
- ExternalReference function,
- Address function_address,
+ Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand) {
@@ -2315,7 +2409,22 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference::handle_scope_level_address(isolate()),
next_address);
- ASSERT(!thunk_last_arg.is(r3));
+ ASSERT(function_address.is(r1) || function_address.is(r2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ mov(r9, Operand(ExternalReference::is_profiling_address(isolate())));
+ ldrb(r9, MemOperand(r9, 0));
+ cmp(r9, Operand(0));
+ b(eq, &profiler_disabled);
+
+ // Additional parameter is the address of the actual callback.
+ mov(r3, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ Move(r3, function_address);
+ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
mov(r9, Operand(next_address));
@@ -2334,29 +2443,10 @@ void MacroAssembler::CallApiFunctionAndReturn(
PopSafepointRegisters();
}
- Label profiler_disabled;
- Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- mov(r3, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
- ldrb(r3, MemOperand(r3, 0));
- cmp(r3, Operand(0));
- b(eq, &profiler_disabled);
-
- // Additional parameter is the address of the actual callback.
- mov(thunk_last_arg, Operand(reinterpret_cast<int32_t>(function_address)));
- mov(r3, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- mov(r3, Operand(function));
- bind(&end_profiler_check);
-
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(this, r3);
if (FLAG_log_timer_events) {
@@ -2413,7 +2503,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
@@ -2436,14 +2526,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- add(sp, sp, Operand(num_arguments * kPointerSize));
- }
- LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-}
-
-
void MacroAssembler::IndexFromHash(Register hash, Register index) {
// If the hash field contains an array index pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
@@ -2451,10 +2533,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
// conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- SmiTag(index, hash);
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
}
@@ -2561,7 +2640,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
vstr(double_input, MemOperand(sp, 0));
- DoubleToIStub stub(sp, result, 0, true, true);
+ DoubleToIStub stub(isolate(), sp, result, 0, true, true);
CallStub(&stub);
add(sp, sp, Operand(kDoubleSize));
@@ -2583,7 +2662,8 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
// If we fell through then inline version didn't succeed - call stub instead.
push(lr);
- DoubleToIStub stub(object,
+ DoubleToIStub stub(isolate(),
+ object,
result,
HeapNumber::kValueOffset - kHeapObjectTag,
true,
@@ -2638,10 +2718,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -2649,7 +2726,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
mov(r0, Operand(num_arguments));
mov(r1, Operand(ExternalReference(f, isolate())));
- CEntryStub stub(1, save_doubles);
+ CEntryStub stub(isolate(), 1, save_doubles);
CallStub(&stub);
}
@@ -2659,7 +2736,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
mov(r0, Operand(num_arguments));
mov(r1, Operand(ext));
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
@@ -2691,8 +2768,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
mov(r1, Operand(builtin));
- CEntryStub stub(1);
- Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub stub(isolate(), 1);
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -2705,12 +2782,10 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
- SetCallKind(r5, CALL_AS_METHOD);
Call(r2);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, CALL_AS_METHOD);
Jump(r2);
}
}
@@ -2810,16 +2885,8 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -2831,25 +2898,24 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- mov(r0, Operand(p0));
- push(r0);
- mov(r0, Operand(Smi::FromInt(p1 - p0)));
+ mov(r0, Operand(Smi::FromInt(reason)));
push(r0);
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
- static const int kExpectedAbortInstructions = 10;
+ static const int kExpectedAbortInstructions = 7;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@@ -2903,31 +2969,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- ldr(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function,
@@ -2940,19 +2981,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- ldr(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- ldr(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -3074,6 +3102,20 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ b(eq, &done_checking);
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
@@ -3436,14 +3478,14 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
uint32_t encoding_mask) {
Label is_object;
SmiTst(string);
- ThrowIf(eq, kNonObject);
+ Check(ne, kNonObject);
ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
cmp(ip, Operand(encoding_mask));
- ThrowIf(ne, kUnexpectedStringType);
+ Check(eq, kUnexpectedStringType);
// The index is assumed to be untagged coming in, tag it to compare with the
// string length without using a temp register, it is restored at the end of
@@ -3452,15 +3494,15 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
TrySmiTag(index, index, &index_tag_bad);
b(&index_tag_ok);
bind(&index_tag_bad);
- Throw(kIndexIsTooLarge);
+ Abort(kIndexIsTooLarge);
bind(&index_tag_ok);
ldr(ip, FieldMemOperand(string, String::kLengthOffset));
cmp(index, ip);
- ThrowIf(ge, kIndexIsTooLarge);
+ Check(lt, kIndexIsTooLarge);
cmp(index, Operand(Smi::FromInt(0)));
- ThrowIf(lt, kIndexIsNegative);
+ Check(ge, kIndexIsNegative);
SmiUntag(index, index);
}
@@ -3492,33 +3534,27 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
}
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
- ASSERT(dreg.is(d0));
+void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
+ ASSERT(src.is(d0));
if (!use_eabi_hardfloat()) {
- vmov(r0, r1, dreg);
+ vmov(r0, r1, src);
}
}
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
- DwVfpRegister dreg2) {
- ASSERT(dreg1.is(d0));
- ASSERT(dreg2.is(d1));
- if (!use_eabi_hardfloat()) {
- vmov(r0, r1, dreg1);
- vmov(r2, r3, dreg2);
- }
+// On ARM this is just a synonym to make the purpose clear.
+void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
+ MovToFloatParameter(src);
}
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
- Register reg) {
- ASSERT(dreg.is(d0));
- if (use_eabi_hardfloat()) {
- Move(r0, reg);
- } else {
- Move(r2, reg);
- vmov(r0, r1, dreg);
+void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
+ DwVfpRegister src2) {
+ ASSERT(src1.is(d0));
+ ASSERT(src2.is(d1));
+ if (!use_eabi_hardfloat()) {
+ vmov(r0, r1, src1);
+ vmov(r2, r3, src2);
}
}
@@ -3589,22 +3625,31 @@ void MacroAssembler::CallCFunctionHelper(Register function,
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
+ Register result) {
const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- const int32_t kPCRegOffset = 2 * kPointerSize;
ldr(result, MemOperand(ldr_location));
if (emit_debug_code()) {
- // Check that the instruction is a ldr reg, [pc + offset] .
- and_(result, result, Operand(kLdrPCPattern));
- cmp(result, Operand(kLdrPCPattern));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+ // Check that the instruction is a ldr reg, [<pc or pp> + offset] .
+ if (FLAG_enable_ool_constant_pool) {
+ and_(result, result, Operand(kLdrPpPattern));
+ cmp(result, Operand(kLdrPpPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPp);
+ } else {
+ and_(result, result, Operand(kLdrPCPattern));
+ cmp(result, Operand(kLdrPCPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+ }
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
// Get the address of the constant.
and_(result, result, Operand(kLdrOffsetMask));
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(kPCRegOffset));
+ if (FLAG_enable_ool_constant_pool) {
+ add(result, pp, Operand(result));
+ } else {
+ add(result, ldr_location, Operand(result));
+ add(result, result, Operand(Instruction::kPCReadOffset));
+ }
}
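
GetRelocatedValueLocation recovers the constant's address from the ldr
encoding: the low 12 bits hold the immediate offset, and for the pc-relative
form the base reads as the instruction address plus 8 (the kPCReadOffset
added above); the new pp-relative form adds pp instead. A sketch of the
pc-relative case, assuming a positive (U-bit set) offset:

    #include <cstdint>

    const uint32_t kLdrOffsetMask = (1u << 12) - 1;  // immediate field
    const uint32_t kPCReadOffset = 8;                // ARM: pc reads as instr + 8

    uint32_t ConstantAddress(uint32_t ldr_address, uint32_t ldr_instruction) {
      return ldr_address + kPCReadOffset + (ldr_instruction & kLdrOffsetMask);
    }
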
@@ -3627,7 +3672,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
if (map->CanBeDeprecated()) {
mov(scratch, Operand(map));
ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
- tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ tst(scratch, Operand(Map::Deprecated::kMask));
b(ne, if_deprecated);
}
}
@@ -3807,83 +3852,20 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
DwVfpRegister input_reg,
LowDwVfpRegister double_scratch) {
- Label above_zero;
Label done;
- Label in_bounds;
-
- VFPCompareAndSetFlags(input_reg, 0.0);
- b(gt, &above_zero);
-
- // Double value is less than zero, NaN or Inf, return 0.
- mov(result_reg, Operand::Zero());
- b(al, &done);
- // Double value is >= 255, return 255.
- bind(&above_zero);
+ // Handle inputs >= 255 (including +infinity).
Vmov(double_scratch, 255.0, result_reg);
- VFPCompareAndSetFlags(input_reg, double_scratch);
- b(le, &in_bounds);
mov(result_reg, Operand(255));
- b(al, &done);
-
- // In 0-255 range, round and truncate.
- bind(&in_bounds);
- // Save FPSCR.
- vmrs(ip);
- // Set rounding mode to round to the nearest integer by clearing bits[23:22].
- bic(result_reg, ip, Operand(kVFPRoundingModeMask));
- vmsr(result_reg);
- vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
- vmov(result_reg, double_scratch.low());
- // Restore FPSCR.
- vmsr(ip);
- bind(&done);
-}
-
-
-void MacroAssembler::Throw(BailoutReason reason) {
- Label throw_start;
- bind(&throw_start);
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
- RecordComment("Throw message: ");
- RecordComment(msg);
- }
-#endif
-
- mov(r0, Operand(Smi::FromInt(reason)));
- push(r0);
- // Disable stub call restrictions to always allow calls to throw.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kThrowMessage, 1);
- }
- // will not return here
- if (is_const_pool_blocked()) {
- // If the calling code cares throw the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the ThrowMessage macro constant.
- static const int kExpectedThrowMessageInstructions = 10;
- int throw_instructions = InstructionsGeneratedSince(&throw_start);
- ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
- while (throw_instructions++ < kExpectedThrowMessageInstructions) {
- nop();
- }
- }
-}
+ VFPCompareAndSetFlags(input_reg, double_scratch);
+ b(ge, &done);
+ // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
+ // rounding mode will provide the correct result.
+ vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
+ vmov(result_reg, double_scratch.low());
-void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
- Label L;
- b(NegateCondition(cc), &L);
- Throw(reason);
- // will not return here
- bind(&L);
+ bind(&done);
}
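
The shortened ClampDoubleToUint8 leans on two VFP behaviours: the compare
routes inputs >= 255 (including +infinity) to 255, and vcvt_u32_f64 in
round-to-nearest mode handles the rest, saturating negative inputs and NaN to
0. A standalone model, assuming the FP environment is round-to-nearest:

    #include <cmath>
    #include <cstdint>

    uint8_t ClampDoubleToUint8(double input) {
      if (input >= 255.0) return 255;  // also catches +infinity
      if (!(input > 0.0)) return 0;    // negatives and NaN saturate to 0
      return static_cast<uint8_t>(std::lrint(input));  // round-to-nearest
    }
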
@@ -3902,7 +3884,8 @@ void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
- and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
+ and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
}
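
The new EnumLength masks the untagged bit field first and smi-tags the result,
rather than masking a smi-tagged value with a smi-tagged constant as before.
The arithmetic, modelled standalone for 32-bit V8 where a smi is the value
shifted left by one with a zero tag bit:

    #include <cstdint>

    constexpr int kSmiTagSize = 1;  // assumption: 32-bit smis, tag bit == 0

    uint32_t EnumLengthAsSmi(uint32_t bit_field3, uint32_t enum_length_mask) {
      return (bit_field3 & enum_length_mask) << kSmiTagSize;  // and_, SmiTag
    }
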
@@ -3934,10 +3917,16 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
// Check that there are no elements. Register r2 contains the current JS
// object we've reached through the prototype chain.
+ Label no_elements;
ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
cmp(r2, empty_fixed_array_value);
+ b(eq, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
b(ne, call_runtime);
+ bind(&no_elements);
ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
cmp(r2, null_value);
b(ne, &next);
@@ -4007,7 +3996,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
bind(&loop_again);
ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ DecodeField<Map::ElementsKindBits>(scratch1);
cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
b(eq, found);
ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
@@ -4083,6 +4072,26 @@ void CodePatcher::EmitCondition(Condition cond) {
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!dividend.is(result));
+ ASSERT(!dividend.is(ip));
+ ASSERT(!result.is(ip));
+ MultiplierAndShift ms(divisor);
+ mov(ip, Operand(ms.multiplier()));
+ smull(ip, result, dividend, ip);
+ if (divisor > 0 && ms.multiplier() < 0) {
+ add(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && ms.multiplier() > 0) {
+ sub(result, result, Operand(dividend));
+ }
+ if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
+ add(result, result, Operand(dividend, LSR, 31));
+}
+
+
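
TruncatingDiv implements signed division by a constant as a multiply-high plus
fixups, the classic "magic number" scheme that MultiplierAndShift precomputes.
A C++ model of the exact emitted sequence (it assumes arithmetic right shifts
on signed values, as on ARM); the magic pair in the usage note is the
well-known one for dividing by 3:

    #include <cstdint>

    int32_t TruncatingDivModel(int32_t dividend, int32_t multiplier, int shift,
                               int32_t divisor) {
      // smull: keep the high 32 bits of the 64-bit product.
      int32_t high = static_cast<int32_t>(
          (static_cast<int64_t>(dividend) * multiplier) >> 32);
      if (divisor > 0 && multiplier < 0) high += dividend;
      if (divisor < 0 && multiplier > 0) high -= dividend;
      if (shift > 0) high >>= shift;  // ASR
      // add(result, result, Operand(dividend, LSR, 31)): add 1 for negative
      // dividends so the quotient truncates toward zero.
      return high + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
    }

For example, divisor 3 uses multiplier 0x55555556 with shift 0, so
TruncatingDivModel(7, 0x55555556, 0, 3) == 2 and
TruncatingDivModel(-7, 0x55555556, 0, 3) == -2.
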
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/chromium/v8/src/arm/macro-assembler-arm.h b/chromium/v8/src/arm/macro-assembler-arm.h
index f71c1a3852c..dbf305a4077 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.h
+++ b/chromium/v8/src/arm/macro-assembler-arm.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -60,6 +37,10 @@ enum TaggingMode {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
@@ -102,7 +83,11 @@ class MacroAssembler: public Assembler {
static int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSizeNotPredictableCodeSize(Address target,
+ int CallStubSize(CodeStub* stub,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
+ static int CallSizeNotPredictableCodeSize(Isolate* isolate,
+ Address target,
RelocInfo::Mode rmode,
Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode,
@@ -132,7 +117,8 @@ class MacroAssembler: public Assembler {
Register scratch = no_reg,
Condition cond = al);
-
+ void Mls(Register dst, Register src1, Register src2, Register srcA,
+ Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
Condition cond = al);
void Ubfx(Register dst, Register src, int lsb, int width,
@@ -263,7 +249,9 @@ class MacroAssembler: public Assembler {
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
@@ -275,7 +263,9 @@ class MacroAssembler: public Assembler {
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
@@ -283,9 +273,17 @@ class MacroAssembler: public Assembler {
lr_status,
save_fp,
remembered_set_action,
- smi_check);
+ smi_check,
+ pointers_to_here_check_for_value);
}
+ void RecordWriteForMap(
+ Register object,
+ Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp);
+
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
@@ -296,7 +294,9 @@ class MacroAssembler: public Assembler {
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// Push a handle.
void Push(Handle<Object> handle);
@@ -389,7 +389,7 @@ class MacroAssembler: public Assembler {
}
} else {
Pop(src2, src3, cond);
- str(src1, MemOperand(sp, 4, PostIndex), cond);
+ ldr(src1, MemOperand(sp, 4, PostIndex), cond);
}
}
@@ -426,6 +426,12 @@ class MacroAssembler: public Assembler {
}
}
+ // Push a fixed frame, consisting of lr, fp, constant pool (if
+ // FLAG_enable_ool_constant_pool), context and JS function / marker id if
+ // marker_reg is a valid register.
+ void PushFixedFrame(Register marker_reg = no_reg);
+ void PopFixedFrame(Register marker_reg = no_reg);
+
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -532,7 +538,8 @@ class MacroAssembler: public Assembler {
Label* not_int32);
// Generates function and stub prologue code.
- void Prologue(PrologueFrameMode frame_mode);
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -561,14 +568,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -585,47 +585,31 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Set up call kind marking in ecx. The method takes ecx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void IsObjectJSObjectType(Register heap_object,
Register map,
@@ -644,12 +628,10 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
void DebugBreak();
-#endif
// ---------------------------------------------------------------------------
// Exception handling
@@ -668,12 +650,6 @@ class MacroAssembler: public Assembler {
// handler chain.
void ThrowUncatchable(Register value);
- // Throw a message string as an exception.
- void Throw(BailoutReason reason);
-
- // Throw a message string as an exception if a condition is not true.
- void ThrowIf(Condition cc, BailoutReason reason);
-
// ---------------------------------------------------------------------------
// Inline caching support
@@ -968,10 +944,6 @@ class MacroAssembler: public Assembler {
}
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
@@ -1116,9 +1088,9 @@ class MacroAssembler: public Assembler {
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DwVfpRegister dreg);
- void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
- void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
+ void MovToFloatParameter(DwVfpRegister src);
+ void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
+ void MovToFloatResult(DwVfpRegister src);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1134,16 +1106,15 @@ class MacroAssembler: public Assembler {
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DwVfpRegister dst);
+ void MovFromFloatParameter(DwVfpRegister dst);
+ void MovFromFloatResult(DwVfpRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
+ void CallApiFunctionAndReturn(Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand);
@@ -1170,6 +1141,10 @@ class MacroAssembler: public Assembler {
}
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and ip gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1304,6 +1279,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
@@ -1390,16 +1369,41 @@ class MacroAssembler: public Assembler {
void NumberOfOwnDescriptors(Register dst, Register map);
template<typename Field>
+ void DecodeField(Register dst, Register src) {
+ Ubfx(dst, src, Field::kShift, Field::kSize);
+ }
+
+ template<typename Field>
void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register dst, Register src) {
static const int shift = Field::kShift;
- static const int mask = (Field::kMask >> shift) << kSmiTagSize;
- mov(reg, Operand(reg, LSR, shift));
- and_(reg, reg, Operand(mask));
+ static const int mask = Field::kMask >> shift << kSmiTagSize;
+ STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ if (shift < kSmiTagSize) {
+ mov(dst, Operand(src, LSL, kSmiTagSize - shift));
+ and_(dst, dst, Operand(mask));
+ } else if (shift > kSmiTagSize) {
+ mov(dst, Operand(src, LSR, shift - kSmiTagSize));
+ and_(dst, dst, Operand(mask));
+ } else {
+ and_(dst, src, Operand(mask));
+ }
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register reg) {
+    DecodeFieldToSmi<Field>(reg, reg);
}
// Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
+ // Returns the pc offset at which the frame ends.
+ int LeaveFrame(StackFrame::Type type);
// Expects object in r0 and returns map with validated enum cache
// in r0. Assumes that any other register can be used as a scratch.
@@ -1444,8 +1448,7 @@ class MacroAssembler: public Assembler {
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InitializeNewString(Register string,
Register length,
@@ -1475,6 +1478,9 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Loads the constant pool pointer (pp) register.
+ void LoadConstantPoolPointerRegister();
+
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.
@@ -1524,6 +1530,71 @@ class CodePatcher {
};
+class FrameAndConstantPoolScope {
+ public:
+ FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
+ : masm_(masm),
+ type_(type),
+ old_has_frame_(masm->has_frame()),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ // We only want to enable constant pool access for non-manual frame scopes
+ // to ensure the constant pool pointer is valid throughout the scope.
+ ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+ masm->set_has_frame(true);
+ masm->set_constant_pool_available(true);
+ masm->EnterFrame(type, !old_constant_pool_available_);
+ }
+
+ ~FrameAndConstantPoolScope() {
+ masm_->LeaveFrame(type_);
+ masm_->set_has_frame(old_has_frame_);
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+
+ // Normally we generate the leave-frame code when this object goes
+ // out of scope. Sometimes we may need to generate the code somewhere else
+ // in addition. Calling this will achieve that, but the object stays in
+ // scope, the MacroAssembler is still marked as being in a frame scope, and
+ // the code will be generated again when it goes out of scope.
+ void GenerateLeaveFrame() {
+ ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+ masm_->LeaveFrame(type_);
+ }
+
+ private:
+ MacroAssembler* masm_;
+ StackFrame::Type type_;
+ bool old_has_frame_;
+ bool old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
+};
+
+
+// Class for scoping the unavailability of constant pool access.
+class ConstantPoolUnavailableScope {
+ public:
+ explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
+ : masm_(masm),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(false);
+ }
+ }
+ ~ConstantPoolUnavailableScope() {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+ }
+
+ private:
+ MacroAssembler* masm_;
+  bool old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
+};
+
+
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/chromium/v8/src/arm/regexp-macro-assembler-arm.cc b/chromium/v8/src/arm/regexp-macro-assembler-arm.cc
index cbc34e10b95..e494305ac01 100644
--- a/chromium/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/chromium/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "cpu-profiler.h"
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "arm/regexp-macro-assembler-arm.h"
+#include "src/cpu-profiler.h"
+#include "src/unicode.h"
+#include "src/log.h"
+#include "src/code-stubs.h"
+#include "src/regexp-stack.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/arm/regexp-macro-assembler-arm.h"
namespace v8 {
namespace internal {
@@ -1043,7 +1020,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
__ mov(ip, Operand(stack_guard_check));
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(masm_, ip);
// Drop the return address from the stack.
@@ -1067,7 +1044,8 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
@@ -1094,7 +1072,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+ Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();
diff --git a/chromium/v8/src/arm/regexp-macro-assembler-arm.h b/chromium/v8/src/arm/regexp-macro-assembler-arm.h
index 8d9d515c76b..fef8413411f 100644
--- a/chromium/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/chromium/v8/src/arm/regexp-macro-assembler-arm.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-#include "macro-assembler.h"
+#include "src/arm/assembler-arm.h"
+#include "src/arm/assembler-arm-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/arm/simulator-arm.cc b/chromium/v8/src/arm/simulator-arm.cc
index 461d032b99f..1ef4a9ce4a1 100644
--- a/chromium/v8/src/arm/simulator-arm.cc
+++ b/chromium/v8/src/arm/simulator-arm.cc
@@ -1,42 +1,20 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include <stdarg.h>
#include <stdlib.h>
#include <cmath>
-#include <cstdarg>
-#include "v8.h"
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "disasm.h"
-#include "assembler.h"
-#include "codegen.h"
-#include "arm/constants-arm.h"
-#include "arm/simulator-arm.h"
+#include "src/disasm.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/arm/constants-arm.h"
+#include "src/arm/simulator-arm.h"
#if defined(USE_SIMULATOR)
@@ -716,12 +694,12 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache,
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
- CHECK(memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize) == 0);
+ CHECK_EQ(0,
+ memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), Instruction::kInstrSize));
} else {
// Cache miss. Load memory into the cache.
- OS::MemCopy(cached_line, line, CachePage::kLineLength);
+ memcpy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
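Two orthogonal cleanups in this hunk. CHECK_EQ(0, memcmp(...)) is preferred over CHECK(memcmp(...) == 0) because the macro takes (expected, actual) and can print both values on failure, where a bare CHECK can only say the condition was false. And OS::MemCopy, a thin portability wrapper, gives way to plain memcpy throughout this file:

  // Failure output gains the actual memcmp result:
  CHECK_EQ(0, memcmp(expected_bytes, actual_bytes, size));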
@@ -772,8 +750,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = false;
v_flag_FPSCR_ = false;
- FPSCR_rounding_mode_ = RZ;
- FPSCR_default_NaN_mode_ = true;
+ FPSCR_rounding_mode_ = RN;
+ FPSCR_default_NaN_mode_ = false;
inv_op_vfp_flag_ = false;
div_zero_vfp_flag_ = false;
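The new defaults match hardware: an ARM core resets FPSCR to round-to-nearest (RN) with default-NaN mode disabled, so the old RZ/default-NaN configuration made the simulator diverge from native runs. A host-side illustration of why the mode is observable, using C's floating-point environment as a stand-in for FPSCR (illustration only, not simulator code):

  #include <cfenv>
  #include <cmath>

  // The result of a mode-honouring conversion depends on the rounding mode.
  long ConvertUnder(int rounding_mode, double x) {
    std::fesetround(rounding_mode);
    return std::lrint(x);  // honours the current rounding mode
  }
  // ConvertUnder(FE_TOWARDZERO, 2.7) == 2   (old RZ default)
  // ConvertUnder(FE_TONEAREST,  2.7) == 3   (new RN default)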
@@ -795,6 +773,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
+Simulator::~Simulator() {
+}
+
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@@ -845,6 +827,12 @@ class Redirection {
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
+ static void* ReverseRedirection(int32_t reg) {
+ Redirection* redirection = FromSwiInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
private:
void* external_function_;
uint32_t swi_instruction_;
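Background for ReverseRedirection: when generated code would call a host C++ function, the simulator substitutes the address of a Redirection's embedded SWI instruction. The new helper performs the inverse mapping for values that arrive back in a register. A simplified model of the round trip, assuming the member layout shown above (a sketch, not the real class):

  #include <cstddef>
  #include <cstdint>

  struct RedirectionModel {
    void* external_function_;   // real host-ABI callback
    uint32_t swi_instruction_;  // the address generated code "calls"
  };

  // The redirected address is &swi_instruction_, so stepping back by
  // its offset recovers the object and thus the original callback.
  void* ReverseRedirectionModel(uintptr_t redirected) {
    char* p = reinterpret_cast<char*>(redirected) -
              offsetof(RedirectionModel, swi_instruction_);
    return reinterpret_cast<RedirectionModel*>(p)->external_function_;
  }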
@@ -906,8 +894,8 @@ double Simulator::get_double_from_register_pair(int reg) {
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_registers_[0])];
- OS::MemCopy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
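The memcpy idiom here (and in the several hunks that follow) is the standard strict-aliasing-safe way to reinterpret the bits of two 32-bit registers as a double; OS::MemCopy added nothing over the standard function. A minimal standalone version of the same idiom:

  #include <cstdint>
  #include <cstring>

  // Rebuild a double from two register halves without a type-punning
  // cast, which would be undefined behaviour under strict aliasing.
  double BitsToDouble(int32_t lo, int32_t hi) {
    int32_t words[2] = { lo, hi };
    double d;
    std::memcpy(&d, words, sizeof d);
    return d;
  }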
@@ -1011,9 +999,9 @@ void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
char buffer[register_size * sizeof(vfp_registers_[0])];
- OS::MemCopy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
- OS::MemCopy(&vfp_registers_[reg_index * register_size], buffer,
- register_size * sizeof(vfp_registers_[0]));
+ memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
+ memcpy(&vfp_registers_[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_registers_[0]));
}
@@ -1025,9 +1013,9 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ReturnType value = 0;
char buffer[register_size * sizeof(vfp_registers_[0])];
- OS::MemCopy(buffer, &vfp_registers_[register_size * reg_index],
- register_size * sizeof(vfp_registers_[0]));
- OS::MemCopy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
+ memcpy(buffer, &vfp_registers_[register_size * reg_index],
+ register_size * sizeof(vfp_registers_[0]));
+ memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
return value;
}
@@ -1056,14 +1044,14 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
char buffer[2 * sizeof(vfp_registers_[0])];
- OS::MemCopy(buffer, &result, sizeof(buffer));
+ memcpy(buffer, &result, sizeof(buffer));
// Copy result to d0.
- OS::MemCopy(vfp_registers_, buffer, sizeof(buffer));
+ memcpy(vfp_registers_, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
- OS::MemCopy(buffer, &result, sizeof(buffer));
+ memcpy(buffer, &result, sizeof(buffer));
// Copy result to r0 and r1.
- OS::MemCopy(registers_, buffer, sizeof(buffer));
+ memcpy(registers_, buffer, sizeof(buffer));
}
}
@@ -1647,12 +1635,12 @@ void Simulator::HandleVList(Instruction* instr) {
ReadW(reinterpret_cast<int32_t>(address + 1), instr)
};
double d;
- OS::MemCopy(&d, data, 8);
+ memcpy(&d, data, 8);
set_d_register_from_double(reg, d);
} else {
int32_t data[2];
double d = get_double_from_d_register(reg);
- OS::MemCopy(data, &d, 8);
+ memcpy(data, &d, 8);
WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
@@ -1688,12 +1676,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
typedef void (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, int32_t arg2);
+ int32_t arg0, int32_t arg1, void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1832,7 +1820,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
CHECK(stack_aligned);
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- target(arg0, arg1);
+ target(arg0, Redirection::ReverseRedirection(arg1));
} else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1861,7 +1849,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
external);
- target(arg0, arg1, arg2);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
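The two call-site fixes above pair with the signature changes earlier in the file (the trailing int32_t becoming void*): profiling thunks expect the real callback address as their last argument, but generated code can only hand the simulator the redirected SWI address, so ReverseRedirection translates it before dispatch:

  // arg1 holds a redirected address; the thunk needs the host pointer.
  target(arg0, Redirection::ReverseRedirection(arg1));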
@@ -2733,7 +2721,11 @@ void Simulator::DecodeType3(Instruction* instr) {
int32_t rs_val = get_register(rs);
int32_t ret_val = 0;
ASSERT(rs_val != 0);
- ret_val = rm_val/rs_val;
+ if ((rm_val == kMinInt) && (rs_val == -1)) {
+ ret_val = kMinInt;
+ } else {
+ ret_val = rm_val / rs_val;
+ }
set_register(rn, ret_val);
return;
}
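This guards the single case where host arithmetic cannot model the instruction: ARM's SDIV defines kMinInt / -1 to yield kMinInt, whereas the same expression is signed-overflow undefined behaviour in C++ (and raises SIGFPE on x86 hosts). The fix as a standalone helper:

  #include <cstdint>
  #include <limits>

  // ARM SDIV semantics: INT32_MIN / -1 wraps to INT32_MIN.
  int32_t SimulatedSdiv(int32_t rm_val, int32_t rs_val) {
    // rs_val == 0 is excluded by the ASSERT in the hunk above.
    if (rm_val == std::numeric_limits<int32_t>::min() && rs_val == -1) {
      return rm_val;
    }
    return rm_val / rs_val;
  }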
@@ -2905,7 +2897,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
double dm_value = get_double_from_d_register(vm);
- double dd_value = fabs(dm_value);
+ double dd_value = std::fabs(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
@@ -2921,7 +2913,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
(instr->Bit(8) == 1)) {
// vcvt.f64.s32 Dd, Dd, #<fbits>
- int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+ int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
int fixed_value = get_sinteger_from_s_register(vd * 2);
double divide = 1 << fraction_bits;
set_d_register_from_double(vd, fixed_value / divide);
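The old decode used bit 5 as the high bit of the immediate. In the VFP fixed-point VCVT encoding, the five-bit field is imm4:i, with imm4 in instruction bits [3:0] and the i bit (bit 5) as the least significant bit, giving fraction_bits = 32 - ((imm4 << 1) | i) for the 32-bit form, exactly as the fixed line computes:

  #include <cstdint>

  // Corrected immediate decode for vcvt.f64.s32 Dd, Dd, #<fbits>.
  int FractionBits(uint32_t imm4, uint32_t i_bit) {
    int imm5 = static_cast<int>((imm4 << 1) | (i_bit & 1));
    return 32 - imm5;
  }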
@@ -2934,7 +2926,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
- double dd_value = sqrt(dm_value);
+ double dd_value = std::sqrt(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if (instr->Opc3Value() == 0x0) {
@@ -3028,9 +3020,9 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
double dd_value = get_double_from_d_register(vd);
int32_t data[2];
- OS::MemCopy(data, &dd_value, 8);
+ memcpy(data, &dd_value, 8);
data[instr->Bit(21)] = get_register(instr->RtValue());
- OS::MemCopy(&dd_value, data, 8);
+ memcpy(&dd_value, data, 8);
set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x1) &&
@@ -3039,7 +3031,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vn = instr->Bits(19, 16) | (instr->Bit(7) << 4);
double dn_value = get_double_from_d_register(vn);
int32_t data[2];
- OS::MemCopy(data, &dn_value, 8);
+ memcpy(data, &dn_value, 8);
set_register(instr->RtValue(), data[instr->Bit(21)]);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
@@ -3270,8 +3262,8 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
double abs_diff =
- unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
- : fabs(val - temp);
+ unsigned_integer ? std::fabs(val - static_cast<uint32_t>(temp))
+ : std::fabs(val - temp);
inexact_vfp_flag_ = (abs_diff != 0);
@@ -3390,13 +3382,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
if (instr->HasL()) {
int32_t data[2];
double d = get_double_from_d_register(vm);
- OS::MemCopy(data, &d, 8);
+ memcpy(data, &d, 8);
set_register(rt, data[0]);
set_register(rn, data[1]);
} else {
int32_t data[] = { get_register(rt), get_register(rn) };
double d;
- OS::MemCopy(&d, data, 8);
+ memcpy(&d, data, 8);
set_d_register_from_double(vm, d);
}
}
@@ -3419,13 +3411,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
ReadW(address + 4, instr)
};
double val;
- OS::MemCopy(&val, data, 8);
+ memcpy(&val, data, 8);
set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
int32_t data[2];
double val = get_double_from_d_register(vd);
- OS::MemCopy(data, &val, 8);
+ memcpy(data, &val, 8);
WriteW(address, data[0], instr);
WriteW(address + 4, data[1], instr);
}
@@ -3455,7 +3447,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
@@ -3478,7 +3471,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
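Both vmovl fixes correct the same register-decoding bug: vmovl widens into a quad (Q) register, and Q registers alias consecutive D-register pairs (Qn = D2n:D2n+1). The instruction encodes the destination as a D-style number, so a valid Q destination must be even, and the Q index is that number halved, which is what the new shift-by-one computes:

  #include <cstdint>

  // Decode the Q destination index from the D:Vd fields of vmovl.
  // An odd Vd cannot name a Q register; the simulator flags it
  // UNIMPLEMENTED before calling this.
  int DecodeQd(uint32_t d_bit, uint32_t vd_field) {
    return static_cast<int>((d_bit << 3) | (vd_field >> 1));
  }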
diff --git a/chromium/v8/src/arm/simulator-arm.h b/chromium/v8/src/arm/simulator-arm.h
index 0af5162e938..9a2f192b7d8 100644
--- a/chromium/v8/src/arm/simulator-arm.h
+++ b/chromium/v8/src/arm/simulator-arm.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Declares a Simulator for ARM instructions if we are not generating a native
@@ -36,7 +13,7 @@
#ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_ARM_SIMULATOR_ARM_H_
-#include "allocation.h"
+#include "src/allocation.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native arm platform.
@@ -60,9 +37,6 @@ typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
// just use the C stack limit.
@@ -86,9 +60,9 @@ class SimulatorStack : public v8::internal::AllStatic {
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
-#include "constants-arm.h"
-#include "hashmap.h"
-#include "assembler.h"
+#include "src/arm/constants-arm.h"
+#include "src/hashmap.h"
+#include "src/assembler.h"
namespace v8 {
namespace internal {
@@ -207,6 +181,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
@@ -284,7 +262,7 @@ class Simulator {
inline int GetCarry() {
return c_flag_ ? 1 : 0;
- };
+ }
// Support for VFP.
void Compute_FPSCR_Flags(double val1, double val2);
@@ -455,10 +433,6 @@ class Simulator {
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. Setting the c_limit to indicate a very small
diff --git a/chromium/v8/src/arm/stub-cache-arm.cc b/chromium/v8/src/arm/stub-cache-arm.cc
index 4ca5e27ed71..edc69530653 100644
--- a/chromium/v8/src/arm/stub-cache-arm.cc
+++ b/chromium/v8/src/arm/stub-cache-arm.cc
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
+#include "src/ic-inl.h"
+#include "src/codegen.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -295,15 +272,20 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ ldr(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ Move(ip, isolate->global_object());
- __ cmp(prototype, ip);
- __ b(ne, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ ldr(scratch, MemOperand(cp, offset));
+ __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+ __ Move(ip, function);
+ __ cmp(ip, scratch);
+ __ b(ne, miss);
+
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
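The rewritten guard is more precise: instead of demanding the exact global object the stub was compiled against, it follows the current global object to its native context and checks that slot 'index' still holds the expected global function, so the stub stays valid as long as that function is unchanged. The emitted sequence, as pseudocode (registers elided):

  // scratch = current_context[GLOBAL_OBJECT_INDEX]  // global object
  // scratch = scratch->native_context               // its native context
  // scratch = scratch->get(index)                   // cached function
  // if (scratch != *function) goto miss             // stale stub
  // prototype = function->initial_map, then its prototype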
@@ -317,7 +299,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -346,60 +328,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
- __ b(ne, non_string_object);
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
-
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -476,11 +404,27 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ Move(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ __ CompareMap(scratch1, it.Current(), &do_store);
+ it.Advance();
+ if (it.Done()) {
+ __ b(ne, miss_label);
+ break;
+ }
+ __ b(eq, &do_store);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
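The expanded IsHeapObject branch reflects the removal of the field-tracking flags: heap-object fields can now carry a HeapType enumerating the maps (classes) a stored value may have, and the compare/branch chain above walks that list, storing on the first match and missing if none matches. A generic rendering of the emitted logic (Map is a stand-in type; sketch only):

  #include <vector>

  template <typename Map>
  bool ValueMatchesFieldType(const Map* value_map,
                             const std::vector<const Map*>& classes) {
    // An empty list (it.Done() immediately) means no check is emitted.
    for (const Map* m : classes) {
      if (m == value_map) return true;
    }
    return false;
  }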
@@ -554,15 +498,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -580,15 +524,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ str(value_reg, FieldMemOperand(scratch1, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -629,29 +573,38 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- int index = lookup->GetFieldIndex().field_index();
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
+ FieldIndex index = lookup->GetFieldIndex();
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ __ CompareMap(scratch1, it.Current(), &do_store);
+ it.Advance();
+ if (it.Done()) {
+ __ b(ne, miss_label);
+ break;
+ }
+ __ b(eq, &do_store);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
// Load the double storage.
- if (index < 0) {
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ ldr(scratch1, FieldMemOperand(receiver_reg, offset));
+ if (index.is_inobject()) {
+ __ ldr(scratch1, FieldMemOperand(receiver_reg, index.offset()));
} else {
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ ldr(scratch1, FieldMemOperand(scratch1, offset));
+ __ ldr(scratch1, FieldMemOperand(scratch1, index.offset()));
}
// Store the value into the storage.
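FieldIndex replaces the old signed-integer convention, under which a negative index meant an in-object slot (offset computed from the instance size) and a non-negative one indexed the out-of-line properties array. The two load paths above become explicit queries:

  // if (index.is_inobject())
  //   storage = receiver[index.offset()]               // in-object slot
  // else
  //   storage = receiver->properties()[index.offset()] // backing array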
@@ -678,12 +631,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
+ if (index.is_inobject()) {
// Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+ __ str(value_reg, FieldMemOperand(receiver_reg, index.offset()));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -691,7 +643,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// Pass the now unused name_reg as a scratch register.
__ mov(name_reg, value_reg);
__ RecordWriteField(receiver_reg,
- offset,
+ index.offset(),
name_reg,
scratch1,
kLRHasNotBeenSaved,
@@ -701,13 +653,12 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
} else {
// Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(value_reg, FieldMemOperand(scratch1, offset));
+ __ str(value_reg, FieldMemOperand(scratch1, index.offset()));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -715,7 +666,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
- offset,
+ index.offset(),
name_reg,
receiver_reg,
kLRHasNotBeenSaved,
@@ -777,343 +728,84 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- __ mov(scratch, Operand(Smi::FromInt(0)));
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(scratch);
+// Generate call to api function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!receiver.is(scratch_in));
+ __ push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ push(arg);
}
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
+ ASSERT(optimization.is_simple_api_call());
+ // Abi for CallApiFunctionStub.
+ Register callee = r0;
+ Register call_data = r4;
+ Register holder = r2;
+ Register api_function_address = r1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ Move(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
- // : holder (set by CheckPrototypes)
- // -- sp[28] : last JS argument
- // -- ...
- // -- sp[(argc + 6) * 4] : first JS argument
- // -- sp[(argc + 7) * 4] : receiver
- // -----------------------------------
- typedef FunctionCallbackArguments FCA;
- // Save calling context.
- __ str(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
- // Get the function and setup the context.
+ Isolate* isolate = masm->isolate();
Handle<JSFunction> function = optimization.constant_function();
- __ Move(r5, function);
- __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
- __ str(r5, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
-
- // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(r0, api_call_info);
- __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ Move(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ Move(call_data, api_call_info);
+ __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
} else {
- __ Move(r6, call_data);
+ __ Move(call_data, call_data_obj);
}
- // Store call data.
- __ str(r6, MemOperand(sp, FCA::kDataIndex * kPointerSize));
- // Store isolate.
- __ mov(r5, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ str(r5, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
- // Store ReturnValue default and ReturnValue.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ str(r5, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
- __ str(r5, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
-
- // Prepare arguments.
- __ mov(r2, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // r0 = FunctionCallbackInfo&
- // Arguments is after the return address.
- __ add(r0, sp, Operand(1 * kPointerSize));
- // FunctionCallbackInfo::implicit_args_
- __ str(r2, MemOperand(r0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ add(ip, r2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ mov(ip, Operand(argc));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ mov(ip, Operand::Zero());
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+
+ // Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref = ExternalReference(&fun,
type,
masm->isolate());
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- masm->isolate());
-
- AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(fp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
-
- __ CallApiFunctionAndReturn(ref,
- function_address,
- thunk_ref,
- r1,
- kStackUnwindSpace,
- return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
-}
+ __ mov(api_function_address, Operand(ref));
-
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- typedef FunctionCallbackArguments FCA;
- const int stack_space = kFastApiCallArguments + argc + 1;
- // Assign stack space for the call arguments.
- __ sub(sp, sp, Operand(stack_space * kPointerSize));
- // Write holder to stack frame.
- __ str(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- // Write receiver to stack frame.
- int index = stack_space - 1;
- __ str(receiver, MemOperand(sp, index * kPointerSize));
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ str(receiver, MemOperand(sp, index-- * kPointerSize));
- }
-
- GenerateFastApiDirectCall(masm, optimization, argc, true);
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
}
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(CallStubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
- Counters* counters = masm->isolate()->counters();
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
- handle(lookup->holder()), scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(
- masm, optimization, arguments_.immediate(), false);
- } else {
- Handle<JSFunction> function = optimization.constant_function();
- __ Move(r0, receiver);
- stub_compiler_->GenerateJumpFunction(object, function);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ b(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
-
- // Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, interceptor_holder,
- IC::kLoadPropertyWithInterceptorForCall);
-
- // Restore the name_ register.
- __ pop(name_);
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(receiver);
- __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, holder_obj,
- IC::kLoadPropertyWithInterceptorOnly);
- __ pop(name_);
- __ pop(holder);
- __ pop(receiver);
- }
- // If interceptor returns no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch);
- __ b(ne, interceptor_succeeded);
- }
-
- CallStubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- ExtraICState extra_ic_state_;
-};
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
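This is the heart of the file's diff: roughly three hundred lines of hand-rolled fast-API-call machinery, including stack reservation, exit-frame setup, the profiling-thunk plumbing, and the entire CallInterceptorCompiler class, collapse into a holder lookup plus a tail call to CallApiFunctionStub. The register ABI the new GenerateFastApiCall materializes, as read from the hunk:

  // CallApiFunctionStub inputs on ARM:
  //   r0 = callee (the JSFunction being invoked)
  //   r4 = call data (undefined tracked via call_data_undefined)
  //   r2 = holder (from CallOptimization::LookupHolderOfExpectedType)
  //   r1 = api_function_address (the C++ callback entry point)
  // Exit-frame construction, stack unwinding and the profiling thunk
  // now live in the stub rather than being emitted at every call site.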
@@ -1123,20 +815,16 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<Type> type,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, Operand(receiver_map));
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1147,13 +835,10 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register reg = object_reg;
int depth = 0;
- typedef FunctionCallbackArguments FCA;
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
Handle<JSObject> current = Handle<JSObject>::null();
- if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ }
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder->map());
@@ -1176,7 +861,7 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
- current->property_dictionary()->FindEntry(*name) ==
+ current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1217,10 +902,6 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
}
}
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
// Go to the next object in the prototype chain.
current = prototype;
current_map = handle(current->map());
@@ -1269,7 +950,7 @@ void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<Type> type,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1316,19 +997,15 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex field,
+ FieldIndex field,
Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
} else {
- KeyedLoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ KeyedLoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
}
}
@@ -1341,13 +1018,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1379,37 +1049,18 @@ void LoadStubCompiler::GenerateLoadCallback(
__ Push(scratch4(), reg);
__ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
__ push(name());
- __ mov(r0, sp); // r0 = Handle<Name>
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object** args_) as the data.
- __ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
- __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
+ // Abi for CallApiGetter
+ Register getter_address_reg = r2;
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
-
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ mov(getter_address_reg, Operand(ref));
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
- ExternalReference::Type thunk_type =
- ExternalReference::PROFILING_GETTER_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- isolate());
- __ CallApiFunctionAndReturn(ref,
- getter_address,
- thunk_ref,
- r2,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
}
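The accessor-getter path gets the same stub treatment: the inline exit frame, stack-unwind bookkeeping and PROFILING_GETTER_CALL thunk are deleted in favour of CallApiGetterStub, whose only precondition at this call site is the getter's entry point in r2:

  // CallApiGetterStub input on ARM:
  //   r2 = getter_address (a DIRECT_GETTER_CALL external reference)
  // plus the name/args layout already pushed on the stack above.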
@@ -1455,7 +1106,7 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
@@ -1494,1041 +1145,26 @@ void LoadStubCompiler::GenerateLoadInterceptor(
this->name(), interceptor_holder);
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
isolate());
__ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(r2, Operand(name));
- __ b(ne, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateFunctionCheck(Register function,
- Register scratch,
- Label* miss) {
- __ JumpIfSmi(function, miss);
- __ CompareObjectType(function, scratch, scratch, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ mov(r3, Operand(cell));
- __ ldr(r1, FieldMemOperand(r3, Cell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- GenerateFunctionCheck(r1, r3, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ cmp(r4, r3);
- } else {
- __ cmp(r1, Operand(function));
- }
- __ b(ne, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state());
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(
- object, holder, name, RECEIVER_MAP_CHECK, &miss);
- GenerateFastPropertyLoad(masm(), r1, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
- GenerateJumpFunction(object, r1, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->SetElementsKind(GetInitialFastElementsKind());
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- const int argc = arguments().immediate();
- __ mov(r0, Operand(argc));
- __ mov(r2, Operand(site_feedback_cell));
- __ mov(r1, Operand(function));
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- Register receiver = r0;
- Register scratch = r1;
-
- const int argc = arguments().immediate();
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- Register elements = r6;
- Register end_elements = r5;
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &check_double,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into scratch and calculate new length.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(scratch, scratch, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(scratch, r4);
- __ b(gt, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(r4, &with_write_barrier);
-
- // Save new length.
- __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ mov(r0, scratch);
- __ Ret();
-
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- scratch,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into scratch and calculate new length.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(scratch, scratch, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(scratch, r4);
- __ b(gt, &call_builtin);
-
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- __ Drop(argc + 1);
- __ mov(r0, scratch);
- __ Ret();
-
- __ bind(&with_write_barrier);
-
- __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(r3, r9, &not_fast_object);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(r3, r9, &call_builtin);
-
- __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r9, ip);
- __ b(eq, &call_builtin);
-      // r0: receiver
- // r3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r3,
- r9,
- &try_holey_map);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- r3,
- r9,
- &call_builtin);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(r3, r3, &call_builtin);
- }
-
- // Save new length.
- __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- __ RecordWrite(elements,
- end_elements,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ mov(r0, scratch);
- __ Ret();
-
- __ bind(&attempt_to_grow_elements);
- // scratch: array's length + 1.
-
- if (!FLAG_inline_new) {
- __ b(&call_builtin);
- }
-
- __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r9, r9, &call_builtin);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
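-      // (If the elements array is the most recently allocated object in new
-      // space, its end coincides with the allocation top and it can be grown
-      // in place by bumping the top by kAllocationDelta words.)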
- // Load top and check if it is the end of elements.
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
- __ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r4, Operand(new_space_allocation_top));
- __ ldr(r3, MemOperand(r4));
- __ cmp(end_elements, r3);
- __ b(ne, &call_builtin);
-
- __ mov(r9, Operand(new_space_allocation_limit));
- __ ldr(r9, MemOperand(r9));
- __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
- __ cmp(r3, r9);
- __ b(hi, &call_builtin);
-
-      // There is room to grow the elements in place.
- // Update new_space_allocation_top.
- __ str(r3, MemOperand(r4));
- // Push the argument.
- __ str(r2, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ str(r3, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
- __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ mov(r0, scratch);
- __ Ret();
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
- }
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss, return_undefined, call_builtin;
- Register receiver = r0;
- Register scratch = r1;
- Register elements = r3;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r4 and calculate new length.
- __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC);
- __ b(lt, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4));
- __ ldr(scratch, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ cmp(scratch, r6);
- __ b(eq, &call_builtin);
-
- // Set the array's length.
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
- const int argc = arguments().immediate();
- __ Drop(argc + 1);
- __ mov(r0, scratch);
- __ Ret();
-
- __ bind(&return_undefined);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = r0;
- Register index = r4;
- Register result = r1;
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ mov(r0, result);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = r0;
- Register index = r4;
- Register scratch = r3;
- Register result = r1;
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ mov(r0, result);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = r1;
- __ ldr(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, code, Operand(Smi::FromInt(0xffff)));
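-  // (Masking with the tagged constant Smi::FromInt(0xffff) clears the upper
-  // payload bits while leaving the smi tag intact, so no untagging is
-  // needed.)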
-
- StringCharFromCodeGenerator generator(code, r0);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss, slow;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- __ SmiTst(r0);
- __ Drop(argc + 1, eq);
- __ Ret(eq);
-
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
-
- Label smi_check, just_return;
-
- // Load the HeapNumber value.
-  // We will need access to the value in the core registers, so we load it
-  // with ldrd and move it to the FPU. This also spares a sub instruction for
-  // updating the HeapNumber value address, as vldr expects an offset that is
-  // a multiple of 4.
- __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(d1, r4, r5);
-
- // Check for NaN, Infinities and -0.
- // They are invariant through a Math.Floor call, so just
- // return the original argument.
- __ Sbfx(r3, r5, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- __ cmp(r3, Operand(-1));
- __ b(eq, &just_return);
- __ eor(r3, r5, Operand(0x80000000u));
- __ orr(r3, r3, r4, SetCC);
- __ b(eq, &just_return);
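-  // (The Sbfx/cmp pair detects an all-ones exponent, i.e. NaN or an
-  // infinity; the eor/orr pair sets the Z flag only for the bit pattern
-  // of -0.)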
- // Test for values that can be exactly represented as a
- // signed 32-bit integer.
- __ TryDoubleToInt32Exact(r0, d1, d2);
-  // If the conversion was exact, go on to the smi range check.
- __ b(eq, &smi_check);
- __ cmp(r5, Operand(0));
-
-  // If the input is in ]+0, +inf[, the cmp has cleared overflow and negative
-  // (V=0 and N=0), so the two conditional instructions below won't execute
-  // and we fall through to smi_check to see if the result fits in a smi.
-
-  // If the input is in ]-inf, -0[, subtract one and go to slow if that
-  // overflows; otherwise fall through to the smi check.
-  // Hint: if x is a negative, non-integer number,
-  // floor(x) <=> round_to_zero(x) - 1.
- __ sub(r0, r0, Operand(1), SetCC, mi);
- __ b(vs, &slow);
-
- __ bind(&smi_check);
-  // Check if the result fits in a smi. If the conversion overflowed, the
-  // result is either 0x80000000 or 0x7FFFFFFF and won't fit in a smi, so
-  // branch to slow in that case.
- __ SmiTag(r0, SetCC);
- __ b(vs, &slow);
-
- __ bind(&just_return);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- __ JumpIfNotSmi(r0, &not_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
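-  // (With m = x >> 31, this computes abs(x) = (x ^ m) - m directly on the
-  // tagged smi; it only goes wrong for the most negative smi, handled below.)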
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ b(mi, &slow);
-
- // Smi case done.
- __ Drop(argc + 1);
- __ Ret();
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(&not_smi);
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ tst(r1, Operand(HeapNumber::kSignMask));
- __ b(ne, &negative_sign);
- __ Drop(argc + 1);
- __ Ret();
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ eor(r1, r1, Operand(HeapNumber::kSignMask));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
- __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
-  // Bail out if the object is a global object, as we don't want to
-  // repatch it to the global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r1, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, r0, r3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
-
- ReserveSpaceForFastApiCall(masm(), r0);
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(
- IC::CurrentTypeOf(object, isolate()),
- r1, holder, r0, r3, r4, name, depth, &miss);
-
- GenerateFastApiDirectCall(masm(), optimization, argc, false);
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- HandlerFrontendFooter(&miss_before_stack_reserved);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
- Label success;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(object, ip);
- __ b(eq, &success);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(object, ip);
- __ b(ne, miss);
- __ bind(&success);
-}
-
-
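-// When the receiver is the global object, the receiver slot on the stack is
-// rewritten to the object's global receiver before jumping to the function.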
-void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
- if (object->IsGlobalObject()) {
- const int argc = arguments().immediate();
- const int receiver_offset = argc * kPointerSize;
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, receiver_offset));
- }
-}
-
-
-Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* miss) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- GenerateNameCheck(name, miss);
-
- Register reg = r0;
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- const int receiver_offset = argc * kPointerSize;
- __ ldr(r0, MemOperand(sp, receiver_offset));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(r0, miss);
- }
-
-  // Make sure that it's okay not to patch the on-stack receiver unless
-  // we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1, r1, r3);
-
- // Check that the maps haven't changed.
- reg = CheckPrototypes(
- IC::CurrentTypeOf(object, isolate()),
- reg, holder, r1, r3, r4, name, miss);
- break;
-
- case STRING_CHECK: {
- // Check that the object is a string.
- __ CompareObjectType(reg, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, miss);
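-      // (All string instance types are below FIRST_NONSTRING_TYPE, so a
-      // greater-or-equal compare result means "not a string".)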
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r1, miss);
- break;
- }
- case SYMBOL_CHECK: {
- // Check that the object is a symbol.
- __ CompareObjectType(reg, r3, r3, SYMBOL_TYPE);
- __ b(ne, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, r1, miss);
- break;
- }
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(reg, &fast);
- __ CompareObjectType(reg, r3, r3, HEAP_NUMBER_TYPE);
- __ b(ne, miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r1, miss);
- break;
- }
- case BOOLEAN_CHECK: {
- GenerateBooleanCheck(reg, miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r1, miss);
- break;
- }
- }
-
- if (check != RECEIVER_MAP_CHECK) {
- Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
- reg = CheckPrototypes(
- IC::CurrentTypeOf(prototype, isolate()),
- r1, holder, r1, r3, r4, name, miss);
- }
-
- return reg;
-}
-
-
-void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
- Register function,
- Label* miss) {
- ASSERT(function.is(r1));
- // Check that the function really is a function.
- GenerateFunctionCheck(function, r3, miss);
- PatchGlobalProxy(object);
-
- // Invoke the function.
- __ InvokeFunction(r1, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind());
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), r2, extra_state());
- compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
- &miss);
-
- // Move returned value, the function to call, to r1.
- __ mov(r1, r0);
- // Restore receiver.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- GenerateJumpFunction(object, r1, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-  // Potentially loads a closure that matches the shared function info of the
-  // function, rather than the function itself.
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- GenerateJumpFunction(object, r1, function);
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
// Stub never generated for non-global objects that require access checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(receiver()); // receiver
+ __ push(holder_reg);
__ mov(ip, Operand(callback)); // callback info
__ push(ip);
__ mov(ip, Operand(name));
@@ -2537,24 +1173,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -2567,26 +1186,31 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
- __ push(r0);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ Push(r1, r0);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2610,21 +1234,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -2632,16 +1241,12 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name) {
NonexistentHandlerFrontend(type, last, name);
@@ -2670,33 +1275,22 @@ Register* KeyedLoadStubCompiler::registers() {
}
-Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { r1, r2, r0, r3, r4, r5 };
- return registers;
+Register StoreStubCompiler::value() {
+ return r0;
}
-Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { r2, r1, r0, r3, r4, r5 };
+Register* StoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { r1, r2, r3, r4, r5 };
return registers;
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Operand(name));
- __ b(ne, miss);
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Operand(name));
- __ b(ne, miss);
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { r2, r1, r3, r4, r5 };
+ return registers;
}
@@ -2705,6 +1299,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
@@ -2713,15 +1308,21 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2740,13 +1341,12 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
Label miss;
-
HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
@@ -2760,13 +1360,13 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ b(eq, &miss);
}
- HandlerFrontendFooter(name, &miss);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
__ mov(r0, r4);
__ Ret();
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
@@ -2780,8 +1380,10 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ cmp(this->name(), Operand(name));
+ __ b(ne, &miss);
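+    // (Keyed ICs carry the property name in a register rather than baked
+    // into the code, so it must be checked explicitly here.)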
}
Label number_case;
@@ -2794,13 +1396,13 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
int number_of_handled_maps = 0;
__ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Type> type = types->at(current);
+ Handle<HeapType> type = types->at(current);
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
__ mov(ip, Operand(map));
__ cmp(map_reg, ip);
- if (type->Is(Type::Number())) {
+ if (type->Is(HeapType::Number())) {
ASSERT(!number_case.is_unused());
__ bind(&number_case);
}
@@ -2819,6 +1421,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
diff --git a/chromium/v8/src/arm64/OWNERS b/chromium/v8/src/arm64/OWNERS
new file mode 100644
index 00000000000..906a5ce6418
--- /dev/null
+++ b/chromium/v8/src/arm64/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/chromium/v8/src/arm64/assembler-arm64-inl.h b/chromium/v8/src/arm64/assembler-arm64-inl.h
new file mode 100644
index 00000000000..135858d8b1c
--- /dev/null
+++ b/chromium/v8/src/arm64/assembler-arm64-inl.h
@@ -0,0 +1,1264 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
+#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
+
+#include "src/arm64/assembler-arm64.h"
+#include "src/cpu.h"
+#include "src/debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+bool CpuFeatures::SupportsCrankshaft() { return true; }
+
+
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+ UNIMPLEMENTED();
+}
+
+
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+inline unsigned CPURegister::code() const {
+ ASSERT(IsValid());
+ return reg_code;
+}
+
+
+inline CPURegister::RegisterType CPURegister::type() const {
+ ASSERT(IsValidOrNone());
+ return reg_type;
+}
+
+
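+// Each register maps to one bit of a RegList mask, indexed by register code.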
+inline RegList CPURegister::Bit() const {
+ ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
+ return IsValid() ? 1UL << reg_code : 0;
+}
+
+
+inline unsigned CPURegister::SizeInBits() const {
+ ASSERT(IsValid());
+ return reg_size;
+}
+
+
+inline int CPURegister::SizeInBytes() const {
+ ASSERT(IsValid());
+ ASSERT(SizeInBits() % 8 == 0);
+ return reg_size / 8;
+}
+
+
+inline bool CPURegister::Is32Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 32;
+}
+
+
+inline bool CPURegister::Is64Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 64;
+}
+
+
+inline bool CPURegister::IsValid() const {
+ if (IsValidRegister() || IsValidFPRegister()) {
+ ASSERT(!IsNone());
+ return true;
+ } else {
+ ASSERT(IsNone());
+ return false;
+ }
+}
+
+
+inline bool CPURegister::IsValidRegister() const {
+ return IsRegister() &&
+ ((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
+ ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
+}
+
+
+inline bool CPURegister::IsValidFPRegister() const {
+ return IsFPRegister() &&
+ ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
+ (reg_code < kNumberOfFPRegisters);
+}
+
+
+inline bool CPURegister::IsNone() const {
+ // kNoRegister types should always have size 0 and code 0.
+ ASSERT((reg_type != kNoRegister) || (reg_code == 0));
+ ASSERT((reg_type != kNoRegister) || (reg_size == 0));
+
+ return reg_type == kNoRegister;
+}
+
+
+inline bool CPURegister::Is(const CPURegister& other) const {
+ ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return Aliases(other) && (reg_size == other.reg_size);
+}
+
+
+inline bool CPURegister::Aliases(const CPURegister& other) const {
+ ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (reg_code == other.reg_code) && (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsRegister() const {
+ return reg_type == kRegister;
+}
+
+
+inline bool CPURegister::IsFPRegister() const {
+ return reg_type == kFPRegister;
+}
+
+
+inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
+ return (reg_size == other.reg_size) && (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsValidOrNone() const {
+ return IsValid() || IsNone();
+}
+
+
+inline bool CPURegister::IsZero() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kZeroRegCode);
+}
+
+
+inline bool CPURegister::IsSP() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kSPRegInternalCode);
+}
+
+
+inline void CPURegList::Combine(const CPURegList& other) {
+ ASSERT(IsValid());
+ ASSERT(other.type() == type_);
+ ASSERT(other.RegisterSizeInBits() == size_);
+ list_ |= other.list();
+}
+
+
+inline void CPURegList::Remove(const CPURegList& other) {
+ ASSERT(IsValid());
+ if (other.type() == type_) {
+ list_ &= ~other.list();
+ }
+}
+
+
+inline void CPURegList::Combine(const CPURegister& other) {
+ ASSERT(other.type() == type_);
+ ASSERT(other.SizeInBits() == size_);
+ Combine(other.code());
+}
+
+
+inline void CPURegList::Remove(const CPURegister& other1,
+ const CPURegister& other2,
+ const CPURegister& other3,
+ const CPURegister& other4) {
+ if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
+ if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
+ if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
+ if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
+}
+
+
+inline void CPURegList::Combine(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ |= (1UL << code);
+}
+
+
+inline void CPURegList::Remove(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ &= ~(1UL << code);
+}
+
+
+inline Register Register::XRegFromCode(unsigned code) {
+ if (code == kSPRegInternalCode) {
+ return csp;
+ } else {
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kXRegSizeInBits);
+ }
+}
+
+
+inline Register Register::WRegFromCode(unsigned code) {
+ if (code == kSPRegInternalCode) {
+ return wcsp;
+ } else {
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kWRegSizeInBits);
+ }
+}
+
+
+inline FPRegister FPRegister::SRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kSRegSizeInBits);
+}
+
+
+inline FPRegister FPRegister::DRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kDRegSizeInBits);
+}
+
+
+inline Register CPURegister::W() const {
+ ASSERT(IsValidRegister());
+ return Register::WRegFromCode(reg_code);
+}
+
+
+inline Register CPURegister::X() const {
+ ASSERT(IsValidRegister());
+ return Register::XRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::S() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::SRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::D() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::DRegFromCode(reg_code);
+}
+
+
+// Immediate.
+// Default initializer is for int types.
+template<typename T>
+struct ImmediateInitializer {
+ static const bool kIsIntType = true;
+ static inline RelocInfo::Mode rmode_for(T) {
+ return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
+ }
+ static inline int64_t immediate_for(T t) {
+ STATIC_ASSERT(sizeof(T) <= 8);
+ return t;
+ }
+};
+
+
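+// (The specializations below override the reloc mode and bit pattern for
+// non-integer immediates such as Smi* and ExternalReference.)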
+template<>
+struct ImmediateInitializer<Smi*> {
+ static const bool kIsIntType = false;
+ static inline RelocInfo::Mode rmode_for(Smi* t) {
+ return RelocInfo::NONE64;
+ }
+  static inline int64_t immediate_for(Smi* t) {
+ return reinterpret_cast<int64_t>(t);
+ }
+};
+
+
+template<>
+struct ImmediateInitializer<ExternalReference> {
+ static const bool kIsIntType = false;
+ static inline RelocInfo::Mode rmode_for(ExternalReference t) {
+ return RelocInfo::EXTERNAL_REFERENCE;
+ }
+  static inline int64_t immediate_for(ExternalReference t) {
+ return reinterpret_cast<int64_t>(t.address());
+ }
+};
+
+
+template<typename T>
+Immediate::Immediate(Handle<T> value) {
+ InitializeHandle(value);
+}
+
+
+template<typename T>
+Immediate::Immediate(T t)
+ : value_(ImmediateInitializer<T>::immediate_for(t)),
+ rmode_(ImmediateInitializer<T>::rmode_for(t)) {}
+
+
+template<typename T>
+Immediate::Immediate(T t, RelocInfo::Mode rmode)
+ : value_(ImmediateInitializer<T>::immediate_for(t)),
+ rmode_(rmode) {
+ STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
+}
+
+
+// Operand.
+template<typename T>
+Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
+
+
+template<typename T>
+Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}
+
+
+template<typename T>
+Operand::Operand(T t, RelocInfo::Mode rmode)
+ : immediate_(t, rmode),
+ reg_(NoReg) {}
+
+
+Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
+ : immediate_(0),
+ reg_(reg),
+ shift_(shift),
+ extend_(NO_EXTEND),
+ shift_amount_(shift_amount) {
+ ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
+ ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
+ ASSERT(!reg.IsSP());
+}
+
+
+Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
+ : immediate_(0),
+ reg_(reg),
+ shift_(NO_SHIFT),
+ extend_(extend),
+ shift_amount_(shift_amount) {
+ ASSERT(reg.IsValid());
+ ASSERT(shift_amount <= 4);
+ ASSERT(!reg.IsSP());
+
+ // Extend modes SXTX and UXTX require a 64-bit register.
+ ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+}
+
+
+bool Operand::IsImmediate() const {
+ return reg_.Is(NoReg);
+}
+
+
+bool Operand::IsShiftedRegister() const {
+ return reg_.IsValid() && (shift_ != NO_SHIFT);
+}
+
+
+bool Operand::IsExtendedRegister() const {
+ return reg_.IsValid() && (extend_ != NO_EXTEND);
+}
+
+
+bool Operand::IsZero() const {
+ if (IsImmediate()) {
+ return ImmediateValue() == 0;
+ } else {
+ return reg().IsZero();
+ }
+}
+
+
+Operand Operand::ToExtendedRegister() const {
+ ASSERT(IsShiftedRegister());
+ ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+ return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
+}
+
+
+Immediate Operand::immediate() const {
+ ASSERT(IsImmediate());
+ return immediate_;
+}
+
+
+int64_t Operand::ImmediateValue() const {
+ ASSERT(IsImmediate());
+ return immediate_.value();
+}
+
+
+Register Operand::reg() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return reg_;
+}
+
+
+Shift Operand::shift() const {
+ ASSERT(IsShiftedRegister());
+ return shift_;
+}
+
+
+Extend Operand::extend() const {
+ ASSERT(IsExtendedRegister());
+ return extend_;
+}
+
+
+unsigned Operand::shift_amount() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return shift_amount_;
+}
+
+
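+// A smi on ARM64 keeps its 32-bit payload in the upper word (kSmiShift is
+// 32), so untagging is a single arithmetic shift right.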
+Operand Operand::UntagSmi(Register smi) {
+ ASSERT(smi.Is64Bits());
+ return Operand(smi, ASR, kSmiShift);
+}
+
+
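+// Untagging (ASR kSmiShift) and scaling (LSL scale) fold into a single
+// shift: a net LSL when scale > kSmiShift, a net ASR when it is smaller.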
+Operand Operand::UntagSmiAndScale(Register smi, int scale) {
+ ASSERT(smi.Is64Bits());
+ ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
+ if (scale > kSmiShift) {
+ return Operand(smi, LSL, scale - kSmiShift);
+ } else if (scale < kSmiShift) {
+ return Operand(smi, ASR, kSmiShift - scale);
+ }
+ return Operand(smi);
+}
+
+
+MemOperand::MemOperand()
+ : base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+}
+
+
+MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
+ shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(!regoffset.IsSP());
+ ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+
+ // SXTX extend mode requires a 64-bit offset register.
+ ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Shift shift,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+ ASSERT(shift == LSL);
+}
+
+
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+ : base_(base), addrmode_(addrmode) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+
+ if (offset.IsImmediate()) {
+ offset_ = offset.ImmediateValue();
+
+ regoffset_ = NoReg;
+ } else if (offset.IsShiftedRegister()) {
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+    shift_ = offset.shift();
+ shift_amount_ = offset.shift_amount();
+
+ extend_ = NO_EXTEND;
+ offset_ = 0;
+
+ // These assertions match those in the shifted-register constructor.
+ ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+ ASSERT(shift_ == LSL);
+ } else {
+ ASSERT(offset.IsExtendedRegister());
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ extend_ = offset.extend();
+ shift_amount_ = offset.shift_amount();
+
+    shift_ = NO_SHIFT;
+ offset_ = 0;
+
+ // These assertions match those in the extended-register constructor.
+ ASSERT(!regoffset_.IsSP());
+ ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+ ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+ }
+}
+
+
+bool MemOperand::IsImmediateOffset() const {
+ return (addrmode_ == Offset) && regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsRegisterOffset() const {
+ return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsPreIndex() const {
+ return addrmode_ == PreIndex;
+}
+
+
+bool MemOperand::IsPostIndex() const {
+ return addrmode_ == PostIndex;
+}
+
+
+Operand MemOperand::OffsetAsOperand() const {
+ if (IsImmediateOffset()) {
+ return offset();
+ } else {
+ ASSERT(IsRegisterOffset());
+ if (extend() == NO_EXTEND) {
+ return Operand(regoffset(), shift(), shift_amount());
+ } else {
+ return Operand(regoffset(), extend(), shift_amount());
+ }
+ }
+}
+
+
+void Assembler::Unreachable() {
+#ifdef USE_SIMULATOR
+ debug("UNREACHABLE", __LINE__, BREAK);
+#else
+ // Crash by branching to 0. lr now points near the fault.
+ Emit(BLR | Rn(xzr));
+#endif
+}
+
+
+Address Assembler::target_pointer_address_at(Address pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ ASSERT(instr->IsLdrLiteralX());
+ return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
+}
+
+
+// Read/Modify the code target address in the branch/call instruction at pc.
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
+ return Memory::Address_at(target_pointer_address_at(pc));
+}
+
+
+Address Assembler::target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ // Returns the address of the call target from the return address that will
+ // be returned to after a call.
+ // Call sequence on ARM64 is:
+ // ldr ip0, #... @ load from literal pool
+ // blr ip0
+ Address candidate = pc - 2 * kInstructionSize;
+ Instruction* instr = reinterpret_cast<Instruction*>(candidate);
+ USE(instr);
+ ASSERT(instr->IsLdrLiteralX());
+ return candidate;
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+ // The call, generated by MacroAssembler::Call, is one of two possible
+ // sequences:
+ //
+ // Without relocation:
+ // movz temp, #(target & 0x000000000000ffff)
+ // movk temp, #(target & 0x00000000ffff0000)
+ // movk temp, #(target & 0x0000ffff00000000)
+ // blr temp
+ //
+ // With relocation:
+ // ldr temp, =target
+ // blr temp
+ //
+ // The return address is immediately after the blr instruction in both cases,
+ // so it can be found by adding the call size to the address at the start of
+ // the call sequence.
+ STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize);
+ STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ if (instr->IsMovz()) {
+ // Verify the instruction sequence.
+ ASSERT(instr->following(1)->IsMovk());
+ ASSERT(instr->following(2)->IsMovk());
+ ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithoutRelocation;
+ } else {
+ // Verify the instruction sequence.
+ ASSERT(instr->IsLdrLiteralX());
+ ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithRelocation;
+ }
+}
+
+
+void Assembler::deserialization_set_special_target_at(
+ Address constant_pool_entry, Code* code, Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Memory::Address_at(target_pointer_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to always flush the
+ // instruction cache after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+  // However, on ARM, no instruction is actually patched in the case of
+ // embedded constants of the form:
+ // ldr ip, [pc, #...]
+ // since the instruction accessing this address in the constant pool remains
+ // unchanged, a flush is not required.
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+}
+
+
+int RelocInfo::target_address_size() {
+ return kPointerSize;
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_pointer_address_at(pc_);
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ ASSERT(IsInConstantPool());
+ return Assembler::target_pointer_address_at(pc_);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_, host_)));
+}
+
+
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
+}
+
+
+Address RelocInfo::target_reference() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) {
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+ }
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ UNIMPLEMENTED();
+ Cell *null_cell = NULL;
+ return Handle<Cell>(null_cell);
+}
+
+
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ UNIMPLEMENTED();
+}
+
+
+static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
+static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on ARM64.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ // Read the stub entry point from the code age sequence.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
+ // Overwrite the stub entry point in the code age sequence. This is loaded as
+ // a literal so there is no need to call FlushICache here.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ Memory::Address_at(stub_entry_address) = stub->instruction_start();
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  // For the above sequences the RelocInfo points to the literal load that
+  // loads the call address.
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ Assembler::set_target_address_at(pc_, host_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, host_, NULL);
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ // The sequence must be:
+ // ldr ip0, [pc, #offset]
+ // blr ip0
+ // See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
+ Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
+ Instruction* i2 = i1->following();
+ return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
+ i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
+ return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ isolate->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
+LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x : LDR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDR_d : LDR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDP_x : LDP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDP_d : LDP_s;
+ }
+}
+
+
+LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STR_x : STR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STR_d : STR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STP_x : STP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STP_d : STP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDNP_x : LDNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDNP_d : LDNP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STNP_x : STNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STNP_d : STNP_s;
+ }
+}
+
+
+LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
+ }
+}
+
+
+int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
+ ASSERT(kStartOfLabelLinkChain == 0);
+ int offset = LinkAndGetByteOffsetTo(label);
+ ASSERT(IsAligned(offset, kInstructionSize));
+ return offset >> kInstructionSizeLog2;
+}
+
+
+Instr Assembler::Flags(FlagsUpdate S) {
+ if (S == SetFlags) {
+ return 1 << FlagsUpdate_offset;
+ } else if (S == LeaveFlags) {
+ return 0 << FlagsUpdate_offset;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+Instr Assembler::Cond(Condition cond) {
+ return cond << Condition_offset;
+}
+
+
+Instr Assembler::ImmPCRelAddress(int imm21) {
+ CHECK(is_int21(imm21));
+ Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
+ Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+ Instr immlo = imm << ImmPCRelLo_offset;
+ return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+}
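+
+// For example, imm21 = 5 (0b101) is split as immhi = 0b1 and immlo = 0b01;
+// the encoding keeps the two low bits of the PC-relative offset in a field
+// separate from the remaining nineteen bits.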
+
+
+Instr Assembler::ImmUncondBranch(int imm26) {
+ CHECK(is_int26(imm26));
+ return truncate_to_int26(imm26) << ImmUncondBranch_offset;
+}
+
+
+Instr Assembler::ImmCondBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCondBranch_offset;
+}
+
+
+Instr Assembler::ImmCmpBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCmpBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranch(int imm14) {
+ CHECK(is_int14(imm14));
+ return truncate_to_int14(imm14) << ImmTestBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
+ ASSERT(is_uint6(bit_pos));
+ // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+ unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+ unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+ b5 &= ImmTestBranchBit5_mask;
+ b40 &= ImmTestBranchBit40_mask;
+ return b5 | b40;
+}
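+
+// For example, bit_pos = 33 (0b100001) places bit 5 in the b5 field and the
+// low five bits (0b00001) in the b40 field, matching the tbz/tbnz encoding of
+// bit positions above 31.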
+
+
+Instr Assembler::SF(Register rd) {
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+}
+
+
+Instr Assembler::ImmAddSub(int64_t imm) {
+ ASSERT(IsImmAddSub(imm));
+ if (is_uint12(imm)) { // No shift required.
+ return imm << ImmAddSub_offset;
+ } else {
+ return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ }
+}
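+
+// For example, imm = 0x123 is encoded directly in the 12-bit immediate field,
+// while imm = 0x123000 is encoded as 0x123 with the shift bit set (LSL #12).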
+
+
+Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
+ ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
+ USE(reg_size);
+ return imms << ImmS_offset;
+}
+
+
+Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+ ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
+ USE(reg_size);
+ ASSERT(is_uint6(immr));
+ return immr << ImmR_offset;
+}
+
+
+Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ ASSERT(is_uint6(imms));
+ ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
+ USE(reg_size);
+ return imms << ImmSetBits_offset;
+}
+
+
+Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+ ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
+ USE(reg_size);
+ return immr << ImmRotate_offset;
+}
+
+
+Instr Assembler::ImmLLiteral(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmLLiteral_offset;
+}
+
+
+Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
+ USE(reg_size);
+ return bitn << BitN_offset;
+}
+
+
+Instr Assembler::ShiftDP(Shift shift) {
+ ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ return shift << ShiftDP_offset;
+}
+
+
+Instr Assembler::ImmDPShift(unsigned amount) {
+ ASSERT(is_uint6(amount));
+ return amount << ImmDPShift_offset;
+}
+
+
+Instr Assembler::ExtendMode(Extend extend) {
+ return extend << ExtendMode_offset;
+}
+
+
+Instr Assembler::ImmExtendShift(unsigned left_shift) {
+ ASSERT(left_shift <= 4);
+ return left_shift << ImmExtendShift_offset;
+}
+
+
+Instr Assembler::ImmCondCmp(unsigned imm) {
+ ASSERT(is_uint5(imm));
+ return imm << ImmCondCmp_offset;
+}
+
+
+Instr Assembler::Nzcv(StatusFlags nzcv) {
+ return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+}
+
+
+Instr Assembler::ImmLSUnsigned(int imm12) {
+ ASSERT(is_uint12(imm12));
+ return imm12 << ImmLSUnsigned_offset;
+}
+
+
+Instr Assembler::ImmLS(int imm9) {
+ ASSERT(is_int9(imm9));
+ return truncate_to_int9(imm9) << ImmLS_offset;
+}
+
+
+Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
+ ASSERT(((imm7 >> size) << size) == imm7);
+ int scaled_imm7 = imm7 >> size;
+ ASSERT(is_int7(scaled_imm7));
+ return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
+}
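+
+// For example, with size == 3 (a 64-bit pair) a byte offset of 16 scales to
+// imm7 = 2; offsets that are not a multiple of the access size fail the first
+// ASSERT above.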
+
+
+Instr Assembler::ImmShiftLS(unsigned shift_amount) {
+ ASSERT(is_uint1(shift_amount));
+ return shift_amount << ImmShiftLS_offset;
+}
+
+
+Instr Assembler::ImmException(int imm16) {
+ ASSERT(is_uint16(imm16));
+ return imm16 << ImmException_offset;
+}
+
+
+Instr Assembler::ImmSystemRegister(int imm15) {
+ ASSERT(is_uint15(imm15));
+ return imm15 << ImmSystemRegister_offset;
+}
+
+
+Instr Assembler::ImmHint(int imm7) {
+ ASSERT(is_uint7(imm7));
+ return imm7 << ImmHint_offset;
+}
+
+
+Instr Assembler::ImmBarrierDomain(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierDomain_offset;
+}
+
+
+Instr Assembler::ImmBarrierType(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierType_offset;
+}
+
+
+LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
+ ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+ return static_cast<LSDataSize>(op >> SizeLS_offset);
+}
+
+
+Instr Assembler::ImmMoveWide(uint64_t imm) {
+ ASSERT(is_uint16(imm));
+ return imm << ImmMoveWide_offset;
+}
+
+
+Instr Assembler::ShiftMoveWide(int64_t shift) {
+ ASSERT(is_uint2(shift));
+ return shift << ShiftMoveWide_offset;
+}
+
+
+Instr Assembler::FPType(FPRegister fd) {
+ return fd.Is64Bits() ? FP64 : FP32;
+}
+
+
+Instr Assembler::FPScale(unsigned scale) {
+ ASSERT(is_uint6(scale));
+ return scale << FPScale_offset;
+}
+
+
+const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
+ return reg.Is64Bits() ? xzr : wzr;
+}
+
+
+inline void Assembler::CheckBufferSpace() {
+ ASSERT(pc_ < (buffer_ + buffer_size_));
+ if (buffer_space() < kGap) {
+ GrowBuffer();
+ }
+}
+
+
+inline void Assembler::CheckBuffer() {
+ CheckBufferSpace();
+ if (pc_offset() >= next_veneer_pool_check_) {
+ CheckVeneerPool(false, true);
+ }
+ if (pc_offset() >= next_constant_pool_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+TypeFeedbackId Assembler::RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
+ return recorded_ast_id_;
+}
+
+
+void Assembler::ClearRecordedAstId() {
+ recorded_ast_id_ = TypeFeedbackId::None();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
diff --git a/chromium/v8/src/arm64/assembler-arm64.cc b/chromium/v8/src/arm64/assembler-arm64.cc
new file mode 100644
index 00000000000..90cff59620e
--- /dev/null
+++ b/chromium/v8/src/arm64/assembler-arm64.cc
@@ -0,0 +1,2892 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#define ARM64_DEFINE_REG_STATICS
+
+#include "src/arm64/assembler-arm64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// CpuFeatures implementation.
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ if (cross_compile) {
+ // Always align csp in cross-compiled code; this is safe, and it ensures
+ // that csp will always be aligned if the feature is enabled by runtime
+ // probing.
+ if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
+ } else {
+ CPU cpu;
+ if (FLAG_enable_always_align_csp && (cpu.implementer() == CPU::NVIDIA ||
+ FLAG_debug_code)) {
+ supported_ |= 1u << ALWAYS_ALIGN_CSP;
+ }
+ }
+}
+
+
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
+// -----------------------------------------------------------------------------
+// CPURegList utilities.
+
+CPURegister CPURegList::PopLowestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountTrailingZeros(list_, kRegListSizeInBits);
+ ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
+
+
+CPURegister CPURegList::PopHighestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountLeadingZeros(list_, kRegListSizeInBits);
+ index = kRegListSizeInBits - 1 - index;
+ ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
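+
+// For a list holding {x2, x5}, PopLowestIndex() returns x2 and
+// PopHighestIndex() returns x5; both remove the returned register from the
+// list.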
+
+
+void CPURegList::RemoveCalleeSaved() {
+ if (type() == CPURegister::kRegister) {
+ Remove(GetCalleeSaved(RegisterSizeInBits()));
+ } else if (type() == CPURegister::kFPRegister) {
+ Remove(GetCalleeSavedFP(RegisterSizeInBits()));
+ } else {
+ ASSERT(type() == CPURegister::kNoRegister);
+ ASSERT(IsEmpty());
+ // The list must already be empty, so do nothing.
+ }
+}
+
+
+CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+ return CPURegList(CPURegister::kRegister, size, 19, 29);
+}
+
+
+CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
+ return CPURegList(CPURegister::kFPRegister, size, 8, 15);
+}
+
+
+CPURegList CPURegList::GetCallerSaved(unsigned size) {
+ // Registers x0-x18 and lr (x30) are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+ list.Combine(lr);
+ return list;
+}
+
+
+CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
+ // Registers d0-d7 and d16-d31 are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
+ list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
+ return list;
+}
+
+
+// This function defines the list of registers which are associated with a
+// safepoint slot. Safepoint register slots are saved contiguously on the stack.
+// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
+// code to index in the safepoint register slots. Any change here can affect
+// this mapping.
+CPURegList CPURegList::GetSafepointSavedRegisters() {
+ CPURegList list = CPURegList::GetCalleeSaved();
+ list.Combine(
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));
+
+ // Note that unfortunately we can't use symbolic names for registers and have
+ // to directly use register codes. This is because this function is used to
+ // initialize some static variables and we can't rely on register variables
+ // to be initialized due to static initialization order issues in C++.
+
+ // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
+ // preserved outside of the macro assembler.
+ list.Remove(16);
+ list.Remove(17);
+
+ // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
+ // is a caller-saved register according to the procedure call standard.
+ list.Combine(18);
+
+ // Drop jssp as the stack pointer doesn't need to be included.
+ list.Remove(28);
+
+ // Add the link register (x30) to the safepoint list.
+ list.Combine(30);
+
+ return list;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on ARM64 means that it is a movz/movk sequence. We don't
+ // generate those for relocatable pointers.
+ return false;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ return instr->IsLdrLiteralX();
+}
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * kInstructionSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ UNIMPLEMENTED();
+}
+
+
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ CPURegList regs(reg1, reg2, reg3, reg4);
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs.IncludesAliasOf(candidate)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return NoReg;
+}
+
+
+bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ int number_of_valid_regs = 0;
+ int number_of_valid_fpregs = 0;
+
+ RegList unique_regs = 0;
+ RegList unique_fpregs = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
+
+ for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+ if (regs[i].IsRegister()) {
+ number_of_valid_regs++;
+ unique_regs |= regs[i].Bit();
+ } else if (regs[i].IsFPRegister()) {
+ number_of_valid_fpregs++;
+ unique_fpregs |= regs[i].Bit();
+ } else {
+ ASSERT(!regs[i].IsValid());
+ }
+ }
+
+ int number_of_unique_regs =
+ CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
+ int number_of_unique_fpregs =
+ CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
+
+ ASSERT(number_of_valid_regs >= number_of_unique_regs);
+ ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+
+ return (number_of_valid_regs != number_of_unique_regs) ||
+ (number_of_valid_fpregs != number_of_unique_fpregs);
+}
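+
+// For example, passing x0 twice makes the count of valid registers exceed the
+// count of unique ones, so the function returns true; x0 and d0 never alias,
+// as general and FP registers are tracked in separate lists.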
+
+
+bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
+ match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
+ match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
+ match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
+ match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
+ return match;
+}
+
+
+void Immediate::InitializeHandle(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
+
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ value_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
+ value_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE64;
+ }
+}
+
+
+bool Operand::NeedsRelocation(const Assembler* assembler) const {
+ RelocInfo::Mode rmode = immediate_.rmode();
+
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ return assembler->serializer_enabled();
+ }
+
+ return !RelocInfo::IsNone(rmode);
+}
+
+
+// Assembler
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ recorded_ast_id_(TypeFeedbackId::None()),
+ unresolved_branches_(),
+ positions_recorder_(this) {
+ const_pool_blocked_nesting_ = 0;
+ veneer_pool_blocked_nesting_ = 0;
+ Reset();
+}
+
+
+Assembler::~Assembler() {
+ ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(const_pool_blocked_nesting_ == 0);
+ ASSERT(veneer_pool_blocked_nesting_ == 0);
+}
+
+
+void Assembler::Reset() {
+#ifdef DEBUG
+ ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
+ ASSERT(const_pool_blocked_nesting_ == 0);
+ ASSERT(veneer_pool_blocked_nesting_ == 0);
+ ASSERT(unresolved_branches_.empty());
+ memset(buffer_, 0, pc_ - buffer_);
+#endif
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
+ reinterpret_cast<byte*>(pc_));
+ num_pending_reloc_info_ = 0;
+ next_constant_pool_check_ = 0;
+ next_veneer_pool_check_ = kMaxInt;
+ no_const_pool_before_ = 0;
+ first_const_pool_use_ = -1;
+ ClearRecordedAstId();
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_reloc_info_ == 0);
+
+ // Set up code descriptor.
+ if (desc) {
+ desc->buffer = reinterpret_cast<byte*>(buffer_);
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
+ reloc_info_writer.pos();
+ desc->origin = this;
+ }
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::CheckLabelLinkChain(Label const * label) {
+#ifdef DEBUG
+ if (label->is_linked()) {
+ int linkoffset = label->pos();
+ bool end_of_chain = false;
+ while (!end_of_chain) {
+ Instruction * link = InstructionAt(linkoffset);
+ int linkpcoffset = link->ImmPCOffset();
+ int prevlinkoffset = linkoffset + linkpcoffset;
+
+ end_of_chain = (linkoffset == prevlinkoffset);
+ linkoffset = linkoffset + linkpcoffset;
+ }
+ }
+#endif
+}
+
+
+void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer) {
+ ASSERT(label->is_linked());
+
+ CheckLabelLinkChain(label);
+
+ Instruction* link = InstructionAt(label->pos());
+ Instruction* prev_link = link;
+ Instruction* next_link;
+ bool end_of_chain = false;
+
+ while (link != branch && !end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ prev_link = link;
+ link = next_link;
+ }
+
+ ASSERT(branch == link);
+ next_link = branch->ImmPCOffsetTarget();
+
+ if (branch == prev_link) {
+ // The branch is the first instruction in the chain.
+ if (branch == next_link) {
+ // It is also the last instruction in the chain, so it is the only branch
+ // currently referring to this label.
+ label->Unuse();
+ } else {
+ label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
+ }
+
+ } else if (branch == next_link) {
+ // The branch is the last (but not also the first) instruction in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ } else {
+ // The branch is in the middle of the chain.
+ if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
+ prev_link->SetImmPCOffsetTarget(next_link);
+ } else if (label_veneer != NULL) {
+ // Use the veneer for all previous links in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ end_of_chain = false;
+ link = next_link;
+ while (!end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ link->SetImmPCOffsetTarget(label_veneer);
+ link = next_link;
+ }
+ } else {
+ // The CHECK below will fail.
+ // Some other work could be attempted to fix up the chain, but it would be
+ // rather complicated. If we crash here, we may want to consider using a
+ // mechanism other than a chain of branches.
+ //
+ // Note that this situation currently should not happen, as we always call
+ // this function with a veneer to the target label.
+ // However, this could happen with a MacroAssembler in the following state:
+ // [previous code]
+ // B(label);
+ // [20KB code]
+ // Tbz(label); // First tbz. Pointing to unconditional branch.
+ // [20KB code]
+ // Tbz(label); // Second tbz. Pointing to the first tbz.
+ // [more code]
+ // and this function is called to remove the first tbz from the label link
+ // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
+ // the unconditional branch.
+ CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
+ UNREACHABLE();
+ }
+ }
+
+ CheckLabelLinkChain(label);
+}
+
+
+void Assembler::bind(Label* label) {
+ // Bind label to the address at pc_. All instructions (most likely branches)
+ // that are linked to this label will be updated to point to the newly-bound
+ // label.
+
+ ASSERT(!label->is_near_linked());
+ ASSERT(!label->is_bound());
+
+ DeleteUnresolvedBranchInfoForLabel(label);
+
+ // If the label is linked, the link chain looks something like this:
+ //
+ // |--I----I-------I-------L
+ // |---------------------->| pc_offset
+ // |-------------->| linkoffset = label->pos()
+ // |<------| link->ImmPCOffset()
+ // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset()
+ //
+ // On each iteration, the last link is updated and then removed from the
+ // chain until only one remains. At that point, the label is bound.
+ //
+ // If the label is not linked, no preparation is required before binding.
+ while (label->is_linked()) {
+ int linkoffset = label->pos();
+ Instruction* link = InstructionAt(linkoffset);
+ int prevlinkoffset = linkoffset + link->ImmPCOffset();
+
+ CheckLabelLinkChain(label);
+
+ ASSERT(linkoffset >= 0);
+ ASSERT(linkoffset < pc_offset());
+ ASSERT((linkoffset > prevlinkoffset) ||
+ (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
+ ASSERT(prevlinkoffset >= 0);
+
+ // Update the link to point to the label.
+ link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+
+ // Link the label to the previous link in the chain.
+ if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
+ // We hit kStartOfLabelLinkChain, so the chain is fully processed.
+ label->Unuse();
+ } else {
+ // Update the label for the next iteration.
+ label->link_to(prevlinkoffset);
+ }
+ }
+ label->bind_to(pc_offset());
+
+ ASSERT(label->is_bound());
+ ASSERT(!label->is_linked());
+}
+
+
+int Assembler::LinkAndGetByteOffsetTo(Label* label) {
+ ASSERT(sizeof(*pc_) == 1);
+ CheckLabelLinkChain(label);
+
+ int offset;
+ if (label->is_bound()) {
+ // The label is bound, so it does not need to be updated. Referring
+ // instructions must link directly to the label as they will not be
+ // updated.
+ //
+ // In this case, label->pos() returns the offset of the label from the
+ // start of the buffer.
+ //
+ // Note that offset can be zero for self-referential instructions. (This
+ // could be useful for ADR, for example.)
+ offset = label->pos() - pc_offset();
+ ASSERT(offset <= 0);
+ } else {
+ if (label->is_linked()) {
+ // The label is linked, so the referring instruction should be added onto
+ // the end of the label's link chain.
+ //
+ // In this case, label->pos() returns the offset of the last linked
+ // instruction from the start of the buffer.
+ offset = label->pos() - pc_offset();
+ ASSERT(offset != kStartOfLabelLinkChain);
+ // Note that the offset here needs to be PC-relative only so that the
+ // first instruction in a buffer can link to an unbound label. Otherwise,
+ // the offset would be 0 for this case, and 0 is reserved for
+ // kStartOfLabelLinkChain.
+ } else {
+ // The label is unused, so it now becomes linked and the referring
+ // instruction is at the start of the new link chain.
+ offset = kStartOfLabelLinkChain;
+ }
+ // The instruction at pc is now the last link in the label's chain.
+ label->link_to(pc_offset());
+ }
+
+ return offset;
+}
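+
+// For example, a label bound at buffer offset 4 and referenced when
+// pc_offset() == 12 yields -8, while the first reference to an unbound,
+// unlinked label yields kStartOfLabelLinkChain (0) and links the label to the
+// current pc.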
+
+
+void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
+ ASSERT(label->is_linked());
+ CheckLabelLinkChain(label);
+
+ int link_offset = label->pos();
+ int link_pcoffset;
+ bool end_of_chain = false;
+
+ while (!end_of_chain) {
+ Instruction * link = InstructionAt(link_offset);
+ link_pcoffset = link->ImmPCOffset();
+
+ // ADR instructions are not handled by veneers.
+ if (link->IsImmBranch()) {
+ int max_reachable_pc = InstructionOffset(link) +
+ Instruction::ImmBranchRange(link->BranchType());
+ typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
+ std::pair<unresolved_info_it, unresolved_info_it> range;
+ range = unresolved_branches_.equal_range(max_reachable_pc);
+ unresolved_info_it it;
+ for (it = range.first; it != range.second; ++it) {
+ if (it->second.pc_offset_ == link_offset) {
+ unresolved_branches_.erase(it);
+ break;
+ }
+ }
+ }
+
+ end_of_chain = (link_pcoffset == 0);
+ link_offset = link_offset + link_pcoffset;
+ }
+}
+
+
+void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
+ if (unresolved_branches_.empty()) {
+ ASSERT(next_veneer_pool_check_ == kMaxInt);
+ return;
+ }
+
+ if (label->is_linked()) {
+ // Branches to this label will be resolved when the label is bound, normally
+ // just after all the associated info has been deleted.
+ DeleteUnresolvedBranchInfoForLabelTraverse(label);
+ }
+ if (unresolved_branches_.empty()) {
+ next_veneer_pool_check_ = kMaxInt;
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+}
+
+
+void Assembler::StartBlockConstPool() {
+ if (const_pool_blocked_nesting_++ == 0) {
+ // Prevent constant pool checks from happening by setting the next check to
+ // the largest possible offset.
+ next_constant_pool_check_ = kMaxInt;
+ }
+}
+
+
+void Assembler::EndBlockConstPool() {
+ if (--const_pool_blocked_nesting_ == 0) {
+ // Check the constant pool hasn't been blocked for too long.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
+ // Two cases:
+ // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
+ // still blocked
+ // * no_const_pool_before_ < next_constant_pool_check_ and the next emit
+ // will trigger a check.
+ next_constant_pool_check_ = no_const_pool_before_;
+ }
+}
+
+
+bool Assembler::is_const_pool_blocked() const {
+ return (const_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_const_pool_before_);
+}
+
+
+bool Assembler::IsConstantPoolAt(Instruction* instr) {
+ // The constant pool marker is made of two instructions. These instructions
+ // will never be emitted by the JIT, so checking for the first one is enough:
+ // 0: ldr xzr, #<size of pool>
+ bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
+
+ // It is still worth asserting the marker is complete.
+ // 4: blr xzr
+ ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
+ instr->following()->Rn() == xzr.code()));
+
+ return result;
+}
+
+
+int Assembler::ConstantPoolSizeAt(Instruction* instr) {
+#ifdef USE_SIMULATOR
+ // Assembler::debug() embeds constants directly into the instruction stream.
+ // Although this is not a genuine constant pool, treat it like one to avoid
+ // disassembling the constants.
+ if ((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kImmExceptionIsDebug)) {
+ const char* message =
+ reinterpret_cast<const char*>(
+ instr->InstructionAtOffset(kDebugMessageOffset));
+ int size = kDebugMessageOffset + strlen(message) + 1;
+ return RoundUp(size, kInstructionSize) / kInstructionSize;
+ }
+ // Same for printf support, see MacroAssembler::CallPrintf().
+ if ((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kImmExceptionIsPrintf)) {
+ return kPrintfLength / kInstructionSize;
+ }
+#endif
+ if (IsConstantPoolAt(instr)) {
+ return instr->ImmLLiteral();
+ } else {
+ return -1;
+ }
+}
+
+
+void Assembler::ConstantPoolMarker(uint32_t size) {
+ ASSERT(is_const_pool_blocked());
+ // + 1 is for the crash guard.
+ Emit(LDR_x_lit | ImmLLiteral(size + 1) | Rt(xzr));
+}
+
+
+void Assembler::EmitPoolGuard() {
+ // We must generate only one instruction because this is used in scopes that
+ // control the size of the generated code.
+ Emit(BLR | Rn(xzr));
+}
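+
+// Together, ConstantPoolMarker() and EmitPoolGuard() produce the
+// two-instruction marker that IsConstantPoolAt() recognizes, roughly:
+//   ldr xzr, #(size + 1)  // marker; the literal offset encodes the pool size
+//   blr xzr               // crash guard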
+
+
+void Assembler::ConstantPoolGuard() {
+#ifdef DEBUG
+ // Currently this is only used after a constant pool marker.
+ ASSERT(is_const_pool_blocked());
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ ASSERT(instr->preceding()->IsLdrLiteralX() &&
+ instr->preceding()->Rt() == xzr.code());
+#endif
+ EmitPoolGuard();
+}
+
+
+void Assembler::StartBlockVeneerPool() {
+ ++veneer_pool_blocked_nesting_;
+}
+
+
+void Assembler::EndBlockVeneerPool() {
+ if (--veneer_pool_blocked_nesting_ == 0) {
+ // Check the veneer pool hasn't been blocked for too long.
+ ASSERT(unresolved_branches_.empty() ||
+ (pc_offset() < unresolved_branches_first_limit()));
+ }
+}
+
+
+void Assembler::br(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(BR | Rn(xn));
+}
+
+
+void Assembler::blr(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ // The pattern 'blr xzr' is used as a guard to detect when execution falls
+ // through the constant pool. It should not be emitted.
+ ASSERT(!xn.Is(xzr));
+ Emit(BLR | Rn(xn));
+}
+
+
+void Assembler::ret(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(RET | Rn(xn));
+}
+
+
+void Assembler::b(int imm26) {
+ Emit(B | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::b(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::b(int imm19, Condition cond) {
+ Emit(B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+void Assembler::b(Label* label, Condition cond) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label), cond);
+}
+
+
+void Assembler::bl(int imm26) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(BL | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::bl(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ bl(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::cbz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbnz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+ Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+ Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::adr(const Register& rd, int imm21) {
+ ASSERT(rd.Is64Bits());
+ Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adr(const Register& rd, Label* label) {
+ adr(rd, LinkAndGetByteOffsetTo(label));
+}
+
+
+void Assembler::add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, ADD);
+}
+
+
+void Assembler::adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, ADD);
+}
+
+
+void Assembler::cmn(const Register& rn,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ adds(zr, rn, operand);
+}
+
+
+void Assembler::sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, SUB);
+}
+
+
+void Assembler::subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, SUB);
+}
+
+
+void Assembler::cmp(const Register& rn, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ subs(zr, rn, operand);
+}
+
+
+void Assembler::neg(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sub(rd, zr, operand);
+}
+
+
+void Assembler::negs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ subs(rd, zr, operand);
+}
+
+
+void Assembler::adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void Assembler::adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void Assembler::sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void Assembler::sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void Assembler::ngc(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbc(rd, zr, operand);
+}
+
+
+void Assembler::ngcs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbcs(rd, zr, operand);
+}
+
+
+// Logical instructions.
+void Assembler::and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, AND);
+}
+
+
+void Assembler::ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ANDS);
+}
+
+
+void Assembler::tst(const Register& rn,
+ const Operand& operand) {
+ ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void Assembler::bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BIC);
+}
+
+
+void Assembler::bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BICS);
+}
+
+
+void Assembler::orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORR);
+}
+
+
+void Assembler::orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORN);
+}
+
+
+void Assembler::eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EOR);
+}
+
+
+void Assembler::eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EON);
+}
+
+
+void Assembler::lslv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::lsrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::asrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rorv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// Bitfield operations.
+void Assembler::bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | BFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.Is64Bits() || rn.Is32Bits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | SBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | UBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | EXTR | N | Rm(rm) |
+ ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSEL);
+}
+
+
+void Assembler::csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINC);
+}
+
+
+void Assembler::csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINV);
+}
+
+
+void Assembler::csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSNEG);
+}
+
+
+void Assembler::cset(const Register &rd, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinc(rd, zr, zr, NegateCondition(cond));
+}
+
+
+void Assembler::csetm(const Register &rd, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinv(rd, zr, zr, NegateCondition(cond));
+}
+
+
+void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rn, NegateCondition(cond));
+}
+
+
+void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rn, NegateCondition(cond));
+}
+
+
+void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rn, NegateCondition(cond));
+}
+
+
+void Assembler::ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMN);
+}
+
+
+void Assembler::ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMP);
+}
+
+
+void Assembler::DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op) {
+ Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MADD);
+}
+
+
+void Assembler::madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MADD);
+}
+
+
+void Assembler::mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MSUB);
+}
+
+
+void Assembler::msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MSUB);
+}
+
+
+void Assembler::smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
+}
+
+
+void Assembler::smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
+}
+
+
+void Assembler::umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
+}
+
+
+void Assembler::umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
+}
+
+
+void Assembler::smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
+}
+
+
+void Assembler::smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
+}
+
+
+void Assembler::sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rbit(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, RBIT);
+}
+
+
+void Assembler::rev16(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, REV16);
+}
+
+
+void Assembler::rev32(const Register& rd,
+ const Register& rn) {
+ ASSERT(rd.Is64Bits());
+ DataProcessing1Source(rd, rn, REV);
+}
+
+
+void Assembler::rev(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
+}
+
+
+void Assembler::clz(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLZ);
+}
+
+
+void Assembler::cls(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLS);
+}
+
+
+void Assembler::ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
+}
+
+
+void Assembler::stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
+}
+
+
+void Assembler::ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStorePair(rt, rt2, src, LDPSW_x);
+}
+
+
+void Assembler::LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // 'rt' and 'rt2' can only be aliased for stores.
+ ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+
+ Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
+
+ Instr addrmodeop;
+ if (addr.IsImmediateOffset()) {
+ addrmodeop = LoadStorePairOffsetFixed;
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ ASSERT(!rt2.Is(addr.base()));
+ ASSERT(addr.offset() != 0);
+ if (addr.IsPreIndex()) {
+ addrmodeop = LoadStorePairPreIndexFixed;
+ } else {
+ ASSERT(addr.IsPostIndex());
+ addrmodeop = LoadStorePairPostIndexFixed;
+ }
+ }
+ Emit(addrmodeop | memop);
+}
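+
+// The three addressing modes accepted above, in assembly syntax:
+//   ldp x0, x1, [sp, #16]   // immediate offset
+//   ldp x0, x1, [sp, #16]!  // pre-index: base updated before the access
+//   ldp x0, x1, [sp], #16   // post-index: base updated after the access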
+
+
+void Assembler::ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePairNonTemporal(rt, rt2, src,
+ LoadPairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePairNonTemporal(rt, rt2, dst,
+ StorePairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op) {
+ ASSERT(!rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ ASSERT(addr.IsImmediateOffset());
+
+ LSDataSize size = CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(op & LoadStorePairMask));
+ Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), size));
+}
+
+
+// Memory instructions.
+void Assembler::ldrb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRB_w);
+}
+
+
+void Assembler::strb(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRB_w);
+}
+
+
+void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
+}
+
+
+void Assembler::ldrh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRH_w);
+}
+
+
+void Assembler::strh(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRH_w);
+}
+
+
+void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
+}
+
+
+void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, LoadOpFor(rt));
+}
+
+
+void Assembler::str(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, StoreOpFor(rt));
+}
+
+
+void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStore(rt, src, LDRSW_x);
+}
+
+
+void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
+ // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
+ // constant pool. It should not be emitted.
+ ASSERT(!rt.IsZero());
+ Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
+ // Currently we only support 64-bit literals.
+ ASSERT(rt.Is64Bits());
+
+ RecordRelocInfo(imm.rmode(), imm.value());
+ BlockConstPoolFor(1);
+ // The load will be patched when the constant pool is emitted; the patching
+ // code expects a literal load with offset 0.
+ ldr_pcrel(rt, 0);
+}
+
+
+void Assembler::mov(const Register& rd, const Register& rm) {
+ // Moves involving the stack pointer are encoded as an add immediate with a
+ // second operand of zero. Otherwise, an orr with the zero register as the
+ // first operand is used.
+ if (rd.IsSP() || rm.IsSP()) {
+ add(rd, rm, 0);
+ } else {
+ orr(rd, AppropriateZeroRegFor(rd), rm);
+ }
+}
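+
+// For example, mov(x0, csp) emits 'add x0, csp, #0', while mov(x0, x1) emits
+// 'orr x0, xzr, x1'.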
+
+
+void Assembler::mvn(const Register& rd, const Operand& operand) {
+ orn(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(rt.Is64Bits());
+ Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
+}
+
+
+void Assembler::msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(rt.Is64Bits());
+ Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+}
+
+
+void Assembler::hint(SystemHint code) {
+ Emit(HINT | ImmHint(code) | Rt(xzr));
+}
+
+
+void Assembler::dmb(BarrierDomain domain, BarrierType type) {
+ Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::dsb(BarrierDomain domain, BarrierType type) {
+ Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::isb() {
+ Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+}
+
+
+void Assembler::fmov(FPRegister fd, double imm) {
+ ASSERT(fd.Is64Bits());
+ ASSERT(IsImmFP64(imm));
+ Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
+}
+
+
+void Assembler::fmov(FPRegister fd, float imm) {
+ ASSERT(fd.Is32Bits());
+ ASSERT(IsImmFP32(imm));
+ Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
+}
+
+
+void Assembler::fmov(Register rd, FPRegister fn) {
+ ASSERT(rd.SizeInBits() == fn.SizeInBits());
+ FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
+ Emit(op | Rd(rd) | Rn(fn));
+}
+
+
+void Assembler::fmov(FPRegister fd, Register rn) {
+ ASSERT(fd.SizeInBits() == rn.SizeInBits());
+ FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
+ Emit(op | Rd(fd) | Rn(rn));
+}
+
+
+void Assembler::fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
+}
+
+
+void Assembler::fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FADD);
+}
+
+
+void Assembler::fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FSUB);
+}
+
+
+void Assembler::fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMUL);
+}
+
+
+void Assembler::fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
+}
+
+
+void Assembler::fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
+}
+
+
+void Assembler::fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
+}
+
+
+void Assembler::fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
+}
+
+
+void Assembler::fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FDIV);
+}
+
+
+void Assembler::fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAX);
+}
+
+
+void Assembler::fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAXNM);
+}
+
+
+void Assembler::fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMIN);
+}
+
+
+void Assembler::fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMINNM);
+}
+
+
+void Assembler::fabs(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FABS);
+}
+
+
+void Assembler::fneg(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FNEG);
+}
+
+
+void Assembler::fsqrt(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FSQRT);
+}
+
+
+void Assembler::frinta(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTA);
+}
+
+
+void Assembler::frintm(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTM);
+}
+
+
+void Assembler::frintn(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTN);
+}
+
+
+void Assembler::frintz(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTZ);
+}
+
+
+void Assembler::fcmp(const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
+}
+
+
+void Assembler::fcmp(const FPRegister& fn,
+ double value) {
+ USE(value);
+ // Although the fcmp instruction can strictly only take an immediate value of
+ // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
+ // affect the result of the comparison.
+ ASSERT(value == 0.0);
+ Emit(FPType(fn) | FCMP_zero | Rn(fn));
+}
+
+
+void Assembler::fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
+}
+
+
+void Assembler::fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op) {
+ Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
+}
+
+
+void Assembler::fcvt(const FPRegister& fd,
+ const FPRegister& fn) {
+ if (fd.Is64Bits()) {
+ // Convert float to double.
+ ASSERT(fn.Is32Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_ds);
+ } else {
+ // Convert double to float.
+ ASSERT(fn.Is64Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_sd);
+ }
+}
+
+
+void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAU);
+}
+
+
+void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAS);
+}
+
+
+void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMU);
+}
+
+
+void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMS);
+}
+
+
+void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNU);
+}
+
+
+void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNS);
+}
+
+
+void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZU);
+}
+
+
+void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZS);
+}
+
+
+void Assembler::scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
+
+
+void Assembler::ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
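+
+
+// Illustrative note (example values are hypothetical): with a non-zero fbits,
+// scvtf/ucvtf treat the integer source as a fixed-point value with fbits
+// fractional bits, i.e. the result is rn / 2^fbits. For instance, if x1 holds
+// 0x18000, then scvtf(d0, x1, 16) produces d0 == 0x18000 / 2^16 == 1.5.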
+
+
+// Note:
+// Below, a difference in case for the same letter indicates a
+// negated bit.
+// If b is 1, then B is 0.
+Instr Assembler::ImmFP32(float imm) {
+ ASSERT(IsImmFP32(imm));
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
+
+
+Instr Assembler::ImmFP64(double imm) {
+ ASSERT(IsImmFP64(imm));
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
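+
+
+// Worked example for the encoding above (assuming IEEE-754 single precision):
+// 1.0f has raw bits 0x3f800000, so bit7 = 0, bit6 = 1 and bit5_to_0 = 0x30,
+// giving imm8 = 0x70, the standard AArch64 FP8 encoding of +1.0.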
+
+
+// Code generation helpers.
+void Assembler::MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op) {
+ if (shift >= 0) {
+ // Explicit shift specified.
+ ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
+ ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+ shift /= 16;
+ } else {
+ // Calculate a new immediate and shift combination to encode the immediate
+ // argument.
+ shift = 0;
+ if ((imm & ~0xffffUL) == 0) {
+ // Nothing to do.
+ } else if ((imm & ~(0xffffUL << 16)) == 0) {
+ imm >>= 16;
+ shift = 1;
+ } else if ((imm & ~(0xffffUL << 32)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 32;
+ shift = 2;
+ } else if ((imm & ~(0xffffUL << 48)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 48;
+ shift = 3;
+ }
+ }
+
+ ASSERT(is_uint16(imm));
+
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
+ Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+}
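+
+
+// Illustrative example (hypothetical immediate): when no explicit shift is
+// given (shift < 0), MoveWide picks the halfword lane itself. An immediate of
+// 0x12340000 is thus encoded as imm16 = 0x1234 with shift field 1 (LSL #16),
+// while 0x1234 is encoded with shift field 0.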
+
+
+void Assembler::AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation(this));
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.ImmediateValue();
+ ASSERT(IsImmAddSub(immediate));
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
+ ImmAddSub(immediate) | dest_reg | RnSP(rn));
+ } else if (operand.IsShiftedRegister()) {
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+
+ // For instructions of the form:
+ // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
+ // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
+ // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // or their 64-bit register equivalents, convert the operand from shifted to
+ // extended register mode, and emit an add/sub extended instruction.
+ if (rn.IsSP() || rd.IsSP()) {
+ ASSERT(!(rd.IsSP() && (S == SetFlags)));
+ DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
+ AddSubExtendedFixed | op);
+ } else {
+ DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
+ }
+ } else {
+ ASSERT(operand.IsExtendedRegister());
+ DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
+ }
+}
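+
+
+// Illustrative example (hypothetical registers): the stack pointer cannot be
+// encoded in the shifted-register form, so an operation such as
+// add(csp, csp, Operand(x1, LSL, 2)) is converted above, via
+// Operand::ToExtendedRegister(), into the equivalent extended-register form
+// add csp, csp, x1, UXTX #2 before being emitted.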
+
+
+void Assembler::AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ASSERT(!operand.NeedsRelocation(this));
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::hlt(int code) {
+ ASSERT(is_uint16(code));
+ Emit(HLT | ImmException(code));
+}
+
+
+void Assembler::brk(int code) {
+ ASSERT(is_uint16(code));
+ Emit(BRK | ImmException(code));
+}
+
+
+void Assembler::debug(const char* message, uint32_t code, Instr params) {
+#ifdef USE_SIMULATOR
+  // Don't generate simulator-specific code if we are building a snapshot,
+  // which might be run on real hardware.
+ if (!serializer_enabled()) {
+ // The arguments to the debug marker need to be contiguous in memory, so
+ // make sure we don't try to emit pools.
+ BlockPoolsScope scope(this);
+
+ Label start;
+ bind(&start);
+
+ // Refer to instructions-arm64.h for a description of the marker and its
+ // arguments.
+ hlt(kImmExceptionIsDebug);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
+ dc32(code);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
+ dc32(params);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
+ EmitStringData(message);
+ hlt(kImmExceptionIsUnreachable);
+
+ return;
+ }
+ // Fall through if Serializer is enabled.
+#endif
+
+ if (params & BREAK) {
+ hlt(kImmExceptionIsDebug);
+ }
+}
+
+
+void Assembler::Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation(this));
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.ImmediateValue();
+ unsigned reg_size = rd.SizeInBits();
+
+ ASSERT(immediate != 0);
+ ASSERT(immediate != -1);
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+    // If the operation includes NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else {
+ ASSERT(operand.IsShiftedRegister());
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
+ DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
+ }
+}
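+
+
+// Illustrative example (hypothetical operands): bic is AND with the NOT bit
+// set, so a 32-bit bic(w0, w1, Operand(0xffff0000)) takes the inverted
+// immediate path above and is emitted as and(w0, w1, #0x0000ffff), with the
+// immediate complemented within the W register width (kWRegMask).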
+
+
+void Assembler::LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op) {
+ unsigned reg_size = rd.SizeInBits();
+ Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
+ ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
+ Rn(rn));
+}
+
+
+void Assembler::ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ Instr ccmpop;
+ ASSERT(!operand.NeedsRelocation(this));
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.ImmediateValue();
+ ASSERT(IsImmConditionalCompare(immediate));
+ ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
+ } else {
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
+ }
+ Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
+}
+
+
+void Assembler::DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Emit(SF(rn) | op | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op) {
+ Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op) {
+ ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
+}
+
+
+void Assembler::EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned shift_amount) {
+ switch (shift) {
+ case LSL:
+ lsl(rd, rn, shift_amount);
+ break;
+ case LSR:
+ lsr(rd, rn, shift_amount);
+ break;
+ case ASR:
+ asr(rd, rn, shift_amount);
+ break;
+ case ROR:
+ ror(rd, rn, shift_amount);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Assembler::EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift) {
+ ASSERT(rd.SizeInBits() >= rn.SizeInBits());
+ unsigned reg_size = rd.SizeInBits();
+ // Use the correct size of register.
+ Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
+ // Bits extracted are high_bit:0.
+ unsigned high_bit = (8 << (extend & 0x3)) - 1;
+ // Number of bits left in the result that are not introduced by the shift.
+ unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
+
+ if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
+ switch (extend) {
+ case UXTB:
+ case UXTH:
+ case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
+ case SXTB:
+ case SXTH:
+ case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
+ case UXTX:
+ case SXTX: {
+ ASSERT(rn.SizeInBits() == kXRegSizeInBits);
+ // Nothing to extend. Just shift.
+ lsl(rd, rn_, left_shift);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ } else {
+ // No need to extend as the extended bits would be shifted away.
+ lsl(rd, rn_, left_shift);
+ }
+}
+
+
+void Assembler::DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(operand.IsShiftedRegister());
+ ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
+ ASSERT(!operand.NeedsRelocation(this));
+ Emit(SF(rd) | op | Flags(S) |
+ ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
+ Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(!operand.NeedsRelocation(this));
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
+ ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
+ dest_reg | RnSP(rn));
+}
+
+
+bool Assembler::IsImmAddSub(int64_t immediate) {
+ return is_uint12(immediate) ||
+ (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+}
+
+
+void Assembler::LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ Instr memop = op | Rt(rt) | RnSP(addr.base());
+ ptrdiff_t offset = addr.offset();
+
+ if (addr.IsImmediateOffset()) {
+ LSDataSize size = CalcLSDataSize(op);
+ if (IsImmLSScaled(offset, size)) {
+ // Use the scaled addressing mode.
+ Emit(LoadStoreUnsignedOffsetFixed | memop |
+ ImmLSUnsigned(offset >> size));
+ } else if (IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else if (addr.IsRegisterOffset()) {
+ Extend ext = addr.extend();
+ Shift shift = addr.shift();
+ unsigned shift_amount = addr.shift_amount();
+
+ // LSL is encoded in the option field as UXTX.
+ if (shift == LSL) {
+ ext = UXTX;
+ }
+
+ // Shifts are encoded in one bit, indicating a left shift by the memory
+ // access size.
+ ASSERT((shift_amount == 0) ||
+ (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
+ Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
+ ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ if (IsImmLSUnscaled(offset)) {
+ if (addr.IsPreIndex()) {
+ Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
+ } else {
+ ASSERT(addr.IsPostIndex());
+ Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
+ }
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ }
+}
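+
+
+// Illustrative examples of the mode selection above, for a 64-bit load
+// (access size 8, so CalcLSDataSize == 3; operands are hypothetical):
+//   MemOperand(x1, 32)           -> scaled unsigned offset, ImmLSUnsigned(4)
+//   MemOperand(x1, 33)           -> unscaled offset, ImmLS(33)
+//   MemOperand(x1, x2, LSL, 3)   -> register offset, LSL encoded as UXTX
+//   MemOperand(x1, 16, PreIndex) -> pre-index, ImmLS(16)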
+
+
+bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
+ return is_int9(offset);
+}
+
+
+bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
+ bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ return offset_is_size_multiple && is_uint12(offset >> size);
+}
+
+
+// Test if a given value can be encoded in the immediate field of a logical
+// instruction.
+// If it can be encoded, the function returns true, and values pointed to by n,
+// imm_s and imm_r are updated with immediates encoded in the format required
+// by the corresponding fields in the logical instruction.
+// If it cannot be encoded, the function returns false, and the values pointed
+// to by n, imm_s and imm_r are undefined.
+bool Assembler::IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r) {
+ ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
+ ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
+
+ // Logical immediates are encoded using parameters n, imm_s and imm_r using
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+  // To test if an arbitrary immediate can be encoded using this scheme, an
+ // iterative algorithm is used.
+ //
+ // TODO(mcapewel) This code does not consider using X/W register overlap to
+ // support 64-bit immediates where the top 32-bits are zero, and the bottom
+ // 32-bits are an encodable logical immediate.
+
+ // 1. If the value has all set or all clear bits, it can't be encoded.
+ if ((value == 0) || (value == 0xffffffffffffffffUL) ||
+ ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
+ return false;
+ }
+
+ unsigned lead_zero = CountLeadingZeros(value, width);
+ unsigned lead_one = CountLeadingZeros(~value, width);
+ unsigned trail_zero = CountTrailingZeros(value, width);
+ unsigned trail_one = CountTrailingZeros(~value, width);
+ unsigned set_bits = CountSetBits(value, width);
+
+ // The fixed bits in the immediate s field.
+ // If width == 64 (X reg), start at 0xFFFFFF80.
+ // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
+ // widths won't be executed.
+ int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
+ int imm_s_mask = 0x3F;
+
+ for (;;) {
+ // 2. If the value is two bits wide, it can be encoded.
+ if (width == 2) {
+ *n = 0;
+ *imm_s = 0x3C;
+ *imm_r = (value & 3) - 1;
+ return true;
+ }
+
+ *n = (width == 64) ? 1 : 0;
+ *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
+ if ((lead_zero + set_bits) == width) {
+ *imm_r = 0;
+ } else {
+ *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
+ }
+
+ // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
+ // the bit width of the value, it can be encoded.
+ if (lead_zero + trail_zero + set_bits == width) {
+ return true;
+ }
+
+ // 4. If the sum of leading ones, trailing ones and unset bits in the
+ // value is equal to the bit width of the value, it can be encoded.
+ if (lead_one + trail_one + (width - set_bits) == width) {
+ return true;
+ }
+
+ // 5. If the most-significant half of the bitwise value is equal to the
+ // least-significant half, return to step 2 using the least-significant
+ // half of the value.
+ uint64_t mask = (1UL << (width >> 1)) - 1;
+ if ((value & mask) == ((value >> (width >> 1)) & mask)) {
+ width >>= 1;
+ set_bits >>= 1;
+ imm_s_fixed >>= 1;
+ continue;
+ }
+
+ // 6. Otherwise, the value can't be encoded.
+ return false;
+ }
+}
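+
+
+// Worked example (hypothetical value): 0x00ff00ff00ff00ff is the 16-bit
+// pattern 0x00ff repeated four times, so the halving loop above reduces width
+// from 64 to 16. Its eight set bits and zero rotation then encode as n = 0,
+// imm_s = 0b100111 (size 16, S = 7) and imm_r = 0.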
+
+
+bool Assembler::IsImmConditionalCompare(int64_t immediate) {
+ return is_uint5(immediate);
+}
+
+
+bool Assembler::IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+  // bits[18..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool Assembler::IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0xffffffffffffL) != 0) {
+ return false;
+ }
+
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if (b_pattern != 0 && b_pattern != 0x3fc0) {
+ return false;
+ }
+
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
+ return false;
+ }
+
+ return true;
+}
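+
+
+// For reference, the checks above accept exactly the AArch64 FP8 immediate
+// range: values of the form +/- (16 + x) / 16 * 2^n with integer x in [0, 15]
+// and n in [-3, 4], e.g. 0.125, 0.5, 1.0, 1.5 and 31.0.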
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4 * KB) {
+ desc.buffer_size = 4 * KB;
+ } else if (buffer_size_ < 1 * MB) {
+ desc.buffer_size = 2 * buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1 * MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ byte* buffer = reinterpret_cast<byte*>(buffer_);
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
+
+ // Copy the data.
+ intptr_t pc_delta = desc.buffer - buffer;
+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+ (buffer + buffer_size_);
+ memmove(desc.buffer, buffer, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // Switch buffers.
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+  // None of our relocation types are pc-relative pointing outside the code
+  // buffer, nor pc-absolute pointing inside it, so there is no need to
+  // relocate any emitted relocation entries.
+
+ // Relocate pending relocation entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION);
+ if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+ }
+}
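+
+
+// Illustrative growth sequence implied by the policy above: a buffer grows
+// 4KB -> 8KB -> 16KB -> ... -> 1MB by doubling, then linearly in 1MB steps,
+// which avoids over-allocating for very large code objects.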
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ if (((rmode >= RelocInfo::JS_RETURN) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ (rmode == RelocInfo::CONST_POOL) ||
+ (rmode == RelocInfo::VENEER_POOL)) {
+ // Adjust code for new modes.
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode)
+ || RelocInfo::IsConstPool(rmode)
+ || RelocInfo::IsVeneerPool(rmode));
+ // These modes do not need an entry in the constant pool.
+ } else {
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+ }
+
+ if (!RelocInfo::IsNone(rmode)) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(
+ reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
+ ClearRecordedAstId();
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
+ }
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ int pc_limit = pc_offset() + instructions * kInstructionSize;
+ if (no_const_pool_before_ < pc_limit) {
+ // If there are some pending entries, the constant pool cannot be blocked
+    // further than first_const_pool_use_ + kMaxDistToConstPool.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
+ no_const_pool_before_ = pc_limit;
+ }
+
+ if (next_constant_pool_check_ < no_const_pool_before_) {
+ next_constant_pool_check_ = no_const_pool_before_;
+ }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+  // Some short sequences of instructions mustn't be broken up by constant pool
+  // emission; such sequences are protected by calls to BlockConstPoolFor and
+  // BlockConstPoolScope.
+ if (is_const_pool_blocked()) {
+ // Something is wrong if emission is forced and blocked at the same time.
+ ASSERT(!force_emit);
+ return;
+ }
+
+ // There is nothing to do if there are no pending constant pool entries.
+ if (num_pending_reloc_info_ == 0) {
+ // Calculate the offset of the next check.
+ next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
+ return;
+ }
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance to the first instruction accessing the constant pool is
+ // kAvgDistToConstPool or more.
+ // * no jump is required and the distance to the first instruction accessing
+  //    the constant pool is at least kMaxDistToConstPool / 2.
+ ASSERT(first_const_pool_use_ >= 0);
+ int dist = pc_offset() - first_const_pool_use_;
+ if (!force_emit && dist < kAvgDistToConstPool &&
+ (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
+ return;
+ }
+
+ int jump_instr = require_jump ? kInstructionSize : 0;
+ int size_pool_marker = kInstructionSize;
+ int size_pool_guard = kInstructionSize;
+ int pool_size = jump_instr + size_pool_marker + size_pool_guard +
+ num_pending_reloc_info_ * kPointerSize;
+ int needed_space = pool_size + kGap;
+
+ // Emit veneers for branches that would go out of range during emission of the
+ // constant pool.
+ CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size);
+
+ Label size_check;
+ bind(&size_check);
+
+  // Check that the code buffer is large enough before emitting the constant
+  // pool (this includes the jump over the pool, the constant pool marker, the
+  // constant pool guard, and the gap to the relocation information).
+ while (buffer_space() <= needed_space) {
+ GrowBuffer();
+ }
+
+ {
+ // Block recursive calls to CheckConstPool and protect from veneer pools.
+ BlockPoolsScope block_pools(this);
+ RecordConstPool(pool_size);
+
+ // Emit jump over constant pool if necessary.
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
+ }
+
+ // Emit a constant pool header. The header has two goals:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+ // The header is therefore made of two arm64 instructions:
+ // ldr xzr, #<size of the constant pool in 32-bit words>
+ // blr xzr
+    // If executed, the code will likely segfault and lr will point to the
+ // beginning of the constant pool.
+ // TODO(all): currently each relocated constant is 64 bits, consider adding
+ // support for 32-bit entries.
+ RecordComment("[ Constant Pool");
+ ConstantPoolMarker(2 * num_pending_reloc_info_);
+ ConstantPoolGuard();
+
+ // Emit constant pool entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
+ rinfo.rmode() != RelocInfo::CONST_POOL &&
+ rinfo.rmode() != RelocInfo::VENEER_POOL);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(instr->IsLdrLiteral() &&
+ instr->ImmLLiteral() == 0);
+
+ instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ dc64(rinfo.data());
+ }
+
+ num_pending_reloc_info_ = 0;
+ first_const_pool_use_ = -1;
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
+
+ ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
+ static_cast<unsigned>(pool_size));
+}
+
+
+bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+ // Account for the branch around the veneers and the guard.
+ int protection_offset = 2 * kInstructionSize;
+ return pc_offset() > max_reachable_pc - margin - protection_offset -
+ static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+}
+
+
+void Assembler::RecordVeneerPool(int location_offset, int size) {
+ RelocInfo rinfo(buffer_ + location_offset,
+ RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
+ NULL);
+ reloc_info_writer.Write(&rinfo);
+}
+
+
+void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
+ BlockPoolsScope scope(this);
+ RecordComment("[ Veneers");
+
+ // The exact size of the veneer pool must be recorded (see the comment at the
+ // declaration site of RecordConstPool()), but computing the number of
+ // veneers that will be generated is not obvious. So instead we remember the
+ // current position and will record the size after the pool has been
+ // generated.
+ Label size_check;
+ bind(&size_check);
+ int veneer_pool_relocinfo_loc = pc_offset();
+
+ Label end;
+ if (need_protection) {
+ b(&end);
+ }
+
+ EmitVeneersGuard();
+
+ Label veneer_size_check;
+
+ std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
+
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ if (force_emit || ShouldEmitVeneer(it->first, margin)) {
+ Instruction* branch = InstructionAt(it->second.pc_offset_);
+ Label* label = it->second.label_;
+
+#ifdef DEBUG
+ bind(&veneer_size_check);
+#endif
+ // Patch the branch to point to the current position, and emit a branch
+ // to the label.
+ Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
+ RemoveBranchFromLabelLinkChain(branch, label, veneer);
+ branch->SetImmPCOffsetTarget(veneer);
+ b(label);
+#ifdef DEBUG
+ ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <=
+ static_cast<uint64_t>(kMaxVeneerCodeSize));
+ veneer_size_check.Unuse();
+#endif
+
+ it_to_delete = it++;
+ unresolved_branches_.erase(it_to_delete);
+ } else {
+ ++it;
+ }
+ }
+
+ // Record the veneer pool size.
+ int pool_size = SizeOfCodeGeneratedSince(&size_check);
+ RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
+
+ if (unresolved_branches_.empty()) {
+ next_veneer_pool_check_ = kMaxInt;
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+
+ bind(&end);
+
+ RecordComment("]");
+}
+
+
+void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
+ int margin) {
+ // There is nothing to do if there are no pending veneer pool entries.
+ if (unresolved_branches_.empty()) {
+ ASSERT(next_veneer_pool_check_ == kMaxInt);
+ return;
+ }
+
+ ASSERT(pc_offset() < unresolved_branches_first_limit());
+
+  // Some short sequences of instructions mustn't be broken up by veneer pool
+  // emission; such sequences are protected by calls to BlockVeneerPoolFor and
+  // BlockVeneerPoolScope.
+ if (is_veneer_pool_blocked()) {
+ ASSERT(!force_emit);
+ return;
+ }
+
+ if (!require_jump) {
+ // Prefer emitting veneers protected by an existing instruction.
+ margin *= kVeneerNoProtectionFactor;
+ }
+ if (force_emit || ShouldEmitVeneers(margin)) {
+ EmitVeneers(force_emit, require_jump, margin);
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_code_comments) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+int Assembler::buffer_space() const {
+ return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordConstPool(int size) {
+ // We only need this for debugger support, to correctly compute offsets in the
+ // code.
+ RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+}
+
+
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
+}
+
+
+void PatchingAssembler::MovInt64(const Register& rd, int64_t imm) {
+ Label start;
+ bind(&start);
+
+ ASSERT(rd.Is64Bits());
+ ASSERT(!rd.IsSP());
+
+ for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+ movk(rd, imm16, 16 * i);
+ }
+
+ ASSERT(SizeOfCodeGeneratedSince(&start) ==
+ kMovInt64NInstrs * kInstructionSize);
+}
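+
+
+// Note on MovInt64 above: every halfword lane is written with movk (there is
+// no initial movz), so the sequence is always exactly kMovInt64NInstrs
+// instructions long. A fixed length is what makes it patchable in place, and
+// the four movk together still define all 64 bits of rd regardless of its
+// previous contents.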
+
+
+void PatchingAssembler::PatchAdrFar(Instruction* target) {
+ // The code at the current instruction should be:
+ // adr rd, 0
+ // nop (adr_far)
+ // nop (adr_far)
+ // nop (adr_far)
+ // movz scratch, 0
+ // add rd, rd, scratch
+
+ // Verify the expected code.
+ Instruction* expected_adr = InstructionAt(0);
+ CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
+ int rd_code = expected_adr->Rd();
+ for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
+ CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
+ }
+ Instruction* expected_movz =
+ InstructionAt((kAdrFarPatchableNInstrs - 2) * kInstructionSize);
+ CHECK(expected_movz->IsMovz() &&
+ (expected_movz->ImmMoveWide() == 0) &&
+ (expected_movz->ShiftMoveWide() == 0));
+ int scratch_code = expected_movz->Rd();
+ Instruction* expected_add =
+ InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
+ CHECK(expected_add->IsAddSubShifted() &&
+ (expected_add->Mask(AddSubOpMask) == ADD) &&
+ expected_add->SixtyFourBits() &&
+ (expected_add->Rd() == rd_code) && (expected_add->Rn() == rd_code) &&
+ (expected_add->Rm() == scratch_code) &&
+ (static_cast<Shift>(expected_add->ShiftDP()) == LSL) &&
+ (expected_add->ImmDPShift() == 0));
+
+ // Patch to load the correct address.
+ Label start;
+ bind(&start);
+ Register rd = Register::XRegFromCode(rd_code);
+ // If the target is in range, we only patch the adr. Otherwise we patch the
+ // nops with fixup instructions.
+ int target_offset = expected_adr->DistanceTo(target);
+ if (Instruction::IsValidPCRelOffset(target_offset)) {
+ adr(rd, target_offset);
+ for (int i = 0; i < kAdrFarPatchableNInstrs - 2; ++i) {
+ nop(ADR_FAR_NOP);
+ }
+ } else {
+ Register scratch = Register::XRegFromCode(scratch_code);
+ adr(rd, 0);
+ MovInt64(scratch, target_offset);
+ add(rd, rd, scratch);
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/assembler-arm64.h b/chromium/v8/src/arm64/assembler-arm64.h
new file mode 100644
index 00000000000..c0ad4d053b1
--- /dev/null
+++ b/chromium/v8/src/arm64/assembler-arm64.h
@@ -0,0 +1,2256 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
+#define V8_ARM64_ASSEMBLER_ARM64_H_
+
+#include <list>
+#include <map>
+
+#include "src/cpu.h"
+#include "src/globals.h"
+#include "src/utils.h"
+#include "src/assembler.h"
+#include "src/serialize.h"
+#include "src/arm64/instructions-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Registers.
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+
+static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+
+
+// Some CPURegister methods can return Register and FPRegister types, so we
+// need to declare them in advance.
+struct Register;
+struct FPRegister;
+
+
+struct CPURegister {
+ enum RegisterType {
+ // The kInvalid value is used to detect uninitialized static instances,
+ // which are always zero-initialized before any constructors are called.
+ kInvalid = 0,
+ kRegister,
+ kFPRegister,
+ kNoRegister
+ };
+
+ static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
+ CPURegister r = {code, size, type};
+ return r;
+ }
+
+ unsigned code() const;
+ RegisterType type() const;
+ RegList Bit() const;
+ unsigned SizeInBits() const;
+ int SizeInBytes() const;
+ bool Is32Bits() const;
+ bool Is64Bits() const;
+ bool IsValid() const;
+ bool IsValidOrNone() const;
+ bool IsValidRegister() const;
+ bool IsValidFPRegister() const;
+ bool IsNone() const;
+ bool Is(const CPURegister& other) const;
+ bool Aliases(const CPURegister& other) const;
+
+ bool IsZero() const;
+ bool IsSP() const;
+
+ bool IsRegister() const;
+ bool IsFPRegister() const;
+
+ Register X() const;
+ Register W() const;
+ FPRegister D() const;
+ FPRegister S() const;
+
+ bool IsSameSizeAndType(const CPURegister& other) const;
+
+ // V8 compatibility.
+ bool is(const CPURegister& other) const { return Is(other); }
+ bool is_valid() const { return IsValid(); }
+
+ unsigned reg_code;
+ unsigned reg_size;
+ RegisterType reg_type;
+};
+
+
+struct Register : public CPURegister {
+ static Register Create(unsigned code, unsigned size) {
+ return Register(CPURegister::Create(code, size, CPURegister::kRegister));
+ }
+
+ Register() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ explicit Register(const CPURegister& r) {
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ Register(const Register& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsRegister() || IsNone());
+ return IsValidRegister();
+ }
+
+ static Register XRegFromCode(unsigned code);
+ static Register WRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+  // These members are necessary for compilation.
+ // A few of them may be unused for now.
+
+ static const int kNumRegisters = kNumberOfRegisters;
+ static int NumRegisters() { return kNumRegisters; }
+
+ // We allow crankshaft to use the following registers:
+ // - x0 to x15
+ // - x18 to x24
+ // - x27 (also context)
+ //
+ // TODO(all): Register x25 is currently free and could be available for
+ // crankshaft, but we don't use it as we might use it as a per function
+ // literal pool pointer in the future.
+ //
+ // TODO(all): Consider storing cp in x25 to have only two ranges.
+ // We split allocatable registers in three ranges called
+ // - "low range"
+ // - "high range"
+ // - "context"
+ static const unsigned kAllocatableLowRangeBegin = 0;
+ static const unsigned kAllocatableLowRangeEnd = 15;
+ static const unsigned kAllocatableHighRangeBegin = 18;
+ static const unsigned kAllocatableHighRangeEnd = 24;
+ static const unsigned kAllocatableContext = 27;
+
+ // Gap between low and high ranges.
+ static const int kAllocatableRangeGapSize =
+ (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
+
+ static const int kMaxNumAllocatableRegisters =
+ (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
+ (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ // Return true if the register is one that crankshaft can allocate.
+ bool IsAllocatable() const {
+ return ((reg_code == kAllocatableContext) ||
+ (reg_code <= kAllocatableLowRangeEnd) ||
+ ((reg_code >= kAllocatableHighRangeBegin) &&
+ (reg_code <= kAllocatableHighRangeEnd)));
+ }
+
+ static Register FromAllocationIndex(unsigned index) {
+ ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+ // cp is the last allocatable register.
+ if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
+ return from_code(kAllocatableContext);
+ }
+
+ // Handle low and high ranges.
+ return (index <= kAllocatableLowRangeEnd)
+ ? from_code(index)
+ : from_code(index + kAllocatableRangeGapSize);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ ASSERT((kAllocatableLowRangeBegin == 0) &&
+ (kAllocatableLowRangeEnd == 15) &&
+ (kAllocatableHighRangeBegin == 18) &&
+ (kAllocatableHighRangeEnd == 24) &&
+ (kAllocatableContext == 27));
+ const char* const names[] = {
+ "x0", "x1", "x2", "x3", "x4",
+ "x5", "x6", "x7", "x8", "x9",
+ "x10", "x11", "x12", "x13", "x14",
+ "x15", "x18", "x19", "x20", "x21",
+ "x22", "x23", "x24", "x27",
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(Register reg) {
+ ASSERT(reg.IsAllocatable());
+ unsigned code = reg.code();
+ if (code == kAllocatableContext) {
+ return NumAllocatableRegisters() - 1;
+ }
+
+ return (code <= kAllocatableLowRangeEnd)
+ ? code
+ : code - kAllocatableRangeGapSize;
+ }
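+
+  // Illustrative mapping implied by the constants above: allocation indices
+  // 0-15 name x0-x15, indices 16-22 name x18-x24 (x16 and x17 are skipped
+  // via kAllocatableRangeGapSize == 2), and the last index, 23, names the
+  // context register x27.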
+
+ static Register from_code(int code) {
+ // Always return an X register.
+ return Register::Create(code, kXRegSizeInBits);
+ }
+
+ // End of V8 compatibility section -----------------------
+};
+
+
+struct FPRegister : public CPURegister {
+ static FPRegister Create(unsigned code, unsigned size) {
+ return FPRegister(
+ CPURegister::Create(code, size, CPURegister::kFPRegister));
+ }
+
+ FPRegister() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ explicit FPRegister(const CPURegister& r) {
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsFPRegister() || IsNone());
+ return IsValidFPRegister();
+ }
+
+ static FPRegister SRegFromCode(unsigned code);
+ static FPRegister DRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+ static const int kMaxNumRegisters = kNumberOfFPRegisters;
+
+ // Crankshaft can use all the FP registers except:
+ // - d15 which is used to keep the 0 double value
+ // - d30 which is used in crankshaft as a double scratch register
+ // - d31 which is used in the MacroAssembler as a double scratch register
+ static const unsigned kAllocatableLowRangeBegin = 0;
+ static const unsigned kAllocatableLowRangeEnd = 14;
+ static const unsigned kAllocatableHighRangeBegin = 16;
+ static const unsigned kAllocatableHighRangeEnd = 28;
+
+ static const RegList kAllocatableFPRegisters = 0x1fff7fff;
+
+ // Gap between low and high ranges.
+ static const int kAllocatableRangeGapSize =
+ (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
+
+ static const int kMaxNumAllocatableRegisters =
+ (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
+ (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ // Return true if the register is one that crankshaft can allocate.
+ bool IsAllocatable() const {
+ return (Bit() & kAllocatableFPRegisters) != 0;
+ }
+
+ static FPRegister FromAllocationIndex(unsigned int index) {
+ ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+
+ return (index <= kAllocatableLowRangeEnd)
+ ? from_code(index)
+ : from_code(index + kAllocatableRangeGapSize);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ ASSERT((kAllocatableLowRangeBegin == 0) &&
+ (kAllocatableLowRangeEnd == 14) &&
+ (kAllocatableHighRangeBegin == 16) &&
+ (kAllocatableHighRangeEnd == 28));
+ const char* const names[] = {
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28"
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(FPRegister reg) {
+ ASSERT(reg.IsAllocatable());
+ unsigned code = reg.code();
+
+ return (code <= kAllocatableLowRangeEnd)
+ ? code
+ : code - kAllocatableRangeGapSize;
+ }
+
+ static FPRegister from_code(int code) {
+ // Always return a D register.
+ return FPRegister::Create(code, kDRegSizeInBits);
+ }
+ // End of V8 compatibility section -----------------------
+};
+
+
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
+
+
+#if defined(ARM64_DEFINE_REG_STATICS)
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ const CPURegister init_##register_class##_##name = {code, size, type}; \
+ const register_class& name = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#define ALIAS_REGISTER(register_class, alias, name) \
+ const register_class& alias = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#else
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ extern const register_class& name
+#define ALIAS_REGISTER(register_class, alias, name) \
+ extern const register_class& alias
+#endif // defined(ARM64_DEFINE_REG_STATICS)
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and FPRegister
+// variants are provided for convenience.
+INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
+
+// v8 compatibility.
+INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
+
+#define DEFINE_REGISTERS(N) \
+ INITIALIZE_REGISTER(Register, w##N, N, \
+ kWRegSizeInBits, CPURegister::kRegister); \
+ INITIALIZE_REGISTER(Register, x##N, N, \
+ kXRegSizeInBits, CPURegister::kRegister);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+
+INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
+ CPURegister::kRegister);
+INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
+ CPURegister::kRegister);
+
+#define DEFINE_FPREGISTERS(N) \
+ INITIALIZE_REGISTER(FPRegister, s##N, N, \
+ kSRegSizeInBits, CPURegister::kFPRegister); \
+ INITIALIZE_REGISTER(FPRegister, d##N, N, \
+ kDRegSizeInBits, CPURegister::kFPRegister);
+REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
+#undef DEFINE_FPREGISTERS
+
+#undef INITIALIZE_REGISTER
+
+// Register aliases.
+ALIAS_REGISTER(Register, ip0, x16);
+ALIAS_REGISTER(Register, ip1, x17);
+ALIAS_REGISTER(Register, wip0, w16);
+ALIAS_REGISTER(Register, wip1, w17);
+// Root register.
+ALIAS_REGISTER(Register, root, x26);
+ALIAS_REGISTER(Register, rr, x26);
+// Context pointer register.
+ALIAS_REGISTER(Register, cp, x27);
+// We use a register as a JS stack pointer to overcome the restriction on the
+// architectural SP alignment.
+// We chose x28 because it is contiguous with the other special-purpose
+// registers.
+STATIC_ASSERT(kJSSPCode == 28);
+ALIAS_REGISTER(Register, jssp, x28);
+ALIAS_REGISTER(Register, wjssp, w28);
+ALIAS_REGISTER(Register, fp, x29);
+ALIAS_REGISTER(Register, lr, x30);
+ALIAS_REGISTER(Register, xzr, x31);
+ALIAS_REGISTER(Register, wzr, w31);
+
+// Keeps the 0 double value.
+ALIAS_REGISTER(FPRegister, fp_zero, d15);
+// Crankshaft double scratch register.
+ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
+// MacroAssembler double scratch registers.
+ALIAS_REGISTER(FPRegister, fp_scratch, d30);
+ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
+ALIAS_REGISTER(FPRegister, fp_scratch2, d31);
+
+#undef ALIAS_REGISTER
+
+
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = NoReg,
+ Register reg3 = NoReg,
+ Register reg4 = NoReg);
+
+
+// AreAliased returns true if any of the named registers overlap. Arguments set
+// to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg,
+ const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg,
+ const CPURegister& reg8 = NoCPUReg);
+
+
+typedef FPRegister DoubleRegister;
+
+
+// -----------------------------------------------------------------------------
+// Lists of registers.
+class CPURegList {
+ public:
+ explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
+ : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+ size_(reg1.SizeInBits()), type_(reg1.type()) {
+ ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
+ : size_(size), type_(type) {
+ ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kFPRegister) &&
+ (last_reg < kNumberOfFPRegisters)));
+ ASSERT(last_reg >= first_reg);
+ list_ = (1UL << (last_reg + 1)) - 1;
+ list_ &= ~((1UL << first_reg) - 1);
+ ASSERT(IsValid());
+ }
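+
+  // Illustrative example (hypothetical arguments): a list built from
+  // first_reg == 19 and last_reg == 21 computes
+  // list_ = ((1 << 22) - 1) & ~((1 << 19) - 1), i.e. bits 19-21 set,
+  // selecting {x19, x20, x21} for the kRegister type.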
+
+ CPURegister::RegisterType type() const {
+ ASSERT(IsValid());
+ return type_;
+ }
+
+ RegList list() const {
+ ASSERT(IsValid());
+ return list_;
+ }
+
+ inline void set_list(RegList new_list) {
+ ASSERT(IsValid());
+ list_ = new_list;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other);
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type of the registers in the
+ // 'other' list must match those in this list.
+ void Remove(const CPURegList& other);
+
+ // Variants of Combine and Remove which take CPURegisters.
+ void Combine(const CPURegister& other);
+ void Remove(const CPURegister& other1,
+ const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg);
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register is inferred from this list.
+ void Combine(int code);
+ void Remove(int code);
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
+ static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
+ static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
+
+  // Registers saved at safepoints.
+ static CPURegList GetSafepointSavedRegisters();
+
+ bool IsEmpty() const {
+ ASSERT(IsValid());
+ return list_ == 0;
+ }
+
+ bool IncludesAliasOf(const CPURegister& other1,
+ const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg) const {
+ ASSERT(IsValid());
+ RegList list = 0;
+ if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
+ if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
+ if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
+ if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
+ return (list_ & list) != 0;
+ }
+
+ int Count() const {
+ ASSERT(IsValid());
+ return CountSetBits(list_, kRegListSizeInBits);
+ }
+
+ unsigned RegisterSizeInBits() const {
+ ASSERT(IsValid());
+ return size_;
+ }
+
+ unsigned RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ ASSERT((size_in_bits % kBitsPerByte) == 0);
+ return size_in_bits / kBitsPerByte;
+ }
+
+ unsigned TotalSizeInBytes() const {
+ ASSERT(IsValid());
+ return RegisterSizeInBytes() * Count();
+ }
+
+ private:
+ RegList list_;
+ unsigned size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const {
+ const RegList kValidRegisters = 0x8000000ffffffff;
+ const RegList kValidFPRegisters = 0x0000000ffffffff;
+ switch (type_) {
+ case CPURegister::kRegister:
+ return (list_ & kValidRegisters) == list_;
+ case CPURegister::kFPRegister:
+ return (list_ & kValidFPRegisters) == list_;
+ case CPURegister::kNoRegister:
+ return list_ == 0;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+};
+
+
+// AAPCS64 callee-saved registers.
+#define kCalleeSaved CPURegList::GetCalleeSaved()
+#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+#define kCallerSaved CPURegList::GetCallerSaved()
+#define kCallerSavedFP CPURegList::GetCallerSavedFP()
+
+// -----------------------------------------------------------------------------
+// Immediates.
+class Immediate {
+ public:
+ template<typename T>
+ inline explicit Immediate(Handle<T> handle);
+
+ // This is allowed to be an implicit constructor because Immediate is
+ // a wrapper class that doesn't normally perform any type conversion.
+ template<typename T>
+ inline Immediate(T value); // NOLINT(runtime/explicit)
+
+ template<typename T>
+ inline Immediate(T value, RelocInfo::Mode rmode);
+
+ int64_t value() const { return value_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ private:
+ void InitializeHandle(Handle<Object> value);
+
+ int64_t value_;
+ RelocInfo::Mode rmode_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Operands.
+const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+
+// Represents an operand in a machine instruction.
+class Operand {
+  // TODO(all): If necessary, study in more detail which methods should be
+  // inlined or not.
+ public:
+ // rm, {<shift> {#<shift_amount>}}
+ // where <shift> is one of {LSL, LSR, ASR, ROR}.
+ // <shift_amount> is uint6_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ inline Operand(Register reg,
+ Shift shift = LSL,
+ unsigned shift_amount = 0); // NOLINT(runtime/explicit)
+
+ // rm, <extend> {#<shift_amount>}
+ // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+ // <shift_amount> is uint2_t.
+ inline Operand(Register reg,
+ Extend extend,
+ unsigned shift_amount = 0);
+
+ template<typename T>
+ inline explicit Operand(Handle<T> handle);
+
+ // Implicit constructor for all int types, ExternalReference, and Smi.
+ template<typename T>
+ inline Operand(T t); // NOLINT(runtime/explicit)
+
+ // Implicit constructor for int types.
+ template<typename T>
+ inline Operand(T t, RelocInfo::Mode rmode);
+
+ inline bool IsImmediate() const;
+ inline bool IsShiftedRegister() const;
+ inline bool IsExtendedRegister() const;
+ inline bool IsZero() const;
+
+ // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+ // which helps in the encoding of instructions that use the stack pointer.
+ inline Operand ToExtendedRegister() const;
+
+ inline Immediate immediate() const;
+ inline int64_t ImmediateValue() const;
+ inline Register reg() const;
+ inline Shift shift() const;
+ inline Extend extend() const;
+ inline unsigned shift_amount() const;
+
+ // Relocation information.
+ bool NeedsRelocation(const Assembler* assembler) const;
+
+ // Helpers
+ inline static Operand UntagSmi(Register smi);
+ inline static Operand UntagSmiAndScale(Register smi, int scale);
+
+ private:
+ Immediate immediate_;
+ Register reg_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
+
+
+// MemOperand represents a memory operand in a load or store instruction.
+class MemOperand {
+ public:
+ inline explicit MemOperand();
+ inline explicit MemOperand(Register base,
+ ptrdiff_t offset = 0,
+ AddrMode addrmode = Offset);
+ inline explicit MemOperand(Register base,
+ Register regoffset,
+ Shift shift = LSL,
+ unsigned shift_amount = 0);
+ inline explicit MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount = 0);
+ inline explicit MemOperand(Register base,
+ const Operand& offset,
+ AddrMode addrmode = Offset);
+
+ const Register& base() const { return base_; }
+ const Register& regoffset() const { return regoffset_; }
+ ptrdiff_t offset() const { return offset_; }
+ AddrMode addrmode() const { return addrmode_; }
+ Shift shift() const { return shift_; }
+ Extend extend() const { return extend_; }
+ unsigned shift_amount() const { return shift_amount_; }
+ inline bool IsImmediateOffset() const;
+ inline bool IsRegisterOffset() const;
+ inline bool IsPreIndex() const;
+ inline bool IsPostIndex() const;
+
+ // For offset modes, return the offset as an Operand. This helper cannot
+ // handle indexed modes.
+ inline Operand OffsetAsOperand() const;
+
+ private:
+ Register base_;
+ Register regoffset_;
+ ptrdiff_t offset_;
+ AddrMode addrmode_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
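+
+// Illustrative addressing modes (a sketch added for exposition; the register
+// names are arbitrary examples):
+//   MemOperand(x0, 16);             // [x0, #16]         immediate offset
+//   MemOperand(x0, x1, LSL, 3);     // [x0, x1, lsl #3]  register offset
+//   MemOperand(x0, 16, PreIndex);   // [x0, #16]!        pre-indexed
+//   MemOperand(x0, 16, PostIndex);  // [x0], #16         post-indexed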
+
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+class Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
+
+ virtual ~Assembler();
+
+ virtual void AbortedCodeGeneration() {
+ num_pending_reloc_info_ = 0;
+ }
+
+ // System functions ---------------------------------------------------------
+ // Start generating code from the beginning of the buffer, discarding any code
+ // and data that has already been emitted into the buffer.
+ //
+ // In order to avoid any accidental transfer of state, Reset ASSERTs that the
+ // constant pool is not blocked.
+ void Reset();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ //
+ // The descriptor (desc) can be NULL. In that case, the code is finalized as
+ // usual, but the descriptor is not populated.
+ void GetCode(CodeDesc* desc);
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ inline void Unreachable();
+
+ // Label --------------------------------------------------------------------
+ // Bind a label to the current pc. Note that labels can only be bound once,
+ // and if labels are linked to other instructions, they _must_ be bound
+ // before they go out of scope.
+ void bind(Label* label);
+
+
+ // RelocInfo and pools ------------------------------------------------------
+
+ // Record relocation information for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ inline static Address target_pointer_address_at(Address pc);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ inline static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
+ static inline Address target_address_at(Address pc, Code* code);
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
+
+ // Return the code target address at a call site from the return address of
+ // that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ // Given the address of the beginning of a call, return the address in the
+ // instruction stream that call will return from.
+ inline static Address return_address_from_call_start(Address pc);
+
+  // This sets the branch destination (which is in the constant pool on
+  // ARM64). This is for calls and branches within generated code.
+ inline static void deserialization_set_special_target_at(
+ Address constant_pool_entry, Code* code, Address target);
+
+ // All addresses in the constant pool are the same size as pointers.
+ static const int kSpecialTargetSize = kPointerSize;
+
+ // The sizes of the call sequences emitted by MacroAssembler::Call.
+ // Wherever possible, use MacroAssembler::CallSize instead of these constants,
+ // as it will choose the correct value for a given relocation mode.
+ //
+ // Without relocation:
+ // movz temp, #(target & 0x000000000000ffff)
+ // movk temp, #(target & 0x00000000ffff0000)
+ // movk temp, #(target & 0x0000ffff00000000)
+ // blr temp
+ //
+ // With relocation:
+ // ldr temp, =target
+ // blr temp
+ static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
+ static const int kCallSizeWithRelocation = 2 * kInstructionSize;
+
+ // Size of the generated code in bytes
+ uint64_t SizeOfGeneratedCode() const {
+ ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+ return pc_ - buffer_;
+ }
+
+ // Return the code size generated from label to the current position.
+ uint64_t SizeOfCodeGeneratedSince(const Label* label) {
+ ASSERT(label->is_bound());
+ ASSERT(pc_offset() >= label->pos());
+ ASSERT(pc_offset() < buffer_size_);
+ return pc_offset() - label->pos();
+ }
+
+ // Check the size of the code generated since the given label. This function
+ // is used primarily to work around comparisons between signed and unsigned
+ // quantities, since V8 uses both.
+ // TODO(jbramley): Work out what sign to use for these things and if possible,
+ // change things to be consistent.
+ void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
+ ASSERT(size >= 0);
+ ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
+ }
+
+ // Return the number of instructions generated from label to the
+ // current position.
+ int InstructionsGeneratedSince(const Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstructionSize;
+ }
+
+ // Number of instructions generated for the return sequence in
+ // FullCodeGenerator::EmitReturnSequence.
+ static const int kJSRetSequenceInstructions = 7;
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = 0;
+ static const int kPatchDebugBreakSlotAddressOffset = 0;
+
+ // Number of instructions necessary to be able to later patch it to a call.
+ // See DebugCodegen::GenerateSlot() and
+ // BreakLocationIterator::SetDebugBreakAtSlot().
+ static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstructionSize;
+
+ static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
+
+  // Prevent constant pool emission until EndBlockConstPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockConstPool.
+ void StartBlockConstPool();
+
+  // Resume constant pool emission. Needs to be called as many times as
+  // StartBlockConstPool to have an effect.
+ void EndBlockConstPool();
+
+ bool is_const_pool_blocked() const;
+ static bool IsConstantPoolAt(Instruction* instr);
+ static int ConstantPoolSizeAt(Instruction* instr);
+ // See Assembler::CheckConstPool for more info.
+ void ConstantPoolMarker(uint32_t size);
+ void EmitPoolGuard();
+ void ConstantPoolGuard();
+
+  // Prevent veneer pool emission until EndBlockVeneerPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockVeneerPool.
+ void StartBlockVeneerPool();
+
+  // Resume veneer pool emission. Needs to be called as many times as
+  // StartBlockVeneerPool to have an effect.
+ void EndBlockVeneerPool();
+
+ bool is_veneer_pool_blocked() const {
+ return veneer_pool_blocked_nesting_ > 0;
+ }
+
+ // Block/resume emission of constant pools and veneer pools.
+ void StartBlockPools() {
+ StartBlockConstPool();
+ StartBlockVeneerPool();
+ }
+ void EndBlockPools() {
+ EndBlockConstPool();
+ EndBlockVeneerPool();
+ }
+
+ // Debugging ----------------------------------------------------------------
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ void RecordComment(const char* msg);
+ int buffer_space() const;
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record the emission of a constant pool.
+ //
+ // The emission of constant and veneer pools depends on the size of the code
+ // generated and the number of RelocInfo recorded.
+ // The Debug mechanism needs to map code offsets between two versions of a
+ // function, compiled with and without debugger support (see for example
+ // Debug::PrepareForBreakPoints()).
+ // Compiling functions with debugger support generates additional code
+ // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
+ // and cause the version of the code with debugger support to have pools
+ // generated in different places.
+  // Recording the position and size of emitted pools makes it possible to
+  // correctly compute the offset mappings between the different versions of a
+  // function in all situations.
+ //
+ // The parameter indicates the size of the pool (in bytes), including
+ // the marker and branch over the data.
+ void RecordConstPool(int size);
+
+
+ // Instruction set functions ------------------------------------------------
+
+ // Branch / Jump instructions.
+  // For branches, offsets are scaled, i.e. they are in instructions, not in
+  // bytes.
+ // Branch to register.
+ void br(const Register& xn);
+
+ // Branch-link to register.
+ void blr(const Register& xn);
+
+ // Branch to register with return hint.
+ void ret(const Register& xn = lr);
+
+ // Unconditional branch to label.
+ void b(Label* label);
+
+ // Conditional branch to label.
+ void b(Label* label, Condition cond);
+
+ // Unconditional branch to PC offset.
+ void b(int imm26);
+
+ // Conditional branch to PC offset.
+ void b(int imm19, Condition cond);
+
+ // Branch-link to label / pc offset.
+ void bl(Label* label);
+ void bl(int imm26);
+
+ // Compare and branch to label / pc offset if zero.
+ void cbz(const Register& rt, Label* label);
+ void cbz(const Register& rt, int imm19);
+
+ // Compare and branch to label / pc offset if not zero.
+ void cbnz(const Register& rt, Label* label);
+ void cbnz(const Register& rt, int imm19);
+
+ // Test bit and branch to label / pc offset if zero.
+ void tbz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Test bit and branch to label / pc offset if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbnz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Address calculation instructions.
+  // Calculate a PC-relative address. Unlike for branches, the offset in adr
+  // is unscaled (i.e. the result can be unaligned).
+ void adr(const Register& rd, Label* label);
+ void adr(const Register& rd, int imm21);
+
+ // Data Processing instructions.
+ // Add.
+ void add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add and update status flags.
+ void adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare negative.
+ void cmn(const Register& rn, const Operand& operand);
+
+ // Subtract.
+ void sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract and update status flags.
+ void subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare.
+ void cmp(const Register& rn, const Operand& operand);
+
+ // Negate.
+ void neg(const Register& rd,
+ const Operand& operand);
+
+ // Negate and update status flags.
+ void negs(const Register& rd,
+ const Operand& operand);
+
+ // Add with carry bit.
+ void adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add with carry bit and update status flags.
+ void adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit.
+ void sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit and update status flags.
+ void sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Negate with carry bit.
+ void ngc(const Register& rd,
+ const Operand& operand);
+
+ // Negate with carry bit and update status flags.
+ void ngcs(const Register& rd,
+ const Operand& operand);
+
+ // Logical instructions.
+ // Bitwise and (A & B).
+ void and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise and (A & B) and update status flags.
+ void ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit test, and set flags.
+ void tst(const Register& rn, const Operand& operand);
+
+ // Bit clear (A & ~B).
+ void bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit clear (A & ~B) and update status flags.
+ void bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise or (A | B).
+ void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise or-not (A | ~B).
+ void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise eor/xor (A ^ B).
+ void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise xnor (A ^ ~B).
+ void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Logical shift left variable.
+ void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Logical shift right variable.
+ void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Arithmetic shift right variable.
+ void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Rotate right variable.
+ void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bitfield instructions.
+ // Bitfield move.
+ void bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Signed bitfield move.
+ void sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Unsigned bitfield move.
+ void ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Bfm aliases.
+ // Bitfield insert.
+ void bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
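+
+  // For illustration (not part of the original interface comment):
+  // bfi(x0, x1, 8, 4) inserts bits 3:0 of x1 into bits 11:8 of x0, leaving
+  // all other bits of x0 unchanged.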
+
+ // Bitfield extract and insert low.
+ void bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ bfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Sbfm aliases.
+ // Arithmetic shift right.
+ void asr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.SizeInBits());
+ sbfm(rd, rn, shift, rd.SizeInBits() - 1);
+ }
+
+ // Signed bitfield insert in zero.
+ void sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Signed bitfield extract.
+ void sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ sbfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Signed extend byte.
+ void sxtb(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 7);
+ }
+
+ // Signed extend halfword.
+ void sxth(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 15);
+ }
+
+ // Signed extend word.
+ void sxtw(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 31);
+ }
+
+ // Ubfm aliases.
+ // Logical shift left.
+ void lsl(const Register& rd, const Register& rn, unsigned shift) {
+ unsigned reg_size = rd.SizeInBits();
+ ASSERT(shift < reg_size);
+ ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+ }
+
+ // Logical shift right.
+ void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.SizeInBits());
+ ubfm(rd, rn, shift, rd.SizeInBits() - 1);
+ }
+
+ // Unsigned bitfield insert in zero.
+ void ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Unsigned bitfield extract.
+ void ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ ubfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Unsigned extend byte.
+ void uxtb(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 7);
+ }
+
+ // Unsigned extend halfword.
+ void uxth(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 15);
+ }
+
+ // Unsigned extend word.
+ void uxtw(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 31);
+ }
+
+ // Extract.
+ void extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+
+ // Conditional select: rd = cond ? rn : rm.
+ void csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select increment: rd = cond ? rn : rm + 1.
+ void csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select inversion: rd = cond ? rn : ~rm.
+ void csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select negation: rd = cond ? rn : -rm.
+ void csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional set: rd = cond ? 1 : 0.
+ void cset(const Register& rd, Condition cond);
+
+ // Conditional set minus: rd = cond ? -1 : 0.
+ void csetm(const Register& rd, Condition cond);
+
+ // Conditional increment: rd = cond ? rn + 1 : rn.
+ void cinc(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional invert: rd = cond ? ~rn : rn.
+ void cinv(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional negate: rd = cond ? -rn : rn.
+ void cneg(const Register& rd, const Register& rn, Condition cond);
+
+ // Extr aliases.
+ void ror(const Register& rd, const Register& rs, unsigned shift) {
+ extr(rd, rs, rs, shift);
+ }
+
+ // Conditional comparison.
+ // Conditional compare negative.
+ void ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Conditional compare.
+ void ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Multiplication.
+ // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
+ void mul(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
+ void madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
+ void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
+ void msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+  // Signed 32 x 32 -> 64-bit multiply.
+ void smull(const Register& rd, const Register& rn, const Register& rm);
+
+ // Xd = bits<127:64> of Xn * Xm.
+ void smulh(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed 32 x 32 -> 64-bit multiply and accumulate.
+ void smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
+ void umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed 32 x 32 -> 64-bit multiply and subtract.
+ void smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and subtract.
+ void umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed integer divide.
+ void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Unsigned integer divide.
+ void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bit count, bit reverse and endian reverse.
+ void rbit(const Register& rd, const Register& rn);
+ void rev16(const Register& rd, const Register& rn);
+ void rev32(const Register& rd, const Register& rn);
+ void rev(const Register& rd, const Register& rn);
+ void clz(const Register& rd, const Register& rn);
+ void cls(const Register& rd, const Register& rn);
+
+ // Memory instructions.
+
+ // Load integer or FP register.
+ void ldr(const CPURegister& rt, const MemOperand& src);
+
+ // Store integer or FP register.
+ void str(const CPURegister& rt, const MemOperand& dst);
+
+ // Load word with sign extension.
+ void ldrsw(const Register& rt, const MemOperand& src);
+
+ // Load byte.
+ void ldrb(const Register& rt, const MemOperand& src);
+
+ // Store byte.
+ void strb(const Register& rt, const MemOperand& dst);
+
+ // Load byte with sign extension.
+ void ldrsb(const Register& rt, const MemOperand& src);
+
+ // Load half-word.
+ void ldrh(const Register& rt, const MemOperand& src);
+
+ // Store half-word.
+ void strh(const Register& rt, const MemOperand& dst);
+
+ // Load half-word with sign extension.
+ void ldrsh(const Register& rt, const MemOperand& src);
+
+ // Load integer or FP register pair.
+ void ldp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair.
+ void stp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load word pair with sign extension.
+ void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Load integer or FP register pair, non-temporal.
+ void ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair, non-temporal.
+ void stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load literal to register from a pc relative address.
+ void ldr_pcrel(const CPURegister& rt, int imm19);
+
+ // Load literal to register.
+ void ldr(const CPURegister& rt, const Immediate& imm);
+
+  // Move instructions. The default shift of -1 indicates that the move
+  // instruction will calculate an appropriate 16-bit immediate and left shift
+  // such that the result is equal to the 64-bit immediate argument. If an
+  // explicit left shift is specified (0, 16, 32 or 48), the immediate must be
+  // a 16-bit value.
+ //
+  // For movk, an explicit shift can be used to indicate which half word
+  // should be overwritten, e.g. movk(x0, 0, 0) will overwrite the
+  // least-significant half word with zero, whereas movk(x0, 0, 48) will
+  // overwrite the most-significant half word.
+
+ // Move and keep.
+ void movk(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVK);
+ }
+
+  // Move with NOT: rd = ~(imm << shift).
+ void movn(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVN);
+ }
+
+ // Move with zero.
+ void movz(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVZ);
+ }
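+
+  // As an illustration (added for exposition), a 64-bit constant such as
+  // 0x1234000056780000 could be synthesized with explicit shifts:
+  //   movz(x0, 0x5678, 16);  // x0 = 0x0000000056780000
+  //   movk(x0, 0x1234, 48);  // x0 = 0x1234000056780000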
+
+ // Misc instructions.
+ // Monitor debug-mode breakpoint.
+ void brk(int code);
+
+ // Halting debug-mode breakpoint.
+ void hlt(int code);
+
+ // Move register to register.
+ void mov(const Register& rd, const Register& rn);
+
+ // Move NOT(operand) to register.
+ void mvn(const Register& rd, const Operand& operand);
+
+ // System instructions.
+ // Move to register from system register.
+ void mrs(const Register& rt, SystemRegister sysreg);
+
+ // Move from register to system register.
+ void msr(SystemRegister sysreg, const Register& rt);
+
+ // System hint.
+ void hint(SystemHint code);
+
+ // Data memory barrier
+ void dmb(BarrierDomain domain, BarrierType type);
+
+ // Data synchronization barrier
+ void dsb(BarrierDomain domain, BarrierType type);
+
+ // Instruction synchronization barrier
+ void isb();
+
+ // Alias for system instructions.
+ void nop() { hint(NOP); }
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ DEBUG_BREAK_NOP,
+ INTERRUPT_CODE_NOP,
+ ADR_FAR_NOP,
+ FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
+ LAST_NOP_MARKER = ADR_FAR_NOP
+ };
+
+ void nop(NopMarkerTypes n) {
+ ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
+ mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
+ }
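+
+  // For example (illustrative; this assumes DEBUG_BREAK_NOP is the first
+  // enumerator, i.e. register code 0): nop(DEBUG_BREAK_NOP) emits
+  // "mov x0, x0", a no-op the code generator can later recognize as a
+  // debug-break marker.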
+
+ // FP instructions.
+ // Move immediate to FP register.
+ void fmov(FPRegister fd, double imm);
+ void fmov(FPRegister fd, float imm);
+
+ // Move FP register to register.
+ void fmov(Register rd, FPRegister fn);
+
+ // Move register to FP register.
+ void fmov(FPRegister fd, Register rn);
+
+ // Move FP register to FP register.
+ void fmov(FPRegister fd, FPRegister fn);
+
+ // FP add.
+ void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP subtract.
+ void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP multiply.
+ void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP fused multiply and add.
+ void fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply and subtract.
+ void fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, add and negate.
+ void fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, subtract and negate.
+ void fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP divide.
+ void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP maximum.
+ void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP minimum.
+ void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+  // FP maximum number: like fmax, but if exactly one operand is a quiet NaN
+  // the other operand is returned (IEEE 754-2008 maxNum).
+  void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+  // FP minimum number: like fmin, but if exactly one operand is a quiet NaN
+  // the other operand is returned (IEEE 754-2008 minNum).
+  void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP absolute.
+ void fabs(const FPRegister& fd, const FPRegister& fn);
+
+ // FP negate.
+ void fneg(const FPRegister& fd, const FPRegister& fn);
+
+ // FP square root.
+ void fsqrt(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to away).
+ void frinta(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (toward minus infinity).
+ void frintm(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to even).
+ void frintn(const FPRegister& fd, const FPRegister& fn);
+
+  // FP round to integer (towards zero).
+ void frintz(const FPRegister& fd, const FPRegister& fn);
+
+ // FP compare registers.
+ void fcmp(const FPRegister& fn, const FPRegister& fm);
+
+ // FP compare immediate.
+ void fcmp(const FPRegister& fn, double value);
+
+ // FP conditional compare.
+ void fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP conditional select.
+ void fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+
+ // Common FP Convert function
+ void FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op);
+
+ // FP convert between single and double precision.
+ void fcvt(const FPRegister& fd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to away).
+ void fcvtau(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to away).
+ void fcvtas(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards -infinity).
+ void fcvtmu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (round towards -infinity).
+ void fcvtms(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to even).
+ void fcvtnu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to even).
+ void fcvtns(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards zero).
+ void fcvtzu(const Register& rd, const FPRegister& fn);
+
+  // Convert FP to signed integer (round towards zero).
+ void fcvtzs(const Register& rd, const FPRegister& fn);
+
+ // Convert signed integer or fixed point to FP.
+ void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Convert unsigned integer or fixed point to FP.
+ void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Instruction functions used only for test, debug, and patching.
+ // Emit raw instructions in the instruction stream.
+ void dci(Instr raw_inst) { Emit(raw_inst); }
+
+ // Emit 8 bits of data in the instruction stream.
+ void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
+
+ // Emit 32 bits of data in the instruction stream.
+ void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
+
+ // Emit 64 bits of data in the instruction stream.
+ void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
+
+ // Copy a string into the instruction stream, including the terminating NULL
+ // character. The instruction pointer (pc_) is then aligned correctly for
+ // subsequent instructions.
+ void EmitStringData(const char * string) {
+ size_t len = strlen(string) + 1;
+ ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+ EmitData(string, len);
+ // Pad with NULL characters until pc_ is aligned.
+ const char pad[] = {'\0', '\0', '\0', '\0'};
+ STATIC_ASSERT(sizeof(pad) == kInstructionSize);
+ byte* next_pc = AlignUp(pc_, kInstructionSize);
+ EmitData(&pad, next_pc - pc_);
+ }
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Parameters are described in arm64/instructions-arm64.h.
+ void debug(const char* message, uint32_t code, Instr params = BREAK);
+
+ // Required by V8.
+ void dd(uint32_t data) { dc32(data); }
+ void db(uint8_t data) { dc8(data); }
+
+ // Code generation helpers --------------------------------------------------
+
+ unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
+
+ Instruction* InstructionAt(int offset) const {
+ return reinterpret_cast<Instruction*>(buffer_ + offset);
+ }
+
+ ptrdiff_t InstructionOffset(Instruction* instr) const {
+ return reinterpret_cast<byte*>(instr) - buffer_;
+ }
+
+ // Register encoding.
+ static Instr Rd(CPURegister rd) {
+ ASSERT(rd.code() != kSPRegInternalCode);
+ return rd.code() << Rd_offset;
+ }
+
+ static Instr Rn(CPURegister rn) {
+ ASSERT(rn.code() != kSPRegInternalCode);
+ return rn.code() << Rn_offset;
+ }
+
+ static Instr Rm(CPURegister rm) {
+ ASSERT(rm.code() != kSPRegInternalCode);
+ return rm.code() << Rm_offset;
+ }
+
+ static Instr Ra(CPURegister ra) {
+ ASSERT(ra.code() != kSPRegInternalCode);
+ return ra.code() << Ra_offset;
+ }
+
+ static Instr Rt(CPURegister rt) {
+ ASSERT(rt.code() != kSPRegInternalCode);
+ return rt.code() << Rt_offset;
+ }
+
+ static Instr Rt2(CPURegister rt2) {
+ ASSERT(rt2.code() != kSPRegInternalCode);
+ return rt2.code() << Rt2_offset;
+ }
+
+ // These encoding functions allow the stack pointer to be encoded, and
+ // disallow the zero register.
+ static Instr RdSP(Register rd) {
+ ASSERT(!rd.IsZero());
+ return (rd.code() & kRegCodeMask) << Rd_offset;
+ }
+
+ static Instr RnSP(Register rn) {
+ ASSERT(!rn.IsZero());
+ return (rn.code() & kRegCodeMask) << Rn_offset;
+ }
+
+ // Flags encoding.
+ inline static Instr Flags(FlagsUpdate S);
+ inline static Instr Cond(Condition cond);
+
+ // PC-relative address encoding.
+ inline static Instr ImmPCRelAddress(int imm21);
+
+ // Branch encoding.
+ inline static Instr ImmUncondBranch(int imm26);
+ inline static Instr ImmCondBranch(int imm19);
+ inline static Instr ImmCmpBranch(int imm19);
+ inline static Instr ImmTestBranch(int imm14);
+ inline static Instr ImmTestBranchBit(unsigned bit_pos);
+
+ // Data Processing encoding.
+ inline static Instr SF(Register rd);
+ inline static Instr ImmAddSub(int64_t imm);
+ inline static Instr ImmS(unsigned imms, unsigned reg_size);
+ inline static Instr ImmR(unsigned immr, unsigned reg_size);
+ inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
+ inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
+ inline static Instr ImmLLiteral(int imm19);
+ inline static Instr BitN(unsigned bitn, unsigned reg_size);
+ inline static Instr ShiftDP(Shift shift);
+ inline static Instr ImmDPShift(unsigned amount);
+ inline static Instr ExtendMode(Extend extend);
+ inline static Instr ImmExtendShift(unsigned left_shift);
+ inline static Instr ImmCondCmp(unsigned imm);
+ inline static Instr Nzcv(StatusFlags nzcv);
+
+ static bool IsImmAddSub(int64_t immediate);
+ static bool IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r);
+
+ // MemOperand offset encoding.
+ inline static Instr ImmLSUnsigned(int imm12);
+ inline static Instr ImmLS(int imm9);
+ inline static Instr ImmLSPair(int imm7, LSDataSize size);
+ inline static Instr ImmShiftLS(unsigned shift_amount);
+ inline static Instr ImmException(int imm16);
+ inline static Instr ImmSystemRegister(int imm15);
+ inline static Instr ImmHint(int imm7);
+ inline static Instr ImmBarrierDomain(int imm2);
+ inline static Instr ImmBarrierType(int imm2);
+ inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
+
+ // Move immediates encoding.
+ inline static Instr ImmMoveWide(uint64_t imm);
+ inline static Instr ShiftMoveWide(int64_t shift);
+
+ // FP Immediates.
+ static Instr ImmFP32(float imm);
+ static Instr ImmFP64(double imm);
+ inline static Instr FPScale(unsigned scale);
+
+ // FP register type.
+ inline static Instr FPType(FPRegister fd);
+
+ // Class for scoping postponing the constant pool generation.
+ class BlockConstPoolScope {
+ public:
+ explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockConstPool();
+ }
+ ~BlockConstPoolScope() {
+ assem_->EndBlockConstPool();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
+ };
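+
+  // Typical usage (a sketch added for exposition):
+  //   { Assembler::BlockConstPoolScope scope(&assm);
+  //     // No constant pool can be emitted in this scope.
+  //     assm.b(&label);
+  //   }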
+
+  // Check if it is time to emit a constant pool.
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ // Returns true if we should emit a veneer as soon as possible for a branch
+ // which can at most reach to specified pc.
+ bool ShouldEmitVeneer(int max_reachable_pc,
+ int margin = kVeneerDistanceMargin);
+ bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
+ return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
+ }
+
+ // The maximum code size generated for a veneer. Currently one branch
+ // instruction. This is for code size checking purposes, and can be extended
+ // in the future for example if we decide to add nops between the veneers.
+ static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
+
+ void RecordVeneerPool(int location_offset, int size);
+ // Emits veneers for branches that are approaching their maximum range.
+ // If need_protection is true, the veneers are protected by a branch jumping
+ // over the code.
+ void EmitVeneers(bool force_emit, bool need_protection,
+ int margin = kVeneerDistanceMargin);
+ void EmitVeneersGuard() { EmitPoolGuard(); }
+ // Checks whether veneers need to be emitted at this point.
+ // If force_emit is set, a veneer is generated for *all* unresolved branches.
+ void CheckVeneerPool(bool force_emit, bool require_jump,
+ int margin = kVeneerDistanceMargin);
+
+ class BlockPoolsScope {
+ public:
+ explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockPools();
+ }
+ ~BlockPoolsScope() {
+ assem_->EndBlockPools();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
+ };
+
+ protected:
+ inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
+
+ void LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+ static bool IsImmLSUnscaled(ptrdiff_t offset);
+ static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
+
+ void Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+ void LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op);
+
+ void ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ static bool IsImmConditionalCompare(int64_t immediate);
+
+ void AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Functions for emulating operands not directly supported by the instruction
+ // set.
+ void EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned amount);
+ void EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift);
+
+ void AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+
+ // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+ // registers. Only simple loads are supported; sign- and zero-extension (such
+ // as in LDPSW_x or LDRB_w) are not supported.
+ static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
+ static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
+ static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
+
+ // Remove the specified branch from the unbound label link chain.
+ // If available, a veneer for this label can be used for other branches in the
+ // chain if the link chain cannot be fixed up without this branch.
+ void RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer = NULL);
+
+ private:
+ // Instruction helpers.
+ void MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op);
+ void DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+ void LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op);
+ void ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op);
+ void DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op);
+ void DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op);
+ void FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op);
+ void FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op);
+ void FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op);
+
+ // Label helpers.
+
+ // Return an offset for a label-referencing instruction, typically a branch.
+ int LinkAndGetByteOffsetTo(Label* label);
+
+ // This is the same as LinkAndGetByteOffsetTo, but return an offset
+ // suitable for fields that take instruction offsets.
+ inline int LinkAndGetInstructionOffsetTo(Label* label);
+
+ static const int kStartOfLabelLinkChain = 0;
+
+ // Verify that a label's link chain is intact.
+ void CheckLabelLinkChain(Label const * label);
+
+ void RecordLiteral(int64_t imm, unsigned size);
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
+ // Emit the instruction at pc_.
+ void Emit(Instr instruction) {
+ STATIC_ASSERT(sizeof(*pc_) == 1);
+ STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+ ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+
+ memcpy(pc_, &instruction, sizeof(instruction));
+ pc_ += sizeof(instruction);
+ CheckBuffer();
+ }
+
+ // Emit data inline in the instruction stream.
+ void EmitData(void const * data, unsigned size) {
+ ASSERT(sizeof(*pc_) == 1);
+ ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+
+ // TODO(all): Somehow register we have some data here. Then we can
+ // disassemble it correctly.
+ memcpy(pc_, data, size);
+ pc_ += size;
+ CheckBuffer();
+ }
+
+ void GrowBuffer();
+ void CheckBufferSpace();
+ void CheckBuffer();
+
+ // Pc offset of the next constant pool check.
+ int next_constant_pool_check_;
+
+ // Constant pool generation
+ // Pools are emitted in the instruction stream, preferably after unconditional
+ // jumps or after returns from functions (in dead code locations).
+ // If a long code sequence does not contain unconditional jumps, it is
+ // necessary to emit the constant pool before the pool gets too far from the
+ // location it is accessed from. In this case, we emit a jump over the emitted
+ // constant pool.
+  // Constants in the pool may be addresses of functions that get relocated;
+  // if so, a relocation info entry is associated with the constant pool entry.
+
+ // Repeated checking whether the constant pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated. That also means that the sizing of the buffers is not
+ // an exact science, and that we rely on some slop to not overrun buffers.
+ static const int kCheckConstPoolIntervalInst = 128;
+ static const int kCheckConstPoolInterval =
+ kCheckConstPoolIntervalInst * kInstructionSize;
+
+ // Constants in pools are accessed via pc relative addressing, which can
+ // reach +/-4KB thereby defining a maximum distance between the instruction
+ // and the accessed constant.
+ static const int kMaxDistToConstPool = 4 * KB;
+ static const int kMaxNumPendingRelocInfo =
+ kMaxDistToConstPool / kInstructionSize;
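+
+  // Worked out (for exposition): with kInstructionSize == 4 bytes on ARM64,
+  // kMaxNumPendingRelocInfo is (4 * KB) / 4 == 1024 pending entries, i.e. one
+  // per instruction in the worst case where every instruction in range loads
+  // from the pool.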
+
+
+  // Average distance between a constant pool and the first instruction
+  // accessing the constant pool. A longer distance should result in less
+  // I-cache pollution.
+ // In practice the distance will be smaller since constant pool emission is
+ // forced after function return and sometimes after unconditional branches.
+ static const int kAvgDistToConstPool =
+ kMaxDistToConstPool - kCheckConstPoolInterval;
+
+ // Emission of the constant pool may be blocked in some code sequences.
+ int const_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_const_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_const_pool_use_;
+
+ // Emission of the veneer pools may be blocked in some code sequences.
+ int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // Relocation info records are also used during code generation as temporary
+ // containers for constants and code target addresses until they are emitted
+ // to the constant pool. These pending relocation info records are temporarily
+ // stored in a separate buffer until a constant pool is emitted.
+ // If every instruction in a long sequence is accessing the pool, we need one
+ // pending relocation entry per instruction.
+
+  // The buffer of pending relocation info.
+  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+  // Number of pending reloc info entries in the buffer.
+  int num_pending_reloc_info_;
+
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ TypeFeedbackId recorded_ast_id_;
+
+ inline TypeFeedbackId RecordedAstId();
+ inline void ClearRecordedAstId();
+
+ protected:
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ ASSERT(recorded_ast_id_.IsNone());
+ recorded_ast_id_ = ast_id;
+ }
+
+ // Code generation
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries, and debug strings encoded in the instruction
+ // stream.
+ static const int kGap = 128;
+
+ public:
+ class FarBranchInfo {
+ public:
+ FarBranchInfo(int offset, Label* label)
+ : pc_offset_(offset), label_(label) {}
+ // Offset of the branch in the code generation buffer.
+ int pc_offset_;
+ // The label branched to.
+ Label* label_;
+ };
+
+ protected:
+ // Information about unresolved (forward) branches.
+ // The Assembler is only allowed to delete out-of-date information from here
+ // after a label is bound. The MacroAssembler uses this information to
+ // generate veneers.
+ //
+  // The second member gives information about the unresolved branch. The
+  // first member of the pair is the maximum offset that the branch can reach
+  // in the buffer. The map is sorted according to this reachable offset,
+  // which makes it easy to check when veneers need to be emitted.
+ // Note that the maximum reachable offset (first member of the pairs) should
+ // always be positive but has the same type as the return value for
+ // pc_offset() for convenience.
+ std::multimap<int, FarBranchInfo> unresolved_branches_;
+
+ // We generate a veneer for a branch if we reach within this distance of the
+ // limit of the range.
+ static const int kVeneerDistanceMargin = 1 * KB;
+  // The factor of 2 is a finger-in-the-air guess. With a default margin of
+  // 1KB, that leaves us an additional 256 instructions to avoid generating a
+  // protective branch.
+ static const int kVeneerNoProtectionFactor = 2;
+ static const int kVeneerDistanceCheckMargin =
+ kVeneerNoProtectionFactor * kVeneerDistanceMargin;
+ int unresolved_branches_first_limit() const {
+ ASSERT(!unresolved_branches_.empty());
+ return unresolved_branches_.begin()->first;
+ }
+ // This is similar to next_constant_pool_check_ and helps reduce the overhead
+ // of checking for veneer pools.
+ // It is maintained to the closest unresolved branch limit minus the maximum
+ // veneer margin (or kMaxInt if there are no unresolved branches).
+ int next_veneer_pool_check_;
+
+ private:
+ // If a veneer is emitted for a branch instruction, that instruction must be
+ // removed from the associated label's link chain so that the assembler does
+ // not later attempt (likely unsuccessfully) to patch it to branch directly to
+ // the label.
+ void DeleteUnresolvedBranchInfoForLabel(Label* label);
+ // This function deletes the information related to the label by traversing
+ // the label chain, and for each PC-relative instruction in the chain checking
+ // if pending unresolved information exists. Its complexity is proportional to
+ // the length of the label chain.
+ void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
+
+ private:
+ PositionsRecorder positions_recorder_;
+ friend class PositionsRecorder;
+ friend class EnsureSpace;
+};
+
+class PatchingAssembler : public Assembler {
+ public:
+ // Create an Assembler with a buffer starting at 'start'.
+  // The buffer size is:
+  //   size of instructions to patch + kGap
+  // where kGap is the slack below which the Assembler would try to grow its
+  // buffer.
+ // If more or fewer instructions than expected are generated or if some
+ // relocation information takes space in the buffer, the PatchingAssembler
+ // will crash trying to grow the buffer.
+ PatchingAssembler(Instruction* start, unsigned count)
+ : Assembler(NULL,
+ reinterpret_cast<byte*>(start),
+ count * kInstructionSize + kGap) {
+ StartBlockPools();
+ }
+
+ PatchingAssembler(byte* start, unsigned count)
+ : Assembler(NULL, start, count * kInstructionSize + kGap) {
+ // Block constant pool emission.
+ StartBlockPools();
+ }
+
+ ~PatchingAssembler() {
+ // Const pool should still be blocked.
+ ASSERT(is_const_pool_blocked());
+ EndBlockPools();
+    // Verify we have generated the number of instructions we expected.
+ ASSERT((pc_offset() + kGap) == buffer_size_);
+ // Verify no relocation information has been emitted.
+ ASSERT(num_pending_reloc_info() == 0);
+ // Flush the Instruction cache.
+ size_t length = buffer_size_ - kGap;
+ CPU::FlushICache(buffer_, length);
+ }
+
+ static const int kMovInt64NInstrs = 4;
+ void MovInt64(const Register& rd, int64_t imm);
+
+ // See definition of PatchAdrFar() for details.
+ static const int kAdrFarPatchableNNops = kMovInt64NInstrs - 1;
+ static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 3;
+ void PatchAdrFar(Instruction* target);
+};
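+
+// Typical usage (a sketch added for exposition; 'instructions' is a
+// hypothetical Instruction* into already-generated code):
+//   PatchingAssembler patcher(instructions,
+//                             PatchingAssembler::kMovInt64NInstrs);
+//   patcher.MovInt64(x0, new_value);
+// The destructor verifies the instruction count and flushes the I-cache.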
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) {
+ assembler->CheckBufferSpace();
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_ASSEMBLER_ARM64_H_
diff --git a/chromium/v8/src/arm64/builtins-arm64.cc b/chromium/v8/src/arm64/builtins-arm64.cc
new file mode 100644
index 00000000000..9dc7221c304
--- /dev/null
+++ b/chromium/v8/src/arm64/builtins-arm64.cc
@@ -0,0 +1,1565 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the native context.
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the Array function from the native context.
+ __ Ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+ Register result) {
+ // Load the native context.
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
+ __ Ldr(result, ContextMemOperand(result,
+ Context::INTERNAL_ARRAY_FUNCTION_INDEX));
+}
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments excluding receiver
+ // -- x1 : called function (only guaranteed when
+ // extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+  //  -- sp[8 * (argc - 1)] : first argument (argc == x0)
+  //  -- sp[8 * argc]       : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ Push(x1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects x0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ Add(x0, x0, num_extra_args + 1);
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_InternalArrayCode");
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ GenerateLoadInternalArrayFunction(masm, x1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Tst(x10, kSmiTagMask);
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+ __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_ArrayCode");
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, x1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array functions should be maps.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Tst(x10, kSmiTagMask);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_StringConstructCode");
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, x10, x11);
+
+ Register argc = x0;
+ Register function = x1;
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, x10);
+ __ Cmp(function, x10);
+ __ Assert(eq, kUnexpectedStringFunction);
+ }
+
+  // Load the first argument into x0 and get rid of the rest.
+ Label no_arguments;
+ __ Cbz(argc, &no_arguments);
+  // First arg = sp[(argc - 1) * 8].
+ __ Sub(argc, argc, 1);
+ __ Claim(argc, kXRegSize);
+ // jssp now points to args[0]; load and drop args[0] + receiver.
+ Register arg = argc;
+ __ Ldr(arg, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ argc = NoReg;
+
+ Register argument = x2;
+ Label not_cached, argument_is_string;
+ __ LookupNumberStringCache(arg, // Input.
+ argument, // Result.
+ x10, // Scratch.
+ x11, // Scratch.
+ x12, // Scratch.
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, x10, x11);
+ __ Bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- x2 : argument converted to string
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -----------------------------------
+
+ Label gc_required;
+ Register new_obj = x0;
+ __ Allocate(JSValue::kSize, new_obj, x10, x11, &gc_required, TAG_OBJECT);
+
+ // Initialize the String object.
+ Register map = x3;
+ __ LoadGlobalFunctionInitialMap(function, map, x10);
+ if (FLAG_debug_code) {
+ __ Ldrb(x4, FieldMemOperand(map, Map::kInstanceSizeOffset));
+ __ Cmp(x4, JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
+ __ Ldrb(x4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+ __ Cmp(x4, 0);
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+ }
+ __ Str(map, FieldMemOperand(new_obj, HeapObject::kMapOffset));
+
+ Register empty = x3;
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(empty, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+ __ Str(empty, FieldMemOperand(new_obj, JSObject::kElementsOffset));
+
+ __ Str(argument, FieldMemOperand(new_obj, JSValue::kValueOffset));
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == (4 * kPointerSize));
+
+ __ Ret();
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ Bind(&not_cached);
+ __ JumpIfSmi(arg, &convert_argument);
+
+ // Is it a String?
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Tbnz(x11, MaskToBit(kIsNotStringMask), &convert_argument);
+ __ Mov(argument, arg);
+ __ IncrementCounter(counters->string_ctor_string_value(), 1, x10, x11);
+ __ B(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into x2.
+ __ Bind(&convert_argument);
+ __ Push(function); // Preserve the function.
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, x10, x11);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(arg);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
+ __ Pop(function);
+ __ Mov(argument, x0);
+ __ B(&argument_is_string);
+
+ // Load the empty string into x2, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ Bind(&no_arguments);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ B(&argument_is_string);
+
+ // At this point the argument is already a string. Call the runtime to
+ // create a string wrapper.
+ __ Bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, x10, x11);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
+ __ Ret();
+}
+
+
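+// A minimal layout sketch matching the stores above (the struct is a local
+// assumption, not a V8 type): the four pointer-sized fields of a JSValue
+// wrapper, at increasing offsets from the untagged object base.
+struct JSValueLayoutSketch {
+ void* map; // HeapObject::kMapOffset
+ void* properties; // JSObject::kPropertiesOffset
+ void* elements; // JSObject::kElementsOffset
+ void* value; // JSValue::kValueOffset
+};
+
+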
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // - Push a copy of the function onto the stack.
+ // - Push another copy as a parameter to the runtime call.
+ __ Push(x1, x1);
+
+ __ CallRuntime(function_id, 1);
+
+ // - Restore the function.
+ __ Pop(x1);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x2);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x0);
+}
+
+
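+// A host-side sketch of the address arithmetic above (the helper name is a
+// local assumption): a Code object's entry point is its tagged pointer
+// plus the header size, minus the heap-object tag.
+static inline uintptr_t CodeEntrySketch(uintptr_t tagged_code_object) {
+ return tagged_code_object + Code::kHeaderSize - kHeapObjectTag;
+}
+
+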
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However, not
+ // checking may delay installing ready functions, and always checking would
+ // be quite expensive. A good compromise is to first check against the stack
+ // limit as a cue for an interrupt signal.
+ Label ok;
+ __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ Bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool create_memento) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- x2 : allocation site or undefined
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ Isolate* isolate = masm->isolate();
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the three incoming parameters on the stack.
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(x2, x10);
+ __ Push(x2);
+ }
+
+ Register argc = x0;
+ Register constructor = x1;
+ // x1: constructor function
+ __ SmiTag(argc);
+ __ Push(argc, constructor);
+ // sp[0]: constructor function
+ // sp[1]: number of arguments (smi-tagged)
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ Mov(x2, Operand(debug_step_in_fp));
+ __ Ldr(x2, MemOperand(x2));
+ __ Cbnz(x2, &rt_call);
+ // Load the initial map and verify that it is in fact a map.
+ Register init_map = x2;
+ __ Ldr(init_map,
+ FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(init_map, &rt_call);
+ __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc), in which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
+ __ B(eq, &rt_call);
+
+ Register construction_count = x14;
+ if (!is_api_function) {
+ Label allocate;
+ MemOperand bit_field3 =
+ FieldMemOperand(init_map, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ Ldr(x4, bit_field3);
+ __ DecodeField<Map::ConstructionCount>(construction_count, x4);
+ __ Cmp(construction_count, Operand(JSFunction::kNoSlackTracking));
+ __ B(eq, &allocate);
+ // Decrease generous allocation count.
+ __ Subs(x4, x4, Operand(1 << Map::ConstructionCount::kShift));
+ __ Str(x4, bit_field3);
+ __ Cmp(construction_count, Operand(JSFunction::kFinishSlackTracking));
+ __ B(ne, &allocate);
+
+ // Push the constructor and map to the stack, and the constructor again
+ // as argument to the runtime call.
+ __ Push(constructor, init_map, constructor);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+ __ Pop(init_map, constructor);
+ __ Mov(construction_count, Operand(JSFunction::kNoSlackTracking));
+ __ Bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ Register obj_size = x3;
+ Register new_obj = x4;
+ __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ Add(x7, obj_size,
+ Operand(AllocationMemento::kSize / kPointerSize));
+ __ Allocate(x7, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ } else {
+ __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ }
+
+ // Allocated the JSObject, now initialize the fields. The map is set to
+ // the initial map, and the properties and elements are set to the empty
+ // fixed array. NB: the object pointer is not tagged, so MemOperand is
+ // used rather than FieldMemOperand.
+ Register empty = x5;
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset));
+ STATIC_ASSERT(JSObject::kElementsOffset ==
+ (JSObject::kPropertiesOffset + kPointerSize));
+ __ Stp(empty, empty, MemOperand(new_obj, JSObject::kPropertiesOffset));
+
+ Register first_prop = x5;
+ __ Add(first_prop, new_obj, JSObject::kHeaderSize);
+
+ // Fill all of the in-object properties with the appropriate filler.
+ Register filler = x7;
+ __ LoadRoot(filler, Heap::kUndefinedValueRootIndex);
+
+ // Obtain number of pre-allocated property fields and in-object
+ // properties.
+ Register prealloc_fields = x10;
+ Register inobject_props = x11;
+ Register inst_sizes = x11;
+ __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
+ __ Ubfx(prealloc_fields, inst_sizes,
+ Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Ubfx(inobject_props, inst_sizes,
+ Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
+
+ // Calculate number of property fields in the object.
+ Register prop_fields = x6;
+ __ Sub(prop_fields, obj_size, JSObject::kHeaderSize / kPointerSize);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ Cmp(construction_count, Operand(JSFunction::kNoSlackTracking));
+ __ B(eq, &no_inobject_slack_tracking);
+ construction_count = NoReg;
+
+ // Fill the pre-allocated fields with undefined.
+ __ FillFields(first_prop, prealloc_fields, filler);
+
+ // Update first_prop register to be the offset of the first field after
+ // pre-allocated fields.
+ __ Add(first_prop, first_prop,
+ Operand(prealloc_fields, LSL, kPointerSizeLog2));
+
+ if (FLAG_debug_code) {
+ Register obj_end = x14;
+ __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
+ __ Cmp(first_prop, obj_end);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+
+ // Fill the remaining fields with one pointer filler map.
+ __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
+ __ Sub(prop_fields, prop_fields, prealloc_fields);
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+ if (create_memento) {
+ // Fill the property fields with undefined.
+ __ FillFields(first_prop, prop_fields, filler);
+ __ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
+ __ LoadRoot(x14, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
+ // Load the AllocationSite
+ __ Peek(x14, 2 * kXRegSize);
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
+ first_prop = NoReg;
+ } else {
+ // Fill all of the property fields with undefined.
+ __ FillFields(first_prop, prop_fields, filler);
+ first_prop = NoReg;
+ prop_fields = NoReg;
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ Add(new_obj, new_obj, kHeapObjectTag);
+
+ // Check if a non-empty properties array is needed. Continue with
+ // allocated object if not, or fall through to runtime call if it is.
+ Register element_count = x3;
+ __ Ldrb(element_count,
+ FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
+ // The instance-sizes field contains both the pre-allocated property
+ // fields and the in-object properties.
+ __ Add(element_count, element_count, prealloc_fields);
+ __ Subs(element_count, element_count, inobject_props);
+
+ // Done if no extra properties are to be allocated.
+ __ B(eq, &allocated);
+ __ Assert(pl, kPropertyAllocationCountFailed);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ Register new_array = x5;
+ Register array_size = x6;
+ __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
+ __ Allocate(array_size, new_array, x11, x12, &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
+ SIZE_IN_WORDS));
+
+ Register array_map = x10;
+ __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
+ __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
+ __ SmiTag(x0, element_count);
+ __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
+
+ // Initialize the fields to undefined.
+ Register elements = x10;
+ __ Add(elements, new_array, FixedArray::kHeaderSize);
+ __ FillFields(elements, element_count, filler);
+
+ // Store the initialized FixedArray into the properties field of the
+ // JSObject.
+ __ Add(new_array, new_array, kHeapObjectTag);
+ __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ __ B(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated object's unused properties.
+ __ Bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(new_obj, x14);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ __ Bind(&rt_call);
+ Label count_incremented;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ Peek(x4, 2 * kXRegSize);
+ __ Push(x4);
+ __ Push(constructor); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ __ Mov(x4, x0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we must not increment the memento
+ // create count here.
+ __ jmp(&count_incremented);
+ } else {
+ __ Push(constructor); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ __ Mov(x4, x0);
+ }
+
+ // Receiver for constructor call allocated.
+ // x4: JSObject
+ __ Bind(&allocated);
+
+ if (create_memento) {
+ __ Peek(x10, 2 * kXRegSize);
+ __ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
+ // x10 holds an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ Ldr(x5, FieldMemOperand(x10,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ Add(x5, x5, Operand(Smi::FromInt(1)));
+ __ Str(x5, FieldMemOperand(x10,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
+ __ Push(x4, x4);
+
+ // Reload the number of arguments from the stack.
+ // Set it up in x0 for the function call below.
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ __ Peek(constructor, 2 * kXRegSize); // Load constructor.
+ __ Peek(argc, 3 * kXRegSize); // Load number of arguments.
+ __ SmiUntag(argc);
+
+ // Set up pointer to last argument.
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
+
+ // Copy arguments and receiver to the expression stack.
+ // Copy 2 values every loop to use ldp/stp.
+ // x0: number of arguments
+ // x1: constructor function
+ // x2: address of last argument (caller sp)
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ // Compute the start address of the copy in x3.
+ __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
+ Label loop, entry, done_copying_arguments;
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex));
+ __ Push(x11, x10);
+ __ Bind(&entry);
+ __ Cmp(x3, x2);
+ __ B(gt, &loop);
+ // Because we copied values two at a time, we may have copied one extra
+ // value. Drop it if that is the case.
+ __ B(eq, &done_copying_arguments);
+ __ Drop(1);
+ __ Bind(&done_copying_arguments);
+
+ // Call the function.
+ // x0: number of arguments
+ // x1: constructor function
+ if (is_api_function) {
+ __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(argc);
+ __ InvokeFunction(constructor, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore the context from the frame.
+ // x0: result
+ // jssp[0]: receiver
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(x0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0);
+
+ // Remove the receiver from the stack, remove caller arguments, and
+ // return.
+ __ Bind(&exit);
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Peek(x1, 2 * kXRegSize);
+
+ // Leave construct frame.
+ }
+
+ __ DropBySMI(x1);
+ __ Drop(1);
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+ __ Ret();
+}
+
+
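+// A minimal sketch of the slack-tracking decode used in the helper above
+// (the function name is a local assumption): DecodeField<F> masks and
+// shifts the field F out of the map's bit_field3 word.
+static inline int DecodeConstructionCountSketch(uint32_t bit_field3) {
+ return static_cast<int>((bit_field3 & Map::ConstructionCount::kMask) >>
+ Map::ConstructionCount::kShift);
+}
+
+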
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+// Input:
+// x0: code entry.
+// x1: function.
+// x2: receiver.
+// x3: argc.
+// x4: argv.
+// Output:
+// x0: result.
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from JSEntryStub::GenerateBody().
+ Register function = x1;
+ Register receiver = x2;
+ Register argc = x3;
+ Register argv = x4;
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Clear the context before we push it when entering the internal frame.
+ __ Mov(cp, 0);
+
+ {
+ // Enter an internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Set up the context from the function argument.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ __ InitializeRootRegister();
+
+ // Push the function and the receiver onto the stack.
+ __ Push(function, receiver);
+
+ // Copy arguments to the stack in a loop, in reverse order.
+ // x3: argc.
+ // x4: argv.
+ Label loop, entry;
+ // Compute the copy end address.
+ __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
+
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
+ __ Ldr(x12, MemOperand(x11)); // Dereference the handle.
+ __ Push(x12); // Push the argument.
+ __ Bind(&entry);
+ __ Cmp(x10, argv);
+ __ B(ne, &loop);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ // The original values have been saved in JSEntryStub::GenerateBody().
+ __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
+ __ Mov(x20, x19);
+ __ Mov(x21, x19);
+ __ Mov(x22, x19);
+ __ Mov(x23, x19);
+ __ Mov(x24, x19);
+ __ Mov(x25, x19);
+ // Don't initialize the reserved registers.
+ // x26 : root register (root).
+ // x27 : context pointer (cp).
+ // x28 : JS stack pointer (jssp).
+ // x29 : frame pointer (fp).
+
+ __ Mov(x0, argc);
+ if (is_construct) {
+ // No type feedback cell is available.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(x0);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+ // Exit the JS internal frame, remove the parameters (except the
+ // function), and return.
+ }
+
+ // Result is in x0. Return.
+ __ Ret();
+}
+
+
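+// A host-side sketch of the argv copy loop above (names are local
+// assumptions): each argv slot is a handle, i.e. a pointer to a slot that
+// holds the actual object, so every argument is dereferenced once before
+// being pushed.
+static void PushArgvSketch(uintptr_t* const* argv, int argc, uintptr_t* out) {
+ for (int i = 0; i < argc; ++i) {
+ out[i] = *argv[i]; // Dereference the handle, then push.
+ }
+}
+
+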
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ Register function = x1;
+
+ // Preserve function. At the same time, push arguments for
+ // kHiddenCompileOptimized.
+ __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
+ __ Push(function, function, x10);
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+
+ // Restore the function.
+ __ Pop(function);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection, which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code fast, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+ }
+
+ // The calling function has been made young again, so return to execute the
+ // real frame set-up code.
+ __ Br(x0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the
+ // fact that make_code_young doesn't do any garbage collection, which allows
+ // us to save/restore the registers without worrying about which of them
+ // contain pointers.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(
+ masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ EmitFrameSetupForCodeAgePatching(masm);
+ }
+
+ // Jump to point after the code-age stub.
+ __ Add(x0, x0, kNoCodeAgeSequenceLength);
+ __ Br(x0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ // TODO(jbramley): Is it correct (and appropriate) to use safepoint
+ // registers here? According to the comment above, we should only need to
+ // preserve the registers with parameters.
+ __ PushXRegList(kSafepointSavedRegisters);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
+ __ PopXRegList(kSafepointSavedRegisters);
+ }
+
+ // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
+ __ Drop(1);
+
+ // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
+ // into lr before it jumps here.
+ __ Br(lr);
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the deoptimization type to the runtime system.
+ __ Mov(x0, Smi::FromInt(static_cast<int>(type)));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+ }
+
+ // Get the full codegen state from the stack and untag it.
+ Register state = x6;
+ __ Peek(state, 0);
+ __ SmiUntag(state);
+
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ CompareAndBranch(
+ state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register);
+ __ Drop(1); // Remove state.
+ __ Ret();
+
+ __ Bind(&with_tos_register);
+ // Reload TOS register.
+ __ Peek(x0, kPointerSize);
+ __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state);
+ __ Drop(2); // Remove state and TOS.
+ __ Ret();
+
+ __ Bind(&unknown_state);
+ __ Abort(kInvalidFullCodegenState);
+}
+
+
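+// A minimal sketch of the state dispatch above (the helper is a local
+// assumption): NO_REGISTERS drops only the state slot, TOS_REG also
+// reloads and drops the top-of-stack value; anything else aborts.
+static inline int SlotsToDropForStateSketch(int state) {
+ if (state == FullCodeGenerator::NO_REGISTERS) return 1; // State only.
+ if (state == FullCodeGenerator::TOS_REG) return 2; // State and TOS.
+ return -1; // kInvalidFullCodegenState.
+}
+
+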
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ Push(x0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ // If the code object is null, just return to the unoptimized code.
+ Label skip;
+ __ CompareAndBranch(x0, Smi::FromInt(0), ne, &skip);
+ __ Ret();
+
+ __ Bind(&skip);
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ Ldrsw(w1, UntagSmiFieldMemOperand(x1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ Add(x0, x0, x1);
+ __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+
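+// A host-side sketch of the entry computation above (the helper name is a
+// local assumption): target = code object + OSR pc offset + header size,
+// with the heap-object tag removed.
+static inline uintptr_t OsrEntrySketch(uintptr_t tagged_code,
+ int32_t osr_pc_offset) {
+ return tagged_code + osr_pc_offset + Code::kHeaderSize - kHeapObjectTag;
+}
+
+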
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as an indicator that recompilation might be
+ // done.
+ Label ok;
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&ok);
+ __ Ret();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ enum {
+ call_type_JS_func = 0,
+ call_type_func_proxy = 1,
+ call_type_non_func = 2
+ };
+ Register argc = x0;
+ Register function = x1;
+ Register call_type = x4;
+ Register scratch1 = x10;
+ Register scratch2 = x11;
+ Register receiver_type = x13;
+
+ ASM_LOCATION("Builtins::Generate_FunctionCall");
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ Cbnz(argc, &done);
+ __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+ __ Push(scratch1);
+ __ Mov(argc, 1);
+ __ Bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack and
+ // check whether it is a function.
+ Label slow, non_function;
+ __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ __ JumpIfSmi(function, &non_function);
+ __ JumpIfNotObjectType(function, scratch1, receiver_type,
+ JS_FUNCTION_TYPE, &slow);
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ Label shift_arguments;
+ __ Mov(call_type, static_cast<int>(call_type_JS_func));
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ // Also do not transform the receiver for native functions (the compiler
+ // hints are checked below).
+ __ Ldr(scratch1,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch2.W(),
+ FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestAndBranchIfAnySet(
+ scratch2.W(),
+ (1 << SharedFunctionInfo::kStrictModeFunction) |
+ (1 << SharedFunctionInfo::kNative),
+ &shift_arguments);
+
+ // Compute the receiver in sloppy mode.
+ Register receiver = x2;
+ __ Sub(scratch1, argc, 1);
+ __ Peek(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
+ __ JumpIfSmi(receiver, &convert_to_object);
+
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
+ &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(receiver, scratch1, scratch2,
+ FIRST_SPEC_OBJECT_TYPE, &shift_arguments, ge);
+
+ __ Bind(&convert_to_object);
+
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+
+ __ Push(argc, receiver);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(receiver, x0);
+
+ __ Pop(argc);
+ __ SmiUntag(argc);
+
+ // Exit the internal frame.
+ }
+
+ // Restore the function and flag in the registers.
+ __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ __ Mov(call_type, static_cast<int>(call_type_JS_func));
+ __ B(&patch_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(receiver, GlobalObjectMemOperand());
+ __ Ldr(receiver,
+ FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+
+
+ __ Bind(&patch_receiver);
+ __ Sub(scratch1, argc, 1);
+ __ Poke(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
+
+ __ B(&shift_arguments);
+ }
+
+ // 3b. Check for function proxy.
+ __ Bind(&slow);
+ __ Mov(call_type, static_cast<int>(call_type_func_proxy));
+ __ Cmp(receiver_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(eq, &shift_arguments);
+ __ Bind(&non_function);
+ __ Mov(call_type, static_cast<int>(call_type_non_func));
+
+ // 3c. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Sub(scratch1, argc, 1);
+ __ Poke(function, Operand(scratch1, LSL, kXRegSizeLog2));
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Bind(&shift_arguments);
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is jssp.
+ __ Add(scratch2, jssp, Operand(argc, LSL, kPointerSizeLog2));
+ __ Sub(scratch1, scratch2, kPointerSize);
+
+ __ Bind(&loop);
+ __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex));
+ __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex));
+ __ Cmp(scratch1, jssp);
+ __ B(ge, &loop);
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ Sub(argc, argc, 1);
+ __ Drop(1);
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ { Label js_function, non_proxy;
+ __ Cbz(call_type, &js_function);
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ Mov(x2, 0);
+ __ Cmp(call_type, static_cast<int>(call_type_func_proxy));
+ __ B(ne, &non_proxy);
+
+ __ Push(function); // Re-add proxy object as additional argument.
+ __ Add(argc, argc, 1);
+ __ GetBuiltinFunction(function, Builtins::CALL_FUNCTION_PROXY);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&non_proxy);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&js_function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code in register x3 without checking arguments.
+ __ Ldr(x3, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(x2,
+ FieldMemOperand(x3,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ Label dont_adapt_args;
+ __ Cmp(x2, argc); // Check formal and actual parameter counts.
+ __ B(eq, &dont_adapt_args);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&dont_adapt_args);
+
+ __ Ldr(x3, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(0);
+ __ InvokeCode(x3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
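+// A host-side sketch of the shift loop above, assuming mem[0] is the top
+// of stack (jssp) and mem[argc] the old receiver slot (names are local
+// assumptions): every slot moves one position towards the receiver, the
+// stale top slot is dropped, and argc shrinks by one.
+static void ShiftArgumentsSketch(uintptr_t* mem, int* argc) {
+ for (int i = *argc; i >= 1; --i) {
+ mem[i] = mem[i - 1];
+ }
+ // Drop(1): mem[0] now holds a stale copy of the last argument, so the
+ // stack pointer moves up one slot and the view becomes mem + 1.
+ *argc -= 1;
+}
+
+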
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_FunctionApply");
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kReceiverOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+
+ Register args = x12;
+ Register receiver = x14;
+ Register function = x15;
+
+ // Get the length of the arguments via a builtin call.
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ __ Ldr(args, MemOperand(fp, kArgsOffset));
+ __ Push(function, args);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ Register argc = x0;
+
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+ Label enough_stack_space;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here, which will cause x10 to become negative.
+ // TODO(jbramley): Check that the stack usage here is safe.
+ __ Sub(x10, jssp, x10);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
+ __ B(gt, &enough_stack_space);
+ // There is not enough stack space, so use a builtin to throw an appropriate
+ // error.
+ __ Push(function, argc);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ // We should never return from the STACK_OVERFLOW builtin.
+ if (__ emit_debug_code()) {
+ __ Unreachable();
+ }
+
+ __ Bind(&enough_stack_space);
+ // Push current limit and index.
+ __ Mov(x1, 0); // Initial index.
+ __ Push(argc, x1);
+
+ Label push_receiver;
+ __ Ldr(receiver, MemOperand(fp, kReceiverOffset));
+
+ // Check that the function is a JS function; otherwise it must be a
+ // proxy, in which case the function proxy will be invoked later.
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE,
+ &push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // Load the shared function info.
+ __ Ldr(x2, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute and push the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label convert_receiver_to_object, use_global_receiver;
+ __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
+ // Do not transform the receiver for native functions.
+ __ Tbnz(x10, SharedFunctionInfo::kNative, &push_receiver);
+
+ // Compute the receiver in sloppy mode.
+ __ JumpIfSmi(receiver, &convert_receiver_to_object);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
+ &use_global_receiver);
+
+ // Check if the receiver is already a JavaScript object.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(receiver, x10, x11, FIRST_SPEC_OBJECT_TYPE,
+ &push_receiver, ge);
+
+ // Call a builtin to convert the receiver to a regular object.
+ __ Bind(&convert_receiver_to_object);
+ __ Push(receiver);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(receiver, x0);
+ __ B(&push_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ __ Bind(&push_receiver);
+ __ Push(receiver);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ Register current = x0;
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ B(&entry);
+
+ __ Bind(&loop);
+ // Load the current argument from the arguments array and push it.
+ // TODO(all): Couldn't we optimize this for JS arrays?
+
+ __ Ldr(x1, MemOperand(fp, kArgsOffset));
+ __ Push(x1, current);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ Push(x0);
+
+ // Increment the current index (a smi) and store it back on the stack.
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ Add(current, current, Smi::FromInt(1));
+ __ Str(current, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ Bind(&entry);
+ __ Ldr(x1, MemOperand(fp, kLimitOffset));
+ __ Cmp(current, x1);
+ __ B(ne, &loop);
+
+ // At the end of the loop, the number of arguments is stored in 'current',
+ // represented as a smi.
+
+ function = x1; // From now on we want the function to be kept in x1.
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+
+ // Call the function.
+ Label call_proxy;
+ ParameterCount actual(current);
+ __ SmiUntag(current);
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ frame_scope.GenerateLeaveFrame();
+ __ Drop(3);
+ __ Ret();
+
+ // Call the function proxy.
+ __ Bind(&call_proxy);
+ // x0 : argc
+ // x1 : function
+ __ Push(function); // Add function proxy as last argument.
+ __ Add(x0, x0, 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ }
+ __ Drop(3);
+ __ Ret();
+}
+
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- x0 : actual number of arguments
+ // -- x1 : function (passed through to callee)
+ // -- x2 : expected number of arguments
+ // -----------------------------------
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+ Label enough_stack_space;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here, which will cause x10 to become negative.
+ __ Sub(x10, jssp, x10);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(x10, Operand(x2, LSL, kPointerSizeLog2));
+ __ B(le, stack_overflow);
+}
+
+
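+// A minimal sketch of the check above (the helper is a local assumption):
+// 'space' may be negative when jssp is already below the real limit,
+// which is why the signed condition (le) is used.
+static inline bool AdaptorWouldOverflowSketch(intptr_t jssp_value,
+ intptr_t real_limit,
+ intptr_t expected_argc) {
+ intptr_t space = jssp_value - real_limit;
+ return space <= expected_argc * kPointerSize; // B(le, stack_overflow).
+}
+
+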
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ SmiTag(x10, x0);
+ __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Push(lr, fp);
+ __ Push(x11, x1, x10);
+ __ Add(fp, jssp,
+ StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then drop the parameters and the receiver.
+ __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
+ __ Mov(jssp, fp);
+ __ Pop(fp, lr);
+ __ DropBySMI(x10, kXRegSize);
+ __ Drop(1);
+}
+
+
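+// A host-side sketch of the teardown above (the helper name is a local
+// assumption): the saved argument count is a smi, so it is untagged before
+// dropping that many slots plus one for the receiver.
+static inline uintptr_t PopAdaptedArgsSketch(uintptr_t sp, intptr_t smi_argc) {
+ intptr_t argc = smi_argc >> kSmiShift; // Untag the smi count.
+ return sp + (argc + 1) * kXRegSize; // Arguments plus the receiver.
+}
+
+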
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
+ // ----------- S t a t e -------------
+ // -- x0 : actual number of arguments
+ // -- x1 : function (passed through to callee)
+ // -- x2 : expected number of arguments
+ // -----------------------------------
+
+ Label stack_overflow;
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+ Register argc_actual = x0; // Excluding the receiver.
+ Register argc_expected = x2; // Excluding the receiver.
+ Register function = x1;
+ Register code_entry = x3;
+
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ __ Cmp(argc_actual, argc_expected);
+ __ B(lt, &too_few);
+ __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ B(eq, &dont_adapt_arguments);
+
+ { // Enough parameters: actual >= expected
+ EnterArgumentsAdaptorFrame(masm);
+
+ Register copy_start = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register scratch1 = x13, scratch2 = x14;
+
+ __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+
+ // Adjust for fp, lr, and the receiver.
+ __ Add(copy_start, fp, 3 * kPointerSize);
+ __ Add(copy_start, copy_start, Operand(argc_actual, LSL, kPointerSizeLog2));
+ __ Sub(copy_end, copy_start, argc_expected);
+ __ Sub(copy_end, copy_end, kPointerSize);
+ __ Mov(copy_to, jssp);
+
+ // Claim space for the arguments, the receiver, and one extra slot.
+ // The extra slot ensures we do not write under jssp. It will be popped
+ // later.
+ __ Add(scratch1, argc_expected, 2 * kPointerSize);
+ __ Claim(scratch1, 1);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_start, - 2 * kPointerSize, PreIndex));
+ __ Stp(scratch1, scratch2,
+ MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ __ Cmp(copy_start, copy_end);
+ __ B(hi, &copy_2_by_2);
+
+ // Correct the space allocated for the extra slot.
+ __ Drop(1);
+
+ __ B(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected
+ __ Bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ Register copy_from = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register scratch1 = x13, scratch2 = x14;
+
+ __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+ __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
+
+ // Adjust for fp, lr, and the receiver.
+ __ Add(copy_from, fp, 3 * kPointerSize);
+ __ Add(copy_from, copy_from, argc_actual);
+ __ Mov(copy_to, jssp);
+ __ Sub(copy_end, copy_to, 1 * kPointerSize); // Adjust for the receiver.
+ __ Sub(copy_end, copy_end, argc_actual);
+
+ // Claim space for the arguments, the receiver, and one extra slot.
+ // The extra slot ensures we do not write under jssp. It will be popped
+ // later.
+ __ Add(scratch1, argc_expected, 2 * kPointerSize);
+ __ Claim(scratch1, 1);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_from, - 2 * kPointerSize, PreIndex));
+ __ Stp(scratch1, scratch2,
+ MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ __ Cmp(copy_to, copy_end);
+ __ B(hi, &copy_2_by_2);
+
+ __ Mov(copy_to, copy_end);
+
+ // Fill the remaining expected arguments with undefined.
+ __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+ __ Add(copy_end, jssp, kPointerSize);
+
+ Label fill;
+ __ Bind(&fill);
+ __ Stp(scratch1, scratch1,
+ MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ __ Cmp(copy_to, copy_end);
+ __ B(hi, &fill);
+
+ // Correct the space allocated for the extra slot.
+ __ Drop(1);
+ }
+
+ // Arguments have been adapted. Now call the entry point.
+ __ Bind(&invoke);
+ __ Call(code_entry);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+ // Call the entry point without adapting the arguments.
+ __ Bind(&dont_adapt_arguments);
+ __ Jump(code_entry);
+
+ __ Bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ Unreachable();
+ }
+}
+
+
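+// A minimal sketch of the Claim computation in both copy paths above (the
+// helper is a local assumption): space is reserved for the expected
+// arguments, the receiver, and one spare slot so that the paired stp
+// stores never write below jssp; the spare slot is dropped after copying.
+static inline int AdaptorClaimSlotsSketch(int expected_argc) {
+ return expected_argc + 2; // Receiver plus one spare slot.
+}
+
+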
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/code-stubs-arm64.cc b/chromium/v8/src/arm64/code-stubs-arm64.cc
new file mode 100644
index 00000000000..70ead443fd6
--- /dev/null
+++ b/chromium/v8/src/arm64/code-stubs-arm64.cc
@@ -0,0 +1,5555 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: function info
+ static Register registers[] = { x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: function
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void ToNumberStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
+}
+
+
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: array literals array
+ // x2: array literal index
+ // x1: constant elements
+ static Register registers[] = { x3, x2, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->register_param_representations_ = representations;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
+}
+
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: object literals array
+ // x2: object literal index
+ // x1: constant properties
+ // x0: object literal flags
+ static Register registers[] = { x3, x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
+}
+
+
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: feedback vector
+ // x3: call feedback slot
+ static Register registers[] = { x2, x3 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: length
+ // x1: index (of last match)
+ // x0: string
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: receiver
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void StringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { x0, x2 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: receiver
+ // x1: key
+ // x0: value
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value (js_array)
+ // x1: to_map
+ static Register registers[] = { x0, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value to compare
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: function
+ // x2: allocation site with elements kind
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x2, x0 };
+ static Register registers_no_args[] = { x1, x2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // The stack parameter count covers the constructor pointer and a single
+ // argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
+}
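+ // The stubs below pass 0 (no arguments), 1 (a single argument) and -1 (a
+ // variable number of arguments) for constant_stack_parameter_count; any
+ // non-zero value selects the variable-argument register set above.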
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(descriptor, -1);
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: constructor function
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x0 };
+ static Register registers_no_args[] = { x1 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // A stack parameter count is needed; the stack holds the constructor
+ // pointer and a single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x2: key (unused)
+ // x0: value
+ static Register registers[] = { x1, x2, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ // x3: target map
+ // x1: key
+ // x2: receiver
+ static Register registers[] = { x0, x3, x1, x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: allocation site
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ static PlatformCallInterfaceDescriptor default_descriptor =
+ PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ static PlatformCallInterfaceDescriptor noInlineDescriptor =
+ PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { x1, // JSFunction
+ cp, // context
+ x0, // actual number of arguments
+ x2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ x2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ x2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ x0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { x0, // callee
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ isolate()->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT((descriptor->register_param_count_ == 0) ||
+ x0.Is(descriptor->register_params_[param_count - 1]));
+
+ // Push arguments
+ MacroAssembler::PushPopQueue queue(masm);
+ for (int i = 0; i < param_count; ++i) {
+ queue.Queue(descriptor->register_params_[i]);
+ }
+ queue.PushQueued();
+
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ Ret();
+}
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label done;
+ Register input = source();
+ Register result = destination();
+ ASSERT(is_truncating());
+
+ ASSERT(result.Is64Bits());
+ ASSERT(jssp.Is(masm->StackPointer()));
+
+ int double_offset = offset();
+
+ DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
+ Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
+ Register scratch2 =
+ GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
+
+ __ Push(scratch1, scratch2);
+ // Account for saved regs if input is jssp.
+ if (input.is(jssp)) double_offset += 2 * kPointerSize;
+
+ if (!skip_fastpath()) {
+ __ Push(double_scratch);
+ if (input.is(jssp)) double_offset += 1 * kDoubleSize;
+ __ Ldr(double_scratch, MemOperand(input, double_offset));
+ // Try to convert with an FPU convert instruction. This handles all
+ // non-saturating cases.
+ __ TryConvertDoubleToInt64(result, double_scratch, &done);
+ __ Fmov(result, double_scratch);
+ } else {
+ __ Ldr(result, MemOperand(input, double_offset));
+ }
+
+ // If we reach here we need to manually convert the input to an int32.
+
+ // Extract the exponent.
+ Register exponent = scratch1;
+ __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
+ HeapNumber::kExponentBits);
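+ // (A double is laid out as 1 sign bit, 11 biased exponent bits
+ // (kExponentBits, with kExponentBias == 1023) and 52 mantissa bits
+ // (kMantissaBits), with an implicit leading 1 for normal values.)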
+
+ // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
+ // the mantissa gets shifted completely out of the int32_t result.
+ __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
+ __ CzeroX(result, ge);
+ __ B(ge, &done);
+
+ // The Fcvtzs sequence handles all cases except where the conversion causes
+ // signed overflow in the int64_t target. Since we've already handled
+ // exponents >= 84, we can guarantee that 63 <= exponent < 84.
+
+ if (masm->emit_debug_code()) {
+ __ Cmp(exponent, HeapNumber::kExponentBias + 63);
+ // Exponents less than this should have been handled by the Fcvt case.
+ __ Check(ge, kUnexpectedValue);
+ }
+
+ // Isolate the mantissa bits, and set the implicit '1'.
+ Register mantissa = scratch2;
+ __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
+ __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
+
+ // Negate the mantissa if necessary.
+ __ Tst(result, kXSignMask);
+ __ Cneg(mantissa, mantissa, ne);
+
+ // Shift the mantissa bits into the correct place. We know that we have to
+ // shift it left here, because exponent >= 63 >= kMantissaBits.
+ __ Sub(exponent, exponent,
+ HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
+ __ Lsl(result, mantissa, exponent);
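+ // For example, for the input -2^63: the biased exponent field is
+ // 1023 + 63, the mantissa (with its implicit 1) is 1 << 52 and is negated,
+ // and the shift amount is 63 - 52 = 11, so the Lsl produces
+ // -(1 << 63) == INT64_MIN, whose low 32 bits (0) match ToInt32(-2^63).
+ // Bits shifted above bit 63 simply fall off the top, which matches
+ // ToInt32's modulo-2^32 semantics.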
+
+ __ Bind(&done);
+ if (!skip_fastpath()) {
+ __ Pop(double_scratch);
+ }
+ __ Pop(scratch2, scratch1);
+ __ Ret();
+}
+
+
+// See call site for description.
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch,
+ FPRegister double_scratch,
+ Label* slow,
+ Condition cond) {
+ ASSERT(!AreAliased(left, right, scratch));
+ Label not_identical, return_equal, heap_number;
+ Register result = x0;
+
+ __ Cmp(right, left);
+ __ B(ne, &not_identical);
+
+ // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
+ // so we do the second-best thing: test it ourselves.
+ // The operands are identical and at least one of them is not a Smi, so
+ // neither of them is a Smi. If it's not a heap number, then return equal.
+ if ((cond == lt) || (cond == gt)) {
+ __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
+ ge);
+ } else {
+ Register right_type = scratch;
+ __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
+ &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if ((cond == le) || (cond == ge)) {
+ __ Cmp(right_type, ODDBALL_TYPE);
+ __ B(ne, &return_equal);
+ __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ Mov(result, GREATER);
+ } else {
+ // undefined >= undefined should fail.
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+ }
+ }
+
+ __ Bind(&return_equal);
+ if (cond == lt) {
+ __ Mov(result, GREATER); // Things aren't less than themselves.
+ } else if (cond == gt) {
+ __ Mov(result, LESS); // Things aren't greater than themselves.
+ } else {
+ __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ // Cases lt and gt have been handled earlier, and case ne is never seen, as
+ // it is handled in the parser (see Parser::ParseBinaryExpression). We are
+ // only concerned with cases ge, le and eq here.
+ if ((cond != lt) && (cond != gt)) {
+ ASSERT((cond == ge) || (cond == le) || (cond == eq));
+ __ Bind(&heap_number);
+ // Left and right are identical pointers to a heap number object. Return
+ // non-equal if the heap number is a NaN, and equal otherwise. Comparing
+ // the number to itself will set the overflow flag iff the number is NaN.
+ __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ Fcmp(double_scratch, double_scratch);
+ __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
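+ // (On ARM64, an unordered Fcmp result - that is, at least one operand is
+ // NaN - sets the C and V flags, so the 'vc' branch is only taken for
+ // ordered, non-NaN values.)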
+
+ if (cond == le) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+
+ // No fall through here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&not_identical);
+}
+
+
+// See call site for description.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_type,
+ Register right_type,
+ Register scratch) {
+ ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
+
+ if (masm->emit_debug_code()) {
+ // We assume that the arguments are not identical.
+ __ Cmp(left, right);
+ __ Assert(ne, kExpectedNonIdenticalObjects);
+ }
+
+ // If either operand is a JS object or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ Label right_non_object;
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, &right_non_object);
+
+ // Return non-zero - x0 already contains a non-zero pointer.
+ ASSERT(left.is(x0) || right.is(x0));
+ Label return_not_equal;
+ __ Bind(&return_not_equal);
+ __ Ret();
+
+ __ Bind(&right_non_object);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ Cmp(right_type, ODDBALL_TYPE);
+
+ // If right is not ODDBALL, test left. Otherwise, set eq condition.
+ __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
+
+ // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
+ // Otherwise, right or left is ODDBALL, so set a ge condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
+
+ __ B(ge, &return_not_equal);
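+ // (Ccmp(rn, op, nzcv, cond) performs the comparison only if 'cond' holds
+ // and otherwise sets the flags directly to 'nzcv'. The chain above
+ // therefore branches when either operand is an oddball, or when left_type
+ // is at least FIRST_SPEC_OBJECT_TYPE.)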
+
+ // Internalized strings are unique, so they can only be equal if they are the
+ // same object. We have already tested that case, so if left and right are
+ // both internalized strings, they cannot be equal.
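+ // Because kInternalizedTag and kStringTag are both zero, a value is an
+ // internalized string exactly when both mask bits below are clear, and
+ // Orr-ing the two instance types lets a single test cover both operands.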
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ __ Orr(scratch, left_type, right_type);
+ __ TestAndBranchIfAllClear(
+ scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
+}
+
+
+// See call site for description.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ FPRegister left_d,
+ FPRegister right_d,
+ Register scratch,
+ Label* slow,
+ bool strict) {
+ ASSERT(!AreAliased(left, right, scratch));
+ ASSERT(!AreAliased(left_d, right_d));
+ ASSERT((left.is(x0) && right.is(x1)) ||
+ (right.is(x0) && left.is(x1)));
+ Register result = x0;
+
+ Label right_is_smi, done;
+ __ JumpIfSmi(right, &right_is_smi);
+
+ // Left is the smi. Check whether right is a heap number.
+ if (strict) {
+ // If right is not a number and left is a smi, then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!right.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Left is the smi. Right is a heap number. Load right value into right_d, and
+ // convert left smi into double in left_d.
+ __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(left_d, left);
+ __ B(&done);
+
+ __ Bind(&right_is_smi);
+ // Right is a smi. Check whether the non-smi left is a heap number.
+ if (strict) {
+ // If left is not a number and right is a smi then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!left.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Right is the smi. Left is a heap number. Load left value into left_d, and
+ // convert right smi into double in right_d.
+ __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(right_d, right);
+
+ // Fall through to both_loaded_as_doubles.
+ __ Bind(&done);
+}
+
+
+// Fast negative check for internalized-to-internalized equality.
+// See call site for description.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_map,
+ Register right_map,
+ Register left_type,
+ Register right_type,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
+ Register result = x0;
+
+ Label object_test;
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ // TODO(all): reexamine this branch sequence for optimisation wrt branch
+ // prediction.
+ __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
+ __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+
+ // Both are internalized. We already checked that they weren't the same
+ // pointer, so they are not equal.
+ __ Mov(result, NOT_EQUAL);
+ __ Ret();
+
+ __ Bind(&object_test);
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+
+ // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
+ // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
+
+ __ B(lt, not_both_strings);
+
+ // If both objects are undetectable, they are equal. Otherwise, they are not
+ // equal, since they are different objects and an object is not equal to
+ // undefined.
+
+ // Returning here, so we can corrupt right_type and left_type.
+ Register right_bitfield = right_type;
+ Register left_bitfield = left_type;
+ __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
+ __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
+ __ And(result, right_bitfield, left_bitfield);
+ __ And(result, result, 1 << Map::kIsUndetectable);
+ __ Eor(result, result, 1 << Map::kIsUndetectable);
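+ // If both bitfields have kIsUndetectable set, the And/Eor sequence above
+ // leaves 0 in result (EQUAL); otherwise it leaves the non-zero value
+ // 1 << Map::kIsUndetectable, which is a valid NOT_EQUAL result.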
+ __ Ret();
+}
+
+
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ Bind(&ok);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = x1;
+ Register rhs = x0;
+ Register result = x0;
+ Condition cond = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+ Label not_two_smis, smi_done;
+ __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
+ __ SmiUntag(lhs);
+ __ Sub(result, lhs, Operand::UntagSmi(rhs));
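+ // The untagged difference lhs - rhs is negative, zero or positive exactly
+ // when lhs is less than, equal to or greater than rhs, which is how the
+ // stub's result is interpreted (LESS == -1, EQUAL == 0, GREATER == 1).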
+ __ Ret();
+
+ __ Bind(&not_two_smis);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so it is
+ // certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
+
+ // If either is a smi (we know that at least one is not a smi), then they can
+ // only be strictly equal if the other is a HeapNumber.
+ __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
+
+ // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
+ // can:
+ // 1) Return the answer.
+ // 2) Branch to the slow case.
+ // 3) Fall through to both_loaded_as_doubles.
+ // In case 3, we have found out that we were dealing with a number-number
+ // comparison. The double values of the numbers have been loaded: the right
+ // operand into rhs_d and the left operand into lhs_d.
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+ EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
+
+ __ Bind(&both_loaded_as_doubles);
+ // The arguments have been converted to doubles and stored in rhs_d and
+ // lhs_d.
+ Label nan;
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &nan); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&nan);
+ // Left and/or right is a NaN. Load the result register with whatever makes
+ // the comparison fail, since comparisons with NaN always fail (except ne,
+ // which is filtered out at a higher level.)
+ ASSERT(cond != ne);
+ if ((cond == lt) || (cond == le)) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+
+ __ Bind(&not_smis);
+ // At this point we know we are dealing with two different objects, and
+ // neither of them is a smi. The objects are in rhs_ and lhs_.
+
+ // Load the maps and types of the objects.
+ Register rhs_map = x10;
+ Register rhs_type = x11;
+ Register lhs_map = x12;
+ Register lhs_type = x13;
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+
+ if (strict()) {
+ // This emits a non-equal return sequence for some object types, or falls
+ // through if it was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
+ }
+
+ Label check_for_internalized_strings;
+ Label flat_string_check;
+ // Check for heap number comparison. Branch to the earlier double comparison
+ // code if they are heap numbers; otherwise, branch to the internalized
+ // string check.
+ __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
+ __ B(ne, &check_for_internalized_strings);
+ __ Cmp(lhs_map, rhs_map);
+
+ // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
+ // string check.
+ __ B(ne, &flat_string_check);
+
+ // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
+ // comparison code.
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ B(&both_loaded_as_doubles);
+
+ __ Bind(&check_for_internalized_strings);
+ // In the strict case, EmitStrictTwoHeapObjectCompare has already taken care
+ // of internalized strings.
+ if ((cond == eq) && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
+ // Otherwise branches to the string case or the not-both-strings case.
+ EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
+ lhs_type, rhs_type,
+ &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ Bind(&flat_string_check);
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
+ x15, &slow);
+
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
+ x11);
+ if (cond == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
+ x10, x11, x12);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
+ x10, x11, x12, x13);
+ }
+
+ // Never fall through to here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&slow);
+
+ __ Push(lhs, rhs);
+ // Figure out which native to call and set up the arguments.
+ Builtins::JavaScript native;
+ if (cond == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if ((cond == lt) || (cond == le)) {
+ ncr = GREATER;
+ } else {
+ ASSERT((cond == gt) || (cond == ge)); // remaining cases
+ ncr = LESS;
+ }
+ __ Mov(x10, Smi::FromInt(ncr));
+ __ Push(x10);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ CPURegList saved_regs = kCallerSaved;
+ CPURegList saved_fp_regs = kCallerSavedFP;
+
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+
+ // We don't care if MacroAssembler scratch registers are corrupted.
+ saved_regs.Remove(*(masm->TmpList()));
+ saved_fp_regs.Remove(*(masm->FPTmpList()));
+
+ __ PushCPURegList(saved_regs);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushCPURegList(saved_fp_regs);
+ }
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(x0, ExternalReference::isolate_address(isolate()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
+
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopCPURegList(saved_fp_regs);
+ }
+ __ PopCPURegList(saved_regs);
+ __ Ret();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ UseScratchRegisterScope temps(masm);
+ Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
+ Register return_address = temps.AcquireX();
+ __ Mov(return_address, lr);
+ // Restore lr with the value it had before the call to this stub (the value
+ // which must be pushed).
+ __ Mov(lr, saved_lr);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushSafepointRegistersAndDoubles();
+ } else {
+ __ PushSafepointRegisters();
+ }
+ __ Ret(return_address);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ UseScratchRegisterScope temps(masm);
+ Register return_address = temps.AcquireX();
+ // Preserve the return address (lr will be clobbered by the pop).
+ __ Mov(return_address, lr);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopSafepointRegistersAndDoubles();
+ } else {
+ __ PopSafepointRegisters();
+ }
+ __ Ret(return_address);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ // Stack on entry:
+ // jssp[0]: Exponent (as a tagged value).
+ // jssp[1]: Base (as a tagged value).
+ //
+ // The (tagged) result will be returned in x0, as a heap number.
+
+ Register result_tagged = x0;
+ Register base_tagged = x10;
+ Register exponent_tagged = x11;
+ Register exponent_integer = x12;
+ Register scratch1 = x14;
+ Register scratch0 = x15;
+ Register saved_lr = x19;
+ FPRegister result_double = d0;
+ FPRegister base_double = d0;
+ FPRegister exponent_double = d1;
+ FPRegister base_double_copy = d2;
+ FPRegister scratch1_double = d6;
+ FPRegister scratch0_double = d7;
+
+ // A fast-path for integer exponents.
+ Label exponent_is_smi, exponent_is_integer;
+ // Bail out to runtime.
+ Label call_runtime;
+ // Allocate a heap number for the result, and return it.
+ Label done;
+
+ // Unpack the inputs.
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi;
+ Label unpack_exponent;
+
+ __ Pop(exponent_tagged, base_tagged);
+
+ __ JumpIfSmi(base_tagged, &base_is_smi);
+ __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
+ // base_tagged is a heap number, so load its double value.
+ __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
+ __ B(&unpack_exponent);
+ __ Bind(&base_is_smi);
+ // base_tagged is a SMI, so untag it and convert it to a double.
+ __ SmiUntagToDouble(base_double, base_tagged);
+
+ __ Bind(&unpack_exponent);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // d1 base_double The base as a double.
+ __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+ __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
+ // exponent_tagged is a heap number, so load its double value.
+ __ Ldr(exponent_double,
+ FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+ __ Ldr(exponent_double,
+ FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+ }
+
+ // Handle double (heap number) exponents.
+ if (exponent_type_ != INTEGER) {
+ // Detect integer exponents stored as doubles and handle those in the
+ // integer fast-path.
+ __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
+ scratch0_double, &exponent_is_integer);
+
+ if (exponent_type_ == ON_STACK) {
+ FPRegister half_double = d3;
+ FPRegister minus_half_double = d4;
+ // Detect square root case. Crankshaft detects constant +/-0.5 at compile
+ // time and uses DoMathPowHalf instead. We then skip this check for
+ // non-constant cases of +/-0.5 as these hardly occur.
+
+ __ Fmov(minus_half_double, -0.5);
+ __ Fmov(half_double, 0.5);
+ __ Fcmp(minus_half_double, exponent_double);
+ __ Fccmp(half_double, exponent_double, NZFlag, ne);
+ // Condition flags at this point:
+ // 0.5; nZCv // Identified by eq && pl
+ // -0.5: NZcv // Identified by eq && mi
+ // other: ?z?? // Identified by ne
+ __ B(ne, &call_runtime);
+
+ // The exponent is 0.5 or -0.5.
+
+ // Given that exponent is known to be either 0.5 or -0.5, the following
+ // special cases could apply (according to ECMA-262 15.8.2.13):
+ //
+ // base.isNaN(): The result is NaN.
+ // (base == +INFINITY) || (base == -INFINITY)
+ // exponent == 0.5: The result is +INFINITY.
+ // exponent == -0.5: The result is +0.
+ // (base == +0) || (base == -0)
+ // exponent == 0.5: The result is +0.
+ // exponent == -0.5: The result is +INFINITY.
+ // (base < 0) && base.isFinite(): The result is NaN.
+ //
+ // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
+ // where base is -INFINITY or -0.
+
+ // Add +0 to base. This has no effect other than turning -0 into +0.
+ __ Fadd(base_double, base_double, fp_zero);
+ // The operation -0+0 results in +0 in all cases except where the
+ // FPCR rounding mode is 'round towards minus infinity' (RM). The
+ // ARM64 simulator does not currently simulate FPCR (where the rounding
+ // mode is set), so test the operation with some debug code.
+ if (masm->emit_debug_code()) {
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+ __ Fneg(scratch0_double, fp_zero);
+ // Verify that we correctly generated +0.0 and -0.0.
+ // bits(+0.0) = 0x0000000000000000
+ // bits(-0.0) = 0x8000000000000000
+ __ Fmov(temp, fp_zero);
+ __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
+ __ Fmov(temp, scratch0_double);
+ __ Eor(temp, temp, kDSignMask);
+ __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
+ // Check that -0.0 + 0.0 == +0.0.
+ __ Fadd(scratch0_double, scratch0_double, fp_zero);
+ __ Fmov(temp, scratch0_double);
+ __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
+ }
+
+ // If base is -INFINITY, make it +INFINITY.
+ // * Calculate base - base: All infinities will become NaNs since both
+ // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
+ // * If the result is NaN, calculate abs(base).
+ __ Fsub(scratch0_double, base_double, base_double);
+ __ Fcmp(scratch0_double, 0.0);
+ __ Fabs(scratch1_double, base_double);
+ __ Fcsel(base_double, scratch1_double, base_double, vs);
+
+ // Calculate the square root of base.
+ __ Fsqrt(result_double, base_double);
+ __ Fcmp(exponent_double, 0.0);
+ __ B(ge, &done); // Finish now for exponents of 0.5.
+ // Find the inverse for exponents of -0.5.
+ __ Fmov(scratch0_double, 1.0);
+ __ Fdiv(result_double, scratch0_double, result_double);
+ __ B(&done);
+ }
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ B(&done);
+ }
+
+ // Handle SMI exponents.
+ __ Bind(&exponent_is_smi);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // d1 base_double The base as a double.
+ __ SmiUntag(exponent_integer, exponent_tagged);
+ }
+
+ __ Bind(&exponent_is_integer);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // x12 exponent_integer The exponent as an integer.
+ // d1 base_double The base as a double.
+
+ // Find abs(exponent). For negative exponents, we can find the inverse later.
+ Register exponent_abs = x13;
+ __ Cmp(exponent_integer, 0);
+ __ Cneg(exponent_abs, exponent_integer, mi);
+ // x13 exponent_abs The value of abs(exponent_integer).
+
+ // Repeatedly multiply to calculate the power.
+ // result = 1.0;
+ // For each bit n (exponent_integer{n}) {
+ // if (exponent_integer{n}) {
+ // result *= base;
+ // }
+ // base *= base;
+ // if (remaining bits in exponent_integer are all zero) {
+ // break;
+ // }
+ // }
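+ //
+ // For example, for exponent_abs == 5 (binary 101): bit 0 multiplies
+ // result by base, bit 1 is clear, and bit 2 multiplies result by the
+ // twice-squared base (base^4), giving result == base^5.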
+ Label power_loop, power_loop_entry, power_loop_exit;
+ __ Fmov(scratch1_double, base_double);
+ __ Fmov(base_double_copy, base_double);
+ __ Fmov(result_double, 1.0);
+ __ B(&power_loop_entry);
+
+ __ Bind(&power_loop);
+ __ Fmul(scratch1_double, scratch1_double, scratch1_double);
+ __ Lsr(exponent_abs, exponent_abs, 1);
+ __ Cbz(exponent_abs, &power_loop_exit);
+
+ __ Bind(&power_loop_entry);
+ __ Tbz(exponent_abs, 0, &power_loop);
+ __ Fmul(result_double, result_double, scratch1_double);
+ __ B(&power_loop);
+
+ __ Bind(&power_loop_exit);
+
+ // If the exponent was positive, result_double holds the result.
+ __ Tbz(exponent_integer, kXSignBit, &done);
+
+ // The exponent was negative, so find the inverse.
+ __ Fmov(scratch0_double, 1.0);
+ __ Fdiv(result_double, scratch0_double, result_double);
+ // ECMA-262 only requires Math.pow to return an 'implementation-dependent
+ // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
+ // to calculate the subnormal value 2^-1074. This method of calculating
+ // negative powers doesn't work because 2^1074 overflows to infinity. To
+ // catch this corner-case, we bail out if the result was 0. (This can only
+ // occur if the divisor is infinity or the base is zero.)
+ __ Fcmp(result_double, 0.0);
+ __ B(&done, ne);
+
+ if (exponent_type_ == ON_STACK) {
+ // Bail out to runtime code.
+ __ Bind(&call_runtime);
+ // Put the arguments back on the stack.
+ __ Push(base_tagged, exponent_tagged);
+ __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
+
+ // Return.
+ __ Bind(&done);
+ __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
+ result_double);
+ ASSERT(result_tagged.is(x0));
+ __ IncrementCounter(
+ isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ __ Ret();
+ } else {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ Fmov(base_double, base_double_copy);
+ __ Scvtf(exponent_double, exponent_integer);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ Bind(&done);
+ __ IncrementCounter(
+ isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ __ Ret();
+ }
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ // It is important that the following stubs are generated in this order
+ // because pregenerated stubs can only call other pregenerated stubs.
+ // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
+ // CEntryStub.
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Floating-point code doesn't get special handling in ARM64, so there's
+ // nothing to do here.
+ USE(isolate);
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ // CEntryStub stores the return address on the stack before calling into
+ // C++ code. In some cases, the VM accesses this address, but it is not used
+ // when the C++ code returns to the stub because LR holds the return address
+ // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
+ // returning to dead code.
+ // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
+ // find any comment to confirm this, and I don't hit any crashes whatever
+ // this function returns. The analysis should be properly confirmed.
+ return true;
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
+ CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
+ stub_fp.GetCode();
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // The Abort mechanism relies on CallRuntime, which in turn relies on
+ // CEntryStub, so until this stub has been generated, we have to use a
+ // fall-back Abort mechanism.
+ //
+ // Note that this stub must be generated before any use of Abort.
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+
+ ASM_LOCATION("CEntryStub::Generate entry");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Register parameters:
+ // x0: argc (including receiver, untagged)
+ // x1: target
+ //
+ // The stack on entry holds the arguments and the receiver, with the receiver
+ // at the highest address:
+ //
+ // jssp[argc-1]: receiver
+ // jssp[argc-2]: arg[argc-2]
+ // ... ...
+ // jssp[1]: arg[1]
+ // jssp[0]: arg[0]
+ //
+ // The arguments are in reverse order, so that arg[argc-2] is actually the
+ // first argument to the target function and arg[0] is the last.
+ ASSERT(jssp.Is(__ StackPointer()));
+ const Register& argc_input = x0;
+ const Register& target_input = x1;
+
+ // Calculate argv, argc and the target address, and store them in
+ // callee-saved registers so we can retry the call without having to reload
+ // these arguments.
+ // TODO(jbramley): If the first call attempt succeeds in the common case (as
+ // it should), then we might be better off putting these parameters directly
+ // into their argument registers, rather than using callee-saved registers and
+ // preserving them on the stack.
+ const Register& argv = x21;
+ const Register& argc = x22;
+ const Register& target = x23;
+
+ // Derive argv from the stack pointer so that it points to the first argument
+ // (arg[argc-2]), or just below the receiver in case there are no arguments.
+ // - Adjust for the arg[] array.
+ Register temp_argv = x11;
+ __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ // - Adjust for the receiver.
+ __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+
+ // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
+ // registers.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles_, x10, 3);
+ ASSERT(csp.Is(__ StackPointer()));
+
+ // Poke callee-saved registers into reserved space.
+ __ Poke(argv, 1 * kPointerSize);
+ __ Poke(argc, 2 * kPointerSize);
+ __ Poke(target, 3 * kPointerSize);
+
+ // We normally only keep tagged values in callee-saved registers, as they
+ // could be pushed onto the stack by called stubs and functions, and on the
+ // stack they can confuse the GC. However, we're only calling C functions
+ // which can push arbitrary data onto the stack anyway, and so the GC won't
+ // examine that part of the stack.
+ __ Mov(argc, argc_input);
+ __ Mov(target, target_input);
+ __ Mov(argv, temp_argv);
+
+ // x21 : argv
+ // x22 : argc
+ // x23 : call target
+ //
+ // The stack (on entry) holds the arguments and the receiver, with the
+ // receiver at the highest address:
+ //
+ // argv[8]: receiver
+ // argv -> argv[0]: arg[argc-2]
+ // ... ...
+ // argv[...]: arg[1]
+ // argv[...]: arg[0]
+ //
+ // Immediately below (after) this is the exit frame, as constructed by
+ // EnterExitFrame:
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // csp[...]: Saved doubles, if saved_doubles is true.
+ // csp[32]: Alignment padding, if necessary.
+ // csp[24]: Preserved x23 (used for target).
+ // csp[16]: Preserved x22 (used for argc).
+ // csp[8]: Preserved x21 (used for argv).
+ // csp -> csp[0]: Space reserved for the return address.
+ //
+ // After a successful call, the exit frame, preserved registers (x21-x23) and
+ // the arguments (including the receiver) are dropped or popped as
+ // appropriate. The stub then returns.
+ //
+ // After an unsuccessful call, the exit frame and suchlike are left
+ // untouched, and the stub throws an exception by jumping to the
+ // exception_returned label.
+
+ ASSERT(csp.Is(__ StackPointer()));
+
+ // Prepare AAPCS64 arguments to pass to the builtin.
+ __ Mov(x0, argc);
+ __ Mov(x1, argv);
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
+
+ Label return_location;
+ __ Adr(x12, &return_location);
+ __ Poke(x12, 0);
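+ // x12 now holds the address of the return_location label; it is stored in
+ // the slot reserved for the return address at the top of the exit frame
+ // (csp[0] in the layout above).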
+
+ if (__ emit_debug_code()) {
+ // Verify that the slot just below the address stored at fp[kSPOffset]
+ // holds the return location (currently in x12).
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+ __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
+ __ Cmp(temp, x12);
+ __ Check(eq, kReturnAddressNotFoundInFrame);
+ }
+
+ // Call the builtin.
+ __ Blr(target);
+ __ Bind(&return_location);
+
+ // x0 result The return code from the call.
+ // x21 argv
+ // x22 argc
+ // x23 target
+ const Register& result = x0;
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ CompareRoot(result, Heap::kExceptionRootIndex);
+ __ B(eq, &exception_returned);
+
+ // The call succeeded, so unwind the stack and return.
+
+ // Restore callee-saved registers x21-x23.
+ __ Mov(x11, argc);
+
+ __ Peek(argv, 1 * kPointerSize);
+ __ Peek(argc, 2 * kPointerSize);
+ __ Peek(target, 3 * kPointerSize);
+
+ __ LeaveExitFrame(save_doubles_, x10, true);
+ ASSERT(jssp.Is(__ StackPointer()));
+ // Pop or drop the remaining stack slots and return from the stub.
+ // jssp[24]: Arguments array (of size argc), including receiver.
+ // jssp[16]: Preserved x23 (used for target).
+ // jssp[8]: Preserved x22 (used for argc).
+ // jssp[0]: Preserved x21 (used for argv).
+ __ Drop(x11);
+ __ AssertFPCRState();
+ __ Ret();
+
+ // The stack pointer is still csp if we aren't returning, and the frame
+ // hasn't changed (except for the return address).
+ __ SetStackPointer(csp);
+
+ // Handling of exception.
+ __ Bind(&exception_returned);
+
+ // Retrieve the pending exception.
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+ const Register& exception = result;
+ const Register& exception_address = x11;
+ __ Mov(exception_address, Operand(pending_exception_address));
+ __ Ldr(exception, MemOperand(exception_address));
+
+ // Clear the pending exception.
+ __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
+ __ Str(x10, MemOperand(exception_address));
+
+ // x0 exception The exception descriptor.
+ // x21 argv
+ // x22 argc
+ // x23 target
+
+ // Special handling of termination exceptions, which are uncatchable by
+ // JavaScript code.
+ Label throw_termination_exception;
+ __ Cmp(exception, Operand(isolate()->factory()->termination_exception()));
+ __ B(eq, &throw_termination_exception);
+
+ // We didn't execute a return case, so the stack frame hasn't been updated
+ // (except for the return address slot). However, we don't need to initialize
+ // jssp because the throw method will immediately overwrite it when it
+ // unwinds the stack.
+ __ SetStackPointer(jssp);
+
+ ASM_LOCATION("Throw normal");
+ __ Mov(argv, 0);
+ __ Mov(argc, 0);
+ __ Mov(target, 0);
+ __ Throw(x0, x10, x11, x12, x13);
+
+ __ Bind(&throw_termination_exception);
+ ASM_LOCATION("Throw termination");
+ __ Mov(argv, 0);
+ __ Mov(argc, 0);
+ __ Mov(target, 0);
+ __ ThrowUncatchable(x0, x10, x11, x12, x13);
+}
+
+
+// This is the entry point from C++. 5 arguments are provided in x0-x4.
+// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
+// Input:
+// x0: code entry.
+// x1: function.
+// x2: receiver.
+// x3: argc.
+// x4: argv.
+// Output:
+// x0: result.
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ ASSERT(jssp.Is(__ StackPointer()));
+ Register code_entry = x0;
+
+ // Enable instruction instrumentation. This only works on the simulator, and
+ // will have no effect on the model or real hardware.
+ __ EnableInstrumentation();
+
+ Label invoke, handler_entry, exit;
+
+ // Push callee-saved registers and synchronize the system stack pointer (csp)
+ // and the JavaScript stack pointer (jssp).
+ //
+ // We must not write to jssp until after the PushCalleeSavedRegisters()
+ // call, since jssp is itself a callee-saved register.
+ __ SetStackPointer(csp);
+ __ PushCalleeSavedRegisters();
+ __ Mov(jssp, csp);
+ __ SetStackPointer(jssp);
+
+ // Configure the FPCR. We don't restore it, so this is technically not allowed
+ // according to AAPCS64. However, we only set default-NaN mode and this will
+ // be harmless for most C code. Also, it works for ARM.
+ __ ConfigureFPCR();
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Set up the reserved register for 0.0.
+ __ Fmov(fp_zero, 0.0);
+
+ // Build an entry frame (see layout below).
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
+ __ Mov(x13, bad_frame_pointer);
+ __ Mov(x12, Smi::FromInt(marker));
+ __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
+ __ Ldr(x10, MemOperand(x11));
+
+ __ Push(x13, xzr, x12, x10);
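+ // (Push stores its first operand at the highest address, so the slots
+ // are, from low to high: x10 (C entry FP), x12 (frame-type marker), xzr
+ // and x13 (bad frame pointer); see the layout comment below.)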
+ // Set up fp.
+ __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
+
+ // Push the JS entry frame marker. Also set js_entry_sp if this is the
+ // outermost JS call.
+ Label non_outermost_js, done;
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ __ Mov(x10, ExternalReference(js_entry_sp));
+ __ Ldr(x11, MemOperand(x10));
+ __ Cbnz(x11, &non_outermost_js);
+ __ Str(fp, MemOperand(x10));
+ __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Push(x12);
+ __ B(&done);
+ __ Bind(&non_outermost_js);
+ // We spare one instruction by pushing xzr since the marker is 0.
+ ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+ __ Push(xzr);
+ __ Bind(&done);
+
+ // The frame set up looks like this:
+ // jssp[0] : JS entry frame marker.
+ // jssp[1] : C entry FP.
+ // jssp[2] : stack frame marker.
+ // jssp[3] : stack frame marker.
+ // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ B(&invoke);
+
+ // Prevent the constant pool from being emitted between the record of the
+ // handler_entry position and the first instruction of the sequence here.
+ // There is no risk because Assembler::Emit() emits the instruction before
+ // checking for constant pool emission, but we do not want to depend on
+ // that.
+ {
+ Assembler::BlockPoolsScope block_pools(masm);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+ }
+ __ Str(code_entry, MemOperand(x10));
+ __ LoadRoot(x0, Heap::kExceptionRootIndex);
+ __ B(&exit);
+
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
+ __ Bind(&invoke);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the B(&invoke) above, which
+ // restores all callee-saved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
+ __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+ __ Str(x10, MemOperand(x11));
+
+ // Invoke the function by calling through the JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // x0: code entry.
+ // x1: function.
+ // x2: receiver.
+ // x3: argc.
+ // x4: argv.
+ ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
+ : Builtins::kJSEntryTrampoline,
+ isolate());
+ __ Mov(x10, entry);
+
+ // Call the JSEntryTrampoline.
+ __ Ldr(x11, MemOperand(x10)); // Dereference the address.
+ __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+ __ Blr(x12);
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+
+ __ Bind(&exit);
+ // x0 holds the result.
+ // The stack pointer points to the top of the entry frame pushed on entry from
+ // C++ (at the beginning of this stub):
+ // jssp[0] : JS entry frame marker.
+ // jssp[1] : C entry FP.
+ // jssp[2] : stack frame marker.
+ // jssp[3] : stack frame marker.
+ // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ Pop(x10);
+ __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ B(ne, &non_outermost_js_2);
+ __ Mov(x11, ExternalReference(js_entry_sp));
+ __ Str(xzr, MemOperand(x11));
+ __ Bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ Pop(x10);
+ __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
+ __ Str(x10, MemOperand(x11));
+
+ // Reset the stack to the callee saved registers.
+ __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
+ // Restore the callee-saved registers and return.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+ __ PopCalleeSavedRegisters();
+ // After this point, we must not modify jssp because it is a callee-saved
+ // register which we have just restored.
+ __ Ret();
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x1 : receiver
+ // -- x0 : key
+ // -----------------------------------
+ Register key = x0;
+ receiver = x1;
+ __ Cmp(key, Operand(isolate()->factory()->prototype_string()));
+ __ B(ne, &miss);
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : name
+ // -- x0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = x0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
+
+ __ Bind(&miss);
+ StubCompiler::TailCallBuiltin(masm,
+ BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Stack on entry:
+ // jssp[0]: function.
+ // jssp[8]: object.
+ //
+ // Returns result in x0. Zero indicates that the object is an instance;
+ // smi 1 indicates that it is not.
+
+ Register result = x0;
+ Register function = right();
+ Register object = left();
+ Register scratch1 = x6;
+ Register scratch2 = x7;
+ Register res_true = x8;
+ Register res_false = x9;
+ // Only used if there was an inline map check site. (See
+ // LCodeGen::DoInstanceOfKnownGlobal().)
+ Register map_check_site = x4;
+ // Delta for the instructions generated between the inline map check and the
+ // instruction setting the result.
+ const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
+
+ Label not_js_object, slow;
+
+ if (!HasArgsInRegisters()) {
+ __ Pop(function, object);
+ }
+
+ if (ReturnTrueFalseObject()) {
+ __ LoadTrueFalseRoots(res_true, res_false);
+ } else {
+ // This is counter-intuitive but correct: as noted above, zero means that
+ // the object is an instance.
+ __ Mov(res_true, Smi::FromInt(0));
+ __ Mov(res_false, Smi::FromInt(1));
+ }
+
+ // Check that the left hand side is a JS object and load its map as a side
+ // effect.
+ Register map = x12;
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
+
+ // If there is a call site cache, don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
+ __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
+ __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
+ __ Bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ Register prototype = x13;
+ __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
+ MacroAssembler::kMissOnBoundFunction);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (HasCallSiteInlineCheck()) {
+ // Patch the (relocated) inlined map check.
+ __ GetRelocatedValueLocation(map_check_site, scratch1);
+ // We have a cell, so need another level of dereferencing.
+ __ Ldr(scratch1, MemOperand(scratch1));
+ __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
+ } else {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ }
+
+ Label return_true, return_result;
+ {
+ // Loop through the prototype chain looking for the function prototype.
+ Register chain_map = x1;
+ Register chain_prototype = x14;
+ Register null_value = x15;
+ Label loop;
+ __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ // Speculatively set a result.
+ __ Mov(result, res_false);
+
+ __ Bind(&loop);
+
+ // If the chain prototype is the object prototype, return true.
+ __ Cmp(chain_prototype, prototype);
+ __ B(eq, &return_true);
+
+ // If the chain prototype is null, we've reached the end of the chain, so
+ // return false.
+ __ Cmp(chain_prototype, null_value);
+ __ B(eq, &return_result);
+
+ // Otherwise, load the next prototype in the chain, and loop.
+ __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
+ __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
+ __ B(&loop);
+ }
+
+ // Return sequence when no arguments are on the stack.
+ // We cannot fall through to here.
+ __ Bind(&return_true);
+ __ Mov(result, res_true);
+ __ Bind(&return_result);
+ if (HasCallSiteInlineCheck()) {
+ ASSERT(ReturnTrueFalseObject());
+ __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
+ __ GetRelocatedValueLocation(map_check_site, scratch2);
+ __ Str(result, MemOperand(scratch2));
+ } else {
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ }
+ __ Ret();
+
+ Label object_not_null, object_not_null_or_smi;
+
+ __ Bind(&not_js_object);
+ Register object_type = x14;
+ // x0 result result return register (uninit)
+ // x10 function pointer to function
+ // x11 object pointer to object
+ // x14 object_type type of object (uninit)
+
+ // Before null, smi and string checks, check that the rhs is a function.
+ // For a non-function rhs, an exception must be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ JumpIfNotObjectType(
+ function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
+
+ __ Mov(result, res_false);
+
+ // Null is not an instance of anything.
+ __ Cmp(object_type, Operand(isolate()->factory()->null_value()));
+ __ B(ne, &object_not_null);
+ __ Ret();
+
+ __ Bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ Ret();
+
+ __ Bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch2, &slow);
+ __ Ret();
+
+ // Slow-case. Tail call builtin.
+ __ Bind(&slow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments have either been passed into registers or have been previously
+ // popped. We need to push them before calling the builtin.
+ __ Push(object, function);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ if (ReturnTrueFalseObject()) {
+ // Reload true/false because they were clobbered in the builtin call.
+ __ LoadTrueFalseRoots(res_true, res_false);
+ __ Cmp(result, 0);
+ __ Csel(result, res_true, res_false, eq);
+ }
+ __ Ret();
+}
+
+
+Register InstanceofStub::left() {
+ // Object to check (instanceof lhs).
+ return x11;
+}
+
+
+Register InstanceofStub::right() {
+ // Constructor function (instanceof rhs).
+ return x10;
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ Register arg_count = x0;
+ Register key = x1;
+
+ // The displacement is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
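+
+ // A worked example of the arithmetic below (a sketch, assuming 8-byte
+ // pointers and the standard frame layout, where kCallerSPOffset is
+ // 2 * kPointerSize): kDisplacement == 16 - 8 == 8. With arg_count == 3 and
+ // key == 2 (the last parameter), the code computes
+ // x10 == local_fp + (3 - 2) * 8, so the final load reads from
+ // x10 + 8 == local_fp + 16, i.e. the caller's SP, which holds the last
+ // pushed argument.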
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(key, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register local_fp = x11;
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label skip_adaptor;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Csel(local_fp, fp, caller_fp, ne);
+ __ B(ne, &skip_adaptor);
+
+ // Load the actual arguments limit found in the arguments adaptor frame.
+ __ Ldr(arg_count, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Bind(&skip_adaptor);
+
+ // Check the key against the formal parameter count limit. Use an unsigned
+ // comparison to get the negative check for free: branch if key < 0 or
+ // key >= arg_count.
+ __ Cmp(key, arg_count);
+ __ B(hs, &slow);
+
+ // Read the argument from the stack and return it.
+ __ Sub(x10, arg_count, key);
+ __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
+ __ Ldr(x0, MemOperand(x10, kDisplacement));
+ __ Ret();
+
+ // Slow case: handle non-smi or out-of-bounds access to arguments by calling
+ // the runtime system.
+ __ Bind(&slow);
+ __ Push(key);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Register caller_fp = x10;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ // Load and untag the context.
+ STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
+ __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
+ (kSmiShift / kBitsPerByte)));
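+ // (Loading a W register at kContextOffset + 4 picks up the upper 32 bits of
+ // the tagged smi directly; on this little-endian target that is the
+ // untagged value, since kSmiShift == 32.)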
+ __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
+ __ B(ne, &runtime);
+
+ // Patch the arguments.length and parameters pointer in the current frame.
+ __ Ldr(x11, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Poke(x11, 0 * kXRegSize);
+ __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
+ __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
+ __ Poke(x10, 1 * kXRegSize);
+
+ __ Bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Note: arg_count_smi is an alias of param_count_smi.
+ Register arg_count_smi = x3;
+ Register param_count_smi = x3;
+ Register param_count = x7;
+ Register recv_arg = x14;
+ Register function = x4;
+ __ Pop(param_count_smi, recv_arg, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+
+ // x1 mapped_params number of mapped params, min(params, args) (uninit)
+ // x2 arg_count number of function arguments (uninit)
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 caller_fp caller's frame pointer
+ // x14 recv_arg pointer to receiver arguments
+
+ Register arg_count = x2;
+ __ Mov(arg_count, param_count);
+ __ B(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ Bind(&adaptor_frame);
+ __ Ldr(arg_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(arg_count, arg_count_smi);
+ __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Compute the mapped parameter count = min(param_count, arg_count)
+ Register mapped_params = x1;
+ __ Cmp(param_count, arg_count);
+ __ Csel(mapped_params, param_count, arg_count, lt);
+
+ __ Bind(&try_allocate);
+
+ // x0 alloc_obj pointer to allocated objects: param map, backing
+ // store, arguments (uninit)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x10 size size of objects to allocate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ // Compute the size of backing store, parameter map, and arguments object.
+ // 1. The parameter map, which has two extra words containing the context
+ // and the backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+
+ // Calculate the parameter map size, assuming it exists.
+ Register size = x10;
+ __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(size, size, kParameterMapHeaderSize);
+
+ // If there are no mapped parameters, set the running size total to zero.
+ // Otherwise, use the parameter map size calculated earlier.
+ __ Cmp(mapped_params, 0);
+ __ CzeroX(size, eq);
+
+ // 2. Add the size of the backing store and arguments object.
+ __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(size, size,
+ FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
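+
+ // A quick sanity check of this computation (a sketch, assuming 8-byte
+ // pointers and FixedArray::kHeaderSize == 2 * kPointerSize): for
+ // param_count == 2 and arg_count == 3, mapped_params == 2, so size is
+ // (2 * 8 + 32) for the parameter map and its extended header, plus
+ // (3 * 8 + 16) for the backing store and its header, plus
+ // Heap::kSloppyArgumentsObjectSize for the arguments object itself.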
+
+ // Do the allocation of all three objects in one go. Assign this to x0, as it
+ // will be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x11;
+ Register aliased_args_offset = x10;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+
+ __ Ldr(args_offset,
+ ContextMemOperand(global_ctx,
+ Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Ldr(aliased_args_offset,
+ ContextMemOperand(global_ctx,
+ Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Cmp(mapped_params, 0);
+ __ CmovX(args_offset, aliased_args_offset, ne);
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
+
+ // Set the smi-tagged length as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, "elements" will point there; otherwise
+ // it will point to the backing store.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x14 recv_arg pointer to receiver arguments
+
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ Cmp(mapped_params, 0);
+ // Set up backing store address, because it is needed later for filling in
+ // the unmapped arguments.
+ Register backing_store = x6;
+ __ CmovX(backing_store, elements, eq);
+ __ B(eq, &skip_parameter_map);
+
+ __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ __ Add(x10, mapped_params, 2);
+ __ SmiTag(x10);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Str(cp, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize);
+ __ Str(x10, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at:
+ //
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
+ //
+ // The mapped parameter thus needs to get indices:
+ //
+ // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
+ // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
+ //
+ // We loop from right to left.
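+ //
+ // A sketch of the indexing: with parameter_count == 3 and
+ // mapped_parameter_count == 2, the first iteration stores index
+ // MIN_CONTEXT_SLOTS + 1 into the rightmost mapped slot, and the second
+ // stores MIN_CONTEXT_SLOTS + 2 into the slot to its left.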
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x11 loop_count parameter loop counter (uninit)
+ // x12 index parameter index (smi, uninit)
+ // x13 the_hole hole value (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register loop_count = x11;
+ Register index = x12;
+ Register the_hole = x13;
+ Label parameters_loop, parameters_test;
+ __ Mov(loop_count, mapped_params);
+ __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
+ __ Sub(index, index, mapped_params);
+ __ SmiTag(index);
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(backing_store, backing_store, kParameterMapHeaderSize);
+
+ __ B(&parameters_test);
+
+ __ Bind(&parameters_loop);
+ __ Sub(loop_count, loop_count, 1);
+ __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
+ __ Str(index, MemOperand(elements, x10));
+ __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
+ __ Str(the_hole, MemOperand(backing_store, x10));
+ __ Add(index, index, Smi::FromInt(1));
+ __ Bind(&parameters_test);
+ __ Cbnz(loop_count, &parameters_loop);
+
+ __ Bind(&skip_parameter_map);
+ // Copy the arguments header and remaining slots (if there are any).
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
+ __ Str(arg_count_smi, FieldMemOperand(backing_store,
+ FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x6 backing_store pointer to backing store
+ // x14 recv_arg pointer to receiver arguments
+
+ Label arguments_loop, arguments_test;
+ __ Mov(x10, mapped_params);
+ __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
+ __ B(&arguments_test);
+
+ __ Bind(&arguments_loop);
+ __ Sub(recv_arg, recv_arg, kPointerSize);
+ __ Ldr(x11, MemOperand(recv_arg));
+ __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ __ Add(x10, x10, 1);
+
+ __ Bind(&arguments_test);
+ __ Cmp(x10, arg_count);
+ __ B(lt, &arguments_loop);
+
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, recv_arg, arg_count_smi);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Get the stub arguments from the frame, and make an untagged copy of the
+ // parameter count.
+ Register param_count_smi = x1;
+ Register params = x2;
+ Register function = x3;
+ Register param_count = x13;
+ __ Pop(param_count_smi, params, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Test if arguments adaptor needed.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label try_allocate, runtime;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &try_allocate);
+
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
+
+ // Patch the argument length and parameters pointer.
+ __ Ldr(param_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(param_count, param_count_smi);
+ __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
+ __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Try the new space allocation. Start out with computing the size of the
+ // arguments object and the elements array in words.
+ Register size = x10;
+ __ Bind(&try_allocate);
+ __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
+ __ Cmp(param_count, 0);
+ __ CzeroX(size, eq);
+ __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
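+
+ // For example (a sketch, assuming 8-byte pointers and
+ // FixedArray::kHeaderSize == 16): with param_count == 2 the elements array
+ // needs 2 + 2 == 4 words, plus Heap::kStrictArgumentsObjectSize /
+ // kPointerSize words for the arguments object itself; with param_count == 0
+ // the elements array contribution is zeroed out entirely.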
+
+ // Do the allocation of both objects in one go. Assign this to x0, as it will
+ // be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (native) context.
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x4;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+ __ Ldr(args_offset,
+ ContextMemOperand(global_ctx,
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 args_offset offset to arguments boilerplate
+ // x13 param_count number of parameters passed to function
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set the smi-tagged length as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ Cbz(param_count, &done);
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 array pointer to array slot (uninit)
+ // x5 elements pointer to elements array of alloc_obj
+ // x13 param_count number of parameters passed to function
+
+ // Copy the fixed array slots.
+ Label loop;
+ Register array = x4;
+ // Set up pointer to first array slot.
+ __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+
+ __ Bind(&loop);
+ // Pre-decrement the parameters pointer by kPointerSize on each iteration,
+ // in order to skip the receiver.
+ __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
+ // Post-increment elements by kPointerSize on each iteration.
+ __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
+ __ Sub(param_count, param_count, 1);
+ __ Cbnz(param_count, &loop);
+
+ // Return from stub.
+ __ Bind(&done);
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, params, param_count_smi);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // jssp[0]: last_match_info (expected JSArray)
+ // jssp[8]: previous index
+ // jssp[16]: subject string
+ // jssp[24]: JSRegExp object
+ Label runtime;
+
+ // Use of registers for this function.
+
+ // Variable registers:
+ // x10-x13 used as scratch registers
+ // w0 string_type type of subject string
+ // x2 jsstring_length subject string length
+ // x3 jsregexp_object JSRegExp object
+ // w4 string_encoding ASCII or UC16
+ // w5 sliced_string_offset if the string is a SlicedString
+ // offset to the underlying string
+ // w6 string_representation groups attributes of the string:
+ // - is a string
+ // - type of the string
+ // - is a short external string
+ Register string_type = w0;
+ Register jsstring_length = x2;
+ Register jsregexp_object = x3;
+ Register string_encoding = w4;
+ Register sliced_string_offset = w5;
+ Register string_representation = w6;
+
+ // These are in callee-saved registers and will be preserved by the call
+ // to the native RegExp code, as this code is called using the normal
+ // C calling convention. When calling directly from generated code the
+ // native RegExp code will not do a GC and therefore the contents of
+ // these registers are safe to use after the call.
+
+ // x19 subject subject string
+ // x20 regexp_data RegExp data (FixedArray)
+ // x21 last_match_info_elements info relative to the last match
+ // (FixedArray)
+ // x22 code_object generated regexp code
+ Register subject = x19;
+ Register regexp_data = x20;
+ Register last_match_info_elements = x21;
+ Register code_object = x22;
+
+ // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
+ CPURegList used_callee_saved_registers(subject,
+ regexp_data,
+ last_match_info_elements,
+ code_object);
+ __ PushCPURegList(used_callee_saved_registers);
+
+ // Stack frame.
+ // jssp[0] : x19
+ // jssp[8] : x20
+ // jssp[16]: x21
+ // jssp[24]: x22
+ // jssp[32]: last_match_info (JSArray)
+ // jssp[40]: previous index
+ // jssp[48]: subject string
+ // jssp[56]: JSRegExp object
+
+ const int kLastMatchInfoOffset = 4 * kPointerSize;
+ const int kPreviousIndexOffset = 5 * kPointerSize;
+ const int kSubjectOffset = 6 * kPointerSize;
+ const int kJSRegExpOffset = 7 * kPointerSize;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
+ __ Mov(x10, address_of_regexp_stack_memory_size);
+ __ Ldr(x10, MemOperand(x10));
+ __ Cbz(x10, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(jsregexp_object, kJSRegExpOffset);
+ __ JumpIfSmi(jsregexp_object, &runtime);
+ __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(regexp_data, kSmiTagMask);
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ }
+
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
+ __ B(ne, &runtime);
+
+ // Check that the number of captures fits in the static offsets vector
+ // buffer. We always have at least one capture for the whole match, plus
+ // additional ones due to capturing parentheses. A capture takes 2 registers.
+ // The number of capture registers then is (number_of_captures + 1) * 2.
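+ // For example, /(a)(b)/ has number_of_captures == 2, giving
+ // (2 + 1) * 2 == 6 capture registers: the start and end of the whole match
+ // plus the start and end of each parenthesised capture.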
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // number_of_captures * 2 <= offsets vector size - 2
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ Add(x10, x10, x10);
+ __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
+ __ B(hi, &runtime);
+
+ // Initialize offset for possibly sliced string.
+ __ Mov(sliced_string_offset, 0);
+
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(subject, kSubjectOffset);
+ __ JumpIfSmi(subject, &runtime);
+
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+
+ __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bail out.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label check_underlying; // (4)
+ Label seq_string; // (5)
+ Label not_seq_nor_cons; // (6)
+ Label external_string; // (7)
+ Label not_long_external; // (8)
+
+ // (1) Sequential string? If yes, go to (5).
+ __ And(string_representation,
+ string_type,
+ kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask);
+ // We depend on the fact that Strings of type
+ // SeqString and not ShortExternalString are defined
+ // by the following pattern:
+ // string_type: 0XX0 XX00
+ // ^ ^ ^^
+ // | | ||
+ // | | is a SeqString
+ // | is not a short external String
+ // is a String
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ Cbz(string_representation, &seq_string); // Go to (5).
+
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ __ Cmp(string_representation, kExternalStringTag);
+ __ B(ge, &not_seq_nor_cons); // Go to (6).
+
+ // (3) Cons string. Check that it's flat.
+ __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
+ // Replace subject with first string.
+ __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+
+ // (4) Is subject external? If yes, go to (7).
+ __ Bind(&check_underlying);
+ // Reload the string type.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ // The underlying external string is never a short external string.
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ TestAndBranchIfAnySet(string_type.X(),
+ kStringRepresentationMask,
+ &external_string); // Go to (7).
+
+ // (5) Sequential string. Load regexp code according to encoding.
+ __ Bind(&seq_string);
+
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kPreviousIndexOffset);
+ __ JumpIfNotSmi(x10, &runtime);
+ __ Cmp(jsstring_length, x10);
+ __ B(ls, &runtime);
+
+ // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
+ // before entering the exit frame.
+ __ SmiUntag(x1, x10);
+
+ // The third bit determines the string encoding in string_type.
+ STATIC_ASSERT(kOneByteStringTag == 0x04);
+ STATIC_ASSERT(kTwoByteStringTag == 0x00);
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+
+ // Find the code object based on the assumptions above.
+ // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, so add an
+ // offset of kPointerSize to reach the latter.
+ ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+ JSRegExp::kDataUC16CodeOffset);
+ __ Mov(x10, kPointerSize);
+ // We will need the encoding later: ASCII = 0x04
+ // UC16 = 0x00
+ __ Ands(string_encoding, string_type, kStringEncodingMask);
+ __ CzeroX(x10, ne);
+ __ Add(x10, regexp_data, x10);
+ __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
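+ // In other words the selection is branchless: x10 is 0 for ASCII (the
+ // encoding bit is set) and kPointerSize for UC16, so the load above reads
+ // either kDataAsciiCodeOffset or the adjacent kDataUC16CodeOffset.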
+
+ // (E) Carry on. String handling is done.
+
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains a smi (code flushing support).
+ __ JumpIfSmi(code_object, &runtime);
+
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
+ x10,
+ x11);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ __ EnterExitFrame(false, x10, 1);
+ ASSERT(csp.Is(__ StackPointer()));
+
+ // We have 9 arguments to pass to the regexp code, so we have to pass one
+ // on the stack and the rest in registers.
+
+ // Note that the placement of the argument on the stack isn't standard
+ // AAPCS64:
+ // csp[0]: Space for the return address placed by DirectCEntryStub.
+ // csp[8]: Argument 9, the current isolate address.
+
+ __ Mov(x10, ExternalReference::isolate_address(isolate()));
+ __ Poke(x10, kPointerSize);
+
+ Register length = w11;
+ Register previous_index_in_bytes = w12;
+ Register start = x13;
+
+ // Load start of the subject string.
+ __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
+ // Load the length of the original subject string from the previous stack
+ // frame. Therefore we have to use fp, which points exactly to two pointer
+ // sizes below the previous sp. (Because creating a new stack frame pushes
+ // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
+ __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+ __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle UC16 encoding, two bytes make one character.
+ // string_encoding: if ASCII: 0x04
+ // if UC16: 0x00
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+ __ Ubfx(string_encoding, string_encoding, 2, 1);
+ __ Eor(string_encoding, string_encoding, 1);
+ // string_encoding: if ASCII: 0
+ // if UC16: 1
+
+ // Convert string positions from characters to bytes.
+ // Previous index is in x1.
+ __ Lsl(previous_index_in_bytes, w1, string_encoding);
+ __ Lsl(length, length, string_encoding);
+ __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
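+ // For example, for a UC16 subject string_encoding is 1 at this point, so a
+ // previous index of 3 characters becomes 3 << 1 == 6 bytes; for an ASCII
+ // subject the shift amount is 0 and positions are unchanged.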
+
+ // Argument 1 (x0): Subject string.
+ __ Mov(x0, subject);
+
+ // Argument 2 (x1): Previous index, already there.
+
+ // Argument 3 (x2): Get the start of the input.
+ // Start of input = start of string + previous index + substring offset
+ // (0 if the string is not sliced).
+ __ Add(w10, previous_index_in_bytes, sliced_string_offset);
+ __ Add(x2, start, Operand(w10, UXTW));
+
+ // Argument 4 (x3):
+ // End of input = start of input + (length of input - previous index)
+ __ Sub(w10, length, previous_index_in_bytes);
+ __ Add(x3, x2, Operand(w10, UXTW));
+
+ // Argument 5 (x4): static offsets vector buffer.
+ __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
+
+ // Argument 6 (x5): Set the number of capture registers to zero to force
+ // global regexps to behave as non-global. This stub is not used for global
+ // regexps.
+ __ Mov(x5, 0);
+
+ // Argument 7 (x6): Start (high end) of backtracking stack memory area.
+ __ Mov(x10, address_of_regexp_stack_memory_address);
+ __ Ldr(x10, MemOperand(x10));
+ __ Mov(x11, address_of_regexp_stack_memory_size);
+ __ Ldr(x11, MemOperand(x11));
+ __ Add(x6, x10, x11);
+
+ // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
+ __ Mov(x7, 1);
+
+ // Locate the code entry and call it.
+ __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm, code_object);
+
+ __ LeaveExitFrame(false, x10, true);
+
+ // The generated regexp code returns an int32 in w0.
+ Label failure, exception;
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
+ __ CompareAndBranch(w0,
+ NativeRegExpMacroAssembler::EXCEPTION,
+ eq,
+ &exception);
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
+
+ // Success: process the result from the native regexp code.
+ Register number_of_capture_registers = x12;
+
+ // Calculate number of capture registers (number_of_captures + 1) * 2
+ // and store it in the last match info.
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ __ Add(x10, x10, x10);
+ __ Add(number_of_capture_registers, x10, 2);
+
+ // Check that the fourth object is a JSArray object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kLastMatchInfoOffset);
+ __ JumpIfSmi(x10, &runtime);
+ __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
+
+ // Check that the JSArray is in the fast case.
+ __ Ldr(last_match_info_elements,
+ FieldMemOperand(x10, JSArray::kElementsOffset));
+ __ Ldr(x10,
+ FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
+
+ // Check that the last match info has space for the capture registers and the
+ // additional information (overhead).
+ // (number_of_captures + 1) * 2 + overhead <= last match info size
+ // (number_of_captures * 2) + 2 + overhead <= last match info size
+ // number_of_capture_registers + overhead <= last match info size
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(last_match_info_elements,
+ FixedArray::kLengthOffset));
+ __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
+ __ Cmp(x11, x10);
+ __ B(gt, &runtime);
+
+ // Store the capture count.
+ __ SmiTag(x10, number_of_capture_registers);
+ __ Str(x10,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ // Use x10 as the subject string in order to only need
+ // one RecordWriteStub.
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ Register last_match_offsets = x13;
+ Register offsets_vector_index = x14;
+ Register current_offset = x15;
+
+ // Get the static offsets vector filled by the native regexp code
+ // and fill the last match info.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate());
+ __ Mov(offsets_vector_index, address_of_static_offsets_vector);
+
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // iterates down to zero (inclusive).
+ __ Add(last_match_offsets,
+ last_match_info_elements,
+ RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
+ __ Bind(&next_capture);
+ __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
+ __ B(mi, &done);
+ // Read two 32-bit values from the static offsets vector buffer into
+ // an X register.
+ __ Ldr(current_offset,
+ MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
+ // Store the smi values in the last match info.
+ __ SmiTag(x10, current_offset);
+ // Clearing the bottom 32 bits gives us a Smi.
+ STATIC_ASSERT(kSmiShift == 32);
+ __ And(x11, current_offset, ~kWRegMask);
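+ // That is: current_offset holds two offsets, a in bits [31:0] and b in bits
+ // [63:32] (a little-endian pair load). SmiTag shifted a up by kSmiShift,
+ // leaving Smi(a) in x10, and the mask above cleared the low word, leaving b
+ // in the top half, i.e. Smi(b), in x11.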
+ __ Stp(x10,
+ x11,
+ MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
+ __ B(&next_capture);
+ __ Bind(&done);
+
+ // Return last match info.
+ __ Peek(x0, kLastMatchInfoOffset);
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&exception);
+ Register exception_value = x0;
+ // A stack overflow (on the backtrack stack) may have occurred
+ // in the RegExp code but no exception has been created yet.
+ // If there is no pending exception, handle that in the runtime system.
+ __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
+ __ Mov(x11,
+ Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+ __ Ldr(exception_value, MemOperand(x11));
+ __ Cmp(x10, exception_value);
+ __ B(eq, &runtime);
+
+ __ Str(x10, MemOperand(x11)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ Label termination_exception;
+ __ JumpIfRoot(exception_value,
+ Heap::kTerminationExceptionRootIndex,
+ &termination_exception);
+
+ __ Throw(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&termination_exception);
+ __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&failure);
+ __ Mov(x0, Operand(isolate()->factory()->null_value()));
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&runtime);
+ __ PopCPURegList(used_callee_saved_registers);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ Bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ B(ne, &not_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ __ Bind(&external_string);
+ if (masm->emit_debug_code()) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Tst(x10, kIsIndirectStringMask);
+ __ Check(eq, kExternalStringExpectedButNotFound);
+ __ And(x10, x10, kStringRepresentationMask);
+ __ Cmp(x10, 0);
+ __ Check(ne, kExternalStringExpectedButNotFound);
+ }
+ __ Ldr(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ B(&seq_string); // Go to (5).
+
+ // (8) If this is a short external string or not a string, bail out to
+ // runtime.
+ __ Bind(&not_long_external);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ TestAndBranchIfAnySet(string_representation,
+ kShortExternalStringMask | kIsNotStringMask,
+ &runtime);
+
+ // (9) Sliced string. Replace subject with parent.
+ __ Ldr(sliced_string_offset,
+ UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ B(&check_underlying); // Go to (4).
+#endif
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm,
+ Register argc,
+ Register function,
+ Register feedback_vector,
+ Register index,
+ Register scratch1,
+ Register scratch2) {
+ ASM_LOCATION("GenerateRecordCallTarget");
+ ASSERT(!AreAliased(scratch1, scratch2,
+ argc, function, feedback_vector, index));
+ // Cache the called function in a feedback vector slot. Cache states are
+ // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
+ // argc : number of arguments to the construct function
+ // function : the function to call
+ // feedback_vector : the feedback vector
+ // index : slot in feedback vector (smi)
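+ //
+ // A sketch of the state transitions implemented below (slot contents):
+ // uninitialized symbol -> JSFunction (monomorphic call)
+ // uninitialized symbol -> AllocationSite (monomorphic Array call)
+ // any mismatch on an initialized slot -> megamorphic symbol
+ // Once megamorphic, a slot is never patched back to a function.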
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
+
+ // Load the cache state.
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ Cmp(scratch1, function);
+ __ B(eq, &done);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then the slot holds either some other function or an
+ // AllocationSite. Do a map check on the object in the scratch1 register.
+ __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
+ __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &megamorphic);
+ __ B(&done);
+ }
+
+ __ Bind(&miss);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ Bind(&megamorphic);
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
+ __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ B(&done);
+
+ // An uninitialized cache is patched with the function, or with a sentinel
+ // indicating the ElementsKind if the function is the Array constructor.
+ __ Bind(&initialize);
+
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &not_array_function);
+
+ // The target function is the Array constructor.
+ // Create an AllocationSite if we don't already have one, and store it in
+ // the slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateAllocationSiteStub create_stub(masm->isolate());
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(argc);
+ __ Push(argc, function, feedback_vector, index);
+
+ // CreateAllocationSiteStub expects the feedback vector in x2 and the slot
+ // index in x3.
+ ASSERT(feedback_vector.Is(x2) && index.Is(x3));
+ __ CallStub(&create_stub);
+
+ __ Pop(index, feedback_vector, function, argc);
+ __ SmiUntag(argc);
+ }
+ __ B(&done);
+
+ __ Bind(&not_array_function);
+ }
+
+ // An uninitialized cache is patched with the function.
+
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(function, MemOperand(scratch1, 0));
+
+ __ Push(function);
+ __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(function);
+
+ __ Bind(&done);
+}
+
+
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont);
+
+ // Do not transform the receiver for native (compiler hints already in w4).
+ __ Tbnz(w4, SharedFunctionInfo::kNative, cont);
+}
+
+
+static void EmitSlowCase(MacroAssembler* masm,
+ int argc,
+ Register function,
+ Register type,
+ Label* non_function) {
+ // Check for function proxy.
+ // 'type' holds the function's instance type.
+ __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, non_function);
+ __ Push(function); // Put proxy as an additional argument.
+ __ Mov(x0, argc + 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ Bind(non_function);
+ __ Poke(function, argc * kXRegSize);
+ __ Mov(x0, argc); // Set up the number of arguments.
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Pop(x1);
+ }
+ __ Poke(x0, argc * kPointerSize);
+ __ B(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // x1 function the function to call
+ Register function = x1;
+ Register type = x4;
+ Label slow, non_function, wrap, cont;
+
+ // TODO(jbramley): This function has a lot of unnamed registers. Name them,
+ // and tidy things up a bit.
+
+ if (needs_checks) {
+ // Check that the function is really a JavaScript function.
+ __ JumpIfSmi(function, &non_function);
+
+ // Go to the slow case if we do not have a function.
+ __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
+ }
+
+ // Fast-case: Invoke the function now.
+ // x1 function pushed function
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ Peek(x3, argc * kPointerSize);
+
+ if (needs_checks) {
+ __ JumpIfSmi(x3, &wrap);
+ __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
+ } else {
+ __ B(&wrap);
+ }
+
+ __ Bind(&cont);
+ }
+
+ __ InvokeFunction(function,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper());
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ Bind(&slow);
+ EmitSlowCase(masm, argc, function, type, &non_function);
+ }
+
+ if (call_as_method) {
+ __ Bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallFunctionStub::Generate");
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallConstructStub::Generate");
+ // x0 : number of arguments
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
+ Register function = x1;
+ Label slow, non_function_call;
+
+ // Check that the function is not a smi.
+ __ JumpIfSmi(function, &non_function_call);
+ // Check that the function is a JSFunction.
+ Register object_type = x10;
+ __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
+ &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
+
+ __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into x2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by x3 + 1.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into x2, or undefined.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
+ __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
+ __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
+ &feedback_register_initialized);
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(x2, x5);
+ }
+
+ // Jump to the function-specific construct stub.
+ Register jump_reg = x4;
+ Register shared_func_info = jump_reg;
+ Register cons_stub = jump_reg;
+ Register cons_stub_code = jump_reg;
+ __ Ldr(shared_func_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(cons_stub,
+ FieldMemOperand(shared_func_info,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(cons_stub_code);
+
+ Label do_call;
+ __ Bind(&slow);
+ __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(ne, &non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ B(&do_call);
+
+ __ Bind(&non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+
+ __ Bind(&do_call);
+ // Set expected number of arguments to zero (not changing x0).
+ __ Mov(x2, 0);
+ __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(vector, FieldMemOperand(vector,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(vector, FieldMemOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // x1 - function
+ // x3 - slot id
+ Label miss;
+ Register function = x1;
+ Register feedback_vector = x2;
+ Register index = x3;
+ Register scratch = x4;
+
+ EmitLoadTypeFeedbackVector(masm, feedback_vector);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
+ __ Cmp(function, scratch);
+ __ B(ne, &miss);
+
+ Register allocation_site = feedback_vector;
+ __ Mov(x0, Operand(arg_count()));
+
+ __ Add(scratch, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(allocation_site, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+
+ // Verify that x2 contains an AllocationSite
+ __ AssertUndefinedOrAllocationSite(allocation_site, scratch);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+ // The slow case; we need it no matter what, to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ __ Unreachable();
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallICStub");
+
+ // x1 - function
+ // x3 - slot id (Smi)
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ Register function = x1;
+ Register feedback_vector = x2;
+ Register index = x3;
+ Register type = x4;
+
+ EmitLoadTypeFeedbackVector(masm, feedback_vector);
+
+ // The checks. First, does x1 match the recorded monomorphic target?
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
+
+ __ Cmp(x4, function);
+ __ B(ne, &extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+
+ // Compute the receiver in sloppy mode.
+ __ Peek(x3, argc * kPointerSize);
+
+ __ JumpIfSmi(x3, &wrap);
+ __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
+
+ __ Bind(&cont);
+ }
+
+ __ InvokeFunction(function,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, function, type, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ JumpIfRoot(x4, Heap::kMegamorphicSymbolRootIndex, &slow_start);
+ __ JumpIfRoot(x4, Heap::kUninitializedSymbolRootIndex, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ LoadRoot(x5, Heap::kMegamorphicSymbolRootIndex);
+ __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
+ __ B(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Miss);
+
+ // The slow case.
+ __ bind(&slow_start);
+
+ // Check that the function is really a JavaScript function.
+ __ JumpIfSmi(function, &non_function);
+
+ // Go to the slow case if we do not have a function.
+ __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
+ __ B(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+ ASM_LOCATION("CallICStub[Miss]");
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ Peek(x4, (state_.arg_count() + 1) * kPointerSize);
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver, the function, and the feedback info.
+ __ Push(x4, x1, x2, x3);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+    // Move the result to x1 and exit the internal frame.
+ __ Mov(x1, x0);
+ }
+}
+
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+  // If the receiver is a smi, trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+
+  // If the receiver is not a string, trigger the non-string case.
+ __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
+
+  // If the index is not a smi, trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ __ Bind(&got_smi_index_);
+ // Check for index out of range.
+ __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
+ __ Cmp(result_, Operand::UntagSmi(index_));
+ __ B(ls, index_out_of_range_);
+
+ __ SmiUntag(index_);
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_.W(),
+ result_,
+ &call_runtime_);
+ __ SmiTag(result_);
+ __ Bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ __ Bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ result_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+  // Save object_ on the stack and pass index_ as an argument to the
+  // runtime call.
+ __ Push(object_, index_);
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+ }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Mov(index_, x0);
+ __ Pop(object_);
+ // Reload the instance type.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ B(&got_smi_index_);
+
+  // Call the runtime. We get here when the receiver is a string and the
+  // index is a number, but the code for fetching the actual character
+  // is too complex (e.g., when the string needs to be flattened).
+ __ Bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ SmiTag(index_);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
+ __ B(hi, &slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+  // At this point the code register contains a smi-tagged ASCII char code.
+ STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
+ __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+ __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
+ __ Bind(&exit_);
+}
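+
+// The single Add above turns the smi-tagged char code directly into a byte
+// offset into the cache, without untagging first. The identity it relies on
+// (guaranteed by the STATIC_ASSERT above) is, in C++ terms:
+//
+//   smi_code == code << kSmiShift
+//   offset   == code << kPointerSizeLog2
+//            == smi_code >> (kSmiShift - kPointerSizeLog2)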
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ Bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ Push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ // Inputs are in x0 (lhs) and x1 (rhs).
+ ASSERT(state_ == CompareIC::SMI);
+ ASM_LOCATION("ICCompareStub[Smis]");
+ Label miss;
+ // Bail out (to 'miss') unless both x0 and x1 are smis.
+ __ JumpIfEitherNotSmi(x0, x1, &miss);
+
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ __ Sub(x0, x0, x1);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(x1);
+ __ Sub(x0, x1, Operand::UntagSmi(x0));
+ }
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
+ ASM_LOCATION("ICCompareStub[HeapNumbers]");
+
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss, handle_lhs, values_in_d_regs;
+ Label untag_rhs, untag_lhs;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(lhs, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rhs, &miss);
+ }
+
+ __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
+ __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
+
+ // Load rhs if it's a heap number.
+ __ JumpIfSmi(rhs, &handle_lhs);
+ __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+ // Load lhs if it's a heap number.
+ __ Bind(&handle_lhs);
+ __ JumpIfSmi(lhs, &values_in_d_regs);
+ __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ __ Bind(&values_in_d_regs);
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &unordered); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&unordered);
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ Bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
+ __ JumpIfSmi(lhs, &unordered);
+ __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
+ __ B(&unordered);
+ }
+
+ __ Bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
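+
+// The Cset/Csinv pair above folds the three-way comparison result into two
+// branchless instructions. An equivalent C++ sketch (illustrative only):
+//
+//   int64_t result = (lhs > rhs) ? 1 : 0;  // Cset(result, gt)
+//   result = (lhs >= rhs) ? result : -1;   // Csinv(result, result, xzr, ge)
+//   // result is now GREATER (1), EQUAL (0) or LESS (-1).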
+
+
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ ASM_LOCATION("ICCompareStub[InternalizedStrings]");
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+ // Check that both operands are internalized strings.
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ Register rhs_type = x10;
+ Register lhs_type = x11;
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ __ Orr(x12, lhs_type, rhs_type);
+ __ TestAndBranchIfAnySet(
+ x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
+
+ // Internalized strings are compared by identity.
+ STATIC_ASSERT(EQUAL == 0);
+ __ Cmp(lhs, rhs);
+ __ Cset(result, ne);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASM_LOCATION("ICCompareStub[UniqueNames]");
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ Register lhs_instance_type = w2;
+ Register rhs_instance_type = w3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+  // Check that both operands are unique names. This leaves the instance
+  // types loaded in lhs_instance_type and rhs_instance_type.
+ __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
+
+  // To avoid a miss, each instance type should either be SYMBOL_TYPE or
+  // have kInternalizedTag set.
+ __ JumpIfNotUniqueName(lhs_instance_type, &miss);
+ __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+
+ // Unique names are compared by identity.
+ STATIC_ASSERT(EQUAL == 0);
+ __ Cmp(lhs, rhs);
+ __ Cset(result, ne);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRING);
+ ASM_LOCATION("ICCompareStub[Strings]");
+
+ Label miss;
+
+ bool equality = Token::IsEqualityOp(op_);
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ // Check that both operands are strings.
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ Register rhs_type = x10;
+ Register lhs_type = x11;
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ Orr(x12, lhs_type, rhs_type);
+ __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
+
+ // Fast check for identical strings.
+ Label not_equal;
+ __ Cmp(lhs, rhs);
+ __ B(ne, &not_equal);
+ __ Mov(result, EQUAL);
+ __ Ret();
+
+ __ Bind(&not_equal);
+  // Handle non-identical strings.
+
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
+ if (equality) {
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ Label not_internalized_strings;
+ __ Orr(x12, lhs_type, rhs_type);
+ __ TestAndBranchIfAnySet(
+ x12, kIsNotInternalizedMask, &not_internalized_strings);
+    // The result is in rhs (x0) and is not EQUAL, since rhs is a non-smi
+    // heap object.
+ __ Ret();
+ __ Bind(&not_internalized_strings);
+ }
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+ lhs_type, rhs_type, x12, x13, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, lhs, rhs, x10, x11, x12);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, lhs, rhs, x10, x11, x12, x13);
+ }
+
+ // Handle more complex cases in runtime.
+ __ Bind(&runtime);
+ __ Push(lhs, rhs);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::OBJECT);
+ ASM_LOCATION("ICCompareStub[Objects]");
+
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
+ __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
+
+ ASSERT(GetCondition() == eq);
+ __ Sub(result, rhs, lhs);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ ASM_LOCATION("ICCompareStub[KnownObjects]");
+
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Cmp(rhs_map, Operand(known_map_));
+ __ B(ne, &miss);
+ __ Cmp(lhs_map, Operand(known_map_));
+ __ B(ne, &miss);
+
+ __ Sub(result, rhs, lhs);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+// This method handles the case where a compare stub had the wrong
+// implementation. It calls a miss handler, which rewrites the stub. All other
+// ICCompareStub::Generate* methods should fall back to this one if their
+// operands were not the expected types.
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("ICCompareStub[Miss]");
+
+ Register stub_entry = x11;
+ {
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
+
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ Register op = x10;
+ Register left = x1;
+ Register right = x0;
+ // Preserve some caller-saved registers.
+ __ Push(x1, x0, lr);
+ // Push the arguments.
+ __ Mov(op, Smi::FromInt(op_));
+ __ Push(left, right, op);
+
+ // Call the miss handler. This also pops the arguments.
+ __ CallExternalReference(miss, 3);
+
+ // Compute the entry point of the rewritten stub.
+ __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
+ // Restore caller-saved registers.
+ __ Pop(lr, x0, x1);
+ }
+
+ // Tail-call to the new stub.
+ __ Jump(stub_entry);
+}
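+
+// In C++ terms, the miss protocol above is roughly the following
+// (illustrative sketch; CompareIC_Miss stands for the runtime entry reached
+// through 'miss'):
+//
+//   Code* new_stub = CompareIC_Miss(left, right, Smi::FromInt(op));
+//   // Tail-call the rewritten stub, so the call site only ever observes
+//   // the re-specialized implementation.
+//   goto new_stub->instruction_start();  // x0 + Code::kHeaderSize - tag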
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ ASSERT(!AreAliased(hash, character));
+
+ // hash = character + (character << 10);
+ __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+ // Untag smi seed and add the character.
+ __ Add(hash, character, Operand(hash, LSR, kSmiShift));
+
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+
+ // hash += hash << 10;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+ // hash ^= hash >> 6;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ ASSERT(!AreAliased(hash, character));
+
+ // hash += character;
+ __ Add(hash, hash, character);
+
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+
+ // hash += hash << 10;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+ // hash ^= hash >> 6;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+ Register scratch_w = scratch.W();
+ ASSERT(!AreAliased(hash_w, scratch_w));
+
+ // hash += hash << 3;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
+ // hash ^= hash >> 11;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
+ // hash += hash << 15;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
+
+ __ Ands(hash_w, hash_w, String::kHashBitMask);
+
+ // if (hash == 0) hash = 27;
+ __ Mov(scratch_w, StringHasher::kZeroHash);
+ __ Csel(hash_w, scratch_w, hash_w, eq);
+}
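+
+// For reference, the three helpers above together compute Jenkins'
+// one-at-a-time hash, seeded with the heap's hash seed. A C++ sketch of the
+// whole computation (illustrative only; the W-register arithmetic wraps
+// modulo 2^32 exactly like uint32_t):
+//
+//   uint32_t hash = seed;                      // GenerateHashInit
+//   for (int i = 0; i < length; i++) {
+//     hash += chars[i];                        // GenerateHashAddCharacter
+//     hash += hash << 10;
+//     hash ^= hash >> 6;
+//   }
+//   hash += hash << 3;                         // GenerateHashGetHash
+//   hash ^= hash >> 11;
+//   hash += hash << 15;
+//   hash &= String::kHashBitMask;
+//   if (hash == 0) hash = StringHasher::kZeroHash;  // 27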
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("SubStringStub::Generate");
+ Label runtime;
+
+ // Stack frame on entry.
+ // lr: return address
+ // jssp[0]: substring "to" offset
+ // jssp[8]: substring "from" offset
+ // jssp[16]: pointer to string object
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length (in debug mode.)
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+ Register to = x0;
+ Register from = x15;
+ Register input_string = x10;
+ Register input_length = x11;
+ Register input_type = x12;
+ Register result_string = x0;
+ Register result_length = x1;
+ Register temp = x3;
+
+ __ Peek(to, kToOffset);
+ __ Peek(from, kFromOffset);
+
+ // Check that both from and to are smis. If not, jump to runtime.
+ __ JumpIfEitherNotSmi(from, to, &runtime);
+ __ SmiUntag(from);
+ __ SmiUntag(to);
+
+ // Calculate difference between from and to. If to < from, branch to runtime.
+ __ Subs(result_length, to, from);
+ __ B(mi, &runtime);
+
+  // Check that 'from' is positive.
+ __ Tbnz(from, kWSignBit, &runtime);
+
+ // Make sure first argument is a string.
+ __ Peek(input_string, kStringOffset);
+ __ JumpIfSmi(input_string, &runtime);
+ __ IsObjectJSStringType(input_string, input_type, &runtime);
+
+ Label single_char;
+ __ Cmp(result_length, 1);
+ __ B(eq, &single_char);
+
+  // Short-cut for the trivial case where the substring is the whole string.
+ Label return_x0;
+ __ Ldrsw(input_length,
+ UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
+
+ __ Cmp(result_length, input_length);
+ __ CmovX(x0, input_string, eq);
+ // Return original string.
+ __ B(eq, &return_x0);
+
+ // Longer than original string's length or negative: unsafe arguments.
+ __ B(hi, &runtime);
+
+ // Shorter than original string's length: an actual substring.
+
+ // x0 to substring end character offset
+ // x1 result_length length of substring result
+ // x10 input_string pointer to input string object
+ // x10 unpacked_string pointer to unpacked string object
+ // x11 input_length length of input string
+ // x12 input_type instance type of input string
+ // x15 from substring start character offset
+
+ // Deal with different string types: update the index if necessary and put
+ // the underlying string into register unpacked_string.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ Label update_instance_type;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+
+ // Test for string types, and branch/fall through to appropriate unpacking
+ // code.
+ __ Tst(input_type, kIsIndirectStringMask);
+ __ B(eq, &seq_or_external_string);
+ __ Tst(input_type, kSlicedNotConsMask);
+ __ B(ne, &sliced_string);
+
+ Register unpacked_string = input_string;
+
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
+ __ Ldr(unpacked_string,
+ FieldMemOperand(input_string, ConsString::kFirstOffset));
+ __ B(&update_instance_type);
+
+ __ Bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ Ldrsw(temp,
+ UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
+ __ Add(from, from, temp);
+ __ Ldr(unpacked_string,
+ FieldMemOperand(input_string, SlicedString::kParentOffset));
+
+ __ Bind(&update_instance_type);
+ __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
+ __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+  // Control must now flow to &underlying_unpacked. Since no code is
+  // generated before that label, we fall through instead of emitting a
+  // useless branch.
+
+ __ Bind(&seq_or_external_string);
+ // Sequential or external string. Registers unpacked_string and input_string
+ // alias, so there's nothing to do here.
+ // Note that if code is added here, the above code must be updated.
+
+ // x0 result_string pointer to result string object (uninit)
+ // x1 result_length length of substring result
+ // x10 unpacked_string pointer to unpacked string object
+ // x11 input_length length of input string
+ // x12 input_type instance type of input string
+ // x15 from substring start character offset
+ __ Bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ __ Cmp(result_length, SlicedString::kMinLength);
+ // Short slice. Copy instead of slicing.
+ __ B(lt, &copy_routine);
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+ // the newly created string's parent anyway due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
+ __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
+ &runtime);
+ __ B(&set_slice_header);
+
+ __ Bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
+ &runtime);
+
+ __ Bind(&set_slice_header);
+ __ SmiTag(from);
+ __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
+ __ Str(unpacked_string,
+ FieldMemOperand(result_string, SlicedString::kParentOffset));
+ __ B(&return_x0);
+
+ __ Bind(&copy_routine);
+ }
+
+ // x0 result_string pointer to result string object (uninit)
+ // x1 result_length length of substring result
+ // x10 unpacked_string pointer to unpacked string object
+ // x11 input_length length of input string
+ // x12 input_type instance type of input string
+ // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
+ // x13 substring_char0 pointer to first char of substring (uninit)
+ // x14 result_char0 pointer to first char of result (uninit)
+ // x15 from substring start character offset
+ Register unpacked_char0 = x13;
+ Register substring_char0 = x13;
+ Register result_char0 = x14;
+ Label two_byte_sequential, sequential_string, allocate_result;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+
+ __ Tst(input_type, kExternalStringTag);
+ __ B(eq, &sequential_string);
+
+ __ Tst(input_type, kShortExternalStringTag);
+ __ B(ne, &runtime);
+ __ Ldr(unpacked_char0,
+ FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
+ // unpacked_char0 points to the first character of the underlying string.
+ __ B(&allocate_result);
+
+ __ Bind(&sequential_string);
+ // Locate first character of underlying subject string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Add(unpacked_char0, unpacked_string,
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ __ Bind(&allocate_result);
+ // Sequential ASCII string. Allocate the result.
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
+
+ // Allocate and copy the resulting ASCII string.
+ __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Add(substring_char0, unpacked_char0, from);
+
+ // Locate first character of result.
+ __ Add(result_char0, result_string,
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+ __ B(&return_x0);
+
+ // Allocate and copy the resulting two-byte string.
+ __ Bind(&two_byte_sequential);
+ __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
+
+ // Locate first character of result.
+ __ Add(result_char0, result_string,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ __ Add(result_length, result_length, result_length);
+ __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+
+ __ Bind(&return_x0);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
+ __ Drop(3);
+ __ Ret();
+
+ __ Bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+
+ __ bind(&single_char);
+ // x1: result_length
+ // x10: input_string
+ // x12: input_type
+ // x15: from (untagged)
+ __ SmiTag(from);
+ StringCharAtGenerator generator(
+ input_string, from, result_length, x0,
+ &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ Drop(3);
+ __ Ret();
+ generator.SkipSlow(masm, &runtime);
+}
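+
+// In outline, the stub above implements the following decision tree
+// (illustrative pseudocode; every failure path falls back to
+// Runtime::kHiddenSubString):
+//
+//   len = to - from;
+//   if (len == 1) return single-character cache lookup;
+//   if (len == input.length) return input;  // trivial substring
+//   unpack cons/sliced input to the underlying string, adjusting 'from';
+//   if (FLAG_string_slices && len >= SlicedString::kMinLength)
+//     return new SlicedString(underlying, from, len);
+//   return fresh sequential string with a copy of chars [from, from + len);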
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
+ Register result = x0;
+ Register left_length = scratch1;
+ Register right_length = scratch2;
+
+ // Compare lengths. If lengths differ, strings can't be equal. Lengths are
+ // smis, and don't need to be untagged.
+ Label strings_not_equal, check_zero_length;
+ __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
+ __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
+ __ Cmp(left_length, right_length);
+ __ B(eq, &check_zero_length);
+
+ __ Bind(&strings_not_equal);
+ __ Mov(result, Smi::FromInt(NOT_EQUAL));
+ __ Ret();
+
+  // Check if the length is zero. If so, the strings must be equal (and empty).
+ Label compare_chars;
+ __ Bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Cbnz(left_length, &compare_chars);
+ __ Mov(result, Smi::FromInt(EQUAL));
+ __ Ret();
+
+ // Compare characters. Falls through if all characters are equal.
+ __ Bind(&compare_chars);
+ GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
+ scratch3, &strings_not_equal);
+
+ // Characters in strings are equal.
+ __ Mov(result, Smi::FromInt(EQUAL));
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+ Label result_not_equal, compare_lengths;
+
+ // Find minimum length and length difference.
+ Register length_delta = scratch3;
+ __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Subs(length_delta, scratch1, scratch2);
+
+ Register min_length = scratch1;
+ __ Csel(min_length, scratch2, scratch1, gt);
+ __ Cbz(min_length, &compare_lengths);
+
+ // Compare loop.
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, min_length, scratch2, scratch4,
+ &result_not_equal);
+
+  // Compare lengths; the strings are equal up to min_length.
+ __ Bind(&compare_lengths);
+
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+
+ // Use length_delta as result if it's zero.
+ Register result = x0;
+ __ Subs(result, length_delta, 0);
+
+ __ Bind(&result_not_equal);
+ Register greater = x10;
+ Register less = x11;
+ __ Mov(greater, Smi::FromInt(GREATER));
+ __ Mov(less, Smi::FromInt(LESS));
+ __ CmovX(result, greater, gt);
+ __ CmovX(result, less, lt);
+ __ Ret();
+}
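+
+// The net effect of the code above, in C++ terms (illustrative sketch; the
+// returned values are the smi-tagged LESS/EQUAL/GREATER constants):
+//
+//   int min_length = std::min(left.length(), right.length());
+//   for (int i = 0; i < min_length; i++) {
+//     if (left[i] != right[i])
+//       return (left[i] < right[i]) ? Smi(LESS) : Smi(GREATER);
+//   }
+//   int delta = left.length() - right.length();
+//   if (delta == 0) return Smi(EQUAL);
+//   return (delta < 0) ? Smi(LESS) : Smi(GREATER);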
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal) {
+ ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
+
+  // Change index to run from -length to -1 by adding length to the string
+  // start. The loop then ends when index reaches zero, so no additional
+  // compare is needed.
+ __ SmiUntag(length);
+ __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Add(left, left, scratch1);
+ __ Add(right, right, scratch1);
+
+ Register index = length;
+ __ Neg(index, length); // index = -length;
+
+ // Compare loop
+ Label loop;
+ __ Bind(&loop);
+ __ Ldrb(scratch1, MemOperand(left, index));
+ __ Ldrb(scratch2, MemOperand(right, index));
+ __ Cmp(scratch1, scratch2);
+ __ B(ne, chars_not_equal);
+ __ Add(index, index, 1);
+ __ Cbnz(index, &loop);
+}
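+
+// The loop above biases both character pointers one-past-the-end and counts
+// a negative index up to zero, so the loop-termination test needs no
+// separate compare. An equivalent C++ sketch (illustrative only):
+//
+//   const uint8_t* left_end = left_chars + length;
+//   const uint8_t* right_end = right_chars + length;
+//   for (intptr_t i = -length; i != 0; i++) {
+//     if (left_end[i] != right_end[i]) goto chars_not_equal;
+//   }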
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ Counters* counters = isolate()->counters();
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[8]: left string
+ Register right = x10;
+ Register left = x11;
+ Register result = x0;
+ __ Pop(right, left);
+
+ Label not_same;
+ __ Subs(result, right, left);
+ __ B(ne, &not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ __ Ret();
+
+ __ Bind(&not_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
+
+ // Compare flat ASCII strings natively. Remove arguments from stack first,
+ // as this function will generate a return.
+ __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
+
+ __ Bind(&runtime);
+
+ // Push arguments back on to the stack.
+ // sp[0] = right string
+ // sp[8] = left string.
+ __ Push(left, right);
+
+ // Call the runtime.
+ // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x1 : left
+ // -- x0 : right
+ // -- lr : return address
+ // -----------------------------------
+
+ // Load x2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(x2, kExpectedAllocationSite);
+ __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
+ kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  // We need some extra registers for this stub. They have been allocated,
+  // but we need to save them before using them.
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ Register value = regs_.scratch0();
+ __ Ldr(value, MemOperand(regs_.address()));
+ __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
+
+ __ CheckPageFlagSet(regs_.object(),
+ value,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ Bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ Register address =
+ x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.Is(regs_.object()));
+ ASSERT(!address.Is(x0));
+ __ Mov(address, regs_.address());
+ __ Mov(x0, regs_.object());
+ __ Mov(x1, address);
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ ExternalReference function =
+ ExternalReference::incremental_marking_record_write_function(
+ isolate());
+ __ CallCFunction(function, 3, 0);
+
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ Register mem_chunk = regs_.scratch0();
+ Register counter = regs_.scratch1();
+ __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
+ __ Ldr(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ Subs(counter, counter, 1);
+ __ Str(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ B(mi, &need_incremental);
+
+ // If the object is not black we don't have to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&on_black);
+ // Get the value from the slot.
+ Register value = regs_.scratch0();
+ __ Ldr(value, MemOperand(regs_.address()));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlagClear(value,
+ regs_.scratch1(),
+ MemoryChunk::kEvacuationCandidateMask,
+ &ensure_not_white);
+
+ __ CheckPageFlagClear(regs_.object(),
+ regs_.scratch1(),
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ &need_incremental);
+
+ __ Bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.address(), regs_.object());
+ __ EnsureNotWhite(value,
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ regs_.scratch2(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ Bind(&need_incremental);
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+  // We patch the first two instructions back and forth between a nop and a
+  // real branch when we start and stop incremental heap marking.
+  // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so two
+  // nops are generated.
+  // See RecordWriteStub::Patch for details.
+ {
+ InstructionAccurateScope scope(masm, 2);
+ __ adr(xzr, &skip_to_incremental_noncompacting);
+ __ adr(xzr, &skip_to_incremental_compacting);
+ }
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ Bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ Bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+}
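+
+// For reference: 'adr xzr, <label>' computes a PC-relative address into the
+// zero register, so it behaves as a nop that still encodes its target.
+// RecordWriteStub::Patch rewrites these instructions so that, conceptually
+// (illustrative sketch; the mode is baked into the code, not tested at run
+// time):
+//
+//   if (mode == INCREMENTAL) goto skip_to_incremental_noncompacting;
+//   if (mode == INCREMENTAL_COMPACTION) goto skip_to_incremental_compacting;
+//   update_remembered_set_and_return();  // STORE_BUFFER_ONLY fast path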
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // x0 value element value to store
+ // x3 index_smi element index as smi
+ // sp[0] array_index_smi array literal index in function as smi
+ // sp[1] array array literal
+
+ Register value = x0;
+ Register index_smi = x3;
+
+ Register array = x1;
+ Register array_map = x2;
+ Register array_index_smi = x4;
+ __ PeekPair(array_index_smi, array, 0);
+ __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
+
+ Label double_elements, smi_element, fast_elements, slow_elements;
+ Register bitfield2 = x10;
+ __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
+
+ // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
+ // FAST_HOLEY_ELEMENTS.
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
+ __ B(hi, &double_elements);
+
+ __ JumpIfSmi(value, &smi_element);
+
+ // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
+ __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
+ &fast_elements);
+
+ // Store into the array literal requires an elements transition. Call into
+ // the runtime.
+ __ Bind(&slow_elements);
+ __ Push(array, index_smi, value);
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ __ Push(x11, array_index_smi);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ __ Bind(&fast_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(value, MemOperand(x11));
+ // Update the write barrier for the array store.
+ __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
+ __ Bind(&smi_element);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
+ __ Ret();
+
+ __ Bind(&double_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
+ &slow_elements);
+ __ Ret();
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ Ldr(x1, MemOperand(fp, parameter_count_offset));
+ if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ __ Add(x1, x1, 1);
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ Drop(x1);
+ // Return to IC Miss stub, continuation still on stack.
+ __ Ret();
+}
+
+
+static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
+ // The entry hook is a "BumpSystemStackPointer" instruction (sub),
+ // followed by a "Push lr" instruction, followed by a call.
+ unsigned int size =
+ Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+ if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+    // If ALWAYS_ALIGN_CSP is set, there will be an extra bic instruction in
+ // "BumpSystemStackPointer".
+ size += kInstructionSize;
+ }
+ return size;
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ ProfileEntryHookStub stub(masm->isolate());
+ Assembler::BlockConstPoolScope no_const_pools(masm);
+ DontEmitDebugCodeScope no_debug_code(masm);
+ Label entry_hook_call_start;
+ __ Bind(&entry_hook_call_start);
+ __ Push(lr);
+ __ CallStub(&stub);
+ ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
+ GetProfileEntryHookCallSize(masm));
+
+ __ Pop(lr);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+
+ // Save all kCallerSaved registers (including lr), since this can be called
+ // from anywhere.
+ // TODO(jbramley): What about FP registers?
+ __ PushCPURegList(kCallerSaved);
+ ASSERT(kCallerSaved.IncludesAliasOf(lr));
+ const int kNumSavedRegs = kCallerSaved.Count();
+
+ // Compute the function's address as the first argument.
+ __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
+
+#if V8_HOST_ARCH_ARM64
+ uintptr_t entry_hook =
+ reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
+ __ Mov(x10, entry_hook);
+#else
+ // Under the simulator we need to indirect the entry hook through a trampoline
+ // function at a known address.
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ Mov(x10, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ isolate())));
+  // It additionally takes an isolate as a third parameter.
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
+#endif
+
+ // The caller's return address is above the saved temporaries.
+ // Grab its location for the second argument to the hook.
+ __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
+
+ {
+ // Create a dummy frame, as CallCFunction requires this.
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ CallCFunction(x10, 2, 0);
+ }
+
+ __ PopCPURegList(kCallerSaved);
+ __ Ret();
+}
+
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // When calling into C++ code the stack pointer must be csp.
+ // Therefore this code must use csp for peek/poke operations when the
+ // stub is generated. When the stub is called
+  // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame
+ // and configure the stack pointer *before* doing the call.
+ const Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+
+ // Put return address on the stack (accessible to GC through exit frame pc).
+ __ Poke(lr, 0);
+ // Call the C++ function.
+ __ Blr(x10);
+ // Return to calling code.
+ __ Peek(lr, 0);
+ __ AssertFPCRState();
+ __ Ret();
+
+ __ SetStackPointer(old_stack_pointer);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ // Make sure the caller configured the stack pointer (see comment in
+ // DirectCEntryStub::Generate).
+ ASSERT(csp.Is(__ StackPointer()));
+
+ intptr_t code =
+ reinterpret_cast<intptr_t>(GetCode().location());
+ __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
+ __ Mov(x10, target);
+ // Branch to the stub.
+ __ Blr(lr);
+}
+
+
+// Probe the name dictionary in the 'elements' register.
+// Jump to the 'done' label if a property with the given name is found.
+// Jump to the 'miss' label otherwise.
+//
+// If the lookup was successful, 'scratch2' will be equal to
+// elements + kPointerSize * (NameDictionary::kEntrySize * index).
+// 'elements' and 'name' registers are preserved on miss.
+void NameDictionaryLookupStub::GeneratePositiveLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+
+  // Assert that 'name' contains a Name.
+ __ AssertName(name);
+
+ // Compute the capacity mask.
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
+ __ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
+ if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right
+      // shifting the hash in a separate instruction. The value
+      // hash + i + i * i is right shifted by the following And instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(scratch2, scratch2, Operand(
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ }
+ __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ UseScratchRegisterScope temps(masm);
+ Register scratch3 = temps.AcquireX();
+ __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ Cmp(name, scratch3);
+ __ B(eq, done);
+ }
+
+ // The inlined probes didn't find the entry.
+ // Call the complete stub to scan the whole dictionary.
+
+ CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch1);
+ spill_list.Remove(scratch2);
+
+ __ PushCPURegList(spill_list);
+
+ if (name.is(x0)) {
+ ASSERT(!elements.is(x1));
+ __ Mov(x1, name);
+ __ Mov(x0, elements);
+ } else {
+ __ Mov(x0, elements);
+ __ Mov(x1, name);
+ }
+
+ Label not_found;
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ Cbz(x0, &not_found);
+ __ Mov(scratch2, x2); // Move entry index into scratch2.
+ __ PopCPURegList(spill_list);
+ __ B(done);
+
+ __ Bind(&not_found);
+ __ PopCPURegList(spill_list);
+ __ B(miss);
+}
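+
+// The probe-offset addition in the loop above exploits the layout of the
+// hash field: the hash lives in the bits above Name::kHashShift. Adding the
+// probe offset pre-shifted is therefore the same as adding it after
+// extraction, saving a shift per probe (the ASSERT rules out overflow):
+//
+//   ((hash_field >> kHashShift) + probe_offset) & mask
+//       == ((hash_field + (probe_offset << kHashShift)) >> kHashShift) & mask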
+
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ ASSERT(!AreAliased(receiver, properties, scratch0));
+ ASSERT(name->IsUniqueName());
+  // If the names of the slots probed for this hash value (in the range from
+  // 1 to kProbes - 1) are not equal to the given name, and the kProbes-th
+  // slot is not used (its name is the undefined value), then the hash table
+  // is guaranteed not to contain the property. This holds even if some slots
+  // represent deleted properties (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+    // 'scratch0' is reused below, first as the probe index and then as the
+    // loaded entry name.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+    // The capacity is a power of two, stored as a smi.
+ __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
+ __ Sub(index, index, 1);
+ __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ Register entity_name = scratch0;
+    // Finding undefined at this point means the name is not in the table.
+ Register tmp = index;
+ __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
+
+    // Stop if we found the property.
+ __ Cmp(entity_name, Operand(name));
+ __ B(eq, miss);
+
+ Label good;
+ __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
+
+ // Check if the entry name is not a unique name.
+ __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ Ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entity_name, miss);
+ __ Bind(&good);
+ }
+
+ CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
+
+ __ PushCPURegList(spill_list);
+
+ __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Mov(x1, Operand(name));
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
+ __ CallStub(&stub);
+ // Move stub return value to scratch0. Note that scratch0 is not included in
+ // spill_list and won't be clobbered by PopCPURegList.
+ __ Mov(scratch0, x0);
+ __ PopCPURegList(spill_list);
+
+ __ Cbz(scratch0, done);
+ __ B(miss);
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ //
+ // Arguments are in x0 and x1:
+ // x0: property dictionary.
+ // x1: the name of the property we are looking for.
+ //
+  // The return value is in x0: zero if the lookup failed, non-zero otherwise.
+  // If the lookup is successful, x2 will contain the index of the entry.
+
+ Register result = x0;
+ Register dictionary = x0;
+ Register key = x1;
+ Register index = x2;
+ Register mask = x3;
+ Register hash = x4;
+ Register undefined = x5;
+ Register entry_key = x6;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
+ __ Sub(mask, mask, 1);
+
+ __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+    // The capacity is a power of two, stored as a smi.
+ if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right
+      // shifting the hash in a separate instruction. The value
+      // hash + i + i * i is right shifted by the following And instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(index, hash,
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift);
+ } else {
+ __ Mov(index, hash);
+ }
+ __ And(index, mask, Operand(index, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // Finding undefined at this point means the name is not in the table.
+ __ Cmp(entry_key, undefined);
+ __ B(eq, &not_in_dictionary);
+
+    // Stop if we found the property.
+ __ Cmp(entry_key, key);
+ __ B(eq, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a unique name.
+ __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ }
+ }
+
+ __ Bind(&maybe_in_dictionary);
+  // If we are doing a negative lookup, then a probing failure should be
+  // treated as a lookup success. For a positive lookup, a probing failure
+  // should be treated as a lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ Mov(result, 0);
+ __ Ret();
+ }
+
+ __ Bind(&in_dictionary);
+ __ Mov(result, 1);
+ __ Ret();
+
+ __ Bind(&not_in_dictionary);
+ __ Mov(result, 0);
+ __ Ret();
+}
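+
+// For reference, the probing above is open addressing over a power-of-two
+// table. A C++ sketch of one lookup (illustrative; probe_offset(i) stands
+// for NameDictionary::GetProbeOffset(i), the 'i + i * i' term from the
+// comments):
+//
+//   uint32_t mask = capacity - 1;  // capacity is 2^n
+//   for (int i = 0; i < kTotalProbes; i++) {
+//     uint32_t entry = (hash + probe_offset(i)) & mask;
+//     Object* key = table[kElementsStartIndex + entry * kEntrySize];
+//     if (key == undefined) return NOT_FOUND;  // never-used slot: give up
+//     if (key == name) return entry;           // hit
+//     // Otherwise: collision (or deleted entry); keep probing.
+//   }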
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatch");
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
+ __ TailCallStub(&stub);
+
+ } else if (mode == DONT_OVERRIDE) {
+ Register kind = x3;
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ // TODO(jbramley): Is this the best way to handle this? Can we make the
+ // tail calls conditional, rather than hopping over each one?
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ T stub(masm->isolate(), candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+// TODO(jbramley): If this needs to be a special case, make it a proper template
+// specialization, and not a separate function.
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatchOneArgument");
+ // x0 - argc
+ // x1 - constructor?
+ // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // sp[0] - last argument
+
+ Register allocation_site = x2;
+ Register kind = x3;
+
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // Is the low bit set? If so, the array is holey.
+ __ Tbnz(kind, 0, &normal_sequence);
+ }
+
+ // Look at the last argument.
+ // TODO(jbramley): What does a 0 argument represent?
+ __ Peek(x10, 0);
+ __ Cbz(x10, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ Bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the slot).
+ __ Orr(kind, kind, 1);
+
+ if (FLAG_debug_code) {
+ __ Ldr(x10, FieldMemOperand(allocation_site, 0));
+ __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
+ &normal_sequence);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Save the resulting elements kind in type info. We can't just store 'kind'
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field; upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ Ldr(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
+ __ Str(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+
+ __ Bind(&normal_sequence);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
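+
+// The 'Orr(kind, kind, 1)' above relies on the numbering asserted at the top
+// of this function: each packed kind is even and its holey counterpart is
+// the next odd value, so setting bit 0 converts packed to holey:
+//
+//   FAST_SMI_ELEMENTS    (0) | 1 == FAST_HOLEY_SMI_ELEMENTS    (1)
+//   FAST_ELEMENTS        (2) | 1 == FAST_HOLEY_ELEMENTS        (3)
+//   FAST_DOUBLE_ELEMENTS (4) | 1 == FAST_HOLEY_DOUBLE_ELEMENTS (5)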
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+    // For internal arrays we only need a few stubs.
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ Register argc = x0;
+ if (argument_count_ == ANY) {
+ Label zero_case, n_case;
+ __ Cbz(argc, &zero_case);
+ __ Cmp(argc, 1);
+ __ B(ne, &n_case);
+
+ // One argument.
+ CreateArrayDispatchOneArgument(masm, mode);
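+    // CreateArrayDispatchOneArgument always ends in a tail call or an abort,
+    // so control cannot fall through into the zero-argument case below.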
+
+ __ Bind(&zero_case);
+ // No arguments.
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ Bind(&n_case);
+ // N arguments.
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("ArrayConstructorStub::Generate");
+ // ----------- S t a t e -------------
+ // -- x0 : argc (only if argument_count_ == ANY)
+ // -- x1 : constructor
+ // -- x2 : AllocationSite or undefined
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ Register constructor = x1;
+ Register allocation_site = x2;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+    // builtin Array functions, which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+    // A NULL initial map and a Smi will both fail this check, since NULL
+    // carries a Smi tag.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+
+ // We should either have undefined in the allocation_site register or a
+ // valid AllocationSite.
+ __ AssertUndefinedOrAllocationSite(allocation_site, x10);
+ }
+
+ Register kind = x3;
+ Label no_info;
+  // Get the elements kind and dispatch on it.
+ __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
+
+ __ Ldrsw(kind,
+ UntagSmiFieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ Bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label zero_case, n_case;
+ Register argc = x0;
+
+ __ Cbz(argc, &zero_case);
+ __ CompareAndBranch(argc, 1, ne, &n_case);
+
+ // One argument.
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
+ __ Cbz(x10, &packed_case);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+
+ __ Bind(&packed_case);
+ }
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+ __ TailCallStub(&stub1);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0);
+
+ __ Bind(&n_case);
+ // N arguments.
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- x1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ Register constructor = x1;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+    // builtin Array functions, which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+    // A NULL initial map and a Smi will both fail this check, since NULL
+    // carries a Smi tag.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+ }
+
+ Register kind = w3;
+  // Figure out the right elements kind.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Retrieve elements_kind from map.
+ __ LoadElementsKindFromMap(kind, x10);
+
+ if (FLAG_debug_code) {
+ __ Cmp(x3, FAST_ELEMENTS);
+ __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
+ __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ }
+
+ Label fast_elements_case;
+ __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ Bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : callee
+ // -- x4 : call_data
+ // -- x2 : holder
+ // -- x1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 8] : first argument
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // FunctionCallbackArguments: context, callee and call data.
+ __ Push(context, callee, call_data);
+
+  // Load context from callee.
+ __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
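+  // The call_data register is pushed twice below, as both the return value
+  // and the return value default slot; these must hold undefined, so load the
+  // undefined root unless the register already holds it.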
+ if (!call_data_undefined) {
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ }
+ Register isolate_reg = x5;
+ __ Mov(isolate_reg, ExternalReference::isolate_address(isolate()));
+
+ // FunctionCallbackArguments:
+ // return value, return value default, isolate, holder.
+ __ Push(call_data, call_data, isolate_reg, holder);
+
+ // Prepare arguments.
+ Register args = x6;
+ __ Mov(args, masm->StackPointer());
+
+ // Allocate the v8::Arguments structure in the arguments' space, since it's
+ // not controlled by GC.
+ const int kApiStackSpace = 4;
+
+  // Allocate space so that CallApiFunctionAndReturn can store some scratch
+  // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ ASSERT(!AreAliased(x0, api_function_address));
+ // x0 = FunctionCallbackInfo&
+  // The FunctionCallbackInfo struct sits just above the return address.
+ __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
+ // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+ __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc and
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ Mov(x10, argc);
+ __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
+
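+  // The stack to unwind covers the JS arguments, the FunctionCallbackArguments
+  // slots and the receiver (hence the extra slot).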
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+  // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ const int spill_offset = 1 + kApiStackSpace;
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- ...
+ // -- x2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = x2;
+
+ __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
+ __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
+
+ const int kApiStackSpace = 1;
+
+  // Allocate space so that CallApiFunctionAndReturn can store some scratch
+  // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+  // Create a PropertyAccessorInfo instance on the stack above the exit frame,
+  // with x1 (internal::Object** args_) as the data.
+ __ Poke(x1, 1 * kPointerSize);
+ __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
+
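+  // The stack to unwind covers the PropertyCallbackArguments slots plus the
+  // name handle at sp[0] (hence the extra slot).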
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ const int spill_offset = 1 + kApiStackSpace;
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/code-stubs-arm64.h b/chromium/v8/src/arm64/code-stubs-arm64.h
new file mode 100644
index 00000000000..6baf96989ad
--- /dev/null
+++ b/chromium/v8/src/arm64/code-stubs-arm64.h
@@ -0,0 +1,478 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
+#define V8_ARM64_CODE_STUBS_ARM64_H_
+
+#include "src/ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
+class StoreBufferOverflowStub: public PlatformCodeStub {
+ public:
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // TODO(all): These don't seem to be used any more. Delete them.
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class StoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+ : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+
+ static Register to_be_pushed_lr() { return ip0; }
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() { return StoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class RestoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+ : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() { return RestoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class RecordWriteStub: public PlatformCodeStub {
+ public:
+ // Stub to record the write of 'value' at 'address' in 'object'.
+ // Typically 'address' = 'object' + <some offset>.
+ // See MacroAssembler::RecordWriteField() for example.
+ RecordWriteStub(Isolate* isolate,
+ Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : PlatformCodeStub(isolate),
+ object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static Mode GetMode(Code* stub) {
+ // Find the mode depending on the first two instructions.
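+    // The possible instruction pairs, as written by Patch() below, are:
+    //   b <noncompacting>, adr xzr   : INCREMENTAL
+    //   adr xzr, b <compacting>      : INCREMENTAL_COMPACTION
+    //   adr xzr, adr xzr             : STORE_BUFFER_ONLY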
+ Instruction* instr1 =
+ reinterpret_cast<Instruction*>(stub->instruction_start());
+ Instruction* instr2 = instr1->following();
+
+ if (instr1->IsUncondBranchImm()) {
+ ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
+ return INCREMENTAL;
+ }
+
+ ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
+
+ if (instr2->IsUncondBranchImm()) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(instr2->IsPCRelAddressing());
+
+ return STORE_BUFFER_ONLY;
+ }
+
+  // We patch the first two instructions of the stub back and forth between an
+ // adr and branch when we start and stop incremental heap marking.
+ // The branch is
+ // b label
+ // The adr is
+ // adr xzr label
+ // so effectively a nop.
+ static void Patch(Code* stub, Mode mode) {
+    // We are going to patch the first two instructions of the stub.
+ PatchingAssembler patcher(
+ reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
+ Instruction* instr1 = patcher.InstructionAt(0);
+ Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
+ // Instructions must be either 'adr' or 'b'.
+ ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+ ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+ // Retrieve the offsets to the labels.
+ int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
+ int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
+
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ }
+
+ private:
+ // This is a helper class to manage the registers associated with the stub.
+ // The 'object' and 'address' registers must be preserved.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch),
+ saved_regs_(kCallerSaved),
+ saved_fp_regs_(kCallerSavedFP) {
+ ASSERT(!AreAliased(scratch, object, address));
+
+ // The SaveCallerSaveRegisters method needs to save caller-saved
+ // registers, but we don't bother saving MacroAssembler scratch registers.
+ saved_regs_.Remove(MacroAssembler::DefaultTmpList());
+ saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
+
+ // We would like to require more scratch registers for this stub,
+ // but the number of registers comes down to the ones used in
+ // FullCodeGen::SetVar(), which is architecture independent.
+ // We allocate 2 extra scratch registers that we'll save on the stack.
+ CPURegList pool_available = GetValidRegistersForAllocation();
+ CPURegList used_regs(object, address, scratch);
+ pool_available.Remove(used_regs);
+ scratch1_ = Register(pool_available.PopLowestIndex());
+ scratch2_ = Register(pool_available.PopLowestIndex());
+
+ // The scratch registers will be restored by other means so we don't need
+ // to save them with the other caller saved registers.
+ saved_regs_.Remove(scratch0_);
+ saved_regs_.Remove(scratch1_);
+ saved_regs_.Remove(scratch2_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->Push(scratch1_, scratch2_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->Pop(scratch2_, scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ masm->PushCPURegList(saved_regs_);
+ if (mode == kSaveFPRegs) {
+ masm->PushCPURegList(saved_fp_regs_);
+ }
+ }
+
+    void RestoreCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ if (mode == kSaveFPRegs) {
+ masm->PopCPURegList(saved_fp_regs_);
+ }
+ masm->PopCPURegList(saved_regs_);
+ }
+
+ Register object() { return object_; }
+ Register address() { return address_; }
+ Register scratch0() { return scratch0_; }
+ Register scratch1() { return scratch1_; }
+ Register scratch2() { return scratch2_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ Register scratch2_;
+ CPURegList saved_regs_;
+ CPURegList saved_fp_regs_;
+
+ // TODO(all): We should consider moving this somewhere else.
+ static CPURegList GetValidRegistersForAllocation() {
+ // The list of valid registers for allocation is defined as all the
+ // registers without those with a special meaning.
+ //
+ // The default list excludes registers x26 to x31 because they are
+ // reserved for the following purpose:
+ // - x26 root register
+ // - x27 context pointer register
+ // - x28 jssp
+ // - x29 frame pointer
+    // - x30 link register (lr)
+ // - x31 xzr/stack pointer
+ CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
+
+ // We also remove MacroAssembler's scratch registers.
+ list.Remove(MacroAssembler::DefaultTmpList());
+
+ return list;
+ }
+
+ friend class RecordWriteStub;
+ };
+
+ // A list of stub variants which are pregenerated.
+ // The variants are stored in the same format as the minor key, so
+ // MinorKeyFor() can be used to populate and check this list.
+ static const int kAheadOfTime[];
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return MinorKeyFor(object_, value_, address_, remembered_set_action_,
+ save_fp_regs_mode_);
+ }
+
+ static int MinorKeyFor(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction action,
+ SaveFPRegsMode fp_mode) {
+ ASSERT(object.Is64Bits());
+ ASSERT(value.Is64Bits());
+ ASSERT(address.Is64Bits());
+ return ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(action) |
+ SaveFPRegsModeBits::encode(fp_mode);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
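+  // Each register code needs five bits (there are 32 registers), so the three
+  // register fields and the two mode fields fit in 17 bits of the minor key.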
+ class ObjectBits: public BitField<int, 0, 5> {};
+ class ValueBits: public BitField<int, 5, 5> {};
+ class AddressBits: public BitField<int, 10, 5> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
+
+
+// Helper to call C++ functions from generated code. The caller must prepare
+// the exit frame before doing the call with GenerateCall.
+class DirectCEntryStub: public PlatformCodeStub {
+ public:
+ explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+};
+
+
+class NameDictionaryLookupStub: public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+ : PlatformCodeStub(isolate), mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2);
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ Major MajorKey() { return NameDictionaryLookup; }
+
+ int MinorKey() {
+ return LookupModeBits::encode(mode_);
+ }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
+
+
+class SubStringStub: public PlatformCodeStub {
+ public:
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public PlatformCodeStub {
+ public:
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
+
+  // Compares two flat ASCII strings and returns the result in x0.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+  // Compares two flat ASCII strings for equality and returns the result
+  // in x0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal);
+};
+
+
+struct PlatformCallInterfaceDescriptor {
+ explicit PlatformCallInterfaceDescriptor(
+ TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) { }
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CODE_STUBS_ARM64_H_
diff --git a/chromium/v8/src/arm64/codegen-arm64.cc b/chromium/v8/src/arm64/codegen-arm64.cc
new file mode 100644
index 00000000000..9eb0d4a5f76
--- /dev/null
+++ b/chromium/v8/src/arm64/codegen-arm64.cc
@@ -0,0 +1,620 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/arm64/simulator-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_arm64_machine_code = NULL;
+double fast_exp_simulator(double x) {
+  Simulator* simulator = Simulator::current(Isolate::Current());
+ Simulator::CallArgument args[] = {
+ Simulator::CallArgument(x),
+ Simulator::CallArgument::End()
+ };
+ return simulator->CallDouble(fast_exp_arm64_machine_code, args);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &std::exp;
+
+  // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to
+  // create an AAPCS64-compliant exp() function. This will be faster than the
+  // C library's exp() function, but probably less accurate.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::exp;
+
+ ExternalReference::InitializeMathExpData();
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
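+  // The generated function is called directly from C++ code, so it must use
+  // the C stack pointer (csp) rather than jssp.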
+ masm.SetStackPointer(csp);
+
+ // The argument will be in d0 on entry.
+ DoubleRegister input = d0;
+ // Use other caller-saved registers for all other values.
+ DoubleRegister result = d1;
+ DoubleRegister double_temp1 = d2;
+ DoubleRegister double_temp2 = d3;
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Register temp3 = x12;
+
+ MathExpGenerator::EmitMathExp(&masm, input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+ // Move the result to the return register.
+ masm.Fmov(d0, result);
+ masm.Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_arm64_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ return &std::sqrt;
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ // ----------- S t a t e -------------
+ // -- x2 : receiver
+ // -- x3 : target map
+ // -----------------------------------
+ Register receiver = x2;
+ Register map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
+ allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver,
+ HeapObject::kMapOffset,
+ map,
+ x10,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- x3 : target map, scratch for subsequent call
+ // -----------------------------------
+ Register receiver = x2;
+ Register target_map = x3;
+
+ Label gc_required, only_change_map;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedDoubleArray.
+ Register array_size = x6;
+ Register array = x7;
+ __ Lsl(array_size, length, kDoubleSizeLog2);
+ __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
+ __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
+  // The array register holds a non-tagged heap object at this point.
+
+ // Set the destination FixedDoubleArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ Add(x10, array, kHeapObjectTag);
+ __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
+ x6, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
+
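+  // In a FixedDoubleArray the hole is represented by a canonical NaN bit
+  // pattern; non-smi source elements are replaced with it in the loop below.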
+ FPRegister nan_d = d1;
+ __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
+
+ Label entry, done;
+ __ B(&entry);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ B(&done);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(lr);
+ __ B(fail);
+
+  // Iterate over the array, copying and converting smis to doubles. If an
+ // element is non-smi, write a hole to the destination.
+ {
+ Label loop;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
+ __ Tst(x13, kSmiTagMask);
+ __ Fcsel(d0, d0, nan_d, eq);
+ __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(lr);
+ __ Bind(&done);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -- x3 : target map, scratch for subsequent call
+ // -- x4 : scratch (elements)
+ // -----------------------------------
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register target_map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Label only_change_map;
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ // TODO(all): These registers may not need to be pushed. Examine
+ // RecordWriteStub and check whether it's needed.
+ __ Push(target_map, receiver, key, value);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedArray.
+ Register array_size = x6;
+ Register array = x7;
+ Label gc_required;
+ __ Mov(array_size, FixedDoubleArray::kHeaderSize);
+ __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
+ __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
+
+  // Set the destination FixedArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedArray::kHeaderSize);
+ __ Add(array, array, kHeapObjectTag);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
+
+ Register the_hole = x14;
+ Register heap_num_map = x15;
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
+
+ Label entry;
+ __ B(&entry);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(value, key, receiver, target_map);
+ __ Pop(lr);
+ __ B(fail);
+
+ {
+ Label loop, convert_hole;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ __ Cmp(x13, kHoleNanInt64);
+ __ B(eq, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ Register heap_num = x5;
+ __ AllocateHeapNumber(heap_num, &gc_required, x6, x4,
+ x13, heap_num_map);
+ __ Mov(x13, dst_elements);
+ __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
+ __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ __ B(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ Bind(&convert_hole);
+ __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(value, key, receiver, target_map);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
+ kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Pop(lr);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ PatchingAssembler patcher(young_sequence_.start(),
+ young_sequence_.length() / kInstructionSize);
+ // The young sequence is the frame setup code for FUNCTION code types. It is
+ // generated by FullCodeGenerator::Generate.
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+
+#ifdef DEBUG
+ const int length = kCodeAgeStubEntryOffset / kInstructionSize;
+ ASSERT(old_sequence_.length() >= kCodeAgeStubEntryOffset);
+ PatchingAssembler patcher_old(old_sequence_.start(), length);
+ MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
+#endif
+}
+
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return memcmp(candidate, old_sequence_.start(), kCodeAgeStubEntryOffset) == 0;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ return MacroAssembler::IsYoungSequence(isolate, sequence);
+}
+
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(isolate, sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ byte* target = sequence + kCodeAgeStubEntryOffset;
+ Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ PatchingAssembler patcher(sequence,
+ kNoCodeAgeSequenceLength / kInstructionSize);
+ if (age == kNoAgeCodeAge) {
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+ } else {
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
+ MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
+ }
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ Ldr(result.W(),
+ UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ Add(index, index, result.W());
+ __ B(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ Bind(&cons_string);
+ __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
+ // Get the first of the two strings and load its instance type.
+ __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ Bind(&indirect_string_loaded);
+ __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ Bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
+
+  // Prepare sequential strings.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ B(&check_encoding);
+
+ // Handle external strings.
+ __ Bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ Tst(result, kIsIndirectStringMask);
+ __ Assert(eq, kExternalStringExpectedButNotFound);
+ }
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
+ // can be bound far away in deferred code.
+ __ Tst(result, kShortExternalStringMask);
+ __ B(ne, call_runtime);
+ __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label ascii, done;
+ __ Bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
+ // Two-byte string.
+ __ Ldrh(result, MemOperand(string, index, SXTW, 1));
+ __ B(&done);
+ __ Bind(&ascii);
+ // Ascii string.
+ __ Ldrb(result, MemOperand(string, index, SXTW));
+ __ Bind(&done);
+}
+
+
+static MemOperand ExpConstant(Register base, int index) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_temp1,
+ DoubleRegister double_temp2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ // TODO(jbramley): There are several instances where fnmsub could be used
+ // instead of fmul and fsub. Doing this changes the result, but since this is
+ // an estimation anyway, does it matter?
+
+ ASSERT(!AreAliased(input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+ DoubleRegister double_temp3 = result;
+ Register constants = temp3;
+
+ // The algorithm used relies on some magic constants which are initialized in
+ // ExternalReference::InitializeMathExpData().
+
+ // Load the address of the start of the array.
+ __ Mov(constants, ExternalReference::math_exp_constants(0));
+
+ // We have to do a four-way split here:
+ // - If input <= about -708.4, the output always rounds to zero.
+ // - If input >= about 709.8, the output always rounds to +infinity.
+ // - If the input is NaN, the output is NaN.
+ // - Otherwise, the result needs to be calculated.
+ Label result_is_finite_non_zero;
+ // Assert that we can load offset 0 (the small input threshold) and offset 1
+ // (the large input threshold) with a single ldp.
+ ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
+ ExpConstant(constants, 0).offset()));
+ __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
+
+ __ Fcmp(input, double_temp1);
+ __ Fccmp(input, double_temp2, NoFlag, hi);
+ // At this point, the condition flags can be in one of five states:
+ // NZCV
+ // 1000 -708.4 < input < 709.8 result = exp(input)
+ // 0110 input == 709.8 result = +infinity
+ // 0010 input > 709.8 result = +infinity
+ // 0011 input is NaN result = input
+ // 0000 input <= -708.4 result = +0.0
+
+ // Continue the common case first. 'mi' tests N == 1.
+ __ B(&result_is_finite_non_zero, mi);
+
+ // TODO(jbramley): Consider adding a +infinity register for ARM64.
+ __ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
+
+ // Select between +0.0 and +infinity. 'lo' tests C == 0.
+ __ Fcsel(result, fp_zero, double_temp2, lo);
+ // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
+ __ Fcsel(result, result, input, vc);
+ __ B(&done);
+
+ // The rest is magic, as described in InitializeMathExpData().
+ __ Bind(&result_is_finite_non_zero);
+
+ // Assert that we can load offset 3 and offset 4 with a single ldp.
+ ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
+ ExpConstant(constants, 3).offset()));
+ __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
+ __ Fmadd(double_temp1, double_temp1, input, double_temp3);
+ __ Fmov(temp2.W(), double_temp1.S());
+ __ Fsub(double_temp1, double_temp1, double_temp3);
+
+ // Assert that we can load offset 5 and offset 6 with a single ldp.
+ ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
+ ExpConstant(constants, 5).offset()));
+ __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
+ // TODO(jbramley): Consider using Fnmsub here.
+ __ Fmul(double_temp1, double_temp1, double_temp2);
+ __ Fsub(double_temp1, double_temp1, input);
+
+ __ Fmul(double_temp2, double_temp1, double_temp1);
+ __ Fsub(double_temp3, double_temp3, double_temp1);
+ __ Fmul(double_temp3, double_temp3, double_temp2);
+
+ __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
+
+ __ Ldr(double_temp2, ExpConstant(constants, 7));
+ // TODO(jbramley): Consider using Fnmsub here.
+ __ Fmul(double_temp3, double_temp3, double_temp2);
+ __ Fsub(double_temp3, double_temp3, double_temp1);
+
+ // The 8th constant is 1.0, so use an immediate move rather than a load.
+ // We can't generate a runtime assertion here as we would need to call Abort
+ // in the runtime and we don't have an Isolate when we generate this code.
+ __ Fmov(double_temp2, 1.0);
+ __ Fadd(double_temp3, double_temp3, double_temp2);
+
+ __ And(temp2, temp2, 0x7ff);
+ __ Add(temp1, temp1, 0x3ff);
+
+ // Do the final table lookup.
+ __ Mov(temp3, ExternalReference::math_exp_log_table());
+
+ __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
+ __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
+ __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
+ __ Bfi(temp2, temp1, 32, 32);
+ __ Fmov(double_temp1, temp2);
+
+ __ Fmul(result, double_temp3, double_temp1);
+
+ __ Bind(&done);
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/codegen-arm64.h b/chromium/v8/src/arm64/codegen-arm64.h
new file mode 100644
index 00000000000..9ef148cc409
--- /dev/null
+++ b/chromium/v8/src/arm64/codegen-arm64.h
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_CODEGEN_ARM64_H_
+#define V8_ARM64_CODEGEN_ARM64_H_
+
+#include "src/ast.h"
+#include "src/ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output. Register index is asserted to be a 32-bit W
+ // register.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CODEGEN_ARM64_H_
diff --git a/chromium/v8/src/arm64/constants-arm64.h b/chromium/v8/src/arm64/constants-arm64.h
new file mode 100644
index 00000000000..f459b4b7576
--- /dev/null
+++ b/chromium/v8/src/arm64/constants-arm64.h
@@ -0,0 +1,1250 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_CONSTANTS_ARM64_H_
+#define V8_ARM64_CONSTANTS_ARM64_H_
+
+
+// Assert that this is an LP64 system.
+STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
+STATIC_ASSERT(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+
+
+// Get the standard printf format macros for C99 stdint types.
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+
+namespace v8 {
+namespace internal {
+
+
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLoadLiteralScaleLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MB;
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfFPRegisters = 32;
+// Callee saved registers are x19-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 11;
+const int kFirstCalleeSavedRegisterIndex = 19;
+// Callee saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+// Callee saved registers with no specific purpose in JS are x19-x25.
+const unsigned kJSCalleeSavedRegList = 0x03f80000;
+// TODO(all): k<Y>RegSize should probably be k<Y>RegSizeInBits.
+const unsigned kWRegSizeInBits = 32;
+const unsigned kWRegSizeInBitsLog2 = 5;
+const unsigned kWRegSize = kWRegSizeInBits >> 3;
+const unsigned kWRegSizeLog2 = kWRegSizeInBitsLog2 - 3;
+const unsigned kXRegSizeInBits = 64;
+const unsigned kXRegSizeInBitsLog2 = 6;
+const unsigned kXRegSize = kXRegSizeInBits >> 3;
+const unsigned kXRegSizeLog2 = kXRegSizeInBitsLog2 - 3;
+const unsigned kSRegSizeInBits = 32;
+const unsigned kSRegSizeInBitsLog2 = 5;
+const unsigned kSRegSize = kSRegSizeInBits >> 3;
+const unsigned kSRegSizeLog2 = kSRegSizeInBitsLog2 - 3;
+const unsigned kDRegSizeInBits = 64;
+const unsigned kDRegSizeInBitsLog2 = 6;
+const unsigned kDRegSize = kDRegSizeInBits >> 3;
+const unsigned kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3;
+const int64_t kWRegMask = 0x00000000ffffffffL;
+const int64_t kXRegMask = 0xffffffffffffffffL;
+const int64_t kSRegMask = 0x00000000ffffffffL;
+const int64_t kDRegMask = 0xffffffffffffffffL;
+// TODO(all): Check whether the expressions below work on all compilers or
+// whether they trigger an overflow error.
+const int64_t kDSignBit = 63;
+const int64_t kDSignMask = 0x1L << kDSignBit;
+const int64_t kSSignBit = 31;
+const int64_t kSSignMask = 0x1L << kSSignBit;
+const int64_t kXSignBit = 63;
+const int64_t kXSignMask = 0x1L << kXSignBit;
+const int64_t kWSignBit = 31;
+const int64_t kWSignMask = 0x1L << kWSignBit;
+const int64_t kDQuietNanBit = 51;
+const int64_t kDQuietNanMask = 0x1L << kDQuietNanBit;
+const int64_t kSQuietNanBit = 22;
+const int64_t kSQuietNanMask = 0x1L << kSQuietNanBit;
+const int64_t kByteMask = 0xffL;
+const int64_t kHalfWordMask = 0xffffL;
+const int64_t kWordMask = 0xffffffffL;
+const uint64_t kXMaxUInt = 0xffffffffffffffffUL;
+const uint64_t kWMaxUInt = 0xffffffffUL;
+const int64_t kXMaxInt = 0x7fffffffffffffffL;
+const int64_t kXMinInt = 0x8000000000000000L;
+const int32_t kWMaxInt = 0x7fffffff;
+const int32_t kWMinInt = 0x80000000;
+const unsigned kFramePointerRegCode = 29;
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kJSSPCode = 28;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+const unsigned kShiftAmountWRegMask = 0x1f;
+const unsigned kShiftAmountXRegMask = 0x3f;
+// Standard machine types defined by AAPCS64.
+const unsigned kByteSize = 8;
+const unsigned kByteSizeInBytes = kByteSize >> 3;
+const unsigned kHalfWordSize = 16;
+const unsigned kHalfWordSizeLog2 = 4;
+const unsigned kHalfWordSizeInBytes = kHalfWordSize >> 3;
+const unsigned kHalfWordSizeInBytesLog2 = kHalfWordSizeLog2 - 3;
+const unsigned kWordSize = 32;
+const unsigned kWordSizeLog2 = 5;
+const unsigned kWordSizeInBytes = kWordSize >> 3;
+const unsigned kWordSizeInBytesLog2 = kWordSizeLog2 - 3;
+const unsigned kDoubleWordSize = 64;
+const unsigned kDoubleWordSizeInBytes = kDoubleWordSize >> 3;
+const unsigned kQuadWordSize = 128;
+const unsigned kQuadWordSizeInBytes = kQuadWordSize >> 3;
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kDoubleExponentBias = 1023;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define INSTRUCTION_FIELDS_LIST(V_) \
+/* Register fields */ \
+V_(Rd, 4, 0, Bits) /* Destination register. */ \
+V_(Rn, 9, 5, Bits) /* First source register. */ \
+V_(Rm, 20, 16, Bits) /* Second source register. */ \
+V_(Ra, 14, 10, Bits) /* Third source register. */ \
+V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
+V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
+ /* store second source. */ \
+V_(PrefetchMode, 4, 0, Bits) \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, Bits) \
+V_(FlagsUpdate, 29, 29, Bits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, SignedBits) \
+V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, Bits) \
+V_(ImmDPShift, 15, 10, Bits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, Bits) \
+V_(ShiftAddSub, 23, 22, Bits) \
+ \
+/* Add/subtract extend */                                                     \
+V_(ImmExtendShift, 12, 10, Bits) \
+V_(ExtendMode, 15, 13, Bits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, Bits) \
+V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, Bits) \
+V_(ImmRotate, 21, 16, Bits) \
+V_(ImmSetBits, 15, 10, Bits) \
+V_(ImmR, 21, 16, Bits) \
+V_(ImmS, 15, 10, Bits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, SignedBits) \
+V_(ImmTestBranchBit40, 23, 19, Bits) \
+V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, Bits) \
+V_(ConditionBranch, 3, 0, Bits) \
+V_(Nzcv, 3, 0, Bits) \
+V_(ImmCondCmp, 20, 16, Bits) \
+V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, Bits) \
+V_(ImmFP, 20, 13, Bits) \
+V_(FPScale, 15, 10, Bits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, SignedBits) \
+V_(ImmLSUnsigned, 21, 10, Bits) \
+V_(ImmLSPair, 21, 15, SignedBits) \
+V_(SizeLS, 31, 30, Bits) \
+V_(ImmShiftLS, 12, 12, Bits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, SignedBits) \
+V_(ImmCmpBranch, 23, 5, SignedBits) \
+V_(ImmLLiteral, 23, 5, SignedBits) \
+V_(ImmException, 20, 5, Bits) \
+V_(ImmHint, 11, 5, Bits) \
+V_(ImmBarrierDomain, 11, 10, Bits) \
+V_(ImmBarrierType, 9, 8, Bits) \
+ \
+/* System (MRS, MSR) */ \
+V_(ImmSystemRegister, 19, 5, Bits) \
+V_(SysO0, 19, 19, Bits) \
+V_(SysOp1, 18, 16, Bits) \
+V_(SysOp2, 7, 5, Bits) \
+V_(CRn, 15, 12, Bits) \
+V_(CRm, 11, 8, Bits) \
+
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+/* NZCV */ \
+V_(Flags, 31, 28, Bits, uint32_t) \
+V_(N, 31, 31, Bits, bool) \
+V_(Z, 30, 30, Bits, bool) \
+V_(C, 29, 29, Bits, bool) \
+V_(V, 28, 28, Bits, uint32_t) \
+M_(NZCV, Flags_mask) \
+ \
+/* FPCR */ \
+V_(AHP, 26, 26, Bits, bool) \
+V_(DN, 25, 25, Bits, bool) \
+V_(FZ, 24, 24, Bits, bool) \
+V_(RMode, 23, 22, Bits, FPRounding) \
+M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+
+// Fields offsets.
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2) \
+ const int Name##_offset = LowBit; \
+ const int Name##_width = HighBit - LowBit + 1; \
+ const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define DECLARE_INSTRUCTION_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1) \
+ DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2)
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_INSTRUCTION_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
+#undef DECLARE_INSTRUCTION_FIELDS_OFFSETS
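+
+// For example, the V_(Rd, 4, 0, Bits) entry expands to:
+//   const int Rd_offset = 0;
+//   const int Rd_width = 5;
+//   const uint32_t Rd_mask = 0x0000001f;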
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Condition codes.
+enum Condition {
+ eq = 0,
+ ne = 1,
+ hs = 2, cs = hs,
+ lo = 3, cc = lo,
+ mi = 4,
+ pl = 5,
+ vs = 6,
+ vc = 7,
+ hi = 8,
+ ls = 9,
+ ge = 10,
+ lt = 11,
+ gt = 12,
+ le = 13,
+ al = 14,
+ nv = 15 // Behaves as always/al.
+};
+
+inline Condition NegateCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no never condition.
+ ASSERT((cond != al) && (cond != nv));
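+  // Every other condition is encoded such that it differs from its negation
+  // only in the low bit (eq/ne, hs/lo, mi/pl, ...), so flipping bit 0 negates
+  // the condition.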
+ return static_cast<Condition>(cond ^ 1);
+}
+
+// Commute a condition such that {a cond b == b cond' a}.
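+// For example, {a lt b} == {b gt a}, so lt commutes to gt. eq commutes to
+// itself; conditions such as mi have no commuted form and must not reach here.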
+inline Condition CommuteCondition(Condition cond) {
+ switch (cond) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ case eq:
+ return eq;
+ default:
+ // In practice this function is only used with a condition coming from
+ // TokenToCondition in lithium-codegen-arm64.cc. Any other condition is
+      // invalid as it doesn't necessarily make sense to reverse it (consider
+ // 'mi' for instance).
+ UNREACHABLE();
+ return nv;
+ }
+}
+
+enum FlagsUpdate {
+ SetFlags = 1,
+ LeaveFlags = 0
+};
+
+enum StatusFlags {
+ NoFlag = 0,
+
+ // Derive the flag combinations from the system register bit descriptions.
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+ // Floating-point comparison results.
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
+ FPGreaterThanFlag = CFlag,
+ FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+ NO_SHIFT = -1,
+ LSL = 0x0,
+ LSR = 0x1,
+ ASR = 0x2,
+ ROR = 0x3
+};
+
+enum Extend {
+ NO_EXTEND = -1,
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
+};
+
+enum SystemHint {
+ NOP = 0,
+ YIELD = 1,
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5
+};
+
+enum BarrierDomain {
+ OuterShareable = 0,
+ NonShareable = 1,
+ InnerShareable = 2,
+ FullSystem = 3
+};
+
+enum BarrierType {
+ BarrierOther = 0,
+ BarrierReads = 1,
+ BarrierWrites = 2,
+ BarrierAll = 3
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+ NZCV = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x2 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset,
+ FPCR = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x4 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
+};
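+
+// As a worked example, the NZCV entry above evaluates to the architectural
+// register number (o0:op1:CRn:CRm:op2):
+//   ((0x1 << 19) | (0x3 << 16) | (0x4 << 12) | (0x2 << 8) | (0x0 << 5)) >> 5
+//       == 0xB4200 >> 5 == 0x5A10.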
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField {
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
+};
+
+// PC relative addressing.
+enum PCRelAddressingOp {
+ PCRelAddressingFixed = 0x10000000,
+ PCRelAddressingFMask = 0x1F000000,
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
+};
+
+// Add/sub (immediate, shifted and extended).
+const int kSFOffset = 31;
+enum AddSubOp {
+ AddSubOpMask = 0x60000000,
+ AddSubSetFlagsBit = 0x20000000,
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
+};
+
+#define ADD_SUB_OP_LIST(V) \
+ V(ADD), \
+ V(ADDS), \
+ V(SUB), \
+ V(SUBS)
+
+enum AddSubImmediateOp {
+ AddSubImmediateFixed = 0x11000000,
+ AddSubImmediateFMask = 0x1F000000,
+ AddSubImmediateMask = 0xFF000000,
+ #define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
+ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
+ #undef ADD_SUB_IMMEDIATE
+};
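+
+// For example, the ADD_SUB_IMMEDIATE expansion above yields
+// ADD_w_imm = 0x11000000 and ADD_x_imm = 0x91000000, the base encodings of
+// "add wd, wn, #imm" and "add xd, xn, #imm" respectively.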
+
+enum AddSubShiftedOp {
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+ #define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
+ A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
+ #undef ADD_SUB_SHIFTED
+};
+
+enum AddSubExtendedOp {
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+ #define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
+ A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
+ #undef ADD_SUB_EXTENDED
+};
+
+// Add/sub with carry.
+enum AddSubWithCarryOp {
+ AddSubWithCarryFixed = 0x1A000000,
+ AddSubWithCarryFMask = 0x1FE00000,
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+};
+
+
+// Logical (immediate and shifted register).
+enum LogicalOp {
+ LogicalOpMask = 0x60200000,
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
+};
+
+// Logical immediate.
+enum LogicalImmediateOp {
+ LogicalImmediateFixed = 0x12000000,
+ LogicalImmediateFMask = 0x1F800000,
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+};
+
+// Logical shifted register.
+enum LogicalShiftedOp {
+ LogicalShiftedFixed = 0x0A000000,
+ LogicalShiftedFMask = 0x1F000000,
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
+};
+
+// Move wide immediate.
+enum MoveWideImmediateOp {
+ MoveWideImmediateFixed = 0x12800000,
+ MoveWideImmediateFMask = 0x1F800000,
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+};
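+
+// For example, MOVZ_x = 0x12800000 | 0x40000000 | 0x80000000 = 0xD2800000.
+// With ImmMoveWide = 1 (1 << 5 = 0x20) and Rd = x0, "movz x0, #1" encodes
+// as 0xD2800020.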
+
+// Bitfield.
+const int kBitfieldNOffset = 22;  // Position of the bitfield N bit.
+enum BitfieldOp {
+ BitfieldFixed = 0x13000000,
+ BitfieldFMask = 0x1F800000,
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
+};
+
+// Extract.
+enum ExtractOp {
+ ExtractFixed = 0x13800000,
+ ExtractFMask = 0x1F800000,
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
+};
+
+// Unconditional branch.
+enum UnconditionalBranchOp {
+ UnconditionalBranchFixed = 0x14000000,
+ UnconditionalBranchFMask = 0x7C000000,
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
+};
+
+// Unconditional branch to register.
+enum UnconditionalBranchToRegisterOp {
+ UnconditionalBranchToRegisterFixed = 0xD6000000,
+ UnconditionalBranchToRegisterFMask = 0xFE000000,
+ UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000
+};
+
+// Compare and branch.
+enum CompareBranchOp {
+ CompareBranchFixed = 0x34000000,
+ CompareBranchFMask = 0x7E000000,
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
+};
+
+// Test and branch.
+enum TestBranchOp {
+ TestBranchFixed = 0x36000000,
+ TestBranchFMask = 0x7E000000,
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
+};
+
+// Conditional branch.
+enum ConditionalBranchOp {
+ ConditionalBranchFixed = 0x54000000,
+ ConditionalBranchFMask = 0xFE000000,
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
+};
+
+// System.
+// System instruction encoding is complicated because some instructions use op
+// and CR fields to encode parameters. To handle this cleanly, the system
+// instructions are split into more than one enum.
+
+enum SystemOp {
+ SystemFixed = 0xD5000000,
+ SystemFMask = 0xFFC00000
+};
+
+enum SystemSysRegOp {
+ SystemSysRegFixed = 0xD5100000,
+ SystemSysRegFMask = 0xFFD00000,
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
+};
+
+enum SystemHintOp {
+ SystemHintFixed = 0xD503201F,
+ SystemHintFMask = 0xFFFFF01F,
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
+};
+
+// Exception.
+enum ExceptionOp {
+ ExceptionFixed = 0xD4000000,
+ ExceptionFMask = 0xFF000000,
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
+};
+// Code used to spot hlt instructions that should not be hit.
+const int kHltBadCode = 0xbad;
+
+enum MemBarrierOp {
+ MemBarrierFixed = 0xD503309F,
+ MemBarrierFMask = 0xFFFFF09F,
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
+};
+
+// Any load or store (including pair).
+enum LoadStoreAnyOp {
+ LoadStoreAnyFMask = 0x0a000000,
+ LoadStoreAnyFixed = 0x08000000
+};
+
+// Any load pair or store pair.
+enum LoadStorePairAnyOp {
+ LoadStorePairAnyFMask = 0x3a000000,
+ LoadStorePairAnyFixed = 0x28000000
+};
+
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000), \
+ V(LDP, w, 0x00400000), \
+ V(LDPSW, x, 0x40400000), \
+ V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), \
+ V(STP, s, 0x04000000), \
+ V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), \
+ V(LDP, d, 0x44400000)
+
+// Load/store pair (post, pre and offset).
+enum LoadStorePairOp {
+ LoadStorePairMask = 0xC4400000,
+ LoadStorePairLBit = 1 << 22,
+ #define LOAD_STORE_PAIR(A, B, C) \
+ A##_##B = C
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
+ #undef LOAD_STORE_PAIR
+};
+
+enum LoadStorePairPostIndexOp {
+ LoadStorePairPostIndexFixed = 0x28800000,
+ LoadStorePairPostIndexFMask = 0x3B800000,
+ LoadStorePairPostIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
+ #undef LOAD_STORE_PAIR_POST_INDEX
+};
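+
+// For example, STP_x = 0x80000000 from the list above, so
+// STP_x_post = LoadStorePairPostIndexFixed | STP_x = 0xA8800000, the base
+// encoding of "stp xt, xt2, [xn], #imm".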
+
+enum LoadStorePairPreIndexOp {
+ LoadStorePairPreIndexFixed = 0x29800000,
+ LoadStorePairPreIndexFMask = 0x3B800000,
+ LoadStorePairPreIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
+ #undef LOAD_STORE_PAIR_PRE_INDEX
+};
+
+enum LoadStorePairOffsetOp {
+ LoadStorePairOffsetFixed = 0x29000000,
+ LoadStorePairOffsetFMask = 0x3B800000,
+ LoadStorePairOffsetMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
+ #undef LOAD_STORE_PAIR_OFFSET
+};
+
+enum LoadStorePairNonTemporalOp {
+ LoadStorePairNonTemporalFixed = 0x28000000,
+ LoadStorePairNonTemporalFMask = 0x3B800000,
+ LoadStorePairNonTemporalMask = 0xFFC00000,
+ STNP_w = LoadStorePairNonTemporalFixed | STP_w,
+ LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
+ STNP_x = LoadStorePairNonTemporalFixed | STP_x,
+ LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
+ STNP_s = LoadStorePairNonTemporalFixed | STP_s,
+ LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
+ STNP_d = LoadStorePairNonTemporalFixed | STP_d,
+ LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
+};
+
+// Load literal.
+enum LoadLiteralOp {
+ LoadLiteralFixed = 0x18000000,
+ LoadLiteralFMask = 0x3B000000,
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000
+};
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000)
+
+
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp {
+ LoadStoreUnscaledOffsetFixed = 0x38000000,
+ LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+ #undef LOAD_STORE_UNSCALED
+};
+
+// Load/store (post, pre, offset and unsigned).
+enum LoadStoreOp {
+ LoadStoreOpMask = 0xC4C00000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
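+
+// For example, V(ST, RB, w, 0x00000000) expands to STRB_w = 0x00000000 here,
+// and to STURB_w = LoadStoreUnscaledOffsetFixed | 0x00000000 = 0x38000000 in
+// the unscaled-offset enum above.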
+
+// Load/store post index.
+enum LoadStorePostIndex {
+ LoadStorePostIndexFixed = 0x38000400,
+ LoadStorePostIndexFMask = 0x3B200C00,
+ LoadStorePostIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ A##B##_##C##_post = LoadStorePostIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
+ #undef LOAD_STORE_POST_INDEX
+};
+
+// Load/store pre index.
+enum LoadStorePreIndex {
+ LoadStorePreIndexFixed = 0x38000C00,
+ LoadStorePreIndexFMask = 0x3B200C00,
+ LoadStorePreIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ A##B##_##C##_pre = LoadStorePreIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
+ #undef LOAD_STORE_PRE_INDEX
+};
+
+// Load/store unsigned offset.
+enum LoadStoreUnsignedOffset {
+ LoadStoreUnsignedOffsetFixed = 0x39000000,
+ LoadStoreUnsignedOffsetFMask = 0x3B000000,
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
+ #undef LOAD_STORE_UNSIGNED_OFFSET
+};
+
+// Load/store register offset.
+enum LoadStoreRegisterOffset {
+ LoadStoreRegisterOffsetFixed = 0x38200800,
+ LoadStoreRegisterOffsetFMask = 0x3B200C00,
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+ #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
+ #undef LOAD_STORE_REGISTER_OFFSET
+};
+
+// Conditional compare.
+enum ConditionalCompareOp {
+ ConditionalCompareMask = 0x60000000,
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
+};
+
+// Conditional compare register.
+enum ConditionalCompareRegisterOp {
+ ConditionalCompareRegisterFixed = 0x1A400000,
+ ConditionalCompareRegisterFMask = 0x1FE00800,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
+ CCMN_w = ConditionalCompareRegisterFixed | CCMN,
+ CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
+ CCMP_w = ConditionalCompareRegisterFixed | CCMP,
+ CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
+};
+
+// Conditional compare immediate.
+enum ConditionalCompareImmediateOp {
+ ConditionalCompareImmediateFixed = 0x1A400800,
+ ConditionalCompareImmediateFMask = 0x1FE00800,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
+ CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
+ CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
+ CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
+ CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
+};
+
+// Conditional select.
+enum ConditionalSelectOp {
+ ConditionalSelectFixed = 0x1A800000,
+ ConditionalSelectFMask = 0x1FE00000,
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
+};
+
+// Data processing 1 source.
+enum DataProcessing1SourceOp {
+ DataProcessing1SourceFixed = 0x5AC00000,
+ DataProcessing1SourceFMask = 0x5FE00000,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
+ REV16_w = REV16,
+ REV16_x = REV16 | SixtyFourBits,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
+ REV32_x = REV | SixtyFourBits,
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits
+};
+
+// Data processing 2 source.
+enum DataProcessing2SourceOp {
+ DataProcessing2SourceFixed = 0x1AC00000,
+ DataProcessing2SourceFMask = 0x5FE00000,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ CRC32CB = DataProcessing2SourceFixed | 0x00005000,
+ CRC32CH = DataProcessing2SourceFixed | 0x00005400,
+ CRC32CW = DataProcessing2SourceFixed | 0x00005800,
+ CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
+};
+
+// Data processing 3 source.
+enum DataProcessing3SourceOp {
+ DataProcessing3SourceFixed = 0x1B000000,
+ DataProcessing3SourceFMask = 0x1F000000,
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+};
+
+// Floating point compare.
+enum FPCompareOp {
+ FPCompareFixed = 0x1E202000,
+ FPCompareFMask = 0x5F203C00,
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018
+};
+
+// Floating point conditional compare.
+enum FPConditionalCompareOp {
+ FPConditionalCompareFixed = 0x1E200400,
+ FPConditionalCompareFMask = 0x5F200C00,
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
+};
+
+// Floating point conditional select.
+enum FPConditionalSelectOp {
+ FPConditionalSelectFixed = 0x1E200C00,
+ FPConditionalSelectFMask = 0x5F200C00,
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
+};
+
+// Floating point immediate.
+enum FPImmediateOp {
+ FPImmediateFixed = 0x1E201000,
+ FPImmediateFMask = 0x5F201C00,
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+};
+
+// Floating point data processing 1 source.
+enum FPDataProcessing1SourceOp {
+ FPDataProcessing1SourceFixed = 0x1E204000,
+ FPDataProcessing1SourceFMask = 0x5F207C00,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
+ FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
+ FRINTN = FRINTN_s,
+ FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
+ FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTP = FRINTP_s,
+ FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
+ FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTM = FRINTM_s,
+ FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
+ FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
+ FRINTZ = FRINTZ_s,
+ FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
+ FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTA = FRINTA_s,
+ FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
+ FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTX = FRINTX_s,
+ FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
+ FRINTI = FRINTI_s
+};
+
+// Floating point data processing 2 source.
+enum FPDataProcessing2SourceOp {
+ FPDataProcessing2SourceFixed = 0x1E200800,
+ FPDataProcessing2SourceFMask = 0x5F200C00,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FMAXNM_s = FMAXNM,
+ FMAXNM_d = FMAXNM | FP64,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM_s = FMINNM,
+ FMINNM_d = FMINNM | FP64,
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
+};
+
+// Floating point data processing 3 source.
+enum FPDataProcessing3SourceOp {
+ FPDataProcessing3SourceFixed = 0x1F000000,
+ FPDataProcessing3SourceFMask = 0x5F000000,
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+};
+
+// Conversion between floating point and integer.
+enum FPIntegerConvertOp {
+ FPIntegerConvertFixed = 0x1E200000,
+ FPIntegerConvertFMask = 0x5F20FC00,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FCVTNS_ws = FCVTNS,
+ FCVTNS_xs = FCVTNS | SixtyFourBits,
+ FCVTNS_wd = FCVTNS | FP64,
+ FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU_ws = FCVTNU,
+ FCVTNU_xs = FCVTNU | SixtyFourBits,
+ FCVTNU_wd = FCVTNU | FP64,
+ FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS_ws = FCVTPS,
+ FCVTPS_xs = FCVTPS | SixtyFourBits,
+ FCVTPS_wd = FCVTPS | FP64,
+ FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU_ws = FCVTPU,
+ FCVTPU_xs = FCVTPU | SixtyFourBits,
+ FCVTPU_wd = FCVTPU | FP64,
+ FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS_ws = FCVTMS,
+ FCVTMS_xs = FCVTMS | SixtyFourBits,
+ FCVTMS_wd = FCVTMS | FP64,
+ FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU_ws = FCVTMU,
+ FCVTMU_xs = FCVTMU | SixtyFourBits,
+ FCVTMU_wd = FCVTMU | FP64,
+ FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS_ws = FCVTZS,
+ FCVTZS_xs = FCVTZS | SixtyFourBits,
+ FCVTZS_wd = FCVTZS | FP64,
+ FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU_ws = FCVTZU,
+ FCVTZU_xs = FCVTZU | SixtyFourBits,
+ FCVTZU_wd = FCVTZU | FP64,
+ FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ FCVTAS_ws = FCVTAS,
+ FCVTAS_xs = FCVTAS | SixtyFourBits,
+ FCVTAS_wd = FCVTAS | FP64,
+ FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU_ws = FCVTAU,
+ FCVTAU_xs = FCVTAU | SixtyFourBits,
+ FCVTAU_wd = FCVTAU | FP64,
+ FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64
+};
+
+// Conversion between fixed point and floating point.
+enum FPFixedPointConvertOp {
+ FPFixedPointConvertFixed = 0x1E000000,
+ FPFixedPointConvertFMask = 0x5F200000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FCVTZS_ws_fixed = FCVTZS_fixed,
+ FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
+ FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
+ FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_ws_fixed = FCVTZU_fixed,
+ FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
+ FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
+ FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+};
+
+// Unimplemented and unallocated instructions. These are defined to make
+// fixed-bit assertions easier.
+enum UnimplementedOp {
+ UnimplementedFixed = 0x00000000,
+ UnimplementedFMask = 0x00000000
+};
+
+enum UnallocatedOp {
+ UnallocatedFixed = 0x00000000,
+ UnallocatedFMask = 0x00000000
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CONSTANTS_ARM64_H_
diff --git a/chromium/v8/src/arm64/cpu-arm64.cc b/chromium/v8/src/arm64/cpu-arm64.cc
new file mode 100644
index 00000000000..4cfc4f04b62
--- /dev/null
+++ b/chromium/v8/src/arm64/cpu-arm64.cc
@@ -0,0 +1,123 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU-specific code for ARM64, independent of the OS, goes here.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/cpu.h"
+#include "src/arm64/utils-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+class CacheLineSizes {
+ public:
+ CacheLineSizes() {
+#ifdef USE_SIMULATOR
+ cache_type_register_ = 0;
+#else
+ // Copy the content of the cache type register to a core register.
+ __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r" (cache_type_register_));
+#endif
+ }
+
+ uint32_t icache_line_size() const { return ExtractCacheLineSize(0); }
+ uint32_t dcache_line_size() const { return ExtractCacheLineSize(16); }
+
+ private:
+ uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
+    // The cache type register holds the log2 of the cache line size,
+    // measured in (4-byte) words.
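+    // For example, a core with 64-byte lines has 16-word lines, so the
+    // field reads 4 and this returns 4 << 4 == 64.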
+ return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
+ }
+
+ uint32_t cache_type_register_;
+};
+
+
+void CPU::FlushICache(void* address, size_t length) {
+ if (length == 0) return;
+
+#ifdef USE_SIMULATOR
+  // TODO(all): consider doing some cache simulation to ensure every executed
+  // address range has been synced.
+ USE(address);
+ USE(length);
+#else
+ // The code below assumes user space cache operations are allowed. The goal
+ // of this routine is to make sure the code generated is visible to the I
+ // side of the CPU.
+
+ uintptr_t start = reinterpret_cast<uintptr_t>(address);
+ // Sizes will be used to generate a mask big enough to cover a pointer.
+ CacheLineSizes sizes;
+ uintptr_t dsize = sizes.dcache_line_size();
+ uintptr_t isize = sizes.icache_line_size();
+ // Cache line sizes are always a power of 2.
+ ASSERT(CountSetBits(dsize, 64) == 1);
+ ASSERT(CountSetBits(isize, 64) == 1);
+ uintptr_t dstart = start & ~(dsize - 1);
+ uintptr_t istart = start & ~(isize - 1);
+ uintptr_t end = start + length;
+
+ __asm__ __volatile__ ( // NOLINT
+ // Clean every line of the D cache containing the target data.
+ "0: \n\t"
+ // dc : Data Cache maintenance
+ // c : Clean
+ // va : by (Virtual) Address
+ // u : to the point of Unification
+ // The point of unification for a processor is the point by which the
+ // instruction and data caches are guaranteed to see the same copy of a
+ // memory location. See ARM DDI 0406B page B2-12 for more information.
+ "dc cvau, %[dline] \n\t"
+ "add %[dline], %[dline], %[dsize] \n\t"
+ "cmp %[dline], %[end] \n\t"
+ "b.lt 0b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ // dsb : Data Synchronisation Barrier
+ // ish : Inner SHareable domain
+ // The point of unification for an Inner Shareable shareability domain is
+ // the point by which the instruction and data caches of all the processors
+ // in that Inner Shareable shareability domain are guaranteed to see the
+ // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+ // information.
+ "dsb ish \n\t"
+ // Invalidate every line of the I cache containing the target data.
+ "1: \n\t"
+ // ic : instruction cache maintenance
+ // i : invalidate
+ // va : by address
+ // u : to the point of unification
+ "ic ivau, %[iline] \n\t"
+ "add %[iline], %[iline], %[isize] \n\t"
+ "cmp %[iline], %[end] \n\t"
+ "b.lt 1b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ "dsb ish \n\t"
+ // Barrier to ensure any prefetching which happened before this code is
+ // discarded.
+ // isb : Instruction Synchronisation Barrier
+ "isb \n\t"
+ : [dline] "+r" (dstart),
+ [iline] "+r" (istart)
+ : [dsize] "r" (dsize),
+ [isize] "r" (isize),
+ [end] "r" (end)
+  // This code does not write to memory, but without the "memory" clobber gcc
+  // might move it before the stores that produced the code being flushed.
+ : "cc", "memory"
+ ); // NOLINT
+#endif
+}
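+
+// Note: callers elsewhere in the codegen layer are expected to invoke
+// CPU::FlushICache(start, size) after writing or patching instructions at
+// 'start', and before executing them, so that the I-side sees the new code.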
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/debug-arm64.cc b/chromium/v8/src/arm64/debug-arm64.cc
new file mode 100644
index 00000000000..556231602e6
--- /dev/null
+++ b/chromium/v8/src/arm64/debug-arm64.cc
@@ -0,0 +1,357 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/codegen.h"
+#include "src/debug.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ // Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
+ // the return from JS function sequence from
+ // mov sp, fp
+  //   ldp fp, lr, [sp], #16
+  //   ldr ip0, [pc, #(3 * kInstructionSize)]
+ // add sp, sp, ip0
+ // ret
+  //   <number of parameters ...
+ // ... plus one (64 bits)>
+ // to a call to the debug break return code.
+ // ldr ip0, [pc, #(3 * kInstructionSize)]
+ // blr ip0
+ // hlt kHltBadCode @ code should not return, catch if it does.
+ // <debug break return code ...
+ // ... entry point address (64 bits)>
+
+ // The patching code must not overflow the space occupied by the return
+ // sequence.
+ STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
+ byte* entry =
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry();
+
+ // The first instruction of a patched return sequence must be a load literal
+ // loading the address of the debug break return code.
+ patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
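+  // The pc-relative offset is counted in load-literal granules:
+  // (3 * kInstructionSize) >> kLoadLiteralScaleLog2 == 3, i.e. three
+  // instructions ahead of the ldr, which is the dc64 payload emitted below.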
+ // TODO(all): check the following is correct.
+ // The debug break return code will push a frame and call statically compiled
+ // code. By using blr, even though control will not return after the branch,
+ // this call site will be registered in the frame (lr being saved as the pc
+ // of the next instruction to execute for this frame). The debugger can now
+  // iterate over the frames to find the call to the debug break return code.
+ patcher.blr(ip0);
+ patcher.hlt(kHltBadCode);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ // Reset the code emitted by EmitReturnSequence to its original state.
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSRetSequenceInstructions);
+}
+
+
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ // Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
+ // break slot code from
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // to a call to the debug slot code.
+ // ldr ip0, [pc, #(2 * kInstructionSize)]
+ // blr ip0
+ // <debug break slot code ...
+ // ... entry point address (64 bits)>
+
+ // TODO(all): consider adding a hlt instruction after the blr as we don't
+ // expect control to return here. This implies increasing
+ // kDebugBreakSlotInstructions to 5 instructions.
+
+  // The patching code must not overflow the space occupied by the debug
+  // break slot sequence.
+ STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
+ byte* entry =
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry();
+
+ // The first instruction of a patched debug break slot must be a load literal
+ // loading the address of the debug break slot code.
+ patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
+ // TODO(all): check the following is correct.
+ // The debug break slot code will push a frame and call statically compiled
+  // code. By using blr, even though control will not return after the branch,
+ // this call site will be registered in the frame (lr being saved as the pc
+ // of the next instruction to execute for this frame). The debugger can now
+  // iterate over the frames to find the call to the debug break slot code.
+ patcher.blr(ip0);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs,
+ Register scratch) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Any live values (object_regs and non_object_regs) in caller-saved
+ // registers (or lr) need to be stored on the stack so that their values are
+ // safely preserved for a call into C code.
+ //
+ // Also:
+ // * object_regs may be modified during the C code by the garbage
+ // collector. Every object register must be a valid tagged pointer or
+ // SMI.
+ //
+ // * non_object_regs will be converted to SMIs so that the garbage
+ // collector doesn't try to interpret them as pointers.
+ //
+ // TODO(jbramley): Why can't this handle callee-saved registers?
+ ASSERT((~kCallerSaved.list() & object_regs) == 0);
+ ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ ASSERT((scratch.Bit() & object_regs) == 0);
+ ASSERT((scratch.Bit() & non_object_regs) == 0);
+ ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
+ STATIC_ASSERT(kSmiValueSize == 32);
+
+ CPURegList non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Store each non-object register as two SMIs.
+ Register reg = Register(non_object_list.PopLowestIndex());
+ __ Lsr(scratch, reg, 32);
+ __ SmiTagAndPush(scratch, reg);
+
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
+ }
+
+ if (object_regs != 0) {
+ __ PushXRegList(object_regs);
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ Mov(x0, 0); // No arguments.
+ __ Mov(x1, ExternalReference::debug_break(masm->isolate()));
+
+ CEntryStub stub(masm->isolate(), 1);
+ __ CallStub(&stub);
+
+ // Restore the register values from the expression stack.
+ if (object_regs != 0) {
+ __ PopXRegList(object_regs);
+ }
+
+ non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Load each non-object register from two SMIs.
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ Register reg = Register(non_object_list.PopHighestIndex());
+ __ Pop(scratch, reg);
+ __ Bfxil(reg, scratch, 32, 32);
+ }
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ Mov(scratch, after_break_target);
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Br(scratch);
+}
+
+
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -- x3 : slot in feedback array
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit() | x3.Bit(), 0, x10);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC load (from ic-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers x0 and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC store (from ic-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ // Registers x0, x1, and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
+}
+
+
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+  //  -- x0    : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // In places other than IC call sites it is expected that x0 is TOS, which
+  // is an object. This is not generally the case, so this should be used
+  // with care.
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
+}
+
+
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
+}
+
+
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -- x2 : feedback array
+ // -- x3 : feedback slot (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(
+ masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+ // the constant pool in the debug break slot code.
+ InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
+
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(Assembler::DEBUG_BREAK_NOP);
+ }
+}
+
+
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
+  // In the places where a debug break slot is inserted, no registers can
+  // contain
+ // object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0, x10);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = false;
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/decoder-arm64-inl.h b/chromium/v8/src/arm64/decoder-arm64-inl.h
new file mode 100644
index 00000000000..e8eef5e14eb
--- /dev/null
+++ b/chromium/v8/src/arm64/decoder-arm64-inl.h
@@ -0,0 +1,648 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_DECODER_ARM64_INL_H_
+#define V8_ARM64_DECODER_ARM64_INL_H_
+
+#include "src/arm64/decoder-arm64.h"
+#include "src/globals.h"
+#include "src/utils.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// Top-level instruction decode function.
+template<typename V>
+void Decoder<V>::Decode(Instruction *instr) {
+ if (instr->Bits(28, 27) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(27, 24)) {
+ // 0: PC relative addressing.
+ case 0x0: DecodePCRelAddressing(instr); break;
+
+ // 1: Add/sub immediate.
+ case 0x1: DecodeAddSubImmediate(instr); break;
+
+ // A: Logical shifted register.
+ // Add/sub with carry.
+ // Conditional compare register.
+ // Conditional compare immediate.
+ // Conditional select.
+ // Data processing 1 source.
+ // Data processing 2 source.
+ // B: Add/sub shifted register.
+ // Add/sub extended register.
+ // Data processing 3 source.
+ case 0xA:
+ case 0xB: DecodeDataProcessing(instr); break;
+
+ // 2: Logical immediate.
+ // Move wide immediate.
+ case 0x2: DecodeLogical(instr); break;
+
+ // 3: Bitfield.
+ // Extract.
+ case 0x3: DecodeBitfieldExtract(instr); break;
+
+ // 4: Unconditional branch immediate.
+ // Exception generation.
+ // Compare and branch immediate.
+ // 5: Compare and branch immediate.
+ // Conditional branch.
+ // System.
+ // 6,7: Unconditional branch.
+ // Test and branch immediate.
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7: DecodeBranchSystemException(instr); break;
+
+ // 8,9: Load/store register pair post-index.
+ // Load register literal.
+ // Load/store register unscaled immediate.
+ // Load/store register immediate post-index.
+ // Load/store register immediate pre-index.
+ // Load/store register offset.
+ // C,D: Load/store register pair offset.
+ // Load/store register pair pre-index.
+ // Load/store register unsigned immediate.
+ // Advanced SIMD.
+ case 0x8:
+ case 0x9:
+ case 0xC:
+ case 0xD: DecodeLoadStore(instr); break;
+
+ // E: FP fixed point conversion.
+ // FP integer conversion.
+ // FP data processing 1 source.
+ // FP compare.
+ // FP immediate.
+ // FP data processing 2 source.
+ // FP conditional compare.
+ // FP conditional select.
+ // Advanced SIMD.
+ // F: FP data processing 3 source.
+ // Advanced SIMD.
+ case 0xE:
+ case 0xF: DecodeFP(instr); break;
+ }
+ }
+}
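+
+// As a worked example, "add x1, x2, #1" encodes as 0x91000441
+// (ADD_x_imm | (1 << 10) | (2 << 5) | 1). Bits(28, 27) are non-zero and
+// Bits(27, 24) == 0x1, so Decode dispatches to DecodeAddSubImmediate, which
+// sees bit 23 clear and calls V::VisitAddSubImmediate.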
+
+
+template<typename V>
+void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x0);
+ // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+ // decode.
+ ASSERT(instr->Bit(28) == 0x1);
+ V::VisitPCRelAddressing(instr);
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x4) ||
+ (instr->Bits(27, 24) == 0x5) ||
+ (instr->Bits(27, 24) == 0x6) ||
+ (instr->Bits(27, 24) == 0x7) );
+
+ switch (instr->Bits(31, 29)) {
+ case 0:
+ case 4: {
+ V::VisitUnconditionalBranch(instr);
+ break;
+ }
+ case 1:
+ case 5: {
+ if (instr->Bit(25) == 0) {
+ V::VisitCompareBranch(instr);
+ } else {
+ V::VisitTestBranch(instr);
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(25) == 0) {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Mask(0x01000010) == 0x00000010)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitConditionalBranch(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(24) == 0) {
+ if ((instr->Bits(4, 2) != 0) ||
+ (instr->Mask(0x00E0001D) == 0x00200001) ||
+ (instr->Mask(0x00E0001D) == 0x00400001) ||
+ (instr->Mask(0x00E0001E) == 0x00200002) ||
+ (instr->Mask(0x00E0001E) == 0x00400002) ||
+ (instr->Mask(0x00E0001C) == 0x00600000) ||
+ (instr->Mask(0x00E0001C) == 0x00800000) ||
+ (instr->Mask(0x00E0001F) == 0x00A00000) ||
+ (instr->Mask(0x00C0001C) == 0x00C00000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitException(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+ if ((instr->Bits(21, 19) == 0x4) ||
+ (masked_003FF0E0 == 0x00033000) ||
+ (masked_003FF0E0 == 0x003FF020) ||
+ (masked_003FF0E0 == 0x003FF060) ||
+ (masked_003FF0E0 == 0x003FF0E0) ||
+ (instr->Mask(0x00388000) == 0x00008000) ||
+ (instr->Mask(0x0038E000) == 0x00000000) ||
+ (instr->Mask(0x0039E000) == 0x00002000) ||
+ (instr->Mask(0x003AE000) == 0x00002000) ||
+ (instr->Mask(0x003CE000) == 0x00042000) ||
+ (instr->Mask(0x003FFFC0) == 0x000320C0) ||
+ (instr->Mask(0x003FF100) == 0x00032100) ||
+ (instr->Mask(0x003FF200) == 0x00032200) ||
+ (instr->Mask(0x003FF400) == 0x00032400) ||
+ (instr->Mask(0x003FF800) == 0x00032800) ||
+ (instr->Mask(0x0038F000) == 0x00005000) ||
+ (instr->Mask(0x0038E000) == 0x00006000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitSystem(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) ||
+ (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) ||
+ (instr->Bits(24, 22) == 0x3)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitUnconditionalBranchToRegister(instr);
+ }
+ }
+ break;
+ }
+ case 3:
+ case 7: {
+ V::VisitUnallocated(instr);
+ break;
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeLoadStore(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x8) ||
+ (instr->Bits(27, 24) == 0x9) ||
+ (instr->Bits(27, 24) == 0xC) ||
+ (instr->Bits(27, 24) == 0xD) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(26) == 0) {
+ // TODO(all): VisitLoadStoreExclusive.
+ V::VisitUnimplemented(instr);
+ } else {
+ DecodeAdvSIMDLoadStore(instr);
+ }
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Mask(0xC4400000) == 0xC0400000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStorePairNonTemporal(instr);
+ }
+ } else {
+ V::VisitLoadStorePairPostIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Mask(0xC4000000) == 0xC4000000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadLiteral(instr);
+ }
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(21) == 0) {
+ switch (instr->Bits(11, 10)) {
+ case 0: {
+ V::VisitLoadStoreUnscaledOffset(instr);
+ break;
+ }
+ case 1: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStorePostIndex(instr);
+ }
+ break;
+ }
+ case 2: {
+ // TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
+ V::VisitUnimplemented(instr);
+ break;
+ }
+ case 3: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStorePreIndex(instr);
+ }
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x2) {
+ if (instr->Bit(14) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStoreRegisterOffset(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ V::VisitLoadStorePairOffset(instr);
+ } else {
+ V::VisitLoadStorePairPreIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStoreUnsignedOffset(instr);
+ }
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeLogical(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x2);
+
+ if (instr->Mask(0x80400000) == 0x00400000) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ V::VisitLogicalImmediate(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x1) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitMoveWideImmediate(instr);
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x3);
+
+ if ((instr->Mask(0x80400000) == 0x80000000) ||
+ (instr->Mask(0x80400000) == 0x00400000) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Bit(23) == 0) {
+ if ((instr->Mask(0x80200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) == 0x60000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitBitfield(instr);
+ }
+ } else {
+ if ((instr->Mask(0x60200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) != 0x00000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitExtract(instr);
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x1);
+ if (instr->Bit(23) == 1) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubImmediate(instr);
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Mask(0x80008000) == 0x00008000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLogicalShifted(instr);
+ }
+ } else {
+ switch (instr->Bits(23, 21)) {
+ case 0: {
+ if (instr->Mask(0x0000FC00) != 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubWithCarry(instr);
+ }
+ break;
+ }
+ case 2: {
+ if ((instr->Bit(29) == 0) ||
+ (instr->Mask(0x00000410) != 0)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(11) == 0) {
+ V::VisitConditionalCompareRegister(instr);
+ } else {
+ V::VisitConditionalCompareImmediate(instr);
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (instr->Mask(0x20000800) != 0x00000000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitConditionalSelect(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(29) == 0x1) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(30) == 0) {
+ if ((instr->Bit(15) == 0x1) ||
+ (instr->Bits(15, 11) == 0) ||
+ (instr->Bits(15, 12) == 0x1) ||
+ (instr->Bits(15, 12) == 0x3) ||
+ (instr->Bits(15, 13) == 0x3) ||
+ (instr->Mask(0x8000EC00) == 0x00004C00) ||
+ (instr->Mask(0x8000E800) == 0x80004000) ||
+ (instr->Mask(0x8000E400) == 0x80004000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitDataProcessing2Source(instr);
+ }
+ } else {
+ if ((instr->Bit(13) == 1) ||
+ (instr->Bits(20, 16) != 0) ||
+ (instr->Bits(15, 14) != 0) ||
+ (instr->Mask(0xA01FFC00) == 0x00000C00) ||
+ (instr->Mask(0x201FF800) == 0x00001800)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitDataProcessing1Source(instr);
+ }
+ }
+          }
+          break;
+        }
+        case 1:
+        case 3:
+        case 5:
+        case 7: V::VisitUnallocated(instr); break;
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubShifted(instr);
+ }
+ } else {
+ if ((instr->Mask(0x00C00000) != 0x00000000) ||
+ (instr->Mask(0x00001400) == 0x00001400) ||
+ (instr->Mask(0x00001800) == 0x00001800)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubExtended(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(30) == 0x1) ||
+ (instr->Bits(30, 29) == 0x1) ||
+ (instr->Mask(0xE0600000) == 0x00200000) ||
+ (instr->Mask(0xE0608000) == 0x00400000) ||
+ (instr->Mask(0x60608000) == 0x00408000) ||
+ (instr->Mask(0x60E00000) == 0x00E00000) ||
+ (instr->Mask(0x60E00000) == 0x00800000) ||
+ (instr->Mask(0x60E00000) == 0x00600000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitDataProcessing3Source(instr);
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeFP(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xE) ||
+ (instr->Bits(27, 24) == 0xF) );
+
+ if (instr->Bit(28) == 0) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(29) == 1) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bits(31, 30) == 0x3) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Bits(31, 30) == 0x1) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bit(23) == 1) ||
+ (instr->Bit(18) == 1) ||
+ (instr->Mask(0x80008000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x00160000) == 0x00000000) ||
+ (instr->Mask(0x00160000) == 0x00120000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPFixedPointConvert(instr);
+ }
+ } else {
+ if (instr->Bits(15, 10) == 32) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Bits(15, 10) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x000E0000) == 0x000C0000) ||
+ (instr->Mask(0x00160000) == 0x00120000) ||
+ (instr->Mask(0x00160000) == 0x00140000) ||
+ (instr->Mask(0x20C40000) == 0x00800000) ||
+ (instr->Mask(0x20C60000) == 0x00840000) ||
+ (instr->Mask(0xA0C60000) == 0x80060000) ||
+ (instr->Mask(0xA0C60000) == 0x00860000) ||
+ (instr->Mask(0xA0C60000) == 0x00460000) ||
+ (instr->Mask(0xA0CE0000) == 0x80860000) ||
+ (instr->Mask(0xA0CE0000) == 0x804E0000) ||
+ (instr->Mask(0xA0CE0000) == 0x000E0000) ||
+ (instr->Mask(0xA0D60000) == 0x00160000) ||
+ (instr->Mask(0xA0D60000) == 0x80560000) ||
+ (instr->Mask(0xA0D60000) == 0x80960000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPIntegerConvert(instr);
+ }
+ } else if (instr->Bits(14, 10) == 16) {
+ const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
+ if ((instr->Mask(0x80180000) != 0) ||
+ (masked_A0DF8000 == 0x00020000) ||
+ (masked_A0DF8000 == 0x00030000) ||
+ (masked_A0DF8000 == 0x00068000) ||
+ (masked_A0DF8000 == 0x00428000) ||
+ (masked_A0DF8000 == 0x00430000) ||
+ (masked_A0DF8000 == 0x00468000) ||
+ (instr->Mask(0xA0D80000) == 0x00800000) ||
+ (instr->Mask(0xA0DE0000) == 0x00C00000) ||
+ (instr->Mask(0xA0DF0000) == 0x00C30000) ||
+ (instr->Mask(0xA0DC0000) == 0x00C40000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPDataProcessing1Source(instr);
+ }
+ } else if (instr->Bits(13, 10) == 8) {
+ if ((instr->Bits(15, 14) != 0) ||
+ (instr->Bits(2, 0) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPCompare(instr);
+ }
+ } else if (instr->Bits(12, 10) == 4) {
+ if ((instr->Bits(9, 5) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPImmediate(instr);
+ }
+ } else {
+ if (instr->Mask(0x80800000) != 0x00000000) {
+ V::VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 1: {
+ V::VisitFPConditionalCompare(instr);
+ break;
+ }
+ case 2: {
+ if ((instr->Bits(15, 14) == 0x3) ||
+ (instr->Mask(0x00009000) == 0x00009000) ||
+ (instr->Mask(0x0000A000) == 0x0000A000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPDataProcessing2Source(instr);
+ }
+ break;
+ }
+ case 3: {
+ V::VisitFPConditionalSelect(instr);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ }
+ }
+ }
+ } else {
+ // Bit 30 == 1 has been handled earlier.
+ ASSERT(instr->Bit(30) == 0);
+ if (instr->Mask(0xA0800000) != 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPDataProcessing3Source(instr);
+ }
+ }
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
+ // TODO(all): Implement Advanced SIMD load/store instruction decode.
+ ASSERT(instr->Bits(29, 25) == 0x6);
+ V::VisitUnimplemented(instr);
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
+ // TODO(all): Implement Advanced SIMD data processing instruction decode.
+ ASSERT(instr->Bits(27, 25) == 0x7);
+ V::VisitUnimplemented(instr);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DECODER_ARM64_INL_H_
diff --git a/chromium/v8/src/arm64/decoder-arm64.cc b/chromium/v8/src/arm64/decoder-arm64.cc
new file mode 100644
index 00000000000..08881c2d5a5
--- /dev/null
+++ b/chromium/v8/src/arm64/decoder-arm64.cc
@@ -0,0 +1,86 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/globals.h"
+#include "src/utils.h"
+#include "src/arm64/decoder-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
+ visitors_.remove(new_visitor);
+  visitors_.push_back(new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
+ visitors_.remove(new_visitor);
+  visitors_.push_front(new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::InsertVisitorBefore(
+ DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+  // We reached the end of the list without finding registered_visitor.
+  // Dereferencing the end iterator would be undefined behaviour, so assert
+  // directly; in release builds, fall back to inserting at the end.
+  ASSERT(false);  // registered_visitor was not registered.
+ visitors_.insert(it, new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::InsertVisitorAfter(
+ DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ it++;
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+  // We reached the end of the list without finding registered_visitor.
+  // Dereferencing the end iterator would be undefined behaviour, so assert
+  // directly; in release builds, fall back to appending at the end.
+  ASSERT(false);  // registered_visitor was not registered.
+ visitors_.push_back(new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
+ visitors_.remove(visitor);
+}
+
+
+#define DEFINE_VISITOR_CALLERS(A) \
+ void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
+    ASSERT(instr->Mask(A##FMask) == A##Fixed);                   \
+ std::list<DecoderVisitor*>::iterator it; \
+ for (it = visitors_.begin(); it != visitors_.end(); it++) { \
+ (*it)->Visit##A(instr); \
+ } \
+ }
+VISITOR_LIST(DEFINE_VISITOR_CALLERS)
+#undef DEFINE_VISITOR_CALLERS
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/decoder-arm64.h b/chromium/v8/src/arm64/decoder-arm64.h
new file mode 100644
index 00000000000..0ce84253baa
--- /dev/null
+++ b/chromium/v8/src/arm64/decoder-arm64.h
@@ -0,0 +1,187 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_DECODER_ARM64_H_
+#define V8_ARM64_DECODER_ARM64_H_
+
+#include <list>
+
+#include "src/globals.h"
+#include "src/arm64/instructions-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadStorePairNonTemporal) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(Unallocated) \
+ V(Unimplemented)
+
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+ virtual ~DecoderVisitor() {}
+
+ #define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+};
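+
+// For illustration, a minimal concrete visitor (a hypothetical example, not
+// used anywhere in this file) can implement every callback through the same
+// list macro, e.g. to count decoded instructions:
+//
+//   class CountingVisitor : public DecoderVisitor {
+//    public:
+//     CountingVisitor() : count_(0) {}
+//     #define DECLARE(A) virtual void Visit##A(Instruction*) { count_++; }
+//     VISITOR_LIST(DECLARE)
+//     #undef DECLARE
+//     int count() const { return count_; }
+//    private:
+//     int count_;
+//   };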
+
+
+// A visitor that dispatches to a list of visitors.
+class DispatchingDecoderVisitor : public DecoderVisitor {
+ public:
+ DispatchingDecoderVisitor() {}
+ virtual ~DispatchingDecoderVisitor() {}
+
+ // Register a new visitor class with the decoder.
+ // Decode() will call the corresponding visitor method from all registered
+  // visitor classes when decoding reaches a leaf node of the instruction
+  // decode tree.
+  // Visitors are called in the order in which they appear in the list.
+  // A visitor can only be registered once.
+  // Registering an already registered visitor only updates its position.
+ //
+ // d.AppendVisitor(V1);
+ // d.AppendVisitor(V2);
+  //   d.PrependVisitor(V2);  // Move V2 to the start of the list.
+ // d.InsertVisitorBefore(V3, V2);
+ // d.AppendVisitor(V4);
+ // d.AppendVisitor(V4); // No effect.
+ //
+ // d.Decode(i);
+ //
+  // will call the visitor methods of V3, V2, V1 and V4, in that order.
+ void AppendVisitor(DecoderVisitor* visitor);
+ void PrependVisitor(DecoderVisitor* visitor);
+ void InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+ void InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+
+ // Remove a previously registered visitor class from the list of visitors
+ // stored by the decoder.
+ void RemoveVisitor(DecoderVisitor* visitor);
+
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ // Visitors are registered in a list.
+ std::list<DecoderVisitor*> visitors_;
+};
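+
+// Typical wiring with the Decoder template declared below (a sketch;
+// SomeVisitor stands for any concrete DecoderVisitor, such as the
+// disassembler):
+//
+//   Decoder<DispatchingDecoderVisitor> decoder;
+//   SomeVisitor visitor;
+//   decoder.AppendVisitor(&visitor);
+//   decoder.Decode(instr);  // Dispatches to visitor.Visit<LeafNode>().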
+
+
+template<typename V>
+class Decoder : public V {
+ public:
+ Decoder() {}
+ virtual ~Decoder() {}
+
+ // Top-level instruction decoder function. Decodes an instruction and calls
+ // the visitor functions registered with the Decoder class.
+ virtual void Decode(Instruction *instr);
+
+ private:
+ // Decode the PC relative addressing instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x0.
+ void DecodePCRelAddressing(Instruction* instr);
+
+ // Decode the add/subtract immediate instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x1.
+ void DecodeAddSubImmediate(Instruction* instr);
+
+ // Decode the branch, system command, and exception generation parts of
+ // the instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+ void DecodeBranchSystemException(Instruction* instr);
+
+ // Decode the load and store parts of the instruction tree, and call
+ // the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+ void DecodeLoadStore(Instruction* instr);
+
+ // Decode the logical immediate and move wide immediate parts of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x2.
+ void DecodeLogical(Instruction* instr);
+
+ // Decode the bitfield and extraction parts of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x3.
+ void DecodeBitfieldExtract(Instruction* instr);
+
+ // Decode the data processing parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
+ void DecodeDataProcessing(Instruction* instr);
+
+ // Decode the floating point parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0xE, 0xF}.
+ void DecodeFP(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 29:25 = 0x6.
+ void DecodeAdvSIMDLoadStore(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) data processing part of the instruction
+ // tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:25 = 0x7.
+ void DecodeAdvSIMDDataProcessing(Instruction* instr);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DECODER_ARM64_H_
diff --git a/chromium/v8/src/arm64/deoptimizer-arm64.cc b/chromium/v8/src/arm64/deoptimizer-arm64.cc
new file mode 100644
index 00000000000..7ac5bd0d2d5
--- /dev/null
+++ b/chromium/v8/src/arm64/deoptimizer-arm64.cc
@@ -0,0 +1,385 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::patch_size() {
+ // Size of the code used to patch lazy bailout points.
+ // Patching is done by Deoptimizer::DeoptimizeFunction.
+ return 4 * kInstructionSize;
+}
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ // Invalidate the relocation information, as it will become invalid by the
+ // code patching below, and is not needed any more.
+ code->InvalidateRelocation();
+
+ // TODO(jkummerow): if (FLAG_zap_code_space), make the code object's
+ // entry sequence unusable (see other architectures).
+
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+ Address code_start_address = code->instruction_start();
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+  // For each LLazyBailout instruction, insert a call to the corresponding
+  // deoptimization entry.
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+
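+    // The code emitted below occupies patch_size() bytes and has this shape
+    // (a sketch; one slot is kInstructionSize bytes):
+    //   slot 0:    ldr ip0, pc+8       ; load the entry address from below
+    //   slot 1:    blr ip0             ; call the lazy deoptimization entry
+    //   slots 2-3: dc64 <deopt_entry>  ; 64-bit entry address literal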
+ PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
+ patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
+ patcher.blr(ip0);
+ patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
+
+ ASSERT((prev_call_address == NULL) ||
+ (call_address >= prev_call_address + patch_size()));
+ ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+  // Set the register values. The values are not important as there are no
+  // callee-saved registers in JavaScript frames, so all registers are
+  // spilled. Registers fp and sp are set to the correct values, though.
+ for (int i = 0; i < Register::NumRegisters(); i++) {
+ input_->SetRegister(i, 0);
+ }
+
+ // TODO(all): Do we also need to set a value to csp?
+ input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on ARM64 in the input frame.
+ return false;
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler_);
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(x0.code(), params);
+ output_frame->SetRegister(x1.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+#define __ masm->
+
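+// Copy a register dump, pushed on the stack by the deoptimizer entry code,
+// into the given FrameDescription. Destination slots with adjacent offsets
+// are written with a single store-pair instruction. For example (a sketch):
+// with reg_list = {x0, x1, x2} and reg_size = 8, x0 and x1 land at offsets
+// 0 and 8 and are stored with one stp, while the leftover odd register x2
+// is stored individually at offset 16.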
+static void CopyRegisterDumpToFrame(MacroAssembler* masm,
+ Register frame,
+ CPURegList reg_list,
+ Register scratch1,
+ Register scratch2,
+ int src_offset,
+ int dst_offset) {
+ int offset0, offset1;
+ CPURegList copy_to_input = reg_list;
+ int reg_count = reg_list.Count();
+ int reg_size = reg_list.RegisterSizeInBytes();
+ for (int i = 0; i < (reg_count / 2); i++) {
+ __ PeekPair(scratch1, scratch2, src_offset + (i * reg_size * 2));
+
+ offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
+ offset1 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
+
+ if ((offset0 + reg_size) == offset1) {
+ // Registers are adjacent: store in pairs.
+ __ Stp(scratch1, scratch2, MemOperand(frame, offset0));
+ } else {
+ // Registers are not adjacent: store individually.
+ __ Str(scratch1, MemOperand(frame, offset0));
+ __ Str(scratch2, MemOperand(frame, offset1));
+ }
+ }
+ if ((reg_count & 1) != 0) {
+ __ Peek(scratch1, src_offset + (reg_count - 1) * reg_size);
+ offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
+ __ Str(scratch1, MemOperand(frame, offset0));
+ }
+}
+
+#undef __
+
+#define __ masm()->
+
+void Deoptimizer::EntryGenerator::Generate() {
+ GeneratePrologue();
+
+ // TODO(all): This code needs to be revisited. We probably only need to save
+ // caller-saved registers here. Callee-saved registers can be stored directly
+ // in the input frame.
+
+ // Save all allocatable floating point registers.
+ CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSizeInBits,
+ FPRegister::kAllocatableFPRegisters);
+ __ PushCPURegList(saved_fp_registers);
+
+  // We save all the registers except jssp, sp and lr.
+ CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
+ saved_registers.Combine(fp);
+ __ PushCPURegList(saved_registers);
+
+ const int kSavedRegistersAreaSize =
+ (saved_registers.Count() * kXRegSize) +
+ (saved_fp_registers.Count() * kDRegSize);
+
+ // Floating point registers are saved on the stack above core registers.
+ const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;
+
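+  // At this point the stack holds, from the top down (a sketch):
+  //   - the core registers (saved_registers.Count() * kXRegSize bytes),
+  //   - the fp registers (saved_fp_registers.Count() * kDRegSize bytes),
+  //   - the bailout id pushed by the deoptimization table entry.
+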
+ // Get the bailout id from the stack.
+ Register bailout_id = x2;
+ __ Peek(bailout_id, kSavedRegistersAreaSize);
+
+ Register code_object = x3;
+ Register fp_to_sp = x4;
+ // Get the address of the location in the code object. This is the return
+ // address for lazy deoptimization.
+ __ Mov(code_object, lr);
+ // Compute the fp-to-sp delta, and correct one word for bailout id.
+ __ Add(fp_to_sp, masm()->StackPointer(),
+ kSavedRegistersAreaSize + (1 * kPointerSize));
+ __ Sub(fp_to_sp, fp, fp_to_sp);
+
+ // Allocate a new deoptimizer object.
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Mov(x1, type());
+ // Following arguments are already loaded:
+ // - x2: bailout id
+ // - x3: code object address
+ // - x4: fp-to-sp delta
+ __ Mov(x5, ExternalReference::isolate_address(isolate()));
+
+ {
+ // Call Deoptimizer::New().
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+ }
+
+ // Preserve "deoptimizer" object in register x0.
+ Register deoptimizer = x0;
+
+ // Get the input frame descriptor pointer.
+ __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
+
+ // Copy core registers into the input frame.
+ CopyRegisterDumpToFrame(masm(), x1, saved_registers, x2, x4, 0,
+ FrameDescription::registers_offset());
+
+ // Copy FP registers to the input frame.
+ CopyRegisterDumpToFrame(masm(), x1, saved_fp_registers, x2, x4,
+ kFPRegistersOffset,
+ FrameDescription::double_registers_offset());
+
+ // Remove the bailout id and the saved registers from the stack.
+ __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
+
+ // Compute a pointer to the unwinding limit in register x2; that is
+ // the first stack slot not part of the input frame.
+ Register unwind_limit = x2;
+ __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
+ __ Add(unwind_limit, unwind_limit, __ StackPointer());
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add(x3, x1, FrameDescription::frame_content_offset());
+ Label pop_loop;
+ Label pop_loop_header;
+ __ B(&pop_loop_header);
+ __ Bind(&pop_loop);
+ __ Pop(x4);
+ __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
+ __ Bind(&pop_loop_header);
+ __ Cmp(unwind_limit, __ StackPointer());
+ __ B(ne, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ Push(x0); // Preserve deoptimizer object across call.
+
+ {
+ // Call Deoptimizer::ComputeOutputFrames().
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate()), 1);
+ }
+ __ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
+ __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
+ __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
+ __ B(&outer_loop_header);
+
+ __ Bind(&outer_push_loop);
+ Register current_frame = x2;
+ __ Ldr(current_frame, MemOperand(x0, 0));
+ __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
+ __ B(&inner_loop_header);
+
+ __ Bind(&inner_push_loop);
+ __ Sub(x3, x3, kPointerSize);
+ __ Add(x6, current_frame, x3);
+ __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
+ __ Push(x7);
+ __ Bind(&inner_loop_header);
+ __ Cbnz(x3, &inner_push_loop);
+
+ __ Add(x0, x0, kPointerSize);
+ __ Bind(&outer_loop_header);
+ __ Cmp(x0, x1);
+ __ B(lt, &outer_push_loop);
+
+ __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
+ ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
+ !saved_fp_registers.IncludesAliasOf(fp_zero) &&
+ !saved_fp_registers.IncludesAliasOf(fp_scratch));
+ int src_offset = FrameDescription::double_registers_offset();
+ while (!saved_fp_registers.IsEmpty()) {
+ const CPURegister reg = saved_fp_registers.PopLowestIndex();
+ __ Ldr(reg, MemOperand(x1, src_offset));
+ src_offset += kDoubleSize;
+ }
+
+ // Push state from the last output frame.
+ __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
+ __ Push(x6);
+
+ // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
+ // stack, then pops it all into registers. Here, we try to load it directly
+ // into the relevant registers. Is this correct? If so, we should improve the
+ // ARM code.
+
+  // TODO(all): This code needs to be revisited. We probably don't need to
+  // restore all the registers, as fullcodegen does not keep live values in
+  // registers (note that at least fp must be restored, though).
+
+ // Restore registers from the last output frame.
+ // Note that lr is not in the list of saved_registers and will be restored
+ // later. We can use it to hold the address of last output frame while
+ // reloading the other registers.
+ ASSERT(!saved_registers.IncludesAliasOf(lr));
+ Register last_output_frame = lr;
+ __ Mov(last_output_frame, current_frame);
+
+ // We don't need to restore x7 as it will be clobbered later to hold the
+ // continuation address.
+ Register continuation = x7;
+ saved_registers.Remove(continuation);
+
+ while (!saved_registers.IsEmpty()) {
+ // TODO(all): Look for opportunities to optimize this by using ldp.
+ CPURegister current_reg = saved_registers.PopLowestIndex();
+ int offset = (current_reg.code() * kPointerSize) +
+ FrameDescription::registers_offset();
+ __ Ldr(current_reg, MemOperand(last_output_frame, offset));
+ }
+
+ __ Ldr(continuation, MemOperand(last_output_frame,
+ FrameDescription::continuation_offset()));
+ __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
+ __ InitializeRootRegister();
+ __ Br(continuation);
+}
+
+
+// Size of an entry of the second level deopt table.
+// This is the code size generated by GeneratePrologue for one entry.
+const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ UseScratchRegisterScope temps(masm());
+ Register entry_id = temps.AcquireX();
+
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
+ Label done;
+ {
+ InstructionAccurateScope scope(masm());
+
+    // The number of entries will never exceed kMaxNumberOfEntries.
+    // As long as kMaxNumberOfEntries is a valid 16-bit immediate, a movz
+    // instruction can be used to load the entry id.
+ ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
+
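+    // Each iteration of the loop below emits one table entry of
+    // table_entry_size_ bytes:
+    //   movz entry_id, #i   ; load the entry index
+    //   b done              ; the common tail pushes entry_id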
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ movz(entry_id, i);
+ __ b(&done);
+ ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ }
+ }
+ __ Bind(&done);
+ __ Push(entry_id);
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/arm64/disasm-arm64.cc b/chromium/v8/src/arm64/disasm-arm64.cc
new file mode 100644
index 00000000000..e6a30b477e8
--- /dev/null
+++ b/chromium/v8/src/arm64/disasm-arm64.cc
@@ -0,0 +1,1832 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/disasm.h"
+#include "src/arm64/decoder-arm64-inl.h"
+#include "src/arm64/disasm-arm64.h"
+#include "src/macro-assembler.h"
+#include "src/platform.h"
+
+namespace v8 {
+namespace internal {
+
+
+Disassembler::Disassembler() {
+ buffer_size_ = 256;
+ buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+ buffer_pos_ = 0;
+ own_buffer_ = true;
+}
+
+
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+ buffer_size_ = buffer_size;
+ buffer_ = text_buffer;
+ buffer_pos_ = 0;
+ own_buffer_ = false;
+}
+
+
+Disassembler::~Disassembler() {
+ if (own_buffer_) {
+ free(buffer_);
+ }
+}
+
+
+char* Disassembler::GetOutput() {
+ return buffer_;
+}
+
+
+void Disassembler::VisitAddSubImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+  bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+                  (instr->ImmAddSub() == 0);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rns, 'IAddSub";
+ const char *form_cmp = "'Rns, 'IAddSub";
+ const char *form_mov = "'Rds, 'Rns";
+
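+  // A note on the Format() placeholders used below (an assumption, as the
+  // substitution code lives elsewhere): 'Rds and 'Rns print the destination
+  // and first source register with sp, rather than zr, for register code 31,
+  // and 'IAddSub prints the 12-bit immediate field.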
+ switch (instr->Mask(AddSubImmediateMask)) {
+ case ADD_w_imm:
+ case ADD_x_imm: {
+ mnemonic = "add";
+ if (stack_op) {
+ mnemonic = "mov";
+ form = form_mov;
+ }
+ break;
+ }
+ case ADDS_w_imm:
+ case ADDS_x_imm: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_imm:
+ case SUB_x_imm: mnemonic = "sub"; break;
+ case SUBS_w_imm:
+ case SUBS_x_imm: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HDP";
+ const char *form_cmp = "'Rn, 'Rm'HDP";
+ const char *form_neg = "'Rd, 'Rm'HDP";
+
+ switch (instr->Mask(AddSubShiftedMask)) {
+ case ADD_w_shift:
+ case ADD_x_shift: mnemonic = "add"; break;
+ case ADDS_w_shift:
+ case ADDS_x_shift: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_shift:
+ case SUB_x_shift: {
+ mnemonic = "sub";
+ if (rn_is_zr) {
+ mnemonic = "neg";
+ form = form_neg;
+ }
+ break;
+ }
+ case SUBS_w_shift:
+ case SUBS_x_shift: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ } else if (rn_is_zr) {
+ mnemonic = "negs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubExtended(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ const char *mnemonic = "";
+ Extend mode = static_cast<Extend>(instr->ExtendMode());
+ const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+ const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+ switch (instr->Mask(AddSubExtendedMask)) {
+ case ADD_w_ext:
+ case ADD_x_ext: mnemonic = "add"; break;
+ case ADDS_w_ext:
+ case ADDS_x_ext: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_ext:
+ case SUB_x_ext: mnemonic = "sub"; break;
+ case SUBS_w_ext:
+ case SUBS_x_ext: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_neg = "'Rd, 'Rm";
+
+ switch (instr->Mask(AddSubWithCarryMask)) {
+ case ADC_w:
+ case ADC_x: mnemonic = "adc"; break;
+ case ADCS_w:
+ case ADCS_x: mnemonic = "adcs"; break;
+ case SBC_w:
+ case SBC_x: {
+ mnemonic = "sbc";
+ if (rn_is_zr) {
+ mnemonic = "ngc";
+ form = form_neg;
+ }
+ break;
+ }
+ case SBCS_w:
+ case SBCS_x: {
+ mnemonic = "sbcs";
+ if (rn_is_zr) {
+ mnemonic = "ngcs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLogicalImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rn, 'ITri";
+
+ if (instr->ImmLogical() == 0) {
+ // The immediate encoded in the instruction is not in the expected format.
+ Format(instr, "unallocated", "(LogicalImmediate)");
+ return;
+ }
+
+ switch (instr->Mask(LogicalImmediateMask)) {
+ case AND_w_imm:
+ case AND_x_imm: mnemonic = "and"; break;
+ case ORR_w_imm:
+ case ORR_x_imm: {
+ mnemonic = "orr";
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+ mnemonic = "mov";
+ form = "'Rds, 'ITri";
+ }
+ break;
+ }
+ case EOR_w_imm:
+ case EOR_x_imm: mnemonic = "eor"; break;
+ case ANDS_w_imm:
+ case ANDS_x_imm: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'ITri";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+ ASSERT((reg_size == kXRegSizeInBits) ||
+ ((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
+
+  // Test for movz: 16 bits set at position 0, 16, 32 or 48.
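+  // (For example, 0xba00000000000000 has all of its set bits in the top
+  // halfword, so a single movz with a 48-bit shift can materialize it.)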
+ if (((value & 0xffffffffffff0000UL) == 0UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0UL) ||
+ ((value & 0xffff0000ffffffffUL) == 0UL) ||
+ ((value & 0x0000ffffffffffffUL) == 0UL)) {
+ return true;
+ }
+
+  // Test for movn: NOT(16 bits set at position 0, 16, 32 or 48).
+ if ((reg_size == kXRegSizeInBits) &&
+ (((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
+ ((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
+ ((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
+ return true;
+ }
+ if ((reg_size == kWRegSizeInBits) &&
+ (((value & 0xffff0000) == 0xffff0000) ||
+ ((value & 0x0000ffff) == 0x0000ffff))) {
+ return true;
+ }
+ return false;
+}
+
+
+void Disassembler::VisitLogicalShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HLo";
+
+ switch (instr->Mask(LogicalShiftedMask)) {
+ case AND_w:
+ case AND_x: mnemonic = "and"; break;
+ case BIC_w:
+ case BIC_x: mnemonic = "bic"; break;
+ case EOR_w:
+ case EOR_x: mnemonic = "eor"; break;
+ case EON_w:
+ case EON_x: mnemonic = "eon"; break;
+ case BICS_w:
+ case BICS_x: mnemonic = "bics"; break;
+ case ANDS_w:
+ case ANDS_x: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'Rm'HLo";
+ }
+ break;
+ }
+ case ORR_w:
+ case ORR_x: {
+ mnemonic = "orr";
+ if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+ mnemonic = "mov";
+ form = "'Rd, 'Rm";
+ }
+ break;
+ }
+ case ORN_w:
+ case ORN_x: {
+ mnemonic = "orn";
+ if (rn_is_zr) {
+ mnemonic = "mvn";
+ form = "'Rd, 'Rm'HLo";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareRegisterMask)) {
+ case CCMN_w:
+ case CCMN_x: mnemonic = "ccmn"; break;
+ case CCMP_w:
+ case CCMP_x: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareImmediateMask)) {
+ case CCMN_w_imm:
+ case CCMN_x_imm: mnemonic = "ccmn"; break;
+ case CCMP_w_imm:
+ case CCMP_x_imm: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalSelect(Instruction* instr) {
+ bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+ bool rn_is_rm = (instr->Rn() == instr->Rm());
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+ const char *form_test = "'Rd, 'CInv";
+ const char *form_update = "'Rd, 'Rn, 'CInv";
+
+ Condition cond = static_cast<Condition>(instr->Condition());
+ bool invertible_cond = (cond != al) && (cond != nv);
+
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: mnemonic = "csel"; break;
+ case CSINC_w:
+ case CSINC_x: {
+ mnemonic = "csinc";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "cset";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinc";
+ form = form_update;
+ }
+ break;
+ }
+ case CSINV_w:
+ case CSINV_x: {
+ mnemonic = "csinv";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "csetm";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinv";
+ form = form_update;
+ }
+ break;
+ }
+ case CSNEG_w:
+ case CSNEG_x: {
+ mnemonic = "csneg";
+ if (rn_is_rm && invertible_cond) {
+ mnemonic = "cneg";
+ form = form_update;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitBitfield(Instruction* instr) {
+ unsigned s = instr->ImmS();
+ unsigned r = instr->ImmR();
+ unsigned rd_size_minus_1 =
+ ((instr->SixtyFourBits() == 1) ? kXRegSizeInBits : kWRegSizeInBits) - 1;
+ const char *mnemonic = "";
+ const char *form = "";
+ const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+ const char *form_extend = "'Rd, 'Wn";
+ const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+ const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+ const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
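+  // The generic SBFM/UBFM/BFM encodings are printed under their preferred
+  // aliases where one applies. For example, ubfm w0, w1, #16, #31 satisfies
+  // s == rd_size_minus_1 and is shown as "lsr w0, w1, #16".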
+ switch (instr->Mask(BitfieldMask)) {
+ case SBFM_w:
+ case SBFM_x: {
+ mnemonic = "sbfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "sxtb";
+ } else if (s == 15) {
+ mnemonic = "sxth";
+ } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+ mnemonic = "sxtw";
+ } else {
+ form = form_bfx;
+ }
+ } else if (s == rd_size_minus_1) {
+ mnemonic = "asr";
+ form = form_shift_right;
+ } else if (s < r) {
+ mnemonic = "sbfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case UBFM_w:
+ case UBFM_x: {
+ mnemonic = "ubfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "uxtb";
+ } else if (s == 15) {
+ mnemonic = "uxth";
+ } else {
+ form = form_bfx;
+ }
+ }
+ if (s == rd_size_minus_1) {
+ mnemonic = "lsr";
+ form = form_shift_right;
+ } else if (r == s + 1) {
+ mnemonic = "lsl";
+ form = form_lsl;
+ } else if (s < r) {
+ mnemonic = "ubfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case BFM_w:
+ case BFM_x: {
+ mnemonic = "bfxil";
+ form = form_bfx;
+ if (s < r) {
+ mnemonic = "bfi";
+ form = form_bfiz;
+ }
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitExtract(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+ switch (instr->Mask(ExtractMask)) {
+ case EXTR_w:
+ case EXTR_x: {
+ if (instr->Rn() == instr->Rm()) {
+ mnemonic = "ror";
+ form = "'Rd, 'Rn, 'IExtract";
+ } else {
+ mnemonic = "extr";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+ // ADRP is not implemented.
+ default: Format(instr, "unimplemented", "(PCRelAddressing)");
+ }
+}
+
+
+void Disassembler::VisitConditionalBranch(Instruction* instr) {
+ switch (instr->Mask(ConditionalBranchMask)) {
+ case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Xn";
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BR: mnemonic = "br"; break;
+ case BLR: mnemonic = "blr"; break;
+ case RET: {
+ mnemonic = "ret";
+ if (instr->Rn() == kLinkRegCode) {
+ form = NULL;
+ }
+ break;
+ }
+ default: form = "(UnconditionalBranchToRegister)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'BImmUncn";
+
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case B: mnemonic = "b"; break;
+ case BL: mnemonic = "bl"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn";
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(RBIT, "rbit");
+ FORMAT(REV16, "rev16");
+ FORMAT(REV, "rev");
+ FORMAT(CLZ, "clz");
+ FORMAT(CLS, "cls");
+ #undef FORMAT
+ case REV32_x: mnemonic = "rev32"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Rd, 'Rn, 'Rm";
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(UDIV, "udiv");
+ FORMAT(SDIV, "sdiv");
+ FORMAT(LSLV, "lsl");
+ FORMAT(LSRV, "lsr");
+ FORMAT(ASRV, "asr");
+ FORMAT(RORV, "ror");
+ #undef FORMAT
+ default: form = "(DataProcessing2Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
+ bool ra_is_zr = RaIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+ const char *form_rrr = "'Rd, 'Rn, 'Rm";
+ const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+ const char *form_xww = "'Xd, 'Wn, 'Wm";
+ const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x: {
+ mnemonic = "madd";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mul";
+ form = form_rrr;
+ }
+ break;
+ }
+ case MSUB_w:
+ case MSUB_x: {
+ mnemonic = "msub";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mneg";
+ form = form_rrr;
+ }
+ break;
+ }
+ case SMADDL_x: {
+ mnemonic = "smaddl";
+ if (ra_is_zr) {
+ mnemonic = "smull";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMSUBL_x: {
+ mnemonic = "smsubl";
+ if (ra_is_zr) {
+ mnemonic = "smnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMADDL_x: {
+ mnemonic = "umaddl";
+ if (ra_is_zr) {
+ mnemonic = "umull";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMSUBL_x: {
+ mnemonic = "umsubl";
+ if (ra_is_zr) {
+ mnemonic = "umnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMULH_x: {
+ mnemonic = "smulh";
+ form = form_xxx;
+ break;
+ }
+ case UMULH_x: {
+ mnemonic = "umulh";
+ form = form_xxx;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCompareBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rt, 'BImmCmpa";
+
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w:
+ case CBZ_x: mnemonic = "cbz"; break;
+ case CBNZ_w:
+ case CBNZ_x: mnemonic = "cbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitTestBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ // If the top bit of the immediate is clear, the tested register is
+ // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+ // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+ // uses bit 31 (normally "sf") to choose the register size.
+ const char *form = "'Rt, 'IS, 'BImmTest";
+
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: mnemonic = "tbz"; break;
+ case TBNZ: mnemonic = "tbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'IMoveImm";
+
+  // Print the shift separately for movk, to make it clear which halfword
+  // will be overwritten. Movn and movz print the computed immediate, which
+  // includes the shift calculation.
+ switch (instr->Mask(MoveWideImmediateMask)) {
+ case MOVN_w:
+ case MOVN_x: mnemonic = "movn"; break;
+ case MOVZ_w:
+ case MOVZ_x: mnemonic = "movz"; break;
+ case MOVK_w:
+ case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_LIST(V) \
+ V(STRB_w, "strb", "'Wt") \
+ V(STRH_w, "strh", "'Wt") \
+ V(STR_w, "str", "'Wt") \
+ V(STR_x, "str", "'Xt") \
+ V(LDRB_w, "ldrb", "'Wt") \
+ V(LDRH_w, "ldrh", "'Wt") \
+ V(LDR_w, "ldr", "'Wt") \
+ V(LDR_x, "ldr", "'Xt") \
+ V(LDRSB_x, "ldrsb", "'Xt") \
+ V(LDRSH_x, "ldrsh", "'Xt") \
+ V(LDRSW_x, "ldrsw", "'Xt") \
+ V(LDRSB_w, "ldrsb", "'Wt") \
+ V(LDRSH_w, "ldrsh", "'Wt") \
+ V(STR_s, "str", "'St") \
+ V(STR_d, "str", "'Dt") \
+ V(LDR_s, "ldr", "'St") \
+ V(LDR_d, "ldr", "'Dt")
+
+void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePreIndex)";
+
+ switch (instr->Mask(LoadStorePreIndexMask)) {
+ #define LS_PREINDEX(A, B, C) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+ LOAD_STORE_LIST(LS_PREINDEX)
+ #undef LS_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePostIndex)";
+
+ switch (instr->Mask(LoadStorePostIndexMask)) {
+ #define LS_POSTINDEX(A, B, C) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+ LOAD_STORE_LIST(LS_POSTINDEX)
+ #undef LS_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreUnsignedOffset)";
+
+ switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+ #define LS_UNSIGNEDOFFSET(A, B, C) \
+ case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+ LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+ #undef LS_UNSIGNEDOFFSET
+ case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreRegisterOffset)";
+
+ switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+ #define LS_REGISTEROFFSET(A, B, C) \
+ case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+ LOAD_STORE_LIST(LS_REGISTEROFFSET)
+ #undef LS_REGISTEROFFSET
+ case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Wt, ['Xns'ILS]";
+ const char *form_x = "'Xt, ['Xns'ILS]";
+ const char *form_s = "'St, ['Xns'ILS]";
+ const char *form_d = "'Dt, ['Xns'ILS]";
+
+ switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+ case STURB_w: mnemonic = "sturb"; break;
+ case STURH_w: mnemonic = "sturh"; break;
+ case STUR_w: mnemonic = "stur"; break;
+ case STUR_x: mnemonic = "stur"; form = form_x; break;
+ case STUR_s: mnemonic = "stur"; form = form_s; break;
+ case STUR_d: mnemonic = "stur"; form = form_d; break;
+ case LDURB_w: mnemonic = "ldurb"; break;
+ case LDURH_w: mnemonic = "ldurh"; break;
+ case LDUR_w: mnemonic = "ldur"; break;
+ case LDUR_x: mnemonic = "ldur"; form = form_x; break;
+ case LDUR_s: mnemonic = "ldur"; form = form_s; break;
+ case LDUR_d: mnemonic = "ldur"; form = form_d; break;
+ case LDURSB_x: form = form_x; // Fall through.
+ case LDURSB_w: mnemonic = "ldursb"; break;
+ case LDURSH_x: form = form_x; // Fall through.
+ case LDURSH_w: mnemonic = "ldursh"; break;
+ case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+ default: form = "(LoadStoreUnscaledOffset)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadLiteral(Instruction* instr) {
+ const char *mnemonic = "ldr";
+ const char *form = "(LoadLiteral)";
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+ case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+ case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+ case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+ default: mnemonic = "unimplemented";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_PAIR_LIST(V) \
+ V(STP_w, "stp", "'Wt, 'Wt2", "4") \
+ V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \
+ V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \
+ V(STP_x, "stp", "'Xt, 'Xt2", "8") \
+ V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \
+ V(STP_s, "stp", "'St, 'St2", "4") \
+ V(LDP_s, "ldp", "'St, 'St2", "4") \
+ V(STP_d, "stp", "'Dt, 'Dt2", "8") \
+ V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
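+
+// In LOAD_STORE_PAIR_LIST, the trailing "4" or "8" parameter is spliced
+// into the 'ILP placeholder and scales the encoded 7-bit pair offset by
+// the access size.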
+
+void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPostIndex)";
+
+ switch (instr->Mask(LoadStorePairPostIndexMask)) {
+ #define LSP_POSTINDEX(A, B, C, D) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+ LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+ #undef LSP_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPreIndex)";
+
+ switch (instr->Mask(LoadStorePairPreIndexMask)) {
+ #define LSP_PREINDEX(A, B, C, D) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+ LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+ #undef LSP_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairOffset)";
+
+ switch (instr->Mask(LoadStorePairOffsetMask)) {
+ #define LSP_OFFSET(A, B, C, D) \
+ case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+ LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+ #undef LSP_OFFSET
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+ case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ default: form = "(LoadStorePairNonTemporal)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPCompare(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm";
+ const char *form_zero = "'Fn, #0.0";
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s_zero:
+ case FCMP_d_zero: form = form_zero; // Fall through.
+ case FCMP_s:
+ case FCMP_d: mnemonic = "fcmp"; break;
+ default: form = "(FPCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: mnemonic = "fccmp"; break;
+ case FCCMPE_s:
+ case FCCMPE_d: mnemonic = "fccmpe"; break;
+ default: form = "(FPConditionalCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s:
+ case FCSEL_d: mnemonic = "fcsel"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fd, 'Fn";
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMOV, "fmov");
+ FORMAT(FABS, "fabs");
+ FORMAT(FNEG, "fneg");
+ FORMAT(FSQRT, "fsqrt");
+ FORMAT(FRINTN, "frintn");
+ FORMAT(FRINTP, "frintp");
+ FORMAT(FRINTM, "frintm");
+ FORMAT(FRINTZ, "frintz");
+ FORMAT(FRINTA, "frinta");
+ FORMAT(FRINTX, "frintx");
+ FORMAT(FRINTI, "frinti");
+ #undef FORMAT
+ case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+ case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+ default: form = "(FPDataProcessing1Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm";
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMUL, "fmul");
+ FORMAT(FDIV, "fdiv");
+ FORMAT(FADD, "fadd");
+ FORMAT(FSUB, "fsub");
+ FORMAT(FMAX, "fmax");
+ FORMAT(FMIN, "fmin");
+ FORMAT(FMAXNM, "fmaxnm");
+ FORMAT(FMINNM, "fminnm");
+ FORMAT(FNMUL, "fnmul");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMADD, "fmadd");
+ FORMAT(FMSUB, "fmsub");
+ FORMAT(FNMADD, "fnmadd");
+ FORMAT(FNMSUB, "fnmsub");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "(FPImmediate)";
+
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
+ case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(FPIntegerConvert)";
+ const char *form_rf = "'Rd, 'Fn";
+ const char *form_fr = "'Fd, 'Rn";
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FMOV_ws:
+ case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+ case FMOV_sw:
+ case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FCVTAS_ws:
+ case FCVTAS_xs:
+ case FCVTAS_wd:
+ case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+ case FCVTAU_ws:
+ case FCVTAU_xs:
+ case FCVTAU_wd:
+ case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+ case FCVTMS_ws:
+ case FCVTMS_xs:
+ case FCVTMS_wd:
+ case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+ case FCVTMU_ws:
+ case FCVTMU_xs:
+ case FCVTMU_wd:
+ case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+ case FCVTNS_ws:
+ case FCVTNS_xs:
+ case FCVTNS_wd:
+ case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+ case FCVTNU_ws:
+ case FCVTNU_xs:
+ case FCVTNU_wd:
+ case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+ case FCVTZU_xd:
+ case FCVTZU_ws:
+ case FCVTZU_wd:
+ case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+ case FCVTZS_xd:
+ case FCVTZS_wd:
+ case FCVTZS_xs:
+ case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case SCVTF_sw:
+ case SCVTF_sx:
+ case SCVTF_dw:
+ case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw:
+ case UCVTF_sx:
+ case UCVTF_dw:
+ case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
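+
+// Example (illustrative only): "fmov w0, s1" uses form_rf, while
+// "scvtf d0, x1" uses form_fr; the integer/FP operand order follows the
+// direction of the conversion.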
+
+
+void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Fn, 'IFPFBits";
+ const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ case FCVTZS_ws_fixed:
+ case FCVTZS_xs_fixed:
+ case FCVTZS_wd_fixed:
+ case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+ case FCVTZU_ws_fixed:
+ case FCVTZU_xs_fixed:
+ case FCVTZU_wd_fixed:
+ case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+ case SCVTF_sw_fixed:
+ case SCVTF_sx_fixed:
+ case SCVTF_dw_fixed:
+ case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw_fixed:
+ case UCVTF_sx_fixed:
+ case UCVTF_dw_fixed:
+ case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitSystem(Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ const char *mnemonic = "unimplemented";
+ const char *form = "(System)";
+
+ if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ mnemonic = "mrs";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "'Xt, nzcv"; break;
+ case FPCR: form = "'Xt, fpcr"; break;
+ default: form = "'Xt, (unknown)"; break;
+ }
+ break;
+ }
+ case MSR: {
+ mnemonic = "msr";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "nzcv, 'Xt"; break;
+ case FPCR: form = "fpcr, 'Xt"; break;
+ default: form = "(unknown), 'Xt"; break;
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: {
+ mnemonic = "nop";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ switch (instr->Mask(MemBarrierMask)) {
+ case DMB: {
+ mnemonic = "dmb";
+ form = "'M";
+ break;
+ }
+ case DSB: {
+ mnemonic = "dsb";
+ form = "'M";
+ break;
+ }
+ case ISB: {
+ mnemonic = "isb";
+ form = NULL;
+ break;
+ }
+ }
+ }
+
+ Format(instr, mnemonic, form);
+}
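+
+// Example outputs (illustrative only): "mrs x0, nzcv", "msr fpcr, x1",
+// "dmb ish" and a bare "nop" (a NULL form makes Format() omit the operand
+// list entirely).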
+
+
+void Disassembler::VisitException(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'IDebug";
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: mnemonic = "hlt"; break;
+ case BRK: mnemonic = "brk"; break;
+ case SVC: mnemonic = "svc"; break;
+ case HVC: mnemonic = "hvc"; break;
+ case SMC: mnemonic = "smc"; break;
+ case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+ case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+ case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+ default: form = "(Exception)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnimplemented(Instruction* instr) {
+ Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+void Disassembler::VisitUnallocated(Instruction* instr) {
+ Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+void Disassembler::ProcessOutput(Instruction* /*instr*/) {
+ // The base disasm does nothing more than disassembling into a buffer.
+}
+
+
+void Disassembler::Format(Instruction* instr, const char* mnemonic,
+ const char* format) {
+ // TODO(mcapewel) don't think I can use the instr address here - there needs
+ // to be a base address too
+ ASSERT(mnemonic != NULL);
+ ResetOutput();
+ Substitute(instr, mnemonic);
+ if (format != NULL) {
+ buffer_[buffer_pos_++] = ' ';
+ Substitute(instr, format);
+ }
+ buffer_[buffer_pos_] = 0;
+ ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(Instruction* instr, const char* string) {
+ char chr = *string++;
+ while (chr != '\0') {
+ if (chr == '\'') {
+ string += SubstituteField(instr, string);
+ } else {
+ buffer_[buffer_pos_++] = chr;
+ }
+ chr = *string++;
+ }
+}
+
+
+int Disassembler::SubstituteField(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'R': // Register. X or W, selected by sf bit.
+ case 'F': // FP Register. S or D, selected by type field.
+ case 'W':
+ case 'X':
+ case 'S':
+ case 'D': return SubstituteRegisterField(instr, format);
+ case 'I': return SubstituteImmediateField(instr, format);
+ case 'L': return SubstituteLiteralField(instr, format);
+ case 'H': return SubstituteShiftField(instr, format);
+ case 'P': return SubstitutePrefetchField(instr, format);
+ case 'C': return SubstituteConditionField(instr, format);
+ case 'E': return SubstituteExtendField(instr, format);
+ case 'A': return SubstitutePCRelAddressField(instr, format);
+ case 'B': return SubstituteBranchTargetField(instr, format);
+ case 'O': return SubstituteLSRegOffsetField(instr, format);
+ case 'M': return SubstituteBarrierField(instr, format);
+ default: {
+ UNREACHABLE();
+ return 1;
+ }
+ }
+}
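+
+// Worked example (illustrative only): for FMUL_s, Format() is called with
+// mnemonic "fmul" and form "'Fd, 'Fn, 'Fm". Substitute() copies literal
+// characters and hands each '-prefixed field to SubstituteField(), which
+// appends the operand text and returns the number of format characters
+// consumed, producing e.g. "fmul s0, s1, s2".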
+
+
+int Disassembler::SubstituteRegisterField(Instruction* instr,
+ const char* format) {
+ unsigned reg_num = 0;
+ unsigned field_len = 2;
+ switch (format[1]) {
+ case 'd': reg_num = instr->Rd(); break;
+ case 'n': reg_num = instr->Rn(); break;
+ case 'm': reg_num = instr->Rm(); break;
+ case 'a': reg_num = instr->Ra(); break;
+ case 't': {
+ if (format[2] == '2') {
+ reg_num = instr->Rt2();
+ field_len = 3;
+ } else {
+ reg_num = instr->Rt();
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ // Increase field length for registers tagged as stack.
+ if (format[2] == 's') {
+ field_len = 3;
+ }
+
+ char reg_type;
+ if (format[0] == 'R') {
+ // Register type is R: use sf bit to choose X and W.
+ reg_type = instr->SixtyFourBits() ? 'x' : 'w';
+ } else if (format[0] == 'F') {
+ // Floating-point register: use type field to choose S or D.
+ reg_type = ((instr->FPType() & 1) == 0) ? 's' : 'd';
+ } else {
+ // Register type is specified. Make it lower case.
+ reg_type = format[0] + 0x20;
+ }
+
+ if ((reg_num != kZeroRegCode) || (reg_type == 's') || (reg_type == 'd')) {
+ // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31.
+
+ // Filter special registers
+ if ((reg_type == 'x') && (reg_num == 27)) {
+ AppendToOutput("cp");
+ } else if ((reg_type == 'x') && (reg_num == 28)) {
+ AppendToOutput("jssp");
+ } else if ((reg_type == 'x') && (reg_num == 29)) {
+ AppendToOutput("fp");
+ } else if ((reg_type == 'x') && (reg_num == 30)) {
+ AppendToOutput("lr");
+ } else {
+ AppendToOutput("%c%d", reg_type, reg_num);
+ }
+ } else if (format[2] == 's') {
+ // Disassemble w31/x31 as stack pointer wcsp/csp.
+ AppendToOutput("%s", (reg_type == 'w') ? "wcsp" : "csp");
+ } else {
+ // Disassemble w31/x31 as zero register wzr/xzr.
+ AppendToOutput("%czr", reg_type);
+ }
+
+ return field_len;
+}
+
+
+int Disassembler::SubstituteImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'I');
+
+ switch (format[1]) {
+ case 'M': { // IMoveImm or IMoveLSL.
+ if (format[5] == 'I') {
+ uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+ AppendToOutput("#0x%" PRIx64, imm);
+ } else {
+ ASSERT(format[5] == 'L');
+ AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
+ if (instr->ShiftMoveWide() > 0) {
+ AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
+ }
+ }
+ return 8;
+ }
+ case 'L': {
+ switch (format[2]) {
+ case 'L': { // ILLiteral - Immediate Load Literal.
+ AppendToOutput("pc%+" PRId64,
+ instr->ImmLLiteral() << kLoadLiteralScaleLog2);
+ return 9;
+ }
+ case 'S': { // ILS - Immediate Load/Store.
+ if (instr->ImmLS() != 0) {
+ AppendToOutput(", #%" PRId64, instr->ImmLS());
+ }
+ return 3;
+ }
+ case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
+ if (instr->ImmLSPair() != 0) {
+ // format[3] is the scale value. Convert to a number.
+ int scale = format[3] - 0x30;
+ AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
+ }
+ return 4;
+ }
+ case 'U': { // ILU - Immediate Load/Store Unsigned.
+ if (instr->ImmLSUnsigned() != 0) {
+ AppendToOutput(", #%" PRIu64,
+ instr->ImmLSUnsigned() << instr->SizeLS());
+ }
+ return 3;
+ }
+      }
+      // Every ILx sub-field returns above; falling through to 'C' would
+      // misinterpret the format string.
+      UNREACHABLE();
+      return 0;
+    }
+ case 'C': { // ICondB - Immediate Conditional Branch.
+ int64_t offset = instr->ImmCondBranch() << 2;
+ char sign = (offset >= 0) ? '+' : '-';
+ AppendToOutput("#%c0x%" PRIx64, sign, offset);
+ return 6;
+ }
+ case 'A': { // IAddSub.
+ ASSERT(instr->ShiftAddSub() <= 1);
+ int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+ AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+ return 7;
+ }
+ case 'F': { // IFPSingle, IFPDouble or IFPFBits.
+ if (format[3] == 'F') { // IFPFBits.
+ AppendToOutput("#%d", 64 - instr->FPScale());
+ return 8;
+ } else {
+ AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
+ format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+ return 9;
+ }
+ }
+ case 'T': { // ITri - Immediate Triangular Encoded.
+ AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+ return 4;
+ }
+ case 'N': { // INzcv.
+ int nzcv = (instr->Nzcv() << Flags_offset);
+ AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+ ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+ ((nzcv & CFlag) == 0) ? 'c' : 'C',
+ ((nzcv & VFlag) == 0) ? 'v' : 'V');
+ return 5;
+ }
+ case 'P': { // IP - Conditional compare.
+ AppendToOutput("#%d", instr->ImmCondCmp());
+ return 2;
+ }
+ case 'B': { // Bitfields.
+ return SubstituteBitfieldImmediateField(instr, format);
+ }
+ case 'E': { // IExtract.
+ AppendToOutput("#%d", instr->ImmS());
+ return 8;
+ }
+ case 'S': { // IS - Test and branch bit.
+ AppendToOutput("#%d", (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40());
+ return 2;
+ }
+ case 'D': { // IDebug - HLT and BRK instructions.
+ AppendToOutput("#0x%x", instr->ImmException());
+ return 6;
+ }
+ default: {
+ UNREACHABLE();
+ return 0;
+ }
+ }
+}
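+
+// Example (illustrative only): with ImmMoveWide() == 0x1234 and
+// ShiftMoveWide() == 1, the IMoveLSL form prints "#0x1234, lsl #16" while
+// the IMoveImm form prints the combined value "#0x12340000".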
+
+
+int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ unsigned r = instr->ImmR();
+ unsigned s = instr->ImmS();
+
+ switch (format[2]) {
+ case 'r': { // IBr.
+ AppendToOutput("#%d", r);
+ return 3;
+ }
+ case 's': { // IBs+1 or IBs-r+1.
+ if (format[3] == '+') {
+ AppendToOutput("#%d", s + 1);
+ return 5;
+ } else {
+ ASSERT(format[3] == '-');
+ AppendToOutput("#%d", s - r + 1);
+ return 7;
+ }
+ }
+ case 'Z': { // IBZ-r.
+ ASSERT((format[3] == '-') && (format[4] == 'r'));
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ AppendToOutput("#%d", reg_size - r);
+ return 5;
+ }
+ default: {
+ UNREACHABLE();
+ return 0;
+ }
+ }
+}
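+
+// Example (illustrative only): for a bitfield extract with ImmR() == 8 and
+// ImmS() == 15, "IBr" prints "#8" (the lsb) and "IBs-r+1" prints "#8"
+// (the field width, s - r + 1).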
+
+
+int Disassembler::SubstituteLiteralField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "LValue", 6) == 0);
+ USE(format);
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit:
+ case LDR_x_lit:
+ case LDR_s_lit:
+ case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
+ default: UNREACHABLE();
+ }
+
+ return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
+ ASSERT(format[0] == 'H');
+ ASSERT(instr->ShiftDP() <= 0x3);
+
+ switch (format[1]) {
+ case 'D': { // HDP.
+ ASSERT(instr->ShiftDP() != ROR);
+ } // Fall through.
+ case 'L': { // HLo.
+ if (instr->ImmDPShift() != 0) {
+ const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+ AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
+ instr->ImmDPShift());
+ }
+ return 3;
+ }
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+
+int Disassembler::SubstituteConditionField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'C');
+ const char* condition_code[] = { "eq", "ne", "hs", "lo",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "al", "nv" };
+ int cond;
+ switch (format[1]) {
+ case 'B': cond = instr->ConditionBranch(); break;
+ case 'I': {
+ cond = NegateCondition(static_cast<Condition>(instr->Condition()));
+ break;
+ }
+ default: cond = instr->Condition();
+ }
+ AppendToOutput("%s", condition_code[cond]);
+ return 4;
+}
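+
+// Example (illustrative only): "'CB" prints the condition encoded in a
+// conditional branch, e.g. "eq", while "'CI" prints the inverse of the
+// encoded condition.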
+
+
+int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
+ const char* format) {
+ USE(format);
+ ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+
+ int offset = instr->ImmPCRel();
+
+ // Only ADR (AddrPCRelByte) is supported.
+ ASSERT(strcmp(format, "AddrPCRelByte") == 0);
+
+  // Compute the target address before the offset is made absolute, so that
+  // backward references still print the correct address.
+  Instruction* target =
+      instr->InstructionAtOffset(offset, Instruction::NO_CHECK);
+  char sign = '+';
+  if (offset < 0) {
+    offset = -offset;
+    sign = '-';
+  }
+  AppendToOutput("#%c0x%x (addr %p)", sign, offset, target);
+ return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "BImm", 4) == 0);
+
+ int64_t offset = 0;
+ switch (format[5]) {
+ // BImmUncn - unconditional branch immediate.
+ case 'n': offset = instr->ImmUncondBranch(); break;
+ // BImmCond - conditional branch immediate.
+ case 'o': offset = instr->ImmCondBranch(); break;
+ // BImmCmpa - compare and branch immediate.
+ case 'm': offset = instr->ImmCmpBranch(); break;
+ // BImmTest - test and branch immediate.
+ case 'e': offset = instr->ImmTestBranch(); break;
+ default: UNREACHABLE();
+ }
+ offset <<= kInstructionSizeLog2;
+ char sign = '+';
+ if (offset < 0) {
+ sign = '-';
+ }
+ AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, Abs(offset),
+ instr->InstructionAtOffset(offset), Instruction::NO_CHECK);
+ return 8;
+}
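+
+// Example (illustrative only): an unconditional branch four instructions
+// forward has ImmUncondBranch() == 4, so the output reads
+// "b #+0x10 (addr <target>)".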
+
+
+int Disassembler::SubstituteExtendField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Ext", 3) == 0);
+ ASSERT(instr->ExtendMode() <= 7);
+ USE(format);
+
+ const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+ "sxtb", "sxth", "sxtw", "sxtx" };
+
+  // If rd or rn is SP, uxtw (on 32-bit registers) and uxtx (on 64-bit
+  // registers) become lsl.
+ if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+ (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+ (instr->ExtendMode() == UXTX))) {
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(", lsl #%d", instr->ImmExtendShift());
+ }
+ } else {
+ AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(" #%d", instr->ImmExtendShift());
+ }
+ }
+ return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+ "undefined", "undefined", "sxtw", "sxtx" };
+ USE(format);
+
+ unsigned shift = instr->ImmShiftLS();
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+ unsigned rm = instr->Rm();
+ if (rm == kZeroRegCode) {
+ AppendToOutput("%czr", reg_type);
+ } else {
+ AppendToOutput("%c%d", reg_type, rm);
+ }
+
+ // Extend mode UXTX is an alias for shift mode LSL here.
+ if (!((ext == UXTX) && (shift == 0))) {
+ AppendToOutput(", %s", extend_mode[ext]);
+ if (shift != 0) {
+ AppendToOutput(" #%d", instr->SizeLS());
+ }
+ }
+ return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'P');
+ USE(format);
+
+ int prefetch_mode = instr->PrefetchMode();
+
+ const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
+ int level = (prefetch_mode >> 1) + 1;
+ const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
+
+ AppendToOutput("p%sl%d%s", ls, level, ks);
+ return 6;
+}
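+
+// Example (illustrative only): prefetch_mode 0 prints as "pldl1keep" and
+// 0b00101 prints as "pldl3strm".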
+
+int Disassembler::SubstituteBarrierField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'M');
+ USE(format);
+
+ static const char* options[4][4] = {
+ { "sy (0b0000)", "oshld", "oshst", "osh" },
+ { "sy (0b0100)", "nshld", "nshst", "nsh" },
+ { "sy (0b1000)", "ishld", "ishst", "ish" },
+ { "sy (0b1100)", "ld", "st", "sy" }
+ };
+ int domain = instr->ImmBarrierDomain();
+ int type = instr->ImmBarrierType();
+
+ AppendToOutput("%s", options[domain][type]);
+ return 1;
+}
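+
+// Example (illustrative only): ImmBarrierDomain() == 2 and
+// ImmBarrierType() == 3 select "ish", so a data memory barrier disassembles
+// as "dmb ish".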
+
+
+void Disassembler::ResetOutput() {
+ buffer_pos_ = 0;
+ buffer_[buffer_pos_] = 0;
+}
+
+
+void Disassembler::AppendToOutput(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+  buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
+                           format, args);
+ va_end(args);
+}
+
+
+void PrintDisassembler::ProcessOutput(Instruction* instr) {
+ fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
+ reinterpret_cast<uint64_t>(instr), instr->InstructionBits(),
+ GetOutput());
+}
+
+} } // namespace v8::internal
+
+
+namespace disasm {
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ unsigned ureg = reg; // Avoid warnings about signed/unsigned comparisons.
+ if (ureg >= v8::internal::kNumberOfRegisters) {
+ return "noreg";
+ }
+ if (ureg == v8::internal::kZeroRegCode) {
+ return "xzr";
+ }
+ v8::internal::SNPrintF(tmp_buffer_, "x%u", ureg);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // ARM64 does not have the concept of a byte register.
+ return "nobytereg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  UNREACHABLE();  // ARM64 does not have any XMM registers.
+ return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // The default name converter is called for unknown code, so we will not try
+ // to access any memory.
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+class BufferDisassembler : public v8::internal::Disassembler {
+ public:
+ explicit BufferDisassembler(v8::internal::Vector<char> out_buffer)
+ : out_buffer_(out_buffer) { }
+
+ ~BufferDisassembler() { }
+
+ virtual void ProcessOutput(v8::internal::Instruction* instr) {
+ v8::internal::SNPrintF(out_buffer_, "%s", GetOutput());
+ }
+
+ private:
+ v8::internal::Vector<char> out_buffer_;
+};
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instr) {
+ v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
+ BufferDisassembler disasm(buffer);
+ decoder.AppendVisitor(&disasm);
+
+ decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(instr));
+ return v8::internal::kInstructionSize;
+}
+
+
+int Disassembler::ConstantPoolSizeAt(byte* instr) {
+ return v8::internal::Assembler::ConstantPoolSizeAt(
+ reinterpret_cast<v8::internal::Instruction*>(instr));
+}
+
+
+void Disassembler::Disassemble(FILE* file, byte* start, byte* end) {
+ v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
+ v8::internal::PrintDisassembler disasm(file);
+ decoder.AppendVisitor(&disasm);
+
+ for (byte* pc = start; pc < end; pc += v8::internal::kInstructionSize) {
+ decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(pc));
+ }
+}
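+
+// Usage sketch (illustrative only; assumes the static Disassemble()
+// declaration from src/disasm.h used by the other ports):
+//
+//   disasm::Disassembler::Disassemble(stdout, begin, end);
+//
+// prints one line per instruction, formatted as in
+// PrintDisassembler::ProcessOutput() above.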
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/disasm-arm64.h b/chromium/v8/src/arm64/disasm-arm64.h
new file mode 100644
index 00000000000..a7db4d2414d
--- /dev/null
+++ b/chromium/v8/src/arm64/disasm-arm64.h
@@ -0,0 +1,92 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_DISASM_ARM64_H
+#define V8_ARM64_DISASM_ARM64_H
+
+#include "src/v8.h"
+
+#include "src/globals.h"
+#include "src/utils.h"
+#include "src/arm64/instructions-arm64.h"
+#include "src/arm64/decoder-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+
+class Disassembler: public DecoderVisitor {
+ public:
+ Disassembler();
+ Disassembler(char* text_buffer, int buffer_size);
+ virtual ~Disassembler();
+ char* GetOutput();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ protected:
+ virtual void ProcessOutput(Instruction* instr);
+
+ void Format(Instruction* instr, const char* mnemonic, const char* format);
+ void Substitute(Instruction* instr, const char* string);
+ int SubstituteField(Instruction* instr, const char* format);
+ int SubstituteRegisterField(Instruction* instr, const char* format);
+ int SubstituteImmediateField(Instruction* instr, const char* format);
+ int SubstituteLiteralField(Instruction* instr, const char* format);
+ int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
+ int SubstituteShiftField(Instruction* instr, const char* format);
+ int SubstituteExtendField(Instruction* instr, const char* format);
+ int SubstituteConditionField(Instruction* instr, const char* format);
+ int SubstitutePCRelAddressField(Instruction* instr, const char* format);
+ int SubstituteBranchTargetField(Instruction* instr, const char* format);
+ int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
+ int SubstitutePrefetchField(Instruction* instr, const char* format);
+ int SubstituteBarrierField(Instruction* instr, const char* format);
+
+ bool RdIsZROrSP(Instruction* instr) const {
+ return (instr->Rd() == kZeroRegCode);
+ }
+
+ bool RnIsZROrSP(Instruction* instr) const {
+ return (instr->Rn() == kZeroRegCode);
+ }
+
+ bool RmIsZROrSP(Instruction* instr) const {
+ return (instr->Rm() == kZeroRegCode);
+ }
+
+ bool RaIsZROrSP(Instruction* instr) const {
+ return (instr->Ra() == kZeroRegCode);
+ }
+
+ bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+
+ void ResetOutput();
+ void AppendToOutput(const char* string, ...);
+
+ char* buffer_;
+ uint32_t buffer_pos_;
+ uint32_t buffer_size_;
+ bool own_buffer_;
+};
+
+
+class PrintDisassembler: public Disassembler {
+ public:
+ explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
+ ~PrintDisassembler() { }
+
+ virtual void ProcessOutput(Instruction* instr);
+
+ private:
+ FILE *stream_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DISASM_ARM64_H
diff --git a/chromium/v8/src/arm64/frames-arm64.cc b/chromium/v8/src/arm64/frames-arm64.cc
new file mode 100644
index 00000000000..122ac218ced
--- /dev/null
+++ b/chromium/v8/src/arm64/frames-arm64.cc
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/assembler.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/frames.h"
+
+namespace v8 {
+namespace internal {
+
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/frames-arm64.h b/chromium/v8/src/arm64/frames-arm64.h
new file mode 100644
index 00000000000..557c955fe5c
--- /dev/null
+++ b/chromium/v8/src/arm64/frames-arm64.h
@@ -0,0 +1,109 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/arm64/constants-arm64.h"
+#include "src/arm64/assembler-arm64.h"
+
+#ifndef V8_ARM64_FRAMES_ARM64_H_
+#define V8_ARM64_FRAMES_ARM64_H_
+
+namespace v8 {
+namespace internal {
+
+const int kNumRegs = kNumberOfRegisters;
+// Registers x0-x17 are caller-saved.
+const int kNumJSCallerSaved = 18;
+const RegList kJSCallerSaved = 0x3ffff;
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of eight.
+// TODO(all): Refine this number.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
+#define kNumSafepointSavedRegisters \
+  CPURegList::GetSafepointSavedRegisters().Count()
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+ static const int kFrameSize = 2 * kPointerSize;
+
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
+ static const int kCallerPCOffset = 1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
+ static const int kSPOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kLastExitFrameField = kCodeOffset;
+
+ static const int kConstantPoolOffset = 0; // Not used
+};
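+
+// Layout implied by the offsets above (slots of kPointerSize, relative to
+// fp): the caller's SP is at fp + 2 slots, the return address at fp + 1,
+// the saved fp at fp + 0, the SP slot at fp - 1 and the code object at
+// fp - 2.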
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+
+ // There are two words on the stack (saved fp and saved lr) between fp and
+ // the arguments.
+ static const int kLastParameterOffset = 2 * kPointerSize;
+
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_FRAMES_ARM64_H_
diff --git a/chromium/v8/src/arm64/full-codegen-arm64.cc b/chromium/v8/src/arm64/full-codegen-arm64.cc
new file mode 100644
index 00000000000..4a44e35b05d
--- /dev/null
+++ b/chromium/v8/src/arm64/full-codegen-arm64.cc
@@ -0,0 +1,4884 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
+
+#include "src/arm64/code-stubs-arm64.h"
+#include "src/arm64/macro-assembler-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm), reg_(NoReg) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ if (patch_site_.is_bound()) {
+ ASSERT(info_emitted_);
+ } else {
+ ASSERT(reg_.IsNone());
+ }
+ }
+
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
+ InstructionAccurateScope scope(masm_, 1);
+ ASSERT(!info_emitted_);
+ ASSERT(reg.Is64Bits());
+ ASSERT(!reg.Is(csp));
+ reg_ = reg;
+ __ bind(&patch_site_);
+ __ tbz(xzr, 0, target); // Always taken before patched.
+ }
+
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
+ InstructionAccurateScope scope(masm_, 1);
+ ASSERT(!info_emitted_);
+ ASSERT(reg.Is64Bits());
+ ASSERT(!reg.Is(csp));
+ reg_ = reg;
+ __ bind(&patch_site_);
+ __ tbnz(xzr, 0, target); // Never taken before patched.
+ }
+
+ void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) {
+ UseScratchRegisterScope temps(masm_);
+ Register temp = temps.AcquireX();
+ __ Orr(temp, reg1, reg2);
+ EmitJumpIfNotSmi(temp, target);
+ }
+
+ void EmitPatchInfo() {
+ Assembler::BlockPoolsScope scope(masm_);
+ InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+ Register reg_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
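+
+// Patching sketch (an inference from the comments above, not taken from
+// PatchInlinedSmiCode itself): the site is emitted as "tbz xzr, #0, target"
+// (always taken, since bit 0 of xzr is clear) or "tbnz xzr, #0, target"
+// (never taken). Enabling the inlined smi check later rewrites the
+// instruction to test bit 0, the smi tag, of the register recorded by
+// EmitPatchInfo().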
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// - x1: the JS function object being called (i.e. ourselves).
+// - cp: our context.
+// - fp: our caller's frame pointer.
+// - jssp: stack pointer.
+// - lr: return address.
+//
+// The function builds a JS frame. See JavaScriptFrameConstants in
+// frames-arm64.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ Function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ Debug("stop-at", __LINE__, BREAK);
+ }
+#endif
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ int receiver_offset = info->scope()->num_parameters() * kXRegSize;
+ __ Peek(x10, receiver_offset);
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Poke(x10, receiver_offset);
+
+ __ Bind(&ok);
+ }
+
+
+ // Open a frame scope to indicate that there is a frame on the stack.
+ // The MANUAL indicates that the scope shouldn't actually generate code
+ // to set up the frame because we do it manually below.
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+ // This call emits the following sequence in a way that can be patched for
+ // code ageing support:
+ // Push(lr, fp, cp, x1);
+ // Add(fp, jssp, 2 * kPointerSize);
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(info->IsCodePreAgingActive());
+ info->AddNoFrameRange(0, masm_->pc_offset());
+
+ // Reserve space on the stack for locals.
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
+
+ if (locals_count > 0) {
+ if (locals_count >= 128) {
+ Label ok;
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Sub(x10, jssp, locals_count * kPointerSize);
+ __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ B(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ Bind(&ok);
+ }
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ if (FLAG_optimize_for_size) {
+      __ PushMultipleTimes(x10, locals_count);
+ } else {
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ Mov(x3, loop_iterations);
+ Label loop_header;
+ __ Bind(&loop_header);
+ // Do pushes.
+        __ PushMultipleTimes(x10, kMaxPushes);
+ __ Subs(x3, x3, 1);
+ __ B(ne, &loop_header);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+      __ PushMultipleTimes(x10, remaining);
+ }
+ }
+ }
+
+ bool function_in_register_x1 = true;
+
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ // Argument to NewContext is the function, which is still in x1.
+ Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
+ __ Push(x1, x10);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ function_in_register_x1 = false;
+ // Context is returned in x0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ Mov(cp, x0);
+ __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = info->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ Ldr(x10, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ Str(x10, target);
+
+ // Update the write barrier.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register_x1) {
+ // Load this again, if it's used by the local context below.
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ Mov(x3, x1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset);
+ __ Mov(x1, Smi::FromInt(num_parameters));
+ __ Push(x3, x2, x1);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (strict_mode() == STRICT) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
+ }
+ ArgumentsAccessStub stub(isolate(), type);
+ __ CallStub(&stub);
+
+ SetVar(arguments, x0, x1, x2);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ { Comment cmnt(masm_, "[ Declarations");
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ VariableDeclaration* function = scope()->function();
+ ASSERT(function->proxy()->var()->mode() == CONST ||
+ function->proxy()->var()->mode() == CONST_LEGACY);
+ ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ VisitVariableDeclaration(function);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ Bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+
+ // Force emission of the pools, so they don't get emitted in the middle
+ // of the back edge table.
+ masm()->CheckVeneerPool(true, false);
+ masm()->CheckConstPool(true, false);
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ __ Mov(x0, Smi::FromInt(0));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ Mov(x2, Operand(profiling_counter_));
+ __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset));
+ __ Subs(x3, x3, Smi::FromInt(delta));
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (info_->is_debug()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = FLAG_interrupt_budget >> 4;
+ }
+ __ Mov(x2, Operand(profiling_counter_));
+ __ Mov(x3, Smi::FromInt(reset_value));
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ ASSERT(jssp.Is(__ StackPointer()));
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ // Block literal pools whilst emitting back edge code.
+ Assembler::BlockPoolsScope block_const_pool(masm_);
+ Label ok;
+
+ ASSERT(back_edge_target->is_bound());
+ // We want to do a round rather than a floor of distance/kCodeSizeMultiplier
+ // to reduce the absolute error due to the integer division. To do that,
+ // we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to
+ // the result).
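+  // (I.e. for m = kCodeSizeMultiplier and d >= 0,
+  // floor((d + m/2) / m) == round(d / m).)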
+ int distance =
+ masm_->SizeOfCodeGeneratedSince(back_edge_target) + kCodeSizeMultiplier / 2;
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ __ B(pl, &ok);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+
+ EmitProfilingCounterReset();
+
+ __ Bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+
+ if (return_label_.is_bound()) {
+ __ B(&return_label_);
+
+ } else {
+ __ Bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in x0.
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ ASSERT(x0.Is(result_register()));
+ }
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ B(pl, &ok);
+ __ Push(x0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(x0);
+ EmitProfilingCounterReset();
+ __ Bind(&ok);
+
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence. This sequence can get patched when the debugger is used. See
+ // debug-arm64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
+ {
+ InstructionAccurateScope scope(masm_,
+ Assembler::kJSRetSequenceInstructions);
+ CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ __ RecordJSReturn();
+ // This code is generated using Assembler methods rather than Macro
+ // Assembler methods because it will be patched later on, and so the size
+ // of the generated code must be consistent.
+ const Register& current_sp = __ StackPointer();
+      // Nothing ensures 16-byte alignment here.
+ ASSERT(!current_sp.Is(csp));
+ __ mov(current_sp, fp);
+ int no_frame_start = masm_->pc_offset();
+ __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
+ // Drop the arguments and receiver and return.
+ // TODO(all): This implementation is overkill as it supports 2**31+1
+ // arguments, consider how to improve it without creating a security
+ // hole.
+ __ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
+ __ add(current_sp, current_sp, ip0);
+ __ ret();
+ __ dc64(kXRegSize * (info_->scope()->num_parameters() + 1));
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+ }
+}
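+
+// Schematically (illustrative only), the patchable sequence emitted above is:
+//   mov jssp, fp
+//   ldp fp, lr, [jssp], #16
+//   ldr ip0, pc+12   ; loads the operand-count constant emitted below
+//   add jssp, jssp, ip0
+//   ret
+//   dc64 kXRegSize * (num_parameters + 1)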
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ // For simplicity we always test the accumulator register.
+ codegen()->GetVar(result_register(), var);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+ // Root values have no side effects.
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ Mov(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ Mov(result_register(), Operand(lit));
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ Mov(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ Poke(reg, 0);
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Mov(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == materialize_false);
+ __ Bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ Bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(&done);
+ __ Bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ Bind(materialize_true);
+ __ LoadRoot(x10, Heap::kTrueValueRootIndex);
+ __ B(&done);
+ __ Bind(materialize_false);
+ __ LoadRoot(x10, Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == true_label_);
+ ASSERT(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(x10, value_root_index);
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) {
+ __ B(true_label_);
+ }
+ } else {
+ if (false_label_ != fall_through_) {
+ __ B(false_label_);
+ }
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
+}
+
+
+// If (cond), branch to if_true.
+// If (!cond), branch to if_false.
+// fall_through is used as an optimization in cases where only one branch
+// instruction is necessary.
+void FullCodeGenerator::Split(Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ B(cond, if_true);
+ } else if (if_true == fall_through) {
+ ASSERT(if_false != fall_through);
+ __ B(NegateCondition(cond), if_false);
+ } else {
+ __ B(cond, if_true);
+ __ B(if_false);
+ }
+}
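+
+// Example (illustrative only): Split(eq, if_true, if_false, if_true) emits a
+// single "b.ne if_false" and lets the true case fall through.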
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kXRegSize;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
+ return MemOperand(fp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextMemOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ // Use destination as scratch.
+ MemOperand location = VarOperand(var, dest);
+ __ Ldr(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+ Register src,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ ASSERT(!AreAliased(src, scratch0, scratch1));
+ MemOperand location = VarOperand(var, scratch0);
+ __ Str(src, location);
+
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ // scratch0 contains the correct context.
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ // TODO(all): Investigate to see if there is something to work on here.
+ Label skip;
+ if (should_normalize) {
+ __ B(&skip);
+ }
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, NULL);
+ __ Bind(&skip);
+ }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current function
+ // context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (generate_debug_code_) {
+ // Check that we're not inside a with or catch context.
+ __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(x1, Heap::kWithContextMapRootIndex);
+ __ Check(ne, kDeclarationInWithContext);
+ __ CompareRoot(x1, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, kDeclarationInCatchContext);
+ }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(),
+ zone());
+ break;
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Str(x10, StackOperand(variable));
+ }
+ break;
+
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Str(x10, ContextMemOperand(cp, variable->index()));
+ // No write barrier since the_hole_value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ Mov(x2, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
+ : NONE;
+ __ Mov(x1, Smi::FromInt(attr));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
+ __ Push(cp, x2, x1, x0);
+ } else {
+ // Pushing 0 (xzr) indicates no initial value.
+ __ Push(cp, x2, x1, xzr);
+ }
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), script());
+ // Check for stack overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ Str(result_register(), StackOperand(variable));
+ break;
+ }
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ Str(result_register(), ContextMemOperand(cp, variable->index()));
+ int offset = Context::SlotOffset(variable->index());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ x2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ __ Mov(x2, Operand(variable->name()));
+ __ Mov(x1, Smi::FromInt(NONE));
+ __ Push(cp, x2, x1);
+ // Push initial value for function declaration.
+ VisitForStackValue(declaration->fun());
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
+
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+
+ // Load instance object.
+ __ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
+ __ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
+
+ // Assign it.
+ __ Str(x1, ContextMemOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ x1,
+ x3,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+  // Traverse the module body.
+ Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ Mov(x11, Operand(pairs));
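+  // When there are no flags, push xzr directly rather than materializing a
+  // zero smi in a scratch register.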
+ Register flags = xzr;
+ if (Smi::FromInt(DeclareGlobalsFlags())) {
+ flags = x10;
+ __ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
+ }
+ __ Push(cp, x11, flags);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement");
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as the final fall-through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ Bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+
+ // Perform the comparison as if via '==='.
+ __ Peek(x1, 0); // Switch value.
+
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
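+      // Fast path: when both the switch value and the label are smis,
+      // compare them directly and skip the CompareIC call below.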
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ __ B(ne, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ B(clause->body_target());
+ __ Bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ B(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
+ __ Drop(1);
+ __ B(clause->body_target());
+ __ Bind(&skip);
+
+ __ Cbnz(x0, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ B(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ Bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ B(nested_statement.break_label());
+ } else {
+ __ B(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ Bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ Bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
+ Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+ // TODO(all): This visitor probably needs better comments and a revisit.
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
+ Register null_value = x15;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Cmp(x0, null_value);
+ __ B(eq, &exit);
+
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(x0, &convert);
+ __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
+ __ Bind(&convert);
+ __ Push(x0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Bind(&done_convert);
+ __ Push(x0);
+
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ B(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ Bind(&call_runtime);
+ __ Push(x0); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array, no_descriptors;
+ __ Ldr(x2, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x2, Heap::kMetaMapRootIndex, &fixed_array);
+
+ // We got a map in register x0. Get the enumeration cache from it.
+ __ Bind(&use_cache);
+
+ __ EnumLengthUntagged(x1, x0);
+ __ Cbz(x1, &no_descriptors);
+
+ __ LoadInstanceDescriptors(x0, x2);
+ __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(x2,
+ FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ Push(x0, x2); // Map, enumeration cache.
+ __ SmiTagAndPush(x1, xzr); // Enum cache length, zero (both as smis).
+ __ B(&loop);
+
+ __ Bind(&no_descriptors);
+ __ Drop(1);
+ __ B(&exit);
+
+ // We got a fixed array in register x0. Iterate through that.
+ __ Bind(&fixed_array);
+
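+  // Store the megamorphic sentinel in the for-in feedback slot: a plain
+  // fixed array of names gives no map to cache.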
+ __ LoadObject(x1, FeedbackVector());
+ __ Mov(x10, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
+
+ __ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
+ __ Peek(x10, 0); // Get enumerated object.
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ // TODO(all): similar check was done already. Can we avoid it here?
+ __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
+ ASSERT(Smi::FromInt(0) == 0);
+ __ CzeroX(x1, le); // Zero indicates proxy.
+ __ Push(x1, x0); // Smi and array
+ __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset));
+ __ Push(x1, xzr); // Fixed array length (as smi) and initial index.
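+  // The loop below expects five slots on the stack (from the top):
+  //   jssp[0]  : current index (smi)
+  //   jssp[8]  : array length (smi)
+  //   jssp[16] : FixedArray of keys
+  //   jssp[24] : map of the enumerable, or a smi (0 = proxy, 1 = slow check)
+  //   jssp[32] : the enumerable object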
+
+ // Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ __ Bind(&loop);
+ // Load the current count to x0, load the length to x1.
+ __ PeekPair(x0, x1, 0);
+ __ Cmp(x0, x1); // Compare to the array length.
+ __ B(hs, loop_statement.break_label());
+
+  // Get the current entry of the array into register x3.
+ __ Peek(x10, 2 * kXRegSize);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
+ __ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // Get the expected map from the stack or a smi in the
+  // permanent slow case into register x2.
+ __ Peek(x2, 3 * kXRegSize);
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ Peek(x1, 4 * kXRegSize);
+ __ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset));
+ __ Cmp(x11, x2);
+ __ B(eq, &update_each);
+
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Cbz(x2, &update_each);
+
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
+ // just skip it.
+ __ Push(x1, x3);
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ Mov(x3, x0);
+ __ Cbz(x0, loop_statement.continue_label());
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register x3.
+ __ Bind(&update_each);
+ __ Mov(result_register(), x3);
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitAssignment(stmt->each());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Generate code for going to the next element by incrementing
+ // the index (smi) stored on top of the stack.
+ __ Bind(loop_statement.continue_label());
+ // TODO(all): We could use a callee saved register to avoid popping.
+ __ Pop(x0);
+ __ Add(x0, x0, Smi::FromInt(1));
+ __ Push(x0);
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ B(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ Bind(loop_statement.break_label());
+ __ Drop(5);
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ Bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterable = subject
+ VisitForAccumulatorValue(stmt->assign_iterable());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ Register iterator = x0;
+ __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex,
+ loop_statement.break_label());
+ __ JumpIfRoot(iterator, Heap::kNullValueRootIndex,
+ loop_statement.break_label());
+
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
+
+ // Loop entry.
+ __ Bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ Bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ B(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ Bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new space for
+ // nested functions that don't need literals cloning. If we're running with
+ // the --always-opt or the --prepare-always-opt flag, we need to use the
+ // runtime function so that the new function we are creating here gets a
+ // chance to have its code optimized and doesn't just get a copy of the
+ // existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
+ __ Mov(x2, Operand(info));
+ __ CallStub(&stub);
+ } else {
+ __ Mov(x11, Operand(info));
+ __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
+ : Heap::kFalseValueRootIndex);
+ __ Push(cp, x11, x10);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ __ Mov(next, current);
+
+ __ Bind(&loop);
+ // Terminate at native context.
+ __ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
+ __ B(&loop);
+ __ Bind(&fast);
+ }
+
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ ASSERT(var->IsContextSlot());
+ Register context = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+
+ // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextMemOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ __ B(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
+ if (local->mode() == CONST_LEGACY) {
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ }
+ }
+ __ B(done);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in x2 and the global
+ // object (receiver) in x0.
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ CallLoadIC(CONTEXTUAL);
+ context()->Plug(x0);
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, var->IsContextSlot()
+ ? "Context variable"
+ : "Stack variable");
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+        // var->location() == LOOKUP always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST_LEGACY &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(x0, var);
+ Label done;
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ __ Bind(&done);
+ } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST_LEGACY);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
+ }
+ context()->Plug(x0);
+ break;
+ }
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ Bind(&slow);
+ Comment cmnt(masm_, "Lookup variable");
+ __ Mov(x1, Operand(var->name()));
+ __ Push(cp, x1); // Context and name.
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Bind(&done);
+ context()->Plug(x0);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // x5 = materialized value (RegExp literal)
+ // x4 = JS function, literals array
+ // x3 = literal index
+ // x2 = RegExp pattern
+ // x1 = RegExp flags
+ // x0 = RegExp literal clone
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ Ldr(x5, FieldMemOperand(x4, literal_offset));
+ __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
+
+ // Create regexp literal using runtime function.
+ // Result will be in x0.
+ __ Mov(x3, Smi::FromInt(expr->literal_index()));
+ __ Mov(x2, Operand(expr->pattern()));
+ __ Mov(x1, Operand(expr->flags()));
+ __ Push(x4, x3, x2, x1);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+ __ Mov(x5, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ __ Mov(x10, Smi::FromInt(size));
+ __ Push(x5, x10);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Pop(x5);
+
+ __ Bind(&allocated);
+ // After this, registers are used as follows:
+ // x0: Newly allocated regexp.
+ // x5: Materialized regexp.
+ // x10, x11, x12: temps.
+ __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(x10, Heap::kNullValueRootIndex);
+ __ Push(x10);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x1, Operand(constant_properties));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Mov(x0, Smi::FromInt(flags));
+ int properties_count = constant_properties->length() / 2;
+ const int max_cloned_properties =
+ FastCloneShallowObjectStub::kMaximumClonedProperties;
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
+ properties_count > max_cloned_properties) {
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+ } else {
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
+ __ CallStub(&stub);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in x0.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ Push(x0); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ Mov(x2, Operand(key->value()));
+ __ Peek(x1, 0);
+ CallStoreIC(key->LiteralFeedbackId());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ if (property->emit_store()) {
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ __ Push(x0);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ __ Mov(x0, Smi::FromInt(NONE)); // PropertyAttributes
+ __ Push(x0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ VisitForEffect(key);
+ VisitForEffect(value);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ if (property->emit_store()) {
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ __ Push(x0);
+ VisitForStackValue(value);
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ Peek(x10, 0); // Duplicate receiver.
+ __ Push(x10);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ Mov(x10, Smi::FromInt(NONE));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ Peek(x0, 0);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ expr->BuildConstantElements(isolate());
+ int flags = (expr->depth() == 1) ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x1, Operand(constant_elements));
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
+ __ Mov(x0, Smi::FromInt(flags));
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+ } else {
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
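+      // Keep the array and its literal index on the stack while the
+      // elements are stored; both are consumed once the literal is done.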
+ __ Push(x0);
+ __ Push(Smi::FromInt(expr->literal_index()));
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ Peek(x6, kPointerSize); // Copy of array literal.
+ __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
+ __ Str(result_register(), FieldMemOperand(x1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(x1, offset, result_register(), x10,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ Mov(x3, Smi::FromInt(i));
+ StoreArrayLiteralElementStub stub(isolate());
+ __ CallStub(&stub);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ __ Drop(1); // literal index
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ Assignment");
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForAccumulatorValue(property->obj());
+ __ Push(result_register());
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY:
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ __ Peek(x1, 0);
+ __ Push(x0);
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ Push(x0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ Mov(x2, Operand(key->value()));
+  // Call load IC. It has arguments receiver and property name in x0 and x2.
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+  // Call keyed load IC. It has arguments key and receiver in x0 and x1.
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, both_smis, stub_call;
+
+ // Get the arguments.
+ Register left = x1;
+ Register right = x0;
+ Register result = x0;
+ __ Pop(left);
+
+ // Perform combined smi check on both operands.
+ __ Orr(x10, left, right);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(x10, &both_smis);
+
+ __ Bind(&stub_call);
+ BinaryOpICStub stub(isolate(), op, mode);
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ __ B(&done);
+
+ __ Bind(&both_smis);
+ // Smi case. This code works in the same way as the smi-smi case in the type
+ // recording binary operation stub, see
+ // BinaryOpStub::GenerateSmiSmiOperation for comments.
+ // TODO(all): That doesn't exist any more. Where are the comments?
+ //
+ // The set of operations that needs to be supported here is controlled by
+ // FullCodeGenerator::ShouldInlineSmiCase().
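+  //
+  // For the shift operations below, the shift count is untagged and masked
+  // to five bits, matching JavaScript's 'count & 0x1F' semantics.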
+ switch (op) {
+ case Token::SAR:
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Asr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsl(result, left, right);
+ break;
+ case Token::SHR: {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ __ Tbnz(left, kXSignBit, &stub_call);
+ __ Bind(&right_not_zero);
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ }
+ case Token::ADD:
+ __ Adds(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::SUB:
+ __ Subs(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::MUL: {
+ Label not_minus_zero, done;
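+      // Both operands are tagged smis with the 32-bit payload in the upper
+      // word, so the high 64 bits of the 128-bit product (Smulh) hold the
+      // product of the untagged values.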
+ __ Smulh(x10, left, right);
+ __ Cbnz(x10, &not_minus_zero);
+ __ Eor(x11, left, right);
+ __ Tbnz(x11, kXSignBit, &stub_call);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Mov(result, x10);
+ __ B(&done);
+ __ Bind(&not_minus_zero);
+ __ Cls(x11, x10);
+ __ Cmp(x11, kXRegSizeInBits - kSmiShift);
+ __ B(lt, &stub_call);
+ __ SmiTag(result, x10);
+ __ Bind(&done);
+ break;
+ }
+ case Token::BIT_OR:
+ __ Orr(result, left, right);
+ break;
+ case Token::BIT_AND:
+ __ And(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ Eor(result, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ Pop(x1);
+ BinaryOpICStub stub(isolate(), op, mode);
+ JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ ASSERT(expr->IsValidReferenceExpression());
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
+ // this copy.
+ __ Mov(x1, x0);
+ __ Pop(x0); // Restore value.
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ CallStoreIC();
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Mov(x1, x0);
+ __ Pop(x2, x0);
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ Str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Mov(x10, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ Mov(x11, Operand(name));
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ // jssp[0] : mode.
+ // jssp[8] : name.
+ // jssp[16] : context.
+ // jssp[24] : value.
+ __ Push(x0, cp, x11, x10);
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op) {
+ ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ Mov(x2, Operand(var->name()));
+ __ Ldr(x1, GlobalObjectMemOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ ASSERT(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ Push(x0);
+ __ Mov(x0, Operand(var->name()));
+ __ Push(cp, x0); // Context and name.
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ Bind(&skip);
+ }
+
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
+ __ Mov(x10, Operand(var->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ // Perform the assignment.
+ __ Bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ MemOperand location = VarOperand(var, x1);
+ if (FLAG_debug_code && op == Token::INIT_LET) {
+ __ Ldr(x10, location);
+ __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ }
+ // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->IsLiteral());
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(x1);
+
+ CallStoreIC(expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
+ // Assignment to a property, using a keyed store IC.
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ // TODO(all): Could we pass this in registers rather than on the stack?
+ __ Pop(x1, x2); // Key and object holding the property.
+
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(x0);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Pop(x1);
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ // All calls must have a predictable size in full-codegen code to ensure that
+ // the debugger can patch them correctly.
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ Peek(x0, 0);
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+ }
+
+ EmitCall(expr, call_type);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ Peek(x1, 0);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+
+ EmitCall(expr, CallIC::METHOD);
+}
+
+
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ Mov(x3, Smi::FromInt(expr->CallFeedbackSlot()));
+ __ Peek(x1, (arg_count + 1) * kXRegSize);
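+  // x1 now holds the function, which sits below the receiver and the
+  // arguments on the stack.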
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
+ // Prepare to push a copy of the first argument or undefined if it doesn't
+ // exist.
+ if (arg_count > 0) {
+ __ Peek(x10, arg_count * kXRegSize);
+ } else {
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ }
+
+ // Prepare to push the receiver of the enclosing function.
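+  // The offset of 2 skips the caller's saved fp and lr.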
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
+
+ // Push.
+ __ Push(x10, x11);
+
+ // Prepare to push the language mode.
+ __ Mov(x10, Smi::FromInt(strict_mode()));
+  // Prepare to push the start position of the scope the call resides in.
+ __ Mov(x11, Smi::FromInt(scope()->start_position()));
+
+ // Push.
+ __ Push(x10, x11);
+
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ {
+ PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10); // Reserved receiver slot.
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ Peek(x10, (arg_count + 1) * kPointerSize);
+ __ Push(x10);
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // The runtime call returns a pair of values in x0 (function) and
+ // x1 (receiver). Touch up the stack with the right values.
+ __ PokePair(x1, x0, arg_count * kPointerSize);
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+
+ // Call the evaluated function.
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ Peek(x1, (arg_count + 1) * kXRegSize);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
+
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+ // Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
+ Label slow, done;
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+ }
+
+ __ Bind(&slow);
+ // Call the runtime to find the function to call (returned in x0)
+ // and the object holding it (returned in x1).
+ __ Push(context_register());
+ __ Mov(x10, Operand(proxy->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+    __ Push(x0, x1); // Function, receiver.
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ B(&call);
+ __ Bind(&done);
+ // Push function.
+ __ Push(x0);
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the undefined to the call function stub.
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+ __ Push(x1);
+ __ Bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot.
+ EmitCall(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (property->key()->IsPropertyName()) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
+
+ } else {
+ ASSERT(call_type == Call::OTHER_CALL);
+ // Call to an arbitrary expression not handled specially above.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ }
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+ __ Push(x1);
+ // Emit function call.
+ EmitCall(expr);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into x1 and x0.
+ __ Mov(x0, arg_count);
+ __ Peek(x1, arg_count * kXRegSize);
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ LoadObject(x2, FeedbackVector());
+ __ Mov(x3, Smi::FromInt(expr->CallNewFeedbackSlot()));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ TestAndSplit(x0, kSmiTagMask, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
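+  // A single mask tests both the smi tag and the sign bit of the 32-bit
+  // smi payload (bit 63 after tagging).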
+ __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
+ if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ Tbnz(x11, Map::kIsUndetectable, if_false);
+ __ Ldrb(x12, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Cmp(x12, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(lt, if_false);
+ __ Cmp(x12, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(le, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ge, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ Tst(x11, 1 << Map::kIsUndetectable);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ne, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Register object = x0;
+ __ AssertNotSmi(object);
+
+ Register map = x10;
+ Register bitfield2 = x11;
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ldrb(bitfield2, FieldMemOperand(map, Map::kBitField2Offset));
+ __ Tbnz(bitfield2, Map::kStringWrapperSafeForDefaultValueOf, &skip_lookup);
+
+ // Check for fast case object. Generate false result for slow case object.
+ Register props = x12;
+ Register props_map = x12;
+ Register hash_table_map = x13;
+ __ Ldr(props, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ldr(props_map, FieldMemOperand(props, HeapObject::kMapOffset));
+ __ LoadRoot(hash_table_map, Heap::kHashTableMapRootIndex);
+ __ Cmp(props_map, hash_table_map);
+ __ B(eq, if_false);
+
+  // Look for the valueOf name in the descriptor array, and indicate false if
+  // it is found. Since we omit an enumeration index check, a valueOf property
+  // added via a transition that shares the descriptor array is also detected;
+  // this is a harmless (conservative) false positive.
+ Label loop, done;
+
+ // Skip loop if no descriptors are valid.
+ Register descriptors = x12;
+ Register descriptors_length = x13;
+ __ NumberOfOwnDescriptors(descriptors_length, map);
+ __ Cbz(descriptors_length, &done);
+
+ __ LoadInstanceDescriptors(map, descriptors);
+
+ // Calculate the end of the descriptor array.
+ Register descriptors_end = x14;
+ __ Mov(x15, DescriptorArray::kDescriptorSize);
+ __ Mul(descriptors_length, descriptors_length, x15);
+ // Calculate location of the first key name.
+ __ Add(descriptors, descriptors,
+ DescriptorArray::kFirstOffset - kHeapObjectTag);
+ // Calculate the end of the descriptor array.
+ __ Add(descriptors_end, descriptors,
+ Operand(descriptors_length, LSL, kPointerSizeLog2));
+
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // string "valueOf" the result is false.
+ Register valueof_string = x1;
+ int descriptor_size = DescriptorArray::kDescriptorSize * kPointerSize;
+ __ Mov(valueof_string, Operand(isolate()->factory()->value_of_string()));
+ __ Bind(&loop);
+ __ Ldr(x15, MemOperand(descriptors, descriptor_size, PostIndex));
+ __ Cmp(x15, valueof_string);
+ __ B(eq, if_false);
+ __ Cmp(descriptors, descriptors_end);
+ __ B(ne, &loop);
+
+ __ Bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ Ldrb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+ __ Orr(x2, x2, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ Strb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+
+ __ Bind(&skip_lookup);
+
+  // If no valueOf property was found on the object itself, check that its
+  // prototype is the unmodified String prototype. If it is not, the result
+  // is false.
+ Register prototype = x1;
+ Register global_idx = x2;
+ Register native_context = x2;
+ Register string_proto = x3;
+ Register proto_map = x4;
+ __ Ldr(prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ JumpIfSmi(prototype, if_false);
+ __ Ldr(proto_map, FieldMemOperand(prototype, HeapObject::kMapOffset));
+ __ Ldr(global_idx, GlobalObjectMemOperand());
+ __ Ldr(native_context,
+ FieldMemOperand(global_idx, GlobalObject::kNativeContextOffset));
+ __ Ldr(string_proto,
+ ContextMemOperand(native_context,
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ Cmp(proto_map, string_proto);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Only a HeapNumber can be -0.0, so return false if we have something else.
+ __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+
+ // Test the bit pattern.
+ __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
+ __ Cmp(x10, 1); // Set V on 0x8000000000000000.
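+  // Only -0.0 has the raw bit pattern 0x8000000000000000, which is INT64_MIN
+  // when read as an integer. Subtracting 1 from INT64_MIN overflows and sets
+  // the V flag, so branching on 'vs' selects exactly the minus-zero case.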
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(vs, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
+ __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ Bind(&check_frame_marker);
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ Pop(x1);
+ __ Cmp(x0, x1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in x1.
+ VisitForAccumulatorValue(args->at(0));
+ __ Mov(x1, x0);
+ __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ Label exit;
+ // Get the number of formal parameters.
+ __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset));
+ __ Cmp(x13, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ Bind(&exit);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitClassOf");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(x0, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ // x10: object's map.
+ // x11: object's type.
+ __ B(lt, &null);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ B(eq, &function);
+
+ __ Cmp(x11, LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ B(eq, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
+ __ Ldr(x12, FieldMemOperand(x10, Map::kConstructorOffset));
+ __ JumpIfNotObjectType(x12, x13, x14, JS_FUNCTION_TYPE,
+ &non_function_constructor);
+
+ // x12 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ Ldr(x13, FieldMemOperand(x12, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x0,
+ FieldMemOperand(x13, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ B(&done);
+
+ // Functions have class 'Function'.
+ __ Bind(&function);
+ __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex);
+ __ B(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ Bind(&non_function_constructor);
+ __ LoadRoot(x0, Heap::kObject_stringRootIndex);
+ __ B(&done);
+
+ // Non-JS objects have class null.
+ __ Bind(&null);
+ __ LoadRoot(x0, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ Bind(&done);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitValueOf");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(x0, &done);
+ // If the object is not a value type, return the object.
+ __ JumpIfNotObjectType(x0, x10, x11, JS_VALUE_TYPE, &done);
+ __ Ldr(x0, FieldMemOperand(x0, JSValue::kValueOffset));
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done, not_date_object;
+ Register object = x0;
+ Register result = x0;
+ Register stamp_addr = x10;
+ Register stamp_cache = x11;
+
+ __ JumpIfSmi(object, &not_date_object);
+ __ JumpIfNotObjectType(object, x10, x10, JS_DATE_TYPE, &not_date_object);
+
+ if (index->value() == 0) {
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ B(&done);
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
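+      // Cached date fields are valid only while the object's cache stamp
+      // matches the isolate's global date cache stamp; on a mismatch the
+      // field is recomputed via the runtime call below.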
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ Mov(x10, stamp);
+ __ Ldr(stamp_addr, MemOperand(x10));
+ __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Cmp(stamp_addr, stamp_cache);
+ __ B(ne, &runtime);
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ B(&done);
+ }
+
+ __ Bind(&runtime);
+ __ Mov(x1, index);
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ B(&done);
+ }
+
+ __ Bind(&not_date_object);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = x0;
+ Register index = x1;
+ Register value = x2;
+ Register scratch = x10;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(value, index);
+
+ if (FLAG_debug_code) {
+ __ AssertSmi(value, kNonSmiValue);
+ __ AssertSmi(index, kNonSmiIndex);
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+ one_byte_seq_type);
+ }
+
+ __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
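+  // The string pointer is tagged; adding the header size and subtracting
+  // kHeapObjectTag in one step yields the address of the first character, so
+  // the untagged index below can be used directly as a byte offset.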
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ Strb(value, MemOperand(scratch, index));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = x0;
+ Register index = x1;
+ Register value = x2;
+ Register scratch = x10;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(value, index);
+
+ if (FLAG_debug_code) {
+ __ AssertSmi(value, kNonSmiValue);
+ __ AssertSmi(index, kNonSmiIndex);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+ two_byte_seq_type);
+ }
+
+ __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ Strh(value, MemOperand(scratch, index, LSL, 1));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+ // Load the arguments on the stack and call the MathPow stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ Pop(x1);
+ // x0 = value.
+ // x1 = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(x1, &done);
+
+ // If the object is not a value type, return the value.
+ __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done);
+
+ // Store the value.
+ __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset));
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ Mov(x10, x0);
+ __ RecordWriteField(
+ x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument into x0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+
+ NumberToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ Register code = x0;
+ Register result = x1;
+
+ StringCharFromCodeGenerator generator(code, result);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = x1;
+ Register index = x0;
+ Register result = x3;
+
+ __ Pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
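+  // Both conversion slow paths are routed to &need_conversion (the generator
+  // signals a non-string receiver and a non-number index separately); in
+  // either case the handler below loads undefined, which makes the generated
+  // slow case perform the conversion.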
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ __ Bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = x1;
+ Register index = x0;
+ Register result = x0;
+
+ __ Pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ x3,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ __ Bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ B(&done);
+
+ __ Bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger conversion.
+ __ Mov(result, Smi::FromInt(0));
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ __ Pop(x1);
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(x0, &runtime);
+ __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime);
+
+  // InvokeFunction requires the function in x1. Move it there.
+ __ Mov(x1, x0);
+ ParameterCount count(arg_count);
+ __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper());
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ B(&done);
+
+ __ Bind(&runtime);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ Bind(&done);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ RegExpConstructResultStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(2));
+ __ Pop(x1, x2);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->native_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort(kAttemptToUseUndefinedCache);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(x0);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = x0;
+ Register cache = x1;
+ __ Ldr(cache, GlobalObjectMemOperand());
+ __ Ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+ __ Ldr(cache, ContextMemOperand(cache,
+ Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ Ldr(cache,
+ FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+ Label done;
+ __ Ldrsw(x2, UntagSmiFieldMemOperand(cache,
+ JSFunctionResultCache::kFingerOffset));
+ __ Add(x3, cache, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x3, x3, Operand(x2, LSL, kPointerSizeLog2));
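+  // Cache entries are stored as consecutive (key, value) pairs in the fixed
+  // array, and the finger is the element index of the most recently hit key,
+  // so a single load-pair fetches both the cached key and its value.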
+
+ // Load the key and data from the cache.
+ __ Ldp(x2, x3, MemOperand(x3));
+
+ __ Cmp(key, x2);
+ __ CmovX(x0, x3, eq);
+ __ B(eq, &done);
+
+ // Call runtime to perform the lookup.
+ __ Push(cache, key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ Tst(x10, String::kContainsCachedArrayIndexMask);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(x0);
+
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ IndexFromHash(x10, x0);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
+
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ Register array = x0;
+ Register result = x0;
+ Register elements = x1;
+ Register element = x2;
+ Register separator = x3;
+ Register array_length = x4;
+ Register result_pos = x5;
+ Register map = x6;
+ Register string_length = x10;
+ Register elements_end = x11;
+ Register string = x12;
+ Register scratch1 = x13;
+ Register scratch2 = x14;
+ Register scratch3 = x7;
+ Register separator_length = x15;
+
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+
+ // The separator operand is on the stack.
+ __ Pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout);
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(map, scratch1, &bailout);
+
+ // If the array has length zero, return the empty string.
+ // Load and untag the length of the array.
+ // It is an unsigned value, so we can skip sign extension.
+ // We assume little endianness.
+ __ Ldrsw(array_length,
+ UntagSmiFieldMemOperand(array, JSArray::kLengthOffset));
+ __ Cbnz(array_length, &non_trivial_array);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ B(&done);
+
+ __ Bind(&non_trivial_array);
+ // Get the FixedArray containing array's elements.
+ __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths.
+ __ Mov(string_length, 0);
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (not smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (FLAG_debug_code) {
+ __ Cmp(array_length, 0);
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ }
+ __ Bind(&loop);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ JumpIfSmi(string, &bailout);
+ __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ Ldrsw(scratch1,
+ UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ Adds(string_length, string_length, scratch1);
+ __ B(vs, &bailout);
+ __ Cmp(element, elements_end);
+ __ B(lt, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ Cmp(array_length, 1);
+ __ B(ne, &not_size_one_array);
+ __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ B(&done);
+
+ __ Bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array (not smi).
+ // string_length: Sum of string lengths (not smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string.
+ // Load the separator length as untagged.
+ // We assume little endianness, and that the length is positive.
+ __ Ldrsw(separator_length,
+ UntagSmiFieldMemOperand(separator,
+ SeqOneByteString::kLengthOffset));
+ __ Sub(string_length, string_length, separator_length);
+ __ Umaddl(string_length, array_length.W(), separator_length.W(),
+ string_length);
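+  // Joining n strings with a separator needs (n - 1) copies of it, so the
+  // result length is string_length + (array_length - 1) * separator_length,
+  // computed here as the multiply-accumulate
+  // (string_length - separator_length) + array_length * separator_length.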
+
+ // Get first element in the array.
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array (not smi).
+ __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3,
+ &bailout);
+
+ // Prepare for looping. Set up elements_end to end of the array. Set
+ // result_pos to the position of the result where to write the first
+ // character.
+ // TODO(all): useless unless AllocateAsciiString trashes the register.
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ // Check the length of the separator.
+ __ Cmp(separator_length, 1);
+ __ B(eq, &one_char_separator);
+ __ B(gt, &long_separator);
+
+ // Empty separator case
+ __ Bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &empty_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ // One-character separator case
+ __ Bind(&one_char_separator);
+ // Replace separator with its ASCII character value.
+ __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+  // Jump into the loop after the code that copies the separator, so that the
+  // first element is not preceded by a separator.
+ __ B(&one_char_separator_loop_entry);
+
+ __ Bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ASCII char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
+
+ // Copy next array element to the result.
+ __ Bind(&one_char_separator_loop_entry);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &one_char_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ Bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ // TODO(all): hoist next two instructions.
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(separator, String::kLengthOffset));
+ __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+
+ __ Bind(&long_separator);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &long_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ __ Bind(&bailout);
+ // Returning undefined will force slower code to handle it.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+  Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ if (expr->is_jsruntime()) {
+ // Push the builtins object as the receiver.
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
+ __ Push(x0);
+
+ // Load the function from the receiver.
+ Handle<String> name = expr->name();
+ __ Mov(x2, Operand(name));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+ } else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ Mov(x10, Smi::FromInt(strict_mode()));
+ __ Push(x10);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(x0);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is allowed.
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
+ if (var->IsUnallocated()) {
+ __ Ldr(x12, GlobalObjectMemOperand());
+ __ Mov(x11, Operand(var->name()));
+ __ Mov(x10, Smi::FromInt(SLOPPY));
+ __ Push(x12, x11, x10);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(x0);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(var->is_this());
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ Mov(x2, Operand(var->name()));
+ __ Push(context_register(), x2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+ context()->Plug(x0);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+      break;
+ }
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ // TODO(jbramley): This could be much more efficient using (for
+ // example) the CSEL instruction.
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+
+ __ Bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&done);
+ if (context()->IsStackValue()) {
+ __ Push(result_register());
+ }
+ }
+ break;
+ }
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ {
+ StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(x0);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ Push(xzr);
+ }
+ if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in the accumulator.
+ VisitForAccumulatorValue(prop->obj());
+ __ Push(x0);
+ EmitNamedPropertyLoad(prop);
+ } else {
+ // KEYED_PROPERTY
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Peek(x1, 0);
+ __ Push(x0);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+  // We need a second deoptimization point after loading the value in case
+  // evaluating the property load has a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(x0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property we
+ // store the result under the receiver that is currently on top of the
+ // stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ Push(x0);
+ break;
+ case NAMED_PROPERTY:
+ __ Poke(x0, kPointerSize);
+ break;
+ case KEYED_PROPERTY:
+ __ Poke(x0, kPointerSize * 2);
+ break;
+ }
+ }
+ }
+
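+    // Smis are tagged, so adding the tagged constant Smi(count_value) adds
+    // count_value to the untagged payload in a single operation; a signed
+    // overflow (V set) means the result is no longer a valid smi, so we undo
+    // the addition and fall back to the stub.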
+ __ Adds(x0, x0, Smi::FromInt(count_value));
+ __ B(vc, &done);
+ // Call stub. Undo operation first.
+ __ Sub(x0, x0, Smi::FromInt(count_value));
+ __ B(&stub_call);
+ __ Bind(&slow);
+ }
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ Push(x0);
+ break;
+ case NAMED_PROPERTY:
+ __ Poke(x0, kXRegSize);
+ break;
+ case KEYED_PROPERTY:
+ __ Poke(x0, 2 * kXRegSize);
+ break;
+ }
+ }
+ }
+
+ __ Bind(&stub_call);
+ __ Mov(x1, x0);
+ __ Mov(x0, Smi::FromInt(count_value));
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ __ Bind(&done);
+
+ // Store the value returned in x0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(x0);
+ }
+          // For all contexts except EffectContext, we have the result on
+          // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(x1);
+ CallStoreIC(expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Pop(x1); // Key.
+ __ Pop(x2); // Receiver.
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsUnallocated()) {
+ Comment cmnt(masm_, "Global variable");
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(proxy->name()));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ CallLoadIC(NOT_CONTEXTUAL);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(x0);
+ } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+
+ __ Bind(&slow);
+ __ Mov(x0, Operand(proxy->name()));
+ __ Push(cp, x0);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ Bind(&done);
+
+ context()->Plug(x0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInDuplicateContext(expr);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof");
+ Comment cmnt(masm_, "[ EmitLiteralCompareTypeof");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string");
+ __ JumpIfSmi(x0, if_true);
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ CompareRoot(x0, Heap::kHeapNumberMapRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->string_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string");
+ __ JumpIfSmi(x0, if_false);
+ // Check for undetectable objects => false.
+ __ JumpIfObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE, if_false, ge);
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false,
+ fall_through);
+ } else if (String::Equals(check, factory->symbol_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string");
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x0, x1, SYMBOL_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->boolean_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string");
+ __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
+ __ CompareRoot(x0, Heap::kFalseValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(check, factory->null_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string");
+ __ CompareRoot(x0, Heap::kNullValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->undefined_string())) {
+ ASM_LOCATION(
+ "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
+ __ JumpIfSmi(x0, if_false);
+ // Check for undetectable objects => true.
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
+ fall_through);
+ } else if (String::Equals(check, factory->function_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string");
+ __ JumpIfSmi(x0, if_false);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true);
+ __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
+ fall_through);
+
+ } else if (String::Equals(check, factory->object_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
+ __ JumpIfSmi(x0, if_false);
+ if (!FLAG_harmony_typeof) {
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+ }
+ // Check for JS objects => true.
+ Register map = x10;
+ __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ if_false, lt);
+ __ CompareInstanceType(map, x11, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(gt, if_false);
+ // Check for undetectable objects => false.
+ __ Ldrb(x10, FieldMemOperand(map, Map::kBitFieldOffset));
+
+ __ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false,
+ fall_through);
+
+ } else {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
+ if (if_false != fall_through) __ B(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // Try to generate an optimized comparison with a literal value.
+ // TODO(jbramley): This only checks common values like NaN or undefined.
+ // Should it also handle ARM64 immediate operands?
+ if (TryLiteralCompare(expr)) {
+ return;
+ }
+
+ // Assign labels according to context()->PrepareTest.
+ Label materialize_true;
+ Label materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ // The stub returns 0 for true.
+ __ CompareAndSplit(x0, 0, eq, if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cond = CompareIC::ComputeCondition(op);
+
+ // Pop the stack value.
+ __ Pop(x1);
+
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ Split(cond, if_true, if_false, NULL);
+ __ Bind(&slow_case);
+ }
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareNil");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(x0, nil_value);
+ Split(eq, if_true, if_false, fall_through);
+ } else {
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through);
+ }
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+  // TODO(jbramley): Tidy this up once the merge is done, using named registers
+  // and suchlike. The implementation differs a little on bleeding_edge, so I
+  // don't want to spend too much time on it now.
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ Push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ B(&suspend);
+
+ // TODO(jbramley): This label is bound here because the following code
+ // looks at its pos(). Is it possible to do something more efficient here,
+ // perhaps using Adr?
+ __ Bind(&continuation);
+ __ B(&resume);
+
+ __ Bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
+ __ Mov(x1, Smi::FromInt(continuation.pos()));
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+ __ Mov(x1, cp);
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
+ __ Cmp(__ StackPointer(), x1);
+ __ B(eq, &post_runtime);
+ __ Push(x0); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Bind(&post_runtime);
+ __ Pop(result_register());
+ EmitReturnSequence();
+
+ __ Bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+ __ Str(x1, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
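+      // Delegating yield is lowered to an explicit loop: call iter[f](received)
+      // with f starting as 'next' (switched to 'throw' when an exception is
+      // delivered), yield the result while result.done is false, and finally
+      // extract result.value.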
+ // Initial send value is undefined.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ B(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ Bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(x2, Heap::kthrow_stringRootIndex); // "throw"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(x2, x3, x0); // "throw", iter, except
+ __ B(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ Bind(&l_try);
+ __ Pop(x0); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ Push(x0); // result
+ __ B(&l_suspend);
+
+ // TODO(jbramley): This label is bound here because the following code
+ // looks at its pos(). Is it possible to do something more efficient here,
+ // perhaps using Adr?
+ __ Bind(&l_continuation);
+ __ B(&l_resume);
+
+ __ Bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ Peek(x0, generator_object_depth);
+ __ Push(x0); // g
+ ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
+ __ Mov(x1, Smi::FromInt(l_continuation.pos()));
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+ __ Mov(x1, cp);
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Pop(x0); // result
+ EmitReturnSequence();
+ __ Bind(&l_resume); // received in x0
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ Bind(&l_next);
+ __ LoadRoot(x2, Heap::knext_stringRootIndex); // "next"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(x2, x3, x0); // "next", iter, received
+
+ // result = receiver[f](arg);
+ __ Bind(&l_call);
+ __ Peek(x1, 1 * kPointerSize);
+ __ Peek(x0, 2 * kPointerSize);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ Mov(x1, x0);
+ __ Poke(x1, 2 * kPointerSize);
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ Bind(&l_loop);
+ __ Push(x0); // save result
+ __ LoadRoot(x2, Heap::kdone_stringRootIndex); // "done"
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in x0
+ // The ToBooleanStub argument (result.done) is in x0.
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ Cbz(x0, &l_try);
+
+ // result.value
+ __ Pop(x0); // result
+ __ LoadRoot(x2, Heap::kvalue_stringRootIndex); // "value"
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in x0
+ context()->DropAndPlug(2, x0); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression* generator,
+                                            Expression* value,
+    JSGeneratorObject::ResumeMode resume_mode) {
+ ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
+ Register value_reg = x0;
+ Register generator_object = x1;
+ Register the_hole = x2;
+ Register operand_stack_size = w3;
+ Register function = x4;
+
+  // The value stays in x0, and is ultimately read by the resumed generator,
+  // as if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) had returned
+  // it. If the resumed generator is already closed, the value is instead
+  // read in order to throw it. x1 will hold the generator object until the
+  // activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ Pop(generator_object);
+
+ // Check generator state.
+ Label wrong_state, closed_state, done;
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ CompareAndBranch(x10, Smi::FromInt(0), eq, &closed_state);
+ __ CompareAndBranch(x10, Smi::FromInt(0), lt, &wrong_state);
+
+ // Load suspended function and context.
+ __ Ldr(cp, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContextOffset));
+ __ Ldr(function, FieldMemOperand(generator_object,
+ JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kReceiverOffset));
+ __ Push(x10);
+
+ // Push holes for the rest of the arguments to the generator function.
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+
+  // The number of arguments is stored as an int32_t, and -1 is a marker
+  // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so sign extension
+  // would normally be needed to handle it correctly. However, we operate on
+  // 32-bit W registers here, so no extension is required.
+ __ Ldr(w10, FieldMemOperand(x10,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ PushMultipleTimes(the_hole, w10);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ Bl(&resume_frame);
+ __ B(&done);
+
+ __ Bind(&resume_frame);
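+  // The pushes below lay out a standard JS frame:
+  //   sp + 3 * kPointerSize : lr (return address)
+  //   sp + 2 * kPointerSize : caller's fp   <- fp (set up just below)
+  //   sp + 1 * kPointerSize : cp (callee's context)
+  //   sp + 0 * kPointerSize : callee's JS function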
+ __ Push(lr, // Return address.
+ fp, // Caller's frame pointer.
+ cp, // Callee's context.
+ function); // Callee's JS Function.
+ __ Add(fp, __ StackPointer(), kPointerSize * 2);
+
+ // Load and untag the operand stack size.
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kOperandStackOffset));
+ __ Ldr(operand_stack_size,
+ UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ Cbnz(operand_stack_size, &slow_resume);
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ __ Ldrsw(x11,
+ UntagSmiFieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ __ Add(x10, x10, x11);
+ __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ Str(x12, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ __ Br(x10);
+
+ __ Bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ __ PushMultipleTimes(the_hole, operand_stack_size);
+
+ __ Mov(x10, Smi::FromInt(resume_mode));
+ __ Push(generator_object, result_register(), x10);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ Unreachable();
+
+  // We reach here when the generator is closed.
+ __ Bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ Push(value_reg);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ B(&done);
+
+  // Throw an error if we attempt to operate on a running generator.
+ __ Bind(&wrong_state);
+ __ Push(generator_object);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+
+ __ Bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
+
+ // Allocate and populate an object with this form: { value: VAL, done: DONE }
+
+ Register result = x0;
+ __ Allocate(map->instance_size(), result, x10, x11, &gc_required, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ __ Bind(&allocated);
+ Register map_reg = x1;
+ Register result_value = x2;
+ Register boolean_done = x3;
+ Register empty_fixed_array = x4;
+ Register untagged_result = x5;
+ __ Mov(map_reg, Operand(map));
+ __ Pop(result_value);
+ __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
+ __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
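+  // The asserts below pin down the layout: the result object is five
+  // contiguous pointer-sized fields, written below with one store and two
+  // store-pairs:
+  //   [0] map
+  //   [1] properties (empty fixed array)
+  //   [2] elements   (empty fixed array)
+  //   [3] value
+  //   [4] done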
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
+ JSObject::kElementsOffset);
+ STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==
+ JSGeneratorObject::kResultDonePropertyOffset);
+ __ ObjectUntag(untagged_result, result);
+ __ Str(map_reg, MemOperand(untagged_result, HeapObject::kMapOffset));
+ __ Stp(empty_fixed_array, empty_fixed_array,
+ MemOperand(untagged_result, JSObject::kPropertiesOffset));
+ __ Stp(result_value, boolean_done,
+ MemOperand(untagged_result,
+ JSGeneratorObject::kResultValuePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(result, JSGeneratorObject::kResultValuePropertyOffset,
+ x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+}
+
+
+// TODO(all): I don't like this method.
+// It seems to me that in too many places x0 is used in place of this.
+// Also, this function is not suitable for all places where x0 should be
+// abstracted (e.g. when used as an argument). But some places assume that the
+// first argument register is x0, and use this function instead.
+// Considering that most of the register allocation is hard-coded in the
+// FullCodeGen, that it is unlikely we will need to change it extensively, and
+// that abstracting the allocation through functions would not yield any
+// performance benefit, I think the existence of this function is debatable.
+Register FullCodeGenerator::result_register() {
+ return x0;
+}
+
+
+Register FullCodeGenerator::context_register() {
+ return cp;
+}
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
+ __ Str(value, MemOperand(fp, frame_offset));
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ Ldr(dst, ContextMemOperand(cp, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ ASSERT(kSmiTag == 0);
+ __ Push(xzr);
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+ __ Push(x10);
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(x10);
+ }
+}
+
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
+ ASSERT(!result_register().is(x10));
+  // Preserve the result register while executing the finally block.
+  // Also cook the return address in lr onto the stack as a smi-encoded
+  // offset from the code object.
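+  // Cooking makes the stored return address independent of the code object's
+  // location, so it stays valid if a GC moves the code; ExitFinallyBlock
+  // re-adds the (possibly relocated) code object base.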
+ __ Sub(x10, lr, Operand(masm_->CodeObject()));
+ __ SmiTag(x10);
+ __ Push(result_register(), x10);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Mov(x10, pending_message_obj);
+ __ Ldr(x10, MemOperand(x10));
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
+ __ Mov(x11, has_pending_message);
+ __ Ldrb(x11, MemOperand(x11));
+ __ SmiTag(x11);
+
+ __ Push(x10, x11);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Mov(x10, pending_message_script);
+ __ Ldr(x10, MemOperand(x10));
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
+ ASSERT(!result_register().is(x10));
+
+ // Restore pending message from stack.
+ __ Pop(x10, x11, x12);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Mov(x13, pending_message_script);
+ __ Str(x10, MemOperand(x13));
+
+ __ SmiUntag(x11);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Mov(x13, has_pending_message);
+ STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
+ __ Strb(x11, MemOperand(x13));
+
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Mov(x13, pending_message_obj);
+ __ Str(x12, MemOperand(x13));
+
+ // Restore result register and cooked return address from the stack.
+ __ Pop(x10, result_register());
+
+ // Uncook the return address (see EnterFinallyBlock).
+ __ SmiUntag(x10);
+ __ Add(x11, x10, Operand(masm_->CodeObject()));
+ __ Br(x11);
+}
+
+
+#undef __
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ // Turn the jump into a nop.
+ Address branch_address = pc - 3 * kInstructionSize;
+ PatchingAssembler patcher(branch_address, 1);
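+  // The patchable instruction is the third instruction before the back-edge
+  // call's return address; the full sequence is:
+  //   <b.pl ok / nop>   <- branch_address
+  //   ldr x16, <addr>
+  //   blr x16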
+
+ ASSERT(Instruction::Cast(branch_address)
+ ->IsNop(Assembler::INTERRUPT_CODE_NOP) ||
+ (Instruction::Cast(branch_address)->IsCondBranchImm() &&
+ Instruction::Cast(branch_address)->ImmPCOffset() ==
+ 6 * kInstructionSize));
+
+ switch (target_state) {
+ case INTERRUPT:
+ // <decrement profiling counter>
+ // .. .. .. .. b.pl ok
+ // .. .. .. .. ldr x16, pc+<interrupt stub address>
+ // .. .. .. .. blr x16
+ // ... more instructions.
+ // ok-label
+ // Jump offset is 6 instructions.
+ patcher.b(6, pl);
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // <decrement profiling counter>
+ // .. .. .. .. mov x0, x0 (NOP)
+ // .. .. .. .. ldr x16, pc+<on-stack replacement address>
+ // .. .. .. .. blr x16
+ patcher.nop(Assembler::INTERRUPT_CODE_NOP);
+ break;
+ }
+
+ // Replace the call address.
+ Instruction* load = Instruction::Cast(pc)->preceding(2);
+ Address interrupt_address_pointer =
+ reinterpret_cast<Address>(load) + load->ImmPCOffset();
+ ASSERT((Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->OnStackReplacement()
+ ->entry())) ||
+ (Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->InterruptCheck()
+ ->entry())) ||
+         (Memory::uint64_at(interrupt_address_pointer) ==
+          reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+                                     ->builtins()
+                                     ->OsrAfterStackCheck()
+                                     ->entry())));
+ Memory::uint64_at(interrupt_address_pointer) =
+ reinterpret_cast<uint64_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, reinterpret_cast<Address>(load), replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ // TODO(jbramley): There should be some extra assertions here (as in the ARM
+ // back-end), but this function is gone in bleeding_edge so it might not
+ // matter anyway.
+ Instruction* jump_or_nop = Instruction::Cast(pc)->preceding(3);
+
+ if (jump_or_nop->IsNop(Assembler::INTERRUPT_CODE_NOP)) {
+ Instruction* load = Instruction::Cast(pc)->preceding(2);
+ uint64_t entry = Memory::uint64_at(reinterpret_cast<Address>(load) +
+ load->ImmPCOffset());
+ if (entry == reinterpret_cast<uint64_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ } else if (entry == reinterpret_cast<uint64_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry())) {
+ return OSR_AFTER_STACK_CHECK;
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ return INTERRUPT;
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+ int* stack_depth,
+ int* context_length) {
+ ASM_LOCATION("FullCodeGenerator::TryFinally::Exit");
+ // The macros used here must preserve the result register.
+
+ // Because the handler block contains the context of the finally
+ // code, we can restore it directly from there for the finally code
+ // rather than iteratively unwinding contexts via their previous
+ // links.
+ __ Drop(*stack_depth); // Down to the handler block.
+ if (*context_length > 0) {
+ // Restore the context to its dedicated register and the stack.
+ __ Peek(cp, StackHandlerConstants::kContextOffset);
+ __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ PopTryHandler();
+ __ Bl(finally_entry_);
+
+ *stack_depth = 0;
+ *context_length = 0;
+ return previous_;
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/ic-arm64.cc b/chromium/v8/src/arm64/ic-arm64.cc
new file mode 100644
index 00000000000..842b3e75dcb
--- /dev/null
+++ b/chromium/v8/src/arm64/ic-arm64.cc
@@ -0,0 +1,1387 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/arm64/assembler-arm64.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/disasm.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// "type" holds an instance type on entry and is not clobbered.
+// The generated code branches to "global_object" if "type" is any kind of
+// global JS object.
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
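+  // Fold the three comparisons into one branch using conditional compares:
+  // each Ccmp only performs its comparison if the previous result was ne;
+  // otherwise it forces the Z flag, so the final B(eq) is taken if any of
+  // the three types matched.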
+ __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
+ __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
+ __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
+ __ B(eq, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+//
+// "receiver" holds the receiver on entry and is unchanged.
+// "elements" holds the property dictionary on fall through.
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
+ ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+ // Let t be the object instance type, we want:
+ // FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
+ // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
+ // check the lower bound.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+ __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
+ miss, lt);
+
+ // scratch0 now contains the map of the receiver and scratch1 the object type.
+ Register map = scratch0;
+ Register type = scratch1;
+
+ // Check if the receiver is a global JS object.
+ GenerateGlobalInstanceTypeCheck(masm, type, miss);
+
+ // Check that the object does not require access checks.
+ __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
+ __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);
+
+ // Check that the properties dictionary is valid.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
+}
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done.
+// The scratch registers need to be different from elements, name and result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+ ASSERT(!AreAliased(result, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+  // If probing finds an entry, check that the value is a normal property.
+ __ Bind(&done);
+
+ static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
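+  // Each dictionary entry is a (key, value, details) triple, so the details
+  // word is two pointers past the entry start and the value (loaded at the
+  // end of this function) is one pointer past it.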
+ __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
+ __ B(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ __ Ldr(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// value: The value to store (never clobbered).
+//
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+  // If probing finds an entry in the dictionary, check that the value is a
+  // normal property that is not read-only.
+ __ Bind(&done);
+
+ static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ static const int kTypeAndReadOnlyMask =
+ PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY);
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ __ Tst(scratch1, kTypeAndReadOnlyMask);
+ __ B(ne, miss);
+
+ // Store the value at the masked, scaled index and return.
+ static const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
+ __ Str(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ Mov(scratch1, value);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for a regular JS object, and returns the map of the
+// receiver in 'map_scratch' if the receiver is not a smi.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map_scratch,
+ Register scratch,
+ int interceptor_bit,
+ Label* slow) {
+ ASSERT(!AreAliased(map_scratch, scratch));
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
+ __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
+ __ Tbnz(scratch, interceptor_bit, slow);
+
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object, we enter the
+  // runtime system to make sure that indexing into string objects works
+  // as intended.
+ STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ __ Cmp(scratch, JS_OBJECT_TYPE);
+ __ B(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+//
+// receiver - holds the receiver on entry.
+// Unchanged unless 'result' is the same register.
+//
+// key - holds the smi key on entry.
+// Unchanged unless 'result' is the same register.
+//
+// elements - holds the elements of the receiver on exit.
+//
+// elements_map - holds the elements map on exit if the not_fast_array branch is
+// taken. Otherwise, this is used as a scratch register.
+//
+// result - holds the result on exit if the load succeeded.
+//          Allowed to be the same as 'receiver' or 'key'.
+// Unchanged on bailout so 'receiver' and 'key' can be safely
+// used by further computation.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register elements_map,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* slow) {
+ ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
+
+ // Check for fast array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
+ not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
+
+ // The elements_map register is only used for the not_fast_array path, which
+ // was handled above. From this point onward it is a scratch register.
+ Register scratch1 = elements_map;
+
+ // Check that the key (index) is within bounds.
+ __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch1);
+ __ B(hs, slow);
+
+ // Fast case: Do the load.
+ __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
+
+ // Move the value to the result register.
+ // 'result' can alias with 'receiver' or 'key' but these two must be
+ // preserved if we jump to 'slow'.
+ __ Mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+// The map of the key is returned in 'map_scratch'.
+// If the jump to 'index_string' is done, the hash of the key is left
+// in 'hash_scratch'.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map_scratch,
+ Register hash_scratch,
+ Label* index_string,
+ Label* not_unique) {
+ ASSERT(!AreAliased(key, map_scratch, hash_scratch));
+
+ // Is the key a name?
+ Label unique;
+ __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
+ not_unique, hi);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ B(eq, &unique);
+
+ // Is the string an array index with cached numeric value?
+ __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+ __ TestAndBranchIfAllClear(hash_scratch,
+ Name::kContainsCachedArrayIndexMask,
+ index_string);
+
+ // Is the string internalized? We know it's a string, so a single bit test is
+ // enough.
+ __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
+
+ __ Bind(&unique);
+ // Fall through if the key is a unique name.
+}
+
+
+// Neither 'object' nor 'key' are modified by this function.
+//
+// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
+// left with the object's elements map. Otherwise, it is used as a scratch
+// register.
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register map,
+ Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
+
+ Heap* heap = masm->isolate()->heap();
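+  // A sloppy arguments elements array has the layout:
+  //   element 0       : context
+  //   element 1       : arguments backing store (FixedArray)
+  //   elements 2..n+1 : mapped entries (a context slot index, or the hole
+  //                     for unmapped parameters)
+  // Hence the length is reduced by 2 before the bounds check below.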
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
+ slow_case, lt);
+
+ // Check that the key is a positive smi.
+ __ JumpIfNotSmi(key, slow_case);
+ __ Tbnz(key, kXSignBit, slow_case);
+
+ // Load the elements object and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup.
+ __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ Sub(scratch1, scratch1, Smi::FromInt(2));
+ __ Cmp(key, scratch1);
+ __ B(hs, unmapped_case);
+
+  // Load the mapped index (a context slot index; see the layout sketch
+  // above) and check whether it is the hole.
+ static const int offset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ Add(scratch1, map, offset);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
+
+ // Load value from context and return it.
+ __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
+ __ SmiUntag(scratch1);
+ __ Lsl(scratch1, scratch1, kPointerSizeLog2);
+ __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
+ // The base of the result (scratch2) is passed to RecordWrite in
+ // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
+ return MemOperand(scratch2, scratch1);
+}
+
+
+// The 'parameter_map' register must be loaded with the parameter map of the
+// arguments object and is overwritten.
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ ASSERT(!AreAliased(key, parameter_map, scratch));
+
+  // The element is in the arguments backing store, which is referenced by
+  // the second element of the parameter map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(
+ backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch);
+ __ B(hs, slow_case);
+
+ __ Add(backing_store,
+ backing_store,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch, key);
+ return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, x0, x2, x3, x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+ Label miss, slow;
+
+ GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);
+
+ // x1 now holds the property dictionary.
+ GenerateDictionaryLoad(masm, &slow, x1, x2, x0, x3, x4);
+ __ Ret();
+
+ // Dictionary load failed, go slow (but don't miss).
+ __ Bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+
+ // Cache miss: Jump to runtime.
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+ ASM_LOCATION("LoadIC::GenerateMiss");
+
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+
+ // Perform tail call to the entry.
+ __ Push(x0, x2);
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+
+ __ Push(x0, x2);
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Register result = x0;
+ Register key = x0;
+ Register receiver = x1;
+ Label miss, unmapped;
+
+ Register map_scratch = x2;
+ MemOperand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
+ __ Ldr(result, mapped_location);
+ __ Ret();
+
+ __ Bind(&unmapped);
+  // The parameter map is left in map_scratch when the jump to 'unmapped' is
+  // taken.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
+ __ Ldr(x2, unmapped_location);
+ __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
+  // Move the result into x0. The key in x0 had to be preserved until now,
+  // in case of a miss.
+ __ Mov(result, x2);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -----------------------------------
+
+ Label slow, notin;
+
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register map = x3;
+
+ // These registers are used by GenerateMappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register mapped1 = x4;
+ Register mapped2 = x5;
+
+ MemOperand mapped =
+ GenerateMappedArgumentsLookup(masm, receiver, key, map,
+ mapped1, mapped2,
+ &notin, &slow);
+ Operand mapped_offset = mapped.OffsetAsOperand();
+ __ Str(value, mapped);
+ __ Add(x10, mapped.base(), mapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+
+ __ Bind(&notin);
+
+ // These registers are used by GenerateMappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register unmapped1 = map; // This is assumed to alias 'map'.
+ Register unmapped2 = x4;
+ MemOperand unmapped =
+ GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
+ Operand unmapped_offset = unmapped.OffsetAsOperand();
+ __ Str(value, unmapped);
+ __ Add(x10, unmapped.base(), unmapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(unmapped.base(), x10, x11,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
+
+ __ Push(x1, x0);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Register key = x0;
+ Register receiver = x1;
+
+ __ Push(receiver, key);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
+ Register key,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label *slow) {
+ ASSERT(!AreAliased(
+ key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label check_number_dictionary;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
+ __ IncrementCounter(
+ isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
+ __ Ret();
+
+ __ Bind(&check_number_dictionary);
+ __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
+
+ // Check whether we have a number dictionary.
+ __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
+
+ __ LoadFromNumberDictionary(
+ slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
+ __ Ret();
+}
+
+static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
+ Register key,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label *slow) {
+ ASSERT(!AreAliased(
+ key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label probe_dictionary, property_array_property;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup cache.
+ // Otherwise probe the dictionary.
+ __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
+
+ // We keep the map of the receiver in scratch1.
+ Register receiver_map = scratch1;
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the name hash.
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
+ __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+ __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(scratch2, scratch2, mask);
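+  // In effect:
+  //   index = ((map >> kMapHashShift) ^ (hash_field >> kHashShift))
+  //           & (kCapacityMask & kHashMask)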
+
+ // Load the key (consisting of map and unique name) from the cache and
+  // check for a match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+
+ __ Mov(scratch3, cache_keys);
+ __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+    // Load the map and advance scratch3 to point at the next entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, &try_next_entry);
+ __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name
+ __ Cmp(key, scratch4);
+ __ B(eq, &hit_on_nth_entry[i]);
+ __ Bind(&try_next_entry);
+ }
+
+ // Last entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, slow);
+ __ Ldr(scratch4, MemOperand(scratch3));
+ __ Cmp(key, scratch4);
+ __ B(ne, slow);
+
+ // Get field offset.
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ Bind(&hit_on_nth_entry[i]);
+ __ Mov(scratch3, cache_field_offsets);
+ if (i != 0) {
+ __ Add(scratch2, scratch2, i);
+ }
+ __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
+ __ Ldrb(scratch5,
+ FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
+ __ Subs(scratch4, scratch4, scratch5);
+ __ B(ge, &property_array_property);
+ if (i != 0) {
+ __ B(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ Bind(&load_in_object_property);
+ __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
+ __ Add(scratch5, scratch5, scratch4); // Index from start of object.
+ __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
+ __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, scratch1, scratch2);
+ __ Ret();
+
+ // Load property array property.
+ __ Bind(&property_array_property);
+ __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, scratch1, scratch2);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it exists.
+ __ Bind(&probe_dictionary);
+ __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
+ // Load the property.
+ GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+ 1, scratch1, scratch2);
+ __ Ret();
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow, check_name, index_smi, index_name;
+
+ Register key = x0;
+ Register receiver = x1;
+
+ __ JumpIfNotSmi(key, &check_name);
+ __ Bind(&index_smi);
+  // The key is now known to be a smi. This point is also reached from below,
+  // where a numeric string key has been converted to a smi.
+ GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+ // Slow case, key and receiver still in x0 and x1.
+ __ Bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ Bind(&check_name);
+ GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);
+
+ GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+ __ Bind(&index_name);
+ __ IndexFromHash(x3, key);
+ // Now jump to the place where smi keys are handled.
+ __ B(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key (index)
+ // -- x1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Register index = x0;
+ Register receiver = x1;
+ Register result = x0;
+ Register scratch = x3;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow;
+ Register key = x0;
+ Register receiver = x1;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+  // Check that the key is an array index, that is, a Uint32.
+ __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
+
+ // Get the map of the receiver.
+ Register map = x2;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+  // Check that it has an indexed interceptor and that access checks are not
+  // enabled for this object.
+ __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
+ ASSERT(kSlowCaseBitFieldMask ==
+ ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
+ __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
+ __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate()),
+ 2,
+ 1);
+
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateMiss");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSlow");
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ // Push PropertyAttributes(NONE) and strict_mode for runtime call.
+ STATIC_ASSERT(NONE == 0);
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ __ Push(xzr, x10);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length,
+ Register value,
+ Register key,
+ Register receiver,
+ Register receiver_map,
+ Register elements_map,
+ Register elements) {
+ ASSERT(!AreAliased(
+ value, key, receiver, receiver_map, elements_map, elements, x10, x11));
+
+ Label transition_smi_elements;
+ Label transition_double_elements;
+ Label fast_double_without_map_check;
+ Label non_double_value;
+ Label finish_store;
+
+ __ Bind(fast_object);
+ if (check_map == kCheckMap) {
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(ne, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because there
+ // may be a callback on the element.
+ Label holecheck_passed;
+ __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10));
+ __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+  __ Bind(&holecheck_passed);
+
+ // Smi stores don't require further checks.
+ __ JumpIfSmi(value, &finish_store);
+
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
+
+ __ Bind(&finish_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Smi::FromInt(1));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+
+ Register address = x11;
+ __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Str(value, MemOperand(address));
+
+ Label dont_record_write;
+ __ JumpIfSmi(value, &dont_record_write);
+
+ // Update write barrier for the elements array address.
+ __ Mov(x10, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ x10,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ Bind(&dont_record_write);
+ __ Ret();
+
+
+ __ Bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so, go
+  // to the runtime.
+ __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10));
+ __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+
+ __ Bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ elements,
+ x10,
+ d0,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Smi::FromInt(1));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+
+ __ Bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ x10,
+ x11,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&fast_double_without_map_check);
+
+ __ Bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ x10,
+ x11,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+
+ __ Bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS,
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ x10,
+ x11,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow;
+ Label array;
+ Label fast_object;
+ Label extra;
+ Label fast_object_grow;
+ Label fast_double_grow;
+ Label fast_double;
+
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register receiver_map = x3;
+ Register elements = x4;
+ Register elements_map = x5;
+
+ __ JumpIfNotSmi(key, &slow);
+ __ JumpIfSmi(receiver, &slow);
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(
+ x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
+
+ // Check if the object is a JS array or not.
+ Register instance_type = x10;
+ __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
+ __ B(eq, &array);
+ // Check that the object is some kind of JSObject.
+ __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
+ __ B(lt, &slow);
+
+ // Object case: Check key against length in the elements array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(hi, &fast_object);
+
+
+ __ Bind(&slow);
+ // Slow case, handle jump to runtime.
+ // Live values:
+ // x0: value
+ // x1: key
+ // x2: receiver
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+
+ __ Bind(&extra);
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(ls, &slow);
+
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(eq, &fast_object_grow);
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ B(eq, &fast_double_grow);
+ __ B(&slow);
+
+
+ __ Bind(&array);
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(eq, &extra); // We can handle the case where we are appending 1 element.
+ __ B(lo, &slow);
+
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, x1, x2, x3, x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ __ Push(x1, x2, x0);
+
+ // Tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+ Register value = x0;
+ Register receiver = x1;
+ Register name = x2;
+ Register dictionary = x3;
+
+ GenerateNameDictionaryReceiverCheck(
+ masm, receiver, dictionary, x4, x5, &miss);
+
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ __ Push(x1, x2, x0);
+
+ __ Mov(x11, Smi::FromInt(NONE)); // PropertyAttributes
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ __ Push(x11, x10);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, name and value for runtime call.
+ __ Push(x1, x2, x0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return al;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address info_address =
+ Assembler::return_address_from_call_start(address);
+
+ InstructionSequence* patch_info = InstructionSequence::At(info_address);
+ return patch_info->IsInlineData();
+}
+
+
+// Activate a SMI fast-path by patching the instructions generated by
+// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
+// JumpPatchSite::EmitPatchInfo().
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ // The patch information is encoded in the instruction stream using
+ // instructions which have no side effects, so we can safely execute them.
+ // The patch information is encoded directly after the call to the helper
+ // function which is requesting this patch operation.
+ Address info_address =
+ Assembler::return_address_from_call_start(address);
+ InlineSmiCheckInfo info(info_address);
+
+ // Check and decode the patch information instruction.
+ if (!info.HasSmiCheck()) {
+ return;
+ }
+
+ if (FLAG_trace_ic) {
+ PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
+ address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
+ }
+
+ // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
+ // and JumpPatchSite::EmitJumpIfSmi().
+ // Changing
+ // tb(n)z xzr, #0, <target>
+ // to
+ // tb(!n)z test_reg, #0, <target>
+ Instruction* to_patch = info.SmiCheck();
+ PatchingAssembler patcher(to_patch, 1);
+ ASSERT(to_patch->IsTestBranch());
+ ASSERT(to_patch->ImmTestBranchBit5() == 0);
+ ASSERT(to_patch->ImmTestBranchBit40() == 0);
+
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+
+ int branch_imm = to_patch->ImmTestBranch();
+ Register smi_reg;
+ if (check == ENABLE_INLINED_SMI_CHECK) {
+ ASSERT(to_patch->Rt() == xzr.code());
+ smi_reg = info.SmiRegister();
+ } else {
+ ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+ ASSERT(to_patch->Rt() != xzr.code());
+ smi_reg = xzr;
+ }
+
+ if (to_patch->Mask(TestBranchMask) == TBZ) {
+ // This is JumpIfNotSmi(smi_reg, branch_imm).
+ patcher.tbnz(smi_reg, 0, branch_imm);
+ } else {
+ ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
+ // This is JumpIfSmi(smi_reg, branch_imm).
+ patcher.tbz(smi_reg, 0, branch_imm);
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/instructions-arm64.cc b/chromium/v8/src/arm64/instructions-arm64.cc
new file mode 100644
index 00000000000..c7334ed5cfc
--- /dev/null
+++ b/chromium/v8/src/arm64/instructions-arm64.cc
@@ -0,0 +1,317 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#define ARM64_DEFINE_FP_STATICS
+
+#include "src/arm64/instructions-arm64.h"
+#include "src/arm64/assembler-arm64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool Instruction::IsLoad() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) != 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_w:
+ case LDRSB_x:
+ case LDRSH_w:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDR_s:
+ case LDR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
+bool Instruction::IsStore() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) == 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x:
+ case STR_s:
+ case STR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
+static uint64_t RotateRight(uint64_t value,
+ unsigned int rotate,
+ unsigned int width) {
+ ASSERT(width <= 64);
+ rotate &= 63;
+ return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
+ (value >> rotate);
+}
+
+
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+ uint64_t value,
+ unsigned width) {
+ ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ (width == 32));
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ uint64_t result = value & ((1UL << width) - 1UL);
+ for (unsigned i = width; i < reg_size; i *= 2) {
+ result |= (result << i);
+ }
+ return result;
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case, specifically when the constraints on imm_s are not
+// met.
+uint64_t Instruction::ImmLogical() {
+ unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
+ int64_t n = BitN();
+ int64_t imm_s = ImmSetBits();
+ int64_t imm_r = ImmRotate();
+
+ // An integer is constructed from the n, imm_s and imm_r bits according to
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+
+ if (n == 1) {
+ if (imm_s == 0x3F) {
+ return 0;
+ }
+ uint64_t bits = (1UL << (imm_s + 1)) - 1;
+ return RotateRight(bits, imm_r, 64);
+ } else {
+ if ((imm_s >> 1) == 0x1F) {
+ return 0;
+ }
+ for (int width = 0x20; width >= 0x2; width >>= 1) {
+ if ((imm_s & width) == 0) {
+ int mask = width - 1;
+ if ((imm_s & mask) == mask) {
+ return 0;
+ }
+ uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
+ return RepeatBitsAcrossReg(reg_size,
+ RotateRight(bits, imm_r & mask, width),
+ width);
+ }
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
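+
+// Worked example (illustrative only): decoding n = 0, imm_s = 0b110101 and
+// imm_r = 0b000010 for a W register. The '110sss' row of the table above
+// selects an 8-bit element with S = 5, so bits = (1UL << 6) - 1 = 0x3f.
+// RotateRight(0x3f, 2, 8) rotates this to 0xcf, and
+// RepeatBitsAcrossReg(kWRegSizeInBits, 0xcf, 8) repeats it across the
+// register, giving the logical immediate 0xcfcfcfcf.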
+
+
+float Instruction::ImmFP32() {
+ // ImmFP: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
+ uint32_t bits = ImmFP();
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return rawbits_to_float(result);
+}
+
+
+double Instruction::ImmFP64() {
+ // ImmFP: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
+ uint32_t bits = ImmFP();
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+ return rawbits_to_double(result);
+}
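+
+// Worked example (illustrative only): the 8-bit immediate 0x70
+// (abcdefgh = 01110000) expands under the mappings above to 0x3f800000 as a
+// float and 0x3ff0000000000000 as a double, i.e. 1.0 in both widths.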
+
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
+ switch (op) {
+ case STP_x:
+ case LDP_x:
+ case STP_d:
+ case LDP_d: return LSDoubleWord;
+ default: return LSWord;
+ }
+}
+
+
+ptrdiff_t Instruction::ImmPCOffset() {
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // PC-relative addressing. Only ADR is supported.
+ offset = ImmPCRel();
+ } else if (BranchType() != UnknownBranchType) {
+ // All PC-relative branches.
+ // Relative branch offsets are instruction-size-aligned.
+ offset = ImmBranch() << kInstructionSizeLog2;
+ } else {
+ // Load literal (offset from PC).
+ ASSERT(IsLdrLiteral());
+    // The offset is always shifted by 2 bits, even for loads to 64-bit
+    // registers.
+ offset = ImmLLiteral() << kInstructionSizeLog2;
+ }
+ return offset;
+}
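+
+// For example, an unconditional branch whose ImmBranch() field holds 4
+// encodes a PC-relative offset of 4 << kInstructionSizeLog2 = 16 bytes,
+// i.e. four instructions forward.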
+
+
+Instruction* Instruction::ImmPCOffsetTarget() {
+ return InstructionAtOffset(ImmPCOffset());
+}
+
+
+bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
+ int32_t offset) {
+ return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
+}
+
+
+bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
+ return IsValidImmPCOffset(BranchType(), DistanceTo(target));
+}
+
+
+void Instruction::SetImmPCOffsetTarget(Instruction* target) {
+ if (IsPCRelAddressing()) {
+ SetPCRelImmTarget(target);
+ } else if (BranchType() != UnknownBranchType) {
+ SetBranchImmTarget(target);
+ } else {
+ SetImmLLiteral(target);
+ }
+}
+
+
+void Instruction::SetPCRelImmTarget(Instruction* target) {
+ // ADRP is not supported, so 'this' must point to an ADR instruction.
+ ASSERT(IsAdr());
+
+ int target_offset = DistanceTo(target);
+ Instr imm;
+ if (Instruction::IsValidPCRelOffset(target_offset)) {
+ imm = Assembler::ImmPCRelAddress(target_offset);
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+ } else {
+ PatchingAssembler patcher(this,
+ PatchingAssembler::kAdrFarPatchableNInstrs);
+ patcher.PatchAdrFar(target);
+ }
+}
+
+
+void Instruction::SetBranchImmTarget(Instruction* target) {
+ ASSERT(IsAligned(DistanceTo(target), kInstructionSize));
+ Instr branch_imm = 0;
+ uint32_t imm_mask = 0;
+ ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
+ switch (BranchType()) {
+ case CondBranchType: {
+ branch_imm = Assembler::ImmCondBranch(offset);
+ imm_mask = ImmCondBranch_mask;
+ break;
+ }
+ case UncondBranchType: {
+ branch_imm = Assembler::ImmUncondBranch(offset);
+ imm_mask = ImmUncondBranch_mask;
+ break;
+ }
+ case CompareBranchType: {
+ branch_imm = Assembler::ImmCmpBranch(offset);
+ imm_mask = ImmCmpBranch_mask;
+ break;
+ }
+ case TestBranchType: {
+ branch_imm = Assembler::ImmTestBranch(offset);
+ imm_mask = ImmTestBranch_mask;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+void Instruction::SetImmLLiteral(Instruction* source) {
+ ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
+ ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
+ Instr imm = Assembler::ImmLLiteral(offset);
+ Instr mask = ImmLLiteral_mask;
+
+ SetInstructionBits(Mask(~mask) | imm);
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-arm64-inl.h to work around this.
+bool InstructionSequence::IsInlineData() const {
+ // Inline data is encoded as a single movz instruction which writes to xzr
+ // (x31).
+ return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
+ // TODO(all): If we extend ::InlineData() to support bigger data, we need
+ // to update this method too.
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-arm64-inl.h to work around this.
+uint64_t InstructionSequence::InlineData() const {
+ ASSERT(IsInlineData());
+ uint64_t payload = ImmMoveWide();
+ // TODO(all): If we extend ::InlineData() to support bigger data, we need
+ // to update this method too.
+ return payload;
+}
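+
+// For example (illustrative only), an InlineData() sequence carrying the
+// payload 0x1234 is the single instruction 'movz xzr, #0x1234'. Writes to
+// xzr are discarded, so the instruction is harmless if executed;
+// IsInlineData() recognizes the form and InlineData() recovers the payload
+// from the 16-bit ImmMoveWide() field.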
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/instructions-arm64.h b/chromium/v8/src/arm64/instructions-arm64.h
new file mode 100644
index 00000000000..b3d9b794acb
--- /dev/null
+++ b/chromium/v8/src/arm64/instructions-arm64.h
@@ -0,0 +1,509 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
+#define V8_ARM64_INSTRUCTIONS_ARM64_H_
+
+#include "src/globals.h"
+#include "src/utils.h"
+#include "src/arm64/constants-arm64.h"
+#include "src/arm64/utils-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ISA constants. --------------------------------------------------------------
+
+typedef uint32_t Instr;
+
+// The following macros initialize a float/double variable with a bit pattern
+// without using static initializers: If ARM64_DEFINE_FP_STATICS is defined, the
+// symbol is defined as uint32_t/uint64_t initialized with the desired bit
+// pattern. Otherwise, the same symbol is declared as an external float/double.
+#if defined(ARM64_DEFINE_FP_STATICS)
+#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
+#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
+#else
+#define DEFINE_FLOAT(name, value) extern const float name
+#define DEFINE_DOUBLE(name, value) extern const double name
+#endif // defined(ARM64_DEFINE_FP_STATICS)
+
+DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
+DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
+DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
+DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
+
+// This value is a signalling NaN as both a double and as a float (taking the
+// least-significant word).
+DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
+DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
+
+// A similar value, but as a quiet NaN.
+DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
+DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
+
+// The default NaN values (for FPCR.DN=1).
+DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
+DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
+
+#undef DEFINE_FLOAT
+#undef DEFINE_DOUBLE
+
+
+enum LSDataSize {
+ LSByte = 0,
+ LSHalfword = 1,
+ LSWord = 2,
+ LSDoubleWord = 3
+};
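+
+// (Each enumerator is log2 of the access size in bytes, so LSByte is a 1-byte
+// access and LSDoubleWord an 8-byte access.)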
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+ UnknownBranchType = 0,
+ CondBranchType = 1,
+ UncondBranchType = 2,
+ CompareBranchType = 3,
+ TestBranchType = 4
+};
+
+enum AddrMode {
+ Offset,
+ PreIndex,
+ PostIndex
+};
+
+enum FPRounding {
+ // The first four values are encodable directly by FPCR<RMode>.
+ FPTieEven = 0x0,
+ FPPositiveInfinity = 0x1,
+ FPNegativeInfinity = 0x2,
+ FPZero = 0x3,
+
+ // The final rounding mode is only available when explicitly specified by the
+ // instruction (such as with fcvta). It cannot be set in FPCR.
+ FPTieAway
+};
+
+enum Reg31Mode {
+ Reg31IsStackPointer,
+ Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+ V8_INLINE Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ V8_INLINE void SetInstructionBits(Instr new_instr) {
+ *reinterpret_cast<Instr*>(this) = new_instr;
+ }
+
+ int Bit(int pos) const {
+ return (InstructionBits() >> pos) & 1;
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, InstructionBits());
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ return signed_bitextract_32(msb, lsb, bits);
+ }
+
+ Instr Mask(uint32_t mask) const {
+ return InstructionBits() & mask;
+ }
+
+ V8_INLINE Instruction* following(int count = 1) {
+ return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
+ }
+
+ V8_INLINE Instruction* preceding(int count = 1) {
+ return following(-count);
+ }
+
+ #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ int64_t Name() const { return Func(HighBit, LowBit); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+ #undef DEFINE_GETTER
+
+ // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+ // formed from ImmPCRelLo and ImmPCRelHi.
+ int ImmPCRel() const {
+ ASSERT(IsPCRelAddressing());
+ int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int const width = ImmPCRelLo_width + ImmPCRelHi_width;
+ return signed_bitextract_32(width - 1, 0, offset);
+ }
+
+ uint64_t ImmLogical();
+ float ImmFP32();
+ double ImmFP64();
+
+ LSDataSize SizeLSPair() const {
+ return CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+ }
+
+ // Helpers.
+ bool IsCondBranchImm() const {
+ return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+ }
+
+ bool IsUncondBranchImm() const {
+ return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+ }
+
+ bool IsCompareBranch() const {
+ return Mask(CompareBranchFMask) == CompareBranchFixed;
+ }
+
+ bool IsTestBranch() const {
+ return Mask(TestBranchFMask) == TestBranchFixed;
+ }
+
+ bool IsImmBranch() const {
+ return BranchType() != UnknownBranchType;
+ }
+
+ bool IsLdrLiteral() const {
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+ }
+
+ bool IsLdrLiteralX() const {
+ return Mask(LoadLiteralMask) == LDR_x_lit;
+ }
+
+ bool IsPCRelAddressing() const {
+ return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+ }
+
+ bool IsAdr() const {
+ return Mask(PCRelAddressingMask) == ADR;
+ }
+
+ bool IsLogicalImmediate() const {
+ return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+ }
+
+ bool IsAddSubImmediate() const {
+ return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+ }
+
+ bool IsAddSubShifted() const {
+ return Mask(AddSubShiftedFMask) == AddSubShiftedFixed;
+ }
+
+ bool IsAddSubExtended() const {
+ return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+ }
+
+ // Match any loads or stores, including pairs.
+ bool IsLoadOrStore() const {
+ return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+ }
+
+ // Match any loads, including pairs.
+ bool IsLoad() const;
+ // Match any stores, including pairs.
+ bool IsStore() const;
+
+ // Indicate whether Rd can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rd field.
+ Reg31Mode RdMode() const {
+ // The following instructions use csp or wsp as Rd:
+ // Add/sub (immediate) when not setting the flags.
+ // Add/sub (extended) when not setting the flags.
+ // Logical (immediate) when not setting the flags.
+ // Otherwise, r31 is the zero register.
+ if (IsAddSubImmediate() || IsAddSubExtended()) {
+ if (Mask(AddSubSetFlagsBit)) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ if (IsLogicalImmediate()) {
+ // Of the logical (immediate) instructions, only ANDS (and its aliases)
+ // can set the flags. The others can all write into csp.
+ // Note that some logical operations are not available to
+ // immediate-operand instructions, so we have to combine two masks here.
+ if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ // Indicate whether Rn can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rn field.
+ Reg31Mode RnMode() const {
+ // The following instructions use csp or wsp as Rn:
+ // All loads and stores.
+ // Add/sub (immediate).
+ // Add/sub (extended).
+ // Otherwise, r31 is the zero register.
+ if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+ return Reg31IsStackPointer;
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ ImmBranchType BranchType() const {
+ if (IsCondBranchImm()) {
+ return CondBranchType;
+ } else if (IsUncondBranchImm()) {
+ return UncondBranchType;
+ } else if (IsCompareBranch()) {
+ return CompareBranchType;
+ } else if (IsTestBranch()) {
+ return TestBranchType;
+ } else {
+ return UnknownBranchType;
+ }
+ }
+
+ static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
+ switch (branch_type) {
+ case UncondBranchType:
+ return ImmUncondBranch_width;
+ case CondBranchType:
+ return ImmCondBranch_width;
+ case CompareBranchType:
+ return ImmCmpBranch_width;
+ case TestBranchType:
+ return ImmTestBranch_width;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ }
+
+ // The range of the branch instruction, expressed as 'instr +- range'.
+ static int32_t ImmBranchRange(ImmBranchType branch_type) {
+ return
+ (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
+ kInstructionSize;
+ }
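+
+  // For example, a test-and-branch instruction has a 14-bit offset field, so
+  // ImmBranchRange(TestBranchType) is (1 << (14 + 2)) / 2 - 4 = 32764 bytes,
+  // i.e. roughly +-32KB around the branch.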
+
+ int ImmBranch() const {
+ switch (BranchType()) {
+ case CondBranchType: return ImmCondBranch();
+ case UncondBranchType: return ImmUncondBranch();
+ case CompareBranchType: return ImmCmpBranch();
+ case TestBranchType: return ImmTestBranch();
+ default: UNREACHABLE();
+ }
+ return 0;
+ }
+
+ bool IsBranchAndLinkToRegister() const {
+ return Mask(UnconditionalBranchToRegisterMask) == BLR;
+ }
+
+ bool IsMovz() const {
+ return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
+ (Mask(MoveWideImmediateMask) == MOVZ_w);
+ }
+
+ bool IsMovk() const {
+ return (Mask(MoveWideImmediateMask) == MOVK_x) ||
+ (Mask(MoveWideImmediateMask) == MOVK_w);
+ }
+
+ bool IsMovn() const {
+ return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+ (Mask(MoveWideImmediateMask) == MOVN_w);
+ }
+
+ bool IsNop(int n) {
+ // A marking nop is an instruction
+ // mov r<n>, r<n>
+ // which is encoded as
+ // orr r<n>, xzr, r<n>
+ return (Mask(LogicalShiftedMask) == ORR_x) &&
+ (Rd() == Rm()) &&
+ (Rd() == n);
+ }
+
+ // Find the PC offset encoded in this instruction. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ // The offset returned is unscaled.
+ ptrdiff_t ImmPCOffset();
+
+ // Find the target of this instruction. 'this' may be a branch or a
+ // PC-relative addressing instruction.
+ Instruction* ImmPCOffsetTarget();
+
+ static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
+ bool IsTargetInImmPCOffsetRange(Instruction* target);
+ // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ void SetImmPCOffsetTarget(Instruction* target);
+ // Patch a literal load instruction to load from 'source'.
+ void SetImmLLiteral(Instruction* source);
+
+ uint8_t* LiteralAddress() {
+ int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
+ return reinterpret_cast<uint8_t*>(this) + offset;
+ }
+
+ enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
+
+ V8_INLINE Instruction* InstructionAtOffset(
+ int64_t offset,
+ CheckAlignment check = CHECK_ALIGNMENT) {
+ Address addr = reinterpret_cast<Address>(this) + offset;
+ // The FUZZ_disasm test relies on no check being done.
+ ASSERT(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
+ return Cast(addr);
+ }
+
+ template<typename T> V8_INLINE static Instruction* Cast(T src) {
+ return reinterpret_cast<Instruction*>(src);
+ }
+
+ V8_INLINE ptrdiff_t DistanceTo(Instruction* target) {
+ return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
+ }
+
+
+ static const int ImmPCRelRangeBitwidth = 21;
+ static bool IsValidPCRelOffset(int offset) {
+ return is_int21(offset);
+ }
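+  // Note that is_int21 admits byte offsets in [-2^20, 2^20 - 1], so an ADR
+  // can address roughly +-1MB around the instruction.
+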
+ void SetPCRelImmTarget(Instruction* target);
+ void SetBranchImmTarget(Instruction* target);
+};
+
+
+// Where Instruction looks at instructions generated by the Assembler,
+// InstructionSequence looks at instruction sequences generated by the
+// MacroAssembler.
+class InstructionSequence : public Instruction {
+ public:
+ static InstructionSequence* At(Address address) {
+ return reinterpret_cast<InstructionSequence*>(address);
+ }
+
+ // Sequences generated by MacroAssembler::InlineData().
+ bool IsInlineData() const;
+ uint64_t InlineData() const;
+};
+
+
+// Simulator/Debugger debug instructions ---------------------------------------
+// Each debug marker is represented by a HLT instruction. The immediate comment
+// field in the instruction is used to identify the type of debug marker. Each
+// marker encodes arguments in a different way, as described below.
+
+// Indicate to the Debugger that the instruction is a redirected call.
+const Instr kImmExceptionIsRedirectedCall = 0xca11;
+
+// Represent unreachable code. This is used as a guard in parts of the code that
+// should not be reachable, such as in data encoded inline in the instructions.
+const Instr kImmExceptionIsUnreachable = 0xdebf;
+
+// A pseudo 'printf' instruction. The arguments will be passed to the platform
+// printf method.
+const Instr kImmExceptionIsPrintf = 0xdeb1;
+// Most parameters are stored in ARM64 registers as if the printf
+// pseudo-instruction was a call to the real printf method:
+// x0: The format string.
+// x1-x7: Optional arguments.
+// d0-d7: Optional arguments.
+//
+// Also, the argument layout is described inline in the instructions:
+// - arg_count: The number of arguments.
+// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
+//
+// Floating-point and integer arguments are passed in separate sets of registers
+// in AAPCS64 (even for varargs functions), so it is not possible to determine
+// the type of each argument without some information about the values that were
+// passed in. This information could be retrieved from the printf format string,
+// but the format string is not trivial to parse so we encode the relevant
+// information with the HLT instruction.
+const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
+const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
+const unsigned kPrintfLength = 3 * kInstructionSize;
+
+const unsigned kPrintfMaxArgCount = 4;
+
+// The argument pattern is a set of two-bit-fields, each with one of the
+// following values:
+enum PrintfArgPattern {
+ kPrintfArgW = 1,
+ kPrintfArgX = 2,
+ // There is no kPrintfArgS because floats are always converted to doubles in C
+ // varargs calls.
+ kPrintfArgD = 3
+};
+static const unsigned kPrintfArgPatternBits = 2;
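+// For example, for a pseudo-printf of ("%d %f", w1, d2), arg_count is 2 and,
+// assuming the first argument occupies the least-significant two-bit field,
+// the packed pattern is kPrintfArgW | (kPrintfArgD << kPrintfArgPatternBits).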
+
+// A pseudo 'debug' instruction.
+const Instr kImmExceptionIsDebug = 0xdeb0;
+// Parameters are inlined in the code after a debug pseudo-instruction:
+// - Debug code.
+// - Debug parameters.
+// - Debug message string. This is a NULL-terminated ASCII string, padded to
+// kInstructionSize so that subsequent instructions are correctly aligned.
+// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
+// string data.
+const unsigned kDebugCodeOffset = 1 * kInstructionSize;
+const unsigned kDebugParamsOffset = 2 * kInstructionSize;
+const unsigned kDebugMessageOffset = 3 * kInstructionSize;
+
+// Debug parameters.
+// When used without a TRACE_ option, the Debugger prints the arguments only
+// once. Otherwise, TRACE_ENABLE and TRACE_DISABLE enable or disable tracing
+// before every instruction for the specified LOG_ parameters.
+//
+// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
+// others that were not specified.
+//
+// For example:
+//
+// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
+// will print the registers and fp registers only once.
+//
+// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
+// starts disassembling the code.
+//
+// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
+// adds the general purpose registers to the trace.
+//
+// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
+// stops tracing the registers.
+const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
+enum DebugParameters {
+ NO_PARAM = 0,
+ BREAK = 1 << 0,
+ LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
+ LOG_REGS = 1 << 2, // Log general purpose registers.
+ LOG_FP_REGS = 1 << 3, // Log floating-point registers.
+ LOG_SYS_REGS = 1 << 4, // Log the status flags.
+ LOG_WRITE = 1 << 5, // Log any memory write.
+
+ LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
+ LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
+
+ // Trace control.
+ TRACE_ENABLE = 1 << 6,
+ TRACE_DISABLE = 2 << 6,
+ TRACE_OVERRIDE = 3 << 6
+};
+
+
+} } // namespace v8::internal
+
+
+#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_
diff --git a/chromium/v8/src/arm64/instrument-arm64.cc b/chromium/v8/src/arm64/instrument-arm64.cc
new file mode 100644
index 00000000000..631556f7241
--- /dev/null
+++ b/chromium/v8/src/arm64/instrument-arm64.cc
@@ -0,0 +1,595 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/arm64/instrument-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+Counter::Counter(const char* name, CounterType type)
+ : count_(0), enabled_(false), type_(type) {
+ ASSERT(name != NULL);
+ strncpy(name_, name, kCounterNameMaxLength);
+}
+
+
+void Counter::Enable() {
+ enabled_ = true;
+}
+
+
+void Counter::Disable() {
+ enabled_ = false;
+}
+
+
+bool Counter::IsEnabled() {
+ return enabled_;
+}
+
+
+void Counter::Increment() {
+ if (enabled_) {
+ count_++;
+ }
+}
+
+
+uint64_t Counter::count() {
+ uint64_t result = count_;
+ if (type_ == Gauge) {
+ // If the counter is a Gauge, reset the count after reading.
+ count_ = 0;
+ }
+ return result;
+}
+
+
+const char* Counter::name() {
+ return name_;
+}
+
+
+CounterType Counter::type() {
+ return type_;
+}
+
+
+typedef struct {
+ const char* name;
+ CounterType type;
+} CounterDescriptor;
+
+
+static const CounterDescriptor kCounterList[] = {
+ {"Instruction", Cumulative},
+
+ {"Move Immediate", Gauge},
+ {"Add/Sub DP", Gauge},
+ {"Logical DP", Gauge},
+ {"Other Int DP", Gauge},
+ {"FP DP", Gauge},
+
+ {"Conditional Select", Gauge},
+ {"Conditional Compare", Gauge},
+
+ {"Unconditional Branch", Gauge},
+ {"Compare and Branch", Gauge},
+ {"Test and Branch", Gauge},
+ {"Conditional Branch", Gauge},
+
+ {"Load Integer", Gauge},
+ {"Load FP", Gauge},
+ {"Load Pair", Gauge},
+ {"Load Literal", Gauge},
+
+ {"Store Integer", Gauge},
+ {"Store FP", Gauge},
+ {"Store Pair", Gauge},
+
+ {"PC Addressing", Gauge},
+ {"Other", Gauge},
+ {"SP Adjust", Gauge},
+};
+
+
+Instrument::Instrument(const char* datafile, uint64_t sample_period)
+ : output_stream_(stderr), sample_period_(sample_period) {
+
+ // Set up the output stream. If datafile is non-NULL, use that file. If it
+ // can't be opened, or datafile is NULL, use stderr.
+ if (datafile != NULL) {
+ output_stream_ = fopen(datafile, "w");
+ if (output_stream_ == NULL) {
+ fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
+ output_stream_ = stderr;
+ }
+ }
+
+ static const int num_counters =
+ sizeof(kCounterList) / sizeof(CounterDescriptor);
+
+ // Dump an instrumentation description comment at the top of the file.
+ fprintf(output_stream_, "# counters=%d\n", num_counters);
+ fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
+
+ // Construct Counter objects from counter description array.
+ for (int i = 0; i < num_counters; i++) {
+ Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
+ counters_.push_back(counter);
+ }
+
+ DumpCounterNames();
+}
+
+
+Instrument::~Instrument() {
+ // Dump any remaining instruction data to the output file.
+ DumpCounters();
+
+ // Free all the counter objects.
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ delete *it;
+ }
+
+ if (output_stream_ != stderr) {
+ fclose(output_stream_);
+ }
+}
+
+
+void Instrument::Update() {
+ // Increment the instruction counter, and dump all counters if a sample period
+ // has elapsed.
+ static Counter* counter = GetCounter("Instruction");
+ ASSERT(counter->type() == Cumulative);
+ counter->Increment();
+
+ if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
+ DumpCounters();
+ }
+}
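+
+// Note that the "Instruction" counter is Cumulative, so count() does not
+// reset when read and the modulo test in Update() fires once every
+// sample_period_ instructions.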
+
+
+void Instrument::DumpCounters() {
+ // Iterate through the counter objects, dumping their values to the output
+ // stream.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::DumpCounterNames() {
+ // Iterate through the counter objects, dumping the counter names to the
+ // output stream.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ fprintf(output_stream_, "%s,", (*it)->name());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::HandleInstrumentationEvent(unsigned event) {
+ switch (event) {
+ case InstrumentStateEnable: Enable(); break;
+ case InstrumentStateDisable: Disable(); break;
+ default: DumpEventMarker(event);
+ }
+}
+
+
+void Instrument::DumpEventMarker(unsigned marker) {
+  // Dump an event marker to the output stream as a specially formatted
+  // comment line.
+ static Counter* counter = GetCounter("Instruction");
+
+ fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
+ (marker >> 8) & 0xff, counter->count());
+}
+
+
+Counter* Instrument::GetCounter(const char* name) {
+ // Get a Counter object by name from the counter list.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ if (strcmp((*it)->name(), name) == 0) {
+ return *it;
+ }
+ }
+
+ // A Counter by that name does not exist: print an error message to stderr
+ // and the output file, and exit.
+ static const char* error_message =
+ "# Error: Unknown counter \"%s\". Exiting.\n";
+ fprintf(stderr, error_message, name);
+ fprintf(output_stream_, error_message, name);
+ exit(1);
+}
+
+
+void Instrument::Enable() {
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ (*it)->Enable();
+ }
+}
+
+
+void Instrument::Disable() {
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ (*it)->Disable();
+ }
+}
+
+
+void Instrument::VisitPCRelAddressing(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("PC Addressing");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubImmediate(Instruction* instr) {
+ Update();
+ static Counter* sp_counter = GetCounter("SP Adjust");
+ static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+ if (((instr->Mask(AddSubOpMask) == SUB) ||
+ (instr->Mask(AddSubOpMask) == ADD)) &&
+ (instr->Rd() == 31) && (instr->Rn() == 31)) {
+ // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+ sp_counter->Increment();
+ } else {
+ add_sub_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLogicalImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitMoveWideImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Move Immediate");
+
+ if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
+ unsigned imm = instr->ImmMoveWide();
+ HandleInstrumentationEvent(imm);
+ } else {
+ counter->Increment();
+ }
+}
+
+
+void Instrument::VisitBitfield(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitExtract(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCompareBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Compare and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitTestBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Test and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitSystem(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitException(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStorePair(Instruction* instr) {
+ static Counter* load_pair_counter = GetCounter("Load Pair");
+ static Counter* store_pair_counter = GetCounter("Store Pair");
+ if (instr->Mask(LoadStorePairLBit) != 0) {
+ load_pair_counter->Increment();
+ } else {
+ store_pair_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadLiteral(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Load Literal");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStore(Instruction* instr) {
+ static Counter* load_int_counter = GetCounter("Load Integer");
+ static Counter* store_int_counter = GetCounter("Store Integer");
+ static Counter* load_fp_counter = GetCounter("Load FP");
+ static Counter* store_fp_counter = GetCounter("Store FP");
+
+ switch (instr->Mask(LoadStoreOpMask)) {
+ case STRB_w: // Fall through.
+ case STRH_w: // Fall through.
+ case STR_w: // Fall through.
+ case STR_x: store_int_counter->Increment(); break;
+ case STR_s: // Fall through.
+ case STR_d: store_fp_counter->Increment(); break;
+ case LDRB_w: // Fall through.
+ case LDRH_w: // Fall through.
+ case LDR_w: // Fall through.
+ case LDR_x: // Fall through.
+ case LDRSB_x: // Fall through.
+ case LDRSH_x: // Fall through.
+ case LDRSW_x: // Fall through.
+ case LDRSB_w: // Fall through.
+ case LDRSH_w: load_int_counter->Increment(); break;
+ case LDR_s: // Fall through.
+ case LDR_d: load_fp_counter->Increment(); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLogicalShifted(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubShifted(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubExtended(Instruction* instr) {
+ Update();
+ static Counter* sp_counter = GetCounter("SP Adjust");
+ static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+ if (((instr->Mask(AddSubOpMask) == SUB) ||
+ (instr->Mask(AddSubOpMask) == ADD)) &&
+ (instr->Rd() == 31) && (instr->Rn() == 31)) {
+ // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+ sp_counter->Increment();
+ } else {
+ add_sub_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitAddSubWithCarry(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalSelect(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing1Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing2Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing3Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPCompare(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalCompare(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalSelect(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPIntegerConvert(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnallocated(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnimplemented(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/arm64/instrument-arm64.h b/chromium/v8/src/arm64/instrument-arm64.h
new file mode 100644
index 00000000000..74583108542
--- /dev/null
+++ b/chromium/v8/src/arm64/instrument-arm64.h
@@ -0,0 +1,84 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
+#define V8_ARM64_INSTRUMENT_ARM64_H_
+
+#include "src/globals.h"
+#include "src/utils.h"
+#include "src/arm64/decoder-arm64.h"
+#include "src/arm64/constants-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+const int kCounterNameMaxLength = 256;
+const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
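+// (1 << 22 = 4194304, so counters are dumped roughly every 4.2 million
+// instructions by default.)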
+
+
+enum InstrumentState {
+ InstrumentStateDisable = 0,
+ InstrumentStateEnable = 1
+};
+
+
+enum CounterType {
+ Gauge = 0, // Gauge counters reset themselves after reading.
+ Cumulative = 1 // Cumulative counters keep their value after reading.
+};
+
+
+class Counter {
+ public:
+ Counter(const char* name, CounterType type = Gauge);
+
+ void Increment();
+ void Enable();
+ void Disable();
+ bool IsEnabled();
+ uint64_t count();
+ const char* name();
+ CounterType type();
+
+ private:
+ char name_[kCounterNameMaxLength];
+ uint64_t count_;
+ bool enabled_;
+ CounterType type_;
+};
+
+
+class Instrument: public DecoderVisitor {
+ public:
+ explicit Instrument(const char* datafile = NULL,
+ uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
+ ~Instrument();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ void Update();
+ void Enable();
+ void Disable();
+ void DumpCounters();
+ void DumpCounterNames();
+ void DumpEventMarker(unsigned marker);
+ void HandleInstrumentationEvent(unsigned event);
+ Counter* GetCounter(const char* name);
+
+ void InstrumentLoadStore(Instruction* instr);
+ void InstrumentLoadStorePair(Instruction* instr);
+
+ std::list<Counter*> counters_;
+
+  FILE* output_stream_;
+ uint64_t sample_period_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_INSTRUMENT_ARM64_H_
diff --git a/chromium/v8/src/arm64/lithium-arm64.cc b/chromium/v8/src/arm64/lithium-arm64.cc
new file mode 100644
index 00000000000..8446edfae79
--- /dev/null
+++ b/chromium/v8/src/arm64/lithium-arm64.cc
@@ -0,0 +1,2725 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/lithium-allocator-inl.h"
+#include "src/arm64/lithium-arm64.h"
+#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(),
+ true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if ((parallel_moves_[i] != NULL) && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ hydrogen()->access().PrintTo(stream);
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("%p -> %p", *original_map(), *transitioned_map());
+}
+
+
+template<int T>
+void LUnaryMathOperation<T>::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
+ case Token::SHL: return "shl-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void LChunkBuilder::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAndClobber(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant() ? UseConstant(value) : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant() ? UseConstant(value) : UseRegisterAtStart(value);
+}
+
+
+LConstantOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? UseConstant(value)
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr, int index) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+ LTemplateResultInstruction<1>* instr, Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
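+// MarkAsCall flags an instruction as a call site. Calls always get a
+// pointer map so that live tagged values can be found across the call, and
+// usually also an environment, because lazy deoptimization after a call
+// without observable side effects rolls back to the point before the call.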
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+ // If the instruction does not have side effects, lazy deoptimization
+ // after the call will try to deoptimize to the point before the call.
+ // Thus we still need to attach an environment to this call even if
+ // the call sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
+ }
+
+ return instr;
+}
+
+
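+// A pointer map records which operands hold tagged pointers at this
+// instruction, so the GC can find and update them at a safepoint.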
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+ return instr;
+}
+
+
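+// Temps are fresh virtual registers with a MUST_HAVE_REGISTER policy. If
+// the allocator runs out of virtual registers, the chunk build is aborted
+// and the dummy vreg 0 keeps the data structures consistent.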
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+int LPlatformChunk::GetNextSpillIndex() {
+ return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex();
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ ASSERT(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ ASSERT(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new(zone()) LPlatformChunk(info_, graph_);
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ // TODO(all): GetNextSpillIndex just increments a field. It has no other
+ // side effects, so we should get rid of this loop.
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex();
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ DoBasicBlock(blocks->at(i));
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
+ ASSERT(is_building());
+ current_block_ = block;
+
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+
+ // Only copy the environment if it is used again later.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
+ (pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment; it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+
+ // Translate hydrogen instructions to lithium ones for the current block.
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while ((current != NULL) && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ current_block_ = NULL;
+}
+
+
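+// Translates a single hydrogen instruction to lithium. Values that can be
+// replaced with dummy uses become LDummy/LDummyUse placeholders, control
+// instructions with a statically known successor collapse to an LGoto, and
+// everything else goes through CompileToLithium.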
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ AddInstruction(instr, current);
+ }
+
+ current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+ // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
+}
+
+
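+// Snapshots the current block's last environment into the instruction.
+// This is the state the deoptimizer uses to rebuild the unoptimized frame
+// if the instruction bails out.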
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator,
+ &objects_to_materialize));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
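+// Double arithmetic maps directly onto ARM64 FP instructions, except for
+// MOD, which has no machine instruction and is compiled as a call; its
+// operands and result therefore live in the fixed argument/result
+// registers d0 and d1 expected by the call sequence.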
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = UseFixedDouble(instr->right(), d1);
+ LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
+ (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
+ (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
+ (op == Token::BIT_OR) || (op == Token::BIT_AND) ||
+ (op == Token::BIT_XOR));
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+
+ // TODO(jbramley): Once we've implemented smi support for all arithmetic
+ // operations, these assertions should check IsTagged().
+ ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(left->representation().IsSmiOrTagged());
+ ASSERT(right->representation().IsSmiOrTagged());
+
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left_operand = UseFixed(left, x1);
+ LOperand* right_operand = UseFixed(right, x0);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* args = NULL;
+ LOperand* length = NULL;
+ LOperand* index = NULL;
+
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ args = UseRegisterAtStart(instr->arguments());
+ length = UseConstant(instr->length());
+ index = UseConstant(instr->index());
+ } else {
+ args = UseRegister(instr->arguments());
+ length = UseRegisterAtStart(instr->length());
+ index = UseRegisterOrConstantAtStart(instr->index());
+ }
+
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+ if (shifted_operation != NULL) {
+ return shifted_operation;
+ }
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right =
+ UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ LInstruction* result = instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LAddS(left, right)) :
+ DefineAsRegister(new(zone()) LAddI(left, right));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return DefineAsRegister(new(zone()) LAddE(left, right));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseRegisterOrConstant(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2, temp3);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), x1);
+ LOperand* receiver = UseFixed(instr->receiver(), x0);
+ LOperand* length = UseFixed(instr->length(), x2);
+ LOperand* elements = UseFixed(instr->elements(), x3);
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* temp = instr->from_inlined() ? NULL : TempRegister();
+ return DefineAsRegister(new(zone()) LArgumentsElements(temp));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+ if (shifted_operation != NULL) {
+ return shifted_operation;
+ }
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right =
+ UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ return instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LBitS(left, right)) :
+ DefineAsRegister(new(zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ // V8 expects a label to be generated for each basic block.
+ // This is used in some places like LAllocator::IsBlockBoundary
+ // in lithium-allocator.cc.
+ return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseRegisterOrConstantAtStart(instr->length())
+ : UseRegisterAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+
+ if (r.IsInteger32() || r.IsSmi() || r.IsDouble()) {
+ // These representations have simple checks that cannot deoptimize.
+ return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+ } else {
+ ASSERT(r.IsTagged());
+ if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
+ type.IsHeapNumber()) {
+ // These types have simple checks that cannot deoptimize.
+ return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+ }
+
+ if (type.IsString()) {
+ // This type cannot deoptimize, but needs a scratch register.
+ return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
+ }
+
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
+ LOperand* temp1 = needs_temps ? TempRegister() : NULL;
+ LOperand* temp2 = needs_temps ? TempRegister() : NULL;
+
+ if (expected.IsGeneric() || expected.IsEmpty()) {
+ // The generic case cannot deoptimize because it already supports every
+ // possible input type.
+ ASSERT(needs_temps);
+ return new(zone()) LBranch(UseRegister(value), temp1, temp2);
+ } else {
+ return AssignEnvironment(
+ new(zone()) LBranch(UseRegister(value), temp1, temp2));
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), x1);
+
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
+ ops,
+ zone());
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), x1);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The call to CallConstructStub will expect the constructor to be in x1.
+ LOperand* constructor = UseFixed(instr->constructor(), x1);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The call to ArrayConstructCode will expect the constructor to be in x1.
+ LOperand* constructor = UseFixed(instr->constructor(), x1);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
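+// Representation changes: smi -> tagged is free (a dummy use); tagged ->
+// double/smi/int32 may need an untagging step that can deoptimize when the
+// input is not known to be a smi; double -> tagged and uint32 -> tagged can
+// allocate a heap number, so they are deferred calls with a pointer map;
+// the int32 cases also depend on whether the value is flagged as uint32.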
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+ HValue* val = instr->value();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(val);
+ LOperand* temp = TempRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new(zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = instr->CanTruncateToInt32()
+ ? NULL : TempDoubleRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ } else {
+ ASSERT(to.IsSmi() || to.IsInteger32());
+ if (instr->CanTruncateToInt32()) {
+ LOperand* value = UseRegister(val);
+ return DefineAsRegister(new(zone()) LTruncateDoubleToIntOrSmi(value));
+ } else {
+ LOperand* value = UseRegister(val);
+ LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegister(val);
+ LNumberTagU* result =
+ new(zone()) LNumberTagU(value, TempRegister(), TempRegister());
+ return AssignPointerMap(DefineAsRegister(result));
+ } else {
+ STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
+ (kMaxInt == Smi::kMaxValue));
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegisterAtStart(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else {
+ ASSERT(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegisterAtStart(val)));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(UseRegisterAtStart(val)));
+ }
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LInstruction* result = new(zone()) LCheckInstanceType(value, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value, temp));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsSmiOrTagged());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LClampTToUint8(reg,
+ TempRegister(),
+ TempDoubleRegister())));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LClassOfTestAndBranch(value,
+ TempRegister(),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ // TODO(all): In fact, the only case that we can handle more efficiently is
+ // when one of the operands is the constant 0. Currently the MacroAssembler
+ // will be able to cope with any constant by loading it into an internal
+ // scratch register. This means that if the constant is used more than once,
+ // it will be loaded multiple times. Unfortunately, crankshaft already
+ // duplicates constant loads, but we should modify the code below once this
+ // issue has been addressed in crankshaft.
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->representation().IsTagged()) {
+ return new(zone()) LCmpHoleAndBranchT(value);
+ } else {
+ LOperand* temp = TempRegister();
+ return new(zone()) LCmpHoleAndBranchD(value, temp);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new(zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new(zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* object = UseFixed(instr->value(), x0);
+ LDateField* result = new(zone()) LDateField(object, instr->index());
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
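+// Division by a power of two compiles down to shifts, but it still needs
+// an environment when it can observe -0 (negative divisor), can overflow
+// (kMinInt / -1), or when some use requires the exact, untruncated
+// quotient.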
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+ ? NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+ ? NULL : TempRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+ if (!instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
+ instr->function(),
+ undefined,
+ instr->inlining_kind());
+ // Only replay the binding of the arguments object if it wasn't removed
+ // from the graph.
+ if ((instr->arguments_var() != NULL) &&
+ instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(
+ HForceRepresentation* instr) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LInstanceOf* result = new(zone()) LInstanceOf(
+ context,
+ UseFixed(instr->left(), InstanceofStub::left()),
+ UseFixed(instr->right(), InstanceofStub::right()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result = new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), InstanceofStub::left()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
+ LOperand* function = UseFixed(instr->function(), x1);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new(zone()) LIsConstructCallAndBranch(TempRegister(), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ return new(zone()) LIsObjectAndBranch(value, temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LDrop(argument_count);
+ ASSERT(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer =
+ current_block_->last_environment()->DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+
+ return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ LOperand* function = UseRegister(instr->function());
+ LOperand* temp = TempRegister();
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LLoadFunctionPrototype(function, temp)));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell();
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* global_object = UseFixed(instr->global_object(), x0);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* elements = UseRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+
+ if (!instr->is_typed_elements()) {
+ if (instr->representation().IsDouble()) {
+ LOperand* temp = (!instr->key()->IsConstant() ||
+ instr->RequiresHoleCheck())
+ ? TempRegister()
+ : NULL;
+
+ LLoadKeyedFixedDouble* result =
+ new(zone()) LLoadKeyedFixedDouble(elements, key, temp);
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+ } else {
+ ASSERT(instr->representation().IsSmiOrTagged() ||
+ instr->representation().IsInteger32());
+ LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ LLoadKeyedFixed* result =
+ new(zone()) LLoadKeyedFixed(elements, key, temp);
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+ }
+ } else {
+ ASSERT((instr->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+
+ LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LLoadKeyedExternal(elements, key, temp));
+ if ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x1);
+ LOperand* key = UseFixed(instr->key(), x0);
+
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), x0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(object));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), x0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* remainder = TempRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LFlooringDivI(dividend, divisor, remainder));
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp = TempRegister();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor, temp));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
+ } else {
+ return DoModI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ HValue* least_const = instr->BetterLeftOperand();
+ HValue* most_const = instr->BetterRightOperand();
+
+ // LMulConstIS can handle a subset of constants:
+ // With support for overflow detection:
+ // -1, 0, 1, 2
+ // 2^n, -(2^n)
+ // Without support for overflow detection:
+ // 2^n + 1, -(2^n + 1), 2^n - 1, -(2^n - 1)
+ if (most_const->IsConstant()) {
+ int32_t constant = HConstant::cast(most_const)->Integer32Value();
+ bool small_constant = (constant >= -1) && (constant <= 2);
+ bool end_range_constant = (constant <= -kMaxInt) || (constant == kMaxInt);
+ int32_t constant_abs = Abs(constant);
+
+ if (!end_range_constant &&
+ (small_constant ||
+ (IsPowerOf2(constant_abs)) ||
+ (!can_overflow && (IsPowerOf2(constant_abs + 1) ||
+ IsPowerOf2(constant_abs - 1))))) {
+ LConstantOperand* right = UseConstant(most_const);
+ bool need_register = IsPowerOf2(constant_abs) && !small_constant;
+ LOperand* left = need_register ? UseRegister(least_const)
+ : UseRegisterAtStart(least_const);
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LMulConstIS(left, right));
+ if ((bailout_on_minus_zero && constant <= 0) || can_overflow) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ }
+ }
+
+ // LMulI/S can handle all cases, but it requires that a register is
+ // allocated for the second operand.
+ LOperand* left = UseRegisterAtStart(least_const);
+ LOperand* right = UseRegisterAtStart(most_const);
+ LInstruction* result = instr->representation().IsSmi()
+ ? DefineAsRegister(new(zone()) LMulS(left, right))
+ : DefineAsRegister(new(zone()) LMulI(left, right));
+ if ((bailout_on_minus_zero && least_const != most_const) || can_overflow) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk_->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor();
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor->GetParameterRegister(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+ // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = exponent_type.IsInteger32()
+ ? UseFixed(instr->right(), x12)
+ : exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), d1)
+ : UseFixed(instr->right(), x11);
+ LPower* result = new(zone()) LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
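+// Arguments are pushed in batches: LPreparePushArguments is emitted first
+// with the total count, then each LPushArguments carries a bounded number
+// of arguments, starting a fresh batch whenever ShouldSplitPush() reports
+// that the current one is full.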
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ AddInstruction(new(zone()) LPreparePushArguments(argc), instr);
+
+ LPushArguments* push_args = new(zone()) LPushArguments(zone());
+
+ for (int i = 0; i < argc; ++i) {
+ if (push_args->ShouldSplitPush()) {
+ AddInstruction(push_args, instr);
+ push_args = new(zone()) LPushArguments(zone());
+ }
+ push_args->AddArgument(UseRegister(instr->argument(i)));
+ }
+
+ return push_args;
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegisterAndClobber(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), x0), context,
+ parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* temp = TempRegister();
+ LSeqStringGetChar* result =
+ new(zone()) LSeqStringGetChar(string, index, temp);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegister(instr->index())
+ : UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ LOperand* temp = TempRegister();
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(context, string, index, value, temp);
+ return DefineAsRegister(result);
+}
+
+
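+// ARM64 arithmetic and logical instructions can shift their second operand
+// for free (e.g. "add x0, x1, x2, LSL #3"). The helpers below recognize an
+// add/sub/bitwise operation whose right (or, if commutative, left) operand
+// is a shift by a constant amount, so that the shift can be folded into
+// the operation instead of being emitted as a separate instruction.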
+HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
+ HValue** left) {
+ if (!val->representation().IsInteger32()) return NULL;
+ if (!(val->IsBitwise() || val->IsAdd() || val->IsSub())) return NULL;
+
+ HBinaryOperation* hinstr = HBinaryOperation::cast(val);
+ HValue* hleft = hinstr->left();
+ HValue* hright = hinstr->right();
+ ASSERT(hleft->representation().Equals(hinstr->representation()));
+ ASSERT(hright->representation().Equals(hinstr->representation()));
+
+ if ((hright->IsConstant() &&
+ LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
+ (hinstr->IsCommutative() && hleft->IsConstant() &&
+ LikelyFitsImmField(hinstr, HConstant::cast(hleft)->Integer32Value()))) {
+ // The constant operand will likely fit in the immediate field. We are
+ // better off with
+ // lsl x8, x9, #imm
+ // add x0, x8, #imm2
+ // than with
+ // mov x16, #imm2
+ // add x0, x16, x9 LSL #imm
+ return NULL;
+ }
+
+ HBitwiseBinaryOperation* shift = NULL;
+  // TODO(aleram): We will miss situations where a shift operation is used by
+  // different instructions both as a left and as a right operand.
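+  // For example, if both operands of an add are shifts, only the right-hand
+  // shift is matched below, so the left-hand one is never folded.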
+ if (hright->IsBitwiseBinaryShift() &&
+ HBitwiseBinaryOperation::cast(hright)->right()->IsConstant()) {
+ shift = HBitwiseBinaryOperation::cast(hright);
+ if (left != NULL) {
+ *left = hleft;
+ }
+ } else if (hinstr->IsCommutative() &&
+ hleft->IsBitwiseBinaryShift() &&
+ HBitwiseBinaryOperation::cast(hleft)->right()->IsConstant()) {
+ shift = HBitwiseBinaryOperation::cast(hleft);
+ if (left != NULL) {
+ *left = hright;
+ }
+ } else {
+ return NULL;
+ }
+
+  if ((JSShiftAmountFromHConstant(shift->right()) == 0) && shift->IsShr()) {
+    // Logical shifts right by zero can deoptimize: the untruncated result is
+    // an unsigned value which may not be representable as an int32.
+    return NULL;
+  }
+
+ return shift;
+}
+
+
+bool LChunkBuilder::ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift) {
+ if (!shift->representation().IsInteger32()) {
+ return false;
+ }
+ for (HUseIterator it(shift->uses()); !it.Done(); it.Advance()) {
+ if (shift != CanTransformToShiftedOp(it.value())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand(
+ HBinaryOperation* instr) {
+ HValue* left;
+ HBitwiseBinaryOperation* shift = CanTransformToShiftedOp(instr, &left);
+
+ if ((shift != NULL) && ShiftCanBeOptimizedAway(shift)) {
+ return DoShiftedBinaryOp(instr, left, shift);
+ }
+ return NULL;
+}
+
+
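+// Emits the fused form of (hleft op (hshift->left() <shift> #imm)), e.g.
+//   add x0, x1, x2, lsl #3
+// so the shift is folded into the ALU operation instead of occupying its own
+// instruction and register.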
+LInstruction* LChunkBuilder::DoShiftedBinaryOp(
+ HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
+ ASSERT(hshift->IsBitwiseBinaryShift());
+ ASSERT(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
+
+ LTemplateResultInstruction<1>* res;
+ LOperand* left = UseRegisterAtStart(hleft);
+ LOperand* right = UseRegisterAtStart(hshift->left());
+ LOperand* shift_amount = UseConstant(hshift->right());
+ Shift shift_op;
+ switch (hshift->opcode()) {
+ case HValue::kShl: shift_op = LSL; break;
+ case HValue::kShr: shift_op = LSR; break;
+ case HValue::kSar: shift_op = ASR; break;
+ default: UNREACHABLE(); shift_op = NO_SHIFT;
+ }
+
+ if (hinstr->IsBitwise()) {
+ res = new(zone()) LBitI(left, right, shift_op, shift_amount);
+ } else if (hinstr->IsAdd()) {
+ res = new(zone()) LAddI(left, right, shift_op, shift_amount);
+ } else {
+ ASSERT(hinstr->IsSub());
+ res = new(zone()) LSubI(left, right, shift_op, shift_amount);
+ }
+ if (hinstr->CheckFlag(HValue::kCanOverflow)) {
+ AssignEnvironment(res);
+ }
+ return DefineAsRegister(res);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ return DoArithmeticT(op, instr);
+ }
+
+ ASSERT(instr->representation().IsInteger32() ||
+ instr->representation().IsSmi());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ if (ShiftCanBeOptimizedAway(instr)) {
+ return NULL;
+ }
+
+ LOperand* left = instr->representation().IsSmi()
+ ? UseRegister(instr->left())
+ : UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ LOperand* temp = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ right = UseConstant(right_value);
+ constant_value = JSShiftAmountFromHConstant(right_value);
+ } else {
+ right = UseRegisterAtStart(right_value);
+ if (op == Token::ROR) {
+ temp = TempRegister();
+ }
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift by 0 and the
+ // result cannot be truncated to int32.
+ bool does_deopt = false;
+ if ((op == Token::SHR) && (constant_value == 0)) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result;
+ if (instr->representation().IsInteger32()) {
+ result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ } else {
+ ASSERT(instr->representation().IsSmi());
+ result = DefineAsRegister(
+ new(zone()) LShiftS(op, left, right, temp, does_deopt));
+ }
+
+ return does_deopt ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
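+// HSimulate produces no code; replaying its environment simply records the
+// deopt state that subsequent instructions will use.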
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) {
+ LOperand* function = UseRegister(instr->function());
+ LOperand* code_object = UseRegisterAtStart(instr->code_object());
+ LOperand* temp = TempRegister();
+ return new(zone()) LStoreCodeEntry(function, code_object, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* temp = TempRegister();
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ // TODO(all): Replace these constraints when RecordWriteStub has been
+ // rewritten.
+ context = UseRegisterAndClobber(instr->context());
+ value = UseRegisterAndClobber(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->RequiresHoleCheck()) {
+ return AssignEnvironment(new(zone()) LStoreGlobalCell(value,
+ TempRegister(),
+ TempRegister()));
+ } else {
+ return new(zone()) LStoreGlobalCell(value, TempRegister(), NULL);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* temp = NULL;
+ LOperand* elements = NULL;
+ LOperand* val = NULL;
+
+ if (!instr->is_typed_elements() &&
+ instr->value()->representation().IsTagged() &&
+ instr->NeedsWriteBarrier()) {
+ // RecordWrite() will clobber all registers.
+ elements = UseRegisterAndClobber(instr->elements());
+ val = UseRegisterAndClobber(instr->value());
+ temp = TempRegister();
+ } else {
+ elements = UseRegister(instr->elements());
+ val = UseRegister(instr->value());
+ temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ }
+
+ if (instr->is_typed_elements()) {
+ ASSERT((instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
+ return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
+
+ } else if (instr->value()->representation().IsDouble()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
+
+ } else {
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged() ||
+ instr->value()->representation().IsInteger32());
+ return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x2);
+ LOperand* key = UseFixed(instr->key(), x1);
+ LOperand* value = UseFixed(instr->value(), x0);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ // TODO(jbramley): It might be beneficial to allow value to be a constant in
+ // some cases. x64 makes use of this with FLAG_track_fields, for example.
+
+ LOperand* object = UseRegister(instr->object());
+ LOperand* value;
+ LOperand* temp0 = NULL;
+ LOperand* temp1 = NULL;
+
+ if (instr->access().IsExternalMemory() ||
+ instr->field_representation().IsDouble()) {
+ value = UseRegister(instr->value());
+ } else if (instr->NeedsWriteBarrier()) {
+ value = UseRegisterAndClobber(instr->value());
+ temp0 = TempRegister();
+ temp1 = TempRegister();
+ } else if (instr->NeedsWriteBarrierForMap()) {
+ value = UseRegister(instr->value());
+ temp0 = TempRegister();
+ temp1 = TempRegister();
+ } else {
+ value = UseRegister(instr->value());
+ temp0 = TempRegister();
+ }
+
+ return new(zone()) LStoreNamedField(object, value, temp0, temp1);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x1);
+ LOperand* value = UseFixed(instr->value(), x0);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+
+ LStringAdd* result = new(zone()) LStringAdd(context, left, right);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseRegisterAndClobber(instr->string());
+ LOperand* index = UseRegisterAndClobber(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+ LStringCompareAndBranch* result =
+ new(zone()) LStringCompareAndBranch(context, left, right);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+ if (shifted_operation != NULL) {
+ return shifted_operation;
+ }
+
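+    // A zero left-hand constant is kept as a constant operand, presumably so
+    // that the code generator can emit a negation instead of materializing
+    // zero in a register.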
+    LOperand* left;
+ if (instr->left()->IsConstant() &&
+ (HConstant::cast(instr->left())->Integer32Value() == 0)) {
+ left = UseConstant(instr->left());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ }
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ LInstruction* result = instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LSubS(left, right)) :
+ DefineAsRegister(new(zone()) LSubI(left, right));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ if (instr->HasNoUses()) {
+ return NULL;
+ } else {
+ return DefineAsRegister(new(zone()) LThisFunction);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), x0);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
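+// A simple map-change transition only replaces the object's map pointer, so
+// it can be generated inline; any other transition is compiled as a call
+// (hence MarkAsCall below).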
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL,
+ TempRegister(), TempRegister());
+ return result;
+ } else {
+ LOperand* object = UseFixed(instr->object(), x0);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+ return MarkAsCall(result, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp1, temp2);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+  // TODO(jbramley): On ARM, the equivalent code uses UseFixed to force the
+  // input into r0. However, LCodeGen::DoTypeof just pushes it to the stack
+  // (for CallRuntime) anyway, so the input doesn't have to be in a fixed
+  // register. We might be able to improve the ARM back-end a little by
+  // relaxing this restriction.
+ LTypeof* result =
+ new(zone()) LTypeof(context, UseRegisterAtStart(instr->value()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ // We only need temp registers in some cases, but we can't dereference the
+ // instr->type_literal() handle to test that here.
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ return new(zone()) LTypeofIsAndBranch(
+ UseRegister(instr->value()), temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathAbs: {
+ Representation r = instr->representation();
+ if (r.IsTagged()) {
+ // The tagged case might need to allocate a HeapNumber for the result,
+ // so it is handled by a separate LInstruction.
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3));
+ return AssignEnvironment(AssignPointerMap(result));
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LInstruction* result = DefineAsRegister(new(zone()) LMathAbs(input));
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
+ }
+ }
+ case kMathExp: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* double_temp1 = TempDoubleRegister();
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(input, double_temp1,
+ temp1, temp2, temp3);
+ return DefineAsRegister(result);
+ }
+ case kMathFloor: {
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ if (instr->representation().IsInteger32()) {
+ LMathFloorI* result = new(zone()) LMathFloorI(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ LMathFloorD* result = new(zone()) LMathFloorD(input);
+ return DefineAsRegister(result);
+ }
+ }
+ case kMathLog: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ LMathLog* result = new(zone()) LMathLog(input);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ }
+ case kMathPowHalf: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ return DefineAsRegister(new(zone()) LMathPowHalf(input));
+ }
+ case kMathRound: {
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ if (instr->representation().IsInteger32()) {
+ LOperand* temp = TempDoubleRegister();
+ LMathRoundI* result = new(zone()) LMathRoundI(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ LMathRoundD* result = new(zone()) LMathRoundD(input);
+ return DefineAsRegister(result);
+ }
+ }
+ case kMathSqrt: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathSqrt(input));
+ }
+ case kMathClz32: {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->value()->representation().IsInteger32());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathClz32(input));
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk_->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
+ }
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // Assign object to a fixed register different from those already used in
+ // LForInPrepareMap.
+ LOperand* object = UseFixed(instr->enumerable(), x0);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegister(instr->map());
+ LOperand* temp = TempRegister();
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map, temp));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ LOperand* index = UseRegisterAndClobber(instr->index());
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegister(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/arm64/lithium-arm64.h b/chromium/v8/src/arm64/lithium-arm64.h
new file mode 100644
index 00000000000..18dd927d815
--- /dev/null
+++ b/chromium/v8/src/arm64/lithium-arm64.h
@@ -0,0 +1,3248 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_LITHIUM_ARM64_H_
+#define V8_ARM64_LITHIUM_ARM64_H_
+
+#include "src/hydrogen.h"
+#include "src/lithium-allocator.h"
+#include "src/lithium.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddE) \
+ V(AddI) \
+ V(AddS) \
+ V(Allocate) \
+ V(AllocateBlockContext) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BitS) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallFunction) \
+ V(CallJSFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CallWithDescriptor) \
+ V(CheckInstanceType) \
+ V(CheckMapValue) \
+ V(CheckMaps) \
+ V(CheckNonSmi) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CmpHoleAndBranchD) \
+ V(CmpHoleAndBranchT) \
+ V(CmpMapAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpT) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToIntOrSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyedExternal) \
+ V(LoadKeyedFixed) \
+ V(LoadKeyedFixedDouble) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadRoot) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathAbsTagged) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloorD) \
+ V(MathFloorI) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRoundD) \
+ V(MathRoundI) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulConstIS) \
+ V(MulI) \
+ V(MulS) \
+ V(NumberTagD) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PreparePushArguments) \
+ V(PushArguments) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(ShiftS) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreFrameContext) \
+ V(StoreGlobalCell) \
+ V(StoreKeyedExternal) \
+ V(StoreKeyedFixed) \
+ V(StoreKeyedFixedDouble) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(SubS) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(TruncateDoubleToIntOrSmi) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
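+// LITHIUM_CONCRETE_INSTRUCTION_LIST is an X-macro: passing it a macro V
+// expands V once per concrete instruction. It is used below to declare the
+// Opcode enum, the Is##type() predicates, and (via
+// DECLARE_CONCRETE_INSTRUCTION) the per-class boilerplate.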
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
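+// For example, DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") inside LAddI
+// defines opcode() as LInstruction::kAddI and Mnemonic() as "add-i",
+// declares CompileToNative(), and provides a checked LAddI::cast().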
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(this->hydrogen_value()); \
+ }
+
+
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) { }
+
+ virtual ~LInstruction() { }
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
+ bool IsMarkedAsCall() const { return IsCall(); }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ private:
+ class IsCallBits: public BitField<bool, 0, 1> {};
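+  // bit_field_ (below) packs boolean flags; IsCallBits occupies bit 0 and
+  // BitField supplies the encode()/decode()/update() helpers used by
+  // MarkAsCall() and IsCall().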
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int32_t bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return (R != 0) && (result() != NULL);
+ }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+};
+
+
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
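+// Control instructions produce no result (the result count is 0); they end a
+// basic block and branch to one of the successors recorded on the hydrogen
+// instruction.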
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ DECLARE_HYDROGEN_ACCESSOR(ControlInstruction);
+
+ Label* false_label_;
+ Label* true_label_;
+};
+
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block)
+ : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ // Can't use the DECLARE-macro here because of sub-classes.
+ virtual bool IsGap() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
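+  // Parallel moves are allocated lazily: each slot stays NULL until the
+  // register allocator first requests a move at that position.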
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+
+class LInstructionGap V8_FINAL : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+
+ int block_id() const { return block_->block_id(); }
+
+ private:
+ HBasicBlock* block_;
+};
+
+
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
+ }
+ int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+ int gap_instructions_size_;
+};
+
+
+class LLabel V8_FINAL : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry() {}
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments,
+ LOperand* length,
+ LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddE(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right)
+ : shift_(NO_SHIFT), shift_amount_(0) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LAddI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+ : shift_(shift), shift_amount_(shift_amount) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Shift shift() const { return shift_; }
+ LOperand* shift_amount() const { return shift_amount_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+
+ protected:
+ Shift shift_;
+ LOperand* shift_amount_;
+};
+
+
+class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+ public:
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+ public:
+ explicit LArgumentsElements(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+ Token::Value op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ explicit LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right)
+ : shift_(NO_SHIFT), shift_amount_(0) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LBitI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+ : shift_(shift), shift_amount_(shift_amount) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Shift shift() const { return shift_; }
+ LOperand* shift_amount() const { return shift_amount_; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ protected:
+ Shift shift_;
+ LOperand* shift_amount_;
+};
+
+
+class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitS, "bit-s")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+  explicit LBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckValue(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LClampTToUint8(LOperand* unclamped, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchD, "cmp-hole-and-branch-d")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranchT(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchT, "cmp-hole-and-branch-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->representation().IsDouble();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDateField(LOperand* date, Smi* index) : index_(index) {
+ inputs_[0] = date;
+ }
+
+ LOperand* date() { return inputs_[0]; }
+ Smi* index() const { return index_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ private:
+ Smi* index_;
+};
+
+
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToIntOrSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToIntOrSmi, "double-to-int-or-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 1> {
+ public:
+ LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+ return lazy_deopt_env_;
+ }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
+ lazy_deopt_env_ = env;
+ }
+
+ private:
+ LEnvironment* lazy_deopt_env_;
+};
+
+
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
+ LOperand* target() const { return inputs_[0]; }
+
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+};
+
+
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
+ public:
+ LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() const { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ inputs_[0] = function;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
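+// In LTemplateInstruction<R, I, T> the template arguments give the number of
+// results, inputs and temps, so the templates below vary only in how many
+// temporary operands a concrete subclass needs.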
+template<int T>
+class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key) {
+ this->inputs_[0] = elements;
+ this->inputs_[1] = key;
+ }
+
+ LOperand* elements() { return this->inputs_[0]; }
+ LOperand* key() { return this->inputs_[1]; }
+ ElementsKind elements_kind() const {
+ return this->hydrogen()->elements_kind();
+ }
+ bool is_external() const {
+ return this->hydrogen()->is_external();
+ }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ uint32_t base_offset() const {
+ return this->hydrogen()->base_offset();
+ }
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ this->elements()->PrintTo(stream);
+ stream->Add("[");
+ this->key()->PrintTo(stream);
+ if (this->base_offset() != 0) {
+ stream->Add(" + %d]", this->base_offset());
+ } else {
+ stream->Add("]");
+ }
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+};
+
+
+class LLoadKeyedExternal : public LLoadKeyed<1> {
+ public:
+ LLoadKeyedExternal(LOperand* elements, LOperand* key, LOperand* temp)
+ : LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedExternal, "load-keyed-external")
+};
+
+
+class LLoadKeyedFixed : public LLoadKeyed<1> {
+ public:
+ LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp)
+ : LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixed, "load-keyed-fixed")
+};
+
+
+class LLoadKeyedFixedDouble : public LLoadKeyed<1> {
+ public:
+ LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp)
+ : LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixedDouble, "load-keyed-fixed-double")
+};
+
+
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+};
+
+
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+template<int T>
+class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
+ public:
+ explicit LUnaryMathOperation(LOperand* value) {
+ this->inputs_[0] = value;
+ }
+
+ LOperand* value() { return this->inputs_[0]; }
+ BuiltinFunctionId op() const { return this->hydrogen()->op(); }
+
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+};
+
+
+class LMathAbsTagged : public LTemplateInstruction<1, 2, 3> {
+ public:
+ LMathAbsTagged(LOperand* context, LOperand* value,
+ LOperand* temp1, LOperand* temp2, LOperand* temp3) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbsTagged, "math-abs-tagged")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp1,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3)
+ : LUnaryMathOperation<4>(value) {
+ temps_[0] = double_temp1;
+ temps_[1] = temp1;
+ temps_[2] = temp2;
+ temps_[3] = temp3;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* double_temp1() { return temps_[0]; }
+ LOperand* temp1() { return temps_[1]; }
+ LOperand* temp2() { return temps_[2]; }
+ LOperand* temp3() { return temps_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+// Math.floor with a double result.
+class LMathFloorD V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathFloorD(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
+};
+
+
+// Math.floor with an integer result.
+class LMathFloorI V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathFloorI(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
+};
+
+
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMathLog V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathClz32(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+// Math.round with a double result.
+class LMathRoundD V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathRoundD(LOperand* value)
+ : LUnaryMathOperation<0>(value) {
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+};
+
+
+// Math.round with an integer result.
+class LMathRoundI V8_FINAL : public LUnaryMathOperation<1> {
+ public:
+ LMathRoundI(LOperand* value, LOperand* temp1)
+ : LUnaryMathOperation<1>(value) {
+ temps_[0] = temp1;
+ }
+
+ LOperand* temp1() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
+};
+
+
+class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LModI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulConstIS(LOperand* left, LConstantOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LConstantOperand* right() { return LConstantOperand::cast(inputs_[1]); }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulConstIS, "mul-const-i-s")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulS, "mul-s")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
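+// Boxes an untagged double into a freshly allocated HeapNumber; NumberUntagD
+// (double-untag) below performs the inverse conversion.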
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberUntagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LPreparePushArguments V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LPreparePushArguments(int argc) : argc_(argc) {}
+
+ inline int argc() const { return argc_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PreparePushArguments, "prepare-push-arguments")
+
+ protected:
+ int argc_;
+};
+
+
+class LPushArguments V8_FINAL : public LTemplateResultInstruction<0> {
+ public:
+ explicit LPushArguments(Zone* zone,
+ int capacity = kRecommendedMaxPushedArgs)
+ : zone_(zone), inputs_(capacity, zone) {}
+
+ LOperand* argument(int i) { return inputs_[i]; }
+ int ArgumentCount() const { return inputs_.length(); }
+
+ void AddArgument(LOperand* arg) { inputs_.Add(arg, zone_); }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArguments, "push-arguments")
+
+ // It is better to limit the number of arguments pushed simultaneously to
+ // avoid pressure on the register allocator.
+ static const int kRecommendedMaxPushedArgs = 4;
+ bool ShouldSplitPush() const {
+ return inputs_.length() >= kRecommendedMaxPushedArgs;
+ }
+
+ protected:
+ Zone* zone_;
+ ZoneList<LOperand*> inputs_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+};
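+
+// A sketch of the intended protocol (hypothetical builder code, not part of
+// this header): arguments are accumulated with AddArgument(), and a fresh
+// LPushArguments is started whenever ShouldSplitPush() indicates that the
+// recommended limit has been reached:
+//
+//   LPushArguments* push_args = new(zone()) LPushArguments(zone());
+//   for (int i = 0; i < argument_count; i++) {
+//     if (push_args->ShouldSplitPush()) {
+//       AddInstruction(push_args, hydrogen_instr);
+//       push_args = new(zone()) LPushArguments(zone());
+//     }
+//     push_args->AddArgument(UseRegisterAtStart(argument(i)));
+//   }
+//   AddInstruction(push_args, hydrogen_instr);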
+
+
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LSeqStringGetChar(LOperand* string,
+ LOperand* index,
+ LOperand* temp) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ temps_[0] = temp;
+ }
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value,
+ LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ bool needs_check() const { return needs_check_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+ bool needs_check_;
+};
+
+
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+template<int T>
+class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
+ public:
+ LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value) {
+ this->inputs_[0] = elements;
+ this->inputs_[1] = key;
+ this->inputs_[2] = value;
+ }
+
+ bool is_external() const { return this->hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ LOperand* elements() { return this->inputs_[0]; }
+ LOperand* key() { return this->inputs_[1]; }
+ LOperand* value() { return this->inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return this->hydrogen()->elements_kind();
+ }
+
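+ // Rationale for the arithmetic special case below: the hole is encoded as
+ // one specific NaN bit pattern, and IEEE arithmetic only ever generates the
+ // default quiet NaN, so the result of an add/sub/mul/div cannot be the hole
+ // and needs no canonicalization.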
+ bool NeedsCanonicalization() {
+ if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+ hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+ return false;
+ }
+ return this->hydrogen()->NeedsCanonicalization();
+ }
+ uint32_t base_offset() const { return this->hydrogen()->base_offset(); }
+
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ this->elements()->PrintTo(stream);
+ stream->Add("[");
+ this->key()->PrintTo(stream);
+ if (this->base_offset() != 0) {
+ stream->Add(" + %d] <-", this->base_offset());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (this->value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ this->value()->PrintTo(stream);
+ }
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+};
+
+
+class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp)
+ : LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedExternal, "store-keyed-external")
+};
+
+
+class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp)
+ : LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixed, "store-keyed-fixed")
+};
+
+
+class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp)
+ : LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixedDouble,
+ "store-keyed-fixed-double")
+};
+
+
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LStoreNamedField(LOperand* object, LOperand* value,
+ LOperand* temp0, LOperand* temp1) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp0;
+ temps_[1] = temp1;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp0() { return temps_[0]; }
+ LOperand* temp1() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
+};
+
+
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LShiftS(Token::Value op, LOperand* left, LOperand* right, LOperand* temp,
+ bool can_deopt) : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object,
+ LOperand* temp) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right)
+ : shift_(NO_SHIFT), shift_amount_(NULL) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LSubI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+ : shift_(shift), shift_amount_(shift_amount) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Shift shift() const { return shift_; }
+ LOperand* shift_amount() const { return shift_amount_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+
+ protected:
+ Shift shift_;
+ LOperand* shift_amount_;
+};
+
+
+class LSubS : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* context,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() const { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() const { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = object;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+
+class LTruncateDoubleToIntOrSmi V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi,
+ "truncate-double-to-int-or-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() const { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk V8_FINAL : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
+
+ int GetNextSpillIndex();
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ allocator_(allocator) { }
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
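+ // Division by a constant is typically lowered to a multiplication by a
+ // precomputed "magic" reciprocal (in the style of Hacker's Delight); not
+ // every divisor admits such a constant, hence this predicate.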
+ static bool HasMagicNumberForDivision(int32_t divisor);
+
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ HGraph* graph() const { return graph_; }
+ Isolate* isolate() const { return info_->isolate(); }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ int argument_count() const { return argument_count_; }
+ CompilationInfo* info() const { return info_; }
+ Heap* heap() const { return isolate()->heap(); }
+
+ void Abort(BailoutReason reason);
+
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // The operand created by UseRegister is guaranteed to be live until the end
+ // of the instruction. This means that the register allocator will not reuse
+ // its register for any other operand inside the instruction.
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+
+ // The operand created by UseRegisterAndClobber is guaranteed to be live
+ // until the end of the instruction, and it may also be used as a scratch
+ // register by the instruction implementation.
+ //
+ // This behaves identically to ARM's UseTempRegister. However, it is renamed
+ // to discourage its use in ARM64, since in most cases it is better to
+ // allocate a temporary register for the Lithium instruction.
+ MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
+
+ // The operand created by UseRegisterAtStart is guaranteed to be live only at
+ // instruction start. The register allocator is free to assign the same
+ // register to some other operand used inside the instruction (i.e. a
+ // temporary or an output).
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
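+ // To illustrate the difference (a hypothetical binding; LSomeOp stands in
+ // for any instruction above): an input that is consumed before the result
+ // is written may share a register with the output via UseRegisterAtStart(),
+ // while an input that is still needed afterwards must use UseRegister():
+ //   LOperand* left = UseRegisterAtStart(instr->left());
+ //   LOperand* right = UseRegister(instr->right());
+ //   return DefineAsRegister(new(zone()) LSomeOp(left, right));
+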
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // A constant operand.
+ MUST_USE_RESULT LConstantOperand* UseConstant(HValue* value);
+
+ // An input operand in a register, a stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+
+ // Temporary operand that must be in a double register.
+ MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+
+ // Temporary operand that must be in a fixed double register.
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+ // cannot deoptimize eagerly, and we do not attach an environment to the
+ // instruction.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
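+ // A typical (hypothetical) call site wraps the definition of a call result:
+ //   return MarkAsCall(DefineFixed(call, x0), instr);
+ // where "call" is the freshly allocated Lithium call instruction.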
+
+ LInstruction* AssignPointerMap(LInstruction* instr);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+
+ void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
+ void DoBasicBlock(HBasicBlock* block);
+
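+ // ECMAScript shift operators use only the five least significant bits of
+ // the shift count (e.g. a count of 33 behaves as 1), hence the 0x1f mask.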
+ int JSShiftAmountFromHConstant(HValue* constant) {
+ return HConstant::cast(constant)->Integer32Value() & 0x1f;
+ }
+ bool LikelyFitsImmField(HInstruction* instr, int imm) {
+ if (instr->IsAdd() || instr->IsSub()) {
+ return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm);
+ } else {
+ ASSERT(instr->IsBitwise());
+ unsigned unused_n, unused_imm_s, unused_imm_r;
+ return Assembler::IsImmLogical(imm, kWRegSizeInBits,
+ &unused_n, &unused_imm_s, &unused_imm_r);
+ }
+ }
+
+ // Indicates whether a sequence of the form
+ // lsl x8, x9, #imm
+ // add x0, x1, x8
+ // can be replaced with:
+ // add x0, x1, x9 LSL #imm
+ // If this is not possible, the function returns NULL. Otherwise it returns a
+ // pointer to the shift instruction that would be optimized away.
+ HBitwiseBinaryOperation* CanTransformToShiftedOp(HValue* val,
+ HValue** left = NULL);
+ // Checks if all uses of the shift operation can optimize it away.
+ bool ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift);
+ // Attempts to merge the binary operation and a preceding shift operation,
+ // if any, into a single operation. Returns the merged instruction on
+ // success, and NULL otherwise.
+ LInstruction* TryDoOpWithShiftedRightOperand(HBinaryOperation* op);
+ LInstruction* DoShiftedBinaryOp(HBinaryOperation* instr,
+ HValue* left,
+ HBitwiseBinaryOperation* shift);
+
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr);
+
+ LPlatformChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ LAllocator* allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_LITHIUM_ARM64_H_
diff --git a/chromium/v8/src/arm64/lithium-codegen-arm64.cc b/chromium/v8/src/arm64/lithium-codegen-arm64.cc
new file mode 100644
index 00000000000..29c13ac5833
--- /dev/null
+++ b/chromium/v8/src/arm64/lithium-codegen-arm64.cc
@@ -0,0 +1,6061 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/code-stubs.h"
+#include "src/stub-cache.h"
+#include "src/hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator V8_FINAL : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deopt_mode_(mode) { }
+ virtual ~SafepointGenerator() { }
+
+ virtual void BeforeCall(int call_size) const { }
+
+ virtual void AfterCall() const {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+// Emit code to branch if the given condition holds.
+// The code generated here doesn't modify the flags, so they must have been
+// set by some prior instruction.
+//
+// The EmitInverted function simply inverts the condition.
+class BranchOnCondition : public BranchGenerator {
+ public:
+ BranchOnCondition(LCodeGen* codegen, Condition cond)
+ : BranchGenerator(codegen),
+ cond_(cond) { }
+
+ virtual void Emit(Label* label) const {
+ __ B(cond_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ if (cond_ != al) {
+ __ B(NegateCondition(cond_), label);
+ }
+ }
+
+ private:
+ Condition cond_;
+};
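+
+// Generators like these are handed to a helper that picks Emit() or
+// EmitInverted() depending on whether the true or false target block falls
+// through; a plausible call shape (hypothetical here) is:
+//   EmitBranchGeneric(instr, BranchOnCondition(this, eq));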
+
+
+// Emit code to compare lhs and rhs and branch if the condition holds.
+// This uses MacroAssembler's CompareAndBranch function so it will handle
+// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
+//
+// EmitInverted still compares the two operands but inverts the condition.
+class CompareAndBranch : public BranchGenerator {
+ public:
+ CompareAndBranch(LCodeGen* codegen,
+ Condition cond,
+ const Register& lhs,
+ const Operand& rhs)
+ : BranchGenerator(codegen),
+ cond_(cond),
+ lhs_(lhs),
+ rhs_(rhs) { }
+
+ virtual void Emit(Label* label) const {
+ __ CompareAndBranch(lhs_, rhs_, cond_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
+ }
+
+ private:
+ Condition cond_;
+ const Register& lhs_;
+ const Operand& rhs_;
+};
+
+
+// Test the input with the given mask and branch if the condition holds.
+// If the condition is 'eq' or 'ne' this will use MacroAssembler's
+// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
+// conversion to Tbz/Tbnz when possible.
+class TestAndBranch : public BranchGenerator {
+ public:
+ TestAndBranch(LCodeGen* codegen,
+ Condition cond,
+ const Register& value,
+ uint64_t mask)
+ : BranchGenerator(codegen),
+ cond_(cond),
+ value_(value),
+ mask_(mask) { }
+
+ virtual void Emit(Label* label) const {
+ switch (cond_) {
+ case eq:
+ __ TestAndBranchIfAllClear(value_, mask_, label);
+ break;
+ case ne:
+ __ TestAndBranchIfAnySet(value_, mask_, label);
+ break;
+ default:
+ __ Tst(value_, mask_);
+ __ B(cond_, label);
+ }
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ // The inverse of "all clear" is "any set" and vice versa.
+ switch (cond_) {
+ case eq:
+ __ TestAndBranchIfAnySet(value_, mask_, label);
+ break;
+ case ne:
+ __ TestAndBranchIfAllClear(value_, mask_, label);
+ break;
+ default:
+ __ Tst(value_, mask_);
+ __ B(NegateCondition(cond_), label);
+ }
+ }
+
+ private:
+ Condition cond_;
+ const Register& value_;
+ uint64_t mask_;
+};
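+
+// Note on the mapping above: after Tst(value, mask), eq (Z set) means
+// (value & mask) == 0, i.e. all tested bits are clear, and ne means at least
+// one tested bit is set; hence eq pairs with TestAndBranchIfAllClear and ne
+// with TestAndBranchIfAnySet.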
+
+
+// Test the input and branch if it is non-zero and not a NaN.
+class BranchIfNonZeroNumber : public BranchGenerator {
+ public:
+ BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
+ const FPRegister& scratch)
+ : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
+
+ virtual void Emit(Label* label) const {
+ __ Fabs(scratch_, value_);
+ // Compare with 0.0. Because scratch_ is positive, the result can be one of
+ // nZCv (equal), nzCv (greater) or nzCV (unordered).
+ __ Fcmp(scratch_, 0.0);
+ __ B(gt, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ Fabs(scratch_, value_);
+ __ Fcmp(scratch_, 0.0);
+ __ B(le, label);
+ }
+
+ private:
+ const FPRegister& value_;
+ const FPRegister& scratch_;
+};
+
+
+// Test the input and branch if it is a heap number.
+class BranchIfHeapNumber : public BranchGenerator {
+ public:
+ BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
+ : BranchGenerator(codegen), value_(value) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfHeapNumber(value_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotHeapNumber(value_, label);
+ }
+
+ private:
+ const Register& value_;
+};
+
+
+// Test the input and branch if it is the specified root value.
+class BranchIfRoot : public BranchGenerator {
+ public:
+ BranchIfRoot(LCodeGen* codegen, const Register& value,
+ Heap::RootListIndex index)
+ : BranchGenerator(codegen), value_(value), index_(index) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfRoot(value_, index_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotRoot(value_, index_, label);
+ }
+
+ private:
+ const Register& value_;
+ const Heap::RootListIndex index_;
+};
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
+ }
+}
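+
+// Illustrative shape of the output (hypothetical function names): because
+// WriteTranslation recurses on environment->outer() before emitting its own
+// frame, an optimized function f that inlines g produces a translation like:
+//
+//   BeginJSFrame(f_ast_id, Translation::kSelfLiteralId, f_height)
+//     ... one Store* command per value in f's environment ...
+//   BeginJSFrame(g_ast_id, g_closure_id, g_height)
+//     ... one Store* command per value in g's environment ...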
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal, zone());
+ return result;
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
+ if (!environment->HasBeenRegistered()) {
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ ASSERT(instr != NULL);
+
+ Assembler::BlockPoolsScope scope(masm_);
+ __ Call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+ if ((code->kind() == Code::BINARY_OP_IC) ||
+ (code->kind() == Code::COMPARE_IC)) {
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+ }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).Is(x1));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ after_push_argument_ = false;
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->constructor()).is(x1));
+
+ __ Mov(x0, instr->arity());
+ // No cell in x2 for construct type feedback in optimized code.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ after_push_argument_ = false;
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->constructor()).is(x1));
+
+ __ Mov(x0, Operand(instr->arity()));
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
+ __ Cbz(x10, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ B(&done);
+ __ Bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ Bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ }
+ after_push_argument_ = false;
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
+ ASSERT(instr != NULL);
+
+ __ CallRuntime(function, num_arguments, save_doubles);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Mov(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadHeapObject(cp,
+ Handle<HeapObject>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ ASSERT(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint = safepoints_.DefineSafepoint(
+ masm(), kind, arguments, deopt_mode);
+
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+
+ if (kind & Safepoint::kWithRegisters) {
+ // Register cp always contains a pointer to the context.
+ safepoint.DefinePointerRegister(cp, zone());
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+ LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+}
+
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ int count = 0;
+ while (!iterator.Done()) {
+ // TODO(all): Is this supposed to save just the callee-saved doubles? It
+ // looks like it's saving all of them.
+ FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ __ Poke(value, count * kDoubleSize);
+ iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ int count = 0;
+ while (!iterator.Done()) {
+ // TODO(all): Is this supposed to restore just the callee-saved doubles? It
+ // looks like it's restoring all of them.
+ FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ __ Peek(value, count * kDoubleSize);
+ iterator.Advance();
+ count++;
+ }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+ // TODO(all): Add support for stop_t FLAG in DEBUG mode.
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
+ Label ok;
+ int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
+ __ Peek(x10, receiver_offset);
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Poke(x10, receiver_offset);
+
+ __ Bind(&ok);
+ }
+ }
+
+ ASSERT(__ StackPointer().Is(jssp));
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
+ frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ __ Claim(slots, kPointerSize);
+ }
+
+ if (info()->saves_caller_doubles()) {
+ SaveCallerDoubles();
+ }
+
+ // Allocate a local context if needed.
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
+ // Argument to NewContext is the function, which is in x1.
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+    // Context is returned in x0. It replaces the context passed to us. It is
+    // saved on the stack and kept live in cp.
+ __ Mov(cp, x0);
+ __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ Register value = x0;
+ Register scratch = x3;
+
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ Ldr(value, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ Str(value, target);
+ // Update the write barrier. This clobbers value and scratch.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
+ GetLinkRegisterState(), kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ Claim(slots);
+}
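+
+// For example, with hypothetical counts: if the optimized code needs 10
+// stack slots and the unoptimized frame already provides 6, the Claim above
+// grows the frame by the 4-slot difference.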
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
+ LDeferredCode* code = deferred_[i];
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+
+ __ Bind(code->entry());
+
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ Push(lr, fp, cp);
+ __ Mov(fp, Smi::FromInt(StackFrame::STUB));
+ __ Push(fp);
+ __ Add(fp, __ StackPointer(),
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ Comment(";;; Deferred code");
+ }
+
+ code->Generate();
+
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ __ Pop(xzr, cp, fp, lr);
+ frame_is_built_ = false;
+ }
+
+ __ B(code->exit());
+ }
+ }
+
+  // Force constant pool emission at the end of the deferred code to make
+  // sure that no constant pools are emitted after deferred code because
+  // deferred code generation is the last step that generates code. The two
+  // following steps will only output data used by Crankshaft.
+ masm()->CheckConstPool(true, false);
+
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+ Label needs_frame, restore_caller_doubles, call_deopt_entry;
+
+ if (deopt_jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ Address base = deopt_jump_table_[0]->address;
+
+ UseScratchRegisterScope temps(masm());
+ Register entry_offset = temps.AcquireX();
+
+ int length = deopt_jump_table_.length();
+ for (int i = 0; i < length; i++) {
+ __ Bind(&deopt_jump_table_[i]->label);
+
+ Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
+ Address entry = deopt_jump_table_[i]->address;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+
+ // Second-level deopt table entries are contiguous and small, so instead
+ // of loading the full, absolute address of each one, load the base
+ // address and add an immediate offset.
+ __ Mov(entry_offset, entry - base);
+
+ // The last entry can fall through into `call_deopt_entry`, avoiding a
+ // branch.
+ bool last_entry = (i + 1) == length;
+
+ if (deopt_jump_table_[i]->needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
+ if (!needs_frame.is_bound()) {
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+
+ UseScratchRegisterScope temps(masm());
+ Register stub_marker = temps.AcquireX();
+ __ Bind(&needs_frame);
+ __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
+ __ Push(lr, fp, cp, stub_marker);
+ __ Add(fp, __ StackPointer(), 2 * kPointerSize);
+ if (!last_entry) __ B(&call_deopt_entry);
+ } else {
+ // Reuse the existing needs_frame code.
+ __ B(&needs_frame);
+ }
+ } else if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ if (!restore_caller_doubles.is_bound()) {
+ __ Bind(&restore_caller_doubles);
+ RestoreCallerDoubles();
+ if (!last_entry) __ B(&call_deopt_entry);
+ } else {
+ // Reuse the existing restore_caller_doubles code.
+ __ B(&restore_caller_doubles);
+ }
+ } else {
+ // There is nothing special to do, so just continue to the second-level
+ // table.
+ if (!last_entry) __ B(&call_deopt_entry);
+ }
+
+ masm()->CheckConstPool(false, last_entry);
+ }
+
+ // Generate common code for calling the second-level deopt table.
+ Register deopt_entry = temps.AcquireX();
+ __ Bind(&call_deopt_entry);
+ __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
+ RelocInfo::RUNTIME_ENTRY));
+ __ Add(deopt_entry, deopt_entry, entry_offset);
+ __ Call(deopt_entry);
+ }
+
+  // Force constant pool emission at the end of the deopt jump table to make
+  // sure that no constant pools are emitted after it.
+ masm()->CheckConstPool(true, false);
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ // We do not know how much data will be emitted for the safepoint table, so
+ // force emission of the veneer pool.
+ masm()->CheckVeneerPool(true, true);
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
+ PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+
+ Handle<DeoptimizationInputData> data =
+ DeoptimizationInputData::New(isolate(), length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+
+ code->set_deoptimization_data(*data);
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length(); i < length; i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::DeoptimizeBranch(
+ LEnvironment* environment,
+ BranchType branch_type, Register reg, int bit,
+ Deoptimizer::BailoutType* override_bailout_type) {
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ Deoptimizer::BailoutType bailout_type =
+ info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+
+ if (override_bailout_type != NULL) {
+ bailout_type = *override_bailout_type;
+ }
+
+ ASSERT(environment->HasBeenRegistered());
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ int id = environment->deoptimization_index();
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ }
+
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Label not_zero;
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+
+ __ Push(x0, x1, x2);
+ __ Mrs(x2, NZCV);
+ __ Mov(x0, count);
+ __ Ldr(w1, MemOperand(x0));
+ __ Subs(x1, x1, 1);
+ __ B(gt, &not_zero);
+ __ Mov(w1, FLAG_deopt_every_n_times);
+ __ Str(w1, MemOperand(x0));
+ __ Pop(x2, x1, x0);
+ ASSERT(frame_is_built_);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ Unreachable();
+
+ __ Bind(&not_zero);
+ __ Str(w1, MemOperand(x0));
+ __ Msr(NZCV, x2);
+ __ Pop(x2, x1, x0);
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ Label dont_trap;
+ __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
+ __ Debug("trap_on_deopt", __LINE__, BREAK);
+ __ Bind(&dont_trap);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to build the frame, or restore
+  // caller doubles.
+ if (branch_type == always &&
+ frame_is_built_ && !info()->saves_caller_doubles()) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last()->address != entry) ||
+ (deopt_jump_table_.last()->bailout_type != bailout_type) ||
+ (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
+ Deoptimizer::JumpTableEntry* table_entry =
+ new(zone()) Deoptimizer::JumpTableEntry(entry,
+ bailout_type,
+ !frame_is_built_);
+ deopt_jump_table_.Add(table_entry, zone());
+ }
+ __ B(&deopt_jump_table_.last()->label,
+ branch_type, reg, bit);
+ }
+}
+
+
+void LCodeGen::Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type) {
+ DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
+ DeoptimizeBranch(environment, static_cast<BranchType>(cond));
+}
+
+
+void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_zero, rt);
+}
+
+
+void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_not_zero, rt);
+}
+
+
+void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
+ int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
+ DeoptimizeIfBitSet(rt, sign_bit, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfSmi(Register rt,
+ LEnvironment* environment) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
+}
+
+
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
+}
+
+
+void LCodeGen::DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment) {
+ __ CompareRoot(rt, index);
+ DeoptimizeIf(eq, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment) {
+ __ CompareRoot(rt, index);
+ DeoptimizeIf(ne, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
+ LEnvironment* environment) {
+ __ TestForMinusZero(input);
+ DeoptimizeIf(vs, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfBitSet(Register rt,
+ int bit,
+ LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_bit_set, rt, bit);
+}
+
+
+void LCodeGen::DeoptimizeIfBitClear(Register rt,
+ int bit,
+ LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ intptr_t current_pc = masm()->pc_offset();
+
+ if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+ ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT((padding_size % kInstructionSize) == 0);
+ InstructionAccurateScope instruction_accurate(
+ masm(), padding_size / kInstructionSize);
+
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= kInstructionSize;
+ }
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
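+
+// Worked example with hypothetical sizes: if patch_size() is three
+// instructions and only one instruction has been emitted since
+// last_lazy_deopt_pc_, padding_size is two instructions and two nops are
+// emitted, so the patched-in lazy-deopt call sequence cannot overlap the
+// previous one.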
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  // TODO(all): support zero register results, as ToRegister32 does.
+ ASSERT((op != NULL) && op->IsRegister());
+ return Register::FromAllocationIndex(op->index());
+}
+
+
+Register LCodeGen::ToRegister32(LOperand* op) const {
+ ASSERT(op != NULL);
+ if (op->IsConstantOperand()) {
+ // If this is a constant operand, the result must be the zero register.
+ ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
+ return wzr;
+ } else {
+ return ToRegister(op).W();
+ }
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ ASSERT((op != NULL) && op->IsDoubleRegister());
+ return DoubleRegister::FromAllocationIndex(op->index());
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ ASSERT(op != NULL);
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
+ ASSERT(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
+ } else if (r.IsDouble()) {
+ Abort(kToOperandUnsupportedDoubleImmediate);
+ }
+ ASSERT(r.IsTagged());
+ return Operand(constant->handle(isolate()));
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand(0);
+ }
+  // Stack slots are not implemented; use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand(0);
+}
+
+
+Operand LCodeGen::ToOperand32I(LOperand* op) {
+ return ToOperand32(op, SIGNED_INT32);
+}
+
+
+Operand LCodeGen::ToOperand32U(LOperand* op) {
+ return ToOperand32(op, UNSIGNED_INT32);
+}
+
+
+Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
+ ASSERT(op != NULL);
+ if (op->IsRegister()) {
+ return Operand(ToRegister32(op));
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(constant->HasInteger32Value());
+ return (signedness == SIGNED_INT32)
+ ? Operand(constant->Integer32Value())
+ : Operand(static_cast<uint32_t>(constant->Integer32Value()));
+ } else {
+ // Other constants not implemented.
+ Abort(kToOperand32UnsupportedImmediate);
+ }
+ }
+ // Other cases are not implemented.
+ UNREACHABLE();
+ return Operand(0);
+}
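+
+// For example, a constant -1 yields Operand(-1) under SIGNED_INT32 but
+// Operand(0xffffffff) under UNSIGNED_INT32; the 32-bit pattern is the same,
+// only its interpretation as an immediate differs.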
+
+
+static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize;
+}
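+
+// The indices are negative here, so index -1 maps to offset 0, index -2 to
+// kPointerSize, and so on up the stack.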
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
+ ASSERT(op != NULL);
+ ASSERT(!op->IsRegister());
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ int fp_offset = StackSlotOffset(op->index());
+ if (op->index() >= 0) {
+      // Loads and stores have a wider reach with positive offsets than with
+      // negative ones. When the load or the store can't be done in one
+      // instruction via fp (the negative offset is too big), we try to access
+      // via jssp (positive offset). We can reference a stack slot from jssp
+      // only if jssp references the end of the stack slots. That is not the
+      // case when:
+      //  - stack_mode != kCanUseStackPointer: deferred code has saved the
+      //     registers.
+      //  - after_push_argument_: arguments have been pushed for a call.
+      //  - inlined_arguments_: inlined arguments have been pushed once. The
+      //     remainder of the function cannot trust jssp any longer.
+      //  - saves_caller_doubles: some double registers have been pushed; jssp
+      //     references the end of the double registers, not the end of the
+      //     stack slots.
+      // Also, if the offset from fp is small enough for a load/store in one
+      // instruction, we use an fp access.
+ if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
+ !inlined_arguments_ && !is_int9(fp_offset) &&
+ !info()->saves_caller_doubles()) {
+ int jssp_offset =
+ (GetStackSlotCount() - op->index() - 1) * kPointerSize;
+ return MemOperand(masm()->StackPointer(), jssp_offset);
+ }
+ }
+ return MemOperand(fp, fp_offset);
+ } else {
+    // There is no eager frame; retrieve the parameter relative to the
+    // stack pointer.
+ return MemOperand(masm()->StackPointer(),
+ ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+
+template<class LI>
+Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info,
+ IntegerSignedness signedness) {
+ if (shift_info->shift() == NO_SHIFT) {
+ return (signedness == SIGNED_INT32) ? ToOperand32I(right)
+ : ToOperand32U(right);
+ } else {
+ return Operand(
+ ToRegister32(right),
+ shift_info->shift(),
+ JSShiftAmountFromLConstant(shift_info->shift_amount()));
+ }
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return constant->Integer32Value();
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = nv;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? lo : lt;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? hi : gt;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? ls : le;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? hs : ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
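+
+// For example, Token::LT maps to lt for signed comparisons but to lo
+// (unsigned lower) when is_unsigned is set, and likewise Token::GTE maps
+// to ge or hs.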
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ branch.Emit(chunk_->GetAssemblyLabel(left_block));
+ } else {
+ branch.Emit(chunk_->GetAssemblyLabel(left_block));
+ __ B(chunk_->GetAssemblyLabel(right_block));
+ }
+}
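+
+// Illustration with hypothetical block numbers: if the true block is #4, the
+// false block is #5 and #5 is emitted next, a single branch.Emit() suffices
+// and the false case falls through; if instead the true block is the next
+// one, a single branch.EmitInverted() to the false block is emitted.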
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
+ ASSERT((condition != al) && (condition != nv));
+ BranchOnCondition branch(this, condition);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs) {
+ ASSERT((condition != al) && (condition != nv));
+ CompareAndBranch branch(this, condition, lhs, rhs);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask) {
+ ASSERT((condition != al) && (condition != nv));
+ TestAndBranch branch(this, condition, value, mask);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch) {
+ BranchIfNonZeroNumber branch(this, value, scratch);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value) {
+ BranchIfHeapNumber branch(this, value);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index) {
+ BranchIfRoot branch(this, value, index);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) {
+ resolver_.Resolve(move);
+ }
+ }
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+
+  // The pointer to the arguments array comes from DoArgumentsElements.
+  // It does not point directly to the arguments: there is an offset of
+  // two words that we must take into account when accessing an argument.
+  // Subtracting the index from length accounts for one, so we add one more.
+
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int offset = ((length - index) + 1) * kPointerSize;
+ __ Ldr(result, MemOperand(arguments, offset));
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister32(instr->length());
+ int index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = index - 1;
+ if (loc != 0) {
+ __ Sub(result.W(), length, loc);
+ __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
+ } else {
+ __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
+ }
+ } else {
+ Register length = ToRegister32(instr->length());
+ Operand index = ToOperand32I(instr->index());
+ __ Sub(result.W(), length, index);
+ __ Add(result.W(), result.W(), 1);
+ __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
+ }
+}
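+
+// Worked example with hypothetical constants: for length == 3 and
+// index == 1, the fully constant case above computes
+// offset = ((3 - 1) + 1) * kPointerSize, i.e. three words past the arguments
+// pointer, consistent with the two-word gap described above.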
+
+
+void LCodeGen::DoAddE(LAddE* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = (instr->right()->IsConstantOperand())
+ ? ToInteger32(LConstantOperand::cast(instr->right()))
+ : Operand(ToRegister32(instr->right()), SXTW);
+
+ ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+ __ Add(result, left, right);
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+
+ if (can_overflow) {
+ __ Adds(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoAddS(LAddS* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+ if (can_overflow) {
+ __ Adds(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
+ } else {
+ __ B(deferred->entry());
+ }
+ } else {
+ Register size = ToRegister32(instr->size());
+ __ Sxtw(size.X(), size);
+ __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
+ }
+
+ __ Bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ Register filler_count = temp1;
+ Register filler = temp2;
+ Register untagged_result = ToRegister(instr->temp3());
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Mov(filler_count, size / kPointerSize);
+ } else {
+ __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
+ }
+
+ __ Sub(untagged_result, result, kHeapObjectTag);
+ __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ FillFields(untagged_result, filler_count, filler);
+ } else {
+ ASSERT(instr->temp3() == NULL);
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // We're in a SafepointRegistersScope so we can use any scratch registers.
+ Register size = x0;
+ if (instr->size()->IsConstantOperand()) {
+ __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
+ } else {
+ __ SmiTag(size, ToRegister32(instr->size()).X());
+ }
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ Mov(x10, Smi::FromInt(flags));
+ __ Push(size, x10);
+
+ CallRuntimeFromDeferred(
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister32(instr->length());
+
+ Register elements = ToRegister(instr->elements());
+ Register scratch = x5;
+ ASSERT(receiver.Is(x0)); // Used for parameter count.
+ ASSERT(function.Is(x1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ ASSERT(instr->IsMarkedAsCall());
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ Cmp(length, kArgumentsLimit);
+ DeoptimizeIf(hi, instr->environment());
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ Push(receiver);
+ Register argc = receiver;
+ receiver = NoReg;
+ __ Sxtw(argc, length);
+  // The arguments start at an offset of one pointer size from elements.
+ __ Add(elements, elements, 1 * kPointerSize);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ Cbz(length, &invoke);
+ __ Bind(&loop);
+ __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
+ __ Push(scratch);
+ __ Subs(length, length, 1);
+ __ B(ne, &loop);
+
+ __ Bind(&invoke);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+ // The number of arguments is stored in argc (receiver) which is x0, as
+ // expected by InvokeFunction.
+ ParameterCount actual(argc);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  // We push some arguments and they will be popped in another block. We
+  // can't trust that jssp references the end of the stack slots until the
+  // end of the function.
+ inlined_arguments_ = true;
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ // When we are inside an inlined function, the arguments are the last things
+ // that have been pushed on the stack. Therefore the arguments array can be
+ // accessed directly from jssp.
+    // However, in the normal case the array is accessed via fp, and there are
+    // two words on the stack between fp and the arguments (the saved lr and
+    // fp) that the LAccessArgumentsAt implementation takes into account.
+    // In the inlined case we need to subtract the size of two words from jssp
+    // to get a pointer that works well with LAccessArgumentsAt.
+ ASSERT(masm()->StackPointer().Is(jssp));
+ __ Sub(result, jssp, 2 * kPointerSize);
+ } else {
+ ASSERT(instr->temp() != NULL);
+ Register previous_fp = ToRegister(instr->temp());
+
+ __ Ldr(previous_fp,
+ MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(result,
+ MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
+ __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
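+    // Select previous_fp if the marker matched (adaptor frame), else fp.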
+ __ Csel(result, fp, previous_fp, ne);
+ }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister32(instr->result());
+ Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ Cmp(fp, elements);
+ __ Mov(result, scope()->num_parameters());
+ __ B(eq, &done);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(result,
+ UntagSmiMemOperand(result.X(),
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ // Argument length is in result register.
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ switch (instr->op()) {
+ case Token::ADD: __ Fadd(result, left, right); break;
+ case Token::SUB: __ Fsub(result, left, right); break;
+ case Token::MUL: __ Fmul(result, left, right); break;
+ case Token::DIV: __ Fdiv(result, left, right); break;
+ case Token::MOD: {
+ // The ECMA-262 remainder operator is the remainder from a truncating
+ // (round-towards-zero) division. Note that this differs from IEEE-754.
+ //
+ // TODO(jbramley): See if it's possible to do this inline, rather than by
+ // calling a helper function. With frintz (to produce the intermediate
+ // quotient) and fmsub (to calculate the remainder without loss of
+ // precision), it should be possible. However, we would need support for
+ // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
+ // support that yet.
+ ASSERT(left.Is(d0));
+ ASSERT(right.Is(d1));
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ ASSERT(result.Is(d0));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).is(x1));
+ ASSERT(ToRegister(instr->right()).is(x0));
+ ASSERT(ToRegister(instr->result()).is(x0));
+
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToShiftedRightOperand32U(instr->right(), instr);
+
+ switch (instr->op()) {
+ case Token::BIT_AND: __ And(result, left, right); break;
+ case Token::BIT_OR: __ Orr(result, left, right); break;
+ case Token::BIT_XOR: __ Eor(result, left, right); break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoBitS(LBitS* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+
+ switch (instr->op()) {
+ case Token::BIT_AND: __ And(result, left, right); break;
+ case Token::BIT_OR: __ Orr(result, left, right); break;
+ case Token::BIT_XOR: __ Eor(result, left, right); break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
+ Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
+ ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
+ ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
+ if (instr->index()->IsConstantOperand()) {
+ Operand index = ToOperand32I(instr->index());
+ Register length = ToRegister32(instr->length());
+ __ Cmp(length, index);
+ cond = CommuteCondition(cond);
+ } else {
+ Register index = ToRegister32(instr->index());
+ Operand length = ToOperand32I(instr->length());
+ __ Cmp(index, length);
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
+ } else {
+ DeoptimizeIf(cond, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+
+ if (r.IsInteger32()) {
+ ASSERT(!info()->IsStub());
+ EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
+ } else if (r.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ STATIC_ASSERT(kSmiTag == 0);
+ EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
+ } else if (r.IsDouble()) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ // Test the double value. Zero and NaN are false.
+ EmitBranchIfNonZeroNumber(instr, value, double_scratch());
+ } else {
+ ASSERT(r.IsTagged());
+ Register value = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+
+ if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
+ __ CompareRoot(value, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
+ } else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitGoto(instr->TrueDestination(chunk()));
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ __ Ldr(double_scratch(), FieldMemOperand(value,
+ HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ Register temp = ToRegister(instr->temp1());
+ __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
+ EmitCompareAndBranch(instr, ne, temp, 0);
+ } else {
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ JumpIfRoot(
+ value, Heap::kUndefinedValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ JumpIfRoot(
+ value, Heap::kTrueValueRootIndex, true_label);
+ __ JumpIfRoot(
+ value, Heap::kFalseValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ JumpIfRoot(
+ value, Heap::kNullValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all others -> true.
+ ASSERT(Smi::FromInt(0) == 0);
+ __ Cbz(value, false_label);
+ __ JumpIfSmi(value, true_label);
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a smi, deopt.
+ DeoptimizeIfSmi(value, instr->environment());
+ }
+
+ Register map = NoReg;
+ Register scratch = NoReg;
+
+ if (expected.NeedsMap()) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ map = ToRegister(instr->temp1());
+ scratch = ToRegister(instr->temp2());
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(
+ scratch, 1 << Map::kIsUndetectable, false_label);
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, true_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
+ __ B(ge, &not_string);
+ __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
+ __ Cbz(scratch, false_label);
+ __ B(true_label);
+ __ Bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
+ __ B(eq, true_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ Label not_heap_number;
+ __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
+
+ __ Ldr(double_scratch(),
+ FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ Fcmp(double_scratch(), 0.0);
+ // If we got a NaN (overflow bit is set), jump to the false branch.
+ __ B(vs, false_label);
+ __ B(eq, false_label);
+ __ B(true_label);
+ __ Bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ Deoptimize(instr->environment());
+ }
+ }
+ }
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ Register function_reg) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
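+  // When the declared parameter count matches the call arity (or adaptation
+  // is disabled), the function can be entered directly, bypassing the
+  // arguments adaptor frame.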
+
+ // The function interface relies on the following register assignments.
+ ASSERT(function_reg.Is(x1) || function_reg.IsNone());
+ Register arity_reg = x0;
+
+ LPointerMap* pointers = instr->pointer_map();
+
+ // If necessary, load the function object.
+ if (function_reg.IsNone()) {
+ function_reg = x1;
+ __ LoadObject(function_reg, function);
+ }
+
+ if (FLAG_debug_code) {
+ Label is_not_smi;
+ // Try to confirm that function_reg (x1) is a tagged pointer.
+ __ JumpIfNotSmi(function_reg, &is_not_smi);
+ __ Abort(kExpectedFunctionObject);
+ __ Bind(&is_not_smi);
+ }
+
+ if (can_invoke_directly) {
+ // Change context.
+ __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+    // Set the arguments count if adaptation is not needed. Assumes that x0
+    // is available to write to at this point.
+ if (dont_adapt_arguments) {
+ __ Mov(arity_reg, arity);
+ }
+
+ // Invoke function.
+ __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->result()).Is(x0));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ // TODO(all): on ARM we use a call descriptor to specify a storage mode
+ // but on ARM64 we only have one storage mode so it isn't necessary. Check
+ // this understanding is correct.
+ __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
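+    // The target register holds a tagged Code object; compute the entry
+    // address by skipping the header and removing the heap object tag.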
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(target);
+ }
+ generator.AfterCall();
+ after_push_argument_ = false;
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->function()).is(x1));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ Mov(x0, Operand(instr->arity()));
+ }
+
+ // Change context.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+  // Load the code entry address.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ after_push_argument_ = false;
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+ after_push_argument_ = false;
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->result()).is(x0));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ after_push_argument_ = false;
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Register temp = ToRegister(instr->temp());
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object);
+ __ Mov(cp, 0);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, temp);
+ }
+ DeoptimizeIfSmi(temp, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps: public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
+ Register object = ToRegister(instr->value());
+ Register map_reg = ToRegister(instr->temp());
+
+ __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, object);
+ __ Bind(deferred->check_maps());
+ }
+
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ Label success;
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
+ __ CompareMap(map_reg, map);
+ __ B(eq, &success);
+ }
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
+ __ CompareMap(map_reg, map);
+
+ // We didn't match a map.
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ __ B(ne, deferred->entry());
+ } else {
+ DeoptimizeIf(ne, instr->environment());
+ }
+
+ __ Bind(&success);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
+ }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ Register value = ToRegister(instr->value());
+ ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
+ DeoptimizeIfNotSmi(value, instr->environment());
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first, last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ Cmp(scratch, first);
+ if (first == last) {
+ // If there is only one type in the interval check for equality.
+ DeoptimizeIf(ne, instr->environment());
+ } else if (last == LAST_TYPE) {
+      // We don't need to compare with the upper bound of the interval.
+ DeoptimizeIf(lo, instr->environment());
+ } else {
+ // If we are below the lower bound, set the C flag and clear the Z flag
+ // to force a deopt.
+ __ Ccmp(scratch, last, CFlag, hs);
+ DeoptimizeIf(hi, instr->environment());
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT((tag == 0) || (tag == mask));
+ if (tag == 0) {
+ DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
+ } else {
+ DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
+ }
+ } else {
+ if (tag == 0) {
+ __ Tst(scratch, mask);
+ } else {
+ __ And(scratch, scratch, mask);
+ __ Cmp(scratch, tag);
+ }
+ DeoptimizeIf(ne, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ __ ClampDoubleToUint8(result, input, double_scratch());
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register input = ToRegister32(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ __ ClampInt32ToUint8(result, input);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register input = ToRegister(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Label done;
+
+ // Both smi and heap number cases are handled.
+ Label is_not_smi;
+ __ JumpIfNotSmi(input, &is_not_smi);
+ __ SmiUntag(result.X(), input);
+ __ ClampInt32ToUint8(result);
+ __ B(&done);
+
+ __ Bind(&is_not_smi);
+
+ // Check for heap number.
+ Label is_heap_number;
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
+
+  // Check for undefined. Undefined is converted to zero for the clamping
+  // conversion.
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+ __ Mov(result, 0);
+ __ B(&done);
+
+ // Heap number case.
+ __ Bind(&is_heap_number);
+ DoubleRegister dbl_scratch = double_scratch();
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
+ __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ Fmov(result_reg, value_reg);
+ __ Lsr(result_reg, result_reg, 32);
+ } else {
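+    // The S view of the double register aliases its least significant half.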
+ __ Fmov(result_reg.W(), value_reg.S());
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+
+ // Insert the least significant 32 bits of hi_reg into the most significant
+ // 32 bits of lo_reg, and move to a floating point register.
+ __ Bfi(lo_reg, hi_reg, 32, 32);
+ __ Fmov(result_reg, lo_reg);
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Handle<String> class_name = instr->hydrogen()->class_name();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Register input = ToRegister(instr->value());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(input, false_label);
+
+ Register map = scratch2;
+ if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
+    // Assuming the following assertions hold, we can use the same compares
+    // to test both for being a function type and for being in the object
+    // type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+ // We expect CompareObjectType to load the object instance type in scratch1.
+ __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, false_label);
+ __ B(eq, true_label);
+ __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
+ __ B(eq, true_label);
+ } else {
+ __ IsObjectJSObjectType(input, map, scratch1, false_label);
+ }
+
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+ // Check if the constructor in the map is a function.
+ __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
+
+ // Objects with a non-function constructor have class 'Object'.
+ if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
+ __ JumpIfNotObjectType(
+ scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
+ } else {
+ __ JumpIfNotObjectType(
+ scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
+ }
+
+ // The constructor function is in scratch1. Get its instance class name.
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ FPRegister object = ToDoubleRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+
+ // If we don't have a NaN, we don't have the hole, so branch now to avoid the
+ // (relatively expensive) hole-NaN check.
+ __ Fcmp(object, object);
+ __ B(vc, instr->FalseLabel(chunk_));
+
+ // We have a NaN, but is it the hole?
+ __ Fmov(temp, object);
+ EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
+ ASSERT(instr->hydrogen()->representation().IsTagged());
+ Register object = ToRegister(instr->object());
+
+ EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register value = ToRegister(instr->value());
+ Register map = ToRegister(instr->temp());
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
+ instr->TrueLabel(chunk()));
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
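+    // -0.0 is the only double whose bit pattern has just the sign bit set,
+    // so the raw 64-bit value can be tested directly.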
+ __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
+ }
+ EmitGoto(instr->FalseDestination(chunk()));
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op(), is_unsigned);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ if (right->IsConstantOperand()) {
+ __ Fcmp(ToDoubleRegister(left),
+ ToDouble(LConstantOperand::cast(right)));
+ } else if (left->IsConstantOperand()) {
+ // Commute the operands and the condition.
+ __ Fcmp(ToDoubleRegister(right),
+ ToDouble(LConstantOperand::cast(left)));
+ cond = CommuteCondition(cond);
+ } else {
+ __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+ }
+
+ // If a NaN is involved, i.e. the result is unordered (V set),
+      // jump to the false block label.
+ __ B(vs, instr->FalseLabel(chunk_));
+ EmitBranch(instr, cond);
+ } else {
+ if (instr->hydrogen_value()->representation().IsInteger32()) {
+ if (right->IsConstantOperand()) {
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister32(left),
+ ToOperand32I(right));
+ } else {
+ // Commute the operands and the condition.
+ EmitCompareAndBranch(instr,
+ CommuteCondition(cond),
+ ToRegister32(right),
+ ToOperand32I(left));
+ }
+ } else {
+ ASSERT(instr->hydrogen_value()->representation().IsSmi());
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister(left),
+ Operand(Smi::FromInt(value)));
+ } else if (left->IsConstantOperand()) {
+ // Commute the operands and the condition.
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ EmitCompareAndBranch(instr,
+ CommuteCondition(cond),
+ ToRegister(right),
+ Operand(Smi::FromInt(value)));
+ } else {
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister(left),
+ ToRegister(right));
+ }
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+ EmitCompareAndBranch(instr, eq, left, right);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+ Condition cond = TokenToCondition(op, false);
+
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // Signal that we don't inline smi code before this stub.
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ // Return true or false depending on CompareIC result.
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
+ __ LoadTrueFalseRoots(x1, x2);
+ __ Cmp(x0, 0);
+ __ Csel(ToRegister(instr->result()), x1, x2, cond);
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ ASSERT(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ if (instr->value() == 0) {
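+    // +0.0 and -0.0 compare equal, so use copysign to recover the sign bit.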
+ if (copysign(1.0, instr->value()) == 1.0) {
+ __ Fmov(result, fp_zero);
+ } else {
+ __ Fneg(result, fp_zero);
+ }
+ } else {
+ __ Fmov(result, instr->value());
+ }
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ ASSERT(is_int32(instr->value()));
+ // Cast the value here to ensure that the value isn't sign extended by the
+ // implicit Operand constructor.
+ __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ LoadObject(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
+ }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Register reg = ToRegister(instr->value());
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ UseScratchRegisterScope temps(masm());
+ Register temp = temps.AcquireX();
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ Mov(temp, Operand(Handle<Object>(cell)));
+ __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
+ __ Cmp(reg, temp);
+ } else {
+ __ Cmp(reg, Operand(object));
+ }
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->date());
+ Register result = ToRegister(instr->result());
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Smi* index = instr->index();
+ Label runtime, done;
+
+ ASSERT(object.is(result) && object.Is(x0));
+ ASSERT(instr->IsMarkedAsCall());
+
+ DeoptimizeIfSmi(object, instr->environment());
+ __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
+ DeoptimizeIf(ne, instr->environment());
+
+ if (index->value() == 0) {
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
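+      // Cached date fields are valid only while the object's cache stamp
+      // matches the isolate's date cache stamp; otherwise fall back to the
+      // runtime call.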
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ Mov(temp1, Operand(stamp));
+ __ Ldr(temp1, MemOperand(temp1));
+ __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Cmp(temp1, temp2);
+ __ B(ne, &runtime);
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ B(&done);
+ }
+
+ __ Bind(&runtime);
+ __ Mov(x1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ }
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
+ type = Deoptimizer::LAZY;
+ }
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ Deoptimize(instr->environment(), &type);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIfZero(dividend, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ // Test dividend for kMinInt by subtracting one (cmp) and checking for
+ // overflow.
+ __ Cmp(dividend, 1);
+ DeoptimizeIf(vs, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ Tst(dividend, mask);
+ DeoptimizeIf(ne, instr->environment());
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ Neg(result, dividend);
+ return;
+ }
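+  // The sequence below rounds towards zero: for a negative dividend,
+  // (2^shift - 1) is added (derived from the sign bit) before the final
+  // arithmetic shift right.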
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ Mov(result, dividend);
+ } else if (shift == 1) {
+ __ Add(result, dividend, Operand(dividend, LSR, 31));
+ } else {
+ __ Mov(result, Operand(dividend, ASR, 31));
+ __ Add(result, dividend, Operand(result, LSR, 32 - shift));
+ }
+ if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ Neg(result, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ ASSERT(!AreAliased(dividend, result));
+
+ if (divisor == 0) {
+ Deoptimize(instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIfZero(dividend, instr->environment());
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ Register temp = ToRegister32(instr->temp());
+ ASSERT(!AreAliased(dividend, result, temp));
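+    // Compute the remainder as dividend - result * divisor with a 32x32->64
+    // multiply-subtract (Smsubl), and deopt if the division was not exact.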
+ __ Sxtw(dividend.X(), dividend);
+ __ Mov(temp, divisor);
+ __ Smsubl(temp.X(), result, temp, dividend.X());
+ DeoptimizeIfNotZero(temp, instr->environment());
+ }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister32(instr->dividend());
+ Register divisor = ToRegister32(instr->divisor());
+ Register result = ToRegister32(instr->result());
+
+ // Issue the division first, and then check for any deopt cases whilst the
+ // result is computed.
+ __ Sdiv(result, dividend, divisor);
+
+ if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ ASSERT_EQ(NULL, instr->temp());
+ return;
+ }
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIfZero(divisor, instr->environment());
+ }
+
+ // Check for (0 / -x) as that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(divisor, 0);
+
+    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
+    // zero, i.e. a zero dividend with a negative divisor deopts.
+    // If the divisor >= 0 (pl, the opposite of mi), set the flags to
+    // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
+ __ Ccmp(dividend, 0, NoFlag, mi);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ // Test dividend for kMinInt by subtracting one (cmp) and checking for
+ // overflow.
+ __ Cmp(dividend, 1);
+    // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
+    // -1. If overflow is clear, set the flags for condition ne, as the
+    // dividend isn't kMinInt, and thus we shouldn't deopt.
+ __ Ccmp(divisor, -1, NoFlag, vs);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = ToRegister32(instr->temp());
+ __ Msub(remainder, result, divisor, dividend);
+ DeoptimizeIfNotZero(remainder, instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister32(instr->result());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIfMinusZero(input, instr->environment());
+ }
+
+ __ TryRepresentDoubleAsInt32(result, input, double_scratch());
+ DeoptimizeIf(ne, instr->environment());
+
+ if (instr->tag_result()) {
+ __ SmiTag(result.X());
+ }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+ __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+  // The FunctionLiteral instruction is marked as a call; we can trash any
+  // register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literal cloning.
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->is_generator());
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ __ Mov(x1, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
+ __ Push(cp, x2, x1);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+
+ __ EnumLengthUntagged(result, map);
+ __ Cbnz(result, &load_cache);
+
+ __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ B(&done);
+
+ __ Bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ DeoptimizeIfZero(result, instr->environment());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ Register object = ToRegister(instr->object());
+ Register null_value = x5;
+
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(object.Is(x0));
+
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Cmp(object, null_value);
+ DeoptimizeIf(eq, instr->environment());
+
+ DeoptimizeIfSmi(object, instr->environment());
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
+ DeoptimizeIf(le, instr->environment());
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
+
+ __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ B(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ Bind(&call_runtime);
+ __ Push(object);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment());
+
+ __ Bind(&use_cache);
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ // Assert that we can use a W register load to get the hash.
+ ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
+ __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  // Do not emit a jump if we are emitting a goto to the next block.
+ if (!IsNextEmittedBlock(block)) {
+ __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister32(instr->temp());
+
+ // Assert that the cache status bits fit in a W register.
+ ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
+ __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
+ __ Tst(temp, String::kContainsCachedArrayIndexMask);
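+  // All mask bits clear means the string contains a cached array index.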
+ EmitBranch(instr, eq);
+}
+
+
+// HHasInstanceTypeAndBranch instruction is built with an interval of type
+// to test but is only used in very restricted ways. The only possible kinds
+// of intervals are:
+// - [ FIRST_TYPE, instr->to() ]
+// - [ instr->from(), LAST_TYPE ]
+// - instr->from() == instr->to()
+//
+// These kinds of intervals can be checked with only one compare instruction,
+// provided the correct value and test condition are used.
+//
+// TestType() will return the value to use in the compare instruction and
+// BranchCondition() will return the condition to use depending on the kind
+// of interval actually specified in the instruction.
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT((from == to) || (to == LAST_TYPE));
+ return from;
+}
+
+
+// See comment above TestType function for what this function does.
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return hs;
+ if (from == FIRST_TYPE) return ls;
+ UNREACHABLE();
+ return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ __ Add(result, base, ToOperand32I(instr->offset()));
+ } else {
+ __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
+ }
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // Assert that the arguments are in the registers expected by InstanceofStub.
+ ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
+ ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
+
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+ // InstanceofStub returns a result in x0:
+ // 0 => not an instance
+ // smi 1 => instance.
+ __ Cmp(x0, 0);
+ __ LoadTrueFalseRoots(x0, x1);
+ __ Csel(x0, x0, x1, eq);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred =
+ new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label map_check, return_false, cache_miss, done;
+ Register object = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ // x4 is expected in the associated deferred code and stub.
+ Register map_check_site = x4;
+ Register map = x5;
+
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // We must take into account that object is in x11.
+ ASSERT(object.Is(x11));
+ Register scratch = x10;
+
+  // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &return_false);
+
+  // This is the inlined call site instanceof cache. The two occurrences of
+  // the hole value will be patched to the last map/result pair generated by
+  // the instanceof stub.
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ {
+    // We deliberately use Factory::the_hole_value() below, instead of loading
+    // from the root array, to force relocation so that the value can later be
+    // patched with a custom value.
+ InstructionAccurateScope scope(masm(), 5);
+ __ bind(&map_check);
+ // Will be patched with the cached map.
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+ __ ldr(scratch, Immediate(Handle<Object>(cell)));
+ __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ cmp(map, scratch);
+ __ b(&cache_miss, ne);
+ // The address of this instruction is computed relative to the map check
+ // above, so check the size of the code generated.
+ ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
+ // Will be patched with the cached result.
+ __ ldr(result, Immediate(factory()->the_hole_value()));
+ }
+ __ B(&done);
+
+ // The inlined call site cache did not match.
+ // Check null and string before calling the deferred code.
+ __ Bind(&cache_miss);
+ // Compute the address of the map check. It must not be clobbered until the
+ // InstanceOfStub has used it.
+ __ Adr(map_check_site, &map_check);
+  // Null is not an instance of anything.
+ __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
+
+ // String values are not instances of anything.
+ // Return false if the object is a string. Otherwise, jump to the deferred
+ // code.
+ // Note that we can't jump directly to deferred code from
+ // IsObjectJSStringType, because it uses tbz for the jump and the deferred
+ // code can be out of range.
+ __ IsObjectJSStringType(object, scratch, NULL, &return_false);
+ __ B(deferred->entry());
+
+ __ Bind(&return_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result is either true or false.
+ __ Bind(deferred->exit());
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+
+ // Prepare InstanceofStub arguments.
+ ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
+ __ LoadObject(InstanceofStub::right(), instr->function());
+
+ InstanceofStub stub(isolate(), flags);
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+
+ // Put the result value into the result register slot.
+ __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ Register value = ToRegister32(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Scvtf(result, value);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // The function is required to be in x1.
+ ASSERT(ToRegister(instr->function()).is(x1));
+ ASSERT(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(),
+ instr,
+ x1);
+ }
+ after_push_argument_ = false;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ // Get the frame pointer for the calling frame.
+ __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ Bind(&check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+
+ EmitCompareAndBranch(
+ instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Label* is_object = instr->TrueLabel(chunk_);
+ Label* is_not_object = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, is_not_object);
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+ // Check for undetectable objects.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
+
+ // Check that instance type is in object type range.
+ __ IsInstanceJSObjectType(map, scratch, NULL);
+ // Flags have been updated by IsInstanceJSObjectType. We can now test the
+ // flags for "le" condition to check if the object's type is a valid
+ // JS object type.
+ EmitBranch(instr, le);
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register val = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ Condition true_cond =
+ EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Register value = ToRegister(instr->value());
+ STATIC_ASSERT(kSmiTag == 0);
+ EmitTestAndBranch(instr, eq, value, kSmiTagMask);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+
+ EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
+
+ __ Bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ Label not_the_hole;
+ __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Bind(&not_the_hole);
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ // Check that the function really is a function. Leaves map in the result
+ // register.
+ __ CompareObjectType(function, result, temp, JS_FUNCTION_TYPE);
+ DeoptimizeIf(ne, instr->environment());
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CompareObjectType(result, temp, temp, MAP_TYPE);
+ __ B(ne, &done);
+
+ // Get the prototype from the initial map.
+ __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ __ Bind(&non_instance);
+ __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ DeoptimizeIfRoot(
+ result, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->global_object()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Mov(x2, Operand(instr->name()));
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
+ Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int base_offset) {
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+
+ if (key_is_constant) {
+ int key_offset = constant_key << element_size_shift;
+ return MemOperand(base, key_offset + base_offset);
+ }
+
+ if (key_is_smi) {
+ __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
+ return MemOperand(scratch, base_offset);
+ }
+
+ if (base_offset == 0) {
+ return MemOperand(base, key, SXTW, element_size_shift);
+ }
+
+ ASSERT(!AreAliased(scratch, key));
+ __ Add(scratch, base, base_offset);
+ return MemOperand(scratch, key, SXTW, element_size_shift);
+}
+
+
+void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register scratch;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ scratch = ToRegister(instr->temp());
+ key = ToRegister(instr->key());
+ }
+
+ MemOperand mem_op =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->base_offset());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result.S(), mem_op);
+ __ Fcvt(result, result.S());
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, mem_op);
+ } else {
+ Register result = ToRegister(instr->result());
+
+ switch (elements_kind) {
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Ldrsb(result, mem_op);
+ break;
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ Ldrb(result, mem_op);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ Ldrsh(result, mem_op);
+ break;
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Ldrh(result, mem_op);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ Ldrsw(result, mem_op);
+ break;
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Ldr(result.W(), mem_op);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          // Deopt if value >= 0x80000000, as it does not fit in int32.
+ __ Tst(result, 0xFFFFFFFF80000000);
+ DeoptimizeIf(ne, instr->environment());
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind,
+ Representation representation,
+ int base_offset) {
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+
+ // Even though the HLoad/StoreKeyed instructions force the input
+ // representation for the key to be an integer, the input gets replaced during
+ // bounds check elimination with the index argument to the bounds check, which
+ // can be tagged, so that case must be handled here, too.
+ if (key_is_tagged) {
+ __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
+ if (representation.IsInteger32()) {
+ ASSERT(elements_kind == FAST_SMI_ELEMENTS);
+ // Read or write only the most-significant 32 bits in the case of fast smi
+ // arrays.
+ return UntagSmiMemOperand(base, base_offset);
+ } else {
+ return MemOperand(base, base_offset);
+ }
+ } else {
+    // Sign-extend the key because it could be a 32-bit negative value or
+    // contain garbage in the top 32 bits. The address computation happens
+    // in 64 bits.
+ ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
+ if (representation.IsInteger32()) {
+ ASSERT(elements_kind == FAST_SMI_ELEMENTS);
+ // Read or write only the most-significant 32 bits in the case of fast smi
+ // arrays.
+ __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+ return UntagSmiMemOperand(base, base_offset);
+ } else {
+ __ Add(base, elements, base_offset);
+ return MemOperand(base, key, SXTW, element_size_shift);
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ MemOperand mem_op;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
+ (instr->temp() == NULL));
+
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ int offset = instr->base_offset() + constant_key * kDoubleSize;
+ mem_op = MemOperand(elements, offset);
+ } else {
+ Register load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ instr->hydrogen()->representation(),
+ instr->base_offset());
+ }
+
+ __ Ldr(result, mem_op);
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register scratch = ToRegister(instr->temp());
+ // Detect the hole NaN by adding one to the integer representation of the
+ // result, and checking for overflow.
+ STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
+ __ Ldr(scratch, mem_op);
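+    // Cmn computes scratch + 1, which overflows (V set) only when scratch is
+    // INT64_MAX, i.e. exactly kHoleNanInt64.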
+ __ Cmn(scratch, 1);
+ DeoptimizeIf(vs, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ MemOperand mem_op;
+
+ Representation representation = instr->hydrogen()->representation();
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(instr->temp() == NULL);
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ int offset = instr->base_offset() +
+ ToInteger32(const_operand) * kPointerSize;
+ if (representation.IsInteger32()) {
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+ (kSmiTag == 0));
+ mem_op = UntagSmiMemOperand(elements, offset);
+ } else {
+ mem_op = MemOperand(elements, offset);
+ }
+ } else {
+ Register load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+
+ mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ representation, instr->base_offset());
+ }
+
+ __ Load(result, mem_op, representation);
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ DeoptimizeIfNotSmi(result, instr->environment());
+ } else {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x1));
+ ASSERT(ToRegister(instr->key()).Is(x0));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).Is(x0));
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ Register object = ToRegister(instr->object());
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ __ Load(result, MemOperand(object, offset), access.representation());
+ return;
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ FPRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ Register source;
+ if (access.IsInobject()) {
+ source = object;
+ } else {
+ // Load the properties array, using result as a scratch register.
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ source = result;
+ }
+
+ if (access.representation().IsSmi() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+    // Read the int value directly from the upper half of the smi.
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Load(result, UntagSmiFieldMemOperand(source, offset),
+ Representation::Integer32());
+ } else {
+ __ Load(result, FieldMemOperand(source, offset), access.representation());
+ }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
+ ASSERT(ToRegister(instr->object()).is(x0));
+ __ Mov(x2, Operand(instr->name()));
+
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLengthSmi(result, map);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fabs(result, input);
+ } else if (r.IsSmi() || r.IsInteger32()) {
+ Register input = r.IsSmi() ? ToRegister(instr->value())
+ : ToRegister32(instr->value());
+ Register result = r.IsSmi() ? ToRegister(instr->result())
+ : ToRegister32(instr->result());
+ __ Abs(result, input);
+ DeoptimizeIf(vs, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry) {
+ // Handle the tricky cases of MathAbsTagged:
+ // - HeapNumber inputs.
+ // - Negative inputs produce a positive result, so a new HeapNumber is
+ // allocated to hold it.
+ // - Positive inputs are returned as-is, since there is no need to allocate
+ // a new HeapNumber for the result.
+ // - The (smi) input -0x80000000 produces +0x80000000, which does not fit in
+ // a smi. In this case, the inline code sets the result and jumps directly
+ // to the allocation_entry label.
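+ // Worked example: the smi input -0x80000000 hits the last case: its
+ // magnitude 0x80000000 exceeds Smi::kMaxValue, so the result becomes the
+ // HeapNumber 2147483648.0.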
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+
+ Label runtime_allocation;
+
+ // Deoptimize if the input is not a HeapNumber.
+ __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // If the argument is positive, we can return it as-is, without any need to
+ // allocate a new HeapNumber for the result. We have to do this in integer
+ // registers (rather than with fabs) because we need to be able to distinguish
+ // the two zeroes.
+ __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ Mov(result, input);
+ __ Tbz(result_bits, kXSignBit, exit);
+
+ // Calculate abs(input) by clearing the sign bit.
+ __ Bic(result_bits, result_bits, kXSignMask);
+
+ // Allocate a new HeapNumber to hold the result.
+ // result_bits The bit representation of the (double) result.
+ __ Bind(allocation_entry);
+ __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
+ // The inline (non-deferred) code will store result_bits into result.
+ __ B(exit);
+
+ __ Bind(&runtime_allocation);
+ if (FLAG_debug_code) {
+ // Because result is in the pointer map, we need to make sure it has a valid
+ // tagged value before we call the runtime. We speculatively set it to the
+ // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
+ // be valid.
+ Label result_ok;
+ Register input = ToRegister(instr->value());
+ __ JumpIfSmi(result, &result_ok);
+ __ Cmp(input, result);
+ __ Assert(eq, kUnexpectedValue);
+ __ Bind(&result_ok);
+ }
+
+ { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+ }
+ // The inline (non-deferred) code will store result_bits into result.
+}
+
+
+void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTagged: public LDeferredCode {
+ public:
+ DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTagged(instr_, exit(),
+ allocation_entry());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ Label* allocation_entry() { return &allocation; }
+ private:
+ LMathAbsTagged* instr_;
+ Label allocation;
+ };
+
+ // TODO(jbramley): The early-exit mechanism would skip the new frame handling
+ // in GenerateDeferredCode. Tidy this up.
+ ASSERT(!NeedsDeferredFrame());
+
+ DeferredMathAbsTagged* deferred =
+ new(zone()) DeferredMathAbsTagged(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
+ instr->hydrogen()->value()->representation().IsSmi());
+ Register input = ToRegister(instr->value());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+ Label done;
+
+ // Handle smis inline.
+ // We can treat smis as 64-bit integers, since the (low-order) tag bits will
+ // never get set by the negation. This is therefore the same as the Integer32
+ // case in DoMathAbs, except that it operates on 64-bit values.
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
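+ // Example: the tagged smi -1 is 0xffffffff00000000; 64-bit negation gives
+ // 0x0000000100000000, the tagged smi +1, with the tag bits still clear.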
+
+ __ JumpIfNotSmi(input, deferred->entry());
+
+ __ Abs(result, input, NULL, &done);
+
+ // The result is the magnitude (abs) of the smallest value a smi can
+ // represent, encoded as a double.
+ __ Mov(result_bits, double_to_rawbits(0x80000000));
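+ // (0x80000000 as a double is 2147483648.0, whose raw bits are
+ // 0x41e0000000000000.)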
+ __ B(deferred->allocation_entry());
+
+ __ Bind(deferred->exit());
+ __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
+ DoubleRegister double_temp2 = double_scratch();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register temp3 = ToRegister(instr->temp3());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+}
+
+
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ __ Frintm(result, input);
+}
+
+
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIfMinusZero(input, instr->environment());
+ }
+
+ __ Fcvtms(result, input);
+
+ // Check that the result fits into a 32-bit integer.
+ // - The result did not overflow.
+ __ Cmp(result, Operand(result, SXTW));
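+ // (Illustrative: an input of 2^40 converts to 0x10000000000, whose
+ // sign-extended low word is 0, so the compare sets ne and we deoptimize.)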
+ // - The input was not NaN.
+ __ Fccmp(input, input, NoFlag, eq);
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ Register result = ToRegister32(instr->result());
+ int32_t divisor = instr->divisor();
+
+ // If the divisor is 1, return the dividend.
+ if (divisor == 1) {
+ __ Mov(result, dividend, kDiscardForSameWReg);
+ return;
+ }
+
+ // If the divisor is positive, things are easy: there can be no deopts and we
+ // can simply do an arithmetic right shift.
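+ // (Arithmetic shifting floors: e.g. -5 >> 1 == -3 == floor(-5 / 2), so no
+ // fix-up is needed.)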
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ Mov(result, Operand(dividend, ASR, shift));
+ return;
+ }
+
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ Negs(result, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ DeoptimizeIf(vs, instr->environment());
+ }
+ return;
+ }
+
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ Mov(result, Operand(dividend, ASR, shift));
+ return;
+ }
+
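+ // Otherwise, shift the negated value and use Csel to pick the precomputed
+ // constant when the negation overflowed. Worked example: divisor == -2 and
+ // dividend == kMinInt: Negs sets vs, so Csel yields kMinInt / -2 ==
+ // 0x40000000 == floor(kMinInt / -2).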
+ __ Asr(result, result, shift);
+ __ Csel(result, result, kMinInt / divisor, vc);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ ASSERT(!AreAliased(dividend, result));
+
+ if (divisor == 0) {
+ Deoptimize(instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIfZero(dividend, instr->environment());
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
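+ // Worked example: dividend == -7, divisor == 2: the truncating result -3
+ // differs from floor(-3.5) == -4, so we compute (-7 + 1) / 2 == -3 and
+ // subtract 1, giving -4.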
+ Register temp = ToRegister32(instr->temp());
+ ASSERT(!AreAliased(temp, dividend, result));
+ Label needs_adjustment, done;
+ __ Cmp(dividend, 0);
+ __ B(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+ __ B(&done);
+ __ Bind(&needs_adjustment);
+ __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+ __ Sub(result, result, Operand(1));
+ __ Bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ Register divisor = ToRegister32(instr->divisor());
+ Register remainder = ToRegister32(instr->temp());
+ Register result = ToRegister32(instr->result());
+
+ // Sdiv can't cause an exception on ARM64, so we can execute it
+ // speculatively, before the division-by-zero check below.
+ __ Sdiv(result, dividend, divisor);
+
+ // Check for x / 0.
+ DeoptimizeIfZero(divisor, instr->environment());
+
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // The V flag will be set iff dividend == kMinInt.
+ __ Cmp(dividend, 1);
+ __ Ccmp(divisor, -1, NoFlag, vs);
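+ // If dividend != kMinInt, the Ccmp sets NZCV to NoFlag, so eq cannot hold
+ // and no deopt occurs; otherwise eq holds exactly when divisor == -1.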
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(divisor, 0);
+ __ Ccmp(dividend, 0, ZFlag, mi);
+ // "divisor" can't be null because the code would have already been
+ // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
+ // In this case we need to deoptimize to produce a -0.
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ Label done;
+ // If both operands have the same sign then we are done.
+ __ Eor(remainder, dividend, divisor);
+ __ Tbz(remainder, kWSignBit, &done);
+
+ // Check if the result needs to be corrected.
+ __ Msub(remainder, result, divisor, dividend);
+ __ Cbz(remainder, &done);
+ __ Sub(result, result, 1);
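+ // Worked example: dividend == 7, divisor == -2: Sdiv gives -3 (truncation
+ // of -3.5); the signs differ and the remainder 7 - (-3 * -2) == 1 is
+ // non-zero, so the result is corrected to -4 == floor(-3.5).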
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ ASSERT(ToDoubleRegister(instr->result()).Is(d0));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister32(instr->value());
+ Register result = ToRegister32(instr->result());
+ __ Clz(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Label done;
+
+ // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
+ // Math.pow(-Infinity, 0.5) == +Infinity
+ // Math.pow(-0.0, 0.5) == +0.0
+
+ // Catch -infinity inputs first.
+ // TODO(jbramley): A constant infinity register would be helpful here.
+ __ Fmov(double_scratch(), kFP64NegativeInfinity);
+ __ Fcmp(double_scratch(), input);
+ __ Fabs(result, input);
+ __ B(&done, eq);
+
+ // Add +0.0 to convert -0.0 to +0.0.
+ __ Fadd(double_scratch(), input, fp_zero);
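+ // Under IEEE-754 round-to-nearest, (-0.0) + (+0.0) == +0.0, and adding
+ // +0.0 leaves every other value unchanged.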
+ __ Fsqrt(result, double_scratch());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d1));
+ ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(x11));
+ ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
+ ASSERT(ToDoubleRegister(instr->left()).is(d0));
+ ASSERT(ToDoubleRegister(instr->result()).is(d0));
+
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(x11, &no_deopt);
+ __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+ __ Bind(&no_deopt);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ // Ensure the integer exponent has no garbage in its top 32 bits, as
+ // MathPowStub supports large integer exponents.
+ Register exponent = ToRegister(instr->right());
+ __ Sxtw(exponent, exponent);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ }
+}
+
+
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister scratch_d = double_scratch();
+
+ ASSERT(!AreAliased(input, result, scratch_d));
+
+ Label done;
+
+ __ Frinta(result, input);
+ __ Fcmp(input, 0.0);
+ __ Fccmp(result, input, ZFlag, lt);
+ // The result is correct if the input was in [-0, +infinity], or was a
+ // negative integral value.
+ __ B(eq, &done);
+
+ // Here the input is negative, non-integral, with an exponent lower than 52.
+ // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
+ // case, so we can safely add 0.5.
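+ // Worked example: input -2.5 gives -2.5 + 0.5 == -2.0, and Frintm keeps
+ // -2.0, matching ties-towards-+infinity. Input -0.2 gives approximately
+ // 0.3, which Frintm takes to +0.0; the Fabs/Fneg pair then forces -0.0.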
+ __ Fmov(scratch_d, 0.5);
+ __ Fadd(result, input, scratch_d);
+ __ Frintm(result, result);
+ // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
+ __ Fabs(result, result);
+ __ Fneg(result, result);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister temp = ToDoubleRegister(instr->temp1());
+ DoubleRegister dot_five = double_scratch();
+ Register result = ToRegister(instr->result());
+ Label done;
+
+ // Math.round() rounds to the nearest integer, with ties going towards
+ // +infinity. This does not match any IEEE-754 rounding mode.
+ // - Infinities and NaNs are propagated unchanged, but cause deopts because
+ // they can't be represented as integers.
+ // - The sign of the result is the same as the sign of the input. This means
+ // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
+ // result of -0.0.
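+ // Worked example: 2.5 + 0.5 == 3.0 converts to 3, and -2.5 + 0.5 == -2.0
+ // converts to -2, as required by ties-towards-+infinity.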
+
+ // Add 0.5 and round towards -infinity.
+ __ Fmov(dot_five, 0.5);
+ __ Fadd(temp, input, dot_five);
+ __ Fcvtms(result, temp);
+
+ // The result is correct if:
+ // - result is not 0, as the input could be NaN or in [-0.5, -0.0].
+ // - result is not 1, as 0.499...94 would wrongly map to 1.
+ // - result fits in 32 bits.
+ __ Cmp(result, Operand(result.W(), SXTW));
+ __ Ccmp(result, 1, ZFlag, eq);
+ __ B(hi, &done);
+
+ // At this point, we have to handle possible inputs of NaN or numbers in the
+ // range [-0.5, 1.5[, or numbers larger than 32 bits.
+
+ // Deoptimize if the result > 1, as it must be larger than 32 bits.
+ __ Cmp(result, 1);
+ DeoptimizeIf(hi, instr->environment());
+
+ // Deoptimize for negative inputs, which at this point are only numbers in
+ // the range [-0.5, -0.0].
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Fmov(result, input);
+ DeoptimizeIfNegative(result, instr->environment());
+ }
+
+ // Deoptimize if the input was NaN.
+ __ Fcmp(input, dot_five);
+ DeoptimizeIf(vs, instr->environment());
+
+ // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
+ // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
+ // else 0; we avoid dealing with 0.499...94 directly.
+ __ Cset(result, ge);
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ HMathMinMax::Operation op = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else if (instr->hydrogen()->representation().IsSmi()) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+
+ if (op == HMathMinMax::kMathMax) {
+ __ Fmax(result, left, right);
+ } else {
+ ASSERT(op == HMathMinMax::kMathMin);
+ __ Fmin(result, left, right);
+ }
+ }
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister32(instr->result())));
+
+ // Theoretically, a branch-free variation of the code for integer division by
+ // a power of 2 should be faster: it computes the remainder via an additional
+ // multiplication (which simplifies to an 'and') and a subtraction, and it is
+ // exactly what GCC and Clang emit. Nevertheless, benchmarks indicate that
+ // positive dividends are heavily favored, so the branching version performs
+ // better.
+ HMod* hmod = instr->hydrogen();
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
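+ // Worked example: divisor == 4 (or -4) gives mask == 3. For dividend == -7
+ // we negate to 7, mask to 3, and negate back to -3, matching -7 % 4 == -3
+ // in JS.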
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
+ // Note that this is correct even for kMinInt operands.
+ __ Neg(dividend, dividend);
+ __ And(dividend, dividend, mask);
+ __ Negs(dividend, dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ B(&done);
+ }
+
+ __ Bind(&dividend_is_not_negative);
+ __ And(dividend, dividend, mask);
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ Register temp = ToRegister32(instr->temp());
+ ASSERT(!AreAliased(dividend, result, temp));
+
+ if (divisor == 0) {
+ Deoptimize(instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ Sxtw(dividend.X(), dividend);
+ __ Mov(temp, Abs(divisor));
+ __ Smsubl(result.X(), result, temp, dividend.X());
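+ // Worked example: dividend == -7, divisor == 3: TruncatingDiv gives -2 and
+ // Smsubl computes -7 - (-2 * 3) == -1, matching -7 % 3 == -1 in JS.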
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ Cbnz(result, &remainder_not_zero);
+ DeoptimizeIfNegative(dividend, instr->environment());
+ __ Bind(&remainder_not_zero);
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ Register dividend = ToRegister32(instr->left());
+ Register divisor = ToRegister32(instr->right());
+ Register result = ToRegister32(instr->result());
+
+ Label done;
+ // modulo = dividend - quotient * divisor
+ __ Sdiv(result, dividend, divisor);
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIfZero(divisor, instr->environment());
+ }
+ __ Msub(result, result, divisor, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cbnz(result, &done);
+ DeoptimizeIfNegative(dividend, instr->environment());
+ }
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
+ ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
+ bool is_smi = instr->hydrogen()->representation().IsSmi();
+ Register result =
+ is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
+ Register left =
+ is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
+ int32_t right = ToInteger32(instr->right());
+ ASSERT((right > -kMaxInt) && (right < kMaxInt));
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero) {
+ if (right < 0) {
+ // The result is -0 if right is negative and left is zero.
+ DeoptimizeIfZero(left, instr->environment());
+ } else if (right == 0) {
+ // The result is -0 if the right is zero and the left is negative.
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ }
+
+ switch (right) {
+ // Cases which can detect overflow.
+ case -1:
+ if (can_overflow) {
+ // Only 0x80000000 can overflow here.
+ __ Negs(result, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Neg(result, left);
+ }
+ break;
+ case 0:
+ // This case can never overflow.
+ __ Mov(result, 0);
+ break;
+ case 1:
+ // This case can never overflow.
+ __ Mov(result, left, kDiscardForSameWReg);
+ break;
+ case 2:
+ if (can_overflow) {
+ __ Adds(result, left, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, left);
+ }
+ break;
+
+ default:
+ // Multiplication by constant powers of two (and some related values)
+ // can be done efficiently with shifted operands.
+ int32_t right_abs = Abs(right);
+
+ if (IsPowerOf2(right_abs)) {
+ int right_log2 = WhichPowerOf2(right_abs);
+
+ if (can_overflow) {
+ Register scratch = result;
+ ASSERT(!AreAliased(scratch, left));
+ __ Cls(scratch, left);
+ __ Cmp(scratch, right_log2);
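+ // Cls counts the leading bits that match the sign bit, excluding the sign
+ // bit itself: left == 3 gives Cls == 29, so shifts up to 29 cannot
+ // overflow, while left == 0x40000000 gives Cls == 0 and always deopts here.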
+ DeoptimizeIf(lt, instr->environment());
+ }
+
+ if (right >= 0) {
+ // result = left << log2(right)
+ __ Lsl(result, left, right_log2);
+ } else {
+ // result = -left << log2(-right)
+ if (can_overflow) {
+ __ Negs(result, Operand(left, LSL, right_log2));
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Neg(result, Operand(left, LSL, right_log2));
+ }
+ }
+ return;
+ }
+
+ // For the following cases, we could perform a conservative overflow check
+ // with CLS as above. However the few cycles saved are likely not worth
+ // the risk of deoptimizing more often than required.
+ ASSERT(!can_overflow);
+
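+ // Examples: right == 5 gives left + (left << 2); right == 7 gives
+ // -(left - (left << 3)); right == -3 gives left - (left << 2); and
+ // right == -5 gives -(left + (left << 2)).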
+ if (right >= 0) {
+ if (IsPowerOf2(right - 1)) {
+ // result = left + (left << log2(right - 1))
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
+ } else if (IsPowerOf2(right + 1)) {
+ // result = -left + (left << log2(right + 1))
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ if (IsPowerOf2(-right + 1)) {
+ // result = left - (left << log2(-right + 1))
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
+ } else if (IsPowerOf2(-right - 1)) {
+ // result = -left - (left << log2(-right - 1))
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Register right = ToRegister32(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero && !left.Is(right)) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
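+ // Worked example: left == 0, right == -5: Z ends up set, so Ccmn evaluates
+ // 0 + (-5), setting N (mi) and deoptimizing, since 0 * -5 is -0 in JS.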
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ if (can_overflow) {
+ __ Smull(result.X(), left, right);
+ __ Cmp(result.X(), Operand(result, SXTW));
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ __ Mul(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoMulS(LMulS* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero && !left.Is(right)) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+ if (can_overflow) {
+ __ Smulh(result, left, right);
+ __ Cmp(result, Operand(result.W(), SXTW));
+ __ SmiTag(result);
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ if (AreAliased(result, left, right)) {
+ // All three registers are the same: half untag the input and then
+ // multiply, giving a tagged result.
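+ // Example: the tagged smi 3 (3 << 32) becomes 3 << 16 after the half
+ // untag; squaring yields 9 << 32, i.e. the tagged smi 9.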
+ STATIC_ASSERT((kSmiShift % 2) == 0);
+ __ Asr(result, left, kSmiShift / 2);
+ __ Mul(result, result, result);
+ } else if (result.Is(left) && !left.Is(right)) {
+ // Registers result and left alias, right is distinct: untag left into
+ // result, and then multiply by right, giving a tagged result.
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ } else {
+ ASSERT(!left.Is(result));
+ // Registers result and right alias, left is distinct, or all registers
+ // are distinct: untag right into result, and then multiply by left,
+ // giving a tagged result.
+ __ SmiUntag(result, right);
+ __ Mul(result, left, result);
+ }
+ }
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = ToRegister(instr->result());
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
+ } else {
+ __ B(deferred->entry());
+ }
+
+ __ Bind(deferred->exit());
+ __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Label slow, convert_and_store;
+ Register src = ToRegister32(value);
+ Register dst = ToRegister(instr->result());
+ Register scratch1 = ToRegister(temp1);
+
+ if (FLAG_inline_new) {
+ Register scratch2 = ToRegister(temp2);
+ __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
+ __ B(&convert_and_store);
+ }
+
+ // Slow case: call the runtime system to do the number allocation.
+ __ Bind(&slow);
+ // TODO(3095996): Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains an
+ // integer value.
+ __ Mov(dst, 0);
+ {
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, dst);
+ }
+
+ // Convert number to floating point and store in the newly allocated heap
+ // number.
+ __ Bind(&convert_and_store);
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Ucvtf(dbl_scratch, src);
+ __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register value = ToRegister32(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
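+ // With 32-bit smi payloads Smi::kMaxValue is 0x7fffffff, so only inputs
+ // with the top bit set take the deferred HeapNumber path.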
+ __ Cmp(value, Smi::kMaxValue);
+ __ B(hi, deferred->entry());
+ __ SmiTag(result, value.X());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+
+ Label done, load_smi;
+
+ // Work out what untag mode we're working with.
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ __ JumpIfSmi(input, &load_smi);
+
+ Label convert_undefined;
+
+ // Heap number map check.
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ if (can_convert_undefined_to_nan) {
+ __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
+ &convert_undefined);
+ } else {
+ DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+ }
+
+ // Load heap number.
+ __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
+ if (instr->hydrogen()->deoptimize_on_minus_zero()) {
+ DeoptimizeIfMinusZero(result, instr->environment());
+ }
+ __ B(&done);
+
+ if (can_convert_undefined_to_nan) {
+ __ Bind(&convert_undefined);
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ B(&done);
+ }
+
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ // Fall through to load_smi.
+ }
+
+ // Smi to double register conversion.
+ __ Bind(&load_smi);
+ __ SmiUntagToDouble(result, input);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
+ __ PushPreamble(instr->argc(), kPointerSize);
+}
+
+
+void LCodeGen::DoPushArguments(LPushArguments* instr) {
+ MacroAssembler::PushPopQueue args(masm());
+
+ for (int i = 0; i < instr->ArgumentCount(); ++i) {
+ LOperand* arg = instr->argument(i);
+ if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ return;
+ }
+ args.Queue(ToRegister(arg));
+ }
+
+ // The preamble was done by LPreparePushArguments.
+ args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
+
+ after_push_argument_ = true;
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in x0. We're leaving the code
+ // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
+ __ Push(x0);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ Register stack_pointer = masm()->StackPointer();
+ __ Mov(stack_pointer, fp);
+ no_frame_start = masm_->pc_offset();
+ __ Pop(fp, lr);
+ }
+
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ __ Drop(parameter_count + 1);
+ } else {
+ Register parameter_count = ToRegister(instr->parameter_count());
+ __ DropBySMI(parameter_count);
+ }
+ __ Ret();
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
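+ // (For example, constant index 3 in a two-byte string is byte offset 6
+ // into the character data.)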
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+
+ __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ return MemOperand(temp, ToRegister32(index), SXTW);
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ return MemOperand(temp, ToRegister32(index), SXTW, 1);
+ }
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ // Even though this lithium instruction comes with a temp register, we
+ // can't use it here because we want to use "AtStart" constraints on the
+ // inputs and the debug code here needs a scratch register.
+ UseScratchRegisterScope temps(masm());
+ Register dbg_temp = temps.AcquireX();
+
+ __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
+
+ __ And(dbg_temp, dbg_temp,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Ldrb(result, operand);
+ } else {
+ __ Ldrh(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
+ encoding_mask);
+ }
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Strb(value, operand);
+ } else {
+ __ Strh(value, operand);
+ }
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIfNegative(input.W(), instr->environment());
+ }
+ __ SmiTag(output, input);
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ if (instr->needs_check()) {
+ DeoptimizeIfNotSmi(input, instr->environment());
+ }
+
+ __ SmiUntag(result, input);
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister32(instr->left());
+ Register result = ToRegister32(instr->result());
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister32(instr->right());
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, right); break;
+ case Token::SAR: __ Asr(result, left, right); break;
+ case Token::SHL: __ Lsl(result, left, right); break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Lsr(result, left, right);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ int shift_count = JSShiftAmountFromLConstant(right_op);
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left, kDiscardForSameWReg);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, shift_count); break;
+ case Token::SAR: __ Asr(result, left, shift_count); break;
+ case Token::SHL: __ Lsl(result, left, shift_count); break;
+ case Token::SHR: __ Lsr(result, left, shift_count); break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoShiftS(LShiftS* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+
+ // Only ROR by register needs a temp.
+ ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
+ (instr->temp() == NULL));
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister(instr->right());
+ switch (instr->op()) {
+ case Token::ROR: {
+ Register temp = ToRegister(instr->temp());
+ __ Ubfx(temp, right, kSmiShift, 5);
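+ // Ubfx extracts bits 32-36: the untagged shift count modulo 32, matching
+ // JS shift semantics.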
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), temp.W());
+ __ SmiTag(result);
+ break;
+ }
+ case Token::SAR:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Asr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsl(result, left, result);
+ break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ int shift_count = JSShiftAmountFromLConstant(right_op);
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR:
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), shift_count);
+ __ SmiTag(result);
+ break;
+ case Token::SAR:
+ __ Asr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Lsl(result, left, shift_count);
+ break;
+ case Token::SHR:
+ __ Lsr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ Debug("LDebugBreak", 0, BREAK);
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register scratch1 = x5;
+ Register scratch2 = x6;
+ ASSERT(instr->IsMarkedAsCall());
+
+ ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
+ // TODO(all): if Mov could handle objects in new space, then it could be
+ // used here.
+ __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
+ __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
+ __ Push(cp, scratch1, scratch2); // The context is the first argument.
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &done);
+
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ __ Bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new(zone()) DeferredStackCheck(this, instr);
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(lo, deferred_stack_check->entry());
+
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ Bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting call and the safepoint in
+ // the deferred code.
+ }
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ Register temp = ToRegister(instr->temp());
+ __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
+ __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ Ldr(scratch, target);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
+ }
+ }
+
+ __ Str(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch,
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+ __ Bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->value());
+ Register cell = ToRegister(instr->temp1());
+
+ // Load the cell.
+ __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
+
+ // If the cell we are storing to contains the hole, it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted, so we deoptimize.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register payload = ToRegister(instr->temp2());
+ __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
+ DeoptimizeIfRoot(
+ payload, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+
+ // Store the value.
+ __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
+ // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ scratch = ToRegister(instr->temp());
+ }
+
+ MemOperand dst =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->base_offset());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Fcvt(dbl_scratch.S(), value);
+ __ Str(dbl_scratch.S(), dst);
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, dst);
+ } else {
+ Register value = ToRegister(instr->value());
+
+ switch (elements_kind) {
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Strb(value, dst);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Strh(value, dst);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Str(value.W(), dst);
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ MemOperand mem_op;
+
+ if (instr->key()->IsConstantOperand()) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ int offset = instr->base_offset() + constant_key * kDoubleSize;
+ mem_op = MemOperand(elements, offset);
+ } else {
+ Register store_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ instr->hydrogen()->representation(),
+ instr->base_offset());
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ __ CanonicalizeNaN(double_scratch(), value);
+ __ Str(double_scratch(), mem_op);
+ } else {
+ __ Str(value, mem_op);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = no_reg;
+ Register store_base = no_reg;
+ Register key = no_reg;
+ MemOperand mem_op;
+
+ if (!instr->key()->IsConstantOperand() ||
+ instr->hydrogen()->NeedsWriteBarrier()) {
+ scratch = ToRegister(instr->temp());
+ }
+
+ Representation representation = instr->hydrogen()->value()->representation();
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ int offset = instr->base_offset() +
+ ToInteger32(const_operand) * kPointerSize;
+ store_base = elements;
+ if (representation.IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+ (kSmiTag == 0));
+ mem_op = UntagSmiMemOperand(store_base, offset);
+ } else {
+ mem_op = MemOperand(store_base, offset);
+ }
+ } else {
+ store_base = scratch;
+ key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+
+ mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ representation, instr->base_offset());
+ }
+
+ __ Store(value, mem_op, representation);
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ ASSERT(representation.IsTagged());
+ // This assignment may cause element_addr to alias store_base.
+ Register element_addr = scratch;
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute the address of the modified element and store it into the
+ // scratch register.
+ __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
+ __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
+ kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
+ instr->hydrogen()->PointersToHereCheckForValue());
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x2));
+ ASSERT(ToRegister(instr->key()).Is(x1));
+ ASSERT(ToRegister(instr->value()).Is(x0));
+
+ Handle<Code> ic = instr->strict_mode() == STRICT
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
+ Register object = ToRegister(instr->object());
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ ASSERT(!instr->hydrogen()->has_transition());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ Register value = ToRegister(instr->value());
+ __ Store(value, MemOperand(object, offset), representation);
+ return;
+ }
+
+ __ AssertNotSmi(object);
+
+ if (representation.IsDouble()) {
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->has_transition());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ FPRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register value = ToRegister(instr->value());
+
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsInteger32Constant(LConstantOperand::cast(instr->value())));
+
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
+ AddDeprecationDependency(transition);
+ // Store the new map value.
+ Register new_map_value = ToRegister(instr->temp0());
+ __ Mov(new_map_value, Operand(transition));
+ __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ // Update the write barrier for the map field.
+ __ RecordWriteForMap(object,
+ new_map_value,
+ ToRegister(instr->temp1()),
+ GetLinkRegisterState(),
+ kSaveFPRegs);
+ }
+ }
+
+ // Do the store.
+ Register destination;
+ if (access.IsInobject()) {
+ destination = object;
+ } else {
+ Register temp0 = ToRegister(instr->temp0());
+ __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ destination = temp0;
+ }
+
+ if (representation.IsSmi() &&
+ instr->hydrogen()->value()->representation().IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+#ifdef DEBUG
+ Register temp0 = ToRegister(instr->temp0());
+ __ Ldr(temp0, FieldMemOperand(destination, offset));
+ __ AssertSmi(temp0);
+ // If destination aliased temp0, restore it to the address calculated
+ // earlier.
+ if (destination.Is(temp0)) {
+ ASSERT(!access.IsInobject());
+ __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ }
+#endif
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Store(value, UntagSmiFieldMemOperand(destination, offset),
+ Representation::Integer32());
+ } else {
+ __ Store(value, FieldMemOperand(destination, offset), representation);
+ }
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ __ RecordWriteField(destination,
+ offset,
+ value, // Clobbered.
+ ToRegister(instr->temp1()), // Clobbered.
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->value()).is(x0));
+ ASSERT(ToRegister(instr->object()).is(x1));
+
+ // Name must be in x2.
+ __ Mov(x2, Operand(instr->name()));
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new(zone()) DeferredStringCharCodeAt(this, instr);
+
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister32(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ Register index = ToRegister(instr->index());
+ __ SmiTagAndPush(index);
+
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
+ instr->context());
+ __ AssertSmi(x0);
+ __ SmiUntag(x0);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new(zone()) DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister32(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ __ Cmp(char_code, String::kMaxOneByteCharCode);
+ __ B(hi, deferred->entry());
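+  // Fast path: look the code up in the single-character string cache. An
+  // undefined cache entry means the string has not been materialized yet, so
+  // fall back to the deferred runtime call.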
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(eq, deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTagAndPush(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ Condition condition = TokenToCondition(op, false);
+
+ EmitCompareAndBranch(instr, condition, x0, 0);
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+
+ if (can_overflow) {
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoSubS(LSubS* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+ if (can_overflow) {
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Register input = ToRegister(value);
+ Register scratch1 = ToRegister(temp1);
+ DoubleRegister dbl_scratch1 = double_scratch();
+
+ Label done;
+
+ // Load heap object map.
+ __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
+
+ if (instr->truncating()) {
+ Register output = ToRegister(instr->result());
+ Label check_bools;
+
+    // If it's not a heap number, jump to the boolean and undefined checks.
+ __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
+
+ // A heap number: load value and convert to int32 using truncating function.
+ __ TruncateHeapNumberToI(output, input);
+ __ B(&done);
+
+ __ Bind(&check_bools);
+
+ Register true_root = output;
+ Register false_root = scratch1;
+ __ LoadTrueFalseRoots(true_root, false_root);
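+    // Cset leaves 1 in output iff input is the true root. The Ccmp compares
+    // input against the false root only if input was not true (ne), and
+    // forces the Z flag otherwise, so the B(eq) below is taken for either
+    // boolean with the correct 0 or 1 already in output.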
+ __ Cmp(input, true_root);
+ __ Cset(output, eq);
+ __ Ccmp(input, false_root, ZFlag, ne);
+ __ B(eq, &done);
+
+    // Output already contains zero; undefined is converted to zero for
+    // truncating conversions.
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+ } else {
+ Register output = ToRegister32(instr->result());
+
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
+
+    // Deoptimize if it's not a heap number.
+ DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // A heap number: load value and convert to int32 using non-truncating
+ // function. If the result is out of range, branch to deoptimize.
+ __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
+ DeoptimizeIf(ne, instr->environment());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(output, 0);
+ __ B(ne, &done);
+ __ Fmov(scratch1, dbl_scratch1);
+ DeoptimizeIfNegative(scratch1, instr->environment());
+ }
+ }
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2());
+ }
+
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(output, input);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+ __ JumpIfNotSmi(input, deferred->entry());
+ __ SmiUntag(output, input);
+ __ Bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->value()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Push(x0);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Label materialized;
+ // Registers will be used as follows:
+ // x7 = literals array.
+ // x1 = regexp literal.
+ // x0 = regexp literal clone.
+ // x10-x12 are used as temporaries.
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadObject(x7, instr->hydrogen()->literals());
+ __ Ldr(x1, FieldMemOperand(x7, literal_offset));
+ __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
+
+  // Create the regexp literal using the runtime function.
+  // The result will be in x0.
+ __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ Mov(x11, Operand(instr->hydrogen()->pattern()));
+ __ Mov(x10, Operand(instr->hydrogen()->flags()));
+ __ Push(x7, x12, x11, x10);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+ __ Mov(x1, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+
+ __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ __ Mov(x0, Smi::FromInt(size));
+ __ Push(x1, x0);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ Pop(x1);
+
+ __ Bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object = ToRegister(instr->object());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register temp1 = ToRegister(instr->temp1());
+ Register new_map = ToRegister(instr->temp2());
+ __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
+ __ Mov(new_map, Operand(to_map));
+ __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
+ kDontSaveFPRegs);
+ } else {
+ {
+ UseScratchRegisterScope temps(masm());
+ // Use the temp register only in a restricted scope - the codegen checks
+ // that we do not use any register across a call.
+ __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
+ DONT_DO_SMI_CHECK);
+ }
+ ASSERT(object.is(x0));
+ ASSERT(ToRegister(instr->context()).is(cp));
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
+ __ Mov(x1, Operand(to_map));
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithRegistersAndDoubles(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+ __ Bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
+ DeoptimizeIf(eq, instr->environment());
+ __ Bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ TruncateDoubleToI(result, input);
+ if (instr->tag_result()) {
+ __ SmiTag(result, result);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ Register input = ToRegister(instr->value());
+ __ Push(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Handle<String> type_name = instr->type_literal();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register map = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, true_label);
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (String::Equals(type_name, factory->string_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(
+ value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
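+    // The true branch is taken only if the map's undetectable bit is clear;
+    // undetectable values report "undefined" from typeof.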
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (String::Equals(type_name, factory->symbol_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
+ EmitBranch(instr, eq);
+
+ } else if (String::Equals(type_name, factory->boolean_string())) {
+ __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
+ __ CompareRoot(value, Heap::kFalseValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
+ __ CompareRoot(value, Heap::kNullValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (String::Equals(type_name, factory->undefined_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register scratch = ToRegister(instr->temp1());
+
+ __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
+ __ JumpIfSmi(value, false_label);
+ // Check for undetectable objects and jump to the true branch in this case.
+ __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (String::Equals(type_name, factory->function_string())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ ASSERT(instr->temp1() != NULL);
+ Register type = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
+ // HeapObject's type has been loaded into type register by JumpIfObjectType.
+ EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
+
+ } else if (String::Equals(type_name, factory->object_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ if (!FLAG_harmony_typeof) {
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
+ }
+ __ JumpIfObjectType(value, map, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
+ __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(gt, false_label);
+ // Check for undetectable objects => false.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else {
+ __ B(false_label);
+ }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ Register temp = ToRegister(instr->temp());
+ __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Cmp(map, temp);
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // If the receiver is null or undefined, we have to pass the global object as
+ // a receiver to normal functions. Values have to be passed unchanged to
+ // builtins and strict-mode functions.
+ Label global_object, done, copy_receiver;
+
+ if (!instr->hydrogen()->known_function()) {
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // CompilerHints is an int32 field. See objects.h.
+ __ Ldr(result.W(),
+ FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
+
+ // Do not transform the receiver to object for strict mode functions.
+ __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);
+
+ // Do not transform the receiver to object for builtins.
+ __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
+
+ // Deoptimize if the receiver is not a JS object.
+ DeoptimizeIfSmi(receiver, instr->environment());
+ __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, &copy_receiver);
+ Deoptimize(instr->environment());
+
+ __ Bind(&global_object);
+ __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ B(&done);
+
+ __ Bind(&copy_receiver);
+ __ Mov(result, receiver);
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object);
+ __ Push(index);
+ __ Mov(cp, 0);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+
+ __ AssertSmi(index);
+
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, result, object, index);
+
+ Label out_of_object, done;
+
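+  // The field index is a smi whose payload's low bit flags a mutable
+  // heap-number (double) field; Smi::FromInt(1) is exactly that bit. The
+  // ASR by 1 below strips the flag and leaves a smi field index, negative
+  // for out-of-object properties.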
+ __ TestAndBranchIfAnySet(
+ index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
+ __ Mov(index, Operand(index, ASR, 1));
+
+ __ Cmp(index, Smi::FromInt(0));
+ __ B(lt, &out_of_object);
+
+ STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+ __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
+
+ __ B(&done);
+
+ __ Bind(&out_of_object);
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index is the negated out-of-object property index plus 1.
+ __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ Bind(deferred->exit());
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ Push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/arm64/lithium-codegen-arm64.h b/chromium/v8/src/arm64/lithium-codegen-arm64.h
new file mode 100644
index 00000000000..43cf13f9fe9
--- /dev/null
+++ b/chromium/v8/src/arm64/lithium-codegen-arm64.h
@@ -0,0 +1,506 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+
+#include "src/arm64/lithium-arm64.h"
+
+#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+class BranchGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ osr_pc_offset_(-1),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple),
+ after_push_argument_(false),
+ inlined_arguments_(false) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+ ~LCodeGen() {
+ ASSERT(!after_push_argument_ || inlined_arguments_);
+ }
+
+ // Simple accessors.
+ Scope* scope() const { return scope_; }
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+ Register ToRegister32(LOperand* op) const;
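+  // (ToRegister32 returns the 32-bit W view of the same register.)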
+ Operand ToOperand(LOperand* op);
+ Operand ToOperand32I(LOperand* op);
+ Operand ToOperand32U(LOperand* op);
+ enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
+ MemOperand ToMemOperand(LOperand* op,
+ StackMode stack_mode = kCanUseStackPointer) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ template<class LI>
+ Operand ToShiftedRightOperand32I(LOperand* right,
+ LI* shift_info) {
+ return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32);
+ }
+ template<class LI>
+ Operand ToShiftedRightOperand32U(LOperand* right,
+ LI* shift_info) {
+ return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32);
+ }
+ template<class LI>
+ Operand ToShiftedRightOperand32(LOperand* right,
+ LI* shift_info,
+ IntegerSignedness signedness);
+
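+  // JavaScript shift counts are taken modulo 32, hence the mask with 0x1f.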
+ int JSShiftAmountFromLConstant(LOperand* constant) {
+ return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
+ }
+
+ // TODO(jbramley): Examine these helpers and check that they make sense.
+ // IsInteger32Constant returns true for smi constants, for example.
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ // Return a double scratch register which can be used locally
+ // when generating code for a lithium instruction.
+ DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry);
+
+ void DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index);
+
+ Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+ void DoGap(LGap* instr);
+
+ // Generic version of EmitBranch. It contains some code to avoid emitting a
+  // branch to the next emitted basic block when we could just fall through.
+  // You shouldn't use it directly; prefer one of the helpers, such as
+  // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
+ template<class InstrType>
+ void EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch);
+
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
+
+ template<class InstrType>
+ void EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs);
+
+ template<class InstrType>
+ void EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask);
+
+ template<class InstrType>
+ void EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch);
+
+ template<class InstrType>
+ void EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value);
+
+ template<class InstrType>
+ void EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index);
+
+ // Emits optimized code to deep-copy the contents of statically known object
+ // graphs (e.g. object literal boilerplate). Expects a pointer to the
+ // allocated destination object in the result register, and a pointer to the
+ // source object in the source register.
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ Register scratch,
+ int* offset,
+ AllocationSiteMode mode);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+  // Returns the condition on which a final split to the true and false labels
+  // should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+ SmiCheck check_needed);
+
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ MemOperand BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding);
+ void DeoptimizeBranch(
+ LEnvironment* environment,
+ BranchType branch_type, Register reg = NoReg, int bit = -1,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void DeoptimizeIf(Condition cond, LEnvironment* environment);
+ void DeoptimizeIfZero(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
+ void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
+ void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
+ void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
+
+ MemOperand PrepareKeyedExternalArrayOperand(Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int base_offset);
+ MemOperand PrepareKeyedArrayOperand(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind,
+ Representation representation,
+ int base_offset);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
+ // Code generation steps. Returns true if code generation should continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void LoadContextFromDeferred(LOperand* context);
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context);
+
+ // Generate a direct call to a known function.
+ // If the function is already loaded into x1 by the caller, function_reg may
+ // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
+ // automatically load it.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ Register function_reg = NoReg);
+
+ // Support for recording safepoint and position information.
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+ bool frame_is_built_;
+
+ // Builder that keeps track of safepoints in the code. The table itself is
+ // emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ // This flag is true when we are after a push (but before a call).
+  // In this situation, jssp no longer references the end of the stack slots,
+  // so we can only reference a stack slot via fp.
+ bool after_push_argument_;
+ // If we have inlined arguments, we are no longer able to use jssp because
+ // jssp is modified and we never know if we are in a block after or before
+ // the pop of the arguments (which restores jssp).
+ bool inlined_arguments_;
+
+ int old_position_;
+
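+  // RAII helper that saves the register state around a call: the constructor
+  // preserves lr and calls a StoreRegistersStateStub, and the destructor
+  // calls the matching RestoreRegistersStateStub before resetting the
+  // expected safepoint kind to kSimple.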
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ UseScratchRegisterScope temps(codegen_->masm_);
+ // Preserve the value of lr which must be saved on the stack (the call to
+ // the stub will clobber it).
+ Register to_be_pushed_lr =
+ temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
+ codegen_->masm_->Mov(to_be_pushed_lr, lr);
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters: {
+ StoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ StoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters: {
+ RestoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ RestoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() { }
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
+};
+
+
+// This is the abstract class used by EmitBranchGeneric.
+// It is used to emit code for conditional branching. The Emit() function
+// emits code to branch when the condition holds and EmitInverted() emits
+// the branch when the inverted condition holds.
+//
+// For actual examples of conditions, see the concrete implementations in
+// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
+class BranchGenerator BASE_EMBEDDED {
+ public:
+ explicit BranchGenerator(LCodeGen* codegen)
+ : codegen_(codegen) { }
+
+ virtual ~BranchGenerator() { }
+
+ virtual void Emit(Label* label) const = 0;
+ virtual void EmitInverted(Label* label) const = 0;
+
+ protected:
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ LCodeGen* codegen_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
diff --git a/chromium/v8/src/arm64/lithium-gap-resolver-arm64.cc b/chromium/v8/src/arm64/lithium-gap-resolver-arm64.cc
new file mode 100644
index 00000000000..bd655eaae83
--- /dev/null
+++ b/chromium/v8/src/arm64/lithium-gap-resolver-arm64.cc
@@ -0,0 +1,311 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/arm64/lithium-codegen-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+// We use the root register to spill a value while breaking a cycle in parallel
+// moves. We don't need access to roots while resolving the move list and using
+// the root register has two advantages:
+// - It is not in the crankshaft allocatable registers list, so it can't
+// interfere with any of the moves we are resolving.
+// - We don't need to push it on the stack, as we can reload it with its value
+// once we have resolved a cycle.
+#define kSavedValue root
+
+// We use the MacroAssembler floating-point scratch register to break a cycle
+// involving double values as the MacroAssembler will not need it for the
+// operations performed by the gap resolver.
+#define kSavedDoubleValue fp_scratch
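+
+// For example, resolving the cyclic parallel move {x0 -> x1, x1 -> x0} saves
+// x1 into kSavedValue (BreakCycle), emits the now-unblocked move x0 -> x1,
+// and finally moves the saved value into x0 (RestoreValue) before the root
+// register is reinitialized.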
+
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
+ saved_destination_(NULL), need_to_restore_root_(false) { }
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // Any cycle is found when we reach this move again.
+ PerformMove(i);
+ if (in_cycle_) RestoreValue();
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ if (!move.IsEliminated()) {
+ ASSERT(move.source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ if (need_to_restore_root_) {
+ ASSERT(kSavedValue.Is(root));
+ __ InitializeRootRegister();
+ need_to_restore_root_ = false;
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+ LMoveOperands& current_move = moves_[index];
+
+ ASSERT(!current_move.IsPending());
+ ASSERT(!current_move.IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+  // destination is saved in a stack-allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated.
+ LOperand* destination = current_move.destination();
+ current_move.set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+      // If there is a blocking, pending move, it must be moves_[root_index_],
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ current_move.set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+void LGapResolver::BreakCycle(int index) {
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+
+  // We break the cycle using registers that are not allocatable by crankshaft,
+  // so they cannot interfere with the moves we are resolving.
+ ASSERT(!kSavedValue.IsAllocatable());
+ ASSERT(!kSavedDoubleValue.IsAllocatable());
+
+ // We save in a register the source of that move and we remember its
+ // destination. Then we mark this move as resolved so the cycle is
+ // broken and we can perform the other moves.
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+
+ if (source->IsRegister()) {
+ need_to_restore_root_ = true;
+ __ Mov(kSavedValue, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ need_to_restore_root_ = true;
+ __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
+ cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
+ __ Fmov(kSavedDoubleValue, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
+ cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
+ __ Ldr(kSavedDoubleValue, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+
+ // Mark this move as resolved.
+ // This move will be actually performed by moving the saved value to this
+ // move's destination in LGapResolver::RestoreValue().
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ if (saved_destination_->IsRegister()) {
+ __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedDoubleValue);
+ cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ Str(kSavedDoubleValue, cgen_->ToMemOperand(saved_destination_));
+ cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ Str(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ldr(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(dst, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(dst, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ __ Fmov(result, cgen_->ToDouble(constant_source));
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ need_to_restore_root_ = true;
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(kSavedValue, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
+ }
+ __ Str(kSavedValue, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ Str(src, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand src = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ Ldr(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else {
+ UNREACHABLE();
+ }
+
+ // The move has been emitted, we can eliminate it.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitStackSlotMove(int index) {
+ // We need a temp register to perform a stack slot to stack slot move, and
+ // the register must not be involved in breaking cycles.
+
+ // Use the Crankshaft double scratch register as the temporary.
+ DoubleRegister temp = crankshaft_fp_scratch;
+
+ LOperand* src = moves_[index].source();
+ LOperand* dst = moves_[index].destination();
+
+ ASSERT(src->IsStackSlot());
+ ASSERT(dst->IsStackSlot());
+ __ Ldr(temp, cgen_->ToMemOperand(src));
+ __ Str(temp, cgen_->ToMemOperand(dst));
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/arm64/lithium-gap-resolver-arm64.h b/chromium/v8/src/arm64/lithium-gap-resolver-arm64.h
new file mode 100644
index 00000000000..55d4ecbf9d2
--- /dev/null
+++ b/chromium/v8/src/arm64/lithium-gap-resolver-arm64.h
@@ -0,0 +1,67 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+
+#include "src/v8.h"
+
+#include "src/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Emit a move from one stack slot to another.
+ void EmitStackSlotMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+
+ // We use the root register as a scratch in a few places. When that happens,
+ // this flag is set to indicate that it needs to be restored.
+ bool need_to_restore_root_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
diff --git a/chromium/v8/src/arm64/macro-assembler-arm64-inl.h b/chromium/v8/src/arm64/macro-assembler-arm64-inl.h
new file mode 100644
index 00000000000..0c6aadf6b9e
--- /dev/null
+++ b/chromium/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -0,0 +1,1706 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+#define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+
+#include <ctype.h>
+
+#include "src/globals.h"
+
+#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/macro-assembler-arm64.h"
+#include "src/arm64/instrument-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
+ return UntagSmiMemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiMemOperand(Register object, int offset) {
+  // Assumes that Smis are shifted by 32 bits and that the target is
+  // little-endian.
+ STATIC_ASSERT(kSmiShift == 32);
+ return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
+}
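+
+// For example, the smi for 42 is stored as 0x0000002a00000000; on a
+// little-endian target, a 32-bit load from (offset + 4) therefore yields the
+// untagged value 42 directly, with no shift required.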
+
+
+Handle<Object> MacroAssembler::CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+}
+
+
+void MacroAssembler::And(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, AND);
+}
+
+
+void MacroAssembler::Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Tst(const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BIC);
+}
+
+
+void MacroAssembler::Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BICS);
+}
+
+
+void MacroAssembler::Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORR);
+}
+
+
+void MacroAssembler::Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORN);
+}
+
+
+void MacroAssembler::Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EOR);
+}
+
+
+void MacroAssembler::Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EON);
+}
+
+
+void MacroAssembler::Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
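+  // A negative immediate is handled as a conditional compare-negative (CCMN)
+  // against the negated value: rn + |imm| sets the same flags as rn - imm.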
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMN);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
+ }
+}
+
+
+void MacroAssembler::Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
+ }
+}
+
+
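+// Add/Adds and Sub/Subs fold a negative immediate into the opposite
+// operation, so the immediate that is actually encoded is always
+// non-negative.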
+void MacroAssembler::Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
+ }
+}
+
+void MacroAssembler::Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, ADD);
+ }
+}
+
+
+void MacroAssembler::Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
+ }
+}
+
+
+void MacroAssembler::Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, SUB);
+ }
+}
+
+
+void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Adds(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Neg(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ if (operand.IsImmediate()) {
+ Mov(rd, -operand.ImmediateValue());
+ } else {
+ Sub(rd, AppropriateZeroRegFor(rd), operand);
+ }
+}
+
+
+void MacroAssembler::Negs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void MacroAssembler::Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void MacroAssembler::Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void MacroAssembler::Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void MacroAssembler::Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void MacroAssembler::Ngc(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbc(rd, zr, operand);
+}
+
+
+void MacroAssembler::Ngcs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbcs(rd, zr, operand);
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Mov(rd, ~imm);
+}
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
+void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ ASSERT(allow_macro_instructions_); \
+ LoadStoreMacro(REG, addr, OP); \
+}
+LS_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::B(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ b(label);
+ CheckVeneerPool(false, false);
+}
+
+
+void MacroAssembler::B(Condition cond, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ B(label, cond);
+}
+
+
+void MacroAssembler::Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfi(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfxil(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bind(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bind(label);
+}
+
+
+void MacroAssembler::Bl(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bl(label);
+}
+
+
+void MacroAssembler::Blr(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ blr(xn);
+}
+
+
+void MacroAssembler::Br(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ br(xn);
+}
+
+
+void MacroAssembler::Brk(int code) {
+ ASSERT(allow_macro_instructions_);
+ brk(code);
+}
+
+
+void MacroAssembler::Cinc(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinc(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cinv(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinv(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cls(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ cls(rd, rn);
+}
+
+
+void MacroAssembler::Clz(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ clz(rd, rn);
+}
+
+
+void MacroAssembler::Cneg(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cneg(rd, rn, cond);
+}
+
+
+// Conditionally zero the destination register. Only X registers are supported
+// due to the truncation side-effect when used on W registers.
+void MacroAssembler::CzeroX(const Register& rd,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP() && rd.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ csel(rd, xzr, rd, cond);
+}
+
+
+// Conditionally move a value into the destination register. Only X registers
+// are supported due to the truncation side-effect when used on W registers.
+void MacroAssembler::CmovX(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP());
+ ASSERT(rd.Is64Bits() && rn.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ if (!rd.is(rn)) {
+ csel(rd, rn, rd, cond);
+ }
+}
+
+
+void MacroAssembler::Cset(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cset(rd, cond);
+}
+
+
+void MacroAssembler::Csetm(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csetm(rd, cond);
+}
+
+
+void MacroAssembler::Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dmb(domain, type);
+}
+
+
+void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dsb(domain, type);
+}
+
+
+void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
+ ASSERT(allow_macro_instructions_);
+ debug(message, code, params);
+}
+
+
+void MacroAssembler::Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ extr(rd, rn, rm, lsb);
+}
+
+
+void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fabs(fd, fn);
+}
+
+
+void MacroAssembler::Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fadd(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fccmp(fn, fm, nzcv, cond);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fcmp(fn, fm);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
+ ASSERT(allow_macro_instructions_);
+ if (value != 0.0) {
+ UseScratchRegisterScope temps(this);
+ FPRegister tmp = temps.AcquireSameSizeAs(fn);
+ Fmov(tmp, value);
+ fcmp(fn, tmp);
+ } else {
+ fcmp(fn, value);
+ }
+}
+
+
+void MacroAssembler::Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fcsel(fd, fn, fm, cond);
+}
+
+
+void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fcvt(fd, fn);
+}
+
+
+void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtas(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtau(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtms(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtmu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtns(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtnu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzs(rd, fn);
+}
+void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzu(rd, fn);
+}
+
+
+void MacroAssembler::Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fdiv(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmax(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmaxnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmin(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fminnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+ // Elide the move only when fd and fn are the same D register; everything
+ // else must be emitted. fmov(s0, s0) is not a no-op because it clears the
+ // top word of d0. Technically, fmov(d0, d0) is not a no-op either because
+ // it clears the top of q0, but FPRegister does not currently support Q
+ // registers.
+ if (!fd.Is(fn) || !fd.Is64Bits()) {
+ fmov(fd, fn);
+ }
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, Register rn) {
+ ASSERT(allow_macro_instructions_);
+ fmov(fd, rn);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, double imm) {
+ ASSERT(allow_macro_instructions_);
+ if (fd.Is32Bits()) {
+ Fmov(fd, static_cast<float>(imm));
+ return;
+ }
+
+ ASSERT(fd.Is64Bits());
+ if (IsImmFP64(imm)) {
+ fmov(fd, imm);
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
+ fmov(fd, xzr);
+ } else {
+ Ldr(fd, imm);
+ }
+}
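+
+// Illustrative expansions, assuming the usual FP immediate encoding rules:
+//   Fmov(d0, 1.0);   // emits: fmov d0, #1.0 (encodable FP immediate)
+//   Fmov(d0, 0.0);   // emits: fmov d0, xzr (positive zero only; -0.0 fails
+//                    // the copysign check and is loaded from the literal
+//                    // pool instead)
+//   Fmov(d0, 1.1);   // emits: ldr d0, <literal> (not encodable)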
+
+
+void MacroAssembler::Fmov(FPRegister fd, float imm) {
+ ASSERT(allow_macro_instructions_);
+ if (fd.Is64Bits()) {
+ Fmov(fd, static_cast<double>(imm));
+ return;
+ }
+
+ ASSERT(fd.Is32Bits());
+ if (IsImmFP32(imm)) {
+ fmov(fd, imm);
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
+ fmov(fd, wzr);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireW();
+ // TODO(all): Use Assembler::ldr(const FPRegister& ft, float imm).
+ Mov(tmp, float_to_rawbits(imm));
+ Fmov(fd, tmp);
+ }
+}
+
+
+void MacroAssembler::Fmov(Register rd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fmov(rd, fn);
+}
+
+
+void MacroAssembler::Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmul(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fneg(fd, fn);
+}
+
+
+void MacroAssembler::Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frinta(fd, fn);
+}
+
+
+void MacroAssembler::Frintm(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintm(fd, fn);
+}
+
+
+void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintn(fd, fn);
+}
+
+
+void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintz(fd, fn);
+}
+
+
+void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fsqrt(fd, fn);
+}
+
+
+void MacroAssembler::Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fsub(fd, fn, fm);
+}
+
+
+void MacroAssembler::Hint(SystemHint code) {
+ ASSERT(allow_macro_instructions_);
+ hint(code);
+}
+
+
+void MacroAssembler::Hlt(int code) {
+ ASSERT(allow_macro_instructions_);
+ hlt(code);
+}
+
+
+void MacroAssembler::Isb() {
+ ASSERT(allow_macro_instructions_);
+ isb();
+}
+
+
+void MacroAssembler::Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldnp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ ASSERT(!rt2.IsZero());
+ ldpsw(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
+ ASSERT(allow_macro_instructions_);
+ ldr(rt, imm);
+}
+
+
+void MacroAssembler::Ldr(const CPURegister& rt, double imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(rt.Is64Bits());
+ ldr(rt, Immediate(double_to_rawbits(imm)));
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsl(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lslv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ madd(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mneg(rd, rn, rm);
+}
+
+
+void MacroAssembler::Mov(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ // Emit a register move only if the registers are distinct, or if they are
+ // not X registers. Note that mov(w0, w0) is not a no-op because it clears
+ // the top word of x0.
+ if (!rd.Is(rn) || !rd.Is64Bits()) {
+ Assembler::mov(rd, rn);
+ }
+}
+
+
+void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ movk(rd, imm, shift);
+}
+
+
+void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ mrs(rt, sysreg);
+}
+
+
+void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(allow_macro_instructions_);
+ msr(sysreg, rt);
+}
+
+
+void MacroAssembler::Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ msub(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mul(rd, rn, rm);
+}
+
+
+void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rbit(rd, rn);
+}
+
+
+void MacroAssembler::Ret(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ ret(xn);
+ CheckVeneerPool(false, false);
+}
+
+
+void MacroAssembler::Rev(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev(rd, rn);
+}
+
+
+void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev16(rd, rn);
+}
+
+
+void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev32(rd, rn);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rs,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ror(rd, rs, shift);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rorv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ scvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sdiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smull(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smulh(rd, rn, rm);
+}
+
+
+void MacroAssembler::Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stnp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtb(rd, rn);
+}
+
+
+void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxth(rd, rn);
+}
+
+
+void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtw(rd, rn);
+}
+
+
+void MacroAssembler::Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ ucvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ udiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtb(rd, rn);
+}
+
+
+void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxth(rd, rn);
+}
+
+
+void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtw(rd, rn);
+}
+
+
+void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
+ ASSERT(!csp.Is(sp_));
+ if (!TmpList()->IsEmpty()) {
+ if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Sub(temp, StackPointer(), space);
+ Bic(csp, temp, 0xf);
+ } else {
+ Sub(csp, StackPointer(), space);
+ }
+ } else {
+ // TODO(jbramley): Several callers rely on this not using scratch
+ // registers, so we use the assembler directly here. However, this means
+ // that large immediate values of 'space' cannot be handled cleanly. (Only
+ // 24-bit immediates, or values of 'space' that can be encoded in one
+ // instruction, are accepted.) Once we implement our flexible scratch
+ // register idea, we could greatly simplify this function.
+ InstructionAccurateScope scope(this);
+ ASSERT(space.IsImmediate());
+ // Align to 16 bytes.
+ uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
+ ASSERT(is_uint24(imm));
+
+ Register source = StackPointer();
+ if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ bic(csp, source, 0xf);
+ source = csp;
+ }
+ if (!is_uint12(imm)) {
+ int64_t imm_top_12_bits = imm >> 12;
+ sub(csp, source, imm_top_12_bits << 12);
+ source = csp;
+ imm -= imm_top_12_bits << 12;
+ }
+ if (imm > 0) {
+ sub(csp, source, imm);
+ }
+ }
+ AssertStackConsistency();
+}
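+
+// Illustrative expansion of the no-scratch path (without ALWAYS_ALIGN_CSP),
+// assuming StackPointer() is jssp and space = 0x12345:
+//   RoundUp(0x12345, 0x10) == 0x12350, which is not a 12-bit immediate, so:
+//     sub csp, jssp, #0x12000  // top 12 bits, shifted left by 12
+//     sub csp, csp, #0x350     // remaining low 12 bits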
+
+
+void MacroAssembler::SyncSystemStackPointer() {
+ ASSERT(emit_debug_code());
+ ASSERT(!csp.Is(sp_));
+ { InstructionAccurateScope scope(this);
+ if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ bic(csp, StackPointer(), 0xf);
+ } else {
+ mov(csp, StackPointer());
+ }
+ }
+ AssertStackConsistency();
+}
+
+
+void MacroAssembler::InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ Mov(root, Operand(roots_array_start));
+}
+
+
+void MacroAssembler::SmiTag(Register dst, Register src) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ Lsl(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
+
+
+void MacroAssembler::SmiUntag(Register dst, Register src) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(src);
+ }
+ Asr(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
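+
+// Illustrative expansions: with kSmiShift == 32, tagging and untagging are
+// single shifts, e.g.:
+//   SmiTag(x0);    // emits: lsl x0, x0, #32
+//   SmiUntag(x0);  // emits: asr x0, x0, #32 (plus an AssertSmi check when
+//                  // slow asserts are enabled)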
+
+
+void MacroAssembler::SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is32Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiTagAndPush(Register src) {
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+ Push(src.W(), wzr);
+}
+
+
+void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+ Push(src1.W(), wzr, src2.W(), wzr);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ // Smis have the tag bit (bit 0) clear, so test bit 0.
+ if (smi_label) {
+ Tbz(value, 0, smi_label);
+ if (not_smi_label) {
+ B(not_smi_label);
+ }
+ } else {
+ ASSERT(not_smi_label);
+ Tbnz(value, 0, not_smi_label);
+ }
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ JumpIfSmi(value, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ // Check if both tag bits are clear.
+ Orr(tmp, value1, value2);
+ JumpIfSmi(tmp, both_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ // Check if either tag bit is clear.
+ And(tmp, value1, value2);
+ JumpIfSmi(tmp, either_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfBothSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::ObjectTag(Register tagged_obj, Register obj) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ if (emit_debug_code()) {
+ Label ok;
+ Tbz(obj, 0, &ok);
+ Abort(kObjectTagged);
+ Bind(&ok);
+ }
+ Orr(tagged_obj, obj, kHeapObjectTag);
+}
+
+
+void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ if (emit_debug_code()) {
+ Label ok;
+ Tbnz(obj, 0, &ok);
+ Abort(kObjectNotTagged);
+ Bind(&ok);
+ }
+ Bic(untagged_obj, obj, kHeapObjectTag);
+}
+
+
+void MacroAssembler::IsObjectNameType(Register object,
+ Register type,
+ Label* fail) {
+ CompareObjectType(object, type, type, LAST_NAME_TYPE);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ // If cmp result is lt, the following ccmp will clear all flags.
+ // Z == 0, N == V implies gt condition.
+ Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
+
+ // If no fail label was provided, fall through and leave the flags updated.
+ if (fail != NULL) {
+ B(gt, fail);
+ }
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+ Register type,
+ Label* not_string,
+ Label* string) {
+ Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
+
+ STATIC_ASSERT(kStringTag == 0);
+ ASSERT((string != NULL) || (not_string != NULL));
+ if (string == NULL) {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ } else if (not_string == NULL) {
+ TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
+ } else {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ B(string);
+ }
+}
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mov(tmp, Operand(handle));
+ Push(tmp);
+}
+
+
+void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ if (csp.Is(StackPointer())) {
+ ASSERT(size % 16 == 0);
+ } else {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
+ if (unit_size == 0) return;
+ ASSERT(IsPowerOf2(unit_size));
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(unit_size == 0 || IsPowerOf2(unit_size));
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
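+
+// Illustrative example: with unit_size == 8 (three trailing zeros) and
+// kSmiShift == 32, the net shift is 3 - 32 = -29, so the smi count is
+// untagged and scaled by a single shifted operand:
+//   ClaimBySMI(x10, 8);  // size == Operand(x10, LSR, 29)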
+
+
+void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (csp.Is(StackPointer())) {
+ ASSERT(size % 16 == 0);
+ } else if (emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ SyncSystemStackPointer();
+ }
+}
+
+
+void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
+ if (unit_size == 0) return;
+ ASSERT(IsPowerOf2(unit_size));
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ SyncSystemStackPointer();
+ }
+}
+
+
+void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(unit_size == 0 || IsPowerOf2(unit_size));
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ SyncSystemStackPointer();
+ }
+}
+
+
+void MacroAssembler::CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label) {
+ if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
+ ((cond == eq) || (cond == ne))) {
+ if (cond == eq) {
+ Cbz(lhs, label);
+ } else {
+ Cbnz(lhs, label);
+ }
+ } else {
+ Cmp(lhs, rhs);
+ B(cond, label);
+ }
+}
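+
+// Illustrative expansions:
+//   CompareAndBranch(x2, 0, eq, &l);  // emits: cbz x2, &l
+//   CompareAndBranch(x2, 0, lt, &l);  // emits: cmp x2, #0; b.lt &l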
+
+
+void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbnz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(ne, label);
+ }
+}
+
+
+void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(eq, label);
+ }
+}
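+
+// Illustrative expansions: a single-bit pattern maps onto a test-bit branch,
+// while a multi-bit pattern needs an explicit tst, e.g.:
+//   TestAndBranchIfAnySet(x0, 0x10, &l);    // emits: tbnz x0, #4, &l
+//   TestAndBranchIfAnySet(x0, 0x30, &l);    // emits: tst x0, #0x30; b.ne &l
+//   TestAndBranchIfAllClear(x0, 0x10, &l);  // emits: tbz x0, #4, &l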
+
+
+void MacroAssembler::InlineData(uint64_t data) {
+ ASSERT(is_uint16(data));
+ InstructionAccurateScope scope(this, 1);
+ movz(xzr, data);
+}
+
+
+void MacroAssembler::EnableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateEnable);
+}
+
+
+void MacroAssembler::DisableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateDisable);
+}
+
+
+void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
+ ASSERT(strlen(marker_name) == 2);
+
+ // We allow only printable characters in the marker names. Unprintable
+ // characters are reserved for controlling features of the instrumentation.
+ ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, (marker_name[1] << 8) | marker_name[0]);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
diff --git a/chromium/v8/src/arm64/macro-assembler-arm64.cc b/chromium/v8/src/arm64/macro-assembler-arm64.cc
new file mode 100644
index 00000000000..f2e49b4639c
--- /dev/null
+++ b/chromium/v8/src/arm64/macro-assembler-arm64.cc
@@ -0,0 +1,5303 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
+#define __
+
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate,
+ byte* buffer,
+ unsigned buffer_size)
+ : Assembler(arg_isolate, buffer, buffer_size),
+ generating_stub_(false),
+#if DEBUG
+ allow_macro_instructions_(true),
+#endif
+ has_frame_(false),
+ use_real_aborts_(true),
+ sp_(jssp),
+ tmp_list_(DefaultTmpList()),
+ fptmp_list_(DefaultFPTmpList()) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
+}
+
+
+CPURegList MacroAssembler::DefaultTmpList() {
+ return CPURegList(ip0, ip1);
+}
+
+
+CPURegList MacroAssembler::DefaultFPTmpList() {
+ return CPURegList(fp_scratch1, fp_scratch2);
+}
+
+
+void MacroAssembler::LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ UseScratchRegisterScope temps(this);
+
+ if (operand.NeedsRelocation(this)) {
+ Register temp = temps.AcquireX();
+ Ldr(temp, operand.immediate());
+ Logical(rd, rn, temp, op);
+
+ } else if (operand.IsImmediate()) {
+ int64_t immediate = operand.ImmediateValue();
+ unsigned reg_size = rd.SizeInBits();
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = ~immediate;
+ if (rd.Is32Bits()) {
+ immediate &= kWRegMask;
+ }
+ }
+
+ // Special cases for all set or all clear immediates.
+ if (immediate == 0) {
+ switch (op) {
+ case AND:
+ Mov(rd, 0);
+ return;
+ case ORR: // Fall through.
+ case EOR:
+ Mov(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if ((rd.Is64Bits() && (immediate == -1L)) ||
+ (rd.Is32Bits() && (immediate == 0xffffffffL))) {
+ switch (op) {
+ case AND:
+ Mov(rd, rn);
+ return;
+ case ORR:
+ Mov(rd, immediate);
+ return;
+ case EOR:
+ Mvn(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // Immediate can't be encoded: synthesize using move immediate.
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, immediate);
+ if (rd.Is(csp)) {
+ // If rd is the stack pointer we cannot use it as the destination
+ // register so we use the temp register as an intermediate again.
+ Logical(temp, rn, temp, op);
+ Mov(csp, temp);
+ AssertStackConsistency();
+ } else {
+ Logical(rd, rn, temp, op);
+ }
+ }
+
+ } else if (operand.IsExtendedRegister()) {
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports shift <= 4. We want to support exactly the
+ // same modes here.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = temps.AcquireSameSizeAs(rn);
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ Logical(rd, rn, temp, op);
+
+ } else {
+ // The operand can be encoded in the instruction.
+ ASSERT(operand.IsShiftedRegister());
+ Logical(rd, rn, operand, op);
+ }
+}
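+
+// Illustrative reductions of the immediate special cases above:
+//   And(x0, x1, 0);           // reduces to Mov(x0, 0)
+//   Orr(x0, x1, 0);           // reduces to Mov(x0, x1)
+//   And(w0, w1, 0xffffffff);  // all-ones for a W register: Mov(w0, w1)
+//   Bic(x0, x1, 1);           // NOT form: and x0, x1, #0xfffffffffffffffe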
+
+
+void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ ASSERT(!rd.IsZero());
+
+ // TODO(all) extend to support more immediates.
+ //
+ // Immediates on AArch64 can be produced using an initial value, and zero
+ // to three move-keep operations.
+ //
+ // Initial values can be generated with:
+ // 1. 64-bit move zero (movz).
+ // 2. 32-bit move inverted (movn).
+ // 3. 64-bit move inverted.
+ // 4. 32-bit orr immediate.
+ // 5. 64-bit orr immediate.
+ // Move-keep may then be used to modify each of the 16-bit half-words.
+ //
+ // The code below supports all five initial value generators, and
+ // applying move-keep operations to move-zero and move-inverted initial
+ // values.
+
+ unsigned reg_size = rd.SizeInBits();
+ unsigned n, imm_s, imm_r;
+ if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't
+ // write to the stack pointer.
+ movz(rd, imm);
+ } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move inverted instruction. Movn can't
+ // write to the stack pointer.
+ movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
+ } else {
+ // Generic immediate case. Imm will be represented by
+ // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
+ // A move-zero or move-inverted is generated for the first non-zero or
+ // non-0xffff immX, and a move-keep for subsequent non-zero immX.
+
+ uint64_t ignored_halfword = 0;
+ bool invert_move = false;
+ // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // halfwords, it's more efficient to use move-inverted.
+ if (CountClearHalfWords(~imm, reg_size) >
+ CountClearHalfWords(imm, reg_size)) {
+ ignored_halfword = 0xffffL;
+ invert_move = true;
+ }
+
+ // Mov instructions can't move immediate values into the stack pointer, so
+ // set up a temporary register, if needed.
+ UseScratchRegisterScope temps(this);
+ Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
+
+ // Iterate through the halfwords. Use movn/movz for the first non-ignored
+ // halfword, and movk for subsequent halfwords.
+ ASSERT((reg_size % 16) == 0);
+ bool first_mov_done = false;
+ for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+ if (imm16 != ignored_halfword) {
+ if (!first_mov_done) {
+ if (invert_move) {
+ movn(temp, (~imm16) & 0xffffL, 16 * i);
+ } else {
+ movz(temp, imm16, 16 * i);
+ }
+ first_mov_done = true;
+ } else {
+ // Construct a wider constant.
+ movk(temp, imm16, 16 * i);
+ }
+ }
+ }
+ ASSERT(first_mov_done);
+
+ // Move the temporary if the original destination register was the stack
+ // pointer.
+ if (rd.IsSP()) {
+ mov(rd, temp);
+ AssertStackConsistency();
+ }
+ }
+}
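+
+// Worked example (illustrative): 0xffff1234ffff5678 is not movz-, movn- or
+// orr-encodable, and has more 0xffff than 0x0000 halfwords, so the inverted
+// generic path is taken:
+//   Mov(x0, 0xffff1234ffff5678);
+//   // emits: movn x0, #0xa987           (x0 = 0xffffffffffff5678)
+//   //        movk x0, #0x1234, lsl #32  (x0 = 0xffff1234ffff5678)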
+
+
+void MacroAssembler::Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+
+ // Provide a swap register for instructions that need to write into the
+ // system stack pointer (and can't do this inherently).
+ UseScratchRegisterScope temps(this);
+ Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
+
+ if (operand.NeedsRelocation(this)) {
+ Ldr(dst, operand.immediate());
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(dst, operand.ImmediateValue());
+
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Emit a shift instruction if moving a shifted register. This operation
+ // could also be achieved using an orr instruction (like orn used by Mvn),
+ // but using a shift instruction makes the disassembly clearer.
+ EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit an extend instruction if moving an extended register. This handles
+ // extend with post-shift operations, too.
+ EmitExtendShift(dst, operand.reg(), operand.extend(),
+ operand.shift_amount());
+
+ } else {
+ // Otherwise, emit a register move only if the registers are distinct, or
+ // if they are not X registers.
+ //
+ // Note that mov(w0, w0) is not a no-op because it clears the top word of
+ // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
+ // registers is not required to clear the top word of the X register. In
+ // this case, the instruction is discarded.
+ //
+ // If csp is an operand, add #0 is emitted; otherwise, orr #0 is used.
+ if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
+ (discard_mode == kDontDiscardForSameWReg))) {
+ Assembler::mov(rd, operand.reg());
+ }
+ // This case can handle writes into the system stack pointer directly.
+ dst = rd;
+ }
+
+ // Copy the result to the system stack pointer.
+ if (!dst.Is(rd)) {
+ ASSERT(rd.IsSP());
+ Assembler::mov(rd, dst);
+ }
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+
+ if (operand.NeedsRelocation(this)) {
+ Ldr(rd, operand.immediate());
+ mvn(rd, rd);
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(rd, ~operand.ImmediateValue());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit two instructions for the extend case. This differs from Mov, as
+ // the extend and invert can't be achieved in one instruction.
+ EmitExtendShift(rd, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ mvn(rd, rd);
+
+ } else {
+ mvn(rd, operand);
+ }
+}
+
+
+unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size % 8) == 0);
+ int count = 0;
+ for (unsigned i = 0; i < (reg_size / 16); i++) {
+ if ((imm & 0xffff) == 0) {
+ count++;
+ }
+ imm >>= 16;
+ }
+ return count;
+}
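+
+// For example, CountClearHalfWords(0x0000123400000000, 64) == 3: only one of
+// the four 16-bit halfwords (0x1234) is non-zero.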
+
+
+// The movz instruction can generate immediates containing an arbitrary 16-bit
+// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
+bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
+ return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+}
+
+
+// The movn instruction can generate immediates containing an arbitrary 16-bit
+// half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
+bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+ return IsImmMovz(~imm, reg_size);
+}
+
+
+void MacroAssembler::ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.NeedsRelocation(this)) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, operand.immediate());
+ ConditionalCompareMacro(rn, temp, nzcv, cond, op);
+
+ } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
+ (operand.IsImmediate() &&
+ IsImmConditionalCompare(operand.ImmediateValue()))) {
+ // The immediate can be encoded in the instruction, or the operand is an
+ // unshifted register: call the assembler.
+ ConditionalCompare(rn, operand, nzcv, cond, op);
+
+ } else {
+ // The operand isn't directly supported by the instruction: perform the
+ // operation on a temporary register.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ ConditionalCompare(rn, temp, nzcv, cond, op);
+ }
+}
+
+
+void MacroAssembler::Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.IsImmediate()) {
+ // Immediate argument. Handle the special cases of 0, 1 and -1 using the
+ // zero register.
+ int64_t imm = operand.ImmediateValue();
+ Register zr = AppropriateZeroRegFor(rn);
+ if (imm == 0) {
+ csel(rd, rn, zr, cond);
+ } else if (imm == 1) {
+ csinc(rd, rn, zr, cond);
+ } else if (imm == -1) {
+ csinv(rd, rn, zr, cond);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, imm);
+ csel(rd, rn, temp, cond);
+ }
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
+ // Unshifted register argument.
+ csel(rd, rn, operand.reg(), cond);
+ } else {
+ // All other arguments.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ csel(rd, rn, temp, cond);
+ }
+}
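+
+// Illustrative expansions of the 0/1/-1 immediate cases, which map onto the
+// conditional select variants against the zero register:
+//   Csel(x0, x1, 0, eq);   // emits: csel x0, x1, xzr, eq
+//   Csel(x0, x1, 1, eq);   // emits: csinc x0, x1, xzr, eq
+//   Csel(x0, x1, -1, eq);  // emits: csinv x0, x1, xzr, eq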
+
+
+void MacroAssembler::AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
+ !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
+ // The instruction would be a nop. Avoid generating useless code.
+ return;
+ }
+
+ if (operand.NeedsRelocation(this)) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, operand.immediate());
+ AddSubMacro(rd, rn, temp, S, op);
+ } else if ((operand.IsImmediate() &&
+ !IsImmAddSub(operand.ImmediateValue())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ } else {
+ AddSub(rd, rn, operand, S, op);
+ }
+}
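+
+// Illustrative expansion: an immediate that does not fit the 12-bit
+// (optionally shifted) add/sub field is synthesized into a scratch register
+// first (typically ip0 from the default TmpList):
+//   Add(x0, x1, 0x1234567);
+//   // emits: Mov(scratch, 0x1234567); add x0, x1, scratch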
+
+
+void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ UseScratchRegisterScope temps(this);
+
+ if (operand.NeedsRelocation(this)) {
+ Register temp = temps.AcquireX();
+ Ldr(temp, operand.immediate());
+ AddSubWithCarryMacro(rd, rn, temp, S, op);
+
+ } else if (operand.IsImmediate() ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ // Add/sub with carry (immediate or ROR shifted register).
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Add/sub with carry (shifted register).
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+ ASSERT(is_uintn(operand.shift_amount(),
+ rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
+ : kWRegSizeInBitsLog2));
+ Register temp = temps.AcquireSameSizeAs(rn);
+ EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else if (operand.IsExtendedRegister()) {
+ // Add/sub with carry (extended register).
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports a shift <= 4. We want to support exactly the
+ // same modes.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = temps.AcquireSameSizeAs(rn);
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else {
+ // The addressing mode is directly supported by the instruction.
+ AddSubWithCarry(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ int64_t offset = addr.offset();
+ LSDataSize size = CalcLSDataSize(op);
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
+ !IsImmLSUnscaled(offset)) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
+ Mov(temp, addr.offset());
+ LoadStore(rt, MemOperand(addr.base(), temp), op);
+ } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
+ // Post-index beyond unscaled addressing range.
+ LoadStore(rt, MemOperand(addr.base()), op);
+ add(addr.base(), addr.base(), offset);
+ } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
+ // Pre-index beyond unscaled addressing range.
+ add(addr.base(), addr.base(), offset);
+ LoadStore(rt, MemOperand(addr.base()), op);
+ } else {
+ // Encodable in one load/store instruction.
+ LoadStore(rt, addr, op);
+ }
+}
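+
+// Illustrative expansion: an offset that fits neither the scaled unsigned
+// 12-bit form nor the signed 9-bit unscaled form is materialized first:
+//   Ldr(x0, MemOperand(x1, 0x20000));
+//   // emits: Mov(scratch, 0x20000); ldr x0, [x1, scratch]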
+
+
+void MacroAssembler::Load(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8()) {
+ Ldrsb(rt, addr);
+ } else if (r.IsUInteger8()) {
+ Ldrb(rt, addr);
+ } else if (r.IsInteger16()) {
+ Ldrsh(rt, addr);
+ } else if (r.IsUInteger16()) {
+ Ldrh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Ldr(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ Ldr(rt, addr);
+ }
+}
+
+
+void MacroAssembler::Store(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ Strb(rt, addr);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ Strh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Str(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ if (r.IsHeapObject()) {
+ AssertNotSmi(rt);
+ } else if (r.IsSmi()) {
+ AssertSmi(rt);
+ }
+ Str(rt, addr);
+ }
+}
+
+
+bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
+ Label *label, ImmBranchType b_type) {
+ bool need_longer_range = false;
+ // There are two situations in which we care about the offset being out of
+ // range:
+ // - The label is bound but too far away.
+ // - The label is not bound but linked, and the previous branch
+ // instruction in the chain is too far away.
+ if (label->is_bound() || label->is_linked()) {
+ need_longer_range =
+ !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
+ }
+ if (!need_longer_range && !label->is_bound()) {
+ int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
+ unresolved_branches_.insert(
+ std::pair<int, FarBranchInfo>(max_reachable_pc,
+ FarBranchInfo(pc_offset(), label)));
+ // Also maintain the next pool check.
+ next_veneer_pool_check_ =
+ Min(next_veneer_pool_check_,
+ max_reachable_pc - kVeneerDistanceCheckMargin);
+ }
+ return need_longer_range;
+}
+
+
+void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+
+ if (hint == kAdrNear) {
+ adr(rd, label);
+ return;
+ }
+
+ ASSERT(hint == kAdrFar);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ ASSERT(!AreAliased(rd, scratch));
+
+ if (label->is_bound()) {
+ int label_offset = label->pos() - pc_offset();
+ if (Instruction::IsValidPCRelOffset(label_offset)) {
+ adr(rd, label);
+ } else {
+ ASSERT(label_offset <= 0);
+ int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
+ adr(rd, min_adr_offset);
+ Add(rd, rd, label_offset - min_adr_offset);
+ }
+ } else {
+ InstructionAccurateScope scope(
+ this, PatchingAssembler::kAdrFarPatchableNInstrs);
+ adr(rd, label);
+ for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
+ nop(ADR_FAR_NOP);
+ }
+ movz(scratch, 0);
+ add(rd, rd, scratch);
+ }
+}
+
+
+void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+ ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
+ (bit == -1 || type >= kBranchTypeFirstUsingBit));
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ B(static_cast<Condition>(type), label);
+ } else {
+ switch (type) {
+ case always: B(label); break;
+ case never: break;
+ case reg_zero: Cbz(reg, label); break;
+ case reg_not_zero: Cbnz(reg, label); break;
+ case reg_bit_clear: Tbz(reg, bit, label); break;
+ case reg_bit_set: Tbnz(reg, bit, label); break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
+
+ if (need_extra_instructions) {
+ b(&done, NegateCondition(cond));
+ B(label);
+ } else {
+ b(label, cond);
+ }
+ bind(&done);
+}
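+
+// Illustrative expansion: when the target is outside the +/-1MB
+// conditional-branch range, the branch is synthesized from an inverted
+// short branch over an unconditional one:
+//   B(eq, &far_label);
+//   // emits: b.ne done; b far_label; done: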
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
+ if (need_extra_instructions) {
+ tbz(rt, bit_pos, &done);
+ B(label);
+ } else {
+ tbnz(rt, bit_pos, label);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
+ if (need_extra_instructions) {
+ tbnz(rt, bit_pos, &done);
+ B(label);
+ } else {
+ tbz(rt, bit_pos, label);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbz(rt, &done);
+ B(label);
+ } else {
+ cbnz(rt, label);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbnz(rt, &done);
+ B(label);
+ } else {
+ cbz(rt, label);
+ }
+ bind(&done);
+}
+
+
+// Pseudo-instructions.
+
+
+void MacroAssembler::Abs(const Register& rd, const Register& rm,
+ Label* is_not_representable,
+ Label* is_representable) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(AreSameSizeAndType(rd, rm));
+
+ Cmp(rm, 1);
+ Cneg(rd, rm, lt);
+
+ // If the comparison sets the v flag, the input was the smallest value
+ // representable by rm, and the mathematical result of abs(rm) is not
+ // representable using two's complement.
+ if ((is_not_representable != NULL) && (is_representable != NULL)) {
+ B(is_not_representable, vs);
+ B(is_representable);
+ } else if (is_not_representable != NULL) {
+ B(is_not_representable, vs);
+ } else if (is_representable != NULL) {
+ B(is_representable, vc);
+ }
+}
+
+
+// Abstracted stack operations.
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3) {
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+
+ int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
+ int size = src0.SizeInBytes();
+
+ PushPreamble(count, size);
+ PushHelper(count, size, src0, src1, src2, src3);
+}
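+
+// Illustrative use (sketch, assuming X registers and jssp as the current
+// stack pointer): Push(x0, x1) stores x0 at the higher address, so it is
+// equivalent to Push(x0) followed by Push(x1):
+//   __ Push(x0, x1); // jssp -= 16; [jssp + 8] = x0; [jssp] = x1.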
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3,
+ const CPURegister& src4, const CPURegister& src5,
+ const CPURegister& src6, const CPURegister& src7) {
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
+
+ int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
+ int size = src0.SizeInBytes();
+
+ PushPreamble(count, size);
+ PushHelper(4, size, src0, src1, src2, src3);
+ PushHelper(count - 4, size, src4, src5, src6, src7);
+}
+
+
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(dst0.IsValid());
+
+ int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+ PopPostamble(count, size);
+}
+
+
+void MacroAssembler::PushPopQueue::PushQueued(
+ PreambleDirective preamble_directive) {
+ if (queued_.empty()) return;
+
+ if (preamble_directive == WITH_PREAMBLE) {
+ masm_->PushPreamble(size_);
+ }
+
+ int count = queued_.size();
+ int index = 0;
+ while (index < count) {
+ // PushHelper can only handle registers with the same size and type, and it
+ // can handle only four at a time. Batch them up accordingly.
+ CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
+ int batch_index = 0;
+ do {
+ batch[batch_index++] = queued_[index++];
+ } while ((batch_index < 4) && (index < count) &&
+ batch[0].IsSameSizeAndType(queued_[index]));
+
+ masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
+ batch[0], batch[1], batch[2], batch[3]);
+ }
+
+ queued_.clear();
+}
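+
+// Illustrative use of the queue (sketch; assumes the Queue(reg) accessor
+// declared alongside this class):
+//   MacroAssembler::PushPopQueue queue(masm);
+//   queue.Queue(x0);
+//   queue.Queue(d0); // Registers of a different size/type start a new batch.
+//   queue.PushQueued();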
+
+
+void MacroAssembler::PushPopQueue::PopQueued() {
+ if (queued_.empty()) return;
+
+ int count = queued_.size();
+ int index = 0;
+ while (index < count) {
+ // PopHelper can only handle registers with the same size and type, and it
+ // can handle only four at a time. Batch them up accordingly.
+ CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
+ int batch_index = 0;
+ do {
+ batch[batch_index++] = queued_[index++];
+ } while ((batch_index < 4) && (index < count) &&
+ batch[0].IsSameSizeAndType(queued_[index]));
+
+ masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
+ batch[0], batch[1], batch[2], batch[3]);
+ }
+
+ masm_->PopPostamble(size_);
+ queued_.clear();
+}
+
+
+void MacroAssembler::PushCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+
+ PushPreamble(registers.Count(), size);
+ // Push up to four registers at a time because if the current stack pointer
+ // is csp and the register size is 32 bits, registers must be pushed in
+ // blocks of four in order to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& src0 = registers.PopHighestIndex();
+ const CPURegister& src1 = registers.PopHighestIndex();
+ const CPURegister& src2 = registers.PopHighestIndex();
+ const CPURegister& src3 = registers.PopHighestIndex();
+ int count = count_before - registers.Count();
+ PushHelper(count, size, src0, src1, src2, src3);
+ }
+}
+
+
+void MacroAssembler::PopCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+ int total_count = registers.Count();
+
+ // Pop up to four registers at a time because if the current stack pointer
+ // is csp and the register size is 32 bits, registers must be popped in
+ // blocks of four in order to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ const CPURegister& dst2 = registers.PopLowestIndex();
+ const CPURegister& dst3 = registers.PopLowestIndex();
+ int count = count_before - registers.Count();
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+ }
+ PopPostamble(total_count, size);
+}
+
+
+void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
+ int size = src.SizeInBytes();
+
+ PushPreamble(count, size);
+
+ if (FLAG_optimize_for_size && count > 8) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Label loop;
+ __ Mov(temp, count / 2);
+ __ Bind(&loop);
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ __ Subs(temp, temp, 1);
+ __ B(ne, &loop);
+
+ count %= 2;
+ }
+
+ // Push up to four registers at a time if possible because if the current
+ // stack pointer is csp and the register size is 32 bits, registers must be
+ // pushed in blocks of four in order to maintain the 16-byte alignment for
+ // csp.
+ while (count >= 4) {
+ PushHelper(4, size, src, src, src, src);
+ count -= 4;
+ }
+ if (count >= 2) {
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ count -= 2;
+ }
+ if (count == 1) {
+ PushHelper(1, size, src, NoReg, NoReg, NoReg);
+ count -= 1;
+ }
+ ASSERT(count == 0);
+}
+
+
+void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
+ PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(count);
+
+ if (FLAG_optimize_for_size) {
+ Label loop, done;
+
+ Subs(temp, count, 1);
+ B(mi, &done);
+
+ // Push all registers individually, to save code size.
+ Bind(&loop);
+ Subs(temp, temp, 1);
+ PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
+ B(pl, &loop);
+
+ Bind(&done);
+ } else {
+ Label loop, leftover2, leftover1, done;
+
+ Subs(temp, count, 4);
+ B(mi, &leftover2);
+
+ // Push groups of four first.
+ Bind(&loop);
+ Subs(temp, temp, 4);
+ PushHelper(4, src.SizeInBytes(), src, src, src, src);
+ B(pl, &loop);
+
+ // Push groups of two.
+ Bind(&leftover2);
+ Tbz(count, 1, &leftover1);
+ PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
+
+ // Push the last one (if required).
+ Bind(&leftover1);
+ Tbz(count, 0, &done);
+ PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
+
+ Bind(&done);
+ }
+}
+
+
+void MacroAssembler::PushHelper(int count, int size,
+ const CPURegister& src0,
+ const CPURegister& src1,
+ const CPURegister& src2,
+ const CPURegister& src3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ ASSERT(size == src0.SizeInBytes());
+
+ // When pushing multiple registers, the store order is chosen such that
+ // Push(a, b) is equivalent to Push(a) followed by Push(b).
+ switch (count) {
+ case 1:
+ ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
+ break;
+ case 2:
+ ASSERT(src2.IsNone() && src3.IsNone());
+ stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
+ break;
+ case 3:
+ ASSERT(src3.IsNone());
+ stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
+ str(src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ case 4:
+ // Skip over 4 * size, then fill in the gap. This allows four W registers
+ // to be pushed using csp, whilst maintaining 16-byte alignment for csp
+ // at all times.
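+ // For example, with four X registers (size == 8) this emits, roughly:
+ //   stp x3, x2, [sp, #-32]!
+ //   stp x1, x0, [sp, #16]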
+ stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PopHelper(int count, int size,
+ const CPURegister& dst0,
+ const CPURegister& dst1,
+ const CPURegister& dst2,
+ const CPURegister& dst3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(size == dst0.SizeInBytes());
+
+ // When popping multiple registers, the load order is chosen such that
+ // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
+ switch (count) {
+ case 1:
+ ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
+ break;
+ case 2:
+ ASSERT(dst2.IsNone() && dst3.IsNone());
+ ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
+ break;
+ case 3:
+ ASSERT(dst3.IsNone());
+ ldr(dst2, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
+ break;
+ case 4:
+ // Load the higher addresses first, then load the lower addresses and
+ // skip the whole block in the second instruction. This allows four W
+ // registers to be popped using csp, whilst maintaining 16-byte alignment
+ // for csp at all times.
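+ // For example, with four X registers (size == 8) this emits, roughly:
+ //   ldp x2, x3, [sp, #16]
+ //   ldp x0, x1, [sp], #32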
+ ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PushPreamble(Operand total_size) {
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ ASSERT((total_size.ImmediateValue() % 16) == 0);
+ }
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
+ } else {
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // the system stack pointer will still be modified in order to comply with
+ // ABI rules about accessing memory below the system stack pointer.
+ BumpSystemStackPointer(total_size);
+ }
+}
+
+
+void MacroAssembler::PopPostamble(Operand total_size) {
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ ASSERT((total_size.ImmediateValue() % 16) == 0);
+ }
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
+ } else if (emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ SyncSystemStackPointer();
+ }
+}
+
+
+void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.ImmediateValue() >= 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Str(src, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.ImmediateValue() >= 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Ldr(dst, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PokePair(const CPURegister& src1,
+ const CPURegister& src2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(src1, src2));
+ ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
+ Stp(src1, src2, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PeekPair(const CPURegister& dst1,
+ const CPURegister& dst2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(dst1, dst2));
+ ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
+ Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PushCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ MemOperand tos(csp, -2 * kXRegSize, PreIndex);
+
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
+
+ stp(x29, x30, tos);
+ stp(x27, x28, tos); // x28 = jssp
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
+}
+
+
+void MacroAssembler::PopCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ MemOperand tos(csp, 2 * kXRegSize, PostIndex);
+
+ ldp(x19, x20, tos);
+ ldp(x21, x22, tos);
+ ldp(x23, x24, tos);
+ ldp(x25, x26, tos);
+ ldp(x27, x28, tos); // x28 = jssp
+ ldp(x29, x30, tos);
+
+ ldp(d8, d9, tos);
+ ldp(d10, d11, tos);
+ ldp(d12, d13, tos);
+ ldp(d14, d15, tos);
+}
+
+
+void MacroAssembler::AssertStackConsistency() {
+ // Avoid emitting code when !use_real_aborts(), since non-real aborts cause
+ // too much code to be generated.
+ if (emit_debug_code() && use_real_aborts()) {
+ if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
+ // can't check the alignment of csp without using a scratch register (or
+ // clobbering the flags), but the processor (or simulator) will abort if
+ // it is not properly aligned during a load.
+ ldr(xzr, MemOperand(csp, 0));
+ }
+ if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
+ Label ok;
+ // Check that csp <= StackPointer(), preserving all registers and NZCV.
+ sub(StackPointer(), csp, StackPointer());
+ cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
+ tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
+
+ // Avoid generating AssertStackConsistency checks for the Push in Abort.
+ { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
+ Abort(kTheCurrentStackPointerIsBelowCsp);
+ }
+
+ bind(&ok);
+ // Restore StackPointer().
+ sub(StackPointer(), csp, StackPointer());
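+ // (csp - (csp - StackPointer()) == StackPointer(), so repeating the
+ // subtraction restores the original value.)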
+ }
+ }
+}
+
+
+void MacroAssembler::AssertFPCRState(Register fpcr) {
+ if (emit_debug_code()) {
+ Label unexpected_mode, done;
+ UseScratchRegisterScope temps(this);
+ if (fpcr.IsNone()) {
+ fpcr = temps.AcquireX();
+ Mrs(fpcr, FPCR);
+ }
+
+ // Settings overridden by ConfigureFPCR():
+ // - Assert that default-NaN mode is set.
+ Tbz(fpcr, DN_offset, &unexpected_mode);
+
+ // Settings left to their default values:
+ // - Assert that flush-to-zero is not set.
+ Tbnz(fpcr, FZ_offset, &unexpected_mode);
+ // - Assert that the rounding mode is nearest-with-ties-to-even.
+ STATIC_ASSERT(FPTieEven == 0);
+ Tst(fpcr, RMode_mask);
+ B(eq, &done);
+
+ Bind(&unexpected_mode);
+ Abort(kUnexpectedFPCRMode);
+
+ Bind(&done);
+ }
+}
+
+
+void MacroAssembler::ConfigureFPCR() {
+ UseScratchRegisterScope temps(this);
+ Register fpcr = temps.AcquireX();
+ Mrs(fpcr, FPCR);
+
+ // If necessary, enable default-NaN mode. The default values of the other FPCR
+ // options should be suitable, and AssertFPCRState will verify that.
+ Label no_write_required;
+ Tbnz(fpcr, DN_offset, &no_write_required);
+
+ Orr(fpcr, fpcr, DN_mask);
+ Msr(FPCR, fpcr);
+
+ Bind(&no_write_required);
+ AssertFPCRState(fpcr);
+}
+
+
+void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
+ const FPRegister& src) {
+ AssertFPCRState();
+
+ // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
+ // for NaNs, which become the default NaN. We use fsub rather than fadd
+ // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
+ Fsub(dst, src, fp_zero);
+}
+
+
+void MacroAssembler::LoadRoot(CPURegister destination,
+ Heap::RootListIndex index) {
+ // TODO(jbramley): Most root values are constants, and can be synthesized
+ // without a load. Refer to the ARM back end for details.
+ Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index) {
+ Str(source, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadTrueFalseRoots(Register true_root,
+ Register false_root) {
+ STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
+ Ldp(true_root, false_root,
+ MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ Mov(result, Operand(cell));
+ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ } else {
+ Mov(result, Operand(object));
+ }
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Map::EnumLengthBits::kMask);
+}
+
+
+void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
+ EnumLengthUntagged(dst, map);
+ SmiTag(dst, dst);
+}
+
+
+void MacroAssembler::CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime) {
+ ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
+ scratch3));
+
+ Register empty_fixed_array_value = scratch0;
+ Register current_object = scratch1;
+
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Label next, start;
+
+ Mov(current_object, object);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ Register map = scratch2;
+ Register enum_length = scratch3;
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ EnumLengthUntagged(enum_length, map);
+ Cmp(enum_length, kInvalidEnumCacheSentinel);
+ B(eq, call_runtime);
+
+ B(&start);
+
+ Bind(&next);
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLengthUntagged(enum_length, map);
+ Cbnz(enum_length, call_runtime);
+
+ Bind(&start);
+
+ // Check that there are no elements. Register current_object contains the
+ // current JS object we've reached through the prototype chain.
+ Label no_elements;
+ Ldr(current_object, FieldMemOperand(current_object,
+ JSObject::kElementsOffset));
+ Cmp(current_object, empty_fixed_array_value);
+ B(eq, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
+ B(ne, call_runtime);
+
+ Bind(&no_elements);
+ Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
+ Cmp(current_object, null_value);
+ B(ne, &next);
+}
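+
+// In pseudo-code, the walk above is roughly (sketch):
+//   for (o = object; o != null_value; o = o->map->prototype) {
+//     if (o == object ? EnumLength(o->map) == kInvalidEnumCacheSentinel
+//                     : EnumLength(o->map) != 0) goto call_runtime;
+//     if (o->elements != empty_fixed_array &&
+//         o->elements != empty_slow_element_dictionary) goto call_runtime;
+//   }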
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ Add(scratch1, receiver,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
+ Cmp(scratch1, new_space_start);
+ B(lt, no_memento_found);
+
+ Mov(scratch2, new_space_allocation_top);
+ Ldr(scratch2, MemOperand(scratch2));
+ Cmp(scratch1, scratch2);
+ B(gt, no_memento_found);
+
+ Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
+ Cmp(scratch1,
+ Operand(isolate()->factory()->allocation_memento_map()));
+}
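+
+// The memento map comparison above is left in the flags; a typical caller
+// (sketch; register choices are hypothetical) branches on it:
+//   TestJSArrayForAllocationMemento(receiver, x10, x11, &no_memento_found);
+//   B(eq, &memento_found);
+//   Bind(&no_memento_found);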
+
+
+void MacroAssembler::JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2) {
+ // Handler expects argument in x0.
+ ASSERT(exception.Is(x0));
+
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
+ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
+ Lsr(scratch2, state, StackHandler::kKindWidth);
+ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
+ Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
+ Br(scratch1);
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+ Condition cond,
+ Label* branch) {
+ ASSERT(cond == eq || cond == ne);
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ And(temp, object, ExternalReference::new_space_mask(isolate()));
+ Cmp(temp, ExternalReference::new_space_start(isolate()));
+ B(cond, branch);
+}
+
+
+void MacroAssembler::Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+ // Restore the next handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Restore the context and frame pointer.
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ // If the handler is a JS frame, restore the context to the frame.
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
+ Label not_js_frame;
+ Cbz(cp, &not_js_frame);
+ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ Bind(&not_js_frame);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top stack handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ B(&check_kind);
+ Bind(&fetch_next);
+ Peek(jssp, StackHandlerConstants::kNextOffset);
+
+ Bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ Peek(scratch2, StackHandlerConstants::kStateOffset);
+ TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Clear the context and frame pointer (0 was
+ // saved in the handler).
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
+ ASSERT(smi.Is64Bits());
+ Abs(smi, smi, slow);
+}
+
+
+void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(eq, reason);
+ }
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, reason);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, kOperandIsASmiAndNotAName);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, LAST_NAME_TYPE);
+ Check(ls, kOperandIsNotAName);
+ }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
+ Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ Bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, kOperandIsASmiAndNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All arguments must be on the stack before this function is called.
+ // x0 holds the return value after the call.
+
+ // Check that the number of arguments matches what the function expects.
+ // If f->nargs is -1, the function can accept a variable number of arguments.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // Place the necessary arguments.
+ Mov(x0, num_arguments);
+ Mov(x1, ExternalReference(f, isolate()));
+
+ CEntryStub stub(isolate(), 1, save_doubles);
+ CallStub(&stub);
+}
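+
+// Illustrative call site (sketch; the runtime function id is hypothetical):
+//   __ Push(x2, x3); // Both arguments on the stack.
+//   __ CallRuntime(Runtime::FunctionForId(Runtime::kHypotheticalId), 2);
+//   // On return, x0 holds the result.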
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
+ ASM_LOCATION("CallApiFunctionAndReturn");
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate());
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate()),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate()),
+ next_address);
+
+ ASSERT(function_address.is(x1) || function_address.is(x2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ Mov(x10, ExternalReference::is_profiling_address(isolate()));
+ Ldrb(w10, MemOperand(x10));
+ Cbz(w10, &profiler_disabled);
+ Mov(x3, thunk_ref);
+ B(&end_profiler_check);
+
+ Bind(&profiler_disabled);
+ Mov(x3, function_address);
+ Bind(&end_profiler_check);
+
+ // Save the callee-save registers we are going to use.
+ // TODO(all): Is this necessary? ARM doesn't do it.
+ STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
+ Poke(x19, (spill_offset + 0) * kXRegSize);
+ Poke(x20, (spill_offset + 1) * kXRegSize);
+ Poke(x21, (spill_offset + 2) * kXRegSize);
+ Poke(x22, (spill_offset + 3) * kXRegSize);
+
+ // Allocate HandleScope in callee-save registers.
+ // We will need to restore the HandleScope after the call to the API
+ // function. By allocating it in callee-save registers it will be preserved
+ // by C code.
+ Register handle_scope_base = x22;
+ Register next_address_reg = x19;
+ Register limit_reg = x20;
+ Register level_reg = w21;
+
+ Mov(handle_scope_base, next_address);
+ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ Add(level_reg, level_reg, 1);
+ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ Mov(x0, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ // The native call returns to the DirectCEntry stub, which redirects to the
+ // return address pushed on the stack (which could have moved after a GC).
+ // The DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(this, x3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ Mov(x0, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ Ldr(x0, return_value_operand);
+ Bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ if (emit_debug_code()) {
+ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
+ Cmp(w1, level_reg);
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ Sub(level_reg, level_reg, 1);
+ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
+ Cmp(limit_reg, x1);
+ B(ne, &delete_allocated_handles);
+
+ Bind(&leave_exit_frame);
+ // Restore callee-saved registers.
+ Peek(x19, (spill_offset + 0) * kXRegSize);
+ Peek(x20, (spill_offset + 1) * kXRegSize);
+ Peek(x21, (spill_offset + 2) * kXRegSize);
+ Peek(x22, (spill_offset + 3) * kXRegSize);
+
+ // Check if the function scheduled an exception.
+ Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
+ Ldr(x5, MemOperand(x5));
+ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
+ Bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ Ldr(cp, *context_restore_operand);
+ }
+
+ LeaveExitFrame(false, x1, !restore_context);
+ Drop(stack_space);
+ Ret();
+
+ Bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(
+ Runtime::kHiddenPromoteScheduledException, isolate()), 0);
+ }
+ B(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ Bind(&delete_allocated_handles);
+ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ // Save the return value in a callee-save register.
+ Register saved_result = x19;
+ Mov(saved_result, x0);
+ Mov(x0, ExternalReference::isolate_address(isolate()));
+ CallCFunction(
+ ExternalReference::delete_handle_scope_extensions(isolate()), 1);
+ Mov(x0, saved_result);
+ B(&leave_exit_frame);
+}
+
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ Mov(x0, num_arguments);
+ Mov(x1, ext);
+
+ CEntryStub stub(isolate(), 1);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+ Mov(x1, builtin);
+ CEntryStub stub(isolate(), 1);
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the builtins object into target register.
+ Ldr(target, GlobalObjectMemOperand());
+ Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ Ldr(target, FieldMemOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target,
+ Register function,
+ Builtins::JavaScript id) {
+ ASSERT(!AreAliased(target, function));
+ GetBuiltinFunction(function, id);
+ // Load the code entry point from the builtins object.
+ Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ ASM_LOCATION("MacroAssembler::InvokeBuiltin");
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Get the builtin entry in x2 and setup the function object in x1.
+ GetBuiltinEntry(x2, x1, id);
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(x2));
+ Call(x2);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(x2);
+ }
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Mov(x0, num_arguments);
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
+}
+
+
+void MacroAssembler::InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(string, length, scratch1, scratch2));
+ LoadRoot(scratch2, map_index);
+ SmiTag(scratch1, length);
+ Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+
+ Mov(scratch2, String::kEmptyHashField);
+ Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_ARM64
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one ARM
+ // platform for another ARM platform with a different alignment.
+ return OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_ARM64
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // V8_HOST_ARCH_ARM64
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args) {
+ CallCFunction(function, num_of_reg_args, 0);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args,
+ int num_of_double_args) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, function);
+ CallCFunction(temp, num_of_reg_args, num_of_double_args);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_of_reg_args,
+ int num_of_double_args) {
+ ASSERT(has_frame());
+ // We can pass 8 integer arguments in registers. If we need to pass more than
+ // that, we'll need to implement support for passing them on the stack.
+ ASSERT(num_of_reg_args <= 8);
+
+ // If we're passing doubles, we're limited to the following prototypes
+ // (defined by ExternalReference::Type):
+ // BUILTIN_COMPARE_CALL: int f(double, double)
+ // BUILTIN_FP_FP_CALL: double f(double, double)
+ // BUILTIN_FP_CALL: double f(double)
+ // BUILTIN_FP_INT_CALL: double f(double, int)
+ if (num_of_double_args > 0) {
+ ASSERT(num_of_reg_args <= 1);
+ ASSERT((num_of_double_args + num_of_reg_args) <= 2);
+ }
+
+ // If the stack pointer is not csp, we need to derive an aligned csp from the
+ // current stack pointer.
+ const Register old_stack_pointer = StackPointer();
+ if (!csp.Is(old_stack_pointer)) {
+ AssertStackConsistency();
+
+ int sp_alignment = ActivationFrameAlignment();
+ // The ABI mandates at least 16-byte alignment.
+ ASSERT(sp_alignment >= 16);
+ ASSERT(IsPowerOf2(sp_alignment));
+
+ // The current stack pointer is a callee saved register, and is preserved
+ // across the call.
+ ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
+
+ // Align and synchronize the system stack pointer with jssp.
+ Bic(csp, old_stack_pointer, sp_alignment - 1);
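+ // For example, if jssp were 0x...2f3c and sp_alignment is 16, Bic clears
+ // the low four bits and csp becomes 0x...2f30.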
+ SetStackPointer(csp);
+ }
+
+ // Call directly. The function called cannot cause a GC, or allow preemption,
+ // so the return address in the link register stays correct.
+ Call(function);
+
+ if (!csp.Is(old_stack_pointer)) {
+ if (emit_debug_code()) {
+ // Because the stack pointer must be aligned on a 16-byte boundary, the
+ // aligned csp can be up to 12 bytes below the jssp. This is the case
+ // where we only pushed one W register on top of an aligned jssp.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ ASSERT(ActivationFrameAlignment() == 16);
+ Sub(temp, csp, old_stack_pointer);
+ // We want temp <= 0 && temp >= -12.
+ Cmp(temp, 0);
+ Ccmp(temp, -12, NFlag, le);
+ Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
+ }
+ SetStackPointer(old_stack_pointer);
+ }
+}
+
+
+void MacroAssembler::Jump(Register target) {
+ Br(target);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, Operand(target, rmode));
+ Br(temp);
+}
+
+
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ AllowDeferredHandleDereference embedding_raw_address;
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+void MacroAssembler::Call(Register target) {
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ Blr(target);
+
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+void MacroAssembler::Call(Label* target) {
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ Bl(target);
+
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+// MacroAssembler::CallSize is sensitive to changes in this function, as it
+// requires to know how many instructions are used to branch to the target.
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+ // Statement positions are expected to be recorded when the target
+ // address is loaded.
+ positions_recorder()->WriteRecordedPositions();
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ if (rmode == RelocInfo::NONE64) {
+ // Addresses are 48 bits, so we never need to load the upper 16 bits.
+ uint64_t imm = reinterpret_cast<uint64_t>(target);
+ // If we don't use ARM tagged addresses, the upper 16 bits must be 0.
+ ASSERT(((imm >> 48) & 0xffff) == 0);
+ movz(temp, (imm >> 0) & 0xffff, 0);
+ movk(temp, (imm >> 16) & 0xffff, 16);
+ movk(temp, (imm >> 32) & 0xffff, 32);
+ } else {
+ Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
+ }
+ Blr(temp);
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
+#endif
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
+ SetRecordedAstId(ast_id);
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
+
+ AllowDeferredHandleDereference embedding_raw_address;
+ Call(reinterpret_cast<Address>(code.location()), rmode);
+
+#ifdef DEBUG
+ // Check the size of the code generated.
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
+#endif
+}
+
+
+int MacroAssembler::CallSize(Register target) {
+ USE(target);
+ return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Label* target) {
+ USE(target);
+ return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
+ USE(target);
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ if (rmode == RelocInfo::NONE64) {
+ return kCallSizeWithoutRelocation;
+ } else {
+ return kCallSizeWithRelocation;
+ }
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+ USE(code);
+ USE(ast_id);
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ if (rmode == RelocInfo::NONE64) {
+ return kCallSizeWithoutRelocation;
+ } else {
+ return kCallSizeWithRelocation;
+ }
+}
+
+
+void MacroAssembler::JumpForHeapNumber(Register object,
+ Register heap_number_map,
+ Label* on_heap_number,
+ Label* on_not_heap_number) {
+ ASSERT(on_heap_number || on_not_heap_number);
+ AssertNotSmi(object);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ // Load the HeapNumber map if it is not passed.
+ if (heap_number_map.Is(NoReg)) {
+ heap_number_map = temps.AcquireX();
+ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ } else {
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ }
+
+ ASSERT(!AreAliased(temp, heap_number_map));
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Cmp(temp, heap_number_map);
+
+ if (on_heap_number) {
+ B(eq, on_heap_number);
+ }
+ if (on_not_heap_number) {
+ B(ne, on_not_heap_number);
+ }
+}
+
+
+void MacroAssembler::JumpIfHeapNumber(Register object,
+ Label* on_heap_number,
+ Register heap_number_map) {
+ JumpForHeapNumber(object,
+ heap_number_map,
+ on_heap_number,
+ NULL);
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Label* on_not_heap_number,
+ Register heap_number_map) {
+ JumpForHeapNumber(object,
+ heap_number_map,
+ NULL,
+ on_not_heap_number);
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
+
+ // Register usage: 'result' is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
+ FixedArray::kLengthOffset));
+ Asr(mask, mask, 1); // Divide length by two.
+ Sub(mask, mask, 1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
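+ // For example (sketch): the double 1.5 has bit pattern 0x3ff8000000000000,
+ // so its raw hash is 0x3ff80000 ^ 0x00000000 before masking.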
+ Label is_smi;
+ Label load_result_from_cache;
+
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
+ Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
+ Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
+ Eor(scratch1, scratch1, scratch2);
+ And(scratch1, scratch1, mask);
+
+ // Calculate address of entry in string cache: each entry consists of two
+ // pointer sized fields.
+ Add(scratch1, number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
+ Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ Fcmp(d0, d1);
+ B(ne, not_found);
+ B(&load_result_from_cache);
+
+ Bind(&is_smi);
+ Register scratch = scratch1;
+ And(scratch, mask, Operand::UntagSmi(object));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ Add(scratch, number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Cmp(object, probe);
+ B(ne, not_found);
+
+ // Get the result from the cache.
+ Bind(&load_result_from_cache);
+ Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
+ scratch1, scratch2);
+}
+
+
+void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion) {
+ // Convert to an int and back again, then compare with the original value.
+ Fcvtzs(as_int, value);
+ Scvtf(scratch_d, as_int);
+ Fcmp(value, scratch_d);
+
+ if (on_successful_conversion) {
+ B(on_successful_conversion, eq);
+ }
+ if (on_failed_conversion) {
+ B(on_failed_conversion, ne);
+ }
+}
+
+
+void MacroAssembler::TestForMinusZero(DoubleRegister input) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ // Floating point -0.0 has the bit pattern of the most negative 64-bit
+ // integer (INT64_MIN), so subtracting 1 (cmp) causes a signed overflow and
+ // sets the V flag.
+ Fmov(temp, input);
+ Cmp(temp, 1);
+}
+
+
+void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
+ Label* on_negative_zero) {
+ TestForMinusZero(input);
+ B(vs, on_negative_zero);
+}
+
+
+void MacroAssembler::JumpIfMinusZero(Register input,
+ Label* on_negative_zero) {
+ ASSERT(input.Is64Bits());
+ // Floating point value is in an integer register. Detect -0.0 by subtracting
+ // 1 (cmp), which will cause overflow.
+ Cmp(input, 1);
+ B(vs, on_negative_zero);
+}
+
+
+void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
+ // Clamp the value to [0..255].
+ Cmp(input.W(), Operand(input.W(), UXTB));
+ // If input < input & 0xff, it must be < 0, so saturate to 0.
+ Csel(output.W(), wzr, input.W(), lt);
+ // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
+ Csel(output.W(), output.W(), 255, le);
+}
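+
+// Worked example (sketch): input = 300 gives (300 & 0xff) == 44, so neither
+// 'lt' nor 'le' holds and the result saturates to 255; input = -5 compares
+// less than (-5 & 0xff) == 251, so the result is 0; input = 42 equals
+// (42 & 0xff) and is left unchanged.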
+
+
+void MacroAssembler::ClampInt32ToUint8(Register in_out) {
+ ClampInt32ToUint8(in_out, in_out);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register output,
+ DoubleRegister input,
+ DoubleRegister dbl_scratch) {
+ // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
+ // - Inputs lower than 0 (including -infinity) produce 0.
+ // - Inputs higher than 255 (including +infinity) produce 255.
+ // Also, it seems that PIXEL types use round-to-nearest rather than
+ // round-towards-zero.
+
+ // Squash +infinity before the conversion, since Fcvtnu will normally
+ // convert it to 0.
+ Fmov(dbl_scratch, 255);
+ Fmin(dbl_scratch, dbl_scratch, input);
+
+ // Convert double to unsigned integer. Values less than zero become zero.
+ // Values greater than 255 have already been clamped to 255.
+ Fcvtnu(output, dbl_scratch);
+}
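+
+// Worked example (sketch): input = 1e9 is clamped to 255.0 by Fmin; input =
+// -3.2 converts to 0 because Fcvtnu saturates negative values; input = 254.5
+// rounds to 254 (nearest, ties to even).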
+
+
+void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in a tight loop.
+ ASSERT(!AreAliased(dst, src,
+ scratch1, scratch2, scratch3, scratch4, scratch5));
+ ASSERT(count >= 2);
+
+ const Register& remaining = scratch3;
+ Mov(remaining, count / 2);
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ Sub(dst_untagged, dst, kHeapObjectTag);
+ Sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields in pairs.
+ Label loop;
+ Bind(&loop);
+ Ldp(scratch4, scratch5,
+ MemOperand(src_untagged, kXRegSize * 2, PostIndex));
+ Stp(scratch4, scratch5,
+ MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
+ Sub(remaining, remaining, 1);
+ Cbnz(remaining, &loop);
+
+ // Handle the leftovers.
+ if (count & 1) {
+ Ldr(scratch4, MemOperand(src_untagged));
+ Str(scratch4, MemOperand(dst_untagged));
+ }
+}
+
+
+void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ sub(dst_untagged, dst, kHeapObjectTag);
+ sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields in pairs.
+ for (unsigned i = 0; i < count / 2; i++) {
+ Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
+ Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
+ }
+
+ // Handle the leftovers.
+ if (count & 1) {
+ Ldr(scratch3, MemOperand(src_untagged));
+ Str(scratch3, MemOperand(dst_untagged));
+ }
+}
+
+
+void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ Sub(dst_untagged, dst, kHeapObjectTag);
+ Sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields one by one.
+ for (unsigned i = 0; i < count; i++) {
+ Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
+ Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
+ }
+}
+
+
+void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
+ unsigned count) {
+ // One of two methods is used:
+ //
+ // For high 'count' values where many scratch registers are available:
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in a tight loop.
+ //
+ // For low 'count' values or where few scratch registers are available:
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ //
+ // In both cases, fields are copied in pairs if possible, and left-overs are
+ // handled separately.
+ ASSERT(!AreAliased(dst, src));
+ ASSERT(!temps.IncludesAliasOf(dst));
+ ASSERT(!temps.IncludesAliasOf(src));
+ ASSERT(!temps.IncludesAliasOf(xzr));
+
+ if (emit_debug_code()) {
+ Cmp(dst, src);
+ Check(ne, kTheSourceAndDestinationAreTheSame);
+ }
+
+ // The value of 'count' at which a loop will be generated (if there are
+ // enough scratch registers).
+ static const unsigned kLoopThreshold = 8;
+
+ UseScratchRegisterScope masm_temps(this);
+ if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
+ CopyFieldsLoopPairsHelper(dst, src, count,
+ Register(temps.PopLowestIndex()),
+ Register(temps.PopLowestIndex()),
+ Register(temps.PopLowestIndex()),
+ masm_temps.AcquireX(),
+ masm_temps.AcquireX());
+ } else if (temps.Count() >= 2) {
+ CopyFieldsUnrolledPairsHelper(dst, src, count,
+ Register(temps.PopLowestIndex()),
+ Register(temps.PopLowestIndex()),
+ masm_temps.AcquireX(),
+ masm_temps.AcquireX());
+ } else if (temps.Count() == 1) {
+ CopyFieldsUnrolledHelper(dst, src, count,
+ Register(temps.PopLowestIndex()),
+ masm_temps.AcquireX(),
+ masm_temps.AcquireX());
+ } else {
+ UNREACHABLE();
+ }
+}
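+
+// Illustrative use (sketch; the registers and field count are hypothetical):
+//   CPURegList temps(x10, x11);
+//   __ CopyFields(x2, x3, temps, 5); // Copy five tagged fields from x3 to x2.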
+
+
+void MacroAssembler::CopyBytes(Register dst,
+ Register src,
+ Register length,
+ Register scratch,
+ CopyHint hint) {
+ UseScratchRegisterScope temps(this);
+ Register tmp1 = temps.AcquireX();
+ Register tmp2 = temps.AcquireX();
+ ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
+ ASSERT(!AreAliased(src, dst, csp));
+
+ if (emit_debug_code()) {
+ // Check copy length.
+ Cmp(length, 0);
+ Assert(ge, kUnexpectedNegativeValue);
+
+ // Check src and dst buffers don't overlap.
+ Add(scratch, src, length); // Calculate end of src buffer.
+ Cmp(scratch, dst);
+ Add(scratch, dst, length); // Calculate end of dst buffer.
+ Ccmp(scratch, src, ZFlag, gt);
+ Assert(le, kCopyBuffersOverlap);
+ }
+
+ Label short_copy, short_loop, bulk_loop, done;
+
+ if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
+ Register bulk_length = scratch;
+ int pair_size = 2 * kXRegSize;
+ int pair_mask = pair_size - 1;
+
+ Bic(bulk_length, length, pair_mask);
+ Cbz(bulk_length, &short_copy);
+ Bind(&bulk_loop);
+ Sub(bulk_length, bulk_length, pair_size);
+ Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
+ Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
+ Cbnz(bulk_length, &bulk_loop);
+
+ And(length, length, pair_mask);
+ }
+
+ Bind(&short_copy);
+ Cbz(length, &done);
+ Bind(&short_loop);
+ Sub(length, length, 1);
+ Ldrb(tmp1, MemOperand(src, 1, PostIndex));
+ Strb(tmp1, MemOperand(dst, 1, PostIndex));
+ Cbnz(length, &short_loop);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::FillFields(Register dst,
+ Register field_count,
+ Register filler) {
+ ASSERT(!dst.Is(csp));
+ UseScratchRegisterScope temps(this);
+ Register field_ptr = temps.AcquireX();
+ Register counter = temps.AcquireX();
+ Label done;
+
+ // Decrement count. If the result < zero, count was zero, and there's nothing
+ // to do. If count was one, flags are set to fail the gt condition at the end
+ // of the pairs loop.
+ Subs(counter, field_count, 1);
+ B(lt, &done);
+
+ // There's at least one field to fill, so do this unconditionally.
+ Str(filler, MemOperand(dst, kPointerSize, PostIndex));
+
+ // If the bottom bit of counter is set, there are an even number of fields to
+ // fill, so pull the start pointer back by one field, allowing the pairs loop
+ // to overwrite the field that was stored above.
+ And(field_ptr, counter, 1);
+ Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
+
+ // Store filler to memory in pairs.
+ Label entry, loop;
+ B(&entry);
+ Bind(&loop);
+ Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
+ Subs(counter, counter, 2);
+ Bind(&entry);
+ B(gt, &loop);
+
+ Bind(&done);
+}
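+
+// Worked example (sketch): for field_count == 4, the single Str above stores
+// one filler, the odd counter (3) pulls field_ptr back by one field, and the
+// pairs loop then re-writes that slot and fills the remaining three fields.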
+
+
+void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure,
+ SmiCheckType smi_check) {
+
+ if (smi_check == DO_SMI_CHECK) {
+ JumpIfEitherSmi(first, second, failure);
+ } else if (emit_debug_code()) {
+ ASSERT(smi_check == DONT_DO_SMI_CHECK);
+ Label not_smi;
+ JumpIfEitherSmi(first, second, NULL, &not_smi);
+
+ // At least one input is a smi, but the flags indicated a smi check wasn't
+ // needed.
+ Abort(kUnexpectedSmi);
+
+ Bind(&not_smi);
+ }
+
+ // Test that both first and second are sequential ASCII strings.
+ Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
+ scratch2,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ ASSERT(!AreAliased(scratch1, second));
+ ASSERT(!AreAliased(scratch1, scratch2));
+ static const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ And(scratch1, first, kFlatAsciiStringMask);
+ And(scratch2, second, kFlatAsciiStringMask);
+ Cmp(scratch1, kFlatAsciiStringTag);
+ Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch, type, kFlatAsciiStringMask);
+ Cmp(scratch, kFlatAsciiStringTag);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ ASSERT(!AreAliased(first, second, scratch1, scratch2));
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch1, first, kFlatAsciiStringMask);
+ And(scratch2, second, kFlatAsciiStringMask);
+ Cmp(scratch1, kFlatAsciiStringTag);
+ Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Register type,
+ Label* not_unique_name) {
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
+ // continue
+ // } else {
+ // goto not_unique_name
+ // }
+ Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
+ Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
+ B(ne, not_unique_name);
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag,
+ bool* definitely_mismatches,
+ const CallWrapper& call_wrapper) {
+ bool definitely_matches = false;
+ *definitely_mismatches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual arguments count match. If not,
+  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
+ // x0: actual arguments count.
+ // x1: function (passed through to callee).
+ // x2: expected arguments count.
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+ ASSERT(actual.is_immediate() || actual.reg().is(x0));
+ ASSERT(expected.is_immediate() || expected.reg().is(x2));
+ ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
+
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+
+ } else {
+ Mov(x0, actual.immediate());
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip the adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ *definitely_mismatches = true;
+ // Set up x2 for the argument adaptor.
+ Mov(x2, expected.immediate());
+ }
+ }
+
+ } else { // expected is a register.
+ Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
+ : Operand(actual.reg());
+ // If actual == expected perform a regular invocation.
+ Cmp(expected.reg(), actual_op);
+ B(eq, &regular_invoke);
+ // Otherwise set up x0 for the argument adaptor.
+ Mov(x0, actual_op);
+ }
+
+ // If the argument counts may mismatch, generate a call to the argument
+ // adaptor.
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ Mov(x3, Operand(code_constant));
+ Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
+ }
+
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor));
+ Call(adaptor);
+ call_wrapper.AfterCall();
+ if (!*definitely_mismatches) {
+        // If the counts definitely mismatch, MacroAssembler::InvokeCode
+        // emits no code after this point and we can simply fall through;
+        // otherwise branch over the code emitted for the matched case.
+ B(done);
+ }
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ }
+ Bind(&regular_invoke);
+}
+
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ Label done;
+
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ &definitely_mismatches, call_wrapper);
+
+  // If we are certain that actual != expected, then we know InvokePrologue
+  // will have handled the call through the argument adaptor mechanism.
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ Call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code);
+ }
+ }
+
+  // Continue here if InvokePrologue handled the invocation through the
+  // argument adaptor because of mismatched parameter counts.
+ Bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ ASSERT(function.is(x1));
+
+ Register expected_reg = x2;
+ Register code_reg = x3;
+
+ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // The number of arguments is stored as an int32_t, and -1 is a marker
+ // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+ // extension to correctly handle it.
+ Ldr(expected_reg, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+ Ldrsw(expected_reg,
+ FieldMemOperand(expected_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ Ldr(code_reg,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ ASSERT(function.Is(x1));
+
+ Register code_reg = x3;
+
+ // Set up the context.
+ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ __ LoadObject(x1, function);
+ InvokeFunction(x1, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::TryConvertDoubleToInt64(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+  // Try to convert with an FPU convert instruction. We convert to a 64-bit
+  // integer because reducing the result modulo 2^32 is then trivial on an
+  // integer register (just take the low 32 bits).
+  //
+  // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
+  // when the double is out of range, and converts NaNs to 0 (as ECMA-262
+  // requires). Infinities also saturate, so they take the out-of-range path
+  // below.
+ Fcvtzs(result.X(), double_input);
+
+  // Fcvtzs uses INT64_MIN (0x800...00) and INT64_MAX (0x7ff...ff) as its
+  // saturation values, so if the result is one of those then saturation may
+  // have occurred, and we need to handle the conversion manually.
+ //
+ // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
+ // 1 will cause signed overflow.
+ Cmp(result.X(), 1);
+ Ccmp(result.X(), -1, VFlag, vc);
+
+ B(vc, done);
+}
+
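+// Illustrative sketch (not part of this file): what the Cmp/Ccmp pair above
+// detects, in plain C++. SaturatingConvert is a hypothetical stand-in for
+// Fcvtzs.
+//
+//   bool ConvertedWithoutSaturation(double input, int64_t* out) {
+//     int64_t result = SaturatingConvert(input);
+//     *out = result;
+//     // result - 1 overflows only for INT64_MIN, and result + 1 only for
+//     // INT64_MAX; the V flag captures exactly these two cases.
+//     return result != INT64_MIN && result != INT64_MAX;
+//   }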
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+ ASSERT(jssp.Is(StackPointer()));
+
+ // Try to convert the double to an int64. If successful, the bottom 32 bits
+ // contain our truncated int32 result.
+ TryConvertDoubleToInt64(result, double_input, &done);
+
+  // If we fell through, the inline version didn't succeed, so call the stub.
+ Push(lr);
+ Push(double_input); // Put input on stack.
+
+ DoubleToIStub stub(isolate(),
+ jssp,
+ result,
+ 0,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+
+ Drop(1, kDoubleSize); // Drop the double input on the stack.
+ Pop(lr);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result,
+ Register object) {
+ Label done;
+ ASSERT(!result.is(object));
+ ASSERT(jssp.Is(StackPointer()));
+
+ Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // Try to convert the double to an int64. If successful, the bottom 32 bits
+ // contain our truncated int32 result.
+ TryConvertDoubleToInt64(result, fp_scratch, &done);
+
+  // If we fell through, the inline version didn't succeed, so call the stub.
+ Push(lr);
+ DoubleToIStub stub(isolate(),
+ object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+ Pop(lr);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::StubPrologue() {
+ ASSERT(StackPointer().Is(jssp));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ __ Mov(temp, Smi::FromInt(StackFrame::STUB));
+ // Compiled stubs don't age, and so they don't need the predictable code
+ // ageing sequence.
+ __ Push(lr, fp, cp, temp);
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ if (code_pre_aging) {
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ __ EmitCodeAgeSequence(stub);
+ } else {
+ __ EmitFrameSetupForCodeAgePatching();
+ }
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ ASSERT(jssp.Is(StackPointer()));
+ UseScratchRegisterScope temps(this);
+ Register type_reg = temps.AcquireX();
+ Register code_reg = temps.AcquireX();
+
+ Push(lr, fp, cp);
+ Mov(type_reg, Smi::FromInt(type));
+ Mov(code_reg, Operand(CodeObject()));
+ Push(type_reg, code_reg);
+ // jssp[4] : lr
+ // jssp[3] : fp
+ // jssp[2] : cp
+ // jssp[1] : type
+ // jssp[0] : code object
+
+ // Adjust FP to point to saved FP.
+ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ ASSERT(jssp.Is(StackPointer()));
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer and return address.
+ Mov(jssp, fp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+}
+
+
+void MacroAssembler::ExitFramePreserveFPRegs() {
+ PushCPURegList(kCallerSavedFP);
+}
+
+
+void MacroAssembler::ExitFrameRestoreFPRegs() {
+ // Read the registers from the stack without popping them. The stack pointer
+ // will be reset as part of the unwinding process.
+ CPURegList saved_fp_regs = kCallerSavedFP;
+ ASSERT(saved_fp_regs.Count() % 2 == 0);
+
+ int offset = ExitFrameConstants::kLastExitFrameField;
+ while (!saved_fp_regs.IsEmpty()) {
+ const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
+ const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
+ offset -= 2 * kDRegSize;
+ Ldp(dst1, dst0, MemOperand(fp, offset));
+ }
+}
+
+
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+ const Register& scratch,
+ int extra_space) {
+ ASSERT(jssp.Is(StackPointer()));
+
+ // Set up the new stack frame.
+ Mov(scratch, Operand(CodeObject()));
+ Push(lr, fp);
+ Mov(fp, StackPointer());
+ Push(xzr, scratch);
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // jssp -> fp[-16]: CodeObject()
+ STATIC_ASSERT((2 * kPointerSize) ==
+ ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
+ STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
+ STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
+
+ // Save the frame pointer and context pointer in the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ isolate())));
+ Str(fp, MemOperand(scratch));
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Str(cp, MemOperand(scratch));
+
+ STATIC_ASSERT((-2 * kPointerSize) ==
+ ExitFrameConstants::kLastExitFrameField);
+ if (save_doubles) {
+ ExitFramePreserveFPRegs();
+ }
+
+ // Reserve space for the return address and for user requested memory.
+ // We do this before aligning to make sure that we end up correctly
+ // aligned with the minimum of wasted space.
+ Claim(extra_space + 1, kXRegSize);
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // jssp[8]: Extra space reserved for caller (if extra_space != 0).
+ // jssp -> jssp[0]: Space reserved for the return address.
+
+ // Align and synchronize the system stack pointer with jssp.
+ AlignAndSetCSPForFrame();
+ ASSERT(csp.Is(StackPointer()));
+
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // Alignment padding, if necessary.
+ // csp -> csp[0]: Space reserved for the return address.
+
+ // ExitFrame::GetStateForFramePointer expects to find the return address at
+ // the memory address immediately below the pointer stored in SPOffset.
+ // It is not safe to derive much else from SPOffset, because the size of the
+ // padding can vary.
+ Add(scratch, csp, kXRegSize);
+ Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+
+// Leave the current exit frame.
+void MacroAssembler::LeaveExitFrame(bool restore_doubles,
+ const Register& scratch,
+ bool restore_context) {
+ ASSERT(csp.Is(StackPointer()));
+
+ if (restore_doubles) {
+ ExitFrameRestoreFPRegs();
+ }
+
+ // Restore the context pointer from the top frame.
+ if (restore_context) {
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Ldr(cp, MemOperand(scratch));
+ }
+
+ if (emit_debug_code()) {
+ // Also emit debug code to clear the cp in the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Str(xzr, MemOperand(scratch));
+ }
+ // Clear the frame pointer from the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ isolate())));
+ Str(xzr, MemOperand(scratch));
+
+ // Pop the exit frame.
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[...]: The rest of the frame.
+ Mov(jssp, fp);
+ SetStackPointer(jssp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Mov(scratch1, value);
+ Mov(scratch2, ExternalReference(counter));
+ Str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ ASSERT(value != 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Mov(scratch2, ExternalReference(counter));
+ Ldr(scratch1, MemOperand(scratch2));
+ Add(scratch1, scratch1, value);
+ Str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ IncrementCounter(counter, -value, scratch1, scratch2);
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in cp).
+ Mov(dst, cp);
+ }
+}
+
+
+void MacroAssembler::DebugBreak() {
+ Mov(x0, 0);
+ Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
+ CEntryStub ces(isolate(), 1);
+ ASSERT(AllowThisStubCall(&ces));
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+ int handler_index) {
+ ASSERT(jssp.Is(StackPointer()));
+ // Adjust this code if the asserts don't hold.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve the live registers x0-x4.
+ // (See JSEntryStub::GenerateBody().)
+
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+
+ // Set up the code object and the state for pushing.
+ Mov(x10, Operand(CodeObject()));
+ Mov(x11, state);
+
+ // Push the frame pointer, context, state, and code object.
+ if (kind == StackHandler::JS_ENTRY) {
+ ASSERT(Smi::FromInt(0) == 0);
+ Push(xzr, xzr, x11, x10);
+ } else {
+ Push(fp, cp, x11, x10);
+ }
+
+ // Link the current handler as the next handler.
+ Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
+ Ldr(x10, MemOperand(x11));
+ Push(x10);
+ // Set this new handler as the current one.
+ Str(jssp, MemOperand(x11));
+}
+
+
+void MacroAssembler::PopTryHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ Pop(x10);
+ Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
+ Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
+ Str(x10, MemOperand(x11));
+}
+
+
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ // We apply salt to the original zap value to easily spot the values.
+ Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+ Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+      Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
+ }
+ B(gc_required);
+ return;
+ }
+
+ UseScratchRegisterScope temps(this);
+ Register scratch3 = temps.AcquireX();
+
+ ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
+ ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ ASSERT(0 == (object_size & kObjectAlignmentMask));
+
+ // Check relative positions of allocation top and limit addresses.
+ // The values must be adjacent in memory to allow the use of LDP.
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference heap_allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register top_address = scratch1;
+ Register allocation_limit = scratch2;
+ Mov(top_address, Operand(heap_allocation_top));
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and the allocation limit.
+ Ldp(result, allocation_limit, MemOperand(top_address));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ Ldr(scratch3, MemOperand(top_address));
+ Cmp(result, scratch3);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load the allocation limit. 'result' already contains the allocation top.
+ Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ }
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ // Calculate new top and bail out if new space is exhausted.
+ Adds(scratch3, result, object_size);
+ Ccmp(scratch3, allocation_limit, CFlag, cc);
+ B(hi, gc_required);
+ Str(scratch3, MemOperand(top_address));
+
+ // Tag the object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ ObjectTag(result, result);
+ }
+}
+
+
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ // We apply salt to the original zap value to easily spot the values.
+ Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+ Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+      Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
+ }
+ B(gc_required);
+ return;
+ }
+
+ UseScratchRegisterScope temps(this);
+ Register scratch3 = temps.AcquireX();
+
+ ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
+ ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
+ scratch1.Is64Bits() && scratch2.Is64Bits());
+
+ // Check relative positions of allocation top and limit addresses.
+ // The values must be adjacent in memory to allow the use of LDP.
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference heap_allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register top_address = scratch1;
+ Register allocation_limit = scratch2;
+ Mov(top_address, heap_allocation_top);
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and the allocation limit.
+ Ldp(result, allocation_limit, MemOperand(top_address));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ Ldr(scratch3, MemOperand(top_address));
+ Cmp(result, scratch3);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load the allocation limit. 'result' already contains the allocation top.
+ Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ }
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ // Calculate new top and bail out if new space is exhausted
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
+ } else {
+ Adds(scratch3, result, object_size);
+ }
+
+ if (emit_debug_code()) {
+ Tst(scratch3, kObjectAlignmentMask);
+ Check(eq, kUnalignedAllocationInNewSpace);
+ }
+
+ Ccmp(scratch3, allocation_limit, CFlag, cc);
+ B(hi, gc_required);
+ Str(scratch3, MemOperand(top_address));
+
+ // Tag the object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ ObjectTag(result, result);
+ }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ Bic(object, object, kHeapObjectTagMask);
+#ifdef DEBUG
+ // Check that the object un-allocated is below the current top.
+ Mov(scratch, new_space_allocation_top);
+ Ldr(scratch, MemOperand(scratch));
+ Cmp(object, scratch);
+ Check(lt, kUndoAllocationOfNonAllocatedMemory);
+#endif
+ // Write the address of the object to un-allocate as the current top.
+ Mov(scratch, new_space_allocation_top);
+ Str(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ Add(scratch1, length, length); // Length in bytes, not chars.
+ Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+ Bic(scratch1, scratch1, kObjectAlignmentMask);
+
+ // Allocate two-byte string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT(kCharSize == 1);
+ Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
+ Bic(scratch1, scratch1, kObjectAlignmentMask);
+
+ // Allocate ASCII string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+// Allocates a heap number or jumps to the gc_required label if the young
+// space is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ CPURegister value,
+ CPURegister heap_number_map) {
+ ASSERT(!value.IsValid() || value.Is64Bits());
+ UseScratchRegisterScope temps(this);
+
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
+
+ // Prepare the heap number map.
+ if (!heap_number_map.IsValid()) {
+ // If we have a valid value register, use the same type of register to store
+ // the map so we can use STP to store both in one instruction.
+ if (value.IsValid() && value.IsFPRegister()) {
+ heap_number_map = temps.AcquireD();
+ } else {
+ heap_number_map = scratch1;
+ }
+ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ }
+ if (emit_debug_code()) {
+ Register map;
+ if (heap_number_map.IsFPRegister()) {
+ map = scratch1;
+ Fmov(map, DoubleRegister(heap_number_map));
+ } else {
+ map = Register(heap_number_map);
+ }
+ AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
+ }
+
+ // Store the heap number map and the value in the allocated object.
+ if (value.IsSameSizeAndType(heap_number_map)) {
+ STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
+ HeapNumber::kValueOffset);
+ Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
+ } else {
+ Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ if (value.IsValid()) {
+ Str(value, MemOperand(result, HeapNumber::kValueOffset));
+ }
+ }
+ ObjectTag(result, result);
+}
+
+
+void MacroAssembler::JumpIfObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_cond_pass,
+ Condition cond) {
+ CompareObjectType(object, map, type_reg, type);
+ B(cond, if_cond_pass);
+}
+
+
+void MacroAssembler::JumpIfNotObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_not_object) {
+ JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+void MacroAssembler::CompareObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type) {
+ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(map, type_reg, type);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+void MacroAssembler::CompareInstanceType(Register map,
+ Register type_reg,
+ InstanceType type) {
+ Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Cmp(type_reg, type);
+}
+
+
+void MacroAssembler::CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map) {
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareMap(scratch, map);
+}
+
+
+void MacroAssembler::CompareMap(Register obj_map,
+ Handle<Map> map) {
+ Cmp(obj_map, Operand(map));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+
+ CompareMap(obj, scratch, map);
+ B(ne, fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ JumpIfNotRoot(scratch, index, fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj_map,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj_map, fail);
+ }
+
+ CompareMap(obj_map, map);
+ B(ne, fail);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Cmp(scratch, Operand(map));
+ B(ne, &fail);
+ Jump(success, RelocInfo::CODE_TARGET);
+ Bind(&fail);
+}
+
+
+void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ Tst(temp, mask);
+}
+
+
+void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
+ // Load the map's "bit field 2".
+ __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ DecodeField<Map::ElementsKindBits>(result);
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ BoundFunctionAction action) {
+ ASSERT(!AreAliased(function, result, scratch));
+
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
+
+ if (action == kMissOnBoundFunction) {
+ Register scratch_w = scratch.W();
+ Ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    // On 64-bit platforms, the compiler hints field is not a smi. See the
+    // definition of kCompilerHintsOffset in src/objects.h.
+ Ldr(scratch_w,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
+ }
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ Ldr(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and simply
+ // miss the cache instead. This will allow us to allocate a prototype object
+ // on-demand in the runtime system.
+ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
+
+ // Get the prototype from the initial map.
+ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ Bind(&non_instance);
+ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ Bind(&done);
+}
+
+
+void MacroAssembler::CompareRoot(const Register& obj,
+ Heap::RootListIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ ASSERT(!AreAliased(obj, temp));
+ LoadRoot(temp, index);
+ Cmp(obj, temp);
+}
+
+
+void MacroAssembler::JumpIfRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_equal) {
+ CompareRoot(obj, index);
+ B(eq, if_equal);
+}
+
+
+void MacroAssembler::JumpIfNotRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_not_equal) {
+ CompareRoot(obj, index);
+ B(ne, if_not_equal);
+}
+
+
+void MacroAssembler::CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if ((if_true == if_false) && (if_false == fall_through)) {
+ // Fall through.
+ } else if (if_true == if_false) {
+ B(if_true);
+ } else if (if_false == fall_through) {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ } else if (if_true == fall_through) {
+ CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
+ } else {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ B(if_false);
+ }
+}
+
+
+void MacroAssembler::TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through) {
+ if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
+ // Fall through.
+ } else if (if_all_clear == if_any_set) {
+ B(if_all_clear);
+ } else if (if_all_clear == fall_through) {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ } else if (if_any_set == fall_through) {
+ TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
+ } else {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ B(if_all_clear);
+ }
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+  // If the previous comparison gave ls (a smi-only elements kind), force the
+  // hi condition so the branch below fails; otherwise compare against the
+  // holey-element limit.
+ Ccmp(scratch,
+ Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
+ B(hi, fail);
+}
+
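+// Illustrative sketch (not part of this file): the range check that the
+// Cmp/Ccmp pair above encodes; B(hi, fail) is taken when this is false.
+//
+//   bool IsFastObjectElementsKind(unsigned bit_field2) {
+//     return bit_field2 > Map::kMaximumBitField2FastHoleySmiElementValue &&
+//            bit_field2 <= Map::kMaximumBitField2FastHoleyElementValue;
+//   }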
+
+// Note: The ARM version of this clobbers elements_reg, but this version does
+// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ Label* fail,
+ int elements_offset) {
+ ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+ Label store_num;
+
+ // Speculatively convert the smi to a double - all smis can be exactly
+ // represented as a double.
+ SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
+
+ // If value_reg is a smi, we're done.
+ JumpIfSmi(value_reg, &store_num);
+
+ // Ensure that the object is a heap number.
+ CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
+ fail, DONT_DO_SMI_CHECK);
+
+ Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ // Canonicalize NaNs.
+ CanonicalizeNaN(fpscratch1);
+
+ // Store the result.
+ Bind(&store_num);
+ Add(scratch1, elements_reg,
+ Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
+ Str(fpscratch1,
+ FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+}
+
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+  // If the hash field contains an array index, pick it out. The assert
+  // checks that the constant for the maximum number of digits of an array
+  // index cached in the hash field does not conflict with the number of
+  // bits reserved for it.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ DecodeField<String::ArrayIndexValueBits>(index, hash);
+ SmiTag(index, index);
+}
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(
+ Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask) {
+ ASSERT(!AreAliased(string, index, scratch));
+
+ if (index_type == kIndexIsSmi) {
+ AssertSmi(index);
+ }
+
+ // Check that string is an object.
+ AssertNotSmi(string, kNonObject);
+
+ // Check that string has an appropriate map.
+ Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
+ Cmp(scratch, encoding_mask);
+ Check(eq, kUnexpectedStringType);
+
+ Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
+ Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
+ Check(lt, kIndexIsTooLarge);
+
+ ASSERT_EQ(0, Smi::FromInt(0));
+ Cmp(index, 0);
+ Check(ge, kIndexIsNegative);
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+ Label same_contexts;
+
+ // Load current lexical context from the stack frame.
+ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ Cmp(scratch1, 0);
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+#endif
+
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ Ldr(scratch1, FieldMemOperand(scratch1, offset));
+ Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+    // Read the first word and compare to the native_context_map.
+ Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
+ CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+ }
+
+ // Check if both contexts are the same.
+ Ldr(scratch2, FieldMemOperand(holder_reg,
+ JSGlobalProxy::kNativeContextOffset));
+ Cmp(scratch1, scratch2);
+ B(&same_contexts, eq);
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // We're short on scratch registers here, so use holder_reg as a scratch.
+ Push(holder_reg);
+ Register scratch3 = holder_reg;
+
+ CompareRoot(scratch2, Heap::kNullValueRootIndex);
+ Check(ne, kExpectedNonNullContext);
+
+ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+ Pop(holder_reg);
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
+ Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
+ Cmp(scratch1, scratch2);
+ B(miss, ne);
+
+ Bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
+void MacroAssembler::GetNumberHash(Register key, Register scratch) {
+ ASSERT(!AreAliased(key, scratch));
+
+ // Xor original key with a seed.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ Eor(key, key, Operand::UntagSmi(scratch));
+
+ // The algorithm uses 32-bit integer values.
+ key = key.W();
+ scratch = scratch.W();
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+  // hash = ~hash + (hash << 15);
+ Mvn(scratch, key);
+ Add(key, scratch, Operand(key, LSL, 15));
+ // hash = hash ^ (hash >> 12);
+ Eor(key, key, Operand(key, LSR, 12));
+ // hash = hash + (hash << 2);
+ Add(key, key, Operand(key, LSL, 2));
+ // hash = hash ^ (hash >> 4);
+ Eor(key, key, Operand(key, LSR, 4));
+ // hash = hash * 2057;
+ Mov(scratch, Operand(key, LSL, 11));
+ Add(key, key, Operand(key, LSL, 3));
+ Add(key, key, scratch);
+ // hash = hash ^ (hash >> 16);
+ Eor(key, key, Operand(key, LSR, 16));
+}
+
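+// Illustrative sketch (not part of this file): the same hash in plain C++,
+// mirroring ComputeIntegerHash in utils.h (seed step omitted):
+//
+//   uint32_t ComputeIntegerHashRef(uint32_t hash) {
+//     hash = ~hash + (hash << 15);
+//     hash = hash ^ (hash >> 12);
+//     hash = hash + (hash << 2);
+//     hash = hash ^ (hash >> 4);
+//     hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11).
+//     hash = hash ^ (hash >> 16);
+//     return hash;
+//   }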
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
+
+ Label done;
+
+ SmiUntag(scratch0, key);
+ GetNumberHash(scratch0, scratch1);
+
+ // Compute the capacity mask.
+ Ldrsw(scratch1,
+ UntagSmiFieldMemOperand(elements,
+ SeededNumberDictionary::kCapacityOffset));
+ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+    // Compute the masked index: (hash + GetProbeOffset(i)) & mask, where
+    // GetProbeOffset(i) == (i + i * i) / 2.
+ if (i > 0) {
+ Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
+ } else {
+ Mov(scratch2, scratch0);
+ }
+ And(scratch2, scratch2, scratch1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ Ldr(scratch3,
+ FieldMemOperand(scratch2,
+ SeededNumberDictionary::kElementsStartOffset));
+ Cmp(key, scratch3);
+ if (i != (kNumberDictionaryProbes - 1)) {
+ B(eq, &done);
+ } else {
+ B(ne, miss);
+ }
+ }
+
+ Bind(&done);
+ // Check that the value is a normal property.
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ Ldr(result, FieldMemOperand(scratch2, kValueOffset));
+}
+
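+// Illustrative sketch (not part of this file): the probe sequence above in
+// plain C++ (structure only; entry comparison and layout elided).
+//
+//   int FindEntryRef(uint32_t hash, uint32_t mask /* capacity - 1 */) {
+//     for (int i = 0; i < kNumberDictionaryProbes; i++) {
+//       // GetProbeOffset(i) == (i + i * i) / 2, giving quadratic probing.
+//       uint32_t index = (hash + (i + i * i) / 2) & mask;
+//       // Entries are kEntrySize (3) pointers apart; compare the key at
+//       // entry 'index' and return on a match.
+//     }
+//     return -1;  // Miss.
+//   }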
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch1,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ ASSERT(!AreAliased(object, address, scratch1));
+ Label done, store_buffer_overflow;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, &ok);
+ Abort(kRememberedSetPointerInNewSpace);
+ bind(&ok);
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.AcquireX();
+
+ // Load store buffer top.
+ Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
+ Ldr(scratch1, MemOperand(scratch2));
+ // Store pointer to buffer and increment buffer top.
+ Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
+ // Write back new top of buffer.
+ Str(scratch1, MemOperand(scratch2));
+  // Check for the end of the buffer, and call the stub on overflow.
+ ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
+ (1 << (14 + kPointerSizeLog2)));
+ if (and_then == kFallThroughAtEnd) {
+ Tbz(scratch1, (14 + kPointerSizeLog2), &done);
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
+ Ret();
+ }
+
+ Bind(&store_buffer_overflow);
+ Push(lr);
+ StoreBufferOverflowStub store_buffer_overflow_stub =
+ StoreBufferOverflowStub(isolate(), fp_mode);
+ CallStub(&store_buffer_overflow_stub);
+ Pop(lr);
+
+ Bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
+ }
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ PopXRegList(kSafepointSavedRegisters);
+ Drop(num_unsaved);
+}
+
+
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
+ // adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ ASSERT(num_unsaved >= 0);
+ Claim(num_unsaved);
+ PushXRegList(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ PushSafepointRegisters();
+ PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ FPRegister::kAllocatableFPRegisters));
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+ PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ FPRegister::kAllocatableFPRegisters));
+ PopSafepointRegisters();
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // Make sure the safepoint registers list is what we expect.
+ ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+
+ // Safepoint registers are stored contiguously on the stack, but not all the
+ // registers are saved. The following registers are excluded:
+ // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
+ // the macro assembler.
+ // - x28 (jssp) because JS stack pointer doesn't need to be included in
+ // safepoint registers.
+ // - x31 (csp) because the system stack pointer doesn't need to be included
+ // in safepoint registers.
+ //
+ // This function implements the mapping of register code to index into the
+ // safepoint register slots.
+ if ((reg_code >= 0) && (reg_code <= 15)) {
+ return reg_code;
+ } else if ((reg_code >= 18) && (reg_code <= 27)) {
+ // Skip ip0 and ip1.
+ return reg_code - 2;
+ } else if ((reg_code == 29) || (reg_code == 30)) {
+ // Also skip jssp.
+ return reg_code - 3;
+ } else {
+ // This register has no safepoint register slot.
+ UNREACHABLE();
+ return -1;
+ }
+}
+
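+// For reference, the mapping implemented above (slots are stack indices):
+//
+//   reg_code  0..15 -> slot  0..15  (x0-x15 stored directly)
+//   reg_code 18..27 -> slot 16..25  (shifted down past ip0/ip1)
+//   reg_code 29..30 -> slot 26..27  (also shifted past jssp)
+//   reg_code 16, 17, 28 and 31 (ip0, ip1, jssp, csp) have no slot.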
+
+void MacroAssembler::CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set) {
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAnySet(scratch, mask, if_any_set);
+}
+
+
+void MacroAssembler::CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear) {
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAllClear(scratch, mask, if_all_clear);
+}
+
+
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip the barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ Add(scratch, object, offset - kHeapObjectTag);
+ if (emit_debug_code()) {
+ Label ok;
+ Tst(scratch, (1 << kPointerSizeLog2) - 1);
+ B(eq, &ok);
+ Abort(kUnalignedCellInWriteBarrier);
+ Bind(&ok);
+ }
+
+ RecordWrite(object,
+ scratch,
+ value,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
+
+ Bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
+ Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber: object, map, dst.
+// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode) {
+  ASM_LOCATION("MacroAssembler::RecordWriteForMap");
+ ASSERT(!AreAliased(object, map));
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ CompareMap(map, temp, isolate()->factory()->meta_map());
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Cmp(temp, map);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+  // A single check of the map's page's interesting flag suffices, since it
+  // is only set during incremental collection, and then it is guaranteed
+  // that the from object's page's interesting flag is also set. This
+  // optimization relies on the fact that maps can never be in new space.
+ CheckPageFlagClear(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ Push(lr);
+ }
+ Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ Pop(lr);
+ }
+
+ Bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
+ Mov(map, Operand(BitCast<int64_t>(kZapValue + 16)));
+ }
+}
+
+
+// Will clobber: object, address, value.
+// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
+//
+// The register 'object' contains a heap object pointer. The heap object tag is
+// shifted away.
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ ASM_LOCATION("MacroAssembler::RecordWrite");
+ ASSERT(!AreAliased(object, value));
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, MemOperand(address));
+ Cmp(temp, value);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlagClear(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+ }
+ CheckPageFlagClear(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ Push(lr);
+ }
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ Pop(lr);
+ }
+
+ Bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::AssertHasValidColor(const Register& reg) {
+ if (emit_debug_code()) {
+ // The bit sequence is backward. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label color_is_valid;
+ Tbnz(reg, 0, &color_is_valid);
+ Tbz(reg, 1, &color_is_valid);
+ Abort(kUnexpectedColorFound);
+ Bind(&color_is_valid);
+ }
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
+ ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
+ // addr_reg is divided into fields:
+ // |63 page base 20|19 high 8|7 shift 3|2 0|
+ // 'high' gives the index of the cell holding color bits for the object.
+ // 'shift' gives the offset in the cell for this object's color.
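+  // For example, an object at page_base + 0x1234 has bits [2:0] = 0x4,
+  // shift = bits [7:3] = 6 and high = bits [19:8] = 18, so its color bits
+  // live at bit offset 6 of bitmap cell 18.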
+ const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
+ Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
+ Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
+ // bitmap_reg:
+ // |63 page base 20|19 zeros 15|14 high 3|2 0|
+ Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ // See mark-compact.h for color definitions.
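+  // For example, (first_bit, second_bit) == (1, 0) checks for black and
+  // (1, 1) checks for grey, matching the bit patterns asserted below.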
+ ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
+
+ GetMarkBits(object, bitmap_scratch, shift_scratch);
+ Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Shift the bitmap down to get the color of the object in bits [1:0].
+ Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
+
+ AssertHasValidColor(bitmap_scratch);
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ // Check for the color.
+ if (first_bit == 0) {
+ // Checking for white.
+ ASSERT(second_bit == 0);
+ // We only need to test the first bit.
+ Tbz(bitmap_scratch, 0, has_color);
+ } else {
+ Label other_color;
+ // Checking for grey or black.
+ Tbz(bitmap_scratch, 0, &other_color);
+ if (second_bit == 0) {
+ Tbz(bitmap_scratch, 1, has_color);
+ } else {
+ Tbnz(bitmap_scratch, 1, has_color);
+ }
+ Bind(&other_color);
+ }
+
+ // Fall through if it does not have the right color.
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ Mov(scratch, Operand(map));
+ Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
+ }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!AreAliased(object, scratch0, scratch1));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+  // 'current' starts at the object itself.
+ Mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ Bind(&loop_again);
+ Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
+ Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
+}
+
+
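+// Compute the address of the literal loaded by the 'ldr (literal)'
+// instruction at 'ldr_location' and leave it in 'result'. The pc-relative
+// offset is held in imm19 (bits 23:5) and is scaled by 4, so, for instance,
+// an ldr whose literal lives 8 bytes ahead encodes imm19 = 2.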
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+ Register result) {
+ ASSERT(!result.Is(ldr_location));
+ const uint32_t kLdrLitOffset_lsb = 5;
+ const uint32_t kLdrLitOffset_width = 19;
+ Ldr(result, MemOperand(ldr_location));
+ if (emit_debug_code()) {
+ And(result, result, LoadLiteralFMask);
+ Cmp(result, LoadLiteralFixed);
+ Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
+ // The instruction was clobbered. Reload it.
+ Ldr(result, MemOperand(ldr_location));
+ }
+ Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
+ Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Register load_scratch,
+ Register length_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(
+ value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ GetMarkBits(value, bitmap_scratch, shift_scratch);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Lsr(load_scratch, load_scratch, shift_scratch);
+
+ AssertHasValidColor(load_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ Label done;
+ Tbnz(load_scratch, 0, &done);
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ Register map = load_scratch; // Holds map while checking type.
+ Label is_data_object;
+
+ // Check for heap-number.
+ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ Mov(length_scratch, HeapNumber::kSize);
+ JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not an indirect (cons or sliced) string then
+  // it's an object containing no GC pointers.
+ Register instance_type = load_scratch;
+ Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ TestAndBranchIfAnySet(instance_type,
+ kIsIndirectStringMask | kIsNotStringMask,
+ value_is_white_and_not_data);
+
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ Mov(length_scratch, ExternalString::kSize);
+ TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
+
+  // Sequential string, either ASCII or UC16.
+  // The length is loaded with the smi tag already removed. For UC16
+  // (char-size of 2) we shift the length left by one to get the size in
+  // bytes; for ASCII (char-size of 1) we leave it unchanged.
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
+ String::kLengthOffset));
+ Tst(instance_type, kStringEncodingMask);
+ Cset(load_scratch, eq);
+ Lsl(length_scratch, length_scratch, load_scratch);
+ Add(length_scratch,
+ length_scratch,
+ SeqString::kHeaderSize + kObjectAlignmentMask);
+ Bic(length_scratch, length_scratch, kObjectAlignmentMask);
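+  // For example, a sequential ASCII string of length 5 rounds up to
+  // SeqString::kHeaderSize + 8 bytes, assuming the header size is itself
+  // aligned to kObjectAlignment.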
+
+ Bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ Register mask = shift_scratch;
+ Mov(load_scratch, 1);
+ Lsl(mask, load_scratch, shift_scratch);
+
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Orr(load_scratch, load_scratch, mask);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ Add(load_scratch, load_scratch, length_scratch);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
+ if (emit_debug_code()) {
+ Check(cond, reason);
+ }
+}
+
+
+void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
+ if (emit_debug_code()) {
+ CheckRegisterIsClear(reg, reason);
+ }
+}
+
+
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason) {
+ if (emit_debug_code()) {
+ CompareRoot(reg, index);
+ Check(eq, reason);
+ }
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Label ok;
+ Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
+ JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ Bind(&ok);
+ }
+}
+
+
+void MacroAssembler::AssertIsString(const Register& object) {
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, kOperandIsNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::Check(Condition cond, BailoutReason reason) {
+ Label ok;
+ B(cond, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
+ Label ok;
+ Cbz(reg, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(GetBailoutReason(reason));
+
+ if (FLAG_trap_on_abort) {
+ Brk(0);
+ return;
+ }
+#endif
+
+ // Abort is used in some contexts where csp is the stack pointer. In order to
+ // simplify the CallRuntime code, make sure that jssp is the stack pointer.
+ // There is no risk of register corruption here because Abort doesn't return.
+ Register old_stack_pointer = StackPointer();
+ SetStackPointer(jssp);
+ Mov(jssp, old_stack_pointer);
+
+ // We need some scratch registers for the MacroAssembler, so make sure we have
+ // some. This is safe here because Abort never returns.
+ RegList old_tmp_list = TmpList()->list();
+ TmpList()->Combine(MacroAssembler::DefaultTmpList());
+
+ if (use_real_aborts()) {
+ // Avoid infinite recursion; Push contains some assertions that use Abort.
+ NoUseRealAbortsScope no_real_aborts(this);
+
+ Mov(x0, Smi::FromInt(reason));
+ Push(x0);
+
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 1);
+ } else {
+ CallRuntime(Runtime::kAbort, 1);
+ }
+ } else {
+ // Load the string to pass to Printf.
+ Label msg_address;
+ Adr(x0, &msg_address);
+
+ // Call Printf directly to report the error.
+ CallPrintf();
+
+ // We need a way to stop execution on both the simulator and real hardware,
+ // and Unreachable() is the best option.
+ Unreachable();
+
+ // Emit the message string directly in the instruction stream.
+ {
+ BlockPoolsScope scope(this);
+ Bind(&msg_address);
+ EmitStringData(GetBailoutReason(reason));
+ }
+ }
+
+ SetStackPointer(old_stack_pointer);
+ TmpList()->set_list(old_tmp_list);
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch1,
+ Register scratch2,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ Ldr(scratch1, GlobalObjectMemOperand());
+ Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
+ size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(scratch2, FieldMemOperand(scratch1, offset));
+ Cmp(map_in_out, scratch2);
+ B(ne, no_map_match);
+
+ // Use the transitioned cached map.
+ offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(map_in_out, FieldMemOperand(scratch1, offset));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ Ldr(function, GlobalObjectMemOperand());
+ // Load the native context from the global or builtins object.
+ Ldr(function, FieldMemOperand(function,
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ Ldr(function, ContextMemOperand(function, index));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+ B(&ok);
+ Bind(&fail);
+ Abort(kGlobalFunctionsMustHaveInitialMap);
+ Bind(&ok);
+ }
+}
+
+
+// This is the main Printf implementation. All other Printf variants call
+// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
+void MacroAssembler::PrintfNoPreserve(const char * format,
+ const CPURegister& arg0,
+ const CPURegister& arg1,
+ const CPURegister& arg2,
+ const CPURegister& arg3) {
+ // We cannot handle a caller-saved stack pointer. It doesn't make much sense
+ // in most cases anyway, so this restriction shouldn't be too serious.
+ ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
+
+ // The provided arguments, and their proper procedure-call standard registers.
+ CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
+ CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
+
+ int arg_count = kPrintfMaxArgCount;
+
+ // The PCS varargs registers for printf. Note that x0 is used for the printf
+ // format string.
+ static const CPURegList kPCSVarargs =
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
+ static const CPURegList kPCSVarargsFP =
+ CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
+
+ // We can use caller-saved registers as scratch values, except for the
+ // arguments and the PCS registers where they might need to go.
+ CPURegList tmp_list = kCallerSaved;
+ tmp_list.Remove(x0); // Used to pass the format string.
+ tmp_list.Remove(kPCSVarargs);
+ tmp_list.Remove(arg0, arg1, arg2, arg3);
+
+ CPURegList fp_tmp_list = kCallerSavedFP;
+ fp_tmp_list.Remove(kPCSVarargsFP);
+ fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
+
+ // Override the MacroAssembler's scratch register list. The lists will be
+ // reset automatically at the end of the UseScratchRegisterScope.
+ UseScratchRegisterScope temps(this);
+ TmpList()->set_list(tmp_list.list());
+ FPTmpList()->set_list(fp_tmp_list.list());
+
+ // Copies of the printf vararg registers that we can pop from.
+ CPURegList pcs_varargs = kPCSVarargs;
+ CPURegList pcs_varargs_fp = kPCSVarargsFP;
+
+ // Place the arguments. There are lots of clever tricks and optimizations we
+ // could use here, but Printf is a debug tool so instead we just try to keep
+  // it simple: Move each input that isn't already in the right place to a
+  // scratch register, then move everything into its final position.
+ for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
+ // Work out the proper PCS register for this argument.
+ if (args[i].IsRegister()) {
+ pcs[i] = pcs_varargs.PopLowestIndex().X();
+ // We might only need a W register here. We need to know the size of the
+ // argument so we can properly encode it for the simulator call.
+ if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
+ } else if (args[i].IsFPRegister()) {
+ // In C, floats are always cast to doubles for varargs calls.
+ pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
+ } else {
+ ASSERT(args[i].IsNone());
+ arg_count = i;
+ break;
+ }
+
+ // If the argument is already in the right place, leave it where it is.
+ if (args[i].Aliases(pcs[i])) continue;
+
+ // Otherwise, if the argument is in a PCS argument register, allocate an
+ // appropriate scratch register and then move it out of the way.
+ if (kPCSVarargs.IncludesAliasOf(args[i]) ||
+ kPCSVarargsFP.IncludesAliasOf(args[i])) {
+ if (args[i].IsRegister()) {
+ Register old_arg = Register(args[i]);
+ Register new_arg = temps.AcquireSameSizeAs(old_arg);
+ Mov(new_arg, old_arg);
+ args[i] = new_arg;
+ } else {
+ FPRegister old_arg = FPRegister(args[i]);
+ FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
+ Fmov(new_arg, old_arg);
+ args[i] = new_arg;
+ }
+ }
+ }
+
+ // Do a second pass to move values into their final positions and perform any
+ // conversions that may be required.
+ for (int i = 0; i < arg_count; i++) {
+ ASSERT(pcs[i].type() == args[i].type());
+ if (pcs[i].IsRegister()) {
+ Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
+ } else {
+ ASSERT(pcs[i].IsFPRegister());
+ if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
+ Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
+ } else {
+ Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
+ }
+ }
+ }
+
+ // Load the format string into x0, as per the procedure-call standard.
+ //
+ // To make the code as portable as possible, the format string is encoded
+ // directly in the instruction stream. It might be cleaner to encode it in a
+ // literal pool, but since Printf is usually used for debugging, it is
+ // beneficial for it to be minimally dependent on other features.
+ Label format_address;
+ Adr(x0, &format_address);
+
+ // Emit the format string directly in the instruction stream.
+ { BlockPoolsScope scope(this);
+ Label after_data;
+ B(&after_data);
+ Bind(&format_address);
+ EmitStringData(format);
+ Unreachable();
+ Bind(&after_data);
+ }
+
+ // We don't pass any arguments on the stack, but we still need to align the C
+ // stack pointer to a 16-byte boundary for PCS compliance.
+ if (!csp.Is(StackPointer())) {
+ Bic(csp, StackPointer(), 0xf);
+ }
+
+ CallPrintf(arg_count, pcs);
+}
+
+
+void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
+ // A call to printf needs special handling for the simulator, since the system
+ // printf function will use a different instruction set and the procedure-call
+ // standard will not be compatible.
+#ifdef USE_SIMULATOR
+ { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
+ hlt(kImmExceptionIsPrintf);
+ dc32(arg_count); // kPrintfArgCountOffset
+
+ // Determine the argument pattern.
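+    // For example, arguments (w0, d1) produce
+    // kPrintfArgW | (kPrintfArgD << kPrintfArgPatternBits).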
+ uint32_t arg_pattern_list = 0;
+ for (int i = 0; i < arg_count; i++) {
+ uint32_t arg_pattern;
+ if (args[i].IsRegister()) {
+ arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
+ } else {
+ ASSERT(args[i].Is64Bits());
+ arg_pattern = kPrintfArgD;
+ }
+ ASSERT(arg_pattern < (1 << kPrintfArgPatternBits));
+ arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
+ }
+ dc32(arg_pattern_list); // kPrintfArgPatternListOffset
+ }
+#else
+ Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
+#endif
+}
+
+
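+// A hypothetical debugging call (for illustration only):
+//   __ Printf("count: %d, value: %f\n", w0, d1);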
+void MacroAssembler::Printf(const char * format,
+ CPURegister arg0,
+ CPURegister arg1,
+ CPURegister arg2,
+ CPURegister arg3) {
+ // We can only print sp if it is the current stack pointer.
+ if (!csp.Is(StackPointer())) {
+ ASSERT(!csp.Aliases(arg0));
+ ASSERT(!csp.Aliases(arg1));
+ ASSERT(!csp.Aliases(arg2));
+ ASSERT(!csp.Aliases(arg3));
+ }
+
+ // Printf is expected to preserve all registers, so make sure that none are
+ // available as scratch registers until we've preserved them.
+ RegList old_tmp_list = TmpList()->list();
+ RegList old_fp_tmp_list = FPTmpList()->list();
+ TmpList()->set_list(0);
+ FPTmpList()->set_list(0);
+
+ // Preserve all caller-saved registers as well as NZCV.
+ // If csp is the stack pointer, PushCPURegList asserts that the size of each
+ // list is a multiple of 16 bytes.
+ PushCPURegList(kCallerSaved);
+ PushCPURegList(kCallerSavedFP);
+
+ // We can use caller-saved registers as scratch values (except for argN).
+ CPURegList tmp_list = kCallerSaved;
+ CPURegList fp_tmp_list = kCallerSavedFP;
+ tmp_list.Remove(arg0, arg1, arg2, arg3);
+ fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
+ TmpList()->set_list(tmp_list.list());
+ FPTmpList()->set_list(fp_tmp_list.list());
+
+ { UseScratchRegisterScope temps(this);
+ // If any of the arguments are the current stack pointer, allocate a new
+ // register for them, and adjust the value to compensate for pushing the
+ // caller-saved registers.
+ bool arg0_sp = StackPointer().Aliases(arg0);
+ bool arg1_sp = StackPointer().Aliases(arg1);
+ bool arg2_sp = StackPointer().Aliases(arg2);
+ bool arg3_sp = StackPointer().Aliases(arg3);
+ if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
+ // Allocate a register to hold the original stack pointer value, to pass
+ // to PrintfNoPreserve as an argument.
+ Register arg_sp = temps.AcquireX();
+ Add(arg_sp, StackPointer(),
+ kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
+ if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
+ if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
+ if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
+ if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
+ }
+
+ // Preserve NZCV.
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mrs(tmp, NZCV);
+ Push(tmp, xzr);
+ }
+
+ PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+
+ // Restore NZCV.
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Pop(xzr, tmp);
+ Msr(NZCV, tmp);
+ }
+ }
+
+ PopCPURegList(kCallerSavedFP);
+ PopCPURegList(kCallerSaved);
+
+ TmpList()->set_list(old_tmp_list);
+ FPTmpList()->set_list(old_fp_tmp_list);
+}
+
+
+void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
+ // TODO(jbramley): Other architectures use the internal memcpy to copy the
+ // sequence. If this is a performance bottleneck, we should consider caching
+ // the sequence and copying it in the same way.
+ InstructionAccurateScope scope(this,
+ kNoCodeAgeSequenceLength / kInstructionSize);
+ ASSERT(jssp.Is(StackPointer()));
+ EmitFrameSetupForCodeAgePatching(this);
+}
+
+
+void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
+ InstructionAccurateScope scope(this,
+ kNoCodeAgeSequenceLength / kInstructionSize);
+ ASSERT(jssp.Is(StackPointer()));
+ EmitCodeAgeSequence(this, stub);
+}
+
+
+#undef __
+#define __ assm->
+
+
+void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
+ Label start;
+ __ bind(&start);
+
+ // We can do this sequence using four instructions, but the code ageing
+ // sequence that patches it needs five, so we use the extra space to try to
+ // simplify some addressing modes and remove some dependencies (compared to
+ // using two stp instructions with write-back).
+ __ sub(jssp, jssp, 4 * kXRegSize);
+ __ sub(csp, csp, 4 * kXRegSize);
+ __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
+ __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
+ __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
+}
+
+
+void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
+ Code * stub) {
+ Label start;
+ __ bind(&start);
+ // When the stub is called, the sequence is replaced with the young sequence
+ // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
+ // stub jumps to &start, stored in x0. The young sequence does not call the
+ // stub so there is no infinite loop here.
+ //
+ // A branch (br) is used rather than a call (blr) because this code replaces
+ // the frame setup code that would normally preserve lr.
+ __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
+ __ adr(x0, &start);
+ __ br(ip0);
+ // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
+ // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
+ if (stub) {
+ __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
+ __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
+ }
+}
+
+
+bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(is_young ||
+ isolate->code_aging_helper()->IsOld(sequence));
+ return is_young;
+}
+
+
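+// Compute 'result = dividend / divisor' using a precomputed multiplier and
+// shift (see MultiplierAndShift). For instance, a divisor of 3 yields
+// multiplier 0x55555556 and shift 0, so a dividend of 7 gives
+// (7 * 0x55555556) >> 32 == 2, plus the sign bit (0), i.e. 7 / 3 == 2.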
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!AreAliased(result, dividend));
+ ASSERT(result.Is32Bits() && dividend.Is32Bits());
+ MultiplierAndShift ms(divisor);
+ Mov(result, ms.multiplier());
+ Smull(result.X(), dividend, result);
+ Asr(result.X(), result.X(), 32);
+ if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
+ if (ms.shift() > 0) Asr(result, result, ms.shift());
+ Add(result, result, Operand(dividend, LSR, 31));
+}
+
+
+#undef __
+
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ available_->set_list(old_available_);
+ availablefp_->set_list(old_availablefp_);
+}
+
+
+Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
+ int code = AcquireNextAvailable(available_).code();
+ return Register::Create(code, reg.SizeInBits());
+}
+
+
+FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
+ int code = AcquireNextAvailable(availablefp_).code();
+ return FPRegister::Create(code, reg.SizeInBits());
+}
+
+
+CPURegister UseScratchRegisterScope::AcquireNextAvailable(
+ CPURegList* available) {
+ CHECK(!available->IsEmpty());
+ CPURegister result = available->PopLowestIndex();
+ ASSERT(!AreAliased(result, xzr, csp));
+ return result;
+}
+
+
+CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
+ const CPURegister& reg) {
+ ASSERT(available->IncludesAliasOf(reg));
+ available->Remove(reg);
+ return reg;
+}
+
+
+#define __ masm->
+
+
+void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
+ const Label* smi_check) {
+ Assembler::BlockPoolsScope scope(masm);
+ if (reg.IsValid()) {
+ ASSERT(smi_check->is_bound());
+ ASSERT(reg.Is64Bits());
+
+    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
+    // 'smi_check' in the other bits. The possible offset is limited because
+    // we use BitField to pack the data, and the underlying data type is a
+    // uint32_t.
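+    // For example, register x3 with a delta of 2 instructions packs the
+    // register code 3 into bits [4:0] and the delta 2 into the bits above.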
+ uint32_t delta = __ InstructionsGeneratedSince(smi_check);
+ __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
+ } else {
+ ASSERT(!smi_check->is_bound());
+
+ // An offset of 0 indicates that there is no patch site.
+ __ InlineData(0);
+ }
+}
+
+
+InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
+ : reg_(NoReg), smi_check_(NULL) {
+ InstructionSequence* inline_data = InstructionSequence::At(info);
+ ASSERT(inline_data->IsInlineData());
+ if (inline_data->IsInlineData()) {
+ uint64_t payload = inline_data->InlineData();
+ // We use BitField to decode the payload, and BitField can only handle
+ // 32-bit values.
+ ASSERT(is_uint32(payload));
+ if (payload != 0) {
+ int reg_code = RegisterBits::decode(payload);
+ reg_ = Register::XRegFromCode(reg_code);
+ uint64_t smi_check_delta = DeltaBits::decode(payload);
+ ASSERT(smi_check_delta != 0);
+ smi_check_ = inline_data->preceding(smi_check_delta);
+ }
+ }
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/macro-assembler-arm64.h b/chromium/v8/src/arm64/macro-assembler-arm64.h
new file mode 100644
index 00000000000..34182c02707
--- /dev/null
+++ b/chromium/v8/src/arm64/macro-assembler-arm64.h
@@ -0,0 +1,2327 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
+#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
+
+#include <vector>
+
+#include "src/globals.h"
+
+#include "src/arm64/assembler-arm64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define LS_MACRO_LIST(V) \
+ V(Ldrb, Register&, rt, LDRB_w) \
+ V(Strb, Register&, rt, STRB_w) \
+ V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
+ V(Ldrh, Register&, rt, LDRH_w) \
+ V(Strh, Register&, rt, STRH_w) \
+ V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
+ V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
+ V(Str, CPURegister&, rt, StoreOpFor(rt)) \
+ V(Ldrsw, Register&, rt, LDRSW_x)
+
+
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset);
+inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
+
+// Generate a MemOperand for loading a SMI from memory.
+inline MemOperand UntagSmiMemOperand(Register object, int offset);
+
+
+// ----------------------------------------------------------------------------
+// MacroAssembler
+
+enum BranchType {
+ // Copies of architectural conditions.
+ // The associated conditions can be used in place of those, the code will
+ // take care of reinterpreting them with the correct type.
+ integer_eq = eq,
+ integer_ne = ne,
+ integer_hs = hs,
+ integer_lo = lo,
+ integer_mi = mi,
+ integer_pl = pl,
+ integer_vs = vs,
+ integer_vc = vc,
+ integer_hi = hi,
+ integer_ls = ls,
+ integer_ge = ge,
+ integer_lt = lt,
+ integer_gt = gt,
+ integer_le = le,
+ integer_al = al,
+ integer_nv = nv,
+
+ // These two are *different* from the architectural codes al and nv.
+ // 'always' is used to generate unconditional branches.
+ // 'never' is used to not generate a branch (generally as the inverse
+  // branch type of 'always').
+ always, never,
+ // cbz and cbnz
+ reg_zero, reg_not_zero,
+ // tbz and tbnz
+ reg_bit_clear, reg_bit_set,
+
+ // Aliases.
+ kBranchTypeFirstCondition = eq,
+ kBranchTypeLastCondition = nv,
+ kBranchTypeFirstUsingReg = reg_zero,
+ kBranchTypeFirstUsingBit = reg_bit_clear
+};
+
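+// Invert a branch type, e.g. InvertBranchType(integer_eq) == integer_ne and
+// InvertBranchType(reg_zero) == reg_not_zero.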
+inline BranchType InvertBranchType(BranchType type) {
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ return static_cast<BranchType>(
+ NegateCondition(static_cast<Condition>(type)));
+ } else {
+ return static_cast<BranchType>(type ^ 1);
+ }
+}
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+enum TargetAddressStorageMode {
+ CAN_INLINE_TARGET_ADDRESS,
+ NEVER_INLINE_TARGET_ADDRESS
+};
+enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
+enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
+enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
+enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
+enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
+
+class MacroAssembler : public Assembler {
+ public:
+ MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
+
+ inline Handle<Object> CodeObject();
+
+ // Instruction set functions ------------------------------------------------
+ // Logical macros.
+ inline void And(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Tst(const Register& rn, const Operand& operand);
+ void LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+
+ // Add and sub macros.
+ inline void Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Cmn(const Register& rn, const Operand& operand);
+ inline void Cmp(const Register& rn, const Operand& operand);
+ inline void Neg(const Register& rd,
+ const Operand& operand);
+ inline void Negs(const Register& rd,
+ const Operand& operand);
+
+ void AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ // Add/sub with carry macros.
+ inline void Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ngc(const Register& rd,
+ const Operand& operand);
+ inline void Ngcs(const Register& rd,
+ const Operand& operand);
+ void AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Move macros.
+ void Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+ void Mov(const Register& rd, uint64_t imm);
+ inline void Mvn(const Register& rd, uint64_t imm);
+ void Mvn(const Register& rd, const Operand& operand);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+ static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+ // Conditional macros.
+ inline void Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ void Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond);
+
+ // Load/store macros.
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
+ inline void FN(const REGTYPE REG, const MemOperand& addr);
+ LS_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+
+ // V8-specific load/store helpers.
+ void Load(const Register& rt, const MemOperand& addr, Representation r);
+ void Store(const Register& rt, const MemOperand& addr, Representation r);
+
+ enum AdrHint {
+ // The target must be within the immediate range of adr.
+ kAdrNear,
+ // The target may be outside of the immediate range of adr. Additional
+ // instructions may be emitted.
+ kAdrFar
+ };
+ void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);
+
+ // Remaining instructions are simple pass-through calls to the assembler.
+ inline void Asr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Asr(const Register& rd, const Register& rn, const Register& rm);
+
+ // Branch type inversion relies on these relations.
+ STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
+ (reg_bit_clear == (reg_bit_set ^ 1)) &&
+ (always == (never ^ 1)));
+
+ void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
+
+ inline void B(Label* label);
+ inline void B(Condition cond, Label* label);
+ void B(Label* label, Condition cond);
+ inline void Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bind(Label* label);
+ inline void Bl(Label* label);
+ inline void Blr(const Register& xn);
+ inline void Br(const Register& xn);
+ inline void Brk(int code);
+ void Cbnz(const Register& rt, Label* label);
+ void Cbz(const Register& rt, Label* label);
+ inline void Cinc(const Register& rd, const Register& rn, Condition cond);
+ inline void Cinv(const Register& rd, const Register& rn, Condition cond);
+ inline void Cls(const Register& rd, const Register& rn);
+ inline void Clz(const Register& rd, const Register& rn);
+ inline void Cneg(const Register& rd, const Register& rn, Condition cond);
+ inline void CzeroX(const Register& rd, Condition cond);
+ inline void CmovX(const Register& rd, const Register& rn, Condition cond);
+ inline void Cset(const Register& rd, Condition cond);
+ inline void Csetm(const Register& rd, Condition cond);
+ inline void Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Dmb(BarrierDomain domain, BarrierType type);
+ inline void Dsb(BarrierDomain domain, BarrierType type);
+ inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
+ inline void Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+ inline void Fabs(const FPRegister& fd, const FPRegister& fn);
+ inline void Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
+ inline void Fcmp(const FPRegister& fn, double value);
+ inline void Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+ inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fcvtas(const Register& rd, const FPRegister& fn);
+ inline void Fcvtau(const Register& rd, const FPRegister& fn);
+ inline void Fcvtms(const Register& rd, const FPRegister& fn);
+ inline void Fcvtmu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtns(const Register& rd, const FPRegister& fn);
+ inline void Fcvtnu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzs(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzu(const Register& rd, const FPRegister& fn);
+ inline void Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmov(FPRegister fd, FPRegister fn);
+ inline void Fmov(FPRegister fd, Register rn);
+ // Provide explicit double and float interfaces for FP immediate moves, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of fd. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ inline void Fmov(FPRegister fd, double imm);
+ inline void Fmov(FPRegister fd, float imm);
+ // Provide a template to allow other types to be converted automatically.
+ template<typename T>
+ void Fmov(FPRegister fd, T imm) {
+ ASSERT(allow_macro_instructions_);
+ Fmov(fd, static_cast<double>(imm));
+ }
+ inline void Fmov(Register rd, FPRegister fn);
+ inline void Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fneg(const FPRegister& fd, const FPRegister& fn);
+ inline void Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Frinta(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintm(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintn(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintz(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Hint(SystemHint code);
+ inline void Hlt(int code);
+ inline void Isb();
+ inline void Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src);
+ // Load a literal from the inline constant pool.
+ inline void Ldr(const CPURegister& rt, const Immediate& imm);
+ // Helper function for double immediate.
+ inline void Ldr(const CPURegister& rt, double imm);
+ inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
+ inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
+ inline void Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
+ inline void Mov(const Register& rd, const Register& rm);
+ inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
+ inline void Mrs(const Register& rt, SystemRegister sysreg);
+ inline void Msr(SystemRegister sysreg, const Register& rt);
+ inline void Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mul(const Register& rd, const Register& rn, const Register& rm);
+ inline void Nop() { nop(); }
+ inline void Rbit(const Register& rd, const Register& rn);
+ inline void Ret(const Register& xn = lr);
+ inline void Rev(const Register& rd, const Register& rn);
+ inline void Rev16(const Register& rd, const Register& rn);
+ inline void Rev32(const Register& rd, const Register& rn);
+ inline void Ror(const Register& rd, const Register& rs, unsigned shift);
+ inline void Ror(const Register& rd, const Register& rn, const Register& rm);
+ inline void Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Sxtb(const Register& rd, const Register& rn);
+ inline void Sxth(const Register& rd, const Register& rn);
+ inline void Sxtw(const Register& rd, const Register& rn);
+ void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbz(const Register& rt, unsigned bit_pos, Label* label);
+ inline void Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Uxtb(const Register& rd, const Register& rn);
+ inline void Uxth(const Register& rd, const Register& rn);
+ inline void Uxtw(const Register& rd, const Register& rn);
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Compute rd = abs(rm).
+ // This function clobbers the condition flags. On output the overflow flag is
+ // set iff the negation overflowed.
+ //
+ // If rm is the minimum representable value, the result is not representable.
+ // Handlers for each case can be specified using the relevant labels.
+ void Abs(const Register& rd, const Register& rm,
+ Label * is_not_representable = NULL,
+ Label * is_representable = NULL);
+
+ // Push or pop up to 4 registers of the same width to or from the stack,
+ // using the current stack pointer as set by SetStackPointer.
+ //
+ // If an argument register is 'NoReg', all further arguments are also assumed
+ // to be 'NoReg', and are thus not pushed or popped.
+ //
+ // Arguments are ordered such that "Push(a, b);" is functionally equivalent
+ // to "Push(a); Push(b);".
+ //
+ // It is valid to push the same register more than once, and there is no
+ // restriction on the order in which registers are specified.
+ //
+ // It is not valid to pop into the same register more than once in one
+ // operation, not even into the zero register.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is csp, then it
+ // must be aligned to 16 bytes on entry and the total size of the specified
+ // registers must also be a multiple of 16 bytes.
+ //
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // Push (and derived methods) will still modify the system stack pointer in
+ // order to comply with ABI rules about accessing memory below the system
+ // stack pointer.
+ //
+ // Other than the registers passed into Pop, the stack pointer and (possibly)
+ // the system stack pointer, these methods do not modify any other registers.
+ void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
+ const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
+ void Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3,
+ const CPURegister& src4, const CPURegister& src5 = NoReg,
+ const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
+ const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
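+
+  // For example (hypothetical), "Push(x0, x1);" leaves x1 at the lower
+  // address, and a matching "Pop(x1, x0);" restores both registers.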
+
+ // Alternative forms of Push and Pop, taking a RegList or CPURegList that
+ // specifies the registers that are to be pushed or popped. Higher-numbered
+ // registers are associated with higher memory addresses (as in the A32 push
+ // and pop instructions).
+ //
+ // (Push|Pop)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
+ // kSRegSizeInBits are supported.
+ //
+ // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
+ void PushCPURegList(CPURegList registers);
+ void PopCPURegList(CPURegList registers);
+
+ inline void PushSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PushCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PopSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PopCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PushXRegList(RegList regs) {
+ PushSizeRegList(regs, kXRegSizeInBits);
+ }
+ inline void PopXRegList(RegList regs) {
+ PopSizeRegList(regs, kXRegSizeInBits);
+ }
+ inline void PushWRegList(RegList regs) {
+ PushSizeRegList(regs, kWRegSizeInBits);
+ }
+ inline void PopWRegList(RegList regs) {
+ PopSizeRegList(regs, kWRegSizeInBits);
+ }
+ inline void PushDRegList(RegList regs) {
+ PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+ }
+ inline void PopDRegList(RegList regs) {
+ PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+ }
+ inline void PushSRegList(RegList regs) {
+ PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+ }
+ inline void PopSRegList(RegList regs) {
+ PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+ }
+
+ // Push the specified register 'count' times.
+ void PushMultipleTimes(CPURegister src, Register count);
+ void PushMultipleTimes(CPURegister src, int count);
+
+ // This is a convenience method for pushing a single Handle<Object>.
+ inline void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ // Aliases of Push and Pop, required for V8 compatibility.
+ inline void push(Register src) {
+ Push(src);
+ }
+ inline void pop(Register dst) {
+ Pop(dst);
+ }
+
+ // Sometimes callers need to push or pop multiple registers in a way that is
+ // difficult to structure efficiently for fixed Push or Pop calls. This scope
+ // allows push requests to be queued up, then flushed at once. The
+ // MacroAssembler will try to generate the most efficient sequence required.
+ //
+ // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
+ // register sizes and types.
+ class PushPopQueue {
+ public:
+ explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }
+
+ ~PushPopQueue() {
+ ASSERT(queued_.empty());
+ }
+
+ void Queue(const CPURegister& rt) {
+ size_ += rt.SizeInBytes();
+ queued_.push_back(rt);
+ }
+
+ enum PreambleDirective {
+ WITH_PREAMBLE,
+ SKIP_PREAMBLE
+ };
+ void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
+ void PopQueued();
+
+ private:
+ MacroAssembler* masm_;
+ int size_;
+ std::vector<CPURegister> queued_;
+ };
+
+ // Poke 'src' onto the stack. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Poke(const CPURegister& src, const Operand& offset);
+
+ // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Peek(const CPURegister& dst, const Operand& offset);
+
+ // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
+ // with 'src2' at a higher address than 'src1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
+
+ // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
+ // values peeked will be adjacent, with the value in 'dst2' being from a
+ // higher address than 'dst1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
+
+ // Claim or drop stack space without actually accessing memory.
+ //
+ // In debug mode, both of these will write invalid data into the claimed or
+ // dropped space.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then it
+ // must be aligned to 16 bytes and the size claimed or dropped must be a
+ // multiple of 16 bytes.
+ //
+ // Note that unit_size must be specified in bytes. For variants which take a
+ // Register count, the unit size must be a power of two.
+ inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Claim(const Register& count,
+ uint64_t unit_size = kXRegSize);
+ inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Drop(const Register& count,
+ uint64_t unit_size = kXRegSize);
+
+ // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
+ // register.
+ inline void ClaimBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSize);
+ inline void DropBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSize);
+
+ // Compare a register with an operand, and branch to label depending on the
+ // condition. May corrupt the status flags.
+ inline void CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ANY of
+ // those bits are set. May corrupt the status flags.
+ inline void TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ALL of
+ // those bits are clear (ie. not set.) May corrupt the status flags.
+ inline void TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
+
+ // Insert one or more instructions into the instruction stream that encode
+ // some caller-defined data. The instructions used will be executable with no
+ // side effects.
+ inline void InlineData(uint64_t data);
+
+ // Insert an instrumentation enable marker into the instruction stream.
+ inline void EnableInstrumentation();
+
+ // Insert an instrumentation disable marker into the instruction stream.
+ inline void DisableInstrumentation();
+
+ // Insert an instrumentation event marker into the instruction stream. These
+ // will be picked up by the instrumentation system to annotate an instruction
+ // profile. The argument marker_name must be a printable two character string;
+ // it will be encoded in the event marker.
+ inline void AnnotateInstrumentation(const char* marker_name);
+
+ // If emit_debug_code() is true, emit a run-time check to ensure that
+ // StackPointer() does not point below the system stack pointer.
+ //
+ // Whilst it is architecturally legal for StackPointer() to point below csp,
+ // it can be evidence of a potential bug because the ABI forbids accesses
+ // below csp.
+ //
+ // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
+ // enabled, then csp will be dereferenced to cause the processor
+ // (or simulator) to abort if it is not properly aligned.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertStackConsistency();
+
+ // Preserve the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are pushed before lower-numbered registers, and
+ // thus get higher addresses.
+ // Floating-point registers are pushed before general-purpose registers, and
+ // thus get higher addresses.
+ //
+ // Note that registers are not checked for invalid values. Use this method
+ // only if you know that the GC won't try to examine the values on the stack.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PushCalleeSavedRegisters();
+
+ // Restore the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are popped after lower-numbered registers, and
+ // thus come from higher addresses.
+ // Floating-point registers are popped after general-purpose registers, and
+ // thus come from higher addresses.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PopCalleeSavedRegisters();
+
+ // Set the current stack pointer, but don't generate any code.
+ inline void SetStackPointer(const Register& stack_pointer) {
+ ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
+ sp_ = stack_pointer;
+ }
+
+ // Return the current stack pointer, as set by SetStackPointer.
+ inline const Register& StackPointer() const {
+ return sp_;
+ }
+
+ // Align csp for a frame, as per ActivationFrameAlignment, and make it the
+ // current stack pointer.
+ inline void AlignAndSetCSPForFrame() {
+ int sp_alignment = ActivationFrameAlignment();
+ // AAPCS64 mandates at least 16-byte alignment.
+ ASSERT(sp_alignment >= 16);
+ ASSERT(IsPowerOf2(sp_alignment));
+ Bic(csp, StackPointer(), sp_alignment - 1);
+ SetStackPointer(csp);
+ }
+
+ // Push the system stack pointer (csp) down to allow the same to be done to
+ // the current stack pointer (according to StackPointer()). This must be
+ // called _before_ accessing the memory.
+ //
+ // This is necessary when pushing or otherwise adding things to the stack, to
+ // satisfy the AAPCS64 constraint that the memory below the system stack
+ // pointer is not accessed. The amount pushed will be increased as necessary
+ // to ensure csp remains aligned to 16 bytes.
+ //
+ // This method asserts that StackPointer() is not csp, since the call does
+ // not make sense in that context.
+ inline void BumpSystemStackPointer(const Operand& space);
+
+ // Re-synchronizes the system stack pointer (csp) with the current stack
+ // pointer (according to StackPointer()). This function will ensure that the
+ // new value of the system stack pointer remains aligned to 16 bytes, and
+ // is lower than or equal to the value of the current stack pointer.
+ //
+ // This method asserts that StackPointer() is not csp, since the call does
+ // not make sense in that context.
+ inline void SyncSystemStackPointer();
+
+ // Helpers ------------------------------------------------------------------
+ // Root register.
+ inline void InitializeRootRegister();
+
+ void AssertFPCRState(Register fpcr = NoReg);
+ void ConfigureFPCR();
+ void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
+ void CanonicalizeNaN(const FPRegister& reg) {
+ CanonicalizeNaN(reg, reg);
+ }
+
+ // Load an object from the root table.
+ void LoadRoot(CPURegister destination,
+ Heap::RootListIndex index);
+ // Store an object to the root table.
+ void StoreRoot(Register source,
+ Heap::RootListIndex index);
+
+ // Load both TrueValue and FalseValue roots.
+ void LoadTrueFalseRoots(Register true_root, Register false_root);
+
+ void LoadHeapObject(Register dst, Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ ASSERT(object->IsSmi());
+ Mov(result, Operand(object));
+ }
+ }
+
+ static int SafepointRegisterStackIndex(int reg_code);
+
+ // This is required for compatibility with architecture-independent code.
+ // Remove if not needed.
+ inline void Move(Register dst, Register src) { Mov(dst, src); }
+
+ void LoadInstanceDescriptors(Register map,
+ Register descriptors);
+ void EnumLengthUntagged(Register dst, Register map);
+ void EnumLengthSmi(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register dst, Register src) {
+ static const uint64_t shift = Field::kShift;
+ static const uint64_t setbits = CountSetBits(Field::kMask, 32);
+ Ubfx(dst, src, shift, setbits);
+ }
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
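+
+ // For example, a sketch of extracting a map's elements kind via the
+ // Map::ElementsKindBits BitField (register choices are illustrative):
+ //   DecodeField<Map::ElementsKindBits>(x1, x0);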
+
+ // ---- SMI and Number Utilities ----
+
+ inline void SmiTag(Register dst, Register src);
+ inline void SmiTag(Register smi);
+ inline void SmiUntag(Register dst, Register src);
+ inline void SmiUntag(Register smi);
+ inline void SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+ inline void SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+
+ // Tag and push in one step.
+ inline void SmiTagAndPush(Register src);
+ inline void SmiTagAndPush(Register src1, Register src2);
+
+ // Compute the absolute value of 'smi' and leave the result in 'smi'
+ // register. If 'smi' is the most negative SMI, the absolute value cannot
+ // be represented as a SMI and a jump to 'slow' is done.
+ void SmiAbs(const Register& smi, Label* slow);
+
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label);
+ inline void JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
+ inline void JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
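+
+ // For example, a sketch of guarding a smi-only fast path (the label name is
+ // illustrative):
+ //   JumpIfEitherNotSmi(x0, x1, &slow_path);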
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
+ void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
+
+ inline void ObjectTag(Register tagged_obj, Register obj);
+ inline void ObjectUntag(Register untagged_obj, Register obj);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ void JumpForHeapNumber(Register object,
+ Register heap_number_map,
+ Label* on_heap_number,
+ Label* on_not_heap_number = NULL);
+ void JumpIfHeapNumber(Register object,
+ Label* on_heap_number,
+ Register heap_number_map = NoReg);
+ void JumpIfNotHeapNumber(Register object,
+ Label* on_not_heap_number,
+ Register heap_number_map = NoReg);
+
+ // Sets the vs flag if the input is -0.0.
+ void TestForMinusZero(DoubleRegister input);
+
+ // Jump to label if the input double register contains -0.0.
+ void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
+
+ // Jump to label if the input integer register contains the double precision
+ // floating point representation of -0.0.
+ void JumpIfMinusZero(Register input, Label* on_negative_zero);
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache, the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache, the code jumps to
+ // the label not_found, leaving only the content of the object register
+ // unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
+ // output.
+ void ClampInt32ToUint8(Register in_out);
+ void ClampInt32ToUint8(Register output, Register input);
+
+ // Saturate a double in input to an unsigned 8-bit integer in output.
+ void ClampDoubleToUint8(Register output,
+ DoubleRegister input,
+ DoubleRegister dbl_scratch);
+
+ // Try to represent a double as a signed 32-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are represented as 0 and handled as a success.
+ //
+ // On output the Z flag is set if the operation was successful.
+ void TryRepresentDoubleAsInt32(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion = NULL,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is32Bits());
+ TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
+
+ // Try to represent a double as a signed 64-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are represented as 0 and handled as a success.
+ //
+ // On output the Z flag is set if the operation was successful.
+ void TryRepresentDoubleAsInt64(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion = NULL,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is64Bits());
+ TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
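+
+ // For example, a sketch of testing the Z flag after a conversion attempt
+ // (register and label names are illustrative):
+ //   TryRepresentDoubleAsInt64(x0, d0, d1);
+ //   B(ne, &conversion_failed);  // Z is clear if the conversion failed.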
+
+ // ---- Object Utilities ----
+
+ // Copy fields from 'src' to 'dst', where both are tagged objects.
+ // The 'temps' list is a list of X registers which can be used for scratch
+ // values. The temps list must include at least one register.
+ //
+ // Currently, CopyFields cannot make use of more than three registers from
+ // the 'temps' list.
+ //
+ // CopyFields expects to be able to take at least two registers from
+ // MacroAssembler::TmpList().
+ void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
+
+ // Starting at address in dst, initialize field_count 64-bit fields with
+ // 64-bit value in register filler. Register dst is corrupted.
+ void FillFields(Register dst,
+ Register field_count,
+ Register filler);
+
+ // Copies a number of bytes from src to dst. All passed registers are
+ // clobbered. On exit src and dst will point to the place just after where the
+ // last byte was read or written and length will be zero. Hint may be used to
+ // determine which is the most efficient algorithm to use for copying.
+ void CopyBytes(Register dst,
+ Register src,
+ Register length,
+ Register scratch,
+ CopyHint hint = kCopyUnknown);
+
+ // ---- String Utilities ----
+
+
+ // Jump to label if either object is not a sequential ASCII string.
+ // Optionally perform a smi check on the objects first.
+ void JumpIfEitherIsNotSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure,
+ SmiCheckType smi_check = DO_SMI_CHECK);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfEitherInstanceTypeIsNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+
+ // ---- Calling / Jumping helpers ----
+
+ // This is required for compatibility with architecture-independent code.
+ inline void jmp(Label* L) { B(L); }
+
+ // Passes thrown value to the handler of top of the try handler chain.
+ // Register value must be x0.
+ void Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain. Register value must be x0.
+ void ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+ void TailCallStub(CodeStub* stub);
+
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
+
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
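+
+ // For example, a sketch of calling a one-argument runtime function (the
+ // runtime id shown is illustrative):
+ //   Push(x0);
+ //   CallRuntime(Runtime::kNumberToString, 1);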
+
+ void TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size);
+
+ int ActivationFrameAlignment();
+
+ // Calls a C function.
+ // The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments);
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments);
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions.
+ // 'stack_space' is the space to be unwound on exit (includes the call JS
+ // arguments space and the additional space allocated for the fast call).
+ // 'spill_offset' is the offset from the stack pointer where
+ // CallApiFunctionAndReturn can spill registers.
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
+
+ // The number of registers that CallApiFunctionAndReturn will need to save on
+ // the stack. The space for these registers needs to be allocated in the
+ // ExitFrame before calling CallApiFunctionAndReturn.
+ static const int kCallApiFunctionSpillSpace = 4;
+
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& builtin);
+ // Tail call of a runtime routine (jump).
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ // Store the code object for the given builtin in the target register and
+ // set up the function in the function register.
+ void GetBuiltinEntry(Register target,
+ Register function,
+ Builtins::JavaScript id);
+
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+ void Jump(Register target);
+ void Jump(Address target, RelocInfo::Mode rmode);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode);
+ void Jump(intptr_t target, RelocInfo::Mode rmode);
+
+ void Call(Register target);
+ void Call(Label* target);
+ void Call(Address target, RelocInfo::Mode rmode);
+ void Call(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // For every Call variant, there is a matching CallSize function that returns
+ // the size (in bytes) of the call sequence.
+ static int CallSize(Register target);
+ static int CallSize(Label* target);
+ static int CallSize(Address target, RelocInfo::Mode rmode);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // Registers used through the invocation chain are hard-coded.
+ // We force passing the parameters to ensure the contracts are correctly
+ // honoured by the caller.
+ // 'function' must be x1.
+ // 'actual' must use an immediate or x0.
+ // 'expected' must use an immediate or x2.
+ // 'call_kind' must be x5.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag,
+ bool* definitely_mismatches,
+ const CallWrapper& call_wrapper);
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ // Invoke the JavaScript function in the given register.
+ // Changes the current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+
+ // ---- Floating point helpers ----
+
+ // Perform a conversion from a double to a signed int64. If the input fits in
+ // range of the 64-bit result, execution branches to done. Otherwise,
+ // execution falls through, and the sign of the result can be used to
+ // determine if overflow was towards positive or negative infinity.
+ //
+ // On successful conversion, the least significant 32 bits of the result are
+ // equivalent to the ECMA-262 operation "ToInt32".
+ //
+ // Only public for the test code in test-code-stubs-arm64.cc.
+ void TryConvertDoubleToInt64(Register result,
+ DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
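+
+ // For example, a sketch of an ECMA-262 ToInt32-style truncation (register
+ // choices are illustrative):
+ //   TruncateDoubleToI(x0, d0);  // 'x0' receives the truncated result.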
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMA-262 9.5: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'object' and 'result' must
+ // be different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Label* not_int32);
+
+ // ---- Code generation helpers ----
+
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() const { return generating_stub_; }
+#if DEBUG
+ void set_allow_macro_instructions(bool value) {
+ allow_macro_instructions_ = value;
+ }
+ bool allow_macro_instructions() const { return allow_macro_instructions_; }
+#endif
+ bool use_real_aborts() const { return use_real_aborts_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
+ bool AllowThisStubCall(CodeStub* stub);
+
+ class NoUseRealAbortsScope {
+ public:
+ explicit NoUseRealAbortsScope(MacroAssembler* masm) :
+ saved_(masm->use_real_aborts_), masm_(masm) {
+ masm_->use_real_aborts_ = false;
+ }
+ ~NoUseRealAbortsScope() {
+ masm_->use_real_aborts_ = saved_;
+ }
+ private:
+ bool saved_;
+ MacroAssembler* masm_;
+ };
+
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void DebugBreak();
+
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. The allocated object is returned in result.
+ //
+ // If the new space is exhausted control continues at the gc_required label.
+ // In this case, the result and scratch registers may still be clobbered.
+ // If flags includes TAG_OBJECT, the result is tagged as a heap object.
+ void Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed.
+ // All registers are clobbered.
+ // If no heap_number_map register is provided, the function will take care of
+ // loading it.
+ void AllocateHeapNumber(Register result,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ CPURegister value = NoFPReg,
+ CPURegister heap_number_map = NoReg);
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+ // Try to get the function prototype of a function and put the value in the
+ // result register. Checks that the function really is a function and jumps
+ // to the miss label if the fast checks fail. The function register will be
+ // untouched; the other registers may be clobbered.
+ enum BoundFunctionAction {
+ kMissOnBoundFunction,
+ kDontMissOnBoundFunction
+ };
+
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ BoundFunctionAction action =
+ kDontMissOnBoundFunction);
+
+ // Compare object type for heap object. heap_object contains a non-Smi
+ // whose object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ // It leaves the map in the map register (unless the type_reg and map register
+ // are the same register). It leaves the heap object in the heap_object
+ // register unless the heap_object register is the same register as one of the
+ // other registers.
+ void CompareObjectType(Register heap_object,
+ Register map,
+ Register type_reg,
+ InstanceType type);
+
+
+ // Compare object type for heap object, and branch if equal (or not.)
+ // heap_object contains a non-Smi whose object type should be compared with
+ // the given type. This both sets the flags and leaves the object type in
+ // the type_reg register. It leaves the map in the map register (unless the
+ // type_reg and map register are the same register). It leaves the heap
+ // object in the heap_object register unless the heap_object register is the
+ // same register as one of the other registers.
+ void JumpIfObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_cond_pass,
+ Condition cond = eq);
+
+ void JumpIfNotObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_not_object);
+
+ // Compare instance type in a map. map contains a valid map object whose
+ // object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ void CompareInstanceType(Register map,
+ Register type_reg,
+ InstanceType type);
+
+ // Compare an object's map with the specified map. Condition flags are set
+ // with result of map compare.
+ void CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map);
+
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMap(Register obj_map,
+ Handle<Map> map);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // As above, but the map of the object is already loaded into obj_map, and is
+ // preserved.
+ void CheckMap(Register obj_map,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Test the bitfield of the heap object map with mask and set the condition
+ // flags. The object register is preserved.
+ void TestMapBitfield(Register object, uint64_t mask);
+
+ // Load the elements kind field from a map, and return it in the result
+ // register.
+ void LoadElementsKindFromMap(Register result, Register map);
+
+ // Compare the object in a register to a value from the root list.
+ void CompareRoot(const Register& obj, Heap::RootListIndex index);
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_equal);
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_not_equal);
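+
+ // For example, a sketch of testing x0 for the undefined value (the label
+ // name is illustrative):
+ //   JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &is_undefined);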
+
+ // Load and check the instance type of an object for being a unique name.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+ // Fall-through if the object was a name and jump on fail otherwise.
+ inline void IsObjectNameType(Register object, Register type, Label* fail);
+
+ inline void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check the instance type in the given map to see if it corresponds to a
+ // JS object type. Jump to the fail label if this is not the case and fall
+ // through otherwise. However, if the fail label is NULL, no branch is
+ // performed and the condition flags are updated instead. You can then test
+ // the "le" condition to check whether it is a valid JS object type.
+ inline void IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+ // Jumps to not_string or string, as appropriate. If the appropriate label is
+ // NULL, fall through.
+ inline void IsObjectJSStringType(Register object, Register type,
+ Label* not_string, Label* string = NULL);
+
+ // Compare the contents of a register with an operand, and branch to true,
+ // false or fall through, depending on condition.
+ void CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
+ // Test the bits of register defined by bit_pattern, and branch to
+ // if_any_set, if_all_clear or fall_through accordingly.
+ void TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through);
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map, Register scratch, Label* fail);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map, Register scratch, Label* fail);
+
+ // Check to see if number can be stored as a double in FastDoubleElements.
+ // If it can, store it at the index specified by key_reg in the array,
+ // otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ Label* fail,
+ int elements_offset = 0);
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support.
+
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask);
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss);
+
+ // Hash the integer value in the 'key' register.
+ // It uses the same algorithm as ComputeIntegerHash in utils.h.
+ void GetNumberHash(Register key, Register scratch);
+
+ // Load value from the dictionary.
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'elements' or 'key'.
+ // Unchanged on bailout so 'elements' or 'key' can be used
+ // in further computation.
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ // ---------------------------------------------------------------------------
+ // Frames.
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Returns map with validated enum cache in object register.
+ void CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver should point to the array object.
+ // If allocation info is present, the Z flag is set (so that the eq
+ // condition will pass).
+ void TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
+ &no_memento_found);
+ B(eq, memento_found);
+ Bind(&no_memento_found);
+ }
+
+ // The stack pointer has to switch between csp and jssp when setting up and
+ // destroying the exit frame. Hence preserving/restoring the registers is
+ // slightly more complicated than simple push/pop operations.
+ void ExitFramePreserveFPRegs();
+ void ExitFrameRestoreFPRegs();
+
+ // Generates function and stub prologue code.
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
+
+ // Enter exit frame. Exit frames are used when calling C code from generated
+ // (JavaScript) code.
+ //
+ // The stack pointer must be jssp on entry, and will be set to csp by this
+ // function. The frame pointer is also configured, but the only other
+ // registers modified by this function are the provided scratch register, and
+ // jssp.
+ //
+ // The 'extra_space' argument can be used to allocate some space in the exit
+ // frame that will be ignored by the GC. This space will be reserved in the
+ // bottom of the frame immediately above the return address slot.
+ //
+ // Set up a stack frame and registers as follows:
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: SPOffset (new csp)
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp-size]: Saved doubles, if save_doubles is true.
+ // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // Alignment padding, if necessary.
+ // csp -> csp[0]: Space reserved for the return address.
+ //
+ // This function also stores the new frame information in the top frame, so
+ // that the new frame becomes the current frame.
+ void EnterExitFrame(bool save_doubles,
+ const Register& scratch,
+ int extra_space = 0);
+
+ // Leave the current exit frame, after a C function has returned to generated
+ // (JavaScript) code.
+ //
+ // This effectively unwinds the operation of EnterExitFrame:
+ // * Preserved doubles are restored (if save_doubles is true).
+ // * The frame information is removed from the top frame.
+ // * The exit frame is dropped.
+ // * The stack pointer is reset to jssp.
+ //
+ // The stack pointer must be csp on entry.
+ void LeaveExitFrame(bool save_doubles,
+ const Register& scratch,
+ bool restore_context);
+
+ void LoadContext(Register dst, int context_chain_length);
+
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
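+
+ // For example, a sketch of dividing w1 by 3 (register choices are
+ // illustrative); the constant division is typically lowered to a multiply
+ // by a precomputed reciprocal and a shift rather than an sdiv:
+ //   TruncatingDiv(w0, w1, 3);  // w0 = w1 / 3, truncated.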
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // ---------------------------------------------------------------------------
+ // Garbage collector support (GC).
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch1,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+
+ void PushSafepointRegistersAndDoubles();
+ void PopSafepointRegistersAndDoubles();
+
+ // Store value in register src in the safepoint stack slot for register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst) {
+ Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+ }
+
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+ }
+
+ void CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set);
+
+ void CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear);
+
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
+ // Check if object is in new space and jump accordingly.
+ // Register 'object' is preserved.
+ void JumpIfNotInNewSpace(Register object,
+ Label* branch) {
+ InNewSpace(object, ne, branch);
+ }
+
+ void JumpIfInNewSpace(Register object,
+ Label* branch) {
+ InNewSpace(object, eq, branch);
+ }
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ smi_check,
+ pointers_to_here_check_for_value);
+ }
+
+ void RecordWriteForMap(
+ Register object,
+ Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp);
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* object_is_white_and_not_data);
+
+ // Detects conservatively whether an object is data-only, i.e. it does not
+ // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Helper for finding the mark bits for an address.
+ // Note that the behaviour slightly differs from other architectures.
+ // On exit:
+ // - addr_reg is unchanged.
+ // - The bitmap register points at the word with the mark bits.
+ // - The shift register contains the index of the first color bit for this
+ // object in the bitmap.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg);
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black);
+
+
+ // Get the location of a relocated constant (its address in the constant pool)
+ // from its load site.
+ void GetRelocatedValueLocation(Register ldr_location,
+ Register result);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging.
+
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason);
+ void AssertRegisterIsClear(Register reg, BailoutReason reason);
+ void AssertRegisterIsRoot(
+ Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
+ void AssertFastElements(Register elements);
+
+ // Abort if the specified register contains the invalid color bit pattern.
+ // The pattern must be in bits [1:0] of 'reg' register.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertHasValidColor(const Register& reg);
+
+ // Abort if 'object' register doesn't point to a string object.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertIsString(const Register& object);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason);
+ void CheckRegisterIsClear(Register reg, BailoutReason reason);
+
+ // Print a message to stderr and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch1,
+ Register scratch2,
+ Label* no_map_match);
+
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers function and
+ // map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch);
+
+ CPURegList* TmpList() { return &tmp_list_; }
+ CPURegList* FPTmpList() { return &fptmp_list_; }
+
+ static CPURegList DefaultTmpList();
+ static CPURegList DefaultFPTmpList();
+
+ // Like printf, but print at run-time from generated code.
+ //
+ // The caller must ensure that arguments for floating-point placeholders
+ // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
+ // placeholders are Registers.
+ //
+ // At the moment it is only possible to print the value of csp if it is the
+ // current stack pointer. Otherwise, the MacroAssembler will automatically
+ // update csp on every push (using BumpSystemStackPointer), so determining its
+ // value is difficult.
+ //
+ // Format placeholders that refer to more than one argument, or to a specific
+ // argument, are not supported. This includes formats like "%1$d" or "%.*d".
+ //
+ // This function automatically preserves caller-saved registers so that
+ // calling code can use Printf at any point without having to worry about
+ // corruption. The preservation mechanism generates a lot of code. If this is
+ // a problem, preserve the important registers manually and then call
+ // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
+ // implicitly preserved.
+ void Printf(const char * format,
+ CPURegister arg0 = NoCPUReg,
+ CPURegister arg1 = NoCPUReg,
+ CPURegister arg2 = NoCPUReg,
+ CPURegister arg3 = NoCPUReg);
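+
+ // For example, a sketch of printing an integer and a double (register and
+ // placeholder choices are illustrative):
+ //   Printf("count: %d, mean: %f\n", w0, d1);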
+
+ // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
+ //
+ // The return code from the system printf call will be returned in x0.
+ void PrintfNoPreserve(const char * format,
+ const CPURegister& arg0 = NoCPUReg,
+ const CPURegister& arg1 = NoCPUReg,
+ const CPURegister& arg2 = NoCPUReg,
+ const CPURegister& arg3 = NoCPUReg);
+
+ // Code ageing support functions.
+
+ // Code ageing on ARM64 works similarly to ARM. When V8 wants to mark a
+ // function as old, it replaces some of the function prologue (generated by
+ // FullCodeGenerator::Generate) with a call to a special stub (ultimately
+ // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
+ // function prologue to its initial young state (indicating that it has been
+ // recently run) and continues. A young function is therefore one which has a
+ // normal frame setup sequence, and an old function has a code age sequence
+ // which calls a code ageing stub.
+
+ // Set up a basic stack frame for young code (or code exempt from ageing) with
+ // type FUNCTION. It may be patched later for code ageing support. This is
+ // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
+ //
+ // This function takes an Assembler so it can be called from either a
+ // MacroAssembler or a PatchingAssembler context.
+ static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
+
+ // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
+ void EmitFrameSetupForCodeAgePatching();
+
+ // Emit a code age sequence that calls the relevant code age stub. The code
+ // generated by this sequence is expected to replace the code generated by
+ // EmitFrameSetupForCodeAgePatching, and represents an old function.
+ //
+ // If stub is NULL, this function generates the code age sequence but omits
+ // the stub address that is normally embedded in the instruction stream. This
+ // can be used by debug code to verify code age sequences.
+ static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
+
+ // Call EmitCodeAgeSequence from a MacroAssembler context.
+ void EmitCodeAgeSequence(Code* stub);
+
+ // Return true if the sequence is a young sequence generated by
+ // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
+ // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
+ static bool IsYoungSequence(Isolate* isolate, byte* sequence);
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ // Perform necessary maintenance operations before a push or after a pop.
+ //
+ // Note that size is specified in bytes.
+ void PushPreamble(Operand total_size);
+ void PopPostamble(Operand total_size);
+
+ void PushPreamble(int count, int size) { PushPreamble(count * size); }
+ void PopPostamble(int count, int size) { PopPostamble(count * size); }
+
+ private:
+ // Helpers for CopyFields.
+ // These each implement CopyFields in a different way.
+ void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4,
+ Register scratch5);
+ void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4);
+ void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3);
+
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+ // block of registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size,
+ const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3);
+ void PopHelper(int count, int size,
+ const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3);
+
+ // Call Printf. On a native build, a simple call will be generated, but if the
+ // simulator is being used then a suitable pseudo-instruction is used. The
+ // arguments and stack (csp) must be prepared by the caller as for a normal
+ // AAPCS64 call to 'printf'.
+ //
+ // The 'args' argument should point to an array of variable arguments in their
+ // proper PCS registers (and in calling order). The argument registers can
+ // have mixed types. The format string (x0) should not be included.
+ void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Try to represent a double as an int so that integer fast-paths may be
+ // used. Not every valid integer value is guaranteed to be caught.
+ // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
+ // is a W or X register.
+ //
+ // This does not distinguish between +0 and -0, so if this distinction is
+ // important it must be checked separately.
+ //
+ // On output the Z flag is set if the operation was successful.
+ void TryRepresentDoubleAsInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion = NULL,
+ Label* on_failed_conversion = NULL);
+
+ bool generating_stub_;
+#if DEBUG
+ // Tell whether any of the macro instructions can be used. When false the
+ // MacroAssembler will assert if a method which can emit a variable number
+ // of instructions is called.
+ bool allow_macro_instructions_;
+#endif
+ bool has_frame_;
+
+ // The Abort method should call a V8 runtime function, but the CallRuntime
+ // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
+ // use a simpler abort mechanism that doesn't depend on CEntryStub.
+ //
+ // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
+ // being generated.
+ bool use_real_aborts_;
+
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // The register to use as a stack pointer for stack operations.
+ Register sp_;
+
+ // Scratch registers available for use by the MacroAssembler.
+ CPURegList tmp_list_;
+ CPURegList fptmp_list_;
+
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+
+ public:
+ // Far branches resolving.
+ //
+ // The various classes of branch instructions with immediate offsets have
+ // different ranges. While the Assembler will fail to assemble a branch
+ // exceeding its range, the MacroAssembler offers a mechanism to resolve
+ // branches to too distant targets, either by tweaking the generated code to
+ // use branch instructions with wider ranges or generating veneers.
+ //
+ // Currently branches to distant targets are resolved using unconditional
+ // branch instructions with a range of +-128MB. If that becomes too little
+ // (!), the mechanism can be extended to generate special veneers for really
+ // far targets.
+
+ // Helps resolve branching to labels potentially out of range.
+ // If the label is not bound, it registers the information necessary to later
+ // be able to emit a veneer for this branch if necessary.
+ // If the label is bound, it returns true if the label (or the previous link
+ // in the label chain) is out of range. In that case the caller is responsible
+ // for generating appropriate code.
+ // Otherwise it returns false.
+ // This function also checks whether veneers need to be emitted.
+ bool NeedExtraInstructionsOrRegisterBranch(Label *label,
+ ImmBranchType branch_type);
+};
+
+
+// Use this scope when you need a one-to-one mapping between methods and
+// instructions. This scope prevents the MacroAssembler from being called and
+// literal pools from being emitted. It also asserts the number of instructions
+// emitted is what you specified when creating the scope.
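+//
+// A sketch of typical usage (instruction choices are illustrative):
+//
+//   {
+//     InstructionAccurateScope scope(masm, 2);
+//     masm->add(x0, x0, x1);  // Raw assembler instructions only.
+//     masm->sub(x2, x2, x3);
+//   }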
+class InstructionAccurateScope BASE_EMBEDDED {
+ public:
+ InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
+ : masm_(masm)
+#ifdef DEBUG
+ ,
+ size_(count * kInstructionSize)
+#endif
+ {
+ // Before blocking the const pool, see if it needs to be emitted.
+ masm_->CheckConstPool(false, true);
+ masm_->CheckVeneerPool(false, true);
+
+ masm_->StartBlockPools();
+#ifdef DEBUG
+ if (count != 0) {
+ masm_->bind(&start_);
+ }
+ previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
+ masm_->set_allow_macro_instructions(false);
+#endif
+ }
+
+ ~InstructionAccurateScope() {
+ masm_->EndBlockPools();
+#ifdef DEBUG
+ if (start_.is_bound()) {
+ ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
+ }
+ masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
+#endif
+ }
+
+ private:
+ MacroAssembler* masm_;
+#ifdef DEBUG
+ size_t size_;
+ Label start_;
+ bool previous_allow_macro_instructions_;
+#endif
+};
+
+
+// This scope utility allows scratch registers to be managed safely. The
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// registers. These registers can be allocated on demand, and will be returned
+// at the end of the scope.
+//
+// When the scope ends, the MacroAssembler's lists will be restored to their
+// original state, even if the lists were modified by some other means.
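+//
+// A sketch of typical usage (names are illustrative):
+//
+//   {
+//     UseScratchRegisterScope temps(masm);
+//     Register scratch = temps.AcquireX();
+//     masm->Mov(scratch, 42);
+//     // 'scratch' is returned to TmpList() when 'temps' goes out of scope.
+//   }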
+class UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(MacroAssembler* masm)
+ : available_(masm->TmpList()),
+ availablefp_(masm->FPTmpList()),
+ old_available_(available_->list()),
+ old_availablefp_(availablefp_->list()) {
+ ASSERT(available_->type() == CPURegister::kRegister);
+ ASSERT(availablefp_->type() == CPURegister::kFPRegister);
+ }
+
+ ~UseScratchRegisterScope();
+
+ // Take a register from the appropriate temps list. It will be returned
+ // automatically when the scope ends.
+ Register AcquireW() { return AcquireNextAvailable(available_).W(); }
+ Register AcquireX() { return AcquireNextAvailable(available_).X(); }
+ FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
+ FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+
+ Register UnsafeAcquire(const Register& reg) {
+ return Register(UnsafeAcquire(available_, reg));
+ }
+
+ Register AcquireSameSizeAs(const Register& reg);
+ FPRegister AcquireSameSizeAs(const FPRegister& reg);
+
+ private:
+ static CPURegister AcquireNextAvailable(CPURegList* available);
+ static CPURegister UnsafeAcquire(CPURegList* available,
+ const CPURegister& reg);
+
+ // Available scratch registers.
+ CPURegList* available_; // kRegister
+ CPURegList* availablefp_; // kFPRegister
+
+ // The state of the available lists at the start of this scope.
+ RegList old_available_; // kRegister
+ RegList old_availablefp_; // kFPRegister
+};
+
+
+inline MemOperand ContextMemOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+inline MemOperand GlobalObjectMemOperand() {
+ return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+}
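+
+// For example, a sketch of loading the global object (the register choice is
+// illustrative):
+//   masm->Ldr(x0, GlobalObjectMemOperand());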
+
+
+// Encode and decode information about patchable inline SMI checks.
+class InlineSmiCheckInfo {
+ public:
+ explicit InlineSmiCheckInfo(Address info);
+
+ bool HasSmiCheck() const {
+ return smi_check_ != NULL;
+ }
+
+ const Register& SmiRegister() const {
+ return reg_;
+ }
+
+ Instruction* SmiCheck() const {
+ return smi_check_;
+ }
+
+ // Use MacroAssembler::InlineData to emit information about patchable inline
+ // SMI checks. The caller may specify 'reg' as NoReg and an unbound
+ // 'smi_check' label to indicate that there is no inline SMI check. Note that
+ // 'reg' cannot be csp.
+ //
+ // The generated patch information can be read using the InlineSmiCheckInfo
+ // class.
+ static void Emit(MacroAssembler* masm, const Register& reg,
+ const Label* smi_check);
+
+ // Emit information to indicate that there is no inline SMI check.
+ static void EmitNotInlined(MacroAssembler* masm) {
+ Label unbound;
+ Emit(masm, NoReg, &unbound);
+ }
+
+ private:
+ Register reg_;
+ Instruction* smi_check_;
+
+ // Fields in the data encoded by InlineData.
+
+ // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
+ // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
+ // used in a patchable check. The Emit() method checks this.
+ //
+ // Note that the total size of the fields is restricted by the underlying
+ // storage size handled by the BitField class, which is a uint32_t.
+ class RegisterBits : public BitField<unsigned, 0, 5> {};
+ class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
+};
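+
+// Encoding sketch (illustrative, inferred from the bitfields above): a 32-bit
+// datum packs the SMI register code into bits [0..4] and the distance to the
+// SMI check into bits [5..31], roughly:
+//   uint32_t data = RegisterBits::encode(reg.code()) |
+//                   DeltaBits::encode(delta);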
+
+} } // namespace v8::internal
+
+#ifdef GENERATED_CODE_COVERAGE
+#error "Unsupported option"
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
diff --git a/chromium/v8/src/arm64/regexp-macro-assembler-arm64.cc b/chromium/v8/src/arm64/regexp-macro-assembler-arm64.cc
new file mode 100644
index 00000000000..a772ef26408
--- /dev/null
+++ b/chromium/v8/src/arm64/regexp-macro-assembler-arm64.cc
@@ -0,0 +1,1707 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/cpu-profiler.h"
+#include "src/unicode.h"
+#include "src/log.h"
+#include "src/code-stubs.h"
+#include "src/regexp-stack.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/arm64/regexp-macro-assembler-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention:
+ * - w19 : Used to temporarily store a value before a call to C code.
+ * See CheckNotBackReferenceIgnoreCase.
+ * - x20 : Pointer to the current code object (Code*),
+ * it includes the heap object tag.
+ * - w21 : Current position in input, as negative offset from
+ * the end of the string. Note that this is the byte
+ * offset, not the character offset!
+ * - w22 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - x23 : Points to tip of backtrack stack.
+ * - w24 : Position of the first character minus one: non_position_value.
+ * Used to initialize capture registers.
+ * - x25 : Address at the end of the input string: input_end.
+ * Points to byte after last character in input.
+ * - x26 : Address at the start of the input string: input_start.
+ * - w27 : Where to start in the input string.
+ * - x28 : Output array pointer.
+ * - x29/fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - x16/x17 : IP registers, used by assembler. Very volatile.
+ * - csp : Points to tip of C stack.
+ *
+ * - x0-x7 : Used as a cache to store 32 bit capture registers. These
+ * registers need to be retained every time a call to C code
+ * is done.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * Location Name Description
+ * (as referred to in
+ * the code)
+ *
+ * - fp[104] isolate Address of the current isolate.
+ * - fp[96] return_address Secondary link/return address
+ * used by an exit frame if this is a
+ * native call.
+ * ^^^ csp when called ^^^
+ * - fp[88] lr Return from the RegExp code.
+ * - fp[80] r29 Old frame pointer (CalleeSaved).
+ * - fp[0..72] r19-r28 Backup of CalleeSaved registers.
+ * - fp[-8] direct_call 1 => Direct call from JavaScript code.
+ * 0 => Call through the runtime system.
+ * - fp[-16] stack_base High end of the memory area to use as
+ * the backtracking stack.
+ * - fp[-24] output_size Output may fit multiple sets of matches.
+ * - fp[-32] input Handle containing the input string.
+ * - fp[-40] success_counter
+ * ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
+ * - fp[-44] register N Capture registers initialized with
+ * - fp[-48] register N + 1 non_position_value.
+ * ... The first kNumCachedRegisters (N) registers
+ * ... are cached in x0 to x7.
+ * ... Only positions must be stored in the first
+ * - ... num_saved_registers_ registers.
+ * - ...
+ * - register N + num_registers - 1
+ * ^^^^^^^^^ csp ^^^^^^^^^
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input,
+ * int start_offset,
+ * Address input_start,
+ * Address input_end,
+ * int* output,
+ * int output_size,
+ * Address stack_base,
+ * bool direct_call = false,
+ * Address secondary_return_address, // Only used by native call.
+ * Isolate* isolate)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in arm64/simulator-arm64.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
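+
+// Illustrative entry-point cast (a sketch; real callers go through
+// NativeRegExpMacroAssembler::Execute and CALL_GENERATED_REGEXP_CODE):
+//
+//   typedef int (*RegExpCodeEntry)(String* input, int start_offset,
+//                                  Address input_start, Address input_end,
+//                                  int* output, int output_size,
+//                                  Address stack_base, bool direct_call,
+//                                  Address secondary_return_address,
+//                                  Isolate* isolate);
+//   int result = reinterpret_cast<RegExpCodeEntry>(code_entry)(...);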
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(
+ Mode mode,
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_() {
+ __ SetStackPointer(csp);
+ ASSERT_EQ(0, registers_to_save % 2);
+ // We can cache at most 16 W registers in x0-x7.
+ STATIC_ASSERT(kNumCachedRegisters <= 16);
+ STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
+ __ B(&entry_label_); // We'll write the entry code later.
+ __ Bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+}
+
+int RegExpMacroAssemblerARM64::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerARM64::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Add(current_input_offset(),
+ current_input_offset(), by * char_size());
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
+ ASSERT((reg >= 0) && (reg < num_registers_));
+ if (by != 0) {
+ Register to_advance;
+ RegisterState register_state = GetRegisterState(reg);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(w10, register_location(reg));
+ __ Add(w10, w10, by);
+ __ Str(w10, register_location(reg));
+ break;
+ case CACHED_LSW:
+ to_advance = GetCachedRegister(reg);
+ __ Add(to_advance, to_advance, by);
+ break;
+ case CACHED_MSW:
+ to_advance = GetCachedRegister(reg);
+ __ Add(to_advance, to_advance,
+ static_cast<int64_t>(by) << kWRegSizeInBits);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
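+
+// Note on the cached-register layout used above: capture registers are 32-bit
+// values packed in pairs into x0-x7, so register 2*i occupies the low word
+// (CACHED_LSW) and register 2*i+1 the high word (CACHED_MSW) of x<i>;
+// registers beyond kNumCachedRegisters are STACKED in the frame below fp.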
+
+
+void RegExpMacroAssemblerARM64::Backtrack() {
+ CheckPreemption();
+ Pop(w10);
+ __ Add(x10, code_pointer(), Operand(w10, UXTW));
+ __ Br(x10);
+}
+
+
+void RegExpMacroAssemblerARM64::Bind(Label* label) {
+ __ Bind(label);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacter(uint32_t c, Label* on_equal) {
+ CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterGT(uc16 limit,
+ Label* on_greater) {
+ CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the input string?
+ CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
+ // If we did, are we still at the start of the input string?
+ __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, input_start());
+ BranchOrBacktrack(eq, on_at_start);
+ __ Bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the input string?
+ CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
+ // If we did, are we still at the start of the input string?
+ __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, input_start());
+ BranchOrBacktrack(ne, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterLT(uc16 limit, Label* on_less) {
+ CompareAndBranchOrBacktrack(current_character(), limit, lo, on_less);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string) {
+ // This method is only ever called from the cctests.
+
+ if (check_end_of_string) {
+ // Check that the last character of the required match is inside the string.
+ CheckPosition(cp_offset + str.length() - 1, on_failure);
+ }
+
+ Register characters_address = x11;
+
+ __ Add(characters_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+ if (cp_offset != 0) {
+ __ Add(characters_address, characters_address, cp_offset * char_size());
+ }
+
+ for (int i = 0; i < str.length(); i++) {
+ if (mode_ == ASCII) {
+ __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
+ ASSERT(str[i] <= String::kMaxOneByteCharCode);
+ } else {
+ __ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
+ }
+ CompareAndBranchOrBacktrack(w10, str[i], ne, on_failure);
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
+ __ Ldr(w10, MemOperand(backtrack_stackpointer()));
+ __ Cmp(current_input_offset(), w10);
+ __ Cset(x11, eq);
+ __ Add(backtrack_stackpointer(),
+ backtrack_stackpointer(), Operand(x11, LSL, kWRegSizeLog2));
+ BranchOrBacktrack(eq, on_equal);
+}
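+
+// The Cset/Add pair above pops the backtrack stack conditionally: x11 is 1
+// only when the top-of-stack offset equals the current position, so the
+// stack pointer advances by one W-sized entry exactly in the matching case.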
+
+void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ Register capture_start_offset = w10;
+ // Save the capture length in a callee-saved register so it will
+ // be preserved if we call a C helper.
+ Register capture_length = w19;
+ ASSERT(kCalleeSaved.IncludesAliasOf(capture_length));
+
+ // Find length of back-referenced capture.
+ ASSERT((start_reg % 2) == 0);
+ if (start_reg < kNumCachedRegisters) {
+ __ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
+ __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
+ } else {
+ __ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
+ }
+ __ Sub(capture_length, w11, capture_start_offset); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Cbz(capture_length, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ Register capture_start_address = x12;
+ Register capture_end_address = x13;
+ Register current_position_address = x14;
+
+ __ Add(capture_start_address,
+ input_end(),
+ Operand(capture_start_offset, SXTW));
+ __ Add(capture_end_address,
+ capture_start_address,
+ Operand(capture_length, SXTW));
+ __ Add(current_position_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+
+ Label loop;
+ __ Bind(&loop);
+ __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+ __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+ __ Cmp(w10, w11);
+ __ B(eq, &loop_check);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Orr(w10, w10, 0x20); // Convert capture character to lower-case.
+ __ Orr(w11, w11, 0x20); // Also convert input character.
+ __ Cmp(w11, w10);
+ __ B(ne, &fail);
+ __ Sub(w10, w10, 'a');
+ __ Cmp(w10, 'z' - 'a'); // Is w10 a lowercase letter?
+ __ B(ls, &loop_check); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Sub(w10, w10, 224 - 'a');
+ __ Cmp(w10, 254 - 224);
+ __ Ccmp(w10, 247 - 224, ZFlag, ls); // Check for 247.
+ __ B(eq, &fail); // Not a Latin-1 letter.
+
+ __ Bind(&loop_check);
+ __ Cmp(capture_start_address, capture_end_address);
+ __ B(lt, &loop);
+ __ B(&success);
+
+ __ Bind(&fail);
+ BranchOrBacktrack(al, on_no_match);
+
+ __ Bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+ __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+ // The current input offset should be <= 0, and fit in a W register.
+ __ Check(le, kOffsetOutOfRange);
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ int argument_count = 4;
+
+ // The cached registers need to be retained.
+ CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
+ ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+ __ PushCPURegList(cached_registers);
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // x0: Address byte_offset1 - Address captured substring's start.
+ // x1: Address byte_offset2 - Address of current character position.
+ // w2: size_t byte_length - length of capture in bytes(!)
+ // x3: Isolate* isolate
+
+ // Address of start of capture.
+ __ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
+ // Length of capture.
+ __ Mov(w2, capture_length);
+ // Address of current input position.
+ __ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
+ // Isolate.
+ __ Mov(x3, ExternalReference::isolate_address(isolate()));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Check if function returned non-zero for success or zero for failure.
+ CompareAndBranchOrBacktrack(x0, 0, eq, on_no_match);
+ // On success, increment position by length of capture.
+ __ Add(current_input_offset(), current_input_offset(), capture_length);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ }
+
+ __ Bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerARM64::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ Register capture_start_address = x12;
+ Register capture_end_address = x13;
+ Register current_position_address = x14;
+ Register capture_length = w15;
+
+ // Find length of back-referenced capture.
+ ASSERT((start_reg % 2) == 0);
+ if (start_reg < kNumCachedRegisters) {
+ __ Mov(x10, GetCachedRegister(start_reg));
+ __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
+ } else {
+ __ Ldp(w11, w10, capture_location(start_reg, x10));
+ }
+ __ Sub(capture_length, w11, w10); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Cbz(capture_length, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+
+ // Compute pointers to match string and capture string
+ __ Add(capture_start_address, input_end(), Operand(w10, SXTW));
+ __ Add(capture_end_address,
+ capture_start_address,
+ Operand(capture_length, SXTW));
+ __ Add(current_position_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+
+ Label loop;
+ __ Bind(&loop);
+ if (mode_ == ASCII) {
+ __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+ __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
+ __ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
+ }
+ __ Cmp(w10, w11);
+ BranchOrBacktrack(ne, on_no_match);
+ __ Cmp(capture_start_address, capture_end_address);
+ __ B(lt, &loop);
+
+ // Move current character position to position after match.
+ __ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+ __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+ // The current input offset should be <= 0, and fit in a W register.
+ __ Check(le, kOffsetOutOfRange);
+ }
+ __ Bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotCharacter(unsigned c,
+ Label* on_not_equal) {
+ CompareAndBranchOrBacktrack(current_character(), c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(w10, current_character(), mask);
+ CompareAndBranchOrBacktrack(w10, c, eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal) {
+ __ And(w10, current_character(), mask);
+ CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
+ __ Sub(w10, current_character(), minus);
+ __ And(w10, w10, mask);
+ CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_in_range) {
+ __ Sub(w10, current_character(), from);
+ // Unsigned lower-or-same condition.
+ CompareAndBranchOrBacktrack(w10, to - from, ls, on_in_range);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterNotInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_not_in_range) {
+ __ Sub(w10, current_character(), from);
+ // Unsigned higher condition.
+ CompareAndBranchOrBacktrack(w10, to - from, hi, on_not_in_range);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckBitInTable(
+ Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ Mov(x11, Operand(table));
+ if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) {
+ __ And(w10, current_character(), kTableMask);
+ __ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ Add(w10, current_character(), ByteArray::kHeaderSize - kHeapObjectTag);
+ }
+ __ Ldrb(w11, MemOperand(x11, w10, UXTW));
+ CompareAndBranchOrBacktrack(w11, 0, ne, on_bit_set);
+}
+
+
+bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check.
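+ // e.g. for 'd' below, the unsigned comparison (c - '0') <= ('9' - '0')
+ // matches exactly the characters '0'..'9'.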
+ switch (type) {
+ case 's':
+ // Match space characters.
+ if (mode_ == ASCII) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ // Check for ' ' or 0x00a0.
+ __ Cmp(current_character(), ' ');
+ __ Ccmp(current_character(), 0x00a0, ZFlag, ne);
+ __ B(eq, &success);
+ // Check range 0x09..0x0d.
+ __ Sub(w10, current_character(), '\t');
+ CompareAndBranchOrBacktrack(w10, '\r' - '\t', hi, on_no_match);
+ __ Bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9').
+ __ Sub(w10, current_character(), '0');
+ CompareAndBranchOrBacktrack(w10, '9' - '0', hi, on_no_match);
+ return true;
+ case 'D':
+ // Match ASCII non-digits.
+ __ Sub(w10, current_character(), '0');
+ CompareAndBranchOrBacktrack(w10, '9' - '0', ls, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Here we emit the conditional branch only once at the end to make branch
+ // prediction more efficient, even though we could branch out of here
+ // as soon as a character matches.
+ __ Cmp(current_character(), 0x0a);
+ __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ if (mode_ == UC16) {
+ __ Sub(w10, current_character(), 0x2028);
+ // If the Z flag was set we clear the flags to force a branch.
+ __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+ // ls -> !((C==1) && (Z==0))
+ BranchOrBacktrack(ls, on_no_match);
+ } else {
+ BranchOrBacktrack(eq, on_no_match);
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // We have to check all 4 newline characters before emitting
+ // the conditional branch.
+ __ Cmp(current_character(), 0x0a);
+ __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ if (mode_ == UC16) {
+ __ Sub(w10, current_character(), 0x2028);
+ // If the Z flag was set we clear the flags to force a fall-through.
+ __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+ // hi -> (C==1) && (Z==0)
+ BranchOrBacktrack(hi, on_no_match);
+ } else {
+ BranchOrBacktrack(ne, on_no_match);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ Mov(x10, map);
+ __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+ CompareAndBranchOrBacktrack(w10, 0, eq, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ Cmp(current_character(), 'z');
+ __ B(hi, &done);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ Mov(x10, map);
+ __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+ CompareAndBranchOrBacktrack(w10, 0, ne, on_no_match);
+ __ Bind(&done);
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::Fail() {
+ __ Mov(w0, FAILURE);
+ __ B(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
+ Label return_w0;
+ // Finalize code - write the entry point code now that we know how many
+ // registers we need.
+
+ // Entry code:
+ __ Bind(&entry_label_);
+
+ // Arguments on entry:
+ // x0: String* input
+ // x1: int start_offset
+ // x2: byte* input_start
+ // x3: byte* input_end
+ // x4: int* output array
+ // x5: int output array size
+ // x6: Address stack_base
+ // x7: int direct_call
+
+ // The stack pointer should be csp on entry.
+ // csp[8]: address of the current isolate
+ // csp[0]: secondary link/return address used by native call
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Push registers on the stack; only push the argument registers we need.
+ CPURegList argument_registers(x0, x5, x6, x7);
+
+ CPURegList registers_to_retain = kCalleeSaved;
+ ASSERT(kCalleeSaved.Count() == 11);
+ registers_to_retain.Combine(lr);
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ PushCPURegList(registers_to_retain);
+ __ PushCPURegList(argument_registers);
+
+ // Set frame pointer in place.
+ __ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
+
+ // Initialize callee-saved registers.
+ __ Mov(start_offset(), w1);
+ __ Mov(input_start(), x2);
+ __ Mov(input_end(), x3);
+ __ Mov(output_array(), x4);
+
+ // Set the number of registers we will need to allocate, that is:
+ // - success_counter (X register)
+ // - (num_registers_ - kNumCachedRegisters) (W registers)
+ int num_wreg_to_allocate = num_registers_ - kNumCachedRegisters;
+ // Do not allocate registers on the stack if they can all be cached.
+ if (num_wreg_to_allocate < 0) { num_wreg_to_allocate = 0; }
+ // Make room for the success_counter.
+ num_wreg_to_allocate += 2;
+
+ // Make sure the stack alignment will be respected.
+ int alignment = masm_->ActivationFrameAlignment();
+ ASSERT_EQ(alignment % 16, 0);
+ int align_mask = (alignment / kWRegSize) - 1;
+ num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
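+ // For example, with a 16-byte activation frame alignment and 4-byte W
+ // registers, align_mask is 3 and the rounding above keeps
+ // num_wreg_to_allocate a multiple of 4.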
+
+ // Check if we have space on the stack.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Subs(x10, csp, x10);
+
+ // Handle it if the stack pointer is already below the stack limit.
+ __ B(ls, &stack_limit_hit);
+
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Cmp(x10, num_wreg_to_allocate * kWRegSize);
+ __ B(hs, &stack_ok);
+
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
+
+ __ Bind(&stack_limit_hit);
+ CallCheckStackGuardState(x10);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Cbnz(w0, &return_w0);
+
+ __ Bind(&stack_ok);
+
+ // Allocate space on stack.
+ __ Claim(num_wreg_to_allocate, kWRegSize);
+
+ // Initialize success_counter with 0.
+ __ Str(wzr, MemOperand(frame_pointer(), kSuccessCounter));
+
+ // Find negative length (offset of start relative to end).
+ __ Sub(x10, input_start(), input_end());
+ if (masm_->emit_debug_code()) {
+ // Check that the input string length is < 2^30.
+ __ Neg(x11, x10);
+ __ Cmp(x11, (1<<30) - 1);
+ __ Check(ls, kInputStringTooLong);
+ }
+ __ Mov(current_input_offset(), w10);
+
+ // The non-position value is used as a clearing value for the
+ // capture registers; it corresponds to the position of the first character
+ // minus one.
+ __ Sub(non_position_value(), current_input_offset(), char_size());
+ __ Sub(non_position_value(), non_position_value(),
+ Operand(start_offset(), LSL, (mode_ == UC16) ? 1 : 0));
+ // We can store this value twice in an X register for initializing
+ // on-stack registers later.
+ __ Orr(twice_non_position_value(),
+ non_position_value().X(),
+ Operand(non_position_value().X(), LSL, kWRegSizeInBits));
+
+ // Initialize code pointer register.
+ __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Cbnz(start_offset(), &load_char_start_regexp);
+ __ Mov(current_character(), '\n');
+ __ B(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ Bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&start_regexp);
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) {
+ ClearRegisters(0, num_saved_registers_ - 1);
+ }
+
+ // Initialize backtrack stack pointer.
+ __ Ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackBase));
+
+ // Execute
+ __ B(&start_label_);
+
+ if (backtrack_label_.is_linked()) {
+ __ Bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ if (success_label_.is_linked()) {
+ Register first_capture_start = w15;
+
+ // Save captures when successful.
+ __ Bind(&success_label_);
+
+ if (num_saved_registers_ > 0) {
+ // V8 expects the output to be an int32_t array.
+ Register capture_start = w12;
+ Register capture_end = w13;
+ Register input_length = w14;
+
+ // Copy captures to output.
+
+ // Get string length.
+ __ Sub(x10, input_end(), input_start());
+ if (masm_->emit_debug_code()) {
+ // Check that the input string length is < 2^30.
+ __ Cmp(x10, (1<<30) - 1);
+ __ Check(ls, kInputStringTooLong);
+ }
+ // input_start is offset by start_offset on entry. We need to include
+ // this offset when computing the length of the whole string.
+ if (mode_ == UC16) {
+ __ Add(input_length, start_offset(), Operand(w10, LSR, 1));
+ } else {
+ __ Add(input_length, start_offset(), w10);
+ }
+
+ // Copy the results to the output array from the cached registers first.
+ for (int i = 0;
+ (i < num_saved_registers_) && (i < kNumCachedRegisters);
+ i += 2) {
+ __ Mov(capture_start.X(), GetCachedRegister(i));
+ __ Lsr(capture_end.X(), capture_start.X(), kWRegSizeInBits);
+ if ((i == 0) && global_with_zero_length_check()) {
+ // Keep capture start for the zero-length check later.
+ __ Mov(first_capture_start, capture_start);
+ }
+ // Offsets need to be relative to the start of the string.
+ if (mode_ == UC16) {
+ __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ }
+
+ // Only carry on if there are more than kNumCachedRegisters capture
+ // registers.
+ int num_registers_left_on_stack =
+ num_saved_registers_ - kNumCachedRegisters;
+ if (num_registers_left_on_stack > 0) {
+ Register base = x10;
+ // There is always an even number of capture registers. A pair of
+ // registers determines one match, with two offsets.
+ ASSERT_EQ(0, num_registers_left_on_stack % 2);
+ __ Add(base, frame_pointer(), kFirstCaptureOnStack);
+
+ // We can unroll the loop here; we should not unroll for fewer than 2
+ // registers.
+ STATIC_ASSERT(kNumRegistersToUnroll > 2);
+ if (num_registers_left_on_stack <= kNumRegistersToUnroll) {
+ for (int i = 0; i < num_registers_left_on_stack / 2; i++) {
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ if ((i == 0) && global_with_zero_length_check()) {
+ // Keep capture start for the zero-length check later.
+ __ Mov(first_capture_start, capture_start);
+ }
+ // Offsets need to be relative to the start of the string.
+ if (mode_ == UC16) {
+ __ Add(capture_start,
+ input_length,
+ Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ }
+ } else {
+ Label loop, start;
+ __ Mov(x11, num_registers_left_on_stack);
+
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ if (global_with_zero_length_check()) {
+ __ Mov(first_capture_start, capture_start);
+ }
+ __ B(&start);
+
+ __ Bind(&loop);
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ __ Bind(&start);
+ if (mode_ == UC16) {
+ __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ __ Sub(x11, x11, 2);
+ __ Cbnz(x11, &loop);
+ }
+ }
+ }
+
+ if (global()) {
+ Register success_counter = w0;
+ Register output_size = x10;
+ // Restart matching if the regular expression is flagged as global.
+
+ // Increment success counter.
+ __ Ldr(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+ __ Add(success_counter, success_counter, 1);
+ __ Str(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Ldr(output_size, MemOperand(frame_pointer(), kOutputSize));
+ __ Sub(output_size, output_size, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ __ Cmp(output_size, num_saved_registers_);
+ __ B(lt, &return_w0);
+
+ // The output pointer is already set to the next field in the output
+ // array.
+ // Update output size on the frame before we restart matching.
+ __ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ __ Cmp(current_input_offset(), first_capture_start);
+ // Not a zero-length match, restart.
+ __ B(ne, &load_char_start_regexp);
+ // Offset from the end is zero if we already reached the end.
+ __ Cbz(current_input_offset(), &return_w0);
+ // Advance current position after a zero-length match.
+ __ Add(current_input_offset(),
+ current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
+ __ B(&load_char_start_regexp);
+ } else {
+ __ Mov(w0, SUCCESS);
+ }
+ }
+
+ if (exit_label_.is_linked()) {
+ // Exit and return w0
+ __ Bind(&exit_label_);
+ if (global()) {
+ __ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
+ }
+ }
+
+ __ Bind(&return_w0);
+
+ // Set stack pointer back to the first register to retain.
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Mov(csp, fp);
+ __ AssertStackConsistency();
+
+ // Restore registers.
+ __ PopCPURegList(registers_to_retain);
+
+ __ Ret();
+
+ Label exit_with_exception;
+ // Registers x0 to x7 are used to store the first captures, they need to be
+ // retained over calls to C++ code.
+ CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
+ ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+
+ if (check_preempt_label_.is_linked()) {
+ __ Bind(&check_preempt_label_);
+ SaveLinkRegister();
+ // The cached registers need to be retained.
+ __ PushCPURegList(cached_registers);
+ CallCheckStackGuardState(x10);
+ // Returning from the regexp code restores the stack (csp <- fp)
+ // so we don't need to drop the link register from it before exiting.
+ __ Cbnz(w0, &return_w0);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ RestoreLinkRegister();
+ __ Ret();
+ }
+
+ if (stack_overflow_label_.is_linked()) {
+ __ Bind(&stack_overflow_label_);
+ SaveLinkRegister();
+ // The cached registers need to be retained.
+ __ PushCPURegList(cached_registers);
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
+ __ Add(x1, frame_pointer(), kStackBase);
+ __ Mov(x0, backtrack_stackpointer());
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, 3);
+ // If the return value is NULL, we have failed to grow the stack, and
+ // must exit with a stack-overflow exception.
+ // Returning from the regexp code restores the stack (csp <- fp)
+ // so we don't need to drop the link register from it before exiting.
+ __ Cbz(w0, &exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ Mov(backtrack_stackpointer(), x0);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ RestoreLinkRegister();
+ __ Ret();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ __ Bind(&exit_with_exception);
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerARM64::GoTo(Label* to) {
+ BranchOrBacktrack(al, to);
+}
+
+void RegExpMacroAssemblerARM64::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
+ Register to_compare = GetRegister(reg, w10);
+ CompareAndBranchOrBacktrack(to_compare, comparand, ge, if_ge);
+}
+
+
+void RegExpMacroAssemblerARM64::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
+ Register to_compare = GetRegister(reg, w10);
+ CompareAndBranchOrBacktrack(to_compare, comparand, lt, if_lt);
+}
+
+
+void RegExpMacroAssemblerARM64::IfRegisterEqPos(int reg, Label* if_eq) {
+ Register to_compare = GetRegister(reg, w10);
+ __ Cmp(to_compare, current_input_offset());
+ BranchOrBacktrack(eq, if_eq);
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerARM64::Implementation() {
+ return kARM64Implementation;
+}
+
+
+void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ // TODO(pielan): Make sure long strings are caught before this, and not
+ // just asserted in debug mode.
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ // Be sane! (And ensure that an int32_t can be used to index the string)
+ ASSERT(cp_offset < (1<<30));
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerARM64::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM64::PopRegister(int register_index) {
+ Pop(w10);
+ StoreRegister(register_index, w10);
+}
+
+
+void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ Adr(x10, label, MacroAssembler::kAdrFar);
+ __ Sub(x10, x10, code_pointer());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(x10, kWRegMask);
+ // The code offset has to fit in a W register.
+ __ Check(ls, kOffsetOutOfRange);
+ }
+ }
+ Push(w10);
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerARM64::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM64::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ Register to_push = GetRegister(register_index, w10);
+ Push(to_push);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
+ Register cached_register;
+ RegisterState register_state = GetRegisterState(reg);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(current_input_offset(), register_location(reg));
+ break;
+ case CACHED_LSW:
+ cached_register = GetCachedRegister(reg);
+ __ Mov(current_input_offset(), cached_register.W());
+ break;
+ case CACHED_MSW:
+ cached_register = GetCachedRegister(reg);
+ __ Lsr(current_input_offset().X(), cached_register, kWRegSizeInBits);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::ReadStackPointerFromRegister(int reg) {
+ Register read_from = GetRegister(reg, w10);
+ __ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
+ __ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
+}
+
+
+void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Cmp(current_input_offset(), -by * char_size());
+ __ B(ge, &after_position);
+ __ Mov(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ Register set_to = wzr;
+ if (to != 0) {
+ set_to = w10;
+ __ Mov(set_to, to);
+ }
+ StoreRegister(register_index, set_to);
+}
+
+
+bool RegExpMacroAssemblerARM64::Succeed() {
+ __ B(&success_label_);
+ return global();
+}
+
+
+void RegExpMacroAssemblerARM64::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ Register position = current_input_offset();
+ if (cp_offset != 0) {
+ position = w10;
+ __ Add(position, current_input_offset(), cp_offset * char_size());
+ }
+ StoreRegister(reg, position);
+}
+
+
+void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ int num_registers = reg_to - reg_from + 1;
+
+ // If the first capture register is cached in a hardware register but is
+ // not aligned on a 64-bit boundary (i.e., it is the high word of an X
+ // register), we need to clear it individually first.
+ if ((reg_from < kNumCachedRegisters) && ((reg_from % 2) != 0)) {
+ StoreRegister(reg_from, non_position_value());
+ num_registers--;
+ reg_from++;
+ }
+
+ // Clear cached registers in pairs as far as possible.
+ while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
+ ASSERT(GetRegisterState(reg_from) == CACHED_LSW);
+ __ Mov(GetCachedRegister(reg_from), twice_non_position_value());
+ reg_from += 2;
+ num_registers -= 2;
+ }
+
+ if ((num_registers % 2) == 1) {
+ StoreRegister(reg_from, non_position_value());
+ num_registers--;
+ reg_from++;
+ }
+
+ if (num_registers > 0) {
+ // If there are some remaining registers, they are stored on the stack.
+ ASSERT(reg_from >= kNumCachedRegisters);
+
+ // Move down the indexes of the registers on stack to get the correct offset
+ // in memory.
+ reg_from -= kNumCachedRegisters;
+ reg_to -= kNumCachedRegisters;
+ // We should not unroll the loop for less than 2 registers.
+ STATIC_ASSERT(kNumRegistersToUnroll > 2);
+ // We position the base pointer to (reg_from + 1).
+ int base_offset = kFirstRegisterOnStack -
+ kWRegSize - (kWRegSize * reg_from);
+ if (num_registers > kNumRegistersToUnroll) {
+ Register base = x10;
+ __ Add(base, frame_pointer(), base_offset);
+
+ Label loop;
+ __ Mov(x11, num_registers);
+ __ Bind(&loop);
+ __ Str(twice_non_position_value(),
+ MemOperand(base, -kPointerSize, PostIndex));
+ __ Sub(x11, x11, 2);
+ __ Cbnz(x11, &loop);
+ } else {
+ for (int i = reg_from; i <= reg_to; i += 2) {
+ __ Str(twice_non_position_value(),
+ MemOperand(frame_pointer(), base_offset));
+ base_offset -= kWRegSize * 2;
+ }
+ }
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
+ __ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
+ __ Sub(x10, backtrack_stackpointer(), x10);
+ if (masm_->emit_debug_code()) {
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The stack offset needs to fit in a W register.
+ __ Check(eq, kOffsetOutOfRange);
+ }
+ StoreRegister(reg, w10);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return *reinterpret_cast<T*>(re_frame + frame_offset);
+}
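+
+// Used as, for example, frame_entry<int>(re_frame, kDirectCall) to read the
+// direct_call flag from the RegExp frame; because it returns a reference, it
+// can also be assigned through to update a slot in place.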
+
+
+int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame,
+ int start_offset,
+ const byte** input_start,
+ const byte** input_end) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+ // If it is not a real stack overflow, the stack guard was used to interrupt
+ // execution for another purpose.
+
+ // If this is a direct call from JavaScript retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles(isolate);
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInput));
+
+ // Current string.
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ Object* result = isolate->stack_guard()->HandleInterrupts();
+
+ if (*code_handle != re_code) { // Return address no longer valid
+ int delta = code_handle->address() - re_code->address();
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ Handle<String> subject_tmp = subject;
+ int slice_offset = 0;
+
+ // Extract the underlying string and the slice offset.
+ if (StringShape(*subject_tmp).IsCons()) {
+ subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+ } else if (StringShape(*subject_tmp).IsSliced()) {
+ SlicedString* slice = SlicedString::cast(*subject_tmp);
+ subject_tmp = Handle<String>(slice->parent());
+ slice_offset = slice->offset();
+ }
+
+ // String might have changed.
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ // If we changed between an ASCII and an UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject_tmp).IsSequential() ||
+ StringShape(*subject_tmp).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = *input_start;
+
+ // Find the current start address of the same character at the current string
+ // position.
+ const byte* new_address = StringCharacterPosition(*subject_tmp,
+ start_offset + slice_offset);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = *input_end;
+ int byte_length = static_cast<int>(end_address - start_address);
+ frame_entry<const String*>(re_frame, kInput) = *subject;
+ *input_start = new_address;
+ *input_end = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInput) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+ // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInput) = *subject;
+ }
+
+ return 0;
+}
+
+
+void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ CompareAndBranchOrBacktrack(current_input_offset(),
+ -cp_offset * char_size(),
+ ge,
+ on_outside_input);
+}
+
+
+bool RegExpMacroAssemblerARM64::CanReadUnaligned() {
+ // TODO(pielan): See whether or not we should disable unaligned accesses.
+ return !slow_safe();
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
+ // Allocate space on the stack to store the return address. The
+ // CheckStackGuardState C++ function will overwrite it if the code
+ // has moved. Allocate extra space for 2 arguments passed by pointers.
+ // AAPCS64 requires the stack to be 16 byte aligned.
+ int alignment = masm_->ActivationFrameAlignment();
+ ASSERT_EQ(alignment % 16, 0);
+ int align_mask = (alignment / kXRegSize) - 1;
+ int xreg_to_claim = (3 + align_mask) & ~align_mask;
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Claim(xreg_to_claim);
+
+ // CheckStackGuardState needs the end and start addresses of the input string.
+ __ Poke(input_end(), 2 * kPointerSize);
+ __ Add(x5, csp, 2 * kPointerSize);
+ __ Poke(input_start(), kPointerSize);
+ __ Add(x4, csp, kPointerSize);
+
+ __ Mov(w3, start_offset());
+ // RegExp code frame pointer.
+ __ Mov(x2, frame_pointer());
+ // Code* of self.
+ __ Mov(x1, Operand(masm_->CodeObject()));
+
+ // We need to pass a pointer to the return address as first argument.
+ // The DirectCEntry stub will place the return address on the stack before
+ // calling so the stack pointer will point to it.
+ __ Mov(x0, csp);
+
+ ExternalReference check_stack_guard_state =
+ ExternalReference::re_check_stack_guard_state(isolate());
+ __ Mov(scratch, check_stack_guard_state);
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm_, scratch);
+
+ // The input string may have been moved in memory, we need to reload it.
+ __ Peek(input_start(), kPointerSize);
+ __ Peek(input_end(), 2 * kPointerSize);
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Drop(xreg_to_claim);
+
+ // Reload the Code pointer.
+ __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+void RegExpMacroAssemblerARM64::BranchOrBacktrack(Condition condition,
+ Label* to) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ B(to);
+ return;
+ }
+ if (to == NULL) {
+ to = &backtrack_label_;
+ }
+ // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+ Condition inverted_condition = NegateCondition(condition);
+ Label no_branch;
+ __ B(inverted_condition, &no_branch);
+ __ B(to);
+ __ Bind(&no_branch);
+}
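+
+// The inverted branch above synthesizes a long-range conditional branch:
+// a conditional B only has imm19 range, so we branch over an unconditional
+// B that can reach any label.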
+
+void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to) {
+ if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
+ if (to == NULL) {
+ to = &backtrack_label_;
+ }
+ // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+ Label no_branch;
+ if (condition == eq) {
+ __ Cbnz(reg, &no_branch);
+ } else {
+ __ Cbz(reg, &no_branch);
+ }
+ __ B(to);
+ __ Bind(&no_branch);
+ } else {
+ __ Cmp(reg, immediate);
+ BranchOrBacktrack(condition, to);
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Cmp(csp, x10);
+ CallIf(&check_preempt_label_, ls);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Cmp(backtrack_stackpointer(), x10);
+ CallIf(&stack_overflow_label_, ls);
+}
+
+
+void RegExpMacroAssemblerARM64::Push(Register source) {
+ ASSERT(source.Is32Bits());
+ ASSERT(!source.is(backtrack_stackpointer()));
+ __ Str(source,
+ MemOperand(backtrack_stackpointer(),
+ -static_cast<int>(kWRegSize),
+ PreIndex));
+}
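+
+// Note: the backtrack stack grows downwards; each entry is a single W-sized
+// value - either a code offset pushed by PushBacktrack or a saved position
+// or register value.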
+
+
+void RegExpMacroAssemblerARM64::Pop(Register target) {
+ ASSERT(target.Is32Bits());
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ Ldr(target,
+ MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex));
+}
+
+
+Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
+ ASSERT(register_index < kNumCachedRegisters);
+ return Register::Create(register_index / 2, kXRegSizeInBits);
+}
+
+
+Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
+ Register maybe_result) {
+ ASSERT(maybe_result.Is32Bits());
+ ASSERT(register_index >= 0);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ Register result;
+ RegisterState register_state = GetRegisterState(register_index);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(maybe_result, register_location(register_index));
+ result = maybe_result;
+ break;
+ case CACHED_LSW:
+ result = GetCachedRegister(register_index).W();
+ break;
+ case CACHED_MSW:
+ __ Lsr(maybe_result.X(), GetCachedRegister(register_index),
+ kWRegSizeInBits);
+ result = maybe_result;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ ASSERT(result.Is32Bits());
+ return result;
+}
+
+
+void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
+ Register source) {
+ ASSERT(source.Is32Bits());
+ ASSERT(register_index >= 0);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+
+ Register cached_register;
+ RegisterState register_state = GetRegisterState(register_index);
+ switch (register_state) {
+ case STACKED:
+ __ Str(source, register_location(register_index));
+ break;
+ case CACHED_LSW:
+ cached_register = GetCachedRegister(register_index);
+ if (!source.Is(cached_register.W())) {
+ __ Bfi(cached_register, source.X(), 0, kWRegSizeInBits);
+ }
+ break;
+ case CACHED_MSW:
+ cached_register = GetCachedRegister(register_index);
+ __ Bfi(cached_register, source.X(), kWRegSizeInBits, kWRegSizeInBits);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
+ Label skip_call;
+ if (condition != al) __ B(&skip_call, NegateCondition(condition));
+ __ Bl(to);
+ __ Bind(&skip_call);
+}
+
+
+void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Pop(lr, xzr);
+ __ Add(lr, lr, Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerARM64::SaveLinkRegister() {
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Sub(lr, lr, Operand(masm_->CodeObject()));
+ __ Push(xzr, lr);
+}
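+
+// The link register is saved as an offset from the code object and rebased on
+// restore, so the saved value stays valid even if the code object is moved by
+// the garbage collector.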
+
+
+MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ ASSERT(register_index >= kNumCachedRegisters);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ register_index -= kNumCachedRegisters;
+ int offset = kFirstRegisterOnStack - register_index * kWRegSize;
+ return MemOperand(frame_pointer(), offset);
+}
+
+MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
+ Register scratch) {
+ ASSERT(register_index < (1<<30));
+ ASSERT(register_index < num_saved_registers_);
+ ASSERT(register_index >= kNumCachedRegisters);
+ ASSERT_EQ(register_index % 2, 0);
+ register_index -= kNumCachedRegisters;
+ int offset = kFirstCaptureOnStack - register_index * kWRegSize;
+ // capture_location is used with Stp instructions to load/store 2 registers.
+ // The immediate field in the encoding is limited to 7 bits (signed).
+ if (is_int7(offset)) {
+ return MemOperand(frame_pointer(), offset);
+ } else {
+ __ Add(scratch, frame_pointer(), offset);
+ return MemOperand(scratch);
+ }
+}
+
+void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+
+ // The ldr, str, ldrh and strh instructions can do unaligned accesses if the
+ // CPU and the operating system running on the target allow it.
+ // If unaligned loads and stores are not supported, this function must only
+ // be used to load a single character at a time.
+
+ // ARMv8 supports unaligned accesses, but V8 or the kernel can decide to
+ // disable them.
+ // TODO(pielan): See whether or not we should disable unaligned accesses.
+ if (!CanReadUnaligned()) {
+ ASSERT(characters == 1);
+ }
+
+ if (cp_offset != 0) {
+ if (masm_->emit_debug_code()) {
+ __ Mov(x10, cp_offset * char_size());
+ __ Add(x10, x10, Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The offset needs to fit in a W register.
+ __ Check(eq, kOffsetOutOfRange);
+ } else {
+ __ Add(w10, current_input_offset(), cp_offset * char_size());
+ }
+ offset = w10;
+ }
+
+ if (mode_ == ASCII) {
+ if (characters == 4) {
+ __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else if (characters == 2) {
+ __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else {
+ ASSERT(characters == 1);
+ __ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ if (characters == 2) {
+ __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else {
+ ASSERT(characters == 1);
+ __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+ }
+ }
+}
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/regexp-macro-assembler-arm64.h b/chromium/v8/src/arm64/regexp-macro-assembler-arm64.h
new file mode 100644
index 00000000000..c319eae3c59
--- /dev/null
+++ b/chromium/v8/src/arm64/regexp-macro-assembler-arm64.h
@@ -0,0 +1,292 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
+#define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
+
+#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerARM64(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerARM64();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from,
+ uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from,
+ uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame,
+ int start_offset,
+ const byte** input_start,
+ const byte** input_end);
+
+ private:
+  // Above the frame pointer - stored registers and stack-passed parameters.
+  // Callee-saved registers x19-x29, where x29 is the old frame pointer.
+ static const int kCalleeSavedRegisters = 0;
+ // Return address.
+ // It is placed above the 11 callee-saved registers.
+ static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+ // Stack parameter placed by caller.
+ static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
+ static const int kStackBase = kDirectCall - kPointerSize;
+ static const int kOutputSize = kStackBase - kPointerSize;
+ static const int kInput = kOutputSize - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessCounter = kInput - kPointerSize;
+ // First position register address on the stack. Following positions are
+ // below it. A position is a 32 bit value.
+ static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSize;
+  // A capture is a 64 bit value holding two positions.
+ static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSize;
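+  // As an illustration, assuming kPointerSize == 8, kWRegSize == 4 and
+  // kXRegSize == 8, the offsets from the frame pointer evaluate to:
+  //   fp + 104 : isolate                           (kIsolate)
+  //   fp +  96 : secondary return address          (kSecondaryReturnAddress)
+  //   fp +  88 : return address                    (kReturnAddress)
+  //   fp +   0 : base of 11 callee-saved registers (kCalleeSavedRegisters)
+  //   fp -   8 : direct_call parameter             (kDirectCall)
+  //   fp -  16 : stack base                        (kStackBase)
+  //   fp -  24 : output size                       (kOutputSize)
+  //   fp -  32 : input string                      (kInput)
+  //   fp -  40 : success counter                   (kSuccessCounter)
+  //   fp -  44 : first position register           (kFirstRegisterOnStack)
+  //   fp -  48 : first capture                     (kFirstCaptureOnStack)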
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+  // When initializing registers to a non-position value we can unroll
+  // the loop. This is the limit up to which the loop is unrolled.
+ static const int kNumRegistersToUnroll = 16;
+
+  // We are using x0 to x7 as a register cache. Each hardware register must
+  // contain one capture, that is, two 32 bit position registers. We can
+  // cache at most 16 position registers.
+ static const int kNumCachedRegisters = 16;
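+  // For example, position register 2 is cached in the least significant
+  // word of x1 and register 3 in its most significant word, while register
+  // 16 is the first one kept on the stack (see GetRegisterState below).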
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // Location of a 32 bit position register.
+ MemOperand register_location(int register_index);
+
+ // Location of a 64 bit capture, combining two position registers.
+ MemOperand capture_location(int register_index, Register scratch);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ Register current_input_offset() { return w21; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ Register current_character() { return w22; }
+
+ // Register holding address of the end of the input string.
+ Register input_end() { return x25; }
+
+ // Register holding address of the start of the input string.
+ Register input_start() { return x26; }
+
+ // Register holding the offset from the start of the string where we should
+ // start matching.
+ Register start_offset() { return w27; }
+
+ // Pointer to the output array's first element.
+ Register output_array() { return x28; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ Register backtrack_stackpointer() { return x23; }
+
+ // Register holding pointer to the current code object.
+ Register code_pointer() { return x20; }
+
+ // Register holding the value used for clearing capture registers.
+ Register non_position_value() { return w24; }
+  // The top 32 bits of this register hold the same value again, so that
+  // more than one register can be cleared at a time.
+ Register twice_non_position_value() { return x24; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
+ int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to);
+
+  // Compares reg against the immediate before calling BranchOrBacktrack.
+ // It makes use of the Cbz and Cbnz instructions.
+ void CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to);
+
+ inline void CallIf(Label* to, Condition condition);
+
+ // Save and restore the link register on the stack in a way that
+ // is GC-safe.
+ inline void SaveLinkRegister();
+ inline void RestoreLinkRegister();
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ // This state indicates where the register actually is.
+ enum RegisterState {
+ STACKED, // Resides in memory.
+ CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
+ CACHED_MSW // Most Significant Word of a 64 bit hardware register.
+ };
+
+ RegisterState GetRegisterState(int register_index) {
+ ASSERT(register_index >= 0);
+ if (register_index >= kNumCachedRegisters) {
+ return STACKED;
+ } else {
+ if ((register_index % 2) == 0) {
+ return CACHED_LSW;
+ } else {
+ return CACHED_MSW;
+ }
+ }
+ }
+
+ // Store helper that takes the state of the register into account.
+ inline void StoreRegister(int register_index, Register source);
+
+ // Returns a hardware W register that holds the value of the capture
+ // register.
+ //
+ // This function will try to use an existing cache register (w0-w7) for the
+ // result. Otherwise, it will load the value into maybe_result.
+ //
+ // If the returned register is anything other than maybe_result, calling code
+ // must not write to it.
+ inline Register GetRegister(int register_index, Register maybe_result);
+
+  // Returns the hardware register (x0-x7) holding the value of the capture
+ // register.
+ // This assumes that the state of the register is not STACKED.
+ inline Register GetCachedRegister(int register_index);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+
+#endif // V8_INTERPRETED_REGEXP
+
+
+}} // namespace v8::internal
+
+#endif // V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
diff --git a/chromium/v8/src/arm64/simulator-arm64.cc b/chromium/v8/src/arm64/simulator-arm64.cc
new file mode 100644
index 00000000000..488b91e6235
--- /dev/null
+++ b/chromium/v8/src/arm64/simulator-arm64.cc
@@ -0,0 +1,3736 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+#include <cmath>
+#include <cstdarg>
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/disasm.h"
+#include "src/assembler.h"
+#include "src/arm64/decoder-arm64-inl.h"
+#include "src/arm64/simulator-arm64.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(USE_SIMULATOR)
+
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+
+// Helpers for colors.
+// Depending on your terminal configuration, the colour names may not match the
+// observed colours.
+#define COLOUR(colour_code) "\033[" colour_code "m"
+#define BOLD(colour_code) "1;" colour_code
+#define NORMAL ""
+#define GREY "30"
+#define GREEN "32"
+#define ORANGE "33"
+#define BLUE "34"
+#define PURPLE "35"
+#define INDIGO "36"
+#define WHITE "37"
+typedef char const * const TEXT_COLOUR;
+TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
+TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR(BOLD(GREY)) : "";
+TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(BOLD(WHITE)) : "";
+TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR(BOLD(BLUE)) : "";
+TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(BOLD(INDIGO)) : "";
+TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
+TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(BOLD(PURPLE)) : "";
+TEXT_COLOUR clr_memory_value = FLAG_log_colour ? COLOUR(BOLD(GREEN)) : "";
+TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR(GREEN) : "";
+TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
+TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(ORANGE) : "";
+TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
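+// For example, COLOUR(BOLD(BLUE)) expands to the ANSI escape sequence
+// "\033[1;34m", and COLOUR(NORMAL) to the reset sequence "\033[m".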
+
+
+// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
+void Simulator::TraceSim(const char* format, ...) {
+ if (FLAG_trace_sim) {
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VFPrint(stream_, format, arguments);
+ va_end(arguments);
+ }
+}
+
+
+const Instruction* Simulator::kEndOfSimAddress = NULL;
+
+
+void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
+ int width = msb - lsb + 1;
+ ASSERT(is_uintn(bits, width) || is_intn(bits, width));
+
+ bits <<= lsb;
+ uint32_t mask = ((1 << width) - 1) << lsb;
+ ASSERT((mask & write_ignore_mask_) == 0);
+
+ value_ = (value_ & ~mask) | (bits & mask);
+}
+
+
+SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
+ case FPCR:
+ return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
+ default:
+ UNREACHABLE();
+ return SimSystemRegister();
+ }
+}
+
+
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ExternalReference::set_redirector(isolate, &RedirectExternalReference);
+}
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == NULL) {
+ if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) {
+ sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate);
+ } else {
+ sim = new Decoder<Simulator>();
+ sim->isolate_ = isolate;
+ }
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+
+void Simulator::CallVoid(byte* entry, CallArgument* args) {
+ int index_x = 0;
+ int index_d = 0;
+
+ std::vector<int64_t> stack_args(0);
+ for (int i = 0; !args[i].IsEnd(); i++) {
+ CallArgument arg = args[i];
+ if (arg.IsX() && (index_x < 8)) {
+ set_xreg(index_x++, arg.bits());
+ } else if (arg.IsD() && (index_d < 8)) {
+ set_dreg_bits(index_d++, arg.bits());
+ } else {
+ ASSERT(arg.IsD() || arg.IsX());
+ stack_args.push_back(arg.bits());
+ }
+ }
+
+ // Process stack arguments, and make sure the stack is suitably aligned.
+ uintptr_t original_stack = sp();
+ uintptr_t entry_stack = original_stack -
+ stack_args.size() * sizeof(stack_args[0]);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
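+  // For example, with a 16-byte activation frame alignment the mask is -16
+  // (...fffffff0): an entry_stack of 0x7fff0038 is rounded down to
+  // 0x7fff0030.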
+  char* stack = reinterpret_cast<char*>(entry_stack);
+ std::vector<int64_t>::const_iterator it;
+ for (it = stack_args.begin(); it != stack_args.end(); it++) {
+ memcpy(stack, &(*it), sizeof(*it));
+ stack += sizeof(*it);
+ }
+
+ ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
+ set_sp(entry_stack);
+
+ // Call the generated code.
+ set_pc(entry);
+ set_lr(kEndOfSimAddress);
+ CheckPCSComplianceAndRun();
+
+ set_sp(original_stack);
+}
+
+
+int64_t Simulator::CallInt64(byte* entry, CallArgument* args) {
+ CallVoid(entry, args);
+ return xreg(0);
+}
+
+
+double Simulator::CallDouble(byte* entry, CallArgument* args) {
+ CallVoid(entry, args);
+ return dreg(0);
+}
+
+
+int64_t Simulator::CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+                          Object* recv,
+ int64_t argc,
+ Object*** argv) {
+ CallArgument args[] = {
+ CallArgument(function_entry),
+ CallArgument(func),
+    CallArgument(recv),
+ CallArgument(argc),
+ CallArgument(argv),
+ CallArgument::End()
+ };
+ return CallInt64(entry, args);
+}
+
+int64_t Simulator::CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate) {
+ CallArgument args[] = {
+ CallArgument(input),
+ CallArgument(start_offset),
+ CallArgument(input_start),
+ CallArgument(input_end),
+ CallArgument(output),
+ CallArgument(output_size),
+ CallArgument(stack_base),
+ CallArgument(direct_call),
+ CallArgument(return_address),
+ CallArgument(isolate),
+ CallArgument::End()
+ };
+ return CallInt64(entry, args);
+}
+
+
+void Simulator::CheckPCSComplianceAndRun() {
+#ifdef DEBUG
+ CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
+ CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
+
+ int64_t saved_registers[kNumberOfCalleeSavedRegisters];
+ uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters];
+
+ CPURegList register_list = kCalleeSaved;
+ CPURegList fpregister_list = kCalleeSavedFP;
+
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+    // x31 is not in the callee-saved list, so there is no need to specify
+    // whether we want the stack pointer or the zero register.
+ saved_registers[i] = xreg(register_list.PopLowestIndex().code());
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ saved_fpregisters[i] =
+ dreg_bits(fpregister_list.PopLowestIndex().code());
+ }
+ int64_t original_stack = sp();
+#endif
+ // Start the simulation!
+ Run();
+#ifdef DEBUG
+ CHECK_EQ(original_stack, sp());
+ // Check that callee-saved registers have been preserved.
+ register_list = kCalleeSaved;
+ fpregister_list = kCalleeSavedFP;
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+ CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ ASSERT(saved_fpregisters[i] ==
+ dreg_bits(fpregister_list.PopLowestIndex().code()));
+ }
+
+  // Corrupt the caller-saved registers, except for the return registers.
+
+ // In theory x0 to x7 can be used for return values, but V8 only uses x0, x1
+  // for now.
+ register_list = kCallerSaved;
+ register_list.Remove(x0);
+ register_list.Remove(x1);
+
+ // In theory d0 to d7 can be used for return values, but V8 only uses d0
+  // for now.
+ fpregister_list = kCallerSavedFP;
+ fpregister_list.Remove(d0);
+
+ CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+#endif
+}
+
+
+#ifdef DEBUG
+// The least significant byte of the corruption value holds the corresponding
+// register's code.
+void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
+ if (list->type() == CPURegister::kRegister) {
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_xreg(code, value | code);
+ }
+ } else {
+ ASSERT(list->type() == CPURegister::kFPRegister);
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_dreg_bits(code, value | code);
+ }
+ }
+}
+
+
+void Simulator::CorruptAllCallerSavedCPURegisters() {
+  // CorruptRegisters alters its parameters, so copy them first.
+ CPURegList register_list = kCallerSaved;
+ CPURegList fpregister_list = kCallerSavedFP;
+
+ CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+}
+#endif
+
+
+// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+ intptr_t new_sp = sp() - 2 * kXRegSize;
+ uintptr_t* alignment_slot =
+ reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
+ memcpy(alignment_slot, &kSlotsZapValue, kPointerSize);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ memcpy(stack_slot, &address, kPointerSize);
+ set_sp(new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ intptr_t current_sp = sp();
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+ set_sp(current_sp + 2 * kXRegSize);
+ return address;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+ // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+ // pushing values.
+ return reinterpret_cast<uintptr_t>(stack_limit_) + 1024;
+}
+
+
+Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
+ Isolate* isolate, FILE* stream)
+ : decoder_(decoder),
+ last_debugger_input_(NULL),
+ log_parameters_(NO_PARAM),
+ isolate_(isolate) {
+ // Setup the decoder.
+ decoder_->AppendVisitor(this);
+
+ Init(stream);
+
+ if (FLAG_trace_sim) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ log_parameters_ = LOG_ALL;
+ }
+
+ if (FLAG_log_instruction_stats) {
+ instrument_ = new Instrument(FLAG_log_instruction_file,
+ FLAG_log_instruction_period);
+ decoder_->AppendVisitor(instrument_);
+ }
+}
+
+
+Simulator::Simulator()
+ : decoder_(NULL),
+ last_debugger_input_(NULL),
+ log_parameters_(NO_PARAM),
+ isolate_(NULL) {
+ Init(stdout);
+ CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats);
+}
+
+
+void Simulator::Init(FILE* stream) {
+ ResetState();
+
+ // Allocate and setup the simulator stack.
+ stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
+ stack_ = new byte[stack_size_];
+ stack_limit_ = stack_ + stack_protection_size_;
+ byte* tos = stack_ + stack_size_ - stack_protection_size_;
+ // The stack pointer must be 16 bytes aligned.
+ set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
+
+ stream_ = stream;
+ print_disasm_ = new PrintDisassembler(stream_);
+
+ // The debugger needs to disassemble code without the simulator executing an
+ // instruction, so we create a dedicated decoder.
+ disassembler_decoder_ = new Decoder<DispatchingDecoderVisitor>();
+ disassembler_decoder_->AppendVisitor(print_disasm_);
+}
+
+
+void Simulator::ResetState() {
+ // Reset the system registers.
+ nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
+ fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
+
+  // Zap the general purpose registers with a recognizable value.
+ pc_ = NULL;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ set_xreg(i, 0xbadbeef);
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
+ set_dreg_bits(i, 0x7ff000007f800001UL);
+ }
+ // Returning to address 0 exits the Simulator.
+ set_lr(kEndOfSimAddress);
+
+ // Reset debug helpers.
+  breakpoints_.clear();  // Note: clear(), since empty() only queries the container.
+  break_on_next_ = false;
+}
+
+
+Simulator::~Simulator() {
+ delete[] stack_;
+ if (FLAG_log_instruction_stats) {
+ delete instrument_;
+ }
+ delete disassembler_decoder_;
+ delete print_disasm_;
+ DeleteArray(last_debugger_input_);
+ delete decoder_;
+}
+
+
+void Simulator::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ ExecuteInstruction();
+ }
+}
+
+
+void Simulator::RunFrom(Instruction* start) {
+ set_pc(start);
+ Run();
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a hlt instruction that is handled by the simulator. We write
+// the original destination of the jump just at a known offset from the hlt
+// instruction so the simulator knows what to call.
+class Redirection {
+ public:
+ Redirection(void* external_function, ExternalReference::Type type)
+ : external_function_(external_function),
+ type_(type),
+ next_(NULL) {
+ redirect_call_.SetInstructionBits(
+ HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ // TODO(all): Simulator flush I cache
+ isolate->set_simulator_redirection(this);
+ }
+
+ void* address_of_redirect_call() {
+ return reinterpret_cast<void*>(&redirect_call_);
+ }
+
+ template <typename T>
+ T external_function() { return reinterpret_cast<T>(external_function_); }
+
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) {
+ ASSERT_EQ(current->type(), type);
+ return current;
+ }
+ }
+ return new Redirection(external_function, type);
+ }
+
+ static Redirection* FromHltInstruction(Instruction* redirect_call) {
+ char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
+ char* addr_of_redirection =
+ addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(int64_t reg) {
+ Redirection* redirection =
+ FromHltInstruction(reinterpret_cast<Instruction*>(reg));
+ return redirection->external_function<void*>();
+ }
+
+ private:
+ void* external_function_;
+ Instruction redirect_call_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+};
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair structure.
+// The simulator assumes all runtime calls return two 64-bit values. If they
+// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
+struct ObjectPair {
+ int64_t res0;
+ int64_t res1;
+};
+
+
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3,
+ int64_t arg4,
+ int64_t arg5,
+ int64_t arg6,
+ int64_t arg7);
+
+typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPCall)(double arg1);
+typedef double (*SimulatorRuntimeFPIntCall)(double arg1, int32_t arg2);
+
+// This signature supports direct calls into an API function's native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
+ void* arg2);
+
+void Simulator::DoRuntimeCall(Instruction* instr) {
+ Redirection* redirection = Redirection::FromHltInstruction(instr);
+
+ // The called C code might itself call simulated code, so any
+ // caller-saved registers (including lr) could still be clobbered by a
+ // redirected call.
+ Instruction* return_address = lr();
+
+ int64_t external = redirection->external_function<int64_t>();
+
+ TraceSim("Call to host function at %p\n",
+ redirection->external_function<void*>());
+
+ // SP must be 16-byte-aligned at the call interface.
+ bool stack_alignment_exception = ((sp() & 0xf) != 0);
+ if (stack_alignment_exception) {
+ TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
+ FATAL("ALIGNMENT EXCEPTION");
+ }
+
+ switch (redirection->type()) {
+ default:
+ TraceSim("Type: Unknown.\n");
+ UNREACHABLE();
+ break;
+
+ case ExternalReference::BUILTIN_CALL: {
+ // Object* f(v8::internal::Arguments).
+ TraceSim("Type: BUILTIN_CALL\n");
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+
+ // We don't know how many arguments are being passed, but we can
+ // pass 8 without touching the stack. They will be ignored by the
+ // host function if they aren't used.
+ TraceSim("Arguments: "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64,
+ xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64 "}\n",
+ result.res0, result.res1);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result.res0);
+ set_xreg(1, result.res1);
+ break;
+ }
+
+ case ExternalReference::DIRECT_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&)
+ TraceSim("Type: DIRECT_API_CALL\n");
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
+ target(xreg(0));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ // int f(double, double)
+ TraceSim("Type: BUILTIN_COMPARE_CALL\n");
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ int64_t result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %" PRId64 "\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_CALL: {
+ // double f(double)
+ TraceSim("Type: BUILTIN_FP_CALL\n");
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ TraceSim("Argument: %f\n", dreg(0));
+ double result = target(dreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ // double f(double, double)
+ TraceSim("Type: BUILTIN_FP_FP_CALL\n");
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ double result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ // double f(double, int)
+ TraceSim("Type: BUILTIN_FP_INT_CALL\n");
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
+ double result = target(dreg(0), wreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::DIRECT_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info)
+ TraceSim("Type: DIRECT_GETTER_CALL\n");
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n",
+ xreg(0), xreg(1));
+ target(xreg(0), xreg(1));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
+ TraceSim("Type: PROFILING_API_CALL\n");
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ void* arg1 = Redirection::ReverseRedirection(xreg(1));
+ TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
+ target(xreg(0), arg1);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info,
+ // AccessorGetterCallback callback)
+ TraceSim("Type: PROFILING_GETTER_CALL\n");
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
+ external);
+ void* arg2 = Redirection::ReverseRedirection(xreg(2));
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n",
+ xreg(0), xreg(1), arg2);
+ target(xreg(0), xreg(1), arg2);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+ }
+
+ set_lr(return_address);
+ set_pc(return_address);
+}
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
+ return redirection->address_of_redirect_call();
+}
+
+
+const char* Simulator::xreg_names[] = {
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
+
+const char* Simulator::wreg_names[] = {
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
+
+const char* Simulator::sreg_names[] = {
+"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+
+const char* Simulator::dreg_names[] = {
+"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+const char* Simulator::vreg_names[] = {
+"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+
+const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return wreg_names[code];
+}
+
+
+const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return xreg_names[code];
+}
+
+
+const char* Simulator::SRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return sreg_names[code];
+}
+
+
+const char* Simulator::DRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return dreg_names[code];
+}
+
+
+const char* Simulator::VRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return vreg_names[code];
+}
+
+
+int Simulator::CodeFromName(const char* name) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if ((strcmp(xreg_names[i], name) == 0) ||
+ (strcmp(wreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if ((strcmp(vreg_names[i], name) == 0) ||
+ (strcmp(dreg_names[i], name) == 0) ||
+ (strcmp(sreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
+ return kSPRegInternalCode;
+ }
+ return -1;
+}
+
+
+// Helpers ---------------------------------------------------------------------
+template <typename T>
+T Simulator::AddWithCarry(bool set_flags,
+ T src1,
+ T src2,
+ T carry_in) {
+ typedef typename make_unsigned<T>::type unsignedT;
+ ASSERT((carry_in == 0) || (carry_in == 1));
+
+ T signed_sum = src1 + src2 + carry_in;
+ T result = signed_sum;
+
+ bool N, Z, C, V;
+
+ // Compute the C flag
+ unsignedT u1 = static_cast<unsignedT>(src1);
+ unsignedT u2 = static_cast<unsignedT>(src2);
+ unsignedT urest = std::numeric_limits<unsignedT>::max() - u1;
+ C = (u2 > urest) || (carry_in && (((u2 + 1) > urest) || (u2 > (urest - 1))));
+
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
+
+ N = CalcNFlag(result);
+ Z = CalcZFlag(result);
+
+ if (set_flags) {
+ nzcv().SetN(N);
+ nzcv().SetZ(Z);
+ nzcv().SetC(C);
+ nzcv().SetV(V);
+ }
+ return result;
+}
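+// Two illustrative 32-bit cases of the computation above:
+//   0xffffffff + 0x00000001 + 0  ->  0x00000000 with N=0 Z=1 C=1 V=0
+//   0x7fffffff + 0x00000001 + 0  ->  0x80000000 with N=1 Z=0 C=0 V=1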
+
+
+template<typename T>
+void Simulator::AddSubWithCarry(Instruction* instr) {
+ T op2 = reg<T>(instr->Rm());
+ T new_val;
+
+ if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
+ op2 = ~op2;
+ }
+
+ new_val = AddWithCarry<T>(instr->FlagsUpdate(),
+ reg<T>(instr->Rn()),
+ op2,
+ nzcv().C());
+
+ set_reg<T>(instr->Rd(), new_val);
+}
+
+template <typename T>
+T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
+ typedef typename make_unsigned<T>::type unsignedT;
+
+ if (amount == 0) {
+ return value;
+ }
+
+ switch (shift_type) {
+ case LSL:
+ return value << amount;
+ case LSR:
+ return static_cast<unsignedT>(value) >> amount;
+ case ASR:
+ return value >> amount;
+ case ROR:
+ return (static_cast<unsignedT>(value) >> amount) |
+ ((value & ((1L << amount) - 1L)) <<
+ (sizeof(unsignedT) * 8 - amount));
+ default:
+ UNIMPLEMENTED();
+ return 0;
+ }
+}
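+// For example, with a 32-bit operand, LSR of 0x0000000f by 4 yields
+// 0x00000000, while ROR rotates the low four bits to the top and yields
+// 0xf0000000.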
+
+
+template <typename T>
+T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
+ const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8;
+ const unsigned kSignExtendHShift = (sizeof(T) - 2) * 8;
+ const unsigned kSignExtendWShift = (sizeof(T) - 4) * 8;
+
+ switch (extend_type) {
+ case UXTB:
+ value &= kByteMask;
+ break;
+ case UXTH:
+ value &= kHalfWordMask;
+ break;
+ case UXTW:
+ value &= kWordMask;
+ break;
+ case SXTB:
+ value = (value << kSignExtendBShift) >> kSignExtendBShift;
+ break;
+ case SXTH:
+ value = (value << kSignExtendHShift) >> kSignExtendHShift;
+ break;
+ case SXTW:
+ value = (value << kSignExtendWShift) >> kSignExtendWShift;
+ break;
+ case UXTX:
+ case SXTX:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return value << left_shift;
+}
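+// For example, ExtendValue<int64_t>(0x80, SXTB) sign-extends the low byte
+// and yields -128 (0xffffffffffffff80), whereas UXTB masks it and yields
+// 0x80.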
+
+
+template <typename T>
+void Simulator::Extract(Instruction* instr) {
+ unsigned lsb = instr->ImmS();
+ T op2 = reg<T>(instr->Rm());
+ T result = op2;
+
+ if (lsb) {
+ T op1 = reg<T>(instr->Rn());
+ result = op2 >> lsb | (op1 << ((sizeof(T) * 8) - lsb));
+ }
+ set_reg<T>(instr->Rd(), result);
+}
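+// For example, with 32-bit operands op1 = 0xaabbccdd, op2 = 0x11223344 and
+// lsb = 8, the concatenation op1:op2 shifted right by 8 bits yields
+// 0xdd112233.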
+
+
+template<> double Simulator::FPDefaultNaN<double>() const {
+ return kFP64DefaultNaN;
+}
+
+
+template<> float Simulator::FPDefaultNaN<float>() const {
+ return kFP32DefaultNaN;
+}
+
+
+void Simulator::FPCompare(double val0, double val1) {
+ AssertSupportedFPCR();
+
+ // TODO(jbramley): This assumes that the C++ implementation handles
+ // comparisons in the way that we expect (as per AssertSupportedFPCR()).
+ if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
+ nzcv().SetRawValue(FPUnorderedFlag);
+ } else if (val0 < val1) {
+ nzcv().SetRawValue(FPLessThanFlag);
+ } else if (val0 > val1) {
+ nzcv().SetRawValue(FPGreaterThanFlag);
+ } else if (val0 == val1) {
+ nzcv().SetRawValue(FPEqualFlag);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::SetBreakpoint(Instruction* location) {
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if (breakpoints_.at(i).location == location) {
+ PrintF(stream_,
+ "Existing breakpoint at %p was %s\n",
+ reinterpret_cast<void*>(location),
+ breakpoints_.at(i).enabled ? "disabled" : "enabled");
+ breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
+ return;
+ }
+ }
+ Breakpoint new_breakpoint = {location, true};
+ breakpoints_.push_back(new_breakpoint);
+ PrintF(stream_,
+ "Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
+}
+
+
+void Simulator::ListBreakpoints() {
+ PrintF(stream_, "Breakpoints:\n");
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ PrintF(stream_, "%p : %s\n",
+ reinterpret_cast<void*>(breakpoints_.at(i).location),
+ breakpoints_.at(i).enabled ? "enabled" : "disabled");
+ }
+}
+
+
+void Simulator::CheckBreakpoints() {
+ bool hit_a_breakpoint = false;
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if ((breakpoints_.at(i).location == pc_) &&
+ breakpoints_.at(i).enabled) {
+ hit_a_breakpoint = true;
+ // Disable this breakpoint.
+ breakpoints_.at(i).enabled = false;
+ }
+ }
+ if (hit_a_breakpoint) {
+ PrintF(stream_, "Hit and disabled a breakpoint at %p.\n",
+ reinterpret_cast<void*>(pc_));
+ Debug();
+ }
+}
+
+
+void Simulator::CheckBreakNext() {
+ // If the current instruction is a BL, insert a breakpoint just after it.
+ if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
+ SetBreakpoint(pc_->following());
+ break_on_next_ = false;
+ }
+}
+
+
+void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
+ Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
+ for (Instruction* pc = start; pc < end; pc = pc->following()) {
+ disassembler_decoder_->Decode(pc);
+ }
+}
+
+
+void Simulator::PrintSystemRegisters(bool print_all) {
+ static bool first_run = true;
+
+ static SimSystemRegister last_nzcv;
+ if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
+ fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+ clr_normal);
+ }
+ last_nzcv = nzcv();
+
+ static SimSystemRegister last_fpcr;
+ if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+    ASSERT(fpcr().RMode() < (sizeof(rmode) / sizeof(rmode[0])));
+ fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
+ clr_normal);
+ }
+ last_fpcr = fpcr();
+
+ first_run = false;
+}
+
+
+void Simulator::PrintRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static int64_t last_regs[kNumberOfRegisters];
+
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (print_all_regs || first_run ||
+ (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
+ fprintf(stream_,
+ "# %s%4s:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name,
+ XRegNameForCode(i, Reg31IsStackPointer),
+ clr_reg_value,
+ xreg(i, Reg31IsStackPointer),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = xreg(i, Reg31IsStackPointer);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintFPRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static uint64_t last_regs[kNumberOfFPRegisters];
+
+ // Print as many rows of registers as necessary, keeping each individual
+ // register in the same column each time (to make it easy to visually scan
+ // for changes).
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
+ fprintf(stream_,
+ "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+ clr_fpreg_name,
+ VRegNameForCode(i),
+ clr_fpreg_value,
+ dreg_bits(i),
+ clr_normal,
+ clr_fpreg_name,
+ DRegNameForCode(i),
+ clr_fpreg_value,
+ dreg(i),
+ clr_fpreg_name,
+ SRegNameForCode(i),
+ clr_fpreg_value,
+ sreg(i),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = dreg_bits(i);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintProcessorState() {
+ PrintSystemRegisters();
+ PrintRegisters();
+ PrintFPRegisters();
+}
+
+
+void Simulator::PrintWrite(uint8_t* address,
+ uint64_t value,
+ unsigned num_bytes) {
+  // The format is "# value -> address". It is not used directly in the
+  // fprintf call because compilers tend to struggle with the parameterized
+  // width (%0*).
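+  // For example, a two-byte write of 0xbeef to the illustrative address
+  // 0x00007f0000001230 is printed as:
+  //   # 0xbeef -> 0x00007f0000001230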
+ const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
+ fprintf(stream_,
+ format,
+ clr_memory_value,
+          num_bytes * 2,  // The width in hexadecimal characters.
+ value,
+ clr_normal,
+ clr_memory_address,
+ address,
+ clr_normal);
+}
+
+
+// Visitors --------------------------------------------------------------------
+
+void Simulator::VisitUnimplemented(Instruction* instr) {
+ fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitUnallocated(Instruction* instr) {
+ fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR:
+ set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
+ break;
+ case ADRP: // Not implemented in the assembler.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranch(Instruction* instr) {
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case BL:
+ set_lr(instr->following());
+ // Fall through.
+ case B:
+ set_pc(instr->ImmPCOffsetTarget());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitConditionalBranch(Instruction* instr) {
+ ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+ if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ Instruction* target = reg<Instruction*>(instr->Rn());
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BLR: {
+ set_lr(instr->following());
+ if (instr->Rn() == 31) {
+ // BLR XZR is used as a guard for the constant pool. We should never hit
+ // this, but if we do trap to allow debugging.
+ Debug();
+ }
+ // Fall through.
+ }
+ case BR:
+ case RET: set_pc(target); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitTestBranch(Instruction* instr) {
+ unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40();
+ bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: break;
+ case TBNZ: take_branch = !take_branch; break;
+ default: UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitCompareBranch(Instruction* instr) {
+ unsigned rt = instr->Rt();
+ bool take_branch = false;
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w: take_branch = (wreg(rt) == 0); break;
+ case CBZ_x: take_branch = (xreg(rt) == 0); break;
+ case CBNZ_w: take_branch = (wreg(rt) != 0); break;
+ case CBNZ_x: take_branch = (xreg(rt) != 0); break;
+ default: UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+template<typename T>
+void Simulator::AddSubHelper(Instruction* instr, T op2) {
+ bool set_flags = instr->FlagsUpdate();
+ T new_val = 0;
+ Instr operation = instr->Mask(AddSubOpMask);
+
+ switch (operation) {
+ case ADD:
+ case ADDS: {
+ new_val = AddWithCarry<T>(set_flags,
+ reg<T>(instr->Rn(), instr->RnMode()),
+ op2);
+ break;
+ }
+ case SUB:
+ case SUBS: {
+ new_val = AddWithCarry<T>(set_flags,
+ reg<T>(instr->Rn(), instr->RnMode()),
+ ~op2,
+ 1);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ set_reg<T>(instr->Rd(), new_val, instr->RdMode());
+}
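+// Note that SUB and SUBS are evaluated as Rn + ~op2 + 1: in two's complement
+// arithmetic A - B == A + ~B + 1, so a single AddWithCarry path serves both
+// additions and subtractions and sets the flags consistently.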
+
+
+void Simulator::VisitAddSubShifted(Instruction* instr) {
+ Shift shift_type = static_cast<Shift>(instr->ShiftDP());
+ unsigned shift_amount = instr->ImmDPShift();
+
+ if (instr->SixtyFourBits()) {
+ int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
+ AddSubHelper(instr, op2);
+ } else {
+ int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
+ AddSubHelper(instr, op2);
+ }
+}
+
+
+void Simulator::VisitAddSubImmediate(Instruction* instr) {
+ int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
+ if (instr->SixtyFourBits()) {
+ AddSubHelper<int64_t>(instr, op2);
+ } else {
+ AddSubHelper<int32_t>(instr, op2);
+ }
+}
+
+
+void Simulator::VisitAddSubExtended(Instruction* instr) {
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ unsigned left_shift = instr->ImmExtendShift();
+ if (instr->SixtyFourBits()) {
+ int64_t op2 = ExtendValue(xreg(instr->Rm()), ext, left_shift);
+ AddSubHelper(instr, op2);
+ } else {
+ int32_t op2 = ExtendValue(wreg(instr->Rm()), ext, left_shift);
+ AddSubHelper(instr, op2);
+ }
+}
+
+
+void Simulator::VisitAddSubWithCarry(Instruction* instr) {
+ if (instr->SixtyFourBits()) {
+ AddSubWithCarry<int64_t>(instr);
+ } else {
+ AddSubWithCarry<int32_t>(instr);
+ }
+}
+
+
+void Simulator::VisitLogicalShifted(Instruction* instr) {
+ Shift shift_type = static_cast<Shift>(instr->ShiftDP());
+ unsigned shift_amount = instr->ImmDPShift();
+
+ if (instr->SixtyFourBits()) {
+ int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
+ op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
+ LogicalHelper<int64_t>(instr, op2);
+ } else {
+ int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
+ op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
+ LogicalHelper<int32_t>(instr, op2);
+ }
+}
+
+
+void Simulator::VisitLogicalImmediate(Instruction* instr) {
+ if (instr->SixtyFourBits()) {
+ LogicalHelper<int64_t>(instr, instr->ImmLogical());
+ } else {
+ LogicalHelper<int32_t>(instr, instr->ImmLogical());
+ }
+}
+
+
+template<typename T>
+void Simulator::LogicalHelper(Instruction* instr, T op2) {
+ T op1 = reg<T>(instr->Rn());
+ T result = 0;
+ bool update_flags = false;
+
+ // Switch on the logical operation, stripping out the NOT bit, as it has a
+ // different meaning for logical immediate instructions.
+ switch (instr->Mask(LogicalOpMask & ~NOT)) {
+ case ANDS: update_flags = true; // Fall through.
+ case AND: result = op1 & op2; break;
+ case ORR: result = op1 | op2; break;
+ case EOR: result = op1 ^ op2; break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ if (update_flags) {
+ nzcv().SetN(CalcNFlag(result));
+ nzcv().SetZ(CalcZFlag(result));
+ nzcv().SetC(0);
+ nzcv().SetV(0);
+ }
+
+ set_reg<T>(instr->Rd(), result, instr->RdMode());
+}
+
+
+void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
+ if (instr->SixtyFourBits()) {
+ ConditionalCompareHelper(instr, xreg(instr->Rm()));
+ } else {
+ ConditionalCompareHelper(instr, wreg(instr->Rm()));
+ }
+}
+
+
+void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
+ if (instr->SixtyFourBits()) {
+ ConditionalCompareHelper<int64_t>(instr, instr->ImmCondCmp());
+ } else {
+ ConditionalCompareHelper<int32_t>(instr, instr->ImmCondCmp());
+ }
+}
+
+
+template<typename T>
+void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
+ T op1 = reg<T>(instr->Rn());
+
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ // If the condition passes, set the status flags to the result of comparing
+ // the operands.
+ if (instr->Mask(ConditionalCompareMask) == CCMP) {
+ AddWithCarry<T>(true, op1, ~op2, 1);
+ } else {
+ ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
+ AddWithCarry<T>(true, op1, op2, 0);
+ }
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ }
+}
+
+
+void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ int offset = instr->ImmLSUnsigned() << instr->SizeLS();
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), Offset);
+}
+
+
+void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
+}
+
+
+void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+ unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
+
+ int64_t offset = ExtendValue(xreg(instr->Rm()), ext, shift_amount);
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::LoadStoreHelper(Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode) {
+ unsigned srcdst = instr->Rt();
+ unsigned addr_reg = instr->Rn();
+ uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
+ int num_bytes = 1 << instr->SizeLS();
+ uint8_t* stack = NULL;
+
+ // Handle the writeback for stores before the store. On a CPU the writeback
+ // and the store are atomic, but when running on the simulator it is possible
+ // to be interrupted in between. The simulator is not thread safe and V8 does
+  // not require it to be to run JavaScript; therefore the profiler may sample
+ // the "simulated" CPU in the middle of load/store with writeback. The code
+ // below ensures that push operations are safe even when interrupted: the
+ // stack pointer will be decremented before adding an element to the stack.
+ if (instr->IsStore()) {
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+
+    // For a store, the address after writeback is used to check for accesses
+    // below the stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+ }
+
+ LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
+ case LDRSB_w: {
+ set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead8(address), SXTB));
+ break;
+ }
+ case LDRSB_x: {
+ set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead8(address), SXTB));
+ break;
+ }
+ case LDRSH_w: {
+ set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead16(address), SXTH));
+ break;
+ }
+ case LDRSH_x: {
+ set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead16(address), SXTH));
+ break;
+ }
+ case LDRSW_x: {
+ set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
+ break;
+ }
+ case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
+ case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
+ case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
+ case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
+ default: UNIMPLEMENTED();
+ }
+
+ // Handle the writeback for loads after the load to ensure safe pop
+ // operation even when interrupted in the middle of it. The stack pointer
+ // is only updated after the load so pop(fp) will never break the invariant
+ // sp <= fp expected while walking the stack in the sampler.
+ if (instr->IsLoad()) {
+    // For a load, the address before writeback is used to check for accesses
+    // below the stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+ }
+
+ // Accesses below the stack pointer (but above the platform stack limit) are
+ // not allowed in the ABI.
+ CheckMemoryAccess(address, stack);
+}
+
+
+void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
+ LoadStorePairHelper(instr, PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
+ LoadStorePairHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::LoadStorePairHelper(Instruction* instr,
+ AddrMode addrmode) {
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ unsigned addr_reg = instr->Rn();
+ int offset = instr->ImmLSPair() << instr->SizeLSPair();
+ uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
+ uint8_t* stack = NULL;
+
+ // Handle the writeback for stores before the store. On a CPU the writeback
+ // and the store are atomic, but when running on the simulator it is possible
+ // to be interrupted in between. The simulator is not thread safe and V8 does
+  // not require it to be to run JavaScript; therefore the profiler may sample
+ // the "simulated" CPU in the middle of load/store with writeback. The code
+ // below ensures that push operations are safe even when interrupted: the
+ // stack pointer will be decremented before adding an element to the stack.
+ if (instr->IsStore()) {
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+
+    // For a store, the address after writeback is used to check for accesses
+    // below the stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+ }
+
+ LoadStorePairOp op =
+ static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
+
+ // 'rt' and 'rt2' can only be aliased for stores.
+ ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+
+ switch (op) {
+ case LDP_w: {
+ set_wreg(rt, MemoryRead32(address));
+ set_wreg(rt2, MemoryRead32(address + kWRegSize));
+ break;
+ }
+ case LDP_s: {
+ set_sreg(rt, MemoryReadFP32(address));
+ set_sreg(rt2, MemoryReadFP32(address + kSRegSize));
+ break;
+ }
+ case LDP_x: {
+ set_xreg(rt, MemoryRead64(address));
+ set_xreg(rt2, MemoryRead64(address + kXRegSize));
+ break;
+ }
+ case LDP_d: {
+ set_dreg(rt, MemoryReadFP64(address));
+ set_dreg(rt2, MemoryReadFP64(address + kDRegSize));
+ break;
+ }
+ case LDPSW_x: {
+ set_xreg(rt, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
+ set_xreg(rt2, ExtendValue<int64_t>(
+ MemoryRead32(address + kWRegSize), SXTW));
+ break;
+ }
+ case STP_w: {
+ MemoryWrite32(address, wreg(rt));
+ MemoryWrite32(address + kWRegSize, wreg(rt2));
+ break;
+ }
+ case STP_s: {
+ MemoryWriteFP32(address, sreg(rt));
+ MemoryWriteFP32(address + kSRegSize, sreg(rt2));
+ break;
+ }
+ case STP_x: {
+ MemoryWrite64(address, xreg(rt));
+ MemoryWrite64(address + kXRegSize, xreg(rt2));
+ break;
+ }
+ case STP_d: {
+ MemoryWriteFP64(address, dreg(rt));
+ MemoryWriteFP64(address + kDRegSize, dreg(rt2));
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ // Handle the writeback for loads after the load to ensure a safe pop
+ // operation even when interrupted in the middle of it. The stack pointer
+ // is only updated after the load, so pop(fp) will never break the invariant
+ // sp <= fp expected while walking the stack in the sampler.
+ if (instr->IsLoad()) {
+ // For loads, the pre-writeback address is used to check for accesses
+ // below the stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+ }
+
+ // Accesses below the stack pointer (but above the platform stack limit) are
+ // not allowed in the ABI.
+ CheckMemoryAccess(address, stack);
+}
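+
+ // Worked example (illustrative, not from the original change): for
+ // "ldp x0, x1, [csp, #16]" the pair offset is encoded pre-scaled, so with
+ // ImmLSPair() == 2 and SizeLSPair() == 3 the helper computes
+ //   int offset = 2 << 3; // == 16 bytes
+ // and reads x0 from address and x1 from address + kXRegSize (8 bytes).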
+
+
+void Simulator::VisitLoadLiteral(Instruction* instr) {
+ uint8_t* address = instr->LiteralAddress();
+ unsigned rt = instr->Rt();
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break;
+ case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break;
+ case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break;
+ case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
+ int64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
+ // When the base register is SP the stack pointer is required to be
+ // quadword aligned prior to the address calculation and write-backs.
+ // Misalignment will cause a stack alignment fault.
+ FATAL("ALIGNMENT EXCEPTION");
+ }
+
+ if ((addrmode == Offset) || (addrmode == PreIndex)) {
+ address += offset;
+ }
+
+ return reinterpret_cast<uint8_t*>(address);
+}
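+
+ // Addressing-mode summary (illustrative comment, not part of the change):
+ //   [xn, #off]   Offset:    address = xn + off, xn unchanged
+ //   [xn, #off]!  PreIndex:  address = xn + off, then xn += off
+ //   [xn], #off   PostIndex: address = xn, then xn += off
+ // Only Offset and PreIndex add the offset here; PostIndex applies it in
+ // LoadStoreWriteBack() below.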
+
+
+void Simulator::LoadStoreWriteBack(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
+ ASSERT(offset != 0);
+ uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ set_reg(addr_reg, address + offset, Reg31IsStackPointer);
+ }
+}
+
+
+void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
+ if ((address >= stack_limit_) && (address < stack)) {
+ fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
+ fprintf(stream_, " sp is here: 0x%16p\n", stack);
+ fprintf(stream_, " access was here: 0x%16p\n", address);
+ fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_);
+ fprintf(stream_, "\n");
+ FATAL("ACCESS BELOW STACK POINTER");
+ }
+}
+
+
+uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
+ ASSERT(address != NULL);
+ ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+ uint64_t read = 0;
+ memcpy(&read, address, num_bytes);
+ return read;
+}
+
+
+uint8_t Simulator::MemoryRead8(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint8_t));
+}
+
+
+uint16_t Simulator::MemoryRead16(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint16_t));
+}
+
+
+uint32_t Simulator::MemoryRead32(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint32_t));
+}
+
+
+float Simulator::MemoryReadFP32(uint8_t* address) {
+ return rawbits_to_float(MemoryRead32(address));
+}
+
+
+uint64_t Simulator::MemoryRead64(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint64_t));
+}
+
+
+double Simulator::MemoryReadFP64(uint8_t* address) {
+ return rawbits_to_double(MemoryRead64(address));
+}
+
+
+void Simulator::MemoryWrite(uint8_t* address,
+ uint64_t value,
+ unsigned num_bytes) {
+ ASSERT(address != NULL);
+ ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+
+ LogWrite(address, value, num_bytes);
+ memcpy(address, &value, num_bytes);
+}
+
+
+void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
+ MemoryWrite(address, value, sizeof(uint32_t));
+}
+
+
+void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
+ MemoryWrite32(address, float_to_rawbits(value));
+}
+
+
+void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
+ MemoryWrite(address, value, sizeof(uint64_t));
+}
+
+
+void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
+ MemoryWrite64(address, double_to_rawbits(value));
+}
+
+
+void Simulator::VisitMoveWideImmediate(Instruction* instr) {
+ MoveWideImmediateOp mov_op =
+ static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
+ int64_t new_xn_val = 0;
+
+ bool is_64_bits = instr->SixtyFourBits() == 1;
+ // Shift is limited for W operations.
+ ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+
+ // Get the shifted immediate.
+ int64_t shift = instr->ShiftMoveWide() * 16;
+ int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
+
+ // Compute the new value.
+ switch (mov_op) {
+ case MOVN_w:
+ case MOVN_x: {
+ new_xn_val = ~shifted_imm16;
+ if (!is_64_bits) new_xn_val &= kWRegMask;
+ break;
+ }
+ case MOVK_w:
+ case MOVK_x: {
+ unsigned reg_code = instr->Rd();
+ int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
+ : wreg(reg_code);
+ new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
+ break;
+ }
+ case MOVZ_w:
+ case MOVZ_x: {
+ new_xn_val = shifted_imm16;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ // Update the destination register.
+ set_xreg(instr->Rd(), new_xn_val);
+}
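+
+ // Worked example (illustrative, not part of the original change): loading
+ // the 64-bit constant 0x1234000056780000 can be simulated as
+ //   MOVZ x0, #0x5678, lsl #16  -> new_xn_val = 0x0000000056780000
+ //   MOVK x0, #0x1234, lsl #48  -> keeps the other bits and inserts 0x1234
+ // at bit 48, giving 0x1234000056780000.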
+
+
+void Simulator::VisitConditionalSelect(Instruction* instr) {
+ if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
+ uint64_t new_val = xreg(instr->Rm());
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w: set_wreg(instr->Rd(), new_val); break;
+ case CSEL_x: set_xreg(instr->Rd(), new_val); break;
+ case CSINC_w: set_wreg(instr->Rd(), new_val + 1); break;
+ case CSINC_x: set_xreg(instr->Rd(), new_val + 1); break;
+ case CSINV_w: set_wreg(instr->Rd(), ~new_val); break;
+ case CSINV_x: set_xreg(instr->Rd(), ~new_val); break;
+ case CSNEG_w: set_wreg(instr->Rd(), -new_val); break;
+ case CSNEG_x: set_xreg(instr->Rd(), -new_val); break;
+ default: UNIMPLEMENTED();
+ }
+ } else {
+ if (instr->SixtyFourBits()) {
+ set_xreg(instr->Rd(), xreg(instr->Rn()));
+ } else {
+ set_wreg(instr->Rd(), wreg(instr->Rn()));
+ }
+ }
+}
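+
+ // Illustrative note (not part of the original change): "cset x0, eq" is an
+ // alias of "csinc x0, xzr, xzr, ne". When eq holds, ne fails, so the code
+ // above takes the ConditionFailed() branch and writes xzr + 1 == 1;
+ // otherwise it copies xzr, writing 0.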
+
+
+void Simulator::VisitDataProcessing1Source(Instruction* instr) {
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
+ case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
+ case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
+ case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
+ case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
+ case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
+ case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
+ case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
+ break;
+ case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
+ break;
+ case CLS_w: {
+ set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSizeInBits));
+ break;
+ }
+ case CLS_x: {
+ set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits));
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
+ ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
+ uint64_t result = 0;
+ for (unsigned i = 0; i < num_bits; i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
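+
+ // Example (illustrative): ReverseBits(0x0000000000000001, 64) returns
+ // 0x8000000000000000, i.e. bit i of the input becomes bit (63 - i).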
+
+
+uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
+ // Split the 64-bit value into an 8-bit array, where b[0] is the least
+ // significant byte, and b[7] is the most significant.
+ uint8_t bytes[8];
+ uint64_t mask = 0xff00000000000000UL;
+ for (int i = 7; i >= 0; i--) {
+ bytes[i] = (value & mask) >> (i * 8);
+ mask >>= 8;
+ }
+
+ // Permutation tables for REV instructions.
+ // permute_table[Reverse16] is used by REV16_x, REV16_w
+ // permute_table[Reverse32] is used by REV32_x, REV_w
+ // permute_table[Reverse64] is used by REV_x
+ ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
+ static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
+ {4, 5, 6, 7, 0, 1, 2, 3},
+ {0, 1, 2, 3, 4, 5, 6, 7} };
+ uint64_t result = 0;
+ for (int i = 0; i < 8; i++) {
+ result <<= 8;
+ result |= bytes[permute_table[mode][i]];
+ }
+ return result;
+}
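+
+ // Examples (illustrative): with value == 0x0123456789abcdef,
+ //   Reverse64 (REV_x)   -> 0xefcdab8967452301 (full byte reversal)
+ //   Reverse32 (REV32_x) -> 0x67452301efcdab89 (reverse within each word)
+ //   Reverse16 (REV16_x) -> 0x23016745ab89efcd (swap within each halfword)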
+
+
+template <typename T>
+void Simulator::DataProcessing2Source(Instruction* instr) {
+ Shift shift_op = NO_SHIFT;
+ T result = 0;
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ case SDIV_w:
+ case SDIV_x: {
+ T rn = reg<T>(instr->Rn());
+ T rm = reg<T>(instr->Rm());
+ if ((rn == std::numeric_limits<T>::min()) && (rm == -1)) {
+ result = std::numeric_limits<T>::min();
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_w:
+ case UDIV_x: {
+ typedef typename make_unsigned<T>::type unsignedT;
+ unsignedT rn = static_cast<unsignedT>(reg<T>(instr->Rn()));
+ unsignedT rm = static_cast<unsignedT>(reg<T>(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case LSLV_w:
+ case LSLV_x: shift_op = LSL; break;
+ case LSRV_w:
+ case LSRV_x: shift_op = LSR; break;
+ case ASRV_w:
+ case ASRV_x: shift_op = ASR; break;
+ case RORV_w:
+ case RORV_x: shift_op = ROR; break;
+ default: UNIMPLEMENTED();
+ }
+
+ if (shift_op != NO_SHIFT) {
+ // Shift distance encoded in the least-significant five/six bits of the
+ // register.
+ unsigned shift = wreg(instr->Rm());
+ if (sizeof(T) == kWRegSize) {
+ shift &= kShiftAmountWRegMask;
+ } else {
+ shift &= kShiftAmountXRegMask;
+ }
+ result = ShiftOperand(reg<T>(instr->Rn()), shift_op, shift);
+ }
+ set_reg<T>(instr->Rd(), result);
+}
+
+
+void Simulator::VisitDataProcessing2Source(Instruction* instr) {
+ if (instr->SixtyFourBits()) {
+ DataProcessing2Source<int64_t>(instr);
+ } else {
+ DataProcessing2Source<int32_t>(instr);
+ }
+}
+
+
+// The algorithm used is described in section 8.2 of
+// Hacker's Delight, by Henry S. Warren, Jr.
+// It assumes that a right shift on a signed integer is an arithmetic shift.
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xffffffffL;
+ u1 = u >> 32;
+ v0 = v & 0xffffffffL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xffffffffL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
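+
+ // Sanity examples (illustrative): MultiplyHighSigned(1LL << 62, 4) == 1,
+ // since the full 128-bit product is 1 << 64 and only the high 64 bits are
+ // returned. Likewise MultiplyHighSigned(-1, 1) == -1, because the product
+ // -1 sign-extends across all 128 bits.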
+
+
+void Simulator::VisitDataProcessing3Source(Instruction* instr) {
+ int64_t result = 0;
+ // Extract and sign- or zero-extend 32-bit arguments for widening operations.
+ uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
+ uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
+ int64_t rn_s32 = reg<int32_t>(instr->Rn());
+ int64_t rm_s32 = reg<int32_t>(instr->Rm());
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x:
+ result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case MSUB_w:
+ case MSUB_x:
+ result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
+ case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
+ case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
+ case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
+ case SMULH_x:
+ ASSERT(instr->Ra() == kZeroRegCode);
+ result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
+ break;
+ default: UNIMPLEMENTED();
+ }
+
+ if (instr->SixtyFourBits()) {
+ set_xreg(instr->Rd(), result);
+ } else {
+ set_wreg(instr->Rd(), result);
+ }
+}
+
+
+template <typename T>
+void Simulator::BitfieldHelper(Instruction* instr) {
+ typedef typename make_unsigned<T>::type unsignedT;
+ T reg_size = sizeof(T) * 8;
+ T R = instr->ImmR();
+ T S = instr->ImmS();
+ T diff = S - R;
+ T mask;
+ if (diff >= 0) {
+ mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
+ : static_cast<T>(-1);
+ } else {
+ mask = ((1L << (S + 1)) - 1);
+ mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+ diff += reg_size;
+ }
+
+ // If inzero is true, the extracted bitfield is inserted into zero rather
+ // than into the existing destination register value.
+ // If extend is true, the sign of the extracted bitfield is extended.
+ bool inzero = false;
+ bool extend = false;
+ switch (instr->Mask(BitfieldMask)) {
+ case BFM_x:
+ case BFM_w:
+ break;
+ case SBFM_x:
+ case SBFM_w:
+ inzero = true;
+ extend = true;
+ break;
+ case UBFM_x:
+ case UBFM_w:
+ inzero = true;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ T dst = inzero ? 0 : reg<T>(instr->Rd());
+ T src = reg<T>(instr->Rn());
+ // Rotate source bitfield into place.
+ T result = (static_cast<unsignedT>(src) >> R) | (src << (reg_size - R));
+ // Determine the sign extension.
+ T topbits_preshift = (static_cast<T>(1) << (reg_size - diff - 1)) - 1;
+ T signbits = (extend && ((src >> S) & 1) ? topbits_preshift : 0)
+ << (diff + 1);
+
+ // Merge sign extension, dest/zero and bitfield.
+ result = signbits | (result & mask) | (dst & ~mask);
+
+ set_reg<T>(instr->Rd(), result);
+}
+
+
+void Simulator::VisitBitfield(Instruction* instr) {
+ if (instr->SixtyFourBits()) {
+ BitfieldHelper<int64_t>(instr);
+ } else {
+ BitfieldHelper<int32_t>(instr);
+ }
+}
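+
+ // Worked example (illustrative): "ubfx w0, w1, #8, #4" encodes UBFM_w with
+ // R == 8 and S == 11. diff == 3, so mask == 0xf; the source is rotated
+ // right by 8, inzero forces dst == 0 and extend is false, leaving
+ // w0 == (w1 >> 8) & 0xf.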
+
+
+void Simulator::VisitExtract(Instruction* instr) {
+ if (instr->SixtyFourBits()) {
+ Extract<uint64_t>(instr);
+ } else {
+ Extract<uint32_t>(instr);
+ }
+}
+
+
+void Simulator::VisitFPImmediate(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dest = instr->Rd();
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
+ case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPIntegerConvert(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ FPRounding round = fpcr().RMode();
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
+ case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
+ case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
+ case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
+ case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
+ case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
+ case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
+ case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
+ case FCVTMS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
+ case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
+ case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
+ case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
+ case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
+ case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
+ case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
+ case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
+ case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
+ case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
+ case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
+ case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
+ case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
+ case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
+ case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
+ case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
+ case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
+ case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
+ case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
+ case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
+
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
+ case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
+ case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
+ case UCVTF_dw: {
+ set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
+ break;
+ }
+ case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
+ case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
+ case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
+ case UCVTF_sw: {
+ set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
+ break;
+ }
+
+ default: UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+ int fbits = 64 - instr->FPScale();
+
+ FPRounding round = fpcr().RMode();
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx_fixed:
+ set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
+ break;
+ case SCVTF_dw_fixed:
+ set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
+ break;
+ case UCVTF_dx_fixed:
+ set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
+ break;
+ case UCVTF_dw_fixed: {
+ set_dreg(dst,
+ UFixedToDouble(reg<uint32_t>(src), fbits, round));
+ break;
+ }
+ case SCVTF_sx_fixed:
+ set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
+ break;
+ case SCVTF_sw_fixed:
+ set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
+ break;
+ case UCVTF_sx_fixed:
+ set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
+ break;
+ case UCVTF_sw_fixed: {
+ set_sreg(dst,
+ UFixedToFloat(reg<uint32_t>(src), fbits, round));
+ break;
+ }
+ default: UNREACHABLE();
+ }
+}
+
+
+int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxInt) {
+ return kWMaxInt;
+ } else if (value < kWMinInt) {
+ return kWMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int32_t>(value);
+}
+
+
+int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxInt) {
+ return kXMaxInt;
+ } else if (value < kXMinInt) {
+ return kXMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int64_t>(value);
+}
+
+
+uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxUInt) {
+ return kWMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
+}
+
+
+uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxUInt) {
+ return kXMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
+}
+
+
+void Simulator::VisitFPCompare(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
+ : kSRegSizeInBits;
+ double fn_val = fpreg(reg_size, instr->Rn());
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s:
+ case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break;
+ case FCMP_s_zero:
+ case FCMP_d_zero: FPCompare(fn_val, 0.0); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalCompare(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: {
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ // If the condition passes, set the status flags to the result of
+ // comparing the operands.
+ unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
+ : kSRegSizeInBits;
+ FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ }
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalSelect(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ Instr selected;
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ selected = instr->Rn();
+ } else {
+ selected = instr->Rm();
+ }
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
+ case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ case FMOV_s: set_sreg(fd, sreg(fn)); break;
+ case FMOV_d: set_dreg(fd, dreg(fn)); break;
+ case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break;
+ case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
+ case FNEG_s: set_sreg(fd, -sreg(fn)); break;
+ case FNEG_d: set_dreg(fd, -dreg(fn)); break;
+ case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn))); break;
+ case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break;
+ case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
+ case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
+ case FRINTM_s:
+ set_sreg(fd, FPRoundInt(sreg(fn), FPNegativeInfinity)); break;
+ case FRINTM_d:
+ set_dreg(fd, FPRoundInt(dreg(fn), FPNegativeInfinity)); break;
+ case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
+ case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
+ case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
+ case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break;
+ case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break;
+ case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+// sign: 0 = positive, 1 = negative
+// exponent: Unbiased IEEE-754 exponent.
+// mantissa: The mantissa of the input. The top bit (which is not encoded for
+// normal IEEE-754 values) must not be omitted. This bit has the
+// value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
+ FPRounding round_mode) {
+ ASSERT((sign == 0) || (sign == 1));
+
+ // Only the FPTieEven rounding mode is implemented.
+ ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ // Rounding can promote subnormals to normals, and normals to infinities. For
+ // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+ // encodable as a float, but rounding based on the low-order mantissa bits
+ // could make it overflow. With ties-to-even rounding, this value would become
+ // an infinity.
+
+ // ---- Rounding Method ----
+ //
+ // The exponent is irrelevant in the rounding operation, so we treat the
+ // lowest-order bit that will fit into the result ('onebit') as having
+ // the value '1'. Similarly, the highest-order bit that won't fit into
+ // the result ('halfbit') has the value '0.5'. The 'point' sits between
+ // 'onebit' and 'halfbit':
+ //
+ //            These bits fit into the result.
+ //               |---------------------|
+ //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ //                                     ||
+ //                                    / |
+ //                                   /  halfbit
+ //                               onebit
+ //
+ // For subnormal outputs, the range of representable bits is smaller and
+ // the position of onebit and halfbit depends on the exponent of the
+ // input, but the method is otherwise similar.
+ //
+ //  onebit(frac)
+ //    |
+ //    | halfbit(frac)          halfbit(adjusted)
+ //    | /                      /
+ //    | |                      |
+ //   0b00.0 (exact)      -> 0b00.0 (exact)                    -> 0b00
+ //   0b00.0...           -> 0b00.0...                         -> 0b00
+ //   0b00.1 (exact)      -> 0b00.0111..111                    -> 0b00
+ //   0b00.1...           -> 0b00.1...                         -> 0b01
+ //   0b01.0 (exact)      -> 0b01.0 (exact)                    -> 0b01
+ //   0b01.0...           -> 0b01.0...                         -> 0b01
+ //   0b01.1 (exact)      -> 0b01.1 (exact)                    -> 0b10
+ //   0b01.1...           -> 0b01.1...                         -> 0b10
+ //   0b10.0 (exact)      -> 0b10.0 (exact)                    -> 0b10
+ //   0b10.0...           -> 0b10.0...                         -> 0b10
+ //   0b10.1 (exact)      -> 0b10.0111..111                    -> 0b10
+ //   0b10.1...           -> 0b10.1...                         -> 0b11
+ //   0b11.0 (exact)      -> 0b11.0 (exact)                    -> 0b11
+ //   ...                    /            |                       /    |
+ //                         /             |                      /     |
+ //                                                              /     |
+ //  adjusted = frac - (halfbit(mantissa) & ~onebit(frac));     /      |
+ //                                                                    |
+ //  mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+ static const int mantissa_offset = 0;
+ static const int exponent_offset = mantissa_offset + mbits;
+ static const int sign_offset = exponent_offset + ebits;
+ STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1));
+
+ // Bail out early for zero inputs.
+ if (mantissa == 0) {
+ return sign << sign_offset;
+ }
+
+ // If all bits in the exponent are set, the value is infinite or NaN.
+ // This is true for all binary IEEE-754 formats.
+ static const int infinite_exponent = (1 << ebits) - 1;
+ static const int max_normal_exponent = infinite_exponent - 1;
+
+ // Apply the exponent bias to encode it for the result. Doing this early makes
+ // it easy to detect values that will be infinite or subnormal.
+ exponent += max_normal_exponent >> 1;
+
+ if (exponent > max_normal_exponent) {
+ // Overflow: The input is too large for the result type to represent. The
+ // FPTieEven rounding mode handles overflows using infinities.
+ exponent = infinite_exponent;
+ mantissa = 0;
+ return (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ (mantissa << mantissa_offset);
+ }
+
+ // Calculate the shift required to move the top mantissa bit to the proper
+ // place in the destination type.
+ const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
+ int shift = highest_significant_bit - mbits;
+
+ if (exponent <= 0) {
+ // The output will be subnormal (before rounding).
+
+ // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+ // is necessary because the exponent of a subnormal value (encoded as 0) is
+ // the same as the exponent of the smallest normal value (encoded as 1).
+ shift += -exponent + 1;
+
+ // Handle inputs that would produce a zero output.
+ //
+ // Shifts higher than highest_significant_bit+1 will always produce a zero
+ // result. A shift of exactly highest_significant_bit+1 might produce a
+ // non-zero result after rounding.
+ if (shift > (highest_significant_bit + 1)) {
+ // The result will always be +/-0.0.
+ return sign << sign_offset;
+ }
+
+ // Properly encode the exponent for a subnormal output.
+ exponent = 0;
+ } else {
+ // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+ // normal values.
+ mantissa &= ~(1UL << highest_significant_bit);
+ }
+
+ if (shift > 0) {
+ // We have to shift the mantissa to the right. Some precision is lost, so we
+ // need to apply rounding.
+ uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+ uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
+ uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
+ T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
+
+ T result = (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset);
+
+ // A very large mantissa can overflow during rounding. If this happens, the
+ // exponent should be incremented and the mantissa set to 1.0 (encoded as
+ // 0). Applying halfbit_adjusted after assembling the float has the nice
+ // side-effect that this case is handled for free.
+ //
+ // This also handles cases where a very large finite value overflows to
+ // infinity, or where a very large subnormal value overflows to become
+ // normal.
+ return result + halfbit_adjusted;
+ } else {
+ // We have to shift the mantissa to the left (or not at all). The input
+ // mantissa is exactly representable in the output mantissa, so apply no
+ // rounding correction.
+ return (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa << -shift) << mantissa_offset);
+ }
+}
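+
+ // Example (illustrative): FPRoundToFloat(0, 0, 3, FPTieEven) returns 1.5f.
+ // The top set bit of the mantissa (bit 1) carries the value 2^0, so the
+ // encoded value is 0b1.1 * 2^0; no bits are shifted out, so no rounding
+ // correction is needed.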
+
+
+// See FPRound for a description of this function.
+static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int64_t bits =
+ FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_double(bits);
+}
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int32_t bits =
+ FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_float(bits);
+}
+
+
+double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToDouble(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToDouble(-src, fbits, round);
+ }
+}
+
+
+double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int64_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToDouble(0, exponent, src, round);
+}
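+
+ // Example (illustrative): UFixedToDouble(16, 4, round) treats the input as
+ // 16 / 2^4: the highest set bit is bit 4, so exponent == 0 and the result
+ // is exactly 1.0.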
+
+
+float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToFloat(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToFloat(-src, fbits, round);
+ }
+}
+
+
+float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0f;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int32_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToFloat(0, exponent, src, round);
+}
+
+
+double Simulator::FPRoundInt(double value, FPRounding round_mode) {
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+ return value;
+ } else if (std::isnan(value)) {
+ return FPProcessNaN(value);
+ }
+
+ double int_result = floor(value);
+ double error = value - int_result;
+ switch (round_mode) {
+ case FPTieAway: {
+ // Take care of correctly handling the range ]-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 < value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is positive, round up.
+ int_result++;
+ }
+ break;
+ }
+ case FPTieEven: {
+ // Take care of correctly handling the range [-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 <= value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ } else if ((error > 0.5) ||
+ ((error == 0.5) && (fmod(int_result, 2) != 0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPZero: {
+ // Round towards zero: keep floor(value) for positive values (computed
+ // above) and use ceil(value) for negative values.
+ if (value < 0) {
+ int_result = ceil(value);
+ }
+ break;
+ }
+ case FPNegativeInfinity: {
+ // We always use floor(value).
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+ return int_result;
+}
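+
+ // Examples (illustrative):
+ //   FPRoundInt(2.5, FPTieEven) == 2.0 (ties round to the even integer)
+ //   FPRoundInt(2.5, FPTieAway) == 3.0 (ties round away from zero)
+ //   FPRoundInt(-0.25, FPTieEven) == -0.0 (the sign of zero is preserved)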
+
+
+double Simulator::FPToDouble(float value) {
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (fpcr().DN()) return kFP64DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
+ uint32_t raw = float_to_rawbits(value);
+
+ uint64_t sign = raw >> 31;
+ uint64_t exponent = (1 << 11) - 1;
+ uint64_t payload = unsigned_bitextract_64(21, 0, raw);
+ payload <<= (52 - 23); // The unused low-order bits should be 0.
+ payload |= (1L << 51); // Force a quiet NaN.
+
+ return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ case FP_INFINITE: {
+ // All other inputs are preserved in a standard cast, because every value
+ // representable using an IEEE-754 float is also representable using an
+ // IEEE-754 double.
+ return static_cast<double>(value);
+ }
+ }
+
+ UNREACHABLE();
+ return static_cast<double>(value);
+}
+
+
+float Simulator::FPToFloat(double value, FPRounding round_mode) {
+ // Only the FPTieEven rounding mode is implemented.
+ ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (fpcr().DN()) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint64_t raw = double_to_rawbits(value);
+
+ uint32_t sign = raw >> 63;
+ uint32_t exponent = (1 << 8) - 1;
+ uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
+ payload |= (1 << 22); // Force a quiet NaN.
+
+ return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_INFINITE: {
+ // In a C++ cast, any value representable in the target type will be
+ // unchanged. This is always the case for +/-0.0 and infinities.
+ return static_cast<float>(value);
+ }
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-float as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+ uint64_t raw = double_to_rawbits(value);
+ // Extract the IEEE-754 double components.
+ uint32_t sign = raw >> 63;
+ // Extract the exponent and remove the IEEE-754 encoding bias.
+ int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
+ // Extract the mantissa and add the implicit '1' bit.
+ uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
+ if (std::fpclassify(value) == FP_NORMAL) {
+ mantissa |= (1UL << 52);
+ }
+ return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ UNREACHABLE();
+ return value;
+}
+
+
+void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+
+ // Fmaxnm and Fminnm have special NaN handling.
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); return;
+ case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); return;
+ case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); return;
+ case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); return;
+ default:
+ break; // Fall through.
+ }
+
+ if (FPProcessNaNs(instr)) return;
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm))); break;
+ case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm))); break;
+ case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm))); break;
+ case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm))); break;
+ case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm))); break;
+ case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm))); break;
+ case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm))); break;
+ case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm))); break;
+ case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
+ case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
+ case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
+ case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
+ case FMAXNM_s:
+ case FMAXNM_d:
+ case FMINNM_s:
+ case FMINNM_d:
+ // These were handled before the standard FPProcessNaNs() stage.
+ UNREACHABLE();
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ unsigned fa = instr->Ra();
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ // fd = fa +/- (fn * fm)
+ case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
+ case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
+ case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
+ case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
+ // Negated variants of the above.
+ case FNMADD_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
+ break;
+ case FNMSUB_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
+ break;
+ case FNMADD_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
+ break;
+ case FNMSUB_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
+ break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+template <typename T>
+T Simulator::FPAdd(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
+ // inf + -inf returns the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 + op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPDiv(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
+ // inf / inf and 0.0 / 0.0 return the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 / op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(a) && !std::isnan(b));
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return +0.0.
+ return 0.0;
+ } else {
+ return (a > b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMaxNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64NegativeInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64NegativeInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMax(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPMin(T a, T b) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(a) && !std::isnan(b));
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return -0.0.
+ return -0.0;
+ } else {
+ return (a < b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMinNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64PositiveInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64PositiveInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMin(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPMul(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 * op2;
+ }
+}
+
+
+template<typename T>
+T Simulator::FPMulAdd(T a, T op1, T op2) {
+ T result = FPProcessNaNs3(a, op1, op2);
+
+ T sign_a = copysign(1.0, a);
+ T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
+ bool isinf_prod = std::isinf(op1) || std::isinf(op2);
+ bool operation_generates_nan =
+ (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0
+ (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf
+ (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf
+
+ if (std::isnan(result)) {
+ // Generated NaNs override quiet NaNs propagated from a.
+ if (operation_generates_nan && IsQuietNaN(a)) {
+ return FPDefaultNaN<T>();
+ } else {
+ return result;
+ }
+ }
+
+ // If the operation would produce a NaN, return the default NaN.
+ if (operation_generates_nan) {
+ return FPDefaultNaN<T>();
+ }
+
+ // Work around broken fma implementations for exact zero results: The sign of
+ // exact 0.0 results is positive unless both a and op1 * op2 are negative.
+ if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
+ return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
+ }
+
+ result = FusedMultiplyAdd(op1, op2, a);
+ ASSERT(!std::isnan(result));
+
+ // Work around broken fma implementations for rounded zero results: If a is
+ // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
+ if ((a == 0.0) && (result == 0.0)) {
+ return copysign(0.0, sign_prod);
+ }
+
+ return result;
+}
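+
+ // Examples (illustrative) of the exact-zero rule above:
+ //   FPMulAdd(0.0, -0.0, 0.0)  -> +0.0 (sign_a and sign_prod differ)
+ //   FPMulAdd(-0.0, -0.0, 0.0) -> -0.0 (both a and op1 * op2 are negative)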
+
+
+template <typename T>
+T Simulator::FPSqrt(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op < 0.0) {
+ return FPDefaultNaN<T>();
+ } else {
+ return std::sqrt(op);
+ }
+}
+
+
+template <typename T>
+T Simulator::FPSub(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
+ // inf - inf returns the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 - op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPProcessNaN(T op) {
+ ASSERT(std::isnan(op));
+ return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
+}
+
+
+template <typename T>
+T Simulator::FPProcessNaNs(T op1, T op2) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op1)) {
+ ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else {
+ return 0.0;
+ }
+}
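+
+ // Illustrative note: signalling NaNs take priority over quiet NaNs, and
+ // op1 over op2. For example, with op1 a quiet NaN and op2 a signalling
+ // NaN, the result is the quieted op2 (or the default NaN if FPCR.DN is
+ // set).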
+
+
+template <typename T>
+T Simulator::FPProcessNaNs3(T op1, T op2, T op3) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (IsSignallingNaN(op3)) {
+ return FPProcessNaN(op3);
+ } else if (std::isnan(op1)) {
+ ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op3)) {
+ ASSERT(IsQuietNaN(op3));
+ return FPProcessNaN(op3);
+ } else {
+ return 0.0;
+ }
+}
+
+
+bool Simulator::FPProcessNaNs(Instruction* instr) {
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ bool done = false;
+
+ if (instr->Mask(FP64) == FP64) {
+ double result = FPProcessNaNs(dreg(fn), dreg(fm));
+ if (std::isnan(result)) {
+ set_dreg(fd, result);
+ done = true;
+ }
+ } else {
+ float result = FPProcessNaNs(sreg(fn), sreg(fm));
+ if (std::isnan(result)) {
+ set_sreg(fd, result);
+ done = true;
+ }
+ }
+
+ return done;
+}
+
+
+void Simulator::VisitSystem(Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
+ case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
+ default: UNIMPLEMENTED();
+ }
+ break;
+ }
+ case MSR: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
+ case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
+ default: UNIMPLEMENTED();
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: break;
+ default: UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ __sync_synchronize();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
+bool Simulator::GetValue(const char* desc, int64_t* value) {
+ int regnum = CodeFromName(desc);
+ if (regnum >= 0) {
+ unsigned code = regnum;
+ if (code == kZeroRegCode) {
+ // Catch the zero register and return 0.
+ *value = 0;
+ return true;
+ } else if (code == kSPRegInternalCode) {
+ // Translate the stack pointer code to 31, for Reg31IsStackPointer.
+ code = 31;
+ }
+ if (desc[0] == 'w') {
+ *value = wreg(code, Reg31IsStackPointer);
+ } else {
+ *value = xreg(code, Reg31IsStackPointer);
+ }
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ } else {
+ return SScanF(desc, "%" SCNu64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+}
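+
+ // Usage examples (illustrative): GetValue accepts register names and
+ // numeric literals, e.g.
+ //   GetValue("x0", &value);         // current value of x0
+ //   GetValue("0xdeadbeef", &value); // hexadecimal literal
+ //   GetValue("42", &value);         // decimal literal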
+
+
+bool Simulator::PrintValue(const char* desc) {
+ if (strcmp(desc, "csp") == 0) {
+ ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
+ return true;
+ } else if (strcmp(desc, "wcsp") == 0) {
+ ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
+ clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
+ return true;
+ }
+
+ int i = CodeFromName(desc);
+ STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters);
+ if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false;
+
+ if (desc[0] == 'v') {
+ PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+ clr_fpreg_name, VRegNameForCode(i),
+ clr_fpreg_value, double_to_rawbits(dreg(i)),
+ clr_normal,
+ clr_fpreg_name, DRegNameForCode(i),
+ clr_fpreg_value, dreg(i),
+ clr_fpreg_name, SRegNameForCode(i),
+ clr_fpreg_value, sreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 'd') {
+ PrintF(stream_, "%s %s:%s %g%s\n",
+ clr_fpreg_name, DRegNameForCode(i),
+ clr_fpreg_value, dreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 's') {
+ PrintF(stream_, "%s %s:%s %g%s\n",
+ clr_fpreg_name, SRegNameForCode(i),
+ clr_fpreg_value, sreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 'w') {
+ PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n",
+ clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
+ return true;
+ } else {
+ // X registers and their aliases start with a wide variety of characters,
+ // so treat any name not handled above as an X register.
+ PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
+ return true;
+ }
+}
+
+
+void Simulator::Debug() {
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ bool done = false;
+ bool cleared_log_disasm_bit = false;
+
+ while (!done) {
+ // Disassemble the next instruction to execute before doing anything else.
+ PrintInstructionsAt(pc_, 1);
+ // Read the command line.
+ char* line = ReadLine("sim> ");
+ if (line == NULL) {
+ break;
+ } else {
+ // Repeat last command by default.
+ char* last_input = last_debugger_input();
+ if (strcmp(line, "\n") == 0 && (last_input != NULL)) {
+ DeleteArray(line);
+ line = last_input;
+ } else {
+ // Remember the most recently entered command.
+ set_last_debugger_input(line);
+ }
+
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+
+ // stepi / si ------------------------------------------------------------
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ // We are about to execute instructions, after which by default we
+ // should increment the pc_. If pc_modified_ was set when reaching this
+ // debug instruction, it has not been cleared because this instruction
+ // has not completed yet, so clear it manually.
+ pc_modified_ = false;
+
+ if (argc == 1) {
+ ExecuteInstruction();
+ } else {
+ int64_t number_of_instructions_to_execute = 1;
+ GetValue(arg1, &number_of_instructions_to_execute);
+
+ set_log_parameters(log_parameters() | LOG_DISASM);
+ while (number_of_instructions_to_execute-- > 0) {
+ ExecuteInstruction();
+ }
+ set_log_parameters(log_parameters() & ~LOG_DISASM);
+ PrintF("\n");
+ }
+
+ // If it was necessary, the pc has already been updated or incremented
+ // when executing the instruction. So we do not want it to be updated
+ // again. It will be cleared when exiting.
+ pc_modified_ = true;
+
+ // next / n --------------------------------------------------------------
+ } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
+ // Tell the simulator to break after the next executed BL.
+ break_on_next_ = true;
+ // Continue.
+ done = true;
+
+ // continue / cont / c ---------------------------------------------------
+ } else if ((strcmp(cmd, "continue") == 0) ||
+ (strcmp(cmd, "cont") == 0) ||
+ (strcmp(cmd, "c") == 0)) {
+ // Leave the debugger shell.
+ done = true;
+
+ // disassemble / disasm / di ---------------------------------------------
+ } else if (strcmp(cmd, "disassemble") == 0 ||
+ strcmp(cmd, "disasm") == 0 ||
+ strcmp(cmd, "di") == 0) {
+ int64_t n_of_instrs_to_disasm = 10; // default value.
+ int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
+ if (argc >= 2) { // disasm <n of instrs>
+ GetValue(arg1, &n_of_instrs_to_disasm);
+ }
+ if (argc >= 3) { // disasm <n of instrs> <address>
+ GetValue(arg2, &address);
+ }
+
+ // Disassemble.
+ PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
+ n_of_instrs_to_disasm);
+ PrintF("\n");
+
+ // print / p -------------------------------------------------------------
+ } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
+ if (argc == 2) {
+ if (strcmp(arg1, "all") == 0) {
+ PrintRegisters(true);
+ PrintFPRegisters(true);
+ } else {
+ if (!PrintValue(arg1)) {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ PrintF(
+ "print <register>\n"
+ " Print the content of a register. (alias 'p')\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n");
+ }
+
+ // printobject / po ------------------------------------------------------
+ } else if ((strcmp(cmd, "printobject") == 0) ||
+ (strcmp(cmd, "po") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ Object* obj = reinterpret_cast<Object*>(value);
+ PrintF("%s: \n", arg1);
+#ifdef DEBUG
+ obj->PrintLn();
+#else
+ obj->ShortPrint();
+ PrintF("\n");
+#endif
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("printobject <value>\n"
+ "printobject <register>\n"
+ " Print details about the value. (alias 'po')\n");
+ }
+
+ // stack / mem ----------------------------------------------------------
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = NULL;
+ int64_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(jssp());
+
+ } else { // "mem"
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words = 0;
+ if (argc == next_arg) {
+ words = 10;
+ } else if (argc == next_arg + 1) {
+ if (!GetValue(argv[next_arg], &words)) {
+ PrintF("%s unrecognized\n", argv[next_arg]);
+ PrintF("Printing 10 double words by default");
+ words = 10;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
+ reinterpret_cast<uint64_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ int64_t value = *cur;
+ Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ PrintF(" (");
+ if ((value & kSmiTagMask) == 0) {
+ STATIC_ASSERT(kSmiValueSize == 32);
+ int32_t untagged = (value >> kSmiShift) & 0xffffffff;
+ PrintF("smi %" PRId32, untagged);
+ } else {
+ obj->ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ // trace / t -------------------------------------------------------------
+ } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
+ if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
+ (LOG_DISASM | LOG_REGS)) {
+ PrintF("Enabling disassembly and registers tracing\n");
+ set_log_parameters(log_parameters() | LOG_DISASM | LOG_REGS);
+ } else {
+ PrintF("Disabling disassembly and registers tracing\n");
+ set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
+ }
+
+ // break / b -------------------------------------------------------------
+ } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ SetBreakpoint(reinterpret_cast<Instruction*>(value));
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ ListBreakpoints();
+ PrintF("Use `break <address>` to set or disable a breakpoint\n");
+ }
+
+ // gdb -------------------------------------------------------------------
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("Relinquishing control to gdb.\n");
+ OS::DebugBreak();
+ PrintF("Regaining control from gdb.\n");
+
+ // sysregs ---------------------------------------------------------------
+ } else if (strcmp(cmd, "sysregs") == 0) {
+ PrintSystemRegisters();
+
+ // help / h --------------------------------------------------------------
+ } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
+ PrintF(
+ "stepi / si\n"
+ " stepi <n>\n"
+ " Step <n> instructions.\n"
+ "next / n\n"
+ " Continue execution until a BL instruction is reached.\n"
+ " At this point a breakpoint is set just after this BL.\n"
+ " Then execution is resumed. It will probably later hit the\n"
+ " breakpoint just set.\n"
+ "continue / cont / c\n"
+ " Continue execution from here.\n"
+ "disassemble / disasm / di\n"
+ " disassemble <n> <address>\n"
+ " Disassemble <n> instructions from current <address>.\n"
+ " By default <n> is 20 and <address> is the current pc.\n"
+ "print / p\n"
+ " print <register>\n"
+ " Print the content of a register.\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n"
+ "printobject / po\n"
+ " printobject <value>\n"
+ " printobject <register>\n"
+ " Print details about the value.\n"
+ "stack\n"
+ " stack [<words>]\n"
+ " Dump stack content, default dump 10 words\n"
+ "mem\n"
+ " mem <address> [<words>]\n"
+ " Dump memory content, default dump 10 words\n"
+ "trace / t\n"
+ " Toggle disassembly and register tracing\n"
+ "break / b\n"
+ " break : list all breakpoints\n"
+ " break <address> : set / enable / disable a breakpoint.\n"
+ "gdb\n"
+ " Enter gdb.\n"
+ "sysregs\n"
+ " Print all system registers (including NZCV).\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ PrintF("Use 'help' for more information.\n");
+ }
+ }
+    if (cleared_log_disasm_bit) {
+ set_log_parameters(log_parameters_ | LOG_DISASM);
+ }
+ }
+}
+
+
+void Simulator::VisitException(Instruction* instr) {
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: {
+ if (instr->ImmException() == kImmExceptionIsDebug) {
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t code;
+ uint32_t parameters;
+
+ memcpy(&code,
+ pc_->InstructionAtOffset(kDebugCodeOffset),
+ sizeof(code));
+ memcpy(&parameters,
+ pc_->InstructionAtOffset(kDebugParamsOffset),
+ sizeof(parameters));
+ char const *message =
+ reinterpret_cast<char const*>(
+ pc_->InstructionAtOffset(kDebugMessageOffset));
+
+ // Always print something when we hit a debug point that breaks.
+      // Since we are about to break anyway, the cost of printing is not a
+      // concern.
+ if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
+ if (message != NULL) {
+ PrintF(stream_,
+ "%sDebugger hit %d: %s%s%s\n",
+ clr_debug_number,
+ code,
+ clr_debug_message,
+ message,
+ clr_normal);
+ } else {
+ PrintF(stream_,
+ "%sDebugger hit %d.%s\n",
+ clr_debug_number,
+ code,
+ clr_normal);
+ }
+ }
+
+ // Other options.
+ switch (parameters & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ set_log_parameters(log_parameters() | parameters);
+ if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
+ if (parameters & LOG_REGS) { PrintRegisters(); }
+ if (parameters & LOG_FP_REGS) { PrintFPRegisters(); }
+ break;
+ case TRACE_DISABLE:
+ set_log_parameters(log_parameters() & ~parameters);
+ break;
+ case TRACE_OVERRIDE:
+ set_log_parameters(parameters);
+ break;
+ default:
+ // We don't support a one-shot LOG_DISASM.
+ ASSERT((parameters & LOG_DISASM) == 0);
+ // Don't print information that is already being traced.
+ parameters &= ~log_parameters();
+ // Print the requested information.
+ if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
+ if (parameters & LOG_REGS) PrintRegisters(true);
+ if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+ }
+
+ // The stop parameters are inlined in the code. Skip them:
+ // - Skip to the end of the message string.
+ size_t size = kDebugMessageOffset + strlen(message) + 1;
+ pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
+ // - Verify that the unreachable marker is present.
+ ASSERT(pc_->Mask(ExceptionMask) == HLT);
+ ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable);
+ // - Skip past the unreachable marker.
+ set_pc(pc_->following());
+
+ // Check if the debugger should break.
+ if (parameters & BREAK) Debug();
+
+ } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
+ DoRuntimeCall(instr);
+ } else if (instr->ImmException() == kImmExceptionIsPrintf) {
+ DoPrintf(instr);
+
+ } else if (instr->ImmException() == kImmExceptionIsUnreachable) {
+ fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
+ reinterpret_cast<void*>(pc_));
+ abort();
+
+ } else {
+ OS::DebugBreak();
+ }
+ break;
+ }
+
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::DoPrintf(Instruction* instr) {
+ ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kImmExceptionIsPrintf));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t arg_count;
+ uint32_t arg_pattern_list;
+ STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&arg_count,
+ instr + kPrintfArgCountOffset,
+ sizeof(arg_count));
+ memcpy(&arg_pattern_list,
+ instr + kPrintfArgPatternListOffset,
+ sizeof(arg_pattern_list));
+
+ ASSERT(arg_count <= kPrintfMaxArgCount);
+ ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
+
+ // We need to call the host printf function with a set of arguments defined by
+ // arg_pattern_list. Because we don't know the types and sizes of the
+ // arguments, this is very difficult to do in a robust and portable way. To
+ // work around the problem, we pick apart the format string, and print one
+ // format placeholder at a time.
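+  //
+  // For example (illustrative), the format "a: %d, b: %f\n" with two
+  // arguments becomes "a: \0%d, b: \0%f\n": the literal prefix "a: " is
+  // printed first, and the chunks "%d, b: " and "%f\n" are then each
+  // printed with exactly one register argument.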
+
+ // Allocate space for the format string. We take a copy, so we can modify it.
+ // Leave enough space for one extra character per expected argument (plus the
+ // '\0' termination).
+ const char * format_base = reg<const char *>(0);
+ ASSERT(format_base != NULL);
+ size_t length = strlen(format_base) + 1;
+ char * const format = new char[length + arg_count];
+
+ // A list of chunks, each with exactly one format placeholder.
+ const char * chunks[kPrintfMaxArgCount];
+
+ // Copy the format string and search for format placeholders.
+ uint32_t placeholder_count = 0;
+ char * format_scratch = format;
+ for (size_t i = 0; i < length; i++) {
+ if (format_base[i] != '%') {
+ *format_scratch++ = format_base[i];
+ } else {
+ if (format_base[i + 1] == '%') {
+ // Ignore explicit "%%" sequences.
+ *format_scratch++ = format_base[i];
+
+ if (placeholder_count == 0) {
+ // The first chunk is passed to printf using "%s", so we need to
+ // unescape "%%" sequences in this chunk. (Just skip the next '%'.)
+ i++;
+ } else {
+ // Otherwise, pass through "%%" unchanged.
+ *format_scratch++ = format_base[++i];
+ }
+ } else {
+ CHECK(placeholder_count < arg_count);
+ // Insert '\0' before placeholders, and store their locations.
+ *format_scratch++ = '\0';
+ chunks[placeholder_count++] = format_scratch;
+ *format_scratch++ = format_base[i];
+ }
+ }
+ }
+ ASSERT(format_scratch <= (format + length + arg_count));
+ CHECK(placeholder_count == arg_count);
+
+ // Finally, call printf with each chunk, passing the appropriate register
+ // argument. Normally, printf returns the number of bytes transmitted, so we
+ // can emulate a single printf call by adding the result from each chunk. If
+ // any call returns a negative (error) value, though, just return that value.
+
+ fprintf(stream_, "%s", clr_printf);
+
+ // Because '\0' is inserted before each placeholder, the first string in
+ // 'format' contains no format placeholders and should be printed literally.
+ int result = fprintf(stream_, "%s", format);
+ int pcs_r = 1; // Start at x1. x0 holds the format string.
+ int pcs_f = 0; // Start at d0.
+ if (result >= 0) {
+ for (uint32_t i = 0; i < placeholder_count; i++) {
+ int part_result = -1;
+
+ uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
+ arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
+ switch (arg_pattern) {
+ case kPrintfArgW:
+ part_result = fprintf(stream_, chunks[i], wreg(pcs_r++));
+ break;
+ case kPrintfArgX:
+ part_result = fprintf(stream_, chunks[i], xreg(pcs_r++));
+ break;
+ case kPrintfArgD:
+ part_result = fprintf(stream_, chunks[i], dreg(pcs_f++));
+ break;
+ default: UNREACHABLE();
+ }
+
+ if (part_result < 0) {
+ // Handle error values.
+ result = part_result;
+ break;
+ }
+
+ result += part_result;
+ }
+ }
+
+ fprintf(stream_, "%s", clr_normal);
+
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+
+ // Printf returns its result in x0 (just like the C library's printf).
+ set_xreg(0, result);
+
+ // The printf parameters are inlined in the code, so skip them.
+ set_pc(instr->InstructionAtOffset(kPrintfLength));
+
+ // Set LR as if we'd just called a native printf function.
+ set_lr(pc());
+
+ delete[] format;
+}
+
+
+#endif // USE_SIMULATOR
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/simulator-arm64.h b/chromium/v8/src/arm64/simulator-arm64.h
new file mode 100644
index 00000000000..bf74de85fbf
--- /dev/null
+++ b/chromium/v8/src/arm64/simulator-arm64.h
@@ -0,0 +1,837 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_SIMULATOR_ARM64_H_
+#define V8_ARM64_SIMULATOR_ARM64_H_
+
+#include <stdarg.h>
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/globals.h"
+#include "src/utils.h"
+#include "src/allocation.h"
+#include "src/assembler.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/decoder-arm64.h"
+#include "src/arm64/disasm-arm64.h"
+#include "src/arm64/instrument-arm64.h"
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+#if !defined(USE_SIMULATOR)
+
+// Running without a simulator on a native ARM64 platform.
+// When running without a simulator, we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ (entry(p0, p1, p2, p3, p4))
+
+typedef int (*arm64_regexp_matcher)(String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type arm64_regexp_matcher.
+// The ninth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+
+// When running without a simulator, there is nothing to do.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static void UnregisterCTryCatch() { }
+};
+
+#else // !defined(USE_SIMULATOR)
+
+enum ReverseByteMode {
+ Reverse16 = 0,
+ Reverse32 = 1,
+ Reverse64 = 2
+};
+
+
+// The proper way to initialize a simulated system register (such as NZCV) is as
+// follows:
+// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
+class SimSystemRegister {
+ public:
+ // The default constructor represents a register which has no writable bits.
+ // It is not possible to set its value to anything other than 0.
+ SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
+
+ uint32_t RawValue() const {
+ return value_;
+ }
+
+ void SetRawValue(uint32_t new_value) {
+ value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, value_);
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ return signed_bitextract_32(msb, lsb, value_);
+ }
+
+ void SetBits(int msb, int lsb, uint32_t bits);
+
+ // Default system register values.
+ static SimSystemRegister DefaultValueFor(SystemRegister id);
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func, Type) \
+ Type Name() const { return static_cast<Type>(Func(HighBit, LowBit)); } \
+ void Set##Name(Type bits) { \
+ SetBits(HighBit, LowBit, static_cast<Type>(bits)); \
+ }
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+ static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
+ SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
+#undef DEFINE_WRITE_IGNORE_MASK
+#undef DEFINE_GETTER
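+
+  // As an illustration, for the NZCV N flag the list above expands to
+  // accessors roughly of the form (the exact field positions and types are
+  // defined by SYSTEM_REGISTER_FIELDS_LIST):
+  //   uint32_t N() const { return static_cast<uint32_t>(Bits(31, 31)); }
+  //   void SetN(uint32_t bits) {
+  //     SetBits(31, 31, static_cast<uint32_t>(bits));
+  //   }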
+
+ protected:
+ // Most system registers only implement a few of the bits in the word. Other
+ // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
+ // describes the bits which are not modifiable.
+ SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
+ : value_(value), write_ignore_mask_(write_ignore_mask) { }
+
+ uint32_t value_;
+ uint32_t write_ignore_mask_;
+};
+
+
+// Represents a register (r0-r31, v0-v31).
+class SimRegisterBase {
+ public:
+ template<typename T>
+ void Set(T new_value) {
+ value_ = 0;
+ memcpy(&value_, &new_value, sizeof(T));
+ }
+
+ template<typename T>
+ T Get() const {
+ T result;
+ memcpy(&result, &value_, sizeof(T));
+ return result;
+ }
+
+ protected:
+ int64_t value_;
+};
+
+
+typedef SimRegisterBase SimRegister; // r0-r31
+typedef SimRegisterBase SimFPRegister; // v0-v31
+
+
+class Simulator : public DecoderVisitor {
+ public:
+ explicit Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
+ Isolate* isolate = NULL,
+ FILE* stream = stderr);
+ Simulator();
+ ~Simulator();
+
+ // System functions.
+
+ static void Initialize(Isolate* isolate);
+
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ class CallArgument;
+
+ // Call an arbitrary function taking an arbitrary number of arguments. The
+ // varargs list must be a set of arguments with type CallArgument, and
+ // terminated by CallArgument::End().
+ void CallVoid(byte* entry, CallArgument* args);
+
+ // Like CallVoid, but expect a return value.
+ int64_t CallInt64(byte* entry, CallArgument* args);
+ double CallDouble(byte* entry, CallArgument* args);
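+
+  // A minimal usage sketch (illustrative; assumes 'entry' points to
+  // generated code taking one X-register and one D-register argument):
+  //   CallArgument args[] = { CallArgument(static_cast<int64_t>(42)),
+  //                           CallArgument(3.14),
+  //                           CallArgument::End() };
+  //   int64_t result = simulator->CallInt64(entry, args);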
+
+ // V8 calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 10 parameters. These are convenience functions,
+ // which set up the simulator state and grab the result on return.
+ int64_t CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+                 Object* recv,
+ int64_t argc,
+ Object*** argv);
+ int64_t CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+ // A wrapper class that stores an argument for one of the above Call
+ // functions.
+ //
+ // Only arguments up to 64 bits in size are supported.
+ class CallArgument {
+ public:
+ template<typename T>
+ explicit CallArgument(T argument) {
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = X_ARG;
+ }
+
+ explicit CallArgument(double argument) {
+ ASSERT(sizeof(argument) == sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ explicit CallArgument(float argument) {
+ // TODO(all): CallArgument(float) is untested, remove this check once
+ // tested.
+ UNIMPLEMENTED();
+ // Make the D register a NaN to try to trap errors if the callee expects a
+ // double. If it expects a float, the callee should ignore the top word.
+ ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
+ memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
+ // Write the float payload to the S register.
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ // This indicates the end of the arguments list, so that CallArgument
+ // objects can be passed into varargs functions.
+ static CallArgument End() { return CallArgument(); }
+
+ int64_t bits() const { return bits_; }
+ bool IsEnd() const { return type_ == NO_ARG; }
+ bool IsX() const { return type_ == X_ARG; }
+ bool IsD() const { return type_ == D_ARG; }
+
+ private:
+ enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
+
+ // All arguments are aligned to at least 64 bits and we don't support
+ // passing bigger arguments, so the payload size can be fixed at 64 bits.
+ int64_t bits_;
+ CallArgumentType type_;
+
+ CallArgument() { type_ = NO_ARG; }
+ };
+
+
+ // Start the debugging command line.
+ void Debug();
+
+ bool GetValue(const char* desc, int64_t* value);
+
+ bool PrintValue(const char* desc);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ void ResetState();
+
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
+ ExternalReference::Type type);
+ void DoRuntimeCall(Instruction* instr);
+
+ // Run the simulator.
+ static const Instruction* kEndOfSimAddress;
+ void DecodeInstruction();
+ void Run();
+ void RunFrom(Instruction* start);
+
+ // Simulation helpers.
+ template <typename T>
+ void set_pc(T new_pc) {
+ ASSERT(sizeof(T) == sizeof(pc_));
+ memcpy(&pc_, &new_pc, sizeof(T));
+ pc_modified_ = true;
+ }
+ Instruction* pc() { return pc_; }
+
+ void increment_pc() {
+ if (!pc_modified_) {
+ pc_ = pc_->following();
+ }
+
+ pc_modified_ = false;
+ }
+
+ virtual void Decode(Instruction* instr) {
+ decoder_->Decode(instr);
+ }
+
+ void ExecuteInstruction() {
+ ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
+ CheckBreakNext();
+ Decode(pc_);
+ LogProcessorState();
+ increment_pc();
+ CheckBreakpoints();
+ }
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ bool IsZeroRegister(unsigned code, Reg31Mode r31mode) const {
+ return ((code == 31) && (r31mode == Reg31IsZeroRegister));
+ }
+
+ // Register accessors.
+ // Return 'size' bits of the value of an integer register, as the specified
+ // type. The value is zero-extended to fill the result.
+ //
+ template<typename T>
+ T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ ASSERT(code < kNumberOfRegisters);
+ if (IsZeroRegister(code, r31mode)) {
+ return 0;
+ }
+ return registers_[code].Get<T>();
+ }
+
+ // Common specialized accessors for the reg() template.
+ int32_t wreg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int32_t>(code, r31mode);
+ }
+
+ int64_t xreg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(code, r31mode);
+ }
+
+  // Write 'value' into an integer register, inferring the access size from
+  // the template type. Narrow writes are zero-extended to fill the
+  // register. This behaviour matches AArch64 register writes.
+ template<typename T>
+ void set_reg(unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ ASSERT(code < kNumberOfRegisters);
+    if (!IsZeroRegister(code, r31mode)) {
+      registers_[code].Set(value);
+    }
+ }
+
+ // Common specialized accessors for the set_reg() template.
+ void set_wreg(unsigned code, int32_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(code, value, r31mode);
+ }
+
+ void set_xreg(unsigned code, int64_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(code, value, r31mode);
+ }
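+
+  // For example (illustrative): after set_wreg(0, -1) the top 32 bits of
+  // the register are cleared, so xreg(0) reads back 0x00000000ffffffff
+  // rather than a sign-extended -1, matching AArch64 W register writes.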
+
+ // Commonly-used special cases.
+ template<typename T>
+ void set_lr(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(kLinkRegCode, value);
+ }
+
+ template<typename T>
+ void set_sp(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(31, value, Reg31IsStackPointer);
+ }
+
+ int64_t sp() { return xreg(31, Reg31IsStackPointer); }
+ int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
+ int64_t fp() {
+ return xreg(kFramePointerRegCode, Reg31IsStackPointer);
+ }
+ Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
+
+ Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
+
+ template<typename T>
+ T fpreg(unsigned code) const {
+ ASSERT(code < kNumberOfRegisters);
+ return fpregisters_[code].Get<T>();
+ }
+
+ // Common specialized accessors for the fpreg() template.
+ float sreg(unsigned code) const {
+ return fpreg<float>(code);
+ }
+
+ uint32_t sreg_bits(unsigned code) const {
+ return fpreg<uint32_t>(code);
+ }
+
+ double dreg(unsigned code) const {
+ return fpreg<double>(code);
+ }
+
+ uint64_t dreg_bits(unsigned code) const {
+ return fpreg<uint64_t>(code);
+ }
+
+ double fpreg(unsigned size, unsigned code) const {
+ switch (size) {
+ case kSRegSizeInBits: return sreg(code);
+ case kDRegSizeInBits: return dreg(code);
+ default:
+ UNREACHABLE();
+ return 0.0;
+ }
+ }
+
+ // Write 'value' into a floating-point register. The value is zero-extended.
+ // This behaviour matches AArch64 register writes.
+ template<typename T>
+ void set_fpreg(unsigned code, T value) {
+ ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
+ ASSERT(code < kNumberOfFPRegisters);
+ fpregisters_[code].Set(value);
+ }
+
+ // Common specialized accessors for the set_fpreg() template.
+ void set_sreg(unsigned code, float value) {
+ set_fpreg(code, value);
+ }
+
+ void set_sreg_bits(unsigned code, uint32_t value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg(unsigned code, double value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg_bits(unsigned code, uint64_t value) {
+ set_fpreg(code, value);
+ }
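+
+  // For example (illustrative): set_sreg(0, 1.0f) clears the top 32 bits
+  // of the register, so dreg_bits(0) reads back 0x000000003f800000.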
+
+ SimSystemRegister& nzcv() { return nzcv_; }
+ SimSystemRegister& fpcr() { return fpcr_; }
+
+ // Debug helpers
+
+ // Simulator breakpoints.
+ struct Breakpoint {
+ Instruction* location;
+ bool enabled;
+ };
+ std::vector<Breakpoint> breakpoints_;
+ void SetBreakpoint(Instruction* breakpoint);
+ void ListBreakpoints();
+ void CheckBreakpoints();
+
+ // Helpers for the 'next' command.
+ // When this is set, the Simulator will insert a breakpoint after the next BL
+ // instruction it meets.
+ bool break_on_next_;
+ // Check if the Simulator should insert a break after the current instruction
+ // for the 'next' command.
+ void CheckBreakNext();
+
+  // Disassemble 'count' instructions starting at the given address.
+ void PrintInstructionsAt(Instruction* pc, uint64_t count);
+
+ void PrintSystemRegisters(bool print_all = false);
+ void PrintRegisters(bool print_all_regs = false);
+ void PrintFPRegisters(bool print_all_regs = false);
+ void PrintProcessorState();
+ void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void LogSystemRegisters() {
+ if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
+ }
+ void LogRegisters() {
+ if (log_parameters_ & LOG_REGS) PrintRegisters();
+ }
+ void LogFPRegisters() {
+ if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
+ }
+ void LogProcessorState() {
+ LogSystemRegisters();
+ LogRegisters();
+ LogFPRegisters();
+ }
+ void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
+ if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
+ }
+
+ int log_parameters() { return log_parameters_; }
+ void set_log_parameters(int new_parameters) {
+ log_parameters_ = new_parameters;
+ if (!decoder_) {
+ if (new_parameters & LOG_DISASM) {
+ PrintF("Run --debug-sim to dynamically turn on disassembler\n");
+ }
+ return;
+ }
+ if (new_parameters & LOG_DISASM) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ } else {
+ decoder_->RemoveVisitor(print_disasm_);
+ }
+ }
+
+ static inline const char* WRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* XRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* SRegNameForCode(unsigned code);
+ static inline const char* DRegNameForCode(unsigned code);
+ static inline const char* VRegNameForCode(unsigned code);
+ static inline int CodeFromName(const char* name);
+
+ protected:
+ // Simulation helpers ------------------------------------
+ bool ConditionPassed(Condition cond) {
+ SimSystemRegister& flags = nzcv();
+ switch (cond) {
+ case eq:
+ return flags.Z();
+ case ne:
+ return !flags.Z();
+ case hs:
+ return flags.C();
+ case lo:
+ return !flags.C();
+ case mi:
+ return flags.N();
+ case pl:
+ return !flags.N();
+ case vs:
+ return flags.V();
+ case vc:
+ return !flags.V();
+ case hi:
+ return flags.C() && !flags.Z();
+ case ls:
+ return !(flags.C() && !flags.Z());
+ case ge:
+ return flags.N() == flags.V();
+ case lt:
+ return flags.N() != flags.V();
+ case gt:
+ return !flags.Z() && (flags.N() == flags.V());
+ case le:
+ return !(!flags.Z() && (flags.N() == flags.V()));
+ case nv: // Fall through.
+ case al:
+ return true;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+
+ bool ConditionFailed(Condition cond) {
+ return !ConditionPassed(cond);
+ }
+
+ template<typename T>
+ void AddSubHelper(Instruction* instr, T op2);
+ template<typename T>
+ T AddWithCarry(bool set_flags,
+ T src1,
+ T src2,
+ T carry_in = 0);
+ template<typename T>
+ void AddSubWithCarry(Instruction* instr);
+ template<typename T>
+ void LogicalHelper(Instruction* instr, T op2);
+ template<typename T>
+ void ConditionalCompareHelper(Instruction* instr, T op2);
+ void LoadStoreHelper(Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
+ uint8_t* LoadStoreAddress(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStoreWriteBack(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
+
+ uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
+ uint8_t MemoryRead8(uint8_t* address);
+ uint16_t MemoryRead16(uint8_t* address);
+ uint32_t MemoryRead32(uint8_t* address);
+ float MemoryReadFP32(uint8_t* address);
+ uint64_t MemoryRead64(uint8_t* address);
+ double MemoryReadFP64(uint8_t* address);
+
+ void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void MemoryWrite32(uint8_t* address, uint32_t value);
+ void MemoryWriteFP32(uint8_t* address, float value);
+ void MemoryWrite64(uint8_t* address, uint64_t value);
+ void MemoryWriteFP64(uint8_t* address, double value);
+
+
+ template <typename T>
+ T ShiftOperand(T value,
+ Shift shift_type,
+ unsigned amount);
+ template <typename T>
+ T ExtendValue(T value,
+ Extend extend_type,
+ unsigned left_shift = 0);
+ template <typename T>
+ void Extract(Instruction* instr);
+ template <typename T>
+ void DataProcessing2Source(Instruction* instr);
+ template <typename T>
+ void BitfieldHelper(Instruction* instr);
+
+ uint64_t ReverseBits(uint64_t value, unsigned num_bits);
+ uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
+
+ template <typename T>
+ T FPDefaultNaN() const;
+
+ void FPCompare(double val0, double val1);
+ double FPRoundInt(double value, FPRounding round_mode);
+ double FPToDouble(float value);
+ float FPToFloat(double value, FPRounding round_mode);
+ double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
+ double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
+ float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
+ float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
+ int32_t FPToInt32(double value, FPRounding rmode);
+ int64_t FPToInt64(double value, FPRounding rmode);
+ uint32_t FPToUInt32(double value, FPRounding rmode);
+ uint64_t FPToUInt64(double value, FPRounding rmode);
+
+ template <typename T>
+ T FPAdd(T op1, T op2);
+
+ template <typename T>
+ T FPDiv(T op1, T op2);
+
+ template <typename T>
+ T FPMax(T a, T b);
+
+ template <typename T>
+ T FPMaxNM(T a, T b);
+
+ template <typename T>
+ T FPMin(T a, T b);
+
+ template <typename T>
+ T FPMinNM(T a, T b);
+
+ template <typename T>
+ T FPMul(T op1, T op2);
+
+ template <typename T>
+ T FPMulAdd(T a, T op1, T op2);
+
+ template <typename T>
+ T FPSqrt(T op);
+
+ template <typename T>
+ T FPSub(T op1, T op2);
+
+ // Standard NaN processing.
+ template <typename T>
+ T FPProcessNaN(T op);
+
+ bool FPProcessNaNs(Instruction* instr);
+
+ template <typename T>
+ T FPProcessNaNs(T op1, T op2);
+
+ template <typename T>
+ T FPProcessNaNs3(T op1, T op2, T op3);
+
+ void CheckStackAlignment();
+
+ inline void CheckPCSComplianceAndRun();
+
+#ifdef DEBUG
+ // Corruption values should have their least significant byte cleared to
+ // allow the code of the register being corrupted to be inserted.
+ static const uint64_t kCallerSavedRegisterCorruptionValue =
+ 0xca11edc0de000000UL;
+ // This value is a NaN in both 32-bit and 64-bit FP.
+ static const uint64_t kCallerSavedFPRegisterCorruptionValue =
+ 0x7ff000007f801000UL;
+ // This value is a mix of 32/64-bits NaN and "verbose" immediate.
+ static const uint64_t kDefaultCPURegisterCorruptionValue =
+ 0x7ffbad007f8bad00UL;
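+
+  // For example (assuming the register code is OR'd into the low byte),
+  // corrupting x13 with the default value above would leave it holding
+  // 0x7ffbad007f8bad0d.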
+
+ void CorruptRegisters(CPURegList* list,
+ uint64_t value = kDefaultCPURegisterCorruptionValue);
+ void CorruptAllCallerSavedCPURegisters();
+#endif
+
+ // Pseudo Printf instruction
+ void DoPrintf(Instruction* instr);
+
+ // Processor state ---------------------------------------
+
+ // Output stream.
+ FILE* stream_;
+ PrintDisassembler* print_disasm_;
+ void PRINTF_METHOD_CHECKING TraceSim(const char* format, ...);
+
+ // Instrumentation.
+ Instrument* instrument_;
+
+ // General purpose registers. Register 31 is the stack pointer.
+ SimRegister registers_[kNumberOfRegisters];
+
+ // Floating point registers
+ SimFPRegister fpregisters_[kNumberOfFPRegisters];
+
+ // Processor state
+ // bits[31, 27]: Condition flags N, Z, C, and V.
+ // (Negative, Zero, Carry, Overflow)
+ SimSystemRegister nzcv_;
+
+ // Floating-Point Control Register
+ SimSystemRegister fpcr_;
+
+ // Only a subset of FPCR features are supported by the simulator. This helper
+ // checks that the FPCR settings are supported.
+ //
+ // This is checked when floating-point instructions are executed, not when
+ // FPCR is set. This allows generated code to modify FPCR for external
+ // functions, or to save and restore it when entering and leaving generated
+ // code.
+ void AssertSupportedFPCR() {
+ ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
+ ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
+
+ // The simulator does not support half-precision operations so fpcr().AHP()
+ // is irrelevant, and is not checked here.
+ }
+
+ template <typename T>
+ static int CalcNFlag(T result) {
+ return (result >> (sizeof(T) * 8 - 1)) & 1;
+ }
+
+ static int CalcZFlag(uint64_t result) {
+ return result == 0;
+ }
+
+ static const uint32_t kConditionFlagsMask = 0xf0000000;
+
+ // Stack
+ byte* stack_;
+ static const intptr_t stack_protection_size_ = KB;
+ intptr_t stack_size_;
+ byte* stack_limit_;
+
+ Decoder<DispatchingDecoderVisitor>* decoder_;
+ Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;
+
+ // Indicates if the pc has been modified by the instruction and should not be
+ // automatically incremented.
+ bool pc_modified_;
+ Instruction* pc_;
+
+ static const char* xreg_names[];
+ static const char* wreg_names[];
+ static const char* sreg_names[];
+ static const char* dreg_names[];
+ static const char* vreg_names[];
+
+ // Debugger input.
+ void set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+ }
+ char* last_debugger_input() { return last_debugger_input_; }
+ char* last_debugger_input_;
+
+ private:
+ void Init(FILE* stream);
+
+ int log_parameters_;
+ Isolate* isolate_;
+};
+
+
+// When running with the simulator, transition into simulated execution at
+// this point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
+ FUNCTION_ADDR(entry), \
+ p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ Simulator::current(Isolate::Current())->CallRegExp( \
+ entry, \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.
+// See also 'class SimulatorStack' in arm/simulator-arm.h.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
+ }
+
+ static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(Isolate::Current());
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static void UnregisterCTryCatch() {
+ Simulator::current(Isolate::Current())->PopAddress();
+ }
+};
+
+#endif // !defined(USE_SIMULATOR)
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_SIMULATOR_ARM64_H_
diff --git a/chromium/v8/src/arm64/stub-cache-arm64.cc b/chromium/v8/src/arm64/stub-cache-arm64.cc
new file mode 100644
index 00000000000..b0f58fda229
--- /dev/null
+++ b/chromium/v8/src/arm64/stub-cache-arm64.cc
@@ -0,0 +1,1477 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/ic-inl.h"
+#include "src/codegen.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(!AreAliased(receiver, scratch0, scratch1));
+ ASSERT(name->IsUniqueName());
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
+ __ B(ne, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
+
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
+ __ Bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+// Probe primary or secondary table.
+// If the entry is found in the cache, the generated code jumps to the first
+// instruction of the stub in the cache.
+// If there is a miss, the code falls through.
+//
+// 'receiver', 'name' and 'offset' registers are preserved on miss.
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register receiver,
+ Register name,
+ Register offset,
+ Register scratch,
+ Register scratch2,
+ Register scratch3) {
+ // Some code below relies on the fact that the Entry struct contains
+ // 3 pointers (name, code, map).
+ STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
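+
+  // Conceptually, each table entry is laid out as (illustrative):
+  //   struct Entry { Name* key; Code* value; Map* map; };
+  // so 'offset' is scaled by 3 below, and the value and map slots are
+  // reached via their fixed displacements from the key slot.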
+
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+ uintptr_t value_off_addr =
+ reinterpret_cast<uintptr_t>(value_offset.address());
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+ Label miss;
+
+ ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
+
+ // Multiply by 3 because there are 3 fields per entry.
+ __ Add(scratch3, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ Mov(scratch, key_offset);
+ __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
+
+ // Check that the key in the entry matches the name.
+ __ Ldr(scratch2, MemOperand(scratch));
+ __ Cmp(name, scratch2);
+ __ B(ne, &miss);
+
+ // Check the map matches.
+ __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
+ __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Cmp(scratch2, scratch3);
+ __ B(ne, &miss);
+
+ // Get the code entry from the cache.
+ __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
+ __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
+ __ Cmp(scratch2.W(), flags);
+ __ B(ne, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ B(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ B(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(scratch);
+
+ // Miss: fall through.
+ __ Bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra,
+ Register extra2,
+ Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+  // Make sure the flags value does not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+ // Make sure extra and extra2 registers are valid.
+ ASSERT(!extra.is(no_reg));
+ ASSERT(!extra2.is(no_reg));
+ ASSERT(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+ extra2, extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Compute the hash for primary table.
+ __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Add(scratch, scratch, extra);
+ __ Eor(scratch, scratch, flags);
+ // We shift out the last two bits because they are not part of the hash.
+ __ Ubfx(scratch, scratch, kHeapObjectTagSize,
+ CountTrailingZeros(kPrimaryTableSize, 64));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary table.
+ __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+ __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
+ __ And(scratch, scratch, kSecondaryTableSize - 1);
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+ extra2, extra3);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ Ldr(prototype, GlobalObjectMemOperand());
+ // Load the native context from the global or builtins object.
+ __ Ldr(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ __ Ldr(prototype, ContextMemOperand(prototype, index));
+ // Load the initial map. The global functions all have initial maps.
+ __ Ldr(prototype,
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ __ Ldr(scratch, GlobalObjectMemOperand());
+ __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ Ldr(scratch, ContextMemOperand(scratch, index));
+ __ Cmp(scratch, Operand(function));
+ __ B(ne, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!representation.IsDouble());
+ USE(representation);
+ if (inobject) {
+ int offset = index * kPointerSize;
+ __ Ldr(dst, FieldMemOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ Ldr(dst, FieldMemOperand(dst, offset));
+ }
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ ASSERT(!AreAliased(receiver, scratch));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss_label);
+
+ // Check that the object is a JS array.
+ __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
+ miss_label);
+
+ // Load length directly from the JS array.
+ __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  // TryGetFunctionPrototype can't put the result directly in x0 because its
+  // three input registers can't alias, and we call this function from
+  // LoadIC::GenerateFunctionPrototype, where the receiver is x0. So we
+  // explicitly move the result into x0.
+ __ Mov(x0, scratch1);
+ __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ Mov(scratch, Operand(cell));
+ __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
+}
+
+
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// Generate StoreTransition code; the value is passed in the x0 register.
+// When leaving generated code after success, the receiver_reg and storage_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name registers
+// have their original values.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
+ Label exit;
+
+ ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
+ scratch1, scratch2, scratch3));
+
+ // We don't need scratch3.
+ scratch3 = NoReg;
+
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ LoadObject(scratch1, constant);
+ __ Cmp(value_reg, scratch1);
+ __ B(ne, miss_label);
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ __ CompareMap(scratch1, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ B(ne, miss_label);
+ break;
+ }
+ __ B(eq, &do_store);
+ }
+ __ Bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ UseScratchRegisterScope temps(masm);
+ DoubleRegister temp_double = temps.AcquireD();
+ __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
+
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ Bind(&do_store);
+ __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double);
+ }
+
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if ((details.type() == FIELD) &&
+ (object->map()->unused_property_fields() == 0)) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ Mov(scratch1, Operand(transition));
+ __ Push(receiver_reg, scratch1, value_reg);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ Mov(scratch1, Operand(transition));
+ __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties are not going to change.
+ index -= object->map()->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array.
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Str(prop_reg, FieldMemOperand(scratch1, offset));
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ __ Bind(&exit);
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+}
+
+
+// Generate StoreField code; the value is passed in the x0 register.
+// When leaving generated code after success, the receiver_reg and name_reg may
+// be clobbered. Upon branch to miss_label, the receiver and name registers have
+// their original values.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // x0 : value
+ Label exit;
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ FieldIndex index = lookup->GetFieldIndex();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ __ CompareMap(scratch1, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ B(ne, miss_label);
+ break;
+ }
+ __ B(eq, &do_store);
+ }
+ __ Bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ UseScratchRegisterScope temps(masm);
+ DoubleRegister temp_double = temps.AcquireD();
+
+ __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
+
+ // Load the double storage.
+ if (index.is_inobject()) {
+ __ Ldr(scratch1, FieldMemOperand(receiver_reg, index.offset()));
+ } else {
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Ldr(scratch1, FieldMemOperand(scratch1, index.offset()));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+
+ __ JumpIfSmi(value_reg, &do_store);
+
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ Bind(&do_store);
+ __ Str(temp_double, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+ return;
+ }
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index.is_inobject()) {
+ // Set the property straight into the object.
+ __ Str(value_reg, FieldMemOperand(receiver_reg, index.offset()));
+
+ if (!representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ Mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ index.offset(),
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+    // Get the properties array.
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Str(value_reg, FieldMemOperand(scratch1, index.offset()));
+
+ if (!representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ Mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ index.offset(),
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ __ Bind(&exit);
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+}
+
+
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ Bind(label);
+ __ Mov(this->name(), Operand(name));
+ }
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
+
+ __ Push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ Mov(scratch, Operand(interceptor));
+ __ Push(scratch, receiver, holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
+}
+
+
+// Generate a call to an API function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!AreAliased(receiver, scratch));
+
+ MacroAssembler::PushPopQueue queue(masm);
+ queue.Queue(receiver);
+ // Write the arguments to the stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!AreAliased(receiver, scratch, arg));
+ queue.Queue(arg);
+ }
+ queue.PushQueued();
+
+ ASSERT(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Mov(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ LoadObject(call_data, api_call_info);
+ __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ LoadObject(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference ref = ExternalReference(&fun,
+ ExternalReference::DIRECT_API_CALL,
+ masm->isolate());
+ __ Mov(api_function_address, ref);
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Handle<Name> name,
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
+
+ // object_reg and holder_reg registers can alias.
+ ASSERT(!AreAliased(object_reg, scratch1, scratch2));
+ ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+  // Traverse the prototype chain, checking the maps for fast and global
+  // objects or doing a negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ ASSERT(current.is_null() ||
+ (current->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound));
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
+
+ __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ bool need_map = (depth != 1 || check == CHECK_ALL_MAPS) ||
+ heap()->InNewSpace(*prototype);
+ Register map_reg = NoReg;
+ if (need_map) {
+ map_reg = scratch1;
+ __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ if (current_map->IsJSGlobalProxyMap()) {
+ UseScratchRegisterScope temps(masm());
+ __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (heap()->InNewSpace(*prototype)) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ Mov(reg, Operand(prototype));
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ // Check the holder map.
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ __ Bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> callback) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+  // HandlerFrontendHeader can return its result in scratch1(), so do not
+  // use scratch1() here.
+ Register scratch2 = this->scratch2();
+ Register scratch3 = this->scratch3();
+ Register dictionary = this->scratch4();
+ ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary));
+
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ // Load the properties dictionary.
+ __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2,
+ scratch3);
+ __ Bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3;
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
+ __ Cmp(scratch2, Operand(callback));
+ __ B(ne, &miss);
+ }
+
+ HandlerFrontendFooter(name, &miss);
+ return reg;
+}
+
+
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ FieldIndex field,
+ Representation representation) {
+ __ Mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
+ } else {
+ KeyedLoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
+ }
+}
+
+
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ LoadObject(x0, value);
+ __ Ret();
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
+ ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build the ExecutableAccessorInfo::args_ list on the stack and push the
+  // property name below the exit frame to make the GC aware of them and to
+  // store pointers to them.
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+
+ __ Push(receiver());
+
+ if (heap()->InNewSpace(callback->data())) {
+ __ Mov(scratch3(), Operand(callback));
+ __ Ldr(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
+ }
+ __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
+ __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
+
+ Register args_addr = scratch2();
+ __ Add(args_addr, __ StackPointer(), kPointerSize);
+
+ // Stack at this point:
+ // sp[40] callback data
+ // sp[32] undefined
+ // sp[24] undefined
+ // sp[16] isolate
+ // args_addr -> sp[8] reg
+ // sp[0] name
+
+  // ABI for CallApiGetter.
+ Register getter_address_reg = x2;
+
+ // Set up the call.
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ Mov(getter_address_reg, ref);
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<Object> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ ASSERT(!AreAliased(receiver(), this->name(),
+ scratch1(), scratch2(), scratch3()));
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // So far the most common follow-ups for interceptor loads are FIELD and
+  // CALLBACKS, so only those are inlined here; other cases may be added
+  // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsFound() && lookup->IsCacheable()) {
+ if (lookup->IsField()) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+    // Preserve the receiver register explicitly whenever it is different
+    // from the holder and it is needed in case the interceptor returns
+    // without a result. The CALLBACKS case needs the receiver to be passed
+    // into C++ code; the FIELD case might cause a miss during the prototype
+    // check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
+ }
+      // Invoke the interceptor. Note: the map checks from the receiver to
+      // the interceptor's holder have already been compiled (see the caller
+      // of this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
+      // Check if the interceptor provided a value for the property. If so,
+      // return immediately.
+ Label interceptor_failed;
+ __ JumpIfRoot(x0,
+ Heap::kNoInterceptorResultSentinelRootIndex,
+ &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ Bind(&interceptor_failed);
+ if (must_preserve_receiver_reg) {
+ __ Pop(this->name(), holder_reg, receiver());
+ } else {
+ __ Pop(this->name(), holder_reg);
+ }
+ // Leave the internal frame.
+ }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ PushInterceptorArguments(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
+ isolate());
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
+ }
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+
+ // receiver() and holder_reg can alias.
+ ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value()));
+ ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
+ __ Mov(scratch1(), Operand(callback));
+ __ Mov(scratch2(), Operand(name));
+ __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ Push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ Pop(x0);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name) {
+ Label miss;
+
+ ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
+
+ __ Push(receiver(), this->name(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
+
+  // Return undefined if the maps of the full prototype chain are still the
+  // same and no global property with this name contains a value.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+// TODO(all): The so-called scratch registers are significant in some cases. For
+// example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for
+// KeyedStoreCompiler::transition_map(). We should verify which registers are
+// actually scratch registers, and which are important. For now, we use the same
+// assignments as ARM to remain on the safe side.
+
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { x0, x2, x3, x1, x4, x5 };
+ return registers;
+}
+
+
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { x1, x0, x2, x3, x4, x5 };
+ return registers;
+}
+
+
+Register StoreStubCompiler::value() {
+ return x0;
+}
+
+
+Register* StoreStubCompiler::registers() {
+ // receiver, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { x1, x2, x3, x4, x5 };
+ return registers;
+}
+
+
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { x2, x1, x3, x4, x5 };
+ return registers;
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<HeapType> type,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete) {
+ Label miss;
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
+
+ // Get the value from the cell.
+ __ Mov(x3, Operand(cell));
+ __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset));
+
+  // Check for a deleted property if the property can actually be deleted.
+ if (!is_dont_delete) {
+ __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+ __ Mov(x0, x4);
+ __ Ret();
+
+ HandlerFrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+ TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ Register map_reg = scratch1();
+ __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ Label try_next;
+ __ Cmp(map_reg, Operand(map));
+ __ B(ne, &try_next);
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ Bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
+ __ Bind(&try_next);
+ }
+ }
+ ASSERT(number_of_handled_maps != 0);
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
+}
+
+
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+
+ ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
+
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; i++) {
+ __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
+
+ Label skip;
+ __ B(&skip, ne);
+ if (!transitioned_maps->at(i).is_null()) {
+ // This argument is used by the handler stub. For example, see
+ // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
+ __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+ }
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ Bind(&skip);
+ }
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow, miss;
+
+ Register result = x0;
+ Register key = x0;
+ Register receiver = x1;
+
+ __ JumpIfNotSmi(key, &miss);
+ __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
+ __ Ret();
+
+ __ Bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ // Miss case, call the runtime.
+ __ Bind(&miss);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/utils-arm64.cc b/chromium/v8/src/arm64/utils-arm64.cc
new file mode 100644
index 00000000000..0cb4ea5e74f
--- /dev/null
+++ b/chromium/v8/src/arm64/utils-arm64.cc
@@ -0,0 +1,89 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/arm64/utils-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+#define __ assm->
+
+
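+// Counts the leading zero bits in the low 'width' bits of 'value'.
+// For example, CountLeadingZeros(0x00ff000000000000, 64) returns 8, and
+// CountLeadingZeros(0, width) returns width.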
+int CountLeadingZeros(uint64_t value, int width) {
+ // TODO(jbramley): Optimize this for ARM64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+  uint64_t bit_test = 1ULL << (width - 1);
+ while ((count < width) && ((bit_test & value) == 0)) {
+ count++;
+ bit_test >>= 1;
+ }
+ return count;
+}
+
+
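+// Counts the bits that match the sign bit, excluding the sign bit itself.
+// For example, CountLeadingSignBits(-1, 64) and CountLeadingSignBits(0, 64)
+// both return 63.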
+int CountLeadingSignBits(int64_t value, int width) {
+ // TODO(jbramley): Optimize this for ARM64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ if (value >= 0) {
+ return CountLeadingZeros(value, width) - 1;
+ } else {
+ return CountLeadingZeros(~value, width) - 1;
+ }
+}
+
+
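+// For example, CountTrailingZeros(0x8, 64) returns 3, and
+// CountTrailingZeros(0, width) returns width.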
+int CountTrailingZeros(uint64_t value, int width) {
+ // TODO(jbramley): Optimize this for ARM64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+ while ((count < width) && (((value >> count) & 1) == 0)) {
+ count++;
+ }
+ return count;
+}
+
+
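+// Population count. For example, CountSetBits(0xff00, 32) returns 8.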
+int CountSetBits(uint64_t value, int width) {
+ // TODO(jbramley): Would it be useful to allow other widths? The
+ // implementation already supports them.
+ ASSERT((width == 32) || (width == 64));
+
+ // Mask out unused bits to ensure that they are not counted.
+  value &= (0xffffffffffffffffULL >> (64 - width));
+
+ // Add up the set bits.
+ // The algorithm works by adding pairs of bit fields together iteratively,
+ // where the size of each bit field doubles each time.
+ // An example for an 8-bit value:
+ // Bits: h g f e d c b a
+ // \ | \ | \ | \ |
+ // value = h+g f+e d+c b+a
+ // \ | \ |
+ // value = h+g+f+e d+c+b+a
+ // \ |
+ // value = h+g+f+e+d+c+b+a
+ value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
+ value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
+ value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
+ value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
+ value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
+ value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
+
+ return value;
+}
+
+
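+// Returns the bit position of a single-bit mask.
+// For example, MaskToBit(0x80) returns 7.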
+int MaskToBit(uint64_t mask) {
+ ASSERT(CountSetBits(mask, 64) == 1);
+ return CountTrailingZeros(mask, 64);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/arm64/utils-arm64.h b/chromium/v8/src/arm64/utils-arm64.h
new file mode 100644
index 00000000000..b9407102059
--- /dev/null
+++ b/chromium/v8/src/arm64/utils-arm64.h
@@ -0,0 +1,112 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_UTILS_ARM64_H_
+#define V8_ARM64_UTILS_ARM64_H_
+
+#include <cmath>
+#include "src/v8.h"
+#include "src/arm64/constants-arm64.h"
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+// These are global assumptions in v8.
+STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
+STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
+
+// Floating point representation.
+static inline uint32_t float_to_rawbits(float value) {
+ uint32_t bits = 0;
+ memcpy(&bits, &value, 4);
+ return bits;
+}
+
+
+static inline uint64_t double_to_rawbits(double value) {
+ uint64_t bits = 0;
+ memcpy(&bits, &value, 8);
+ return bits;
+}
+
+
+static inline float rawbits_to_float(uint32_t bits) {
+ float value = 0.0;
+ memcpy(&value, &bits, 4);
+ return value;
+}
+
+
+static inline double rawbits_to_double(uint64_t bits) {
+ double value = 0.0;
+ memcpy(&value, &bits, 8);
+ return value;
+}
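+// For example, float_to_rawbits(1.0f) returns 0x3f800000 and
+// rawbits_to_float(0x3f800000) returns 1.0f.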
+
+
+// Bit counting.
+int CountLeadingZeros(uint64_t value, int width);
+int CountLeadingSignBits(int64_t value, int width);
+int CountTrailingZeros(uint64_t value, int width);
+int CountSetBits(uint64_t value, int width);
+int MaskToBit(uint64_t mask);
+
+
+// NaN tests.
+inline bool IsSignallingNaN(double num) {
+ uint64_t raw = double_to_rawbits(num);
+ if (std::isnan(num) && ((raw & kDQuietNanMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+ uint32_t raw = float_to_rawbits(num);
+ if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
+ return true;
+ }
+ return false;
+}
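+// For example, the single-precision value with raw bits 0x7f800001 is a
+// signalling NaN: the exponent is all ones, the payload is non-zero, and
+// the quiet bit (kSQuietNanMask) is clear.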
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+ return std::isnan(num) && !IsSignallingNaN(num);
+}
+
+
+// Convert the NaN in 'num' to a quiet NaN.
+inline double ToQuietNaN(double num) {
+  ASSERT(std::isnan(num));
+ return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
+}
+
+
+inline float ToQuietNaN(float num) {
+  ASSERT(std::isnan(num));
+ return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
+}
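+// For example, assuming kDQuietNanMask is the double quiet-NaN bit, the
+// signalling NaN with raw bits 0x7ff0000000000001 maps to the quiet NaN
+// 0x7ff8000000000001.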
+
+
+// Fused multiply-add.
+inline double FusedMultiplyAdd(double op1, double op2, double a) {
+  return std::fma(op1, op2, a);
+}
+
+
+inline float FusedMultiplyAdd(float op1, float op2, float a) {
+  return std::fmaf(op1, op2, a);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_UTILS_ARM64_H_
diff --git a/chromium/v8/src/array-iterator.js b/chromium/v8/src/array-iterator.js
index a8c5e001c48..9511b6d95a4 100644
--- a/chromium/v8/src/array-iterator.js
+++ b/chromium/v8/src/array-iterator.js
@@ -1,71 +1,65 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
'use strict';
+
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $Array = global.Array;
-var ARRAY_ITERATOR_KIND_KEYS = 1;
-var ARRAY_ITERATOR_KIND_VALUES = 2;
-var ARRAY_ITERATOR_KIND_ENTRIES = 3;
-// The spec draft also has "sparse" but it is never used.
-var iteratorObjectSymbol = NEW_PRIVATE("iterator_object");
-var arrayIteratorNextIndexSymbol = NEW_PRIVATE("iterator_next");
-var arrayIterationKindSymbol = NEW_PRIVATE("iterator_kind");
+var arrayIteratorObjectSymbol = GLOBAL_PRIVATE("ArrayIterator#object");
+var arrayIteratorNextIndexSymbol = GLOBAL_PRIVATE("ArrayIterator#next");
+var arrayIterationKindSymbol = GLOBAL_PRIVATE("ArrayIterator#kind");
+
function ArrayIterator() {}
+
+// TODO(wingo): Update section numbers when ES6 has stabilized. The
+// section numbers below are already out of date as of the May 2014
+// draft.
+
+
// 15.4.5.1 CreateArrayIterator Abstract Operation
function CreateArrayIterator(array, kind) {
var object = ToObject(array);
var iterator = new ArrayIterator;
- SET_PRIVATE(iterator, iteratorObjectSymbol, object);
+ SET_PRIVATE(iterator, arrayIteratorObjectSymbol, object);
SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
SET_PRIVATE(iterator, arrayIterationKindSymbol, kind);
return iterator;
}
+
// 15.19.4.3.4 CreateIterResultObject
function CreateIteratorResultObject(value, done) {
return {value: value, done: done};
}
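+// For example, CreateIteratorResultObject(1, false) yields
+// {value: 1, done: false}.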
+
+// 22.1.5.2.2 %ArrayIteratorPrototype%[@@iterator]
+function ArrayIteratorIterator() {
+ return this;
+}
+
+
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
var iterator = ToObject(this);
- var array = GET_PRIVATE(iterator, iteratorObjectSymbol);
- if (!array) {
+
+ if (!HAS_PRIVATE(iterator, arrayIteratorObjectSymbol)) {
throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']);
}
+ var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol);
+ if (IS_UNDEFINED(array)) {
+ return CreateIteratorResultObject(UNDEFINED, true);
+ }
+
var index = GET_PRIVATE(iterator, arrayIteratorNextIndexSymbol);
var itemKind = GET_PRIVATE(iterator, arrayIterationKindSymbol);
var length = TO_UINT32(array.length);
@@ -73,46 +67,55 @@ function ArrayIteratorNext() {
// "sparse" is never used.
if (index >= length) {
- SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, INFINITY);
+ SET_PRIVATE(iterator, arrayIteratorObjectSymbol, UNDEFINED);
return CreateIteratorResultObject(UNDEFINED, true);
}
SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, index + 1);
- if (itemKind == ARRAY_ITERATOR_KIND_VALUES)
+ if (itemKind == ITERATOR_KIND_VALUES) {
return CreateIteratorResultObject(array[index], false);
+ }
- if (itemKind == ARRAY_ITERATOR_KIND_ENTRIES)
+ if (itemKind == ITERATOR_KIND_ENTRIES) {
return CreateIteratorResultObject([index, array[index]], false);
+ }
return CreateIteratorResultObject(index, false);
}
+
function ArrayEntries() {
- return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_ENTRIES);
+ return CreateArrayIterator(this, ITERATOR_KIND_ENTRIES);
}
+
function ArrayValues() {
- return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_VALUES);
+ return CreateArrayIterator(this, ITERATOR_KIND_VALUES);
}
+
function ArrayKeys() {
- return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_KEYS);
+ return CreateArrayIterator(this, ITERATOR_KIND_KEYS);
}
+
function SetUpArrayIterator() {
%CheckIsBootstrapping();
+ %FunctionSetPrototype(ArrayIterator, new $Object());
%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
- %FunctionSetReadOnlyPrototype(ArrayIterator);
InstallFunctions(ArrayIterator.prototype, DONT_ENUM, $Array(
'next', ArrayIteratorNext
));
+ %FunctionSetName(ArrayIteratorIterator, '[Symbol.iterator]');
+ %SetProperty(ArrayIterator.prototype, symbolIterator, ArrayIteratorIterator,
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
}
-
SetUpArrayIterator();
+
function ExtendArrayPrototype() {
%CheckIsBootstrapping();
@@ -122,5 +125,4 @@ function ExtendArrayPrototype() {
'keys', ArrayKeys
));
}
-
ExtendArrayPrototype();
diff --git a/chromium/v8/src/array.js b/chromium/v8/src/array.js
index 26bf7282e18..ef7aae4774f 100644
--- a/chromium/v8/src/array.js
+++ b/chromium/v8/src/array.js
@@ -1,29 +1,8 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
// This file relies on the fact that the following declarations have been made
// in runtime.js:
@@ -66,7 +45,7 @@ function GetSortedArrayKeys(array, indices) {
}
-function SparseJoinWithSeparator(array, len, convert, separator) {
+function SparseJoinWithSeparatorJS(array, len, convert, separator) {
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
var totalLength = 0;
var elements = new InternalArray(keys.length * 2);
@@ -132,7 +111,7 @@ function Join(array, length, separator, convert) {
if (separator.length == 0) {
return SparseJoin(array, length, convert);
} else {
- return SparseJoinWithSeparator(array, length, convert, separator);
+ return SparseJoinWithSeparatorJS(array, length, convert, separator);
}
}
@@ -292,7 +271,7 @@ function SmartMove(array, start_i, del_count, len, num_additional_args) {
function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = 0; i < del_count; i++) {
var index = start_i + i;
- // The spec could also be interpreted such that %HasLocalProperty
+ // The spec could also be interpreted such that %HasOwnProperty
// would be the appropriate test. We follow KJS in consulting the
// prototype.
var current = array[index];
@@ -312,7 +291,7 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
var from_index = i + del_count - 1;
var to_index = i + num_additional_args - 1;
// The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
+ // %HasOwnProperty would be the appropriate test. We follow
// KJS in consulting the prototype.
var current = array[from_index];
if (!IS_UNDEFINED(current) || from_index in array) {
@@ -326,7 +305,7 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
var from_index = i + del_count;
var to_index = i + num_additional_args;
// The spec could also be interpreted such that
- // %HasLocalProperty would be the appropriate test. We follow
+ // %HasOwnProperty would be the appropriate test. We follow
// KJS in consulting the prototype.
var current = array[from_index];
if (!IS_UNDEFINED(current) || from_index in array) {
@@ -376,22 +355,20 @@ function ArrayToLocaleString() {
function ArrayJoin(separator) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.join"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
- var length = TO_UINT32(this.length);
+ var array = TO_OBJECT_INLINE(this);
+ var length = TO_UINT32(array.length);
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
separator = NonStringToString(separator);
}
- var result = %_FastAsciiArrayJoin(this, separator);
+ var result = %_FastAsciiArrayJoin(array, separator);
if (!IS_UNDEFINED(result)) return result;
- return Join(this, length, separator, ConvertToString);
+ return Join(array, length, separator, ConvertToString);
}
@@ -414,29 +391,22 @@ function ObservedArrayPop(n) {
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.pop"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop");
- var n = TO_UINT32(this.length);
+ var array = TO_OBJECT_INLINE(this);
+ var n = TO_UINT32(array.length);
if (n == 0) {
- this.length = n;
+ array.length = n;
return;
}
- if ($Object.isSealed(this)) {
- throw MakeTypeError("array_functions_change_sealed",
- ["Array.prototype.pop"]);
- }
-
- if (%IsObserved(this))
- return ObservedArrayPop.call(this, n);
+ if (%IsObserved(array))
+ return ObservedArrayPop.call(array, n);
n--;
- var value = this[n];
- Delete(this, ToName(n), true);
- this.length = n;
+ var value = array[n];
+ Delete(array, ToName(n), true);
+ array.length = n;
return value;
}
@@ -450,49 +420,45 @@ function ObservedArrayPush() {
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
}
- this.length = n + m;
+ var new_length = n + m;
+ this.length = new_length;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, n, [], m);
}
- return this.length;
+ return new_length;
}
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.push"]);
- }
-
- var n = TO_UINT32(this.length);
- var m = %_ArgumentsLength();
- if (m > 0 && $Object.isSealed(this)) {
- throw MakeTypeError("array_functions_change_sealed",
- ["Array.prototype.push"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.push");
if (%IsObserved(this))
return ObservedArrayPush.apply(this, arguments);
+ var array = TO_OBJECT_INLINE(this);
+ var n = TO_UINT32(array.length);
+ var m = %_ArgumentsLength();
+
for (var i = 0; i < m; i++) {
- this[i+n] = %_Arguments(i);
+ // Use SetProperty rather than a direct keyed store to ensure that the store
+    // site doesn't become poisoned with an elements transition KeyedStoreIC.
+ %SetProperty(array, i+n, %_Arguments(i), 0, kStrictMode);
}
- this.length = n + m;
- return this.length;
+
+ var new_length = n + m;
+ array.length = new_length;
+ return new_length;
}
// Returns an array containing the array elements of the object followed
// by the array elements of each argument in order. See ECMA-262,
// section 15.4.4.7.
-function ArrayConcat(arg1) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.concat"]);
- }
+function ArrayConcatJS(arg1) { // length == 1
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.concat");
var array = ToObject(this);
var arg_count = %_ArgumentsLength();
@@ -551,38 +517,36 @@ function SparseReverse(array, len) {
function ArrayReverse() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.reverse"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
- var j = TO_UINT32(this.length) - 1;
+ var array = TO_OBJECT_INLINE(this);
+ var j = TO_UINT32(array.length) - 1;
- if (UseSparseVariant(this, j, IS_ARRAY(this))) {
- SparseReverse(this, j+1);
- return this;
+ if (UseSparseVariant(array, j, IS_ARRAY(array))) {
+ SparseReverse(array, j+1);
+ return array;
}
for (var i = 0; i < j; i++, j--) {
- var current_i = this[i];
- if (!IS_UNDEFINED(current_i) || i in this) {
- var current_j = this[j];
- if (!IS_UNDEFINED(current_j) || j in this) {
- this[i] = current_j;
- this[j] = current_i;
+ var current_i = array[i];
+ if (!IS_UNDEFINED(current_i) || i in array) {
+ var current_j = array[j];
+ if (!IS_UNDEFINED(current_j) || j in array) {
+ array[i] = current_j;
+ array[j] = current_i;
} else {
- this[j] = current_i;
- delete this[i];
+ array[j] = current_i;
+ delete array[i];
}
} else {
- var current_j = this[j];
- if (!IS_UNDEFINED(current_j) || j in this) {
- this[i] = current_j;
- delete this[j];
+ var current_j = array[j];
+ if (!IS_UNDEFINED(current_j) || j in array) {
+ array[i] = current_j;
+ delete array[j];
}
}
}
- return this;
+ return array;
}
@@ -602,35 +566,33 @@ function ObservedArrayShift(len) {
}
function ArrayShift() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.shift"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
- var len = TO_UINT32(this.length);
+ var array = TO_OBJECT_INLINE(this);
+ var len = TO_UINT32(array.length);
if (len === 0) {
- this.length = 0;
+ array.length = 0;
return;
}
- if ($Object.isSealed(this)) {
+ if (ObjectIsSealed(array)) {
throw MakeTypeError("array_functions_change_sealed",
["Array.prototype.shift"]);
}
- if (%IsObserved(this))
- return ObservedArrayShift.call(this, len);
+ if (%IsObserved(array))
+ return ObservedArrayShift.call(array, len);
- var first = this[0];
+ var first = array[0];
- if (IS_ARRAY(this)) {
- SmartMove(this, 0, 1, len, 0);
+ if (IS_ARRAY(array)) {
+ SmartMove(array, 0, 1, len, 0);
} else {
- SimpleMove(this, 0, 1, len, 0);
+ SimpleMove(array, 0, 1, len, 0);
}
- this.length = len - 1;
+ array.length = len - 1;
return first;
}
@@ -645,67 +607,48 @@ function ObservedArrayUnshift() {
for (var i = 0; i < num_arguments; i++) {
this[i] = %_Arguments(i);
}
- this.length = len + num_arguments;
+ var new_length = len + num_arguments;
+ this.length = new_length;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, 0, [], num_arguments);
}
- return len + num_arguments;
+ return new_length;
}
function ArrayUnshift(arg1) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.unshift"]);
- }
-
- var len = TO_UINT32(this.length);
- var num_arguments = %_ArgumentsLength();
- var is_sealed = $Object.isSealed(this);
-
- if (num_arguments > 0 && is_sealed) {
- throw MakeTypeError("array_functions_change_sealed",
- ["Array.prototype.unshift"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.unshift");
if (%IsObserved(this))
return ObservedArrayUnshift.apply(this, arguments);
- if (IS_ARRAY(this) && !is_sealed) {
- SmartMove(this, 0, 0, len, num_arguments);
- } else {
- if (num_arguments == 0 && $Object.isFrozen(this)) {
- // In the zero argument case, values from the prototype come into the
- // object. This can't be allowed on frozen arrays.
- for (var i = 0; i < len; i++) {
- if (!this.hasOwnProperty(i) && !IS_UNDEFINED(this[i])) {
- throw MakeTypeError("array_functions_on_frozen",
- ["Array.prototype.shift"]);
- }
- }
- }
+ var array = TO_OBJECT_INLINE(this);
+ var len = TO_UINT32(array.length);
+ var num_arguments = %_ArgumentsLength();
+ var is_sealed = ObjectIsSealed(array);
- SimpleMove(this, 0, 0, len, num_arguments);
+ if (IS_ARRAY(array) && !is_sealed && len > 0) {
+ SmartMove(array, 0, 0, len, num_arguments);
+ } else {
+ SimpleMove(array, 0, 0, len, num_arguments);
}
for (var i = 0; i < num_arguments; i++) {
- this[i] = %_Arguments(i);
+ array[i] = %_Arguments(i);
}
- this.length = len + num_arguments;
-
- return this.length;
+ var new_length = len + num_arguments;
+ array.length = new_length;
+ return new_length;
}
function ArraySlice(start, end) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.slice"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice");
- var len = TO_UINT32(this.length);
+ var array = TO_OBJECT_INLINE(this);
+ var len = TO_UINT32(array.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@@ -729,13 +672,13 @@ function ArraySlice(start, end) {
if (end_i < start_i) return result;
- if (IS_ARRAY(this) &&
- !%IsObserved(this) &&
+ if (IS_ARRAY(array) &&
+ !%IsObserved(array) &&
(end_i > 1000) &&
- (%EstimateNumberOfElements(this) < end_i)) {
- SmartSlice(this, start_i, end_i - start_i, len, result);
+ (%EstimateNumberOfElements(array) < end_i)) {
+ SmartSlice(array, start_i, end_i - start_i, len, result);
} else {
- SimpleSlice(this, start_i, end_i - start_i, len, result);
+ SimpleSlice(array, start_i, end_i - start_i, len, result);
}
result.length = end_i - start_i;
@@ -817,16 +760,14 @@ function ObservedArraySplice(start, delete_count) {
function ArraySplice(start, delete_count) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.splice"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.splice");
if (%IsObserved(this))
return ObservedArraySplice.apply(this, arguments);
var num_arguments = %_ArgumentsLength();
- var len = TO_UINT32(this.length);
+ var array = TO_OBJECT_INLINE(this);
+ var len = TO_UINT32(array.length);
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
start_i);
@@ -834,32 +775,32 @@ function ArraySplice(start, delete_count) {
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
- if (del_count != num_elements_to_add && $Object.isSealed(this)) {
+ if (del_count != num_elements_to_add && ObjectIsSealed(array)) {
throw MakeTypeError("array_functions_change_sealed",
["Array.prototype.splice"]);
- } else if (del_count > 0 && $Object.isFrozen(this)) {
+ } else if (del_count > 0 && ObjectIsFrozen(array)) {
throw MakeTypeError("array_functions_on_frozen",
["Array.prototype.splice"]);
}
var use_simple_splice = true;
- if (IS_ARRAY(this) &&
+ if (IS_ARRAY(array) &&
num_elements_to_add !== del_count) {
// If we are only deleting/moving a few things near the end of the
// array then the simple version is going to be faster, because it
// doesn't touch most of the array.
- var estimated_non_hole_elements = %EstimateNumberOfElements(this);
+ var estimated_non_hole_elements = %EstimateNumberOfElements(array);
if (len > 20 && (estimated_non_hole_elements >> 2) < (len - start_i)) {
use_simple_splice = false;
}
}
if (use_simple_splice) {
- SimpleSlice(this, start_i, del_count, len, deleted_elements);
- SimpleMove(this, start_i, del_count, len, num_elements_to_add);
+ SimpleSlice(array, start_i, del_count, len, deleted_elements);
+ SimpleMove(array, start_i, del_count, len, num_elements_to_add);
} else {
- SmartSlice(this, start_i, del_count, len, deleted_elements);
- SmartMove(this, start_i, del_count, len, num_elements_to_add);
+ SmartSlice(array, start_i, del_count, len, deleted_elements);
+ SmartMove(array, start_i, del_count, len, num_elements_to_add);
}
// Insert the arguments into the resulting array in
@@ -868,9 +809,9 @@ function ArraySplice(start, delete_count) {
var arguments_index = 2;
var arguments_length = %_ArgumentsLength();
while (arguments_index < arguments_length) {
- this[i++] = %_Arguments(arguments_index++);
+ array[i++] = %_Arguments(arguments_index++);
}
- this.length = len - del_count + num_elements_to_add;
+ array.length = len - del_count + num_elements_to_add;
// Return the deleted elements.
return deleted_elements;
@@ -878,10 +819,7 @@ function ArraySplice(start, delete_count) {
function ArraySort(comparefn) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.sort"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
// In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency.
@@ -1137,7 +1075,7 @@ function ArraySort(comparefn) {
// For compatibility with JSC, we also sort elements inherited from
// the prototype chain on non-Array objects.
// We do this by copying them to this object and sorting only
- // local elements. This is not very efficient, but sorting with
+ // own elements. This is not very efficient, but sorting with
// inherited elements happens very, very rarely, if at all.
// The specification allows "implementation dependent" behavior
// if an element on the prototype chain has an element that
@@ -1145,8 +1083,8 @@ function ArraySort(comparefn) {
max_prototype_element = CopyFromPrototype(this, length);
}
- var num_non_undefined = %IsObserved(this) ?
- -1 : %RemoveArrayHoles(this, length);
+ // %RemoveArrayHoles returns -1 if fast removal is not supported.
+ var num_non_undefined = %RemoveArrayHoles(this, length);
if (num_non_undefined == -1) {
// The array is observed, or there were indexed accessors in the array.
@@ -1171,10 +1109,7 @@ function ArraySort(comparefn) {
// preserving the semantics, since the calls to the receiver function can add
// or delete elements from the array.
function ArrayFilter(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.filter"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1186,35 +1121,23 @@ function ArrayFilter(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
var result = new $Array();
var accumulator = new InternalArray();
var accumulator_length = 0;
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- if (%_CallFunction(receiver, element, i, array, f)) {
- accumulator[accumulator_length++] = element;
- }
+ var stepping = %_DebugCallbackSupportsStepping(f);
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ // Prepare break slots for debugger step in.
+ if (stepping) %DebugPrepareStepInIfStepping(f);
+ if (%_CallFunction(receiver, element, i, array, f)) {
+ accumulator[accumulator_length++] = element;
}
}
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (%_CallFunction(receiver, element, i, array, f)) {
- accumulator[accumulator_length++] = element;
- }
- }
- }
- // End of duplicate.
}
%MoveArrayContents(accumulator, result);
return result;
@@ -1222,10 +1145,7 @@ function ArrayFilter(f, receiver) {
function ArrayForEach(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.forEach"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1237,28 +1157,18 @@ function ArrayForEach(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- %_CallFunction(receiver, element, i, array, f);
- }
+ var stepping = %_DebugCallbackSupportsStepping(f);
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ // Prepare break slots for debugger step in.
+ if (stepping) %DebugPrepareStepInIfStepping(f);
+ %_CallFunction(receiver, element, i, array, f);
}
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- %_CallFunction(receiver, element, i, array, f);
- }
- }
- // End of duplicate.
}
}
@@ -1266,10 +1176,7 @@ function ArrayForEach(f, receiver) {
// Executes the function once for each element present in the
// array until it finds one where callback returns true.
function ArraySome(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.some"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.some");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1281,38 +1188,25 @@ function ArraySome(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- if (%_CallFunction(receiver, element, i, array, f)) return true;
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (%_CallFunction(receiver, element, i, array, f)) return true;
- }
+ var stepping = %_DebugCallbackSupportsStepping(f);
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ // Prepare break slots for debugger step in.
+ if (stepping) %DebugPrepareStepInIfStepping(f);
+ if (%_CallFunction(receiver, element, i, array, f)) return true;
}
- // End of duplicate.
}
return false;
}
function ArrayEvery(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.every"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.every");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1324,37 +1218,24 @@ function ArrayEvery(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- if (!%_CallFunction(receiver, element, i, array, f)) return false;
- }
+ var stepping = %_DebugCallbackSupportsStepping(f);
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ // Prepare break slots for debugger step in.
+ if (stepping) %DebugPrepareStepInIfStepping(f);
+ if (!%_CallFunction(receiver, element, i, array, f)) return false;
}
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (!%_CallFunction(receiver, element, i, array, f)) return false;
- }
- }
- // End of duplicate.
}
return true;
}
function ArrayMap(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.map"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1366,30 +1247,20 @@ function ArrayMap(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
var result = new $Array();
var accumulator = new InternalArray(length);
- if (%DebugCallbackSupportsStepping(f)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(f);
- accumulator[i] = %_CallFunction(receiver, element, i, array, f);
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- accumulator[i] = %_CallFunction(receiver, element, i, array, f);
- }
+ var stepping = %_DebugCallbackSupportsStepping(f);
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ // Prepare break slots for debugger step in.
+ if (stepping) %DebugPrepareStepInIfStepping(f);
+ accumulator[i] = %_CallFunction(receiver, element, i, array, f);
}
- // End of duplicate.
}
%MoveArrayContents(accumulator, result);
return result;
@@ -1397,10 +1268,7 @@ function ArrayMap(f, receiver) {
function ArrayIndexOf(element, index) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.indexOf"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
var length = TO_UINT32(this.length);
if (length == 0) return -1;
@@ -1456,10 +1324,7 @@ function ArrayIndexOf(element, index) {
function ArrayLastIndexOf(element, index) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.lastIndexOf"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
var length = TO_UINT32(this.length);
if (length == 0) return -1;
@@ -1511,10 +1376,7 @@ function ArrayLastIndexOf(element, index) {
function ArrayReduce(callback, current) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.reduce"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduce");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1538,36 +1400,20 @@ function ArrayReduce(callback, current) {
}
var receiver = %GetDefaultReceiver(callback);
-
- if (%DebugCallbackSupportsStepping(callback)) {
- for (; i < length; i++) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(callback);
- current =
- %_CallFunction(receiver, current, element, i, array, callback);
- }
+ var stepping = %_DebugCallbackSupportsStepping(callback);
+ for (; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ // Prepare break slots for debugger step in.
+ if (stepping) %DebugPrepareStepInIfStepping(callback);
+ current = %_CallFunction(receiver, current, element, i, array, callback);
}
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (; i < length; i++) {
- if (i in array) {
- var element = array[i];
- current =
- %_CallFunction(receiver, current, element, i, array, callback);
- }
- }
- // End of duplicate.
}
return current;
}
function ArrayReduceRight(callback, current) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.reduceRight"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduceRight");
// Pull out the length so that side effects are visible before the
// callback function is checked.
@@ -1591,27 +1437,14 @@ function ArrayReduceRight(callback, current) {
}
var receiver = %GetDefaultReceiver(callback);
-
- if (%DebugCallbackSupportsStepping(callback)) {
- for (; i >= 0; i--) {
- if (i in array) {
- var element = array[i];
- // Prepare break slots for debugger step in.
- %DebugPrepareStepInIfStepping(callback);
- current =
- %_CallFunction(receiver, current, element, i, array, callback);
- }
- }
- } else {
- // This is a duplicate of the previous loop sans debug stepping.
- for (; i >= 0; i--) {
- if (i in array) {
- var element = array[i];
- current =
- %_CallFunction(receiver, current, element, i, array, callback);
- }
+ var stepping = %_DebugCallbackSupportsStepping(callback);
+ for (; i >= 0; i--) {
+ if (i in array) {
+ var element = array[i];
+ // Prepare break slots for debugger step in.
+ if (stepping) %DebugPrepareStepInIfStepping(callback);
+ current = %_CallFunction(receiver, current, element, i, array, callback);
}
- // End of duplicate.
}
return current;
}
@@ -1636,7 +1469,7 @@ function SetUpArray() {
"isArray", ArrayIsArray
));
- var specialFunctions = %SpecialArrayFunctions({});
+ var specialFunctions = %SpecialArrayFunctions();
var getFunction = function(name, jsBuiltin, len) {
var f = jsBuiltin;
@@ -1659,7 +1492,7 @@ function SetUpArray() {
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush, 1),
- "concat", getFunction("concat", ArrayConcat, 1),
+ "concat", getFunction("concat", ArrayConcatJS, 1),
"reverse", getFunction("reverse", ArrayReverse),
"shift", getFunction("shift", ArrayShift),
"unshift", getFunction("unshift", ArrayUnshift, 1),
@@ -1683,7 +1516,7 @@ function SetUpArray() {
// exposed to user code.
// Adding only the functions that are actually used.
SetUpLockedPrototype(InternalArray, $Array(), $Array(
- "concat", getFunction("concat", ArrayConcat),
+ "concat", getFunction("concat", ArrayConcatJS),
"indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
diff --git a/chromium/v8/src/arraybuffer.js b/chromium/v8/src/arraybuffer.js
index cfaa8d7efca..d1324bbed0d 100644
--- a/chromium/v8/src/arraybuffer.js
+++ b/chromium/v8/src/arraybuffer.js
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
"use strict";
@@ -40,12 +17,12 @@ function ArrayBufferConstructor(length) { // length = 1
}
}
-function ArrayBufferGetByteLength() {
+function ArrayBufferGetByteLen() {
if (!IS_ARRAYBUFFER(this)) {
throw MakeTypeError('incompatible_method_receiver',
['ArrayBuffer.prototype.byteLength', this]);
}
- return %ArrayBufferGetByteLength(this);
+ return %_ArrayBufferGetByteLength(this);
}
// ES6 Draft 15.13.5.5.3
@@ -56,14 +33,17 @@ function ArrayBufferSlice(start, end) {
}
var relativeStart = TO_INTEGER(start);
+ if (!IS_UNDEFINED(end)) {
+ end = TO_INTEGER(end);
+ }
var first;
- var byte_length = %ArrayBufferGetByteLength(this);
+ var byte_length = %_ArrayBufferGetByteLength(this);
if (relativeStart < 0) {
first = MathMax(byte_length + relativeStart, 0);
} else {
first = MathMin(relativeStart, byte_length);
}
- var relativeEnd = IS_UNDEFINED(end) ? byte_length : TO_INTEGER(end);
+ var relativeEnd = IS_UNDEFINED(end) ? byte_length : end;
var fin;
if (relativeEnd < 0) {
fin = MathMax(byte_length + relativeEnd, 0);
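
Hoisting TO_INTEGER(end) matters because the conversion can run user code (a valueOf, for instance) with visible side effects, so byteLength must be sampled only after all argument coercions. A self-contained C++ analogy of the ordering hazard (names hypothetical):

    #include <algorithm>
    #include <cassert>
    #include <functional>

    struct Buffer { int byte_length = 16; };

    // Coerce `end` first (it may mutate the buffer), then snapshot the length.
    static int ClampEnd(Buffer& buf, const std::function<int()>& to_integer_end) {
      int end = to_integer_end();   // user-visible side effects happen here
      int len = buf.byte_length;    // sampled *after* the coercion
      return end < 0 ? std::max(len + end, 0) : std::min(end, len);
    }

    int main() {
      Buffer buf;
      // The "valueOf" shrinks the buffer to 8 bytes while producing 12.
      int fin = ClampEnd(buf, [&buf] { buf.byte_length = 8; return 12; });
      assert(fin == 8);  // clamped against the post-coercion length, not 16
    }
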
@@ -82,7 +62,7 @@ function ArrayBufferSlice(start, end) {
return result;
}
-function ArrayBufferIsView(obj) {
+function ArrayBufferIsViewJS(obj) {
return %ArrayBufferIsView(obj);
}
@@ -96,10 +76,10 @@ function SetUpArrayBuffer() {
// Set up the constructor property on the ArrayBuffer prototype object.
%SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
- InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
+ InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLen);
InstallFunctions($ArrayBuffer, DONT_ENUM, $Array(
- "isView", ArrayBufferIsView
+ "isView", ArrayBufferIsViewJS
));
InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
diff --git a/chromium/v8/src/assembler.cc b/chromium/v8/src/assembler.cc
index febae63ea1a..e8fa7287973 100644
--- a/chromium/v8/src/assembler.cc
+++ b/chromium/v8/src/assembler.cc
@@ -32,37 +32,42 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "assembler.h"
+#include "src/assembler.h"
#include <cmath>
-#include "api.h"
-#include "builtins.h"
-#include "counters.h"
-#include "cpu.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "ic.h"
-#include "isolate-inl.h"
-#include "jsregexp.h"
-#include "lazy-instance.h"
-#include "platform.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "runtime.h"
-#include "serialize.h"
-#include "store-buffer-inl.h"
-#include "stub-cache.h"
-#include "token.h"
+#include "src/api.h"
+#include "src/base/lazy-instance.h"
+#include "src/builtins.h"
+#include "src/counters.h"
+#include "src/cpu.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/execution.h"
+#include "src/ic.h"
+#include "src/isolate-inl.h"
+#include "src/jsregexp.h"
+#include "src/platform.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/runtime.h"
+#include "src/serialize.h"
+#include "src/store-buffer-inl.h"
+#include "src/stub-cache.h"
+#include "src/token.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/assembler-ia32-inl.h"
+#include "src/ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/assembler-x64-inl.h"
+#include "src/x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm-inl.h"
+#include "src/arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/assembler-mips-inl.h"
+#include "src/mips/assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/assembler-x87-inl.h"
#else
#error "Unknown architecture."
#endif
@@ -70,13 +75,17 @@
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
-#include "ia32/regexp-macro-assembler-ia32.h"
+#include "src/ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/regexp-macro-assembler-x64.h"
+#include "src/x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/regexp-macro-assembler-arm.h"
+#include "src/arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/regexp-macro-assembler-mips.h"
+#include "src/mips/regexp-macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/regexp-macro-assembler-x87.h"
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@@ -118,11 +127,12 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
jit_cookie_(0),
enabled_cpu_features_(0),
emit_debug_code_(FLAG_debug_code),
- predictable_code_size_(false) {
+ predictable_code_size_(false),
+ // We may use the assembler without an isolate.
+ serializer_enabled_(isolate && isolate->serializer_enabled()) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
-
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -187,7 +197,7 @@ PredictableCodeSizeScope::~PredictableCodeSizeScope() {
#ifdef DEBUG
CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
: assembler_(assembler) {
- ASSERT(CpuFeatures::IsSafeForSnapshot(f));
+ ASSERT(CpuFeatures::IsSupported(f));
old_enabled_ = assembler_->enabled_cpu_features();
uint64_t mask = static_cast<uint64_t>(1) << f;
// TODO(svenpanne) This special case below doesn't belong here!
@@ -207,22 +217,9 @@ CpuFeatureScope::~CpuFeatureScope() {
#endif
-// -----------------------------------------------------------------------------
-// Implementation of PlatformFeatureScope
-
-PlatformFeatureScope::PlatformFeatureScope(CpuFeature f)
- : old_cross_compile_(CpuFeatures::cross_compile_) {
- // CpuFeatures is a global singleton, therefore this is only safe in
- // single threaded code.
- ASSERT(Serializer::enabled());
- uint64_t mask = static_cast<uint64_t>(1) << f;
- CpuFeatures::cross_compile_ |= mask;
-}
-
-
-PlatformFeatureScope::~PlatformFeatureScope() {
- CpuFeatures::cross_compile_ = old_cross_compile_;
-}
+bool CpuFeatures::initialized_ = false;
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::cache_line_size_ = 0;
// -----------------------------------------------------------------------------
@@ -283,9 +280,12 @@ int Label::pos() const {
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
-// 1101: constant pool. Used on ARM only for now.
-// The format is: 11 1101 11
-// signed int (size of the constant pool).
+// 1101: constant or veneer pool. Used only on ARM and ARM64 for now.
+// The format is: [2-bit sub-type] 1101 11
+// signed int (size of the pool).
+// The 2-bit sub-types are:
+// 00: constant pool
+// 01: veneer pool
// 1110: long_data_record
// The format is: [2-bit data_type_tag] 1110 11
// signed intptr_t, lowest byte written first
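
A compact decoder for the pool record layout described above: reading the byte LSB-first, the low two bits are the extra-tag escape (11), the next four are the 1101 pool tag, and the top two are the sub-type. A sketch only; constants mirror the patch:

    #include <cassert>
    #include <cstdint>

    // Byte layout, LSB first: [2-bit sub-type][1101][11].
    constexpr uint8_t kTagMarker = 0x3;     // low two bits: extra-tag escape
    constexpr uint8_t kPoolTag = 0xD;       // 1101

    static bool DecodePoolTag(uint8_t b, int* sub_type) {
      if ((b & 0x3) != kTagMarker) return false;       // not an extra-tagged record
      if (((b >> 2) & 0xF) != kPoolTag) return false;  // not a pool record
      *sub_type = (b >> 6) & 0x3;  // 00: constant pool, 01: veneer pool
      return true;
    }

    int main() {
      int sub_type = -1;
      uint8_t veneer = (1 << 6) | (kPoolTag << 2) | kTagMarker;
      assert(DecodePoolTag(veneer, &sub_type) && sub_type == 1);
    }
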
@@ -306,7 +306,9 @@ int Label::pos() const {
// dropped, and last non-zero chunk tagged with 1.)
+#ifdef DEBUG
const int kMaxStandardNonCompactModes = 14;
+#endif
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
@@ -340,8 +342,9 @@ const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
-const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
-const int kConstPoolTag = 3;
+const int kPoolExtraTag = kPCJumpExtraTag - 2;
+const int kConstPoolTag = 0;
+const int kVeneerPoolTag = 1;
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
@@ -401,8 +404,8 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
-void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
- WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
+void RelocInfoWriter::WriteExtraTaggedPoolData(int data, int pool_type) {
+ WriteExtraTag(kPoolExtraTag, pool_type);
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(data);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
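
A round-trip sketch of the payload encoding this writer and the reader in AdvanceReadPoolData below agree on: a signed 32-bit value emitted low byte first, with the arithmetic right shift providing sign extension. Simplified to a forward buffer; the real writer moves pos_ backwards:

    #include <cassert>
    #include <cstdint>

    static void WriteInt(uint8_t* out, int32_t data) {
      for (int i = 0; i < 4; i++) {
        out[i] = static_cast<uint8_t>(data);  // lowest byte first
        data = data >> 8;                     // arithmetic shift keeps the sign
      }
    }

    static int32_t ReadInt(const uint8_t* in) {
      uint32_t x = 0;
      for (int i = 0; i < 4; i++) x |= static_cast<uint32_t>(in[i]) << (i * 8);
      return static_cast<int32_t>(x);
    }

    int main() {
      uint8_t buf[4];
      WriteInt(buf, -12345);  // e.g. a pool size
      assert(ReadInt(buf) == -12345);
    }
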
@@ -474,9 +477,11 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
- } else if (RelocInfo::IsConstPool(rmode)) {
+ } else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
+ WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()),
+ RelocInfo::IsConstPool(rmode) ? kConstPoolTag
+ : kVeneerPoolTag);
} else {
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
@@ -527,7 +532,7 @@ void RelocIterator::AdvanceReadId() {
}
-void RelocIterator::AdvanceReadConstPoolData() {
+void RelocIterator::AdvanceReadPoolData() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
@@ -669,10 +674,13 @@ void RelocIterator::next() {
}
Advance(kIntptrSize);
}
- } else if ((extra_tag == kConstPoolExtraTag) &&
- (GetTopTag() == kConstPoolTag)) {
- if (SetMode(RelocInfo::CONST_POOL)) {
- AdvanceReadConstPoolData();
+ } else if (extra_tag == kPoolExtraTag) {
+ int pool_type = GetTopTag();
+ ASSERT(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
+ RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ?
+ RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL;
+ if (SetMode(rmode)) {
+ AdvanceReadPoolData();
return;
}
Advance(kIntSize);
@@ -708,7 +716,10 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
last_id_ = 0;
last_position_ = 0;
byte* sequence = code->FindCodeAgeSequence();
- if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
+ // We get the isolate from the map, because at serialization time
+ // the code pointer has been cloned and isn't really in heap space.
+ Isolate* isolate = code->map()->GetIsolate();
+ if (sequence != NULL && !Code::IsYoungSequence(isolate, sequence)) {
code_age_sequence_ = sequence;
} else {
code_age_sequence_ = NULL;
@@ -764,12 +775,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "embedded object";
case RelocInfo::CONSTRUCT_CALL:
return "code target (js construct call)";
- case RelocInfo::CODE_TARGET_CONTEXT:
- return "code target (context)";
case RelocInfo::DEBUG_BREAK:
-#ifndef ENABLE_DEBUGGER_SUPPORT
- UNREACHABLE();
-#endif
return "debug break";
case RelocInfo::CODE_TARGET:
return "code target";
@@ -793,10 +799,9 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "internal reference";
case RelocInfo::CONST_POOL:
return "constant pool";
+ case RelocInfo::VENEER_POOL:
+ return "veneer pool";
case RelocInfo::DEBUG_BREAK_SLOT:
-#ifndef ENABLE_DEBUGGER_SUPPORT
- UNREACHABLE();
-#endif
return "debug break slot";
case RelocInfo::CODE_AGE_SEQUENCE:
return "code_age_sequence";
@@ -846,7 +851,7 @@ void RelocInfo::Print(Isolate* isolate, FILE* out) {
#ifdef VERIFY_HEAP
-void RelocInfo::Verify() {
+void RelocInfo::Verify(Isolate* isolate) {
switch (rmode_) {
case EMBEDDED_OBJECT:
Object::VerifyPointer(target_object());
@@ -855,12 +860,7 @@ void RelocInfo::Verify() {
Object::VerifyPointer(target_cell());
break;
case DEBUG_BREAK:
-#ifndef ENABLE_DEBUGGER_SUPPORT
- UNREACHABLE();
- break;
-#endif
case CONSTRUCT_CALL:
- case CODE_TARGET_CONTEXT:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
@@ -868,7 +868,7 @@ void RelocInfo::Verify() {
CHECK(addr != NULL);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
- Object* found = code->GetIsolate()->FindCodeObject(addr);
+ Object* found = isolate->FindCodeObject(addr);
CHECK(found->IsCode());
CHECK(code->address() == HeapObject::cast(found)->address());
break;
@@ -881,6 +881,7 @@ void RelocInfo::Verify() {
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
case CONST_POOL:
+ case VENEER_POOL:
case DEBUG_BREAK_SLOT:
case NONE32:
case NONE64:
@@ -889,7 +890,7 @@ void RelocInfo::Verify() {
UNREACHABLE();
break;
case CODE_AGE_SEQUENCE:
- ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
+ ASSERT(Code::IsYoungSequence(isolate, pc_) || code_age_stub()->IsCode());
break;
}
}
@@ -936,7 +937,7 @@ void ExternalReference::InitializeMathExpData() {
// The rest is black magic. Do not attempt to understand it. It is
// loosely based on the "expd" function published at:
// http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
- const double constant3 = (1 << kTableSizeBits) / log(2.0);
+ const double constant3 = (1 << kTableSizeBits) / std::log(2.0);
math_exp_constants_array[3] = constant3;
math_exp_constants_array[4] =
static_cast<double>(static_cast<int64_t>(3) << 51);
@@ -947,7 +948,7 @@ void ExternalReference::InitializeMathExpData() {
math_exp_log_table_array = new double[kTableSize];
for (int i = 0; i < kTableSize; i++) {
- double value = pow(2, i / kTableSizeDouble);
+ double value = std::pow(2, i / kTableSizeDouble);
uint64_t bits = BitCast<uint64_t, double>(value);
bits &= (static_cast<uint64_t>(1) << 52) - 1;
double mantissa = BitCast<double, uint64_t>(bits);
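
One plausible reading of the table setup (hedged; the source itself calls it black magic): with b = kTableSizeBits and c_3 = 2^b / ln 2 (slot 3), the generated code relies on

    e^x = 2^{x/\ln 2}, \qquad t = c_3 x = \frac{2^b x}{\ln 2}, \qquad
    \lfloor t \rceil = 2^b n + j, \quad 0 \le j < 2^b,

so that

    e^x \approx 2^{n} \cdot 2^{j/2^b} \cdot 2^{(t - \lfloor t \rceil)/2^b}.

The loop stores only the 52-bit mantissa of each 2^{j/2^b}, to be spliced under the computed exponent n; the small residual factor is approximated polynomially, and the constant 3 * 2^51 (slot 4) implements round-to-nearest of t by pushing its integer part into the low mantissa bits of a double.
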
@@ -1000,11 +1001,6 @@ ExternalReference::ExternalReference(const IC_Utility& ic_utility,
Isolate* isolate)
: address_(Redirect(isolate, ic_utility.address())) {}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-ExternalReference::ExternalReference(const Debug_Address& debug_address,
- Isolate* isolate)
- : address_(debug_address.address(isolate)) {}
-#endif
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
@@ -1027,14 +1023,6 @@ ExternalReference ExternalReference::
ExternalReference ExternalReference::
- incremental_evacuation_record_write_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
-}
-
-
-ExternalReference ExternalReference::
store_buffer_overflow_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
@@ -1047,12 +1035,6 @@ ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
}
-ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
- return
- ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
-}
-
-
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
@@ -1091,13 +1073,6 @@ ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
}
-ExternalReference ExternalReference::transcendental_cache_array_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->transcendental_cache()->cache_array_address());
-}
-
-
ExternalReference ExternalReference::new_deoptimizer_function(
Isolate* isolate) {
return ExternalReference(
@@ -1229,13 +1204,6 @@ ExternalReference ExternalReference::old_data_space_allocation_limit_address(
}
-ExternalReference ExternalReference::
- new_space_high_promotion_mode_active_address(Isolate* isolate) {
- return ExternalReference(
- isolate->heap()->NewSpaceHighPromotionModeActiveAddress());
-}
-
-
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));
@@ -1335,6 +1303,30 @@ ExternalReference ExternalReference::address_of_uint32_bias() {
}
+ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
+ return ExternalReference(isolate->cpu_profiler()->is_profiling_address());
+}
+
+
+ExternalReference ExternalReference::invoke_function_callback(
+ Isolate* isolate) {
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ return ExternalReference(&thunk_fun, thunk_type, isolate);
+}
+
+
+ExternalReference ExternalReference::invoke_accessor_getter_callback(
+ Isolate* isolate) {
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ return ExternalReference(&thunk_fun, thunk_type, isolate);
+}
+
+
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state(
@@ -1344,10 +1336,14 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#elif V8_TARGET_ARCH_ARM64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#elif V8_TARGET_ARCH_X87
+ function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
UNREACHABLE();
#endif
@@ -1393,79 +1389,11 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
#endif // V8_INTERPRETED_REGEXP
-static double add_two_doubles(double x, double y) {
- return x + y;
-}
-
-
-static double sub_two_doubles(double x, double y) {
- return x - y;
-}
-
-
-static double mul_two_doubles(double x, double y) {
- return x * y;
-}
-
-
-static double div_two_doubles(double x, double y) {
- return x / y;
-}
-
-
-static double mod_two_doubles(double x, double y) {
- return modulo(x, y);
-}
-
-
-static double math_sin_double(double x) {
- return sin(x);
-}
-
-
-static double math_cos_double(double x) {
- return cos(x);
-}
-
-
-static double math_tan_double(double x) {
- return tan(x);
-}
-
-
-static double math_log_double(double x) {
- return log(x);
-}
-
-
-ExternalReference ExternalReference::math_sin_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_sin_double),
- BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_cos_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_cos_double),
- BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_tan_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_tan_double),
- BUILTIN_FP_CALL));
-}
-
-
ExternalReference ExternalReference::math_log_double_function(
Isolate* isolate) {
+ typedef double (*d2d)(double x);
return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_log_double),
+ FUNCTION_ADDR(static_cast<d2d>(std::log)),
BUILTIN_FP_CALL));
}
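
The static_cast in the redirect is doing real work: std::log is an overload set (float, double, long double), so taking its address requires naming the exact function type. A self-contained illustration:

    #include <cassert>
    #include <cmath>

    int main() {
      typedef double (*d2d)(double);
      d2d f = static_cast<d2d>(std::log);  // selects the double(double) overload
      assert(f(1.0) == 0.0);
    }
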
@@ -1494,6 +1422,26 @@ ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
}
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
+ExternalReference ExternalReference::debug_after_break_target_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug()->after_break_target_address());
+}
+
+
+ExternalReference
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->debug()->restarter_frame_function_pointer_address());
+}
+
+
double power_helper(double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
@@ -1536,12 +1484,16 @@ double power_double_double(double x, double y) {
// special cases that are different.
if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
double f;
- if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
+ if (std::modf(y, &f) != 0.0) {
+ return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
+ }
}
if (x == 2.0) {
int y_int = static_cast<int>(y);
- if (y == y_int) return ldexp(1.0, y_int);
+ if (y == y_int) {
+ return std::ldexp(1.0, y_int);
+ }
}
#endif
@@ -1550,7 +1502,7 @@ double power_double_double(double x, double y) {
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
return OS::nan_value();
}
- return pow(x, y);
+ return std::pow(x, y);
}
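
The guard above exists because libm and ECMAScript disagree on edge cases: C99 Annex F defines pow(1, y) == 1 for every y, including infinities and NaN, while Math.pow(+/-1, +/-Infinity) must be NaN, so those inputs are intercepted before reaching std::pow. Sketch of the divergence:

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main() {
      const double inf = std::numeric_limits<double>::infinity();
      assert(std::pow(1.0, inf) == 1.0);  // the libm answer
      // ECMAScript requires Math.pow(1, Infinity) to be NaN, hence the
      // isnan/isinf branch above returns OS::nan_value() for this case.
    }
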
@@ -1570,12 +1522,6 @@ ExternalReference ExternalReference::power_double_int_function(
}
-static int native_compare_doubles(double y, double x) {
- if (x == y) return EQUAL;
- return x < y ? LESS : GREATER;
-}
-
-
bool EvalComparison(Token::Value op, double op1, double op2) {
ASSERT(Token::IsCompareOp(op));
switch (op) {
@@ -1593,43 +1539,14 @@ bool EvalComparison(Token::Value op, double op1, double op2) {
}
-ExternalReference ExternalReference::double_fp_operation(
- Token::Value operation, Isolate* isolate) {
- typedef double BinaryFPOperation(double x, double y);
- BinaryFPOperation* function = NULL;
- switch (operation) {
- case Token::ADD:
- function = &add_two_doubles;
- break;
- case Token::SUB:
- function = &sub_two_doubles;
- break;
- case Token::MUL:
- function = &mul_two_doubles;
- break;
- case Token::DIV:
- function = &div_two_doubles;
- break;
- case Token::MOD:
- function = &mod_two_doubles;
- break;
- default:
- UNREACHABLE();
- }
+ExternalReference ExternalReference::mod_two_doubles_operation(
+ Isolate* isolate) {
return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(function),
+ FUNCTION_ADDR(modulo),
BUILTIN_FP_FP_CALL));
}
-ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(native_compare_doubles),
- BUILTIN_COMPARE_CALL));
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
}
@@ -1639,7 +1556,6 @@ ExternalReference ExternalReference::debug_step_in_fp_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->step_in_fp_addr());
}
-#endif
void PositionsRecorder::RecordPosition(int pos) {
@@ -1702,4 +1618,38 @@ bool PositionsRecorder::WriteRecordedPositions() {
return written;
}
+
+MultiplierAndShift::MultiplierAndShift(int32_t d) {
+ ASSERT(d <= -2 || 2 <= d);
+ const uint32_t two31 = 0x80000000;
+ uint32_t ad = Abs(d);
+ uint32_t t = two31 + (uint32_t(d) >> 31);
+ uint32_t anc = t - 1 - t % ad; // Absolute value of nc.
+ int32_t p = 31; // Init. p.
+ uint32_t q1 = two31 / anc; // Init. q1 = 2**p/|nc|.
+ uint32_t r1 = two31 - q1 * anc; // Init. r1 = rem(2**p, |nc|).
+ uint32_t q2 = two31 / ad; // Init. q2 = 2**p/|d|.
+ uint32_t r2 = two31 - q2 * ad; // Init. r2 = rem(2**p, |d|).
+ uint32_t delta;
+ do {
+ p++;
+ q1 *= 2; // Update q1 = 2**p/|nc|.
+ r1 *= 2; // Update r1 = rem(2**p, |nc|).
+ if (r1 >= anc) { // Must be an unsigned comparison here.
+ q1++;
+ r1 = r1 - anc;
+ }
+ q2 *= 2; // Update q2 = 2**p/|d|.
+ r2 *= 2; // Update r2 = rem(2**p, |d|).
+ if (r2 >= ad) { // Must be an unsigned comparison here.
+ q2++;
+ r2 = r2 - ad;
+ }
+ delta = ad - r2;
+ } while (q1 < delta || (q1 == delta && r1 == 0));
+ int32_t mul = static_cast<int32_t>(q2 + 1);
+ multiplier_ = (d < 0) ? -mul : mul;
+ shift_ = p - 32;
+}
+
} } // namespace v8::internal
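
For context, a hedged sketch of how such a (multiplier, shift) pair is consumed — the fix-up sequence from Hacker's Delight ch. 10 that the backends are expected to emit; the helper below is illustrative, not V8 code:

    #include <cassert>
    #include <cstdint>

    static int32_t DivByConstant(int32_t n, int32_t d,
                                 int32_t multiplier, int32_t shift) {
      int64_t product = static_cast<int64_t>(multiplier) * n;
      int32_t q = static_cast<int32_t>(product >> 32);  // high 32 bits
      if (d > 0 && multiplier < 0) q += n;  // magic number wrapped negative
      if (d < 0 && multiplier > 0) q -= n;
      q >>= shift;
      q += static_cast<uint32_t>(q) >> 31;  // +1 when negative: truncate toward 0
      return q;
    }

    int main() {
      // Known magic pair for d == 7: multiplier 0x92492493, shift 2.
      const int32_t m7 = static_cast<int32_t>(0x92492493u);
      assert(DivByConstant(100, 7, m7, 2) == 14);
      assert(DivByConstant(-100, 7, m7, 2) == -14);
    }
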
diff --git a/chromium/v8/src/assembler.h b/chromium/v8/src/assembler.h
index 0c706c450b0..bbca79374a9 100644
--- a/chromium/v8/src/assembler.h
+++ b/chromium/v8/src/assembler.h
@@ -35,14 +35,14 @@
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "allocation.h"
-#include "builtins.h"
-#include "gdb-jit.h"
-#include "isolate.h"
-#include "runtime.h"
-#include "token.h"
+#include "src/allocation.h"
+#include "src/builtins.h"
+#include "src/gdb-jit.h"
+#include "src/isolate.h"
+#include "src/runtime.h"
+#include "src/token.h"
namespace v8 {
@@ -65,6 +65,8 @@ class AssemblerBase: public Malloced {
bool emit_debug_code() const { return emit_debug_code_; }
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+ bool serializer_enabled() const { return serializer_enabled_; }
+
bool predictable_code_size() const { return predictable_code_size_; }
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
@@ -82,6 +84,10 @@ class AssemblerBase: public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
+ // This function is called when code generation is aborted, so that
+ // the assembler can clean up internal data structures.
+ virtual void AbortedCodeGeneration() { }
+
static const int kMinimalBufferSize = 4*KB;
protected:
@@ -100,6 +106,23 @@ class AssemblerBase: public Malloced {
uint64_t enabled_cpu_features_;
bool emit_debug_code_;
bool predictable_code_size_;
+ bool serializer_enabled_;
+};
+
+
+// Avoids emitting debug code during the lifetime of this scope object.
+class DontEmitDebugCodeScope BASE_EMBEDDED {
+ public:
+ explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
+ : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
+ assembler_->set_emit_debug_code(false);
+ }
+ ~DontEmitDebugCodeScope() {
+ assembler_->set_emit_debug_code(old_value_);
+ }
+ private:
+ AssemblerBase* assembler_;
+ bool old_value_;
};
@@ -134,15 +157,47 @@ class CpuFeatureScope BASE_EMBEDDED {
};
-// Enable a unsupported feature within a scope for cross-compiling for a
-// different CPU.
-class PlatformFeatureScope BASE_EMBEDDED {
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a CpuFeatureScope before use.
+// Example:
+// if (CpuFeatures::IsSupported(SSE3)) {
+// CpuFeatureScope fscope(assembler, SSE3);
+// // Generate code containing SSE3 instructions.
+// } else {
+// // Generate alternative code.
+// }
+class CpuFeatures : public AllStatic {
public:
- explicit PlatformFeatureScope(CpuFeature f);
- ~PlatformFeatureScope();
+ static void Probe(bool cross_compile) {
+ STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt);
+ if (initialized_) return;
+ initialized_ = true;
+ ProbeImpl(cross_compile);
+ }
+
+ static bool IsSupported(CpuFeature f) {
+ return (supported_ & (1u << f)) != 0;
+ }
+
+ static inline bool SupportsCrankshaft();
+
+ static inline unsigned cache_line_size() {
+ ASSERT(cache_line_size_ != 0);
+ return cache_line_size_;
+ }
+
+ static void PrintTarget();
+ static void PrintFeatures();
private:
- uint64_t old_cross_compile_;
+ // Platform-dependent implementation.
+ static void ProbeImpl(bool cross_compile);
+
+ static unsigned supported_;
+ static unsigned cache_line_size_;
+ static bool initialized_;
+ friend class ExternalReference;
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
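
A minimal stand-in showing the bit-set scheme behind IsSupported: one bit per CpuFeature in a static unsigned word, populated once by Probe (feature names here are placeholders):

    #include <cassert>

    enum CpuFeature { SSE3, SSE4_1, NEON, NUMBER_OF_CPU_FEATURES };
    static unsigned supported = 0;

    static void Probe() { supported |= 1u << SSE3; }  // pretend only SSE3 exists
    static bool IsSupported(CpuFeature f) { return (supported & (1u << f)) != 0; }

    int main() {
      Probe();
      assert(IsSupported(SSE3) && !IsSupported(NEON));
    }
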
@@ -210,11 +265,23 @@ class Label BASE_EMBEDDED {
friend class Assembler;
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
+
+#if V8_TARGET_ARCH_ARM64
+ // On ARM64, the Assembler keeps track of pointers to Labels to resolve
+ // branches to distant targets. Copying labels would confuse the Assembler.
+ DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
+#endif
};
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
+// Specifies whether to perform icache flush operations on RelocInfo updates.
+// If FLUSH_ICACHE_IF_NEEDED, the icache will always be flushed if an
+// instruction was modified. If SKIP_ICACHE_FLUSH, the flush will always be
+// skipped (only use this if you will flush the icache manually before it is
+// executed).
+enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };
// -----------------------------------------------------------------------------
// Relocation information
@@ -227,7 +294,7 @@ enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
// describe a property of the datum. Such rmodes are useful for GC
// and nice disassembly output.
-class RelocInfo BASE_EMBEDDED {
+class RelocInfo {
public:
// The constant kNoPosition is used with the collecting of source positions
// in the relocation information. Two types of source positions are collected
@@ -262,7 +329,6 @@ class RelocInfo BASE_EMBEDDED {
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
- CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
DEBUG_BREAK, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
@@ -277,9 +343,10 @@ class RelocInfo BASE_EMBEDDED {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
- // Marks a constant pool. Only used on ARM.
- // It uses a custom noncompact encoding.
+ // Marks constant and veneer pools. Only used on ARM and ARM64.
+ // They use a custom noncompact encoding.
CONST_POOL,
+ VENEER_POOL,
// add more as needed
// Pseudo-types
@@ -289,7 +356,7 @@ class RelocInfo BASE_EMBEDDED {
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
- LAST_REAL_RELOC_MODE = CONST_POOL,
+ LAST_REAL_RELOC_MODE = VENEER_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
@@ -299,7 +366,6 @@ class RelocInfo BASE_EMBEDDED {
LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
};
-
RelocInfo() {}
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
@@ -343,6 +409,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsConstPool(Mode mode) {
return mode == CONST_POOL;
}
+ static inline bool IsVeneerPool(Mode mode) {
+ return mode == VENEER_POOL;
+ }
static inline bool IsPosition(Mode mode) {
return mode == POSITION || mode == STATEMENT_POSITION;
}
@@ -366,6 +435,15 @@ class RelocInfo BASE_EMBEDDED {
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
+ // Returns true if the first RelocInfo has the same mode and raw data as the
+ // second one.
+ static inline bool IsEqual(RelocInfo first, RelocInfo second) {
+ return first.rmode() == second.rmode() &&
+ (first.rmode() == RelocInfo::NONE64 ?
+ first.raw_data64() == second.raw_data64() :
+ first.data() == second.data());
+ }
+
// Accessors
byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
@@ -376,36 +454,60 @@ class RelocInfo BASE_EMBEDDED {
return BitCast<uint64_t>(data64_);
}
Code* host() const { return host_; }
+ void set_host(Code* host) { host_ = host; }
// Apply a relocation by delta bytes
- INLINE(void apply(intptr_t delta));
+ INLINE(void apply(intptr_t delta,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED));
// Is the pointer this relocation info refers to coded like a plain pointer
// or is it strange in some way (e.g. relative or patched into a series of
// instructions).
bool IsCodedSpecially();
+ // If true, the pointer this relocation info refers to is an entry in the
+ // constant pool; otherwise the pointer is embedded in the instruction stream.
+ bool IsInConstantPool();
+
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
INLINE(void set_target_address(Address target,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+ WriteBarrierMode write_barrier_mode =
+ UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED));
INLINE(Object* target_object());
INLINE(Handle<Object> target_object_handle(Assembler* origin));
INLINE(void set_target_object(Object* target,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+ WriteBarrierMode write_barrier_mode =
+ UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED));
INLINE(Address target_runtime_entry(Assembler* origin));
INLINE(void set_target_runtime_entry(Address target,
- WriteBarrierMode mode =
- UPDATE_WRITE_BARRIER));
+ WriteBarrierMode write_barrier_mode =
+ UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED));
INLINE(Cell* target_cell());
INLINE(Handle<Cell> target_cell_handle());
INLINE(void set_target_cell(Cell* cell,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+ WriteBarrierMode write_barrier_mode =
+ UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED));
INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
INLINE(Code* code_age_stub());
- INLINE(void set_code_age_stub(Code* stub));
+ INLINE(void set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED));
+
+ // Returns the address of the constant pool entry where the target address
+ // is held. This should only be called if IsInConstantPool returns true.
+ INLINE(Address constant_pool_entry_address());
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
@@ -414,6 +516,7 @@ class RelocInfo BASE_EMBEDDED {
// output before the next target. Architecture-independent code shouldn't
// dereference the pointer it gets back from this.
INLINE(Address target_address_address());
+
// This indicates how much space a target takes up when deserializing a code
// stream. For most architectures this is just the size of a pointer. For
// an instruction like movw/movt where the target bits are mixed into the
@@ -471,7 +574,7 @@ class RelocInfo BASE_EMBEDDED {
void Print(Isolate* isolate, FILE* out);
#endif // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
- void Verify();
+ void Verify(Isolate* isolate);
#endif
static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
@@ -538,7 +641,7 @@ class RelocInfoWriter BASE_EMBEDDED {
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
- inline void WriteExtraTaggedConstPoolData(int data);
+ inline void WriteExtraTaggedPoolData(int data, int pool_type);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
@@ -589,7 +692,7 @@ class RelocIterator: public Malloced {
void ReadTaggedPC();
void AdvanceReadPC();
void AdvanceReadId();
- void AdvanceReadConstPoolData();
+ void AdvanceReadPoolData();
void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();
@@ -621,9 +724,7 @@ class RelocIterator: public Malloced {
//----------------------------------------------------------------------------
class IC_Utility;
class SCTableReference;
-#ifdef ENABLE_DEBUGGER_SUPPORT
class Debug_Address;
-#endif
// An ExternalReference represents a C++ address used in the generated
@@ -636,7 +737,7 @@ class ExternalReference BASE_EMBEDDED {
// Used in the simulator to support different native api calls.
enum Type {
// Builtin call.
- // MaybeObject* f(v8::internal::Arguments).
+ // Object* f(v8::internal::Arguments).
BUILTIN_CALL, // default
// Builtin that takes float arguments and returns an int.
@@ -693,10 +794,6 @@ class ExternalReference BASE_EMBEDDED {
ExternalReference(const IC_Utility& ic_utility, Isolate* isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference(const Debug_Address& debug_address, Isolate* isolate);
-#endif
-
explicit ExternalReference(StatsCounter* counter);
ExternalReference(Isolate::AddressId id, Isolate* isolate);
@@ -712,13 +809,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
- static ExternalReference incremental_evacuation_record_write_function(
- Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
- static ExternalReference perform_gc_function(Isolate* isolate);
- static ExternalReference transcendental_cache_array_address(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
static ExternalReference get_date_field_function(Isolate* isolate);
@@ -781,12 +874,8 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference old_data_space_allocation_limit_address(
Isolate* isolate);
- static ExternalReference new_space_high_promotion_mode_active_address(
- Isolate* isolate);
- static ExternalReference double_fp_operation(Token::Value operation,
- Isolate* isolate);
- static ExternalReference compare_doubles(Isolate* isolate);
+ static ExternalReference mod_two_doubles_operation(Isolate* isolate);
static ExternalReference power_double_double_function(Isolate* isolate);
static ExternalReference power_double_int_function(Isolate* isolate);
@@ -811,9 +900,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_the_hole_nan();
static ExternalReference address_of_uint32_bias();
- static ExternalReference math_sin_double_function(Isolate* isolate);
- static ExternalReference math_cos_double_function(Isolate* isolate);
- static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
static ExternalReference math_exp_constants(int constant_index);
@@ -825,15 +911,21 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference cpu_features();
+ static ExternalReference debug_after_break_target_address(Isolate* isolate);
+ static ExternalReference debug_restarter_frame_function_pointer_address(
+ Isolate* isolate);
+
+ static ExternalReference is_profiling_address(Isolate* isolate);
+ static ExternalReference invoke_function_callback(Isolate* isolate);
+ static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
+
Address address() const { return reinterpret_cast<Address>(address_); }
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Function Debug::Break()
static ExternalReference debug_break(Isolate* isolate);
// Used to check if single stepping is enabled in generated code.
static ExternalReference debug_step_in_fp_address(Isolate* isolate);
-#endif
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.
@@ -1009,32 +1101,6 @@ class PreservePositionScope BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Utility functions
-inline bool is_intn(int x, int n) {
- return -(1 << (n-1)) <= x && x < (1 << (n-1));
-}
-
-inline bool is_int8(int x) { return is_intn(x, 8); }
-inline bool is_int16(int x) { return is_intn(x, 16); }
-inline bool is_int18(int x) { return is_intn(x, 18); }
-inline bool is_int24(int x) { return is_intn(x, 24); }
-
-inline bool is_uintn(int x, int n) {
- return (x & -(1 << n)) == 0;
-}
-
-inline bool is_uint2(int x) { return is_uintn(x, 2); }
-inline bool is_uint3(int x) { return is_uintn(x, 3); }
-inline bool is_uint4(int x) { return is_uintn(x, 4); }
-inline bool is_uint5(int x) { return is_uintn(x, 5); }
-inline bool is_uint6(int x) { return is_uintn(x, 6); }
-inline bool is_uint8(int x) { return is_uintn(x, 8); }
-inline bool is_uint10(int x) { return is_uintn(x, 10); }
-inline bool is_uint12(int x) { return is_uintn(x, 12); }
-inline bool is_uint16(int x) { return is_uintn(x, 16); }
-inline bool is_uint24(int x) { return is_uintn(x, 24); }
-inline bool is_uint26(int x) { return is_uintn(x, 26); }
-inline bool is_uint28(int x) { return is_uintn(x, 28); }
-
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
@@ -1072,6 +1138,21 @@ class NullCallWrapper : public CallWrapper {
virtual void AfterCall() const { }
};
+
+// The multiplier and shift for signed division via multiplication; see Warren's
+// "Hacker's Delight", chapter 10.
+class MultiplierAndShift {
+ public:
+ explicit MultiplierAndShift(int32_t d);
+ int32_t multiplier() const { return multiplier_; }
+ int32_t shift() const { return shift_; }
+
+ private:
+ int32_t multiplier_;
+ int32_t shift_;
+};
+
+
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_
diff --git a/chromium/v8/src/assert-scope.cc b/chromium/v8/src/assert-scope.cc
new file mode 100644
index 00000000000..c4aa9877d45
--- /dev/null
+++ b/chromium/v8/src/assert-scope.cc
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "src/assert-scope.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+uint32_t PerIsolateAssertBase::GetData(Isolate* isolate) {
+ return isolate->per_isolate_assert_data();
+}
+
+
+void PerIsolateAssertBase::SetData(Isolate* isolate, uint32_t data) {
+ isolate->set_per_isolate_assert_data(data);
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/assert-scope.h b/chromium/v8/src/assert-scope.h
index 269b280d027..14e1194ba5c 100644
--- a/chromium/v8/src/assert-scope.h
+++ b/chromium/v8/src/assert-scope.h
@@ -1,35 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ASSERT_SCOPE_H_
#define V8_ASSERT_SCOPE_H_
-#include "allocation.h"
-#include "platform.h"
+#include "src/allocation.h"
+#include "src/platform.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -46,7 +24,14 @@ enum PerThreadAssertType {
};
-#ifdef DEBUG
+enum PerIsolateAssertType {
+ JAVASCRIPT_EXECUTION_ASSERT,
+ JAVASCRIPT_EXECUTION_THROWS,
+ ALLOCATION_FAILURE_ASSERT,
+ DEOPTIMIZATION_ASSERT
+};
+
+
class PerThreadAssertData {
public:
PerThreadAssertData() : nesting_level_(0) {
@@ -72,12 +57,9 @@ class PerThreadAssertData {
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
};
-#endif // DEBUG
class PerThreadAssertScopeBase {
-#ifdef DEBUG
-
protected:
PerThreadAssertScopeBase() {
data_ = GetAssertData();
@@ -110,18 +92,12 @@ class PerThreadAssertScopeBase {
static void SetThreadLocalData(PerThreadAssertData* data) {
Thread::SetThreadLocal(thread_local_key, data);
}
-#endif // DEBUG
};
-
template <PerThreadAssertType type, bool allow>
class PerThreadAssertScope : public PerThreadAssertScopeBase {
public:
-#ifndef DEBUG
- PerThreadAssertScope() { }
- static void SetIsAllowed(bool is_allowed) { }
-#else
PerThreadAssertScope() {
old_state_ = data_->get(type);
data_->set(type, allow);
@@ -136,49 +112,148 @@ class PerThreadAssertScope : public PerThreadAssertScopeBase {
private:
bool old_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerThreadAssertScope);
+};
+
+
+class PerIsolateAssertBase {
+ protected:
+ static uint32_t GetData(Isolate* isolate);
+ static void SetData(Isolate* isolate, uint32_t data);
+};
+
+
+template <PerIsolateAssertType type, bool allow>
+class PerIsolateAssertScope : public PerIsolateAssertBase {
+ public:
+ explicit PerIsolateAssertScope(Isolate* isolate) : isolate_(isolate) {
+ STATIC_ASSERT(type < 32);
+ old_data_ = GetData(isolate_);
+ SetData(isolate_, DataBit::update(old_data_, allow));
+ }
+
+ ~PerIsolateAssertScope() {
+ SetData(isolate_, old_data_);
+ }
+
+ static bool IsAllowed(Isolate* isolate) {
+ return DataBit::decode(GetData(isolate));
+ }
+
+ private:
+ typedef BitField<bool, type, 1> DataBit;
+
+ uint32_t old_data_;
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerIsolateAssertScope);
+};
+
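
The DataBit typedef relies on V8's BitField template; a minimal equivalent for the single-bit case used here, showing why saving and restoring the whole packed word is enough to nest scopes (a sketch only, not V8's full BitField):

    #include <cassert>
    #include <cstdint>

    template <int kShift>
    struct BoolBit {  // stand-in for BitField<bool, kShift, 1>
      static uint32_t update(uint32_t previous, bool value) {
        return (previous & ~(1u << kShift)) |
               (static_cast<uint32_t>(value) << kShift);
      }
      static bool decode(uint32_t data) { return (data >> kShift) & 1u; }
    };

    int main() {
      uint32_t data = 0;
      uint32_t saved = data;                   // scope constructor: save old word
      data = BoolBit<3>::update(data, true);   // e.g. DEOPTIMIZATION_ASSERT bit
      assert(BoolBit<3>::decode(data));
      data = saved;                            // scope destructor: restore
      assert(!BoolBit<3>::decode(data));
    }
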
+
+template <PerThreadAssertType type, bool allow>
+#ifdef DEBUG
+class PerThreadAssertScopeDebugOnly : public
+ PerThreadAssertScope<type, allow> {
+#else
+class PerThreadAssertScopeDebugOnly {
+ public:
+ PerThreadAssertScopeDebugOnly() { }
+#endif
+};
+
+
+template <PerIsolateAssertType type, bool allow>
+#ifdef DEBUG
+class PerIsolateAssertScopeDebugOnly : public
+ PerIsolateAssertScope<type, allow> {
+ public:
+ explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate)
+ : PerIsolateAssertScope<type, allow>(isolate) { }
+#else
+class PerIsolateAssertScopeDebugOnly {
+ public:
+ explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate) { }
#endif
};
+// Per-thread assert scopes.
+
// Scope to document where we do not expect handles to be created.
-typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, false>
DisallowHandleAllocation;
// Scope to introduce an exception to DisallowHandleAllocation.
-typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>
AllowHandleAllocation;
// Scope to document where we do not expect any allocation and GC.
-typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>
DisallowHeapAllocation;
// Scope to introduce an exception to DisallowHeapAllocation.
-typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, true>
AllowHeapAllocation;
// Scope to document where we do not expect any handle dereferences.
-typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, false>
DisallowHandleDereference;
// Scope to introduce an exception to DisallowHandleDereference.
-typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, true>
AllowHandleDereference;
// Scope to document where we do not expect deferred handles to be dereferenced.
-typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
DisallowDeferredHandleDereference;
// Scope to introduce an exception to DisallowDeferredHandleDereference.
-typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
AllowDeferredHandleDereference;
// Scope to document where we do not expect code dependencies to change.
-typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>
DisallowCodeDependencyChange;
// Scope to introduce an exception to DisallowCodeDependencyChange.
-typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>
AllowCodeDependencyChange;
+
+// Per-isolate assert scopes.
+
+// Scope to document where we do not expect javascript execution.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>
+ DisallowJavascriptExecution;
+
+// Scope to introduce an exception to DisallowJavascriptExecution.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>
+ AllowJavascriptExecution;
+
+// Scope in which JavaScript execution leads to an exception being thrown.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
+ ThrowOnJavascriptExecution;
+
+// Scope to introduce an exception to ThrowOnJavascriptExecution.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>
+ NoThrowOnJavascriptExecution;
+
+// Scope to document where we do not expect an allocation failure.
+typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, false>
+ DisallowAllocationFailure;
+
+// Scope to introduce an exception to DisallowAllocationFailure.
+typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, true>
+ AllowAllocationFailure;
+
+// Scope to document where we do not expect deoptimization.
+typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, false>
+ DisallowDeoptimization;
+
+// Scope to introduce an exception to DisallowDeoptimization.
+typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, true>
+ AllowDeoptimization;
+
} } // namespace v8::internal
#endif // V8_ASSERT_SCOPE_H_
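
For orientation, a minimal standalone sketch of the pattern the new PerIsolateAssertScope follows: the constructor saves the isolate's packed assert bits and updates one of them, the destructor restores the saved word, and IsAllowed() just decodes the bit. Everything here (FakeIsolate, ScopedAssert, the bit layout) is invented for illustration and is not part of the patch:

#include <cassert>
#include <cstdint>

struct FakeIsolate {                     // stand-in for v8::internal::Isolate
  uint32_t assert_data = 0xFFFFFFFFu;    // all assert kinds allowed initially
};

template <int kBit, bool kAllow>
class ScopedAssert {
 public:
  explicit ScopedAssert(FakeIsolate* isolate)
      : old_data_(isolate->assert_data), isolate_(isolate) {
    // Like DataBit::update(): set or clear this scope's bit.
    if (kAllow) {
      isolate_->assert_data |= (1u << kBit);
    } else {
      isolate_->assert_data &= ~(1u << kBit);
    }
  }
  ~ScopedAssert() { isolate_->assert_data = old_data_; }  // restore old bits

  static bool IsAllowed(FakeIsolate* isolate) {
    return (isolate->assert_data >> kBit) & 1u;  // like DataBit::decode()
  }

 private:
  uint32_t old_data_;
  FakeIsolate* isolate_;
};

int main() {
  FakeIsolate isolate;
  {
    ScopedAssert<0, false> disallow(&isolate);  // cf. DisallowJavascriptExecution
    assert(!ScopedAssert<0, false>::IsAllowed(&isolate));
    {
      ScopedAssert<0, true> allow(&isolate);    // cf. AllowJavascriptExecution
      assert(ScopedAssert<0, true>::IsAllowed(&isolate));
    }                                           // inner scope restores "disallowed"
    assert(!ScopedAssert<0, false>::IsAllowed(&isolate));
  }
  assert(ScopedAssert<0, false>::IsAllowed(&isolate));    // fully restored
  return 0;
}

Nesting works exactly because each scope restores whatever it saw on entry, so an Allow scope can temporarily punch a hole in an enclosing Disallow scope.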
diff --git a/chromium/v8/src/ast.cc b/chromium/v8/src/ast.cc
index 681b3d46b86..d332f4a3cbc 100644
--- a/chromium/v8/src/ast.cc
+++ b/chromium/v8/src/ast.cc
@@ -1,44 +1,21 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "ast.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast.h"
#include <cmath> // For isfinite.
-#include "builtins.h"
-#include "code-stubs.h"
-#include "contexts.h"
-#include "conversions.h"
-#include "hashmap.h"
-#include "parser.h"
-#include "property-details.h"
-#include "property.h"
-#include "scopes.h"
-#include "string-stream.h"
-#include "type-info.h"
+#include "src/builtins.h"
+#include "src/code-stubs.h"
+#include "src/contexts.h"
+#include "src/conversions.h"
+#include "src/hashmap.h"
+#include "src/parser.h"
+#include "src/property-details.h"
+#include "src/property.h"
+#include "src/scopes.h"
+#include "src/string-stream.h"
+#include "src/type-info.h"
namespace v8 {
namespace internal {
@@ -56,34 +33,35 @@ AST_NODE_LIST(DECL_ACCEPT)
// Implementation of other node functionality.
-bool Expression::IsSmiLiteral() {
- return AsLiteral() != NULL && AsLiteral()->value()->IsSmi();
+bool Expression::IsSmiLiteral() const {
+ return IsLiteral() && AsLiteral()->value()->IsSmi();
}
-bool Expression::IsStringLiteral() {
- return AsLiteral() != NULL && AsLiteral()->value()->IsString();
+bool Expression::IsStringLiteral() const {
+ return IsLiteral() && AsLiteral()->value()->IsString();
}
-bool Expression::IsNullLiteral() {
- return AsLiteral() != NULL && AsLiteral()->value()->IsNull();
+bool Expression::IsNullLiteral() const {
+ return IsLiteral() && AsLiteral()->value()->IsNull();
}
-bool Expression::IsUndefinedLiteral(Isolate* isolate) {
- VariableProxy* var_proxy = AsVariableProxy();
+bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
+ const VariableProxy* var_proxy = AsVariableProxy();
if (var_proxy == NULL) return false;
Variable* var = var_proxy->var();
// The global identifier "undefined" is immutable. Everything
// else could be reassigned.
return var != NULL && var->location() == Variable::UNALLOCATED &&
- var_proxy->name()->Equals(isolate->heap()->undefined_string());
+ String::Equals(var_proxy->name(),
+ isolate->factory()->undefined_string());
}
-VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position)
- : Expression(isolate, position),
+VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
+ : Expression(zone, position),
name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
@@ -94,12 +72,12 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position)
}
-VariableProxy::VariableProxy(Isolate* isolate,
+VariableProxy::VariableProxy(Zone* zone,
Handle<String> name,
bool is_this,
Interface* interface,
int position)
- : Expression(isolate, position),
+ : Expression(zone, position),
name_(name),
var_(NULL),
is_this_(is_this),
@@ -126,19 +104,18 @@ void VariableProxy::BindTo(Variable* var) {
}
-Assignment::Assignment(Isolate* isolate,
+Assignment::Assignment(Zone* zone,
Token::Value op,
Expression* target,
Expression* value,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
target_(target),
value_(value),
binary_operation_(NULL),
- assignment_id_(GetNextId(isolate)),
+ assignment_id_(GetNextId(zone)),
is_uninitialized_(false),
- is_pre_monomorphic_(false),
store_mode_(STANDARD_STORE) { }
@@ -181,24 +158,41 @@ int FunctionLiteral::end_position() const {
}
-LanguageMode FunctionLiteral::language_mode() const {
- return scope()->language_mode();
+StrictMode FunctionLiteral::strict_mode() const {
+ return scope()->strict_mode();
}
-ObjectLiteralProperty::ObjectLiteralProperty(Literal* key,
- Expression* value,
- Isolate* isolate) {
+void FunctionLiteral::InitializeSharedInfo(
+ Handle<Code> unoptimized_code) {
+ for (RelocIterator it(*unoptimized_code); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+ Object* obj = rinfo->target_object();
+ if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ if (shared->start_position() == start_position()) {
+ shared_info_ = Handle<SharedFunctionInfo>(shared);
+ break;
+ }
+ }
+ }
+}
+
+
+ObjectLiteralProperty::ObjectLiteralProperty(
+ Zone* zone, Literal* key, Expression* value) {
emit_store_ = true;
key_ = key;
value_ = value;
- Object* k = *key->value();
+ Handle<Object> k = key->value();
if (k->IsInternalizedString() &&
- isolate->heap()->proto_string()->Equals(String::cast(k))) {
+ String::Equals(Handle<String>::cast(k),
+ zone->isolate()->factory()->proto_string())) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
- } else if (value_->AsLiteral() != NULL) {
+ } else if (value_->IsLiteral()) {
kind_ = CONSTANT;
} else {
kind_ = COMPUTED;
@@ -206,8 +200,8 @@ ObjectLiteralProperty::ObjectLiteralProperty(Literal* key,
}
-ObjectLiteralProperty::ObjectLiteralProperty(bool is_getter,
- FunctionLiteral* value) {
+ObjectLiteralProperty::ObjectLiteralProperty(
+ Zone* zone, bool is_getter, FunctionLiteral* value) {
emit_store_ = true;
value_ = value;
kind_ = is_getter ? GETTER : SETTER;
@@ -342,8 +336,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Allocate a fixed array to hold all the object literals.
Handle<JSArray> array =
isolate->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
- isolate->factory()->SetElementsCapacityAndLength(
- array, values()->length(), values()->length());
+ JSArray::Expand(array, values()->length());
// Fill in the literals.
bool is_simple = true;
@@ -364,9 +357,9 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
} else if (boilerplate_value->IsUninitialized()) {
is_simple = false;
JSObject::SetOwnElement(
- array, i, handle(Smi::FromInt(0), isolate), kNonStrictMode);
+ array, i, handle(Smi::FromInt(0), isolate), SLOPPY).Assert();
} else {
- JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
+ JSObject::SetOwnElement(array, i, boilerplate_value, SLOPPY).Assert();
}
}
@@ -397,7 +390,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
Isolate* isolate) {
- if (expression->AsLiteral() != NULL) {
+ if (expression->IsLiteral()) {
return expression->AsLiteral()->value();
}
if (CompileTimeValue::IsCompileTimeValue(expression)) {
@@ -449,7 +442,7 @@ void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
-bool BinaryOperation::ResultOverwriteAllowed() {
+bool BinaryOperation::ResultOverwriteAllowed() const {
switch (op_) {
case Token::COMMA:
case Token::OR:
@@ -506,7 +499,7 @@ static bool IsVoidOfLiteral(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != NULL &&
maybe_unary->op() == Token::VOID &&
- maybe_unary->expression()->AsLiteral() != NULL;
+ maybe_unary->expression()->IsLiteral();
}
@@ -573,62 +566,31 @@ bool FunctionDeclaration::IsInlineable() const {
// TODO(rossberg): all RecordTypeFeedback functions should disappear
// once we use the common type field in the AST consistently.
-
void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
to_boolean_types_ = oracle->ToBooleanTypes(test_id());
}
-bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
- // If there is an interceptor, we can't compute the target for a direct call.
- if (type->has_named_interceptor()) return false;
+bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
+ CallType call_type = GetCallType(isolate);
+ return (call_type != POSSIBLY_EVAL_CALL);
+}
- if (check_type_ == RECEIVER_MAP_CHECK) {
- // For primitive checks the holder is set up to point to the corresponding
- // prototype object, i.e. one step of the algorithm below has been already
- // performed. For non-primitive checks we clear it to allow computing
- // targets for polymorphic calls.
- holder_ = Handle<JSObject>::null();
- }
- LookupResult lookup(type->GetIsolate());
- while (true) {
- // If a dictionary map is found in the prototype chain before the actual
- // target, a new target can always appear. In that case, bail out.
- // TODO(verwaest): Alternatively a runtime negative lookup on the normal
- // receiver or prototype could be added.
- if (type->is_dictionary_map()) return false;
- type->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) {
- switch (lookup.type()) {
- case CONSTANT: {
- // We surely know the target for a constant function.
- Handle<Object> constant(lookup.GetConstantFromMap(*type),
- type->GetIsolate());
- if (constant->IsJSFunction()) {
- target_ = Handle<JSFunction>::cast(constant);
- return true;
- }
- // Fall through.
- }
- case NORMAL:
- case FIELD:
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- // We don't know the target.
- return false;
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
+
+Call::CallType Call::GetCallType(Isolate* isolate) const {
+ VariableProxy* proxy = expression()->AsVariableProxy();
+ if (proxy != NULL) {
+ if (proxy->var()->is_possibly_eval(isolate)) {
+ return POSSIBLY_EVAL_CALL;
+ } else if (proxy->var()->IsUnallocated()) {
+ return GLOBAL_CALL;
+ } else if (proxy->var()->IsLookupSlot()) {
+ return LOOKUP_SLOT_CALL;
}
- // If we reach the end of the prototype chain, we don't know the target.
- if (!type->prototype()->IsJSObject()) return false;
- // Go up the prototype chain, recording where we are currently.
- holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
- type = Handle<Map>(holder()->map());
}
+
+ Property* property = expression()->AsProperty();
+ return property != NULL ? PROPERTY_CALL : OTHER_CALL;
}
@@ -653,87 +615,17 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
}
-Handle<JSObject> Call::GetPrototypeForPrimitiveCheck(
- CheckType check, Isolate* isolate) {
- v8::internal::Context* native_context = isolate->context()->native_context();
- JSFunction* function = NULL;
- switch (check) {
- case RECEIVER_MAP_CHECK:
- UNREACHABLE();
- break;
- case STRING_CHECK:
- function = native_context->string_function();
- break;
- case SYMBOL_CHECK:
- function = native_context->symbol_function();
- break;
- case NUMBER_CHECK:
- function = native_context->number_function();
- break;
- case BOOLEAN_CHECK:
- function = native_context->boolean_function();
- break;
- }
- ASSERT(function != NULL);
- return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
-}
-
-
-void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- CallKind call_kind) {
- is_monomorphic_ = oracle->CallIsMonomorphic(CallFeedbackId());
- Property* property = expression()->AsProperty();
- if (property == NULL) {
- // Function call. Specialize for monomorphic calls.
- if (is_monomorphic_) target_ = oracle->GetCallTarget(CallFeedbackId());
- } else if (property->key()->IsPropertyName()) {
- // Method call. Specialize for the receiver types seen at runtime.
- Literal* key = property->key()->AsLiteral();
- ASSERT(key != NULL && key->value()->IsString());
- Handle<String> name = Handle<String>::cast(key->value());
- check_type_ = oracle->GetCallCheckType(CallFeedbackId());
- receiver_types_.Clear();
- if (check_type_ == RECEIVER_MAP_CHECK) {
- oracle->CallReceiverTypes(CallFeedbackId(),
- name, arguments()->length(), call_kind, &receiver_types_);
- is_monomorphic_ = is_monomorphic_ && receiver_types_.length() > 0;
- } else {
- holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
- receiver_types_.Add(handle(holder_->map()), oracle->zone());
- }
-#ifdef ENABLE_SLOW_ASSERTS
- if (FLAG_enable_slow_asserts) {
- int length = receiver_types_.length();
- for (int i = 0; i < length; i++) {
- Handle<Map> map = receiver_types_.at(i);
- ASSERT(!map.is_null() && *map != NULL);
- }
- }
-#endif
- if (is_monomorphic_) {
- Handle<Map> map = receiver_types_.first();
- is_monomorphic_ = ComputeTarget(map, name);
- }
- } else {
- if (is_monomorphic_) {
- keyed_array_call_is_holey_ =
- oracle->KeyedArrayCallIsHoley(CallFeedbackId());
- }
- }
-}
-
-
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- allocation_info_cell_ =
- oracle->GetCallNewAllocationInfoCell(CallNewFeedbackId());
- is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackId());
+ int allocation_site_feedback_slot = FLAG_pretenuring_call_new
+ ? AllocationSiteFeedbackSlot()
+ : CallNewFeedbackSlot();
+ allocation_site_ =
+ oracle->GetCallNewAllocationSite(allocation_site_feedback_slot);
+ is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
if (is_monomorphic_) {
- target_ = oracle->GetCallNewTarget(CallNewFeedbackId());
- Object* value = allocation_info_cell_->value();
- ASSERT(!value->IsTheHole());
- if (value->IsAllocationSite()) {
- AllocationSite* site = AllocationSite::cast(value);
- elements_kind_ = site->GetElementsKind();
+ target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
+ if (!allocation_site_.is_null()) {
+ elements_kind_ = allocation_site_->GetElementsKind();
}
}
}
@@ -1117,16 +1009,16 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
}
-CaseClause::CaseClause(Isolate* isolate,
+CaseClause::CaseClause(Zone* zone,
Expression* label,
ZoneList<Statement*>* statements,
int pos)
- : AstNode(pos),
+ : Expression(zone, pos),
label_(label),
statements_(statements),
- compare_type_(Type::None(), isolate),
- compare_id_(AstNode::GetNextId(isolate)),
- entry_id_(AstNode::GetNextId(isolate)) {
+ compare_type_(Type::None(zone)),
+ compare_id_(AstNode::GetNextId(zone)),
+ entry_id_(AstNode::GetNextId(zone)) {
}
@@ -1134,6 +1026,11 @@ CaseClause::CaseClause(Isolate* isolate,
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
}
+#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_slot_node(node); \
+ }
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1146,6 +1043,12 @@ CaseClause::CaseClause(Isolate* isolate,
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
+#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_slot_node(node); \
+ add_flag(kDontSelfOptimize); \
+ }
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1180,8 +1083,8 @@ REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction)
-REGULAR_NODE(Call)
-REGULAR_NODE(CallNew)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew)
// In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
// But node->var() is usually not bound yet at VariableProxy creation time, and
@@ -1206,11 +1109,12 @@ DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
-DONT_SELFOPTIMIZE_NODE(ForInStatement)
+DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
+
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
if (node->is_jsruntime()) {
@@ -1242,12 +1146,12 @@ Handle<String> Literal::ToString() {
const char* str;
if (value_->IsSmi()) {
// Optimization only, the heap number case would subsume this.
- OS::SNPrintF(buffer, "%d", Smi::cast(*value_)->value());
+ SNPrintF(buffer, "%d", Smi::cast(*value_)->value());
str = arr;
} else {
str = DoubleToCString(value_->Number(), buffer);
}
- return isolate_->factory()->NewStringFromAscii(CStrVector(str));
+ return isolate_->factory()->NewStringFromAsciiChecked(str);
}
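
The ast.cc changes above replace per-call Cells with AllocationSites looked up by feedback-slot index. Here is a tiny standalone sketch (invented Node type and allocator, not V8's FeedbackSlotInterface) of the numbering convention: each node reports how many consecutive slots it needs, the allocator hands out the next free index, and CallNew addresses its optional pretenuring slot as first slot + 1, mirroring the AllocationSiteFeedbackSlot() accessor this patch adds:

#include <cassert>
#include <vector>

struct Node {
  int slot_count;       // what ComputeFeedbackSlotCount() would return
  int first_slot = -1;  // set by the allocator (-1 ~ kInvalidFeedbackSlot)
};

int AllocateFeedbackSlots(std::vector<Node>& nodes) {
  int next = 0;
  for (Node& n : nodes) {
    n.first_slot = next;  // SetFirstFeedbackSlot(next)
    next += n.slot_count;
  }
  return next;            // total, cf. AstProperties::feedback_slots()
}

int main() {
  bool pretenuring_call_new = true;  // stand-in for FLAG_pretenuring_call_new
  std::vector<Node> nodes = {
    {1},                             // Call: one slot
    {pretenuring_call_new ? 2 : 1},  // CallNew: call slot (+ site slot)
    {1},                             // ForInStatement: one slot
  };
  int total = AllocateFeedbackSlots(nodes);
  assert(total == 4);
  assert(nodes[1].first_slot == 1);      // CallNewFeedbackSlot()
  assert(nodes[1].first_slot + 1 == 2);  // AllocationSiteFeedbackSlot()
  return 0;
}
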
diff --git a/chromium/v8/src/ast.h b/chromium/v8/src/ast.h
index 4e413c5426e..3036fccbddb 100644
--- a/chromium/v8/src/ast.h
+++ b/chromium/v8/src/ast.h
@@ -1,50 +1,27 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_AST_H_
#define V8_AST_H_
-#include "v8.h"
-
-#include "assembler.h"
-#include "factory.h"
-#include "isolate.h"
-#include "jsregexp.h"
-#include "list-inl.h"
-#include "runtime.h"
-#include "small-pointer-list.h"
-#include "smart-pointers.h"
-#include "token.h"
-#include "type-info.h" // TODO(rossberg): this should eventually be removed
-#include "types.h"
-#include "utils.h"
-#include "variables.h"
-#include "interface.h"
-#include "zone-inl.h"
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/factory.h"
+#include "src/feedback-slots.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
+#include "src/list-inl.h"
+#include "src/runtime.h"
+#include "src/small-pointer-list.h"
+#include "src/smart-pointers.h"
+#include "src/token.h"
+#include "src/types.h"
+#include "src/utils.h"
+#include "src/variables.h"
+#include "src/interface.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -115,17 +92,14 @@ namespace internal {
V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
- V(ThisFunction)
-
-#define AUXILIARY_NODE_LIST(V) \
+ V(ThisFunction) \
V(CaseClause)
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
MODULE_NODE_LIST(V) \
STATEMENT_NODE_LIST(V) \
- EXPRESSION_NODE_LIST(V) \
- AUXILIARY_NODE_LIST(V)
+ EXPRESSION_NODE_LIST(V)
// Forward declarations
class AstConstructionVisitor;
@@ -185,15 +159,21 @@ class AstProperties V8_FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
- AstProperties() : node_count_(0) { }
+ AstProperties() : node_count_(0), feedback_slots_(0) {}
Flags* flags() { return &flags_; }
int node_count() { return node_count_; }
void add_node_count(int count) { node_count_ += count; }
+ int feedback_slots() const { return feedback_slots_; }
+ void increase_feedback_slots(int count) {
+ feedback_slots_ += count;
+ }
+
private:
Flags flags_;
int node_count_;
+ int feedback_slots_;
};
@@ -218,9 +198,14 @@ class AstNode: public ZoneObject {
int position() const { return position_; }
// Type testing & conversion functions overridden by concrete subclasses.
-#define DECLARE_NODE_FUNCTIONS(type) \
- bool Is##type() { return node_type() == AstNode::k##type; } \
- type* As##type() { return Is##type() ? reinterpret_cast<type*>(this) : NULL; }
+#define DECLARE_NODE_FUNCTIONS(type) \
+ bool Is##type() const { return node_type() == AstNode::k##type; } \
+ type* As##type() { \
+ return Is##type() ? reinterpret_cast<type*>(this) : NULL; \
+ } \
+ const type* As##type() const { \
+ return Is##type() ? reinterpret_cast<const type*>(this) : NULL; \
+ }
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
@@ -230,13 +215,13 @@ class AstNode: public ZoneObject {
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
protected:
- static int GetNextId(Isolate* isolate) {
- return ReserveIdRange(isolate, 1);
+ static int GetNextId(Zone* zone) {
+ return ReserveIdRange(zone, 1);
}
- static int ReserveIdRange(Isolate* isolate, int n) {
- int tmp = isolate->ast_node_id();
- isolate->set_ast_node_id(tmp + n);
+ static int ReserveIdRange(Zone* zone, int n) {
+ int tmp = zone->isolate()->ast_node_id();
+ zone->isolate()->set_ast_node_id(tmp + n);
return tmp;
}
@@ -259,7 +244,7 @@ class AstNode: public ZoneObject {
class Statement : public AstNode {
public:
- explicit Statement(int position) : AstNode(position) {}
+ explicit Statement(Zone* zone, int position) : AstNode(position) {}
bool IsEmpty() { return AsEmptyStatement() != NULL; }
virtual bool IsJump() const { return false; }
@@ -279,8 +264,7 @@ class SmallMapList V8_FINAL {
int length() const { return list_.length(); }
void AddMapIfMissing(Handle<Map> map, Zone* zone) {
- map = Map::CurrentMapForDeprecated(map);
- if (map.is_null()) return;
+ if (!Map::CurrentMapForDeprecated(map).ToHandle(&map)) return;
for (int i = 0; i < length(); ++i) {
if (at(i).is_identical_to(map)) return;
}
@@ -328,35 +312,35 @@ class Expression : public AstNode {
kTest
};
- virtual bool IsValidLeftHandSide() { return false; }
+ virtual bool IsValidReferenceExpression() const { return false; }
// Helpers for ToBoolean conversion.
- virtual bool ToBooleanIsTrue() { return false; }
- virtual bool ToBooleanIsFalse() { return false; }
+ virtual bool ToBooleanIsTrue() const { return false; }
+ virtual bool ToBooleanIsFalse() const { return false; }
// Symbols that cannot be parsed as array indices are considered property
// names. We do not treat symbols that can be array indices as property
// names because [] for string objects is handled only by keyed ICs.
- virtual bool IsPropertyName() { return false; }
+ virtual bool IsPropertyName() const { return false; }
// True iff the result can be safely overwritten (to avoid allocation).
// False for operations that can return one of their operands.
- virtual bool ResultOverwriteAllowed() { return false; }
+ virtual bool ResultOverwriteAllowed() const { return false; }
// True iff the expression is a literal represented as a smi.
- bool IsSmiLiteral();
+ bool IsSmiLiteral() const;
// True iff the expression is a string literal.
- bool IsStringLiteral();
+ bool IsStringLiteral() const;
// True iff the expression is the null literal.
- bool IsNullLiteral();
+ bool IsNullLiteral() const;
// True if we can prove that the expression is the undefined literal.
- bool IsUndefinedLiteral(Isolate* isolate);
+ bool IsUndefinedLiteral(Isolate* isolate) const;
// Expression type bounds
- Bounds bounds() { return bounds_; }
+ Bounds bounds() const { return bounds_; }
void set_bounds(Bounds bounds) { bounds_ = bounds; }
// Type feedback information for assignments and properties.
@@ -381,11 +365,11 @@ class Expression : public AstNode {
TypeFeedbackId test_id() const { return test_id_; }
protected:
- Expression(Isolate* isolate, int pos)
+ Expression(Zone* zone, int pos)
: AstNode(pos),
- bounds_(Bounds::Unbounded(isolate)),
- id_(GetNextId(isolate)),
- test_id_(GetNextId(isolate)) {}
+ bounds_(Bounds::Unbounded(zone)),
+ id_(GetNextId(zone)),
+ test_id_(GetNextId(zone)) {}
void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
private:
@@ -426,13 +410,13 @@ class BreakableStatement : public Statement {
protected:
BreakableStatement(
- Isolate* isolate, ZoneStringList* labels,
+ Zone* zone, ZoneStringList* labels,
BreakableType breakable_type, int position)
- : Statement(position),
+ : Statement(zone, position),
labels_(labels),
breakable_type_(breakable_type),
- entry_id_(GetNextId(isolate)),
- exit_id_(GetNextId(isolate)) {
+ entry_id_(GetNextId(zone)),
+ exit_id_(GetNextId(zone)) {
ASSERT(labels == NULL || labels->length() > 0);
}
@@ -457,6 +441,8 @@ class Block V8_FINAL : public BreakableStatement {
ZoneList<Statement*>* statements() { return &statements_; }
bool is_initializer_block() const { return is_initializer_block_; }
+ BailoutId DeclsId() const { return decls_id_; }
+
virtual bool IsJump() const V8_OVERRIDE {
return !statements_.is_empty() && statements_.last()->IsJump()
&& labels() == NULL; // Good enough as an approximation...
@@ -466,21 +452,22 @@ class Block V8_FINAL : public BreakableStatement {
void set_scope(Scope* scope) { scope_ = scope; }
protected:
- Block(Isolate* isolate,
+ Block(Zone* zone,
ZoneStringList* labels,
int capacity,
bool is_initializer_block,
- int pos,
- Zone* zone)
- : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY, pos),
+ int pos)
+ : BreakableStatement(zone, labels, TARGET_FOR_NAMED_ONLY, pos),
statements_(capacity, zone),
is_initializer_block_(is_initializer_block),
+ decls_id_(GetNextId(zone)),
scope_(NULL) {
}
private:
ZoneList<Statement*> statements_;
bool is_initializer_block_;
+ const BailoutId decls_id_;
Scope* scope_;
};
@@ -494,7 +481,8 @@ class Declaration : public AstNode {
virtual bool IsInlineable() const;
protected:
- Declaration(VariableProxy* proxy,
+ Declaration(Zone* zone,
+ VariableProxy* proxy,
VariableMode mode,
Scope* scope,
int pos)
@@ -523,11 +511,12 @@ class VariableDeclaration V8_FINAL : public Declaration {
}
protected:
- VariableDeclaration(VariableProxy* proxy,
+ VariableDeclaration(Zone* zone,
+ VariableProxy* proxy,
VariableMode mode,
Scope* scope,
int pos)
- : Declaration(proxy, mode, scope, pos) {
+ : Declaration(zone, proxy, mode, scope, pos) {
}
};
@@ -543,12 +532,13 @@ class FunctionDeclaration V8_FINAL : public Declaration {
virtual bool IsInlineable() const V8_OVERRIDE;
protected:
- FunctionDeclaration(VariableProxy* proxy,
+ FunctionDeclaration(Zone* zone,
+ VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
Scope* scope,
int pos)
- : Declaration(proxy, mode, scope, pos),
+ : Declaration(zone, proxy, mode, scope, pos),
fun_(fun) {
// At the moment there are no "const functions" in JavaScript...
ASSERT(mode == VAR || mode == LET);
@@ -570,11 +560,12 @@ class ModuleDeclaration V8_FINAL : public Declaration {
}
protected:
- ModuleDeclaration(VariableProxy* proxy,
+ ModuleDeclaration(Zone* zone,
+ VariableProxy* proxy,
Module* module,
Scope* scope,
int pos)
- : Declaration(proxy, MODULE, scope, pos),
+ : Declaration(zone, proxy, MODULE, scope, pos),
module_(module) {
}
@@ -593,11 +584,12 @@ class ImportDeclaration V8_FINAL : public Declaration {
}
protected:
- ImportDeclaration(VariableProxy* proxy,
+ ImportDeclaration(Zone* zone,
+ VariableProxy* proxy,
Module* module,
Scope* scope,
int pos)
- : Declaration(proxy, LET, scope, pos),
+ : Declaration(zone, proxy, LET, scope, pos),
module_(module) {
}
@@ -615,8 +607,8 @@ class ExportDeclaration V8_FINAL : public Declaration {
}
protected:
- ExportDeclaration(VariableProxy* proxy, Scope* scope, int pos)
- : Declaration(proxy, LET, scope, pos) {}
+ ExportDeclaration(Zone* zone, VariableProxy* proxy, Scope* scope, int pos)
+ : Declaration(zone, proxy, LET, scope, pos) {}
};
@@ -630,7 +622,7 @@ class Module : public AstNode {
: AstNode(pos),
interface_(Interface::NewModule(zone)),
body_(NULL) {}
- Module(Interface* interface, int pos, Block* body = NULL)
+ Module(Zone* zone, Interface* interface, int pos, Block* body = NULL)
: AstNode(pos),
interface_(interface),
body_(body) {}
@@ -646,8 +638,8 @@ class ModuleLiteral V8_FINAL : public Module {
DECLARE_NODE_TYPE(ModuleLiteral)
protected:
- ModuleLiteral(Block* body, Interface* interface, int pos)
- : Module(interface, pos, body) {}
+ ModuleLiteral(Zone* zone, Block* body, Interface* interface, int pos)
+ : Module(zone, interface, pos, body) {}
};
@@ -658,7 +650,7 @@ class ModuleVariable V8_FINAL : public Module {
VariableProxy* proxy() const { return proxy_; }
protected:
- inline ModuleVariable(VariableProxy* proxy, int pos);
+ inline ModuleVariable(Zone* zone, VariableProxy* proxy, int pos);
private:
VariableProxy* proxy_;
@@ -673,7 +665,7 @@ class ModulePath V8_FINAL : public Module {
Handle<String> name() const { return name_; }
protected:
- ModulePath(Module* module, Handle<String> name, Zone* zone, int pos)
+ ModulePath(Zone* zone, Module* module, Handle<String> name, int pos)
: Module(zone, pos),
module_(module),
name_(name) {
@@ -692,7 +684,7 @@ class ModuleUrl V8_FINAL : public Module {
Handle<String> url() const { return url_; }
protected:
- ModuleUrl(Handle<String> url, Zone* zone, int pos)
+ ModuleUrl(Zone* zone, Handle<String> url, int pos)
: Module(zone, pos), url_(url) {
}
@@ -709,8 +701,8 @@ class ModuleStatement V8_FINAL : public Statement {
Block* body() const { return body_; }
protected:
- ModuleStatement(VariableProxy* proxy, Block* body, int pos)
- : Statement(pos),
+ ModuleStatement(Zone* zone, VariableProxy* proxy, Block* body, int pos)
+ : Statement(zone, pos),
proxy_(proxy),
body_(body) {
}
@@ -738,10 +730,10 @@ class IterationStatement : public BreakableStatement {
Label* continue_target() { return &continue_target_; }
protected:
- IterationStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
+ IterationStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
body_(NULL),
- osr_entry_id_(GetNextId(isolate)) {
+ osr_entry_id_(GetNextId(zone)) {
}
void Initialize(Statement* body) {
@@ -772,11 +764,11 @@ class DoWhileStatement V8_FINAL : public IterationStatement {
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- DoWhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : IterationStatement(isolate, labels, pos),
+ DoWhileStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : IterationStatement(zone, labels, pos),
cond_(NULL),
- continue_id_(GetNextId(isolate)),
- back_edge_id_(GetNextId(isolate)) {
+ continue_id_(GetNextId(zone)),
+ back_edge_id_(GetNextId(zone)) {
}
private:
@@ -809,11 +801,11 @@ class WhileStatement V8_FINAL : public IterationStatement {
BailoutId BodyId() const { return body_id_; }
protected:
- WhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : IterationStatement(isolate, labels, pos),
+ WhileStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : IterationStatement(zone, labels, pos),
cond_(NULL),
may_have_function_literal_(true),
- body_id_(GetNextId(isolate)) {
+ body_id_(GetNextId(zone)) {
}
private:
@@ -860,15 +852,15 @@ class ForStatement V8_FINAL : public IterationStatement {
void set_loop_variable(Variable* var) { loop_variable_ = var; }
protected:
- ForStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : IterationStatement(isolate, labels, pos),
+ ForStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : IterationStatement(zone, labels, pos),
init_(NULL),
cond_(NULL),
next_(NULL),
may_have_function_literal_(true),
loop_variable_(NULL),
- continue_id_(GetNextId(isolate)),
- body_id_(GetNextId(isolate)) {
+ continue_id_(GetNextId(zone)),
+ body_id_(GetNextId(zone)) {
}
private:
@@ -902,8 +894,8 @@ class ForEachStatement : public IterationStatement {
Expression* subject() const { return subject_; }
protected:
- ForEachStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : IterationStatement(isolate, labels, pos),
+ ForEachStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : IterationStatement(zone, labels, pos),
each_(NULL),
subject_(NULL) {
}
@@ -914,7 +906,8 @@ class ForEachStatement : public IterationStatement {
};
-class ForInStatement V8_FINAL : public ForEachStatement {
+class ForInStatement V8_FINAL : public ForEachStatement,
+ public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(ForInStatement)
@@ -922,7 +915,15 @@ class ForInStatement V8_FINAL : public ForEachStatement {
return subject();
}
- TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
+ // Type feedback information.
+ virtual int ComputeFeedbackSlotCount() { return 1; }
+ virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; }
+
+ int ForInFeedbackSlot() {
+ ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot);
+ return for_in_feedback_slot_;
+ }
+
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; }
void set_for_in_type(ForInType type) { for_in_type_ = type; }
@@ -933,14 +934,16 @@ class ForInStatement V8_FINAL : public ForEachStatement {
virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
protected:
- ForInStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : ForEachStatement(isolate, labels, pos),
+ ForInStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : ForEachStatement(zone, labels, pos),
for_in_type_(SLOW_FOR_IN),
- body_id_(GetNextId(isolate)),
- prepare_id_(GetNextId(isolate)) {
+ for_in_feedback_slot_(kInvalidFeedbackSlot),
+ body_id_(GetNextId(zone)),
+ prepare_id_(GetNextId(zone)) {
}
ForInType for_in_type_;
+ int for_in_feedback_slot_;
const BailoutId body_id_;
const BailoutId prepare_id_;
};
@@ -953,11 +956,13 @@ class ForOfStatement V8_FINAL : public ForEachStatement {
void Initialize(Expression* each,
Expression* subject,
Statement* body,
+ Expression* assign_iterable,
Expression* assign_iterator,
Expression* next_result,
Expression* result_done,
Expression* assign_each) {
ForEachStatement::Initialize(each, subject, body);
+ assign_iterable_ = assign_iterable;
assign_iterator_ = assign_iterator;
next_result_ = next_result;
result_done_ = result_done;
@@ -968,7 +973,12 @@ class ForOfStatement V8_FINAL : public ForEachStatement {
return subject();
}
- // var iterator = iterable;
+ // var iterable = subject;
+ Expression* assign_iterable() const {
+ return assign_iterable_;
+ }
+
+ // var iterator = iterable[Symbol.iterator]();
Expression* assign_iterator() const {
return assign_iterator_;
}
@@ -994,15 +1004,16 @@ class ForOfStatement V8_FINAL : public ForEachStatement {
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- ForOfStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : ForEachStatement(isolate, labels, pos),
+ ForOfStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : ForEachStatement(zone, labels, pos),
assign_iterator_(NULL),
next_result_(NULL),
result_done_(NULL),
assign_each_(NULL),
- back_edge_id_(GetNextId(isolate)) {
+ back_edge_id_(GetNextId(zone)) {
}
+ Expression* assign_iterable_;
Expression* assign_iterator_;
Expression* next_result_;
Expression* result_done_;
@@ -1020,8 +1031,8 @@ class ExpressionStatement V8_FINAL : public Statement {
virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); }
protected:
- ExpressionStatement(Expression* expression, int pos)
- : Statement(pos), expression_(expression) { }
+ ExpressionStatement(Zone* zone, Expression* expression, int pos)
+ : Statement(zone, pos), expression_(expression) { }
private:
Expression* expression_;
@@ -1033,7 +1044,7 @@ class JumpStatement : public Statement {
virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return true; }
protected:
- explicit JumpStatement(int pos) : Statement(pos) {}
+ explicit JumpStatement(Zone* zone, int pos) : Statement(zone, pos) {}
};
@@ -1044,8 +1055,8 @@ class ContinueStatement V8_FINAL : public JumpStatement {
IterationStatement* target() const { return target_; }
protected:
- explicit ContinueStatement(IterationStatement* target, int pos)
- : JumpStatement(pos), target_(target) { }
+ explicit ContinueStatement(Zone* zone, IterationStatement* target, int pos)
+ : JumpStatement(zone, pos), target_(target) { }
private:
IterationStatement* target_;
@@ -1059,8 +1070,8 @@ class BreakStatement V8_FINAL : public JumpStatement {
BreakableStatement* target() const { return target_; }
protected:
- explicit BreakStatement(BreakableStatement* target, int pos)
- : JumpStatement(pos), target_(target) { }
+ explicit BreakStatement(Zone* zone, BreakableStatement* target, int pos)
+ : JumpStatement(zone, pos), target_(target) { }
private:
BreakableStatement* target_;
@@ -1074,8 +1085,8 @@ class ReturnStatement V8_FINAL : public JumpStatement {
Expression* expression() const { return expression_; }
protected:
- explicit ReturnStatement(Expression* expression, int pos)
- : JumpStatement(pos), expression_(expression) { }
+ explicit ReturnStatement(Zone* zone, Expression* expression, int pos)
+ : JumpStatement(zone, pos), expression_(expression) { }
private:
Expression* expression_;
@@ -1092,8 +1103,9 @@ class WithStatement V8_FINAL : public Statement {
protected:
WithStatement(
- Scope* scope, Expression* expression, Statement* statement, int pos)
- : Statement(pos),
+ Zone* zone, Scope* scope,
+ Expression* expression, Statement* statement, int pos)
+ : Statement(zone, pos),
scope_(scope),
expression_(expression),
statement_(statement) { }
@@ -1105,7 +1117,7 @@ class WithStatement V8_FINAL : public Statement {
};
-class CaseClause V8_FINAL : public AstNode {
+class CaseClause V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CaseClause)
@@ -1121,11 +1133,11 @@ class CaseClause V8_FINAL : public AstNode {
// Type feedback information.
TypeFeedbackId CompareId() { return compare_id_; }
- Handle<Type> compare_type() { return compare_type_; }
- void set_compare_type(Handle<Type> type) { compare_type_ = type; }
+ Type* compare_type() { return compare_type_; }
+ void set_compare_type(Type* type) { compare_type_ = type; }
private:
- CaseClause(Isolate* isolate,
+ CaseClause(Zone* zone,
Expression* label,
ZoneList<Statement*>* statements,
int pos);
@@ -1133,7 +1145,7 @@ class CaseClause V8_FINAL : public AstNode {
Expression* label_;
Label body_target_;
ZoneList<Statement*>* statements_;
- Handle<Type> compare_type_;
+ Type* compare_type_;
const TypeFeedbackId compare_id_;
const BailoutId entry_id_;
@@ -1147,26 +1159,20 @@ class SwitchStatement V8_FINAL : public BreakableStatement {
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;
cases_ = cases;
- switch_type_ = UNKNOWN_SWITCH;
}
Expression* tag() const { return tag_; }
ZoneList<CaseClause*>* cases() const { return cases_; }
- enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH, GENERIC_SWITCH };
- SwitchType switch_type() const { return switch_type_; }
- void set_switch_type(SwitchType switch_type) { switch_type_ = switch_type; }
-
protected:
- SwitchStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
+ SwitchStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
tag_(NULL),
cases_(NULL) { }
private:
Expression* tag_;
ZoneList<CaseClause*>* cases_;
- SwitchType switch_type_;
};
@@ -1196,18 +1202,18 @@ class IfStatement V8_FINAL : public Statement {
BailoutId ElseId() const { return else_id_; }
protected:
- IfStatement(Isolate* isolate,
+ IfStatement(Zone* zone,
Expression* condition,
Statement* then_statement,
Statement* else_statement,
int pos)
- : Statement(pos),
+ : Statement(zone, pos),
condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement),
- if_id_(GetNextId(isolate)),
- then_id_(GetNextId(isolate)),
- else_id_(GetNextId(isolate)) {
+ if_id_(GetNextId(zone)),
+ then_id_(GetNextId(zone)),
+ else_id_(GetNextId(zone)) {
}
private:
@@ -1255,8 +1261,8 @@ class TryStatement : public Statement {
ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
protected:
- TryStatement(int index, Block* try_block, int pos)
- : Statement(pos),
+ TryStatement(Zone* zone, int index, Block* try_block, int pos)
+ : Statement(zone, pos),
index_(index),
try_block_(try_block),
escaping_targets_(NULL) { }
@@ -1279,13 +1285,14 @@ class TryCatchStatement V8_FINAL : public TryStatement {
Block* catch_block() const { return catch_block_; }
protected:
- TryCatchStatement(int index,
+ TryCatchStatement(Zone* zone,
+ int index,
Block* try_block,
Scope* scope,
Variable* variable,
Block* catch_block,
int pos)
- : TryStatement(index, try_block, pos),
+ : TryStatement(zone, index, try_block, pos),
scope_(scope),
variable_(variable),
catch_block_(catch_block) {
@@ -1306,8 +1313,8 @@ class TryFinallyStatement V8_FINAL : public TryStatement {
protected:
TryFinallyStatement(
- int index, Block* try_block, Block* finally_block, int pos)
- : TryStatement(index, try_block, pos),
+ Zone* zone, int index, Block* try_block, Block* finally_block, int pos)
+ : TryStatement(zone, index, try_block, pos),
finally_block_(finally_block) { }
private:
@@ -1320,7 +1327,7 @@ class DebuggerStatement V8_FINAL : public Statement {
DECLARE_NODE_TYPE(DebuggerStatement)
protected:
- explicit DebuggerStatement(int pos): Statement(pos) {}
+ explicit DebuggerStatement(Zone* zone, int pos): Statement(zone, pos) {}
};
@@ -1329,7 +1336,7 @@ class EmptyStatement V8_FINAL : public Statement {
DECLARE_NODE_TYPE(EmptyStatement)
protected:
- explicit EmptyStatement(int pos): Statement(pos) {}
+ explicit EmptyStatement(Zone* zone, int pos): Statement(zone, pos) {}
};
@@ -1337,7 +1344,7 @@ class Literal V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Literal)
- virtual bool IsPropertyName() V8_OVERRIDE {
+ virtual bool IsPropertyName() const V8_OVERRIDE {
if (value_->IsInternalizedString()) {
uint32_t ignored;
return !String::cast(*value_)->AsArrayIndex(&ignored);
@@ -1350,27 +1357,13 @@ class Literal V8_FINAL : public Expression {
return Handle<String>::cast(value_);
}
- virtual bool ToBooleanIsTrue() V8_OVERRIDE {
+ virtual bool ToBooleanIsTrue() const V8_OVERRIDE {
return value_->BooleanValue();
}
- virtual bool ToBooleanIsFalse() V8_OVERRIDE {
+ virtual bool ToBooleanIsFalse() const V8_OVERRIDE {
return !value_->BooleanValue();
}
- // Identity testers.
- bool IsNull() const {
- ASSERT(!value_.is_null());
- return value_->IsNull();
- }
- bool IsTrue() const {
- ASSERT(!value_.is_null());
- return value_->IsTrue();
- }
- bool IsFalse() const {
- ASSERT(!value_.is_null());
- return value_->IsFalse();
- }
-
Handle<Object> value() const { return value_; }
// Support for using Literal as a HashMap key. NOTE: Currently, this works
@@ -1380,17 +1373,16 @@ class Literal V8_FINAL : public Expression {
static bool Match(void* literal1, void* literal2) {
Handle<String> s1 = static_cast<Literal*>(literal1)->ToString();
Handle<String> s2 = static_cast<Literal*>(literal2)->ToString();
- return s1->Equals(*s2);
+ return String::Equals(s1, s2);
}
TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
protected:
- Literal(
- Isolate* isolate, Handle<Object> value, int position)
- : Expression(isolate, position),
+ Literal(Zone* zone, Handle<Object> value, int position)
+ : Expression(zone, position),
value_(value),
- isolate_(isolate) { }
+ isolate_(zone->isolate()) { }
private:
Handle<String> ToString();
@@ -1415,10 +1407,10 @@ class MaterializedLiteral : public Expression {
}
protected:
- MaterializedLiteral(Isolate* isolate,
+ MaterializedLiteral(Zone* zone,
int literal_index,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
literal_index_(literal_index),
is_simple_(false),
depth_(0) {}
@@ -1466,7 +1458,7 @@ class ObjectLiteralProperty V8_FINAL : public ZoneObject {
PROTOTYPE // Property is __proto__.
};
- ObjectLiteralProperty(Literal* key, Expression* value, Isolate* isolate);
+ ObjectLiteralProperty(Zone* zone, Literal* key, Expression* value);
Literal* key() { return key_; }
Expression* value() { return value_; }
@@ -1485,7 +1477,7 @@ class ObjectLiteralProperty V8_FINAL : public ZoneObject {
protected:
template<class> friend class AstNodeFactory;
- ObjectLiteralProperty(bool is_getter, FunctionLiteral* value);
+ ObjectLiteralProperty(Zone* zone, bool is_getter, FunctionLiteral* value);
void set_key(Literal* key) { key_ = key; }
private:
@@ -1537,13 +1529,13 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
};
protected:
- ObjectLiteral(Isolate* isolate,
+ ObjectLiteral(Zone* zone,
ZoneList<Property*>* properties,
int literal_index,
int boilerplate_properties,
bool has_function,
int pos)
- : MaterializedLiteral(isolate, literal_index, pos),
+ : MaterializedLiteral(zone, literal_index, pos),
properties_(properties),
boilerplate_properties_(boilerplate_properties),
fast_elements_(false),
@@ -1569,12 +1561,12 @@ class RegExpLiteral V8_FINAL : public MaterializedLiteral {
Handle<String> flags() const { return flags_; }
protected:
- RegExpLiteral(Isolate* isolate,
+ RegExpLiteral(Zone* zone,
Handle<String> pattern,
Handle<String> flags,
int literal_index,
int pos)
- : MaterializedLiteral(isolate, literal_index, pos),
+ : MaterializedLiteral(zone, literal_index, pos),
pattern_(pattern),
flags_(flags) {
set_depth(1);
@@ -1610,13 +1602,13 @@ class ArrayLiteral V8_FINAL : public MaterializedLiteral {
};
protected:
- ArrayLiteral(Isolate* isolate,
+ ArrayLiteral(Zone* zone,
ZoneList<Expression*>* values,
int literal_index,
int pos)
- : MaterializedLiteral(isolate, literal_index, pos),
+ : MaterializedLiteral(zone, literal_index, pos),
values_(values),
- first_element_id_(ReserveIdRange(isolate, values->length())) {}
+ first_element_id_(ReserveIdRange(zone, values->length())) {}
private:
Handle<FixedArray> constant_elements_;
@@ -1629,19 +1621,17 @@ class VariableProxy V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(VariableProxy)
- virtual bool IsValidLeftHandSide() V8_OVERRIDE {
- return var_ == NULL ? true : var_->IsValidLeftHandSide();
+ virtual bool IsValidReferenceExpression() const V8_OVERRIDE {
+ return var_ == NULL ? true : var_->IsValidReference();
}
- bool IsVariable(Handle<String> n) {
+ bool IsVariable(Handle<String> n) const {
return !is_this() && name().is_identical_to(n);
}
- bool IsArguments() { return var_ != NULL && var_->is_arguments(); }
+ bool IsArguments() const { return var_ != NULL && var_->is_arguments(); }
- bool IsLValue() {
- return is_lvalue_;
- }
+ bool IsLValue() const { return is_lvalue_; }
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
@@ -1656,9 +1646,9 @@ class VariableProxy V8_FINAL : public Expression {
void BindTo(Variable* var);
protected:
- VariableProxy(Isolate* isolate, Variable* var, int position);
+ VariableProxy(Zone* zone, Variable* var, int position);
- VariableProxy(Isolate* isolate,
+ VariableProxy(Zone* zone,
Handle<String> name,
bool is_this,
Interface* interface,
@@ -1679,7 +1669,7 @@ class Property V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Property)
- virtual bool IsValidLeftHandSide() V8_OVERRIDE { return true; }
+ virtual bool IsValidReferenceExpression() const V8_OVERRIDE { return true; }
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
@@ -1699,28 +1689,28 @@ class Property V8_FINAL : public Expression {
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return STANDARD_STORE;
}
- bool IsUninitialized() { return is_uninitialized_; }
- bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+ bool IsUninitialized() { return !is_for_call_ && is_uninitialized_; }
bool HasNoTypeInformation() {
- return is_uninitialized_ || is_pre_monomorphic_;
+ return is_uninitialized_;
}
void set_is_uninitialized(bool b) { is_uninitialized_ = b; }
- void set_is_pre_monomorphic(bool b) { is_pre_monomorphic_ = b; }
void set_is_string_access(bool b) { is_string_access_ = b; }
void set_is_function_prototype(bool b) { is_function_prototype_ = b; }
+ void mark_for_call() { is_for_call_ = true; }
+ bool IsForCall() { return is_for_call_; }
TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
protected:
- Property(Isolate* isolate,
+ Property(Zone* zone,
Expression* obj,
Expression* key,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
obj_(obj),
key_(key),
- load_id_(GetNextId(isolate)),
- is_pre_monomorphic_(false),
+ load_id_(GetNextId(zone)),
+ is_for_call_(false),
is_uninitialized_(false),
is_string_access_(false),
is_function_prototype_(false) { }
@@ -1731,14 +1721,14 @@ class Property V8_FINAL : public Expression {
const BailoutId load_id_;
SmallMapList receiver_types_;
- bool is_pre_monomorphic_ : 1;
+ bool is_for_call_ : 1;
bool is_uninitialized_ : 1;
bool is_string_access_ : 1;
bool is_function_prototype_ : 1;
};
-class Call V8_FINAL : public Expression {
+class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(Call)
@@ -1746,48 +1736,64 @@ class Call V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind);
+ virtual int ComputeFeedbackSlotCount() { return 1; }
+ virtual void SetFirstFeedbackSlot(int slot) {
+ call_feedback_slot_ = slot;
+ }
+
+ bool HasCallFeedbackSlot() const {
+ return call_feedback_slot_ != kInvalidFeedbackSlot;
+ }
+ int CallFeedbackSlot() const { return call_feedback_slot_; }
+
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
- return &receiver_types_;
+ if (expression()->IsProperty()) {
+ return expression()->AsProperty()->GetReceiverTypes();
+ }
+ return NULL;
}
- virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
- bool KeyedArrayCallIsHoley() { return keyed_array_call_is_holey_; }
- CheckType check_type() const { return check_type_; }
- void set_string_check(Handle<JSObject> holder) {
- holder_ = holder;
- check_type_ = STRING_CHECK;
+ virtual bool IsMonomorphic() V8_OVERRIDE {
+ if (expression()->IsProperty()) {
+ return expression()->AsProperty()->IsMonomorphic();
+ }
+ return !target_.is_null();
}
- void set_number_check(Handle<JSObject> holder) {
- holder_ = holder;
- check_type_ = NUMBER_CHECK;
+ bool global_call() const {
+ VariableProxy* proxy = expression_->AsVariableProxy();
+ return proxy != NULL && proxy->var()->IsUnallocated();
}
- void set_map_check() {
- holder_ = Handle<JSObject>::null();
- check_type_ = RECEIVER_MAP_CHECK;
+ bool known_global_function() const {
+ return global_call() && !target_.is_null();
}
Handle<JSFunction> target() { return target_; }
- // A cache for the holder, set as a side effect of computing the target of the
- // call. Note that it contains the null handle when the receiver is the same
- // as the holder!
- Handle<JSObject> holder() { return holder_; }
-
Handle<Cell> cell() { return cell_; }
- bool ComputeTarget(Handle<Map> type, Handle<String> name);
+ Handle<AllocationSite> allocation_site() { return allocation_site_; }
+
+ void set_target(Handle<JSFunction> target) { target_ = target; }
+ void set_allocation_site(Handle<AllocationSite> site) {
+ allocation_site_ = site;
+ }
bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
BailoutId ReturnId() const { return return_id_; }
- // TODO(rossberg): this should really move somewhere else (and be merged with
- // various similar methods in objets.cc), but for now...
- static Handle<JSObject> GetPrototypeForPrimitiveCheck(
- CheckType check, Isolate* isolate);
+ enum CallType {
+ POSSIBLY_EVAL_CALL,
+ GLOBAL_CALL,
+ LOOKUP_SLOT_CALL,
+ PROPERTY_CALL,
+ OTHER_CALL
+ };
+
+ // Helpers to determine how to handle the call.
+ CallType GetCallType(Isolate* isolate) const;
+ bool IsUsingCallFeedbackSlot(Isolate* isolate) const;
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
@@ -1795,35 +1801,34 @@ class Call V8_FINAL : public Expression {
#endif
protected:
- Call(Isolate* isolate,
+ Call(Zone* zone,
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
expression_(expression),
arguments_(arguments),
- is_monomorphic_(false),
- keyed_array_call_is_holey_(true),
- check_type_(RECEIVER_MAP_CHECK),
- return_id_(GetNextId(isolate)) { }
+ call_feedback_slot_(kInvalidFeedbackSlot),
+ return_id_(GetNextId(zone)) {
+ if (expression->IsProperty()) {
+ expression->AsProperty()->mark_for_call();
+ }
+ }
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
- bool is_monomorphic_;
- bool keyed_array_call_is_holey_;
- CheckType check_type_;
- SmallMapList receiver_types_;
Handle<JSFunction> target_;
- Handle<JSObject> holder_;
Handle<Cell> cell_;
+ Handle<AllocationSite> allocation_site_;
+ int call_feedback_slot_;
const BailoutId return_id_;
};
-class CallNew V8_FINAL : public Expression {
+class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(CallNew)
@@ -1831,28 +1836,47 @@ class CallNew V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
+ virtual int ComputeFeedbackSlotCount() {
+ return FLAG_pretenuring_call_new ? 2 : 1;
+ }
+ virtual void SetFirstFeedbackSlot(int slot) {
+ callnew_feedback_slot_ = slot;
+ }
+
+ int CallNewFeedbackSlot() {
+ ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
+ return callnew_feedback_slot_;
+ }
+ int AllocationSiteFeedbackSlot() {
+ ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
+ ASSERT(FLAG_pretenuring_call_new);
+ return callnew_feedback_slot_ + 1;
+ }
+
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
ElementsKind elements_kind() const { return elements_kind_; }
- Handle<Cell> allocation_info_cell() const {
- return allocation_info_cell_;
+ Handle<AllocationSite> allocation_site() const {
+ return allocation_site_;
}
+ static int feedback_slots() { return 1; }
+
BailoutId ReturnId() const { return return_id_; }
protected:
- CallNew(Isolate* isolate,
+ CallNew(Zone* zone,
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
expression_(expression),
arguments_(arguments),
is_monomorphic_(false),
elements_kind_(GetInitialFastElementsKind()),
- return_id_(GetNextId(isolate)) { }
+ callnew_feedback_slot_(kInvalidFeedbackSlot),
+ return_id_(GetNextId(zone)) { }
private:
Expression* expression_;
@@ -1861,7 +1885,8 @@ class CallNew V8_FINAL : public Expression {
bool is_monomorphic_;
Handle<JSFunction> target_;
ElementsKind elements_kind_;
- Handle<Cell> allocation_info_cell_;
+ Handle<AllocationSite> allocation_site_;
+ int callnew_feedback_slot_;
const BailoutId return_id_;
};
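// A small self-contained model of the CallNew slot layout above (toy code,
// assuming nothing about v8 beyond what the diff shows): with the pretenuring
// flag set, the node reserves two consecutive feedback slots, [N] for call
// feedback and [N+1] for the allocation site.
#include <cassert>

namespace callnew_slots_sketch {

const int kInvalidFeedbackSlot = -1;

struct CallNewSlots {
  bool pretenuring;
  int first_slot;
  explicit CallNewSlots(bool p)
      : pretenuring(p), first_slot(kInvalidFeedbackSlot) {}

  int ComputeFeedbackSlotCount() const { return pretenuring ? 2 : 1; }
  void SetFirstFeedbackSlot(int slot) { first_slot = slot; }
  int CallNewFeedbackSlot() const {
    assert(first_slot != kInvalidFeedbackSlot);
    return first_slot;
  }
  int AllocationSiteFeedbackSlot() const {
    assert(pretenuring);    // the second slot exists only in this mode
    return first_slot + 1;  // always right after the call slot
  }
};

}  // namespace callnew_slots_sketch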
@@ -1883,12 +1908,12 @@ class CallRuntime V8_FINAL : public Expression {
TypeFeedbackId CallRuntimeFeedbackId() const { return reuse(id()); }
protected:
- CallRuntime(Isolate* isolate,
+ CallRuntime(Zone* zone,
Handle<String> name,
const Runtime::Function* function,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
name_(name),
function_(function),
arguments_(arguments) { }
@@ -1914,15 +1939,15 @@ class UnaryOperation V8_FINAL : public Expression {
TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
- UnaryOperation(Isolate* isolate,
+ UnaryOperation(Zone* zone,
Token::Value op,
Expression* expression,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
expression_(expression),
- materialize_true_id_(GetNextId(isolate)),
- materialize_false_id_(GetNextId(isolate)) {
+ materialize_true_id_(GetNextId(zone)),
+ materialize_false_id_(GetNextId(zone)) {
ASSERT(Token::IsUnaryOp(op));
}
@@ -1941,11 +1966,15 @@ class BinaryOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(BinaryOperation)
- virtual bool ResultOverwriteAllowed();
+ virtual bool ResultOverwriteAllowed() const V8_OVERRIDE;
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
+ Handle<AllocationSite> allocation_site() const { return allocation_site_; }
+ void set_allocation_site(Handle<AllocationSite> allocation_site) {
+ allocation_site_ = allocation_site;
+ }
BailoutId RightId() const { return right_id_; }
@@ -1957,16 +1986,16 @@ class BinaryOperation V8_FINAL : public Expression {
TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
- BinaryOperation(Isolate* isolate,
+ BinaryOperation(Zone* zone,
Token::Value op,
Expression* left,
Expression* right,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
left_(left),
right_(right),
- right_id_(GetNextId(isolate)) {
+ right_id_(GetNextId(zone)) {
ASSERT(Token::IsBinaryOp(op));
}
@@ -1974,6 +2003,7 @@ class BinaryOperation V8_FINAL : public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
+ Handle<AllocationSite> allocation_site_;
// TODO(rossberg): the fixed arg should probably be represented as a Constant
// type for the RHS.
@@ -2008,9 +2038,9 @@ class CountOperation V8_FINAL : public Expression {
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
- Handle<Type> type() const { return type_; }
+ Type* type() const { return type_; }
void set_store_mode(KeyedAccessStoreMode mode) { store_mode_ = mode; }
- void set_type(Handle<Type> type) { type_ = type; }
+ void set_type(Type* type) { type_ = type; }
BailoutId AssignmentId() const { return assignment_id_; }
@@ -2018,25 +2048,25 @@ class CountOperation V8_FINAL : public Expression {
TypeFeedbackId CountStoreFeedbackId() const { return reuse(id()); }
protected:
- CountOperation(Isolate* isolate,
+ CountOperation(Zone* zone,
Token::Value op,
bool is_prefix,
Expression* expr,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
is_prefix_(is_prefix),
store_mode_(STANDARD_STORE),
expression_(expr),
- assignment_id_(GetNextId(isolate)),
- count_id_(GetNextId(isolate)) {}
+ assignment_id_(GetNextId(zone)),
+ count_id_(GetNextId(zone)) {}
private:
Token::Value op_;
bool is_prefix_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
- Handle<Type> type_;
+ Type* type_;
Expression* expression_;
const BailoutId assignment_id_;
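// Why the ": 5" above needs a spare bit: MSVC gives an enum bitfield the
// enum's (signed) underlying type, so an n-bit field can only round-trip
// values below 2^(n-1). A hedged demonstration with a toy enum whose largest
// value needs 4 bits of magnitude, hence 5 bits in the field:
namespace bitfield_sketch {

enum ToyStoreMode { TOY_STANDARD_STORE = 0, TOY_GROW = 5, TOY_TRANSITION = 9 };

struct Packed {
  ToyStoreMode mode : 5;  // with ": 4", TOY_TRANSITION could read back as -7
};

inline bool RoundTrips(ToyStoreMode m) {
  Packed p;
  p.mode = m;
  return p.mode == m;  // true for all three values thanks to the extra bit
}

}  // namespace bitfield_sketch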
@@ -2055,8 +2085,8 @@ class CompareOperation V8_FINAL : public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
- Handle<Type> combined_type() const { return combined_type_; }
- void set_combined_type(Handle<Type> type) { combined_type_ = type; }
+ Type* combined_type() const { return combined_type_; }
+ void set_combined_type(Type* type) { combined_type_ = type; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -2064,16 +2094,16 @@ class CompareOperation V8_FINAL : public Expression {
bool IsLiteralCompareNull(Expression** expr);
protected:
- CompareOperation(Isolate* isolate,
+ CompareOperation(Zone* zone,
Token::Value op,
Expression* left,
Expression* right,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
left_(left),
right_(right),
- combined_type_(Type::None(), isolate) {
+ combined_type_(Type::None(zone)) {
ASSERT(Token::IsCompareOp(op));
}
@@ -2082,7 +2112,7 @@ class CompareOperation V8_FINAL : public Expression {
Expression* left_;
Expression* right_;
- Handle<Type> combined_type_;
+ Type* combined_type_;
};
@@ -2098,17 +2128,17 @@ class Conditional V8_FINAL : public Expression {
BailoutId ElseId() const { return else_id_; }
protected:
- Conditional(Isolate* isolate,
+ Conditional(Zone* zone,
Expression* condition,
Expression* then_expression,
Expression* else_expression,
int position)
- : Expression(isolate, position),
+ : Expression(zone, position),
condition_(condition),
then_expression_(then_expression),
else_expression_(else_expression),
- then_id_(GetNextId(isolate)),
- else_id_(GetNextId(isolate)) { }
+ then_id_(GetNextId(zone)),
+ else_id_(GetNextId(zone)) { }
private:
Expression* condition_;
@@ -2143,9 +2173,8 @@ class Assignment V8_FINAL : public Expression {
return receiver_types_.length() == 1;
}
bool IsUninitialized() { return is_uninitialized_; }
- bool IsPreMonomorphic() { return is_pre_monomorphic_; }
bool HasNoTypeInformation() {
- return is_uninitialized_ || is_pre_monomorphic_;
+ return is_uninitialized_;
}
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
return &receiver_types_;
@@ -2154,18 +2183,17 @@ class Assignment V8_FINAL : public Expression {
return store_mode_;
}
void set_is_uninitialized(bool b) { is_uninitialized_ = b; }
- void set_is_pre_monomorphic(bool b) { is_pre_monomorphic_ = b; }
void set_store_mode(KeyedAccessStoreMode mode) { store_mode_ = mode; }
protected:
- Assignment(Isolate* isolate,
+ Assignment(Zone* zone,
Token::Value op,
Expression* target,
Expression* value,
int pos);
template<class Visitor>
- void Init(Isolate* isolate, AstNodeFactory<Visitor>* factory) {
+ void Init(Zone* zone, AstNodeFactory<Visitor>* factory) {
ASSERT(Token::IsAssignmentOp(op_));
if (is_compound()) {
binary_operation_ = factory->NewBinaryOperation(
@@ -2181,7 +2209,6 @@ class Assignment V8_FINAL : public Expression {
const BailoutId assignment_id_;
bool is_uninitialized_ : 1;
- bool is_pre_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
SmallMapList receiver_types_;
@@ -2216,12 +2243,12 @@ class Yield V8_FINAL : public Expression {
}
protected:
- Yield(Isolate* isolate,
+ Yield(Zone* zone,
Expression* generator_object,
Expression* expression,
Kind yield_kind,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
generator_object_(generator_object),
expression_(expression),
yield_kind_(yield_kind),
@@ -2242,8 +2269,8 @@ class Throw V8_FINAL : public Expression {
Expression* exception() const { return exception_; }
protected:
- Throw(Isolate* isolate, Expression* exception, int pos)
- : Expression(isolate, pos), exception_(exception) {}
+ Throw(Zone* zone, Expression* exception, int pos)
+ : Expression(zone, pos), exception_(exception) {}
private:
Expression* exception_;
@@ -2278,6 +2305,12 @@ class FunctionLiteral V8_FINAL : public Expression {
kNotGenerator
};
+ enum ArityRestriction {
+ NORMAL_ARITY,
+ GETTER_ARITY,
+ SETTER_ARITY
+ };
+
DECLARE_NODE_TYPE(FunctionLiteral)
Handle<String> name() const { return name_; }
@@ -2290,8 +2323,7 @@ class FunctionLiteral V8_FINAL : public Expression {
int SourceSize() const { return end_position() - start_position(); }
bool is_expression() const { return IsExpression::decode(bitfield_); }
bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
- bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
- LanguageMode language_mode() const;
+ StrictMode strict_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
@@ -2301,6 +2333,8 @@ class FunctionLiteral V8_FINAL : public Expression {
bool AllowsLazyCompilation();
bool AllowsLazyCompilationWithoutContext();
+ void InitializeSharedInfo(Handle<Code> code);
+
Handle<String> debug_name() const {
if (name_->length() > 0) return name_;
return inferred_name();
@@ -2311,6 +2345,9 @@ class FunctionLiteral V8_FINAL : public Expression {
inferred_name_ = inferred_name;
}
+ // shared_info may be null if it's not cached in full code.
+ Handle<SharedFunctionInfo> shared_info() { return shared_info_; }
+
bool pretenure() { return Pretenure::decode(bitfield_); }
void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
@@ -2341,7 +2378,9 @@ class FunctionLiteral V8_FINAL : public Expression {
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
-
+ int slot_count() {
+ return ast_properties_.feedback_slots();
+ }
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
void set_dont_optimize_reason(BailoutReason reason) {
@@ -2349,7 +2388,7 @@ class FunctionLiteral V8_FINAL : public Expression {
}
protected:
- FunctionLiteral(Isolate* isolate,
+ FunctionLiteral(Zone* zone,
Handle<String> name,
Scope* scope,
ZoneList<Statement*>* body,
@@ -2363,11 +2402,11 @@ class FunctionLiteral V8_FINAL : public Expression {
IsParenthesizedFlag is_parenthesized,
IsGeneratorFlag is_generator,
int position)
- : Expression(isolate, position),
+ : Expression(zone, position),
name_(name),
scope_(scope),
body_(body),
- inferred_name_(isolate->factory()->empty_string()),
+ inferred_name_(zone->isolate()->factory()->empty_string()),
dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
@@ -2386,6 +2425,7 @@ class FunctionLiteral V8_FINAL : public Expression {
private:
Handle<String> name_;
+ Handle<SharedFunctionInfo> shared_info_;
Scope* scope_;
ZoneList<Statement*>* body_;
Handle<String> inferred_name_;
@@ -2418,8 +2458,8 @@ class NativeFunctionLiteral V8_FINAL : public Expression {
protected:
NativeFunctionLiteral(
- Isolate* isolate, Handle<String> name, v8::Extension* extension, int pos)
- : Expression(isolate, pos), name_(name), extension_(extension) {}
+ Zone* zone, Handle<String> name, v8::Extension* extension, int pos)
+ : Expression(zone, pos), name_(name), extension_(extension) {}
private:
Handle<String> name_;
@@ -2432,7 +2472,7 @@ class ThisFunction V8_FINAL : public Expression {
DECLARE_NODE_TYPE(ThisFunction)
protected:
- explicit ThisFunction(Isolate* isolate, int pos): Expression(isolate, pos) {}
+ explicit ThisFunction(Zone* zone, int pos): Expression(zone, pos) {}
};
#undef DECLARE_NODE_TYPE
@@ -2799,8 +2839,8 @@ class RegExpEmpty V8_FINAL : public RegExpTree {
// ----------------------------------------------------------------------------
// Out-of-line inline constructors (to side-step cyclic dependencies).
-inline ModuleVariable::ModuleVariable(VariableProxy* proxy, int pos)
- : Module(proxy->interface(), pos),
+inline ModuleVariable::ModuleVariable(Zone* zone, VariableProxy* proxy, int pos)
+ : Module(zone, proxy->interface(), pos),
proxy_(proxy) {
}
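// The Isolate* -> Zone* constructor change that runs through this whole patch
// works because a Zone carries a back-pointer to its owning Isolate, so one
// Zone* argument supplies both the allocation arena and, via zone->isolate(),
// the factory and stack limit. A rough stand-alone model of the shape:
namespace zone_sketch {

struct Isolate {};

struct Zone {
  explicit Zone(Isolate* isolate) : isolate_(isolate) {}
  Isolate* isolate() const { return isolate_; }
 private:
  Isolate* isolate_;
};

struct Node {  // an AST node now needs only the Zone
  explicit Node(Zone* zone) : isolate_(zone->isolate()) {}
  Isolate* isolate_;
};

}  // namespace zone_sketch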
@@ -2832,7 +2872,7 @@ class AstVisitor BASE_EMBEDDED {
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
- virtual void Visit(AstNode* node) V8_FINAL V8_OVERRIDE { \
+ virtual void Visit(AstNode* node) V8_FINAL V8_OVERRIDE { \
if (!CheckStackOverflow()) node->Accept(this); \
} \
\
@@ -2842,19 +2882,20 @@ public: \
\
bool CheckStackOverflow() { \
if (stack_overflow_) return true; \
- StackLimitCheck check(isolate_); \
+ StackLimitCheck check(zone_->isolate()); \
if (!check.HasOverflowed()) return false; \
return (stack_overflow_ = true); \
} \
\
private: \
- void InitializeAstVisitor(Isolate* isolate) { \
- isolate_ = isolate; \
+ void InitializeAstVisitor(Zone* zone) { \
+ zone_ = zone; \
stack_overflow_ = false; \
} \
- Isolate* isolate() { return isolate_; } \
+ Zone* zone() { return zone_; } \
+ Isolate* isolate() { return zone_->isolate(); } \
\
- Isolate* isolate_; \
+ Zone* zone_; \
bool stack_overflow_
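// The guard pattern the macro expands to, restated as a stand-alone sketch:
// every Visit() probes the stack limit before recursing and latches a sticky
// flag on overflow, so a pathologically deep AST fails soft instead of
// overflowing the C++ stack. StackLimitCheck is modeled here by a plain depth
// counter; the real check reads the isolate's stack limit.
#include <cstddef>

namespace visitor_guard_sketch {

struct Node { Node* child; };  // a unary "tree" suffices to show recursion

class GuardedVisitor {
 public:
  explicit GuardedVisitor(int max_depth)
      : depth_(0), max_depth_(max_depth), stack_overflow_(false) {}

  void Visit(Node* node) {
    if (CheckStackOverflow()) return;  // stop descending, flag stays set
    ++depth_;
    if (node->child != NULL) Visit(node->child);
    --depth_;
  }

  bool HasStackOverflow() const { return stack_overflow_; }

 private:
  bool CheckStackOverflow() {
    if (stack_overflow_) return true;       // sticky, like the macro above
    if (depth_ < max_depth_) return false;  // stands in for StackLimitCheck
    return (stack_overflow_ = true);
  }

  int depth_;
  int max_depth_;
  bool stack_overflow_;
};

}  // namespace visitor_guard_sketch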
@@ -2883,6 +2924,14 @@ class AstConstructionVisitor BASE_EMBEDDED {
dont_optimize_reason_ = reason;
}
+ void add_slot_node(FeedbackSlotInterface* slot_node) {
+ int count = slot_node->ComputeFeedbackSlotCount();
+ if (count > 0) {
+ slot_node->SetFirstFeedbackSlot(properties_.feedback_slots());
+ properties_.increase_feedback_slots(count);
+ }
+ }
+
AstProperties properties_;
BailoutReason dont_optimize_reason_;
};
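// End to end, the slot protocol in add_slot_node() looks like this sketch
// (toy types; only the arithmetic mirrors the code above): each slot-bearing
// node reports its requirement, receives the current high-water mark, and the
// per-function total -- later surfaced by FunctionLiteral::slot_count() -- is
// bumped by that amount.
namespace slot_alloc_sketch {

struct SlotNode {
  int required;    // ComputeFeedbackSlotCount()
  int first_slot;  // filled in by SetFirstFeedbackSlot()
};

struct Properties {
  int feedback_slots;
  Properties() : feedback_slots(0) {}
};

inline void AddSlotNode(Properties* props, SlotNode* node) {
  if (node->required > 0) {
    node->first_slot = props->feedback_slots;
    props->feedback_slots += node->required;
  }
}

// E.g. visiting a Call (1 slot) then a pretenuring CallNew (2 slots) assigns
// first_slot 0 and 1 respectively, leaving feedback_slots == 3.

}  // namespace slot_alloc_sketch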
@@ -2905,9 +2954,7 @@ class AstNullVisitor BASE_EMBEDDED {
template<class Visitor>
class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public:
- AstNodeFactory(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone) { }
+ explicit AstNodeFactory(Zone* zone) : zone_(zone) { }
Visitor* visitor() { return &visitor_; }
@@ -2920,7 +2967,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
VariableDeclaration* decl =
- new(zone_) VariableDeclaration(proxy, mode, scope, pos);
+ new(zone_) VariableDeclaration(zone_, proxy, mode, scope, pos);
VISIT_AND_RETURN(VariableDeclaration, decl)
}
@@ -2930,7 +2977,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
FunctionDeclaration* decl =
- new(zone_) FunctionDeclaration(proxy, mode, fun, scope, pos);
+ new(zone_) FunctionDeclaration(zone_, proxy, mode, fun, scope, pos);
VISIT_AND_RETURN(FunctionDeclaration, decl)
}
@@ -2939,7 +2986,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
ModuleDeclaration* decl =
- new(zone_) ModuleDeclaration(proxy, module, scope, pos);
+ new(zone_) ModuleDeclaration(zone_, proxy, module, scope, pos);
VISIT_AND_RETURN(ModuleDeclaration, decl)
}
@@ -2948,7 +2995,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
ImportDeclaration* decl =
- new(zone_) ImportDeclaration(proxy, module, scope, pos);
+ new(zone_) ImportDeclaration(zone_, proxy, module, scope, pos);
VISIT_AND_RETURN(ImportDeclaration, decl)
}
@@ -2956,27 +3003,28 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
ExportDeclaration* decl =
- new(zone_) ExportDeclaration(proxy, scope, pos);
+ new(zone_) ExportDeclaration(zone_, proxy, scope, pos);
VISIT_AND_RETURN(ExportDeclaration, decl)
}
ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface, int pos) {
- ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface, pos);
+ ModuleLiteral* module =
+ new(zone_) ModuleLiteral(zone_, body, interface, pos);
VISIT_AND_RETURN(ModuleLiteral, module)
}
ModuleVariable* NewModuleVariable(VariableProxy* proxy, int pos) {
- ModuleVariable* module = new(zone_) ModuleVariable(proxy, pos);
+ ModuleVariable* module = new(zone_) ModuleVariable(zone_, proxy, pos);
VISIT_AND_RETURN(ModuleVariable, module)
}
ModulePath* NewModulePath(Module* origin, Handle<String> name, int pos) {
- ModulePath* module = new(zone_) ModulePath(origin, name, zone_, pos);
+ ModulePath* module = new(zone_) ModulePath(zone_, origin, name, pos);
VISIT_AND_RETURN(ModulePath, module)
}
ModuleUrl* NewModuleUrl(Handle<String> url, int pos) {
- ModuleUrl* module = new(zone_) ModuleUrl(url, zone_, pos);
+ ModuleUrl* module = new(zone_) ModuleUrl(zone_, url, pos);
VISIT_AND_RETURN(ModuleUrl, module)
}
@@ -2985,13 +3033,13 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
bool is_initializer_block,
int pos) {
Block* block = new(zone_) Block(
- isolate_, labels, capacity, is_initializer_block, pos, zone_);
+ zone_, labels, capacity, is_initializer_block, pos);
VISIT_AND_RETURN(Block, block)
}
#define STATEMENT_WITH_LABELS(NodeType) \
NodeType* New##NodeType(ZoneStringList* labels, int pos) { \
- NodeType* stmt = new(zone_) NodeType(isolate_, labels, pos); \
+ NodeType* stmt = new(zone_) NodeType(zone_, labels, pos); \
VISIT_AND_RETURN(NodeType, stmt); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
@@ -3005,11 +3053,11 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
- ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels, pos);
+ ForInStatement* stmt = new(zone_) ForInStatement(zone_, labels, pos);
VISIT_AND_RETURN(ForInStatement, stmt);
}
case ForEachStatement::ITERATE: {
- ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels, pos);
+ ForOfStatement* stmt = new(zone_) ForOfStatement(zone_, labels, pos);
VISIT_AND_RETURN(ForOfStatement, stmt);
}
}
@@ -3019,27 +3067,28 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
ModuleStatement* NewModuleStatement(
VariableProxy* proxy, Block* body, int pos) {
- ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body, pos);
+ ModuleStatement* stmt = new(zone_) ModuleStatement(zone_, proxy, body, pos);
VISIT_AND_RETURN(ModuleStatement, stmt)
}
ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
- ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression, pos);
+ ExpressionStatement* stmt =
+ new(zone_) ExpressionStatement(zone_, expression, pos);
VISIT_AND_RETURN(ExpressionStatement, stmt)
}
ContinueStatement* NewContinueStatement(IterationStatement* target, int pos) {
- ContinueStatement* stmt = new(zone_) ContinueStatement(target, pos);
+ ContinueStatement* stmt = new(zone_) ContinueStatement(zone_, target, pos);
VISIT_AND_RETURN(ContinueStatement, stmt)
}
BreakStatement* NewBreakStatement(BreakableStatement* target, int pos) {
- BreakStatement* stmt = new(zone_) BreakStatement(target, pos);
+ BreakStatement* stmt = new(zone_) BreakStatement(zone_, target, pos);
VISIT_AND_RETURN(BreakStatement, stmt)
}
ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
- ReturnStatement* stmt = new(zone_) ReturnStatement(expression, pos);
+ ReturnStatement* stmt = new(zone_) ReturnStatement(zone_, expression, pos);
VISIT_AND_RETURN(ReturnStatement, stmt)
}
@@ -3048,7 +3097,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Statement* statement,
int pos) {
WithStatement* stmt = new(zone_) WithStatement(
- scope, expression, statement, pos);
+ zone_, scope, expression, statement, pos);
VISIT_AND_RETURN(WithStatement, stmt)
}
@@ -3057,7 +3106,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Statement* else_statement,
int pos) {
IfStatement* stmt = new(zone_) IfStatement(
- isolate_, condition, then_statement, else_statement, pos);
+ zone_, condition, then_statement, else_statement, pos);
VISIT_AND_RETURN(IfStatement, stmt)
}
@@ -3068,7 +3117,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Block* catch_block,
int pos) {
TryCatchStatement* stmt = new(zone_) TryCatchStatement(
- index, try_block, scope, variable, catch_block, pos);
+ zone_, index, try_block, scope, variable, catch_block, pos);
VISIT_AND_RETURN(TryCatchStatement, stmt)
}
@@ -3076,34 +3125,35 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Block* try_block,
Block* finally_block,
int pos) {
- TryFinallyStatement* stmt =
- new(zone_) TryFinallyStatement(index, try_block, finally_block, pos);
+ TryFinallyStatement* stmt = new(zone_) TryFinallyStatement(
+ zone_, index, try_block, finally_block, pos);
VISIT_AND_RETURN(TryFinallyStatement, stmt)
}
DebuggerStatement* NewDebuggerStatement(int pos) {
- DebuggerStatement* stmt = new(zone_) DebuggerStatement(pos);
+ DebuggerStatement* stmt = new(zone_) DebuggerStatement(zone_, pos);
VISIT_AND_RETURN(DebuggerStatement, stmt)
}
EmptyStatement* NewEmptyStatement(int pos) {
- return new(zone_) EmptyStatement(pos);
+ return new(zone_) EmptyStatement(zone_, pos);
}
CaseClause* NewCaseClause(
Expression* label, ZoneList<Statement*>* statements, int pos) {
CaseClause* clause =
- new(zone_) CaseClause(isolate_, label, statements, pos);
+ new(zone_) CaseClause(zone_, label, statements, pos);
VISIT_AND_RETURN(CaseClause, clause)
}
Literal* NewLiteral(Handle<Object> handle, int pos) {
- Literal* lit = new(zone_) Literal(isolate_, handle, pos);
+ Literal* lit = new(zone_) Literal(zone_, handle, pos);
VISIT_AND_RETURN(Literal, lit)
}
Literal* NewNumberLiteral(double number, int pos) {
- return NewLiteral(isolate_->factory()->NewNumber(number, TENURED), pos);
+ return NewLiteral(
+ zone_->isolate()->factory()->NewNumber(number, TENURED), pos);
}
ObjectLiteral* NewObjectLiteral(
@@ -3113,16 +3163,21 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
bool has_function,
int pos) {
ObjectLiteral* lit = new(zone_) ObjectLiteral(
- isolate_, properties, literal_index, boilerplate_properties,
+ zone_, properties, literal_index, boilerplate_properties,
has_function, pos);
VISIT_AND_RETURN(ObjectLiteral, lit)
}
+ ObjectLiteral::Property* NewObjectLiteralProperty(Literal* key,
+ Expression* value) {
+ return new(zone_) ObjectLiteral::Property(zone_, key, value);
+ }
+
ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
FunctionLiteral* value,
int pos) {
ObjectLiteral::Property* prop =
- new(zone_) ObjectLiteral::Property(is_getter, value);
+ new(zone_) ObjectLiteral::Property(zone_, is_getter, value);
prop->set_key(NewLiteral(value->name(), pos));
return prop; // Not an AST node, will not be visited.
}
@@ -3132,7 +3187,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
int literal_index,
int pos) {
RegExpLiteral* lit =
- new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index, pos);
+ new(zone_) RegExpLiteral(zone_, pattern, flags, literal_index, pos);
VISIT_AND_RETURN(RegExpLiteral, lit);
}
@@ -3140,13 +3195,13 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
int literal_index,
int pos) {
ArrayLiteral* lit = new(zone_) ArrayLiteral(
- isolate_, values, literal_index, pos);
+ zone_, values, literal_index, pos);
VISIT_AND_RETURN(ArrayLiteral, lit)
}
VariableProxy* NewVariableProxy(Variable* var,
int pos = RelocInfo::kNoPosition) {
- VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var, pos);
+ VariableProxy* proxy = new(zone_) VariableProxy(zone_, var, pos);
VISIT_AND_RETURN(VariableProxy, proxy)
}
@@ -3155,26 +3210,26 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Interface* interface = Interface::NewValue(),
int position = RelocInfo::kNoPosition) {
VariableProxy* proxy =
- new(zone_) VariableProxy(isolate_, name, is_this, interface, position);
+ new(zone_) VariableProxy(zone_, name, is_this, interface, position);
VISIT_AND_RETURN(VariableProxy, proxy)
}
Property* NewProperty(Expression* obj, Expression* key, int pos) {
- Property* prop = new(zone_) Property(isolate_, obj, key, pos);
+ Property* prop = new(zone_) Property(zone_, obj, key, pos);
VISIT_AND_RETURN(Property, prop)
}
Call* NewCall(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
- Call* call = new(zone_) Call(isolate_, expression, arguments, pos);
+ Call* call = new(zone_) Call(zone_, expression, arguments, pos);
VISIT_AND_RETURN(Call, call)
}
CallNew* NewCallNew(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
- CallNew* call = new(zone_) CallNew(isolate_, expression, arguments, pos);
+ CallNew* call = new(zone_) CallNew(zone_, expression, arguments, pos);
VISIT_AND_RETURN(CallNew, call)
}
@@ -3183,7 +3238,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
ZoneList<Expression*>* arguments,
int pos) {
CallRuntime* call =
- new(zone_) CallRuntime(isolate_, name, function, arguments, pos);
+ new(zone_) CallRuntime(zone_, name, function, arguments, pos);
VISIT_AND_RETURN(CallRuntime, call)
}
@@ -3191,7 +3246,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* expression,
int pos) {
UnaryOperation* node =
- new(zone_) UnaryOperation(isolate_, op, expression, pos);
+ new(zone_) UnaryOperation(zone_, op, expression, pos);
VISIT_AND_RETURN(UnaryOperation, node)
}
@@ -3200,7 +3255,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* right,
int pos) {
BinaryOperation* node =
- new(zone_) BinaryOperation(isolate_, op, left, right, pos);
+ new(zone_) BinaryOperation(zone_, op, left, right, pos);
VISIT_AND_RETURN(BinaryOperation, node)
}
@@ -3209,7 +3264,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* expr,
int pos) {
CountOperation* node =
- new(zone_) CountOperation(isolate_, op, is_prefix, expr, pos);
+ new(zone_) CountOperation(zone_, op, is_prefix, expr, pos);
VISIT_AND_RETURN(CountOperation, node)
}
@@ -3218,7 +3273,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* right,
int pos) {
CompareOperation* node =
- new(zone_) CompareOperation(isolate_, op, left, right, pos);
+ new(zone_) CompareOperation(zone_, op, left, right, pos);
VISIT_AND_RETURN(CompareOperation, node)
}
@@ -3227,7 +3282,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* else_expression,
int position) {
Conditional* cond = new(zone_) Conditional(
- isolate_, condition, then_expression, else_expression, position);
+ zone_, condition, then_expression, else_expression, position);
VISIT_AND_RETURN(Conditional, cond)
}
@@ -3236,8 +3291,8 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* value,
int pos) {
Assignment* assign =
- new(zone_) Assignment(isolate_, op, target, value, pos);
- assign->Init(isolate_, this);
+ new(zone_) Assignment(zone_, op, target, value, pos);
+ assign->Init(zone_, this);
VISIT_AND_RETURN(Assignment, assign)
}
@@ -3246,12 +3301,12 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Yield::Kind yield_kind,
int pos) {
Yield* yield = new(zone_) Yield(
- isolate_, generator_object, expression, yield_kind, pos);
+ zone_, generator_object, expression, yield_kind, pos);
VISIT_AND_RETURN(Yield, yield)
}
Throw* NewThrow(Expression* exception, int pos) {
- Throw* t = new(zone_) Throw(isolate_, exception, pos);
+ Throw* t = new(zone_) Throw(zone_, exception, pos);
VISIT_AND_RETURN(Throw, t)
}
@@ -3270,7 +3325,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
FunctionLiteral::IsGeneratorFlag is_generator,
int position) {
FunctionLiteral* lit = new(zone_) FunctionLiteral(
- isolate_, name, scope, body,
+ zone_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count,
parameter_count, function_type, has_duplicate_parameters, is_function,
is_parenthesized, is_generator, position);
@@ -3284,19 +3339,18 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
NativeFunctionLiteral* NewNativeFunctionLiteral(
Handle<String> name, v8::Extension* extension, int pos) {
NativeFunctionLiteral* lit =
- new(zone_) NativeFunctionLiteral(isolate_, name, extension, pos);
+ new(zone_) NativeFunctionLiteral(zone_, name, extension, pos);
VISIT_AND_RETURN(NativeFunctionLiteral, lit)
}
ThisFunction* NewThisFunction(int pos) {
- ThisFunction* fun = new(zone_) ThisFunction(isolate_, pos);
+ ThisFunction* fun = new(zone_) ThisFunction(zone_, pos);
VISIT_AND_RETURN(ThisFunction, fun)
}
#undef VISIT_AND_RETURN
private:
- Isolate* isolate_;
Zone* zone_;
Visitor visitor_;
};
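// What the refactor means at a call site, as a hedged usage sketch (assuming
// a live Zone* from the parser; positions use RelocInfo::kNoPosition as the
// factory defaults above do):
//
//   AstNodeFactory<AstConstructionVisitor> factory(zone);
//   Literal* one = factory.NewNumberLiteral(1.0, RelocInfo::kNoPosition);
//   Literal* two = factory.NewNumberLiteral(2.0, RelocInfo::kNoPosition);
//   BinaryOperation* sum = factory.NewBinaryOperation(
//       Token::ADD, one, two, RelocInfo::kNoPosition);
//
// Every New* call funnels through VISIT_AND_RETURN, so the construction
// visitor sees each node as it is built -- which is exactly where the
// feedback-slot numbering from add_slot_node() happens.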
diff --git a/chromium/v8/src/atomicops_internals_arm_gcc.h b/chromium/v8/src/atomicops_internals_arm_gcc.h
deleted file mode 100644
index 6c30256d93d..00000000000
--- a/chromium/v8/src/atomicops_internals_arm_gcc.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
-
-#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-namespace v8 {
-namespace internal {
-
-// 0xffff0fc0 is the hard coded address of a function provided by
-// the kernel which implements an atomic compare-exchange. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
- Atomic32 new_value,
- volatile Atomic32* ptr);
-LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
- (LinuxKernelCmpxchgFunc) 0xffff0fc0;
-
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
- (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
-
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value = *ptr;
- do {
- if (!pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- // Atomic exchange the old value with an incremented one.
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)) == 0) {
- // The exchange took place as expected.
- return new_value;
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- pLinuxKernelMemoryBarrier();
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/chromium/v8/src/atomicops_internals_x86_macosx.h b/chromium/v8/src/atomicops_internals_x86_macosx.h
deleted file mode 100644
index bfb02b3851f..00000000000
--- a/chromium/v8/src/atomicops_internals_x86_macosx.h
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
-#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
-
-#include <libkern/OSAtomic.h>
-
-namespace v8 {
-namespace internal {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline void MemoryBarrier() {
- OSMemoryBarrier();
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#ifdef __LP64__
-
-// 64-bit implementation on 64-bit platform
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr)));
- return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- // The lib kern interface does not distinguish between
- // Acquire and Release memory barriers; they are equivalent.
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#endif // defined(__LP64__)
-
-// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
-// on the Mac, even when they are the same size. We need to explicitly cast
-// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
-#ifdef __LP64__
-#define AtomicWordCastType Atomic64
-#else
-#define AtomicWordCastType Atomic32
-#endif
-
-inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return NoBarrier_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
- AtomicWord new_value) {
- return NoBarrier_AtomicExchange(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return NoBarrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return Barrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Acquire_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Release_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
- NoBarrier_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Acquire_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Release_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
- return NoBarrier_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Acquire_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Release_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-#undef AtomicWordCastType
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
diff --git a/chromium/v8/src/base/DEPS b/chromium/v8/src/base/DEPS
new file mode 100644
index 00000000000..65480302438
--- /dev/null
+++ b/chromium/v8/src/base/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "-src",
+ "+src/base",
+]
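# Chromium's checkdeps tool reads these rules top to bottom: "-src" forbids
# includes from anywhere under src/, then "+src/base" re-allows the src/base
# subtree, so code in base/ cannot reach back into the rest of v8. A
# file-specific exception would use specific_include_rules, e.g.
# (hypothetical):
#
#   specific_include_rules = {
#     "atomicops_unittest\.cc": [ "+src" ],
#   }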
diff --git a/chromium/v8/src/atomicops.h b/chromium/v8/src/base/atomicops.h
index 789721edfc6..b26fc4c98b9 100644
--- a/chromium/v8/src/atomicops.h
+++ b/chromium/v8/src/base/atomicops.h
@@ -1,29 +1,6 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
@@ -45,22 +22,30 @@
// to use these.
//
-#ifndef V8_ATOMICOPS_H_
-#define V8_ATOMICOPS_H_
+#ifndef V8_BASE_ATOMICOPS_H_
+#define V8_BASE_ATOMICOPS_H_
-#include "../include/v8.h"
-#include "globals.h"
+#include "include/v8.h"
+#include "src/base/build_config.h"
+
+#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
namespace v8 {
-namespace internal {
+namespace base {
+typedef char Atomic8;
typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
-#if defined(__ILP32__) || defined(__APPLE__)
-// MacOS is an exception to the implicit conversion rule above,
-// because it uses long for intptr_t.
+#if defined(__ILP32__)
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
@@ -69,11 +54,7 @@ typedef intptr_t Atomic64;
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
-#if defined(__OpenBSD__) && defined(__i386__)
-typedef Atomic32 AtomicWord;
-#else
typedef intptr_t AtomicWord;
-#endif
// Atomically execute:
// result = *ptr;
@@ -117,10 +98,12 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 new_value);
void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
@@ -148,23 +131,31 @@ Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
-} } // namespace v8::internal
+} } // namespace v8::base
// Include our platform specific implementation.
#if defined(THREAD_SANITIZER)
-#include "atomicops_internals_tsan.h"
+#include "src/base/atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "atomicops_internals_x86_macosx.h"
-#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "atomicops_internals_x86_gcc.h"
+#include "src/base/atomicops_internals_x86_msvc.h"
+#elif defined(__APPLE__)
+#include "src/base/atomicops_internals_mac.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
+#include "src/base/atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
-#include "atomicops_internals_arm_gcc.h"
+#include "src/base/atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include "src/base/atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
-#include "atomicops_internals_mips_gcc.h"
+#include "src/base/atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
-#endif // V8_ATOMICOPS_H_
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(__APPLE__) || defined(__OpenBSD__)
+#include "src/base/atomicops_internals_atomicword_compat.h"
+#endif
+
+#endif // V8_BASE_ATOMICOPS_H_
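// A minimal usage sketch for the API declared above (assuming this header is
// on the include path; thread creation elided): Release_Store publishes a
// payload, and a reader that observes the flag through Acquire_Load is
// guaranteed to see that payload -- the canonical pairing of these two calls.

#include "src/base/atomicops.h"

namespace {

v8::base::Atomic32 g_ready = 0;
int g_payload = 0;

void Producer() {
  g_payload = 42;                        // plain store...
  v8::base::Release_Store(&g_ready, 1);  // ...made visible before the flag
}

void Consumer() {
  if (v8::base::Acquire_Load(&g_ready) == 1) {
    int value = g_payload;  // pairs with the Release above: sees 42
    (void) value;
  }
}

}  // namespace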
diff --git a/chromium/v8/src/base/atomicops_internals_arm64_gcc.h b/chromium/v8/src/base/atomicops_internals_arm64_gcc.h
new file mode 100644
index 00000000000..b01783e6a7e
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_arm64_gcc.h
@@ -0,0 +1,316 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+namespace v8 {
+namespace base {
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
+}
+
+// The NoBarrier versions of the operations include "memory" in the clobber
+// list. This is not required for direct use of the NoBarrier versions of
+// the operations. However, it is required for correctness when they are
+// used as part of the Acquire or Release versions, to ensure that nothing
+// from outside the call is reordered between the operation and the memory
+// barrier. This does not change the generated code, so it has little or no
+// impact on the NoBarrier operations.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"IJr" (old_value),
+ [new_value]"r" (new_value)
+ : "cc", "memory"
+ ); // NOLINT
+
+ return prev;
+}
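// The same retry loop, restated as a single-threaded structured model so the
// control flow is visible (toy code: the exclusive monitor is simulated by a
// flag that forces one stxr failure; nothing here is actually atomic):
namespace cas_model_sketch {

struct Monitor { bool lost; };  // set lost=true to simulate a stolen line

inline int TryStoreExclusive(volatile int* ptr, int value, Monitor* m) {
  if (m->lost) { m->lost = false; return 1; }  // stxr reported failure
  *ptr = value;                                // store succeeded
  return 0;
}

inline int CompareAndSwapModel(volatile int* ptr, int old_value,
                               int new_value, Monitor* m) {
  int prev;
  do {
    prev = *ptr;                   // ldxr: load current value
    if (prev != old_value) break;  // bne 1f: comparison failed, give up
  } while (TryStoreExclusive(ptr, new_value, m) != 0);  // stxr + cbnz retry
  return prev;
}

}  // namespace cas_model_sketch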
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "add %w[result], %w[result], %w[increment]\n\t"
+ "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
+ "cbnz %w[temp], 0b \n\t" // Retry on failure.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"IJr" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+
+ MemoryBarrier();
+ result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+
+ return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+
+ MemoryBarrier();
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __asm__ __volatile__ ( // NOLINT
+ "stlr %w[value], %[ptr] \n\t"
+ : [ptr]"=Q" (*ptr)
+ : [value]"r" (value)
+ : "memory"
+ ); // NOLINT
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value;
+
+ __asm__ __volatile__ ( // NOLINT
+ "ldar %w[value], %[ptr] \n\t"
+ : [value]"=r" (value)
+ : [ptr]"Q" (*ptr)
+ : "memory"
+ ); // NOLINT
+
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"IJr" (old_value),
+ [new_value]"r" (new_value)
+ : "cc", "memory"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "add %[result], %[result], %[increment] \n\t"
+ "stxr %w[temp], %[result], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"IJr" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+
+ MemoryBarrier();
+ result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+
+ return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+
+ MemoryBarrier();
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __asm__ __volatile__ ( // NOLINT
+ "stlr %x[value], %[ptr] \n\t"
+ : [ptr]"=Q" (*ptr)
+ : [value]"r" (value)
+ : "memory"
+ ); // NOLINT
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value;
+
+ __asm__ __volatile__ ( // NOLINT
+ "ldar %x[value], %[ptr] \n\t"
+ : [value]"=r" (value)
+ : [ptr]"Q" (*ptr)
+ : "memory"
+ ); // NOLINT
+
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} } // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_arm_gcc.h b/chromium/v8/src/base/atomicops_internals_arm_gcc.h
new file mode 100644
index 00000000000..069b1ffa883
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_arm_gcc.h
@@ -0,0 +1,301 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+#if defined(__QNXNTO__)
+#include <sys/cpuinline.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+// Memory barriers on ARM are funky, but the kernel is here to help:
+//
+// * ARMv5 didn't support SMP; there is no memory barrier instruction
+// at all on this architecture, nor when targeting its machine code.
+//
+// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
+// writing a random value to a very specific coprocessor register.
+//
+// * On ARMv7, the "dmb" instruction is used to perform a full memory
+// barrier (though writing to the co-processor will still work).
+// However, on single-core devices (e.g. Nexus One or Nexus S),
+// this instruction will take up to 200 ns, which is huge, even though
+// it's completely unneeded on these devices.
+//
+// * There is no easy way to determine at runtime if the device is
+// single or multi-core. However, the kernel provides a useful helper
+// function at a fixed memory address (0xffff0fa0), which will always
+// perform a memory barrier in the most efficient way. I.e. on single
+// core devices, this is an empty function that exits immediately.
+// On multi-core devices, it implements a full memory barrier.
+//
+// * This source could be compiled to ARMv5 machine code that runs on a
+// multi-core ARMv6 or ARMv7 device. In this case, memory barriers
+// are needed for correct execution. Always call the kernel helper, even
+// when targeting ARMv5TE.
+//
+
+inline void MemoryBarrier() {
+#if defined(__linux__) || defined(__ANDROID__)
+ // Note: This is a function call, which is also an implicit compiler barrier.
+ typedef void (*KernelMemoryBarrierFunc)();
+ ((KernelMemoryBarrierFunc)0xffff0fa0)();
+#elif defined(__QNXNTO__)
+ __cpu_membarrier();
+#else
+#error MemoryBarrier() is not implemented on this platform.
+#endif
+}
+
+// An ARM toolchain would only define one of these depending on which
+// variant of the target architecture is being used. This tests against
+// any known ARMv6 or ARMv7 variant, where it is possible to directly
+// use ldrex/strex instructions to implement fast atomic operations.
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+ defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_6T2__)
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ int reloop;
+ do {
+ // The following is equivalent to:
+ //
+ // prev_value = LDREX(ptr)
+ // reloop = 0
+ // if (prev_value != old_value)
+ // reloop = STREX(ptr, new_value)
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " mov %1, #0\n"
+ " cmp %0, %4\n"
+#ifdef __thumb2__
+ " it eq\n"
+#endif
+ " strexeq %1, %5, [%3]\n"
+ : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(old_value), "r"(new_value)
+ : "cc", "memory");
+ } while (reloop != 0);
+ return prev_value;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return result;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 value;
+ int reloop;
+ do {
+ // Equivalent to:
+ //
+ // value = LDREX(ptr)
+ // value += increment
+ // reloop = STREX(ptr, value)
+ //
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+ : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(increment)
+ : "cc", "memory");
+ } while (reloop);
+ return value;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ // TODO(digit): Investigate if it's possible to implement this with
+ // a single MemoryBarrier() operation between the LDREX and STREX.
+ // See http://crbug.com/246514
+ MemoryBarrier();
+ Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ int reloop;
+ do {
+ // old_value = LDREX(ptr)
+ // reloop = STREX(ptr, new_value)
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " strex %1, %4, [%3]\n"
+ : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(new_value)
+ : "cc", "memory");
+ } while (reloop != 0);
+ return old_value;
+}
+
+// This tests against any known ARMv5 variant.
+#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
+
+// The kernel also provides a helper function to perform an atomic
+// compare-and-swap operation at the hard-wired address 0xffff0fc0.
+// On ARMv5, this is implemented by a special code path that the kernel
+// detects and treats specially when thread pre-emption happens.
+// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
+//
+// Note that this always performs a full memory barrier, so there is no
+// need to add calls to MemoryBarrier() before or after it. It also
+// returns 0 on success, and 1 on failure.
+//
+// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
+// use newer kernel revisions, so this should not be a concern.
+namespace {
+
+inline int LinuxKernelCmpxchg(Atomic32 old_value,
+ Atomic32 new_value,
+ volatile Atomic32* ptr) {
+ typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
+ return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
+}
+
+} // namespace
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ for (;;) {
+ prev_value = *ptr;
+ if (prev_value != old_value)
+ return prev_value;
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+ return old_value;
+ }
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ for (;;) {
+ // Atomic exchange the old value with an incremented one.
+ Atomic32 old_value = *ptr;
+ Atomic32 new_value = old_value + increment;
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
+ // The exchange took place as expected.
+ return new_value;
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ for (;;) {
+ prev_value = *ptr;
+ if (prev_value != old_value) {
+ // Always ensure acquire semantics.
+ MemoryBarrier();
+ return prev_value;
+ }
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+ return old_value;
+ }
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ // This could be implemented as:
+ // MemoryBarrier();
+ // return NoBarrier_CompareAndSwap();
+ //
+  // But that would use 3 barriers per successful CAS. To save performance,
+  // use Acquire_CompareAndSwap(). Its implementation guarantees that:
+  // - A successful swap uses only 2 barriers (in the kernel helper).
+ // - An early return due to (prev_value != old_value) performs
+ // a memory barrier with no store, which is equivalent to the
+ // generic implementation above.
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#else
+# error "Your CPU's ARM architecture is not supported yet"
+#endif
+
+// NOTE: Atomicity of the following load and store operations is only
+// guaranteed when |ptr| values are 32-bit aligned.
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+// Byte accessors.
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
+
+} } // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_atomicword_compat.h b/chromium/v8/src/base/atomicops_internals_atomicword_compat.h
new file mode 100644
index 00000000000..0530ced2a44
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_atomicword_compat.h
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
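+//
+// For example, with an LP32 toolchain where intptr_t is a long:
+//
+//   AtomicWord w;
+//   NoBarrier_Store(&w, 1);  // needs the AtomicWord overload below,
+//                            // since &w is a long*, not an Atomic32*.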
+
+#if !defined(V8_HOST_ARCH_64_BIT)
+
+namespace v8 {
+namespace base {
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return NoBarrier_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return NoBarrier_AtomicExchange(
+ reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return NoBarrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Barrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return v8::base::Acquire_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return v8::base::Release_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+ NoBarrier_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return v8::base::Acquire_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return v8::base::Release_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+ return NoBarrier_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+ return v8::base::Acquire_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+ return v8::base::Release_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
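+
+// With these overloads, AtomicWord-based code compiles unchanged however
+// intptr_t is defined, e.g. (illustrative):
+//
+//   AtomicWord counter = 0;
+//   Barrier_AtomicIncrement(&counter, 1);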
+
+} } // namespace v8::base
+
+#endif // !defined(V8_HOST_ARCH_64_BIT)
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
diff --git a/chromium/v8/src/base/atomicops_internals_mac.h b/chromium/v8/src/base/atomicops_internals_mac.h
new file mode 100644
index 00000000000..a046872e4d0
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_mac.h
@@ -0,0 +1,204 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
+
+#include <libkern/OSAtomic.h>
+
+namespace v8 {
+namespace base {
+
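+// OSAtomicCompareAndSwap32 reports only success or failure, while this
+// interface must return the previous value. The CAS loops below re-read
+// *ptr after a failed swap and retry while it still equals old_value
+// (i.e. the failure was transient).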
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap32(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap32(old_value, new_value,
+ const_cast<Atomic32*>(ptr)));
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline void MemoryBarrier() {
+ OSMemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#ifdef __LP64__
+
+// 64-bit implementation on 64-bit platform
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap64(old_value, new_value,
+ reinterpret_cast<volatile int64_t*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap64(old_value, new_value,
+ reinterpret_cast<volatile int64_t*>(ptr)));
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return OSAtomicAdd64Barrier(increment,
+ reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap64Barrier(
+ old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+  // The libkern interface does not distinguish between
+  // Acquire and Release memory barriers; they are equivalent.
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif // defined(__LP64__)
+
+} } // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
diff --git a/chromium/v8/src/atomicops_internals_mips_gcc.h b/chromium/v8/src/base/atomicops_internals_mips_gcc.h
index cb8f8b9d954..0d3a0e38c13 100644
--- a/chromium/v8/src/atomicops_internals_mips_gcc.h
+++ b/chromium/v8/src/base/atomicops_internals_mips_gcc.h
@@ -1,37 +1,14 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
-#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
namespace v8 {
-namespace internal {
+namespace base {
// Atomically execute:
// result = *ptr;
@@ -136,6 +113,10 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
@@ -154,6 +135,10 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
@@ -169,6 +154,6 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
-} } // namespace v8::internal
+} } // namespace v8::base
-#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/chromium/v8/src/atomicops_internals_tsan.h b/chromium/v8/src/base/atomicops_internals_tsan.h
index b5162bad9f6..363668d86d3 100644
--- a/chromium/v8/src/atomicops_internals_tsan.h
+++ b/chromium/v8/src/base/atomicops_internals_tsan.h
@@ -1,39 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
-#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
-#define V8_ATOMICOPS_INTERNALS_TSAN_H_
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
namespace v8 {
-namespace internal {
+namespace base {
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
@@ -53,10 +30,7 @@ extern struct AtomicOps_x86CPUFeatureStruct
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-#ifdef __cplusplus
extern "C" {
-#endif
-
typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
@@ -80,152 +54,149 @@ typedef enum {
__tsan_memory_order_seq_cst,
} __tsan_memory_order;
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
__tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
__tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
__tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
__tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
__tsan_memory_order mo);
-void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
__tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
__tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
__tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
__tsan_memory_order mo);
-void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
__tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
- volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
-
-#ifdef __cplusplus
} // extern "C"
-#endif
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
@@ -234,37 +205,37 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
return cmp;
}
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_acquire);
}
-inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
@@ -273,7 +244,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
return cmp;
}
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
@@ -282,33 +253,41 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
return cmp;
}
-inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
@@ -317,60 +296,60 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
return cmp;
}
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
-inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
@@ -379,7 +358,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
return cmp;
}
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
@@ -392,9 +371,9 @@ inline void MemoryBarrier() {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-} // namespace internal
+} // namespace base
} // namespace v8
#undef ATOMICOPS_COMPILER_BARRIER
-#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_
+#endif // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
diff --git a/chromium/v8/src/atomicops_internals_x86_gcc.cc b/chromium/v8/src/base/atomicops_internals_x86_gcc.cc
index 950b423f413..b8ba0c34b56 100644
--- a/chromium/v8/src/atomicops_internals_x86_gcc.cc
+++ b/chromium/v8/src/base/atomicops_internals_x86_gcc.cc
@@ -1,43 +1,19 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This module gets enough CPU information to optimize the
// atomicops module on x86.
#include <string.h>
-#include "atomicops.h"
-#include "platform.h"
+#include "src/base/atomicops.h"
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
// doesn't sub-include that file, then we aren't needed, and shouldn't
// try to do anything.
-#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+#ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
// of the global offset table. To avoid breaking such executables, this code
@@ -59,7 +35,7 @@
#if defined(cpuid) // initialize the struct only on x86
namespace v8 {
-namespace internal {
+namespace base {
// Set the flags so that code will run correctly and conservatively, so even
// if we haven't been initialized yet, we're probably single threaded, and our
@@ -69,25 +45,25 @@ struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // no SSE2
};
-} } // namespace v8::internal
+} } // namespace v8::base
namespace {
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
void AtomicOps_Internalx86CPUFeaturesInit() {
- using v8::internal::AtomicOps_Internalx86CPUFeatures;
+ using v8::base::AtomicOps_Internalx86CPUFeatures;
- uint32_t eax;
- uint32_t ebx;
- uint32_t ecx;
- uint32_t edx;
+ uint32_t eax = 0;
+ uint32_t ebx = 0;
+ uint32_t ecx = 0;
+ uint32_t edx = 0;
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
- v8::internal::OS::MemCopy(vendor, &ebx, 4);
- v8::internal::OS::MemCopy(vendor + 4, &edx, 4);
- v8::internal::OS::MemCopy(vendor + 8, &ecx, 4);
+ memcpy(vendor, &ebx, 4);
+ memcpy(vendor + 4, &edx, 4);
+ memcpy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
@@ -132,4 +108,4 @@ AtomicOpsx86Initializer g_initer;
#endif // if x86
-#endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+#endif // ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/chromium/v8/src/atomicops_internals_x86_gcc.h b/chromium/v8/src/base/atomicops_internals_x86_gcc.h
index e58d598fbd4..00b64484683 100644
--- a/chromium/v8/src/atomicops_internals_x86_gcc.h
+++ b/chromium/v8/src/base/atomicops_internals_x86_gcc.h
@@ -1,37 +1,14 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
-#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
namespace v8 {
-namespace internal {
+namespace base {
// This struct is not part of the public API of this module; clients may not
// use it.
@@ -107,6 +84,10 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
@@ -152,6 +133,10 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
// See comments in Atomic64 version of Release_Store(), below.
}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
@@ -280,8 +265,8 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
#endif // defined(__x86_64__)
-} } // namespace v8::internal
+} } // namespace v8::base
#undef ATOMICOPS_COMPILER_BARRIER
-#endif // V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/chromium/v8/src/atomicops_internals_x86_msvc.h b/chromium/v8/src/base/atomicops_internals_x86_msvc.h
index fcf6a651077..adc40318e92 100644
--- a/chromium/v8/src/atomicops_internals_x86_msvc.h
+++ b/chromium/v8/src/base/atomicops_internals_x86_msvc.h
@@ -1,40 +1,26 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
-#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
-#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
-#include "checks.h"
-#include "win32-headers.h"
+#include "src/base/macros.h"
+#include "src/base/win32-headers.h"
+
+#if defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So,
+// on x64, #undef it and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
namespace v8 {
-namespace internal {
+namespace base {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
@@ -70,8 +56,13 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
+#if defined(V8_HOST_ARCH_64_BIT)
+ // See #undef and note at the top of this file.
+ __faststorefence();
+#else
// We use MemoryBarrier from WinNT.h
::MemoryBarrier();
+#endif
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
@@ -86,6 +77,10 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
@@ -100,6 +95,10 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
// See comments in Atomic64 version of Release_Store() below.
}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
@@ -198,6 +197,6 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
#endif // defined(_WIN64)
-} } // namespace v8::internal
+} } // namespace v8::base
-#endif // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/chromium/v8/src/base/build_config.h b/chromium/v8/src/base/build_config.h
new file mode 100644
index 00000000000..e412b92dfef
--- /dev/null
+++ b/chromium/v8/src/base/build_config.h
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BUILD_CONFIG_H_
+#define V8_BASE_BUILD_CONFIG_H_
+
+#include "include/v8config.h"
+
+// Processor architecture detection. For more info on what's defined, see:
+// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+// http://www.agner.org/optimize/calling_conventions.pdf
+// or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#if defined(__native_client__)
+// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8
+// generates ARM machine code, together with a portable ARM simulator
+// compiled for the host architecture in question.
+//
+// Since Native Client is ILP32 on all architectures, we use
+// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#else
+#define V8_HOST_ARCH_X64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#endif // __native_client__
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__AARCH64EL__)
+#define V8_HOST_ARCH_ARM64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__ARMEL__)
+#define V8_HOST_ARCH_ARM 1
+#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
+#define V8_HOST_ARCH_MIPS 1
+#define V8_HOST_ARCH_32_BIT 1
+#else
+#error "Host architecture was not detected as supported by v8"
+#endif
+
+#if defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
+# ifndef CAN_USE_VFP3_INSTRUCTIONS
+# define CAN_USE_VFP3_INSTRUCTIONS
+# endif
+#endif
+
+
+// Target architecture detection. This may be set externally. If not, detect
+// in the same way as the host architecture, that is, target the native
+// environment as presented by the compiler.
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+#if defined(_M_X64) || defined(__x86_64__)
+#define V8_TARGET_ARCH_X64 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_TARGET_ARCH_IA32 1
+#elif defined(__AARCH64EL__)
+#define V8_TARGET_ARCH_ARM64 1
+#elif defined(__ARMEL__)
+#define V8_TARGET_ARCH_ARM 1
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
+#define V8_TARGET_ARCH_MIPS 1
+#else
+#error Target architecture was not detected as supported by v8
+#endif
+#endif
+
+// Check for supported combinations of host and target architectures.
+#if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32
+#error Target architecture ia32 is only supported on ia32 host
+#endif
+#if V8_TARGET_ARCH_X64 && !V8_HOST_ARCH_X64
+#error Target architecture x64 is only supported on x64 host
+#endif
+#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
+#error Target architecture arm is only supported on arm and ia32 host
+#endif
+#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64))
+#error Target architecture arm64 is only supported on arm64 and x64 host
+#endif
+#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
+#error Target architecture mips is only supported on mips and ia32 host
+#endif
+
+// Determine architecture endianness.
+#if V8_TARGET_ARCH_IA32
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_X64
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM64
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_MIPS
+#if defined(__MIPSEB__)
+#define V8_TARGET_BIG_ENDIAN 1
+#else
+#define V8_TARGET_LITTLE_ENDIAN 1
+#endif
+#elif V8_TARGET_ARCH_X87
+#define V8_TARGET_LITTLE_ENDIAN 1
+#else
+#error Unknown target architecture endianness
+#endif
+
+#if V8_OS_MACOSX || defined(__FreeBSD__) || defined(__OpenBSD__)
+#define USING_BSD_ABI
+#endif
+
+#endif // V8_BASE_BUILD_CONFIG_H_
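Everything in this header is a plain preprocessor definition, so consumers branch with ordinary #if blocks. A hypothetical consumer (not from the tree; the constant names are made up) might select values like this:

    #include "src/base/build_config.h"

    // Word size follows the detected host architecture.
    #if V8_HOST_ARCH_64_BIT
    static const int kPointerSizeLog2 = 3;
    #else
    static const int kPointerSizeLog2 = 2;
    #endif

    // MIPS is the only target this header can mark as big-endian.
    #if defined(V8_TARGET_BIG_ENDIAN)
    static const bool kTargetLittleEndian = false;
    #else
    static const bool kTargetLittleEndian = true;
    #endif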
diff --git a/chromium/v8/src/lazy-instance.h b/chromium/v8/src/base/lazy-instance.h
index fc03f4d1260..a20689a16c4 100644
--- a/chromium/v8/src/lazy-instance.h
+++ b/chromium/v8/src/base/lazy-instance.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// The LazyInstance<Type, Traits> class manages a single instance of Type,
// which will be lazily created the first time it's accessed. This class is
@@ -66,8 +43,8 @@
// LAZY_INSTANCE_INITIALIZER;
//
// WARNINGS:
-// - This implementation of LazyInstance is NOT THREAD-SAFE by default. See
-// ThreadSafeInitOnceTrait declared below for that.
+// - This implementation of LazyInstance IS THREAD-SAFE by default. See
+// SingleThreadInitOnceTrait if you don't care about thread safety.
// - Lazy initialization comes with a cost. Make sure that you don't use it on
// critical path. Consider adding your initialization code to a function
// which is explicitly called once.
@@ -88,14 +65,14 @@
// The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
// dynamic lazy instances.
-#ifndef V8_LAZY_INSTANCE_H_
-#define V8_LAZY_INSTANCE_H_
+#ifndef V8_BASE_LAZY_INSTANCE_H_
+#define V8_BASE_LAZY_INSTANCE_H_
-#include "checks.h"
-#include "once.h"
+#include "src/base/macros.h"
+#include "src/base/once.h"
namespace v8 {
-namespace internal {
+namespace base {
#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, { {} } }
#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
@@ -227,7 +204,7 @@ struct LazyInstanceImpl {
template <typename T,
typename CreateTrait = DefaultConstructTrait<T>,
- typename InitOnceTrait = SingleThreadInitOnceTrait,
+ typename InitOnceTrait = ThreadSafeInitOnceTrait,
typename DestroyTrait = LeakyInstanceTrait<T> >
struct LazyStaticInstance {
typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>,
@@ -237,7 +214,7 @@ struct LazyStaticInstance {
template <typename T,
typename CreateTrait = DefaultConstructTrait<T>,
- typename InitOnceTrait = SingleThreadInitOnceTrait,
+ typename InitOnceTrait = ThreadSafeInitOnceTrait,
typename DestroyTrait = LeakyInstanceTrait<T> >
struct LazyInstance {
// A LazyInstance is a LazyStaticInstance.
@@ -248,13 +225,13 @@ struct LazyInstance {
template <typename T,
typename CreateTrait = DefaultCreateTrait<T>,
- typename InitOnceTrait = SingleThreadInitOnceTrait,
+ typename InitOnceTrait = ThreadSafeInitOnceTrait,
typename DestroyTrait = LeakyInstanceTrait<T> >
struct LazyDynamicInstance {
typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>,
CreateTrait, InitOnceTrait, DestroyTrait> type;
};
-} } // namespace v8::internal
+} } // namespace v8::base
-#endif // V8_LAZY_INSTANCE_H_
+#endif // V8_BASE_LAZY_INSTANCE_H_
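Following the usage notes kept in the header, declaring and touching a lazy static now gets thread-safe initialization for free; a short sketch under that API (Counter and NextTick are made-up names):

    #include "src/base/lazy-instance.h"

    struct Counter {
      Counter() : value(0) {}
      int value;
    };

    // No static constructor runs; the Counter is built on first access, and
    // with ThreadSafeInitOnceTrait as the new default, racing first accesses
    // are safe.
    static v8::base::LazyInstance<Counter>::type g_counter =
        LAZY_INSTANCE_INITIALIZER;

    int NextTick() {
      // Only initialization is synchronized; the increment itself is not.
      return ++g_counter.Pointer()->value;
    }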
diff --git a/chromium/v8/src/base/macros.h b/chromium/v8/src/base/macros.h
new file mode 100644
index 00000000000..736a656ed9f
--- /dev/null
+++ b/chromium/v8/src/base/macros.h
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_MACROS_H_
+#define V8_BASE_MACROS_H_
+
+#include "include/v8stdint.h"
+
+
+// The expression OFFSET_OF(type, field) computes the byte-offset
+// of the specified field relative to the containing type. This
+// corresponds to 'offsetof' (in stddef.h), except that it doesn't
+// use 0 or NULL, which causes a problem with the compiler warnings
+// we have enabled (which is also why 'offsetof' doesn't seem to work).
+// Here we simply use the non-zero value 4, which seems to work.
+#define OFFSET_OF(type, field) \
+ (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
+
+
+// The expression ARRAY_SIZE(a) is a compile-time constant of type
+// size_t which represents the number of elements of the given
+// array. You should only use ARRAY_SIZE on statically allocated
+// arrays.
+#define ARRAY_SIZE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+
+
+// A macro to disallow the evil copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) V8_DELETE; \
+ void operator=(const TypeName&) V8_DELETE
+
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName() V8_DELETE; \
+ DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+
+// Newly written code should use V8_INLINE and V8_NOINLINE directly.
+#define INLINE(declarator) V8_INLINE declarator
+#define NO_INLINE(declarator) V8_NOINLINE declarator
+
+
+// Newly written code should use V8_WARN_UNUSED_RESULT.
+#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT
+
+
+// Define V8_USE_ADDRESS_SANITIZER macros.
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define V8_USE_ADDRESS_SANITIZER 1
+#endif
+#endif
+
+// Define DISABLE_ASAN macros.
+#ifdef V8_USE_ADDRESS_SANITIZER
+#define DISABLE_ASAN __attribute__((no_sanitize_address))
+#else
+#define DISABLE_ASAN
+#endif
+
+
+#if V8_CC_GNU
+#define V8_IMMEDIATE_CRASH() __builtin_trap()
+#else
+#define V8_IMMEDIATE_CRASH() ((void(*)())0)()
+#endif
+
+
+// Use C++11 static_assert if possible, which gives error
+// messages that are easier to understand on first sight.
+#if V8_HAS_CXX11_STATIC_ASSERT
+#define STATIC_ASSERT(test) static_assert(test, #test)
+#else
+// This is inspired by the static assertion facility in boost. This
+// is pretty magical. If it causes you trouble on a platform you may
+// find a fix in the boost code.
+template <bool> class StaticAssertion;
+template <> class StaticAssertion<true> { };
+// This macro joins two tokens. If one of the tokens is a macro the
+// helper call causes it to be resolved before joining.
+#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
+#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
+// Causes an error during compilation if the condition is not
+// statically known to be true. It is formulated as a typedef so that
+// it can be used wherever a typedef can be used. Beware that this
+// actually causes each use to introduce a new defined type with a
+// name depending on the source line.
+template <int> class StaticAssertionHelper { };
+#define STATIC_ASSERT(test) \
+ typedef \
+ StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
+ SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED
+
+#endif
+
+
+// The USE(x) template is used to silence C++ compiler warnings
+// issued for (yet) unused variables (typically parameters).
+template <typename T>
+inline void USE(T) { }
+
+
+#define IS_POWER_OF_TWO(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+// The following macro works on both 32 and 64-bit platforms.
+// Usage: instead of writing 0x1234567890123456
+// write V8_2PART_UINT64_C(0x12345678,90123456);
+#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
+
+#endif // V8_BASE_MACROS_H_
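A quick sketch of the two arithmetic helpers at the bottom of this header (values are arbitrary):

    #include "src/base/macros.h"

    static const int kPrimes[] = {2, 3, 5, 7, 11};
    STATIC_ASSERT(ARRAY_SIZE(kPrimes) == 5);  // Compile-time element count.

    // Splitting the literal keeps 32-bit MSVC happy with a 64-bit constant:
    // (0x12345678 << 32) + 0x90123456 == 0x1234567890123456.
    static const uint64_t kMagic = V8_2PART_UINT64_C(0x12345678, 90123456);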
diff --git a/chromium/v8/src/base/once.cc b/chromium/v8/src/base/once.cc
new file mode 100644
index 00000000000..eaabf40d9a5
--- /dev/null
+++ b/chromium/v8/src/base/once.cc
@@ -0,0 +1,53 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/once.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sched.h>
+#endif
+
+#include "src/base/atomicops.h"
+
+namespace v8 {
+namespace base {
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
+ AtomicWord state = Acquire_Load(once);
+ // Fast path. The provided function was already executed.
+ if (state == ONCE_STATE_DONE) {
+ return;
+ }
+
+ // The function execution did not complete yet. The once object can be in one
+ // of the two following states:
+ // - UNINITIALIZED: We are the first thread calling this function.
+ // - EXECUTING_FUNCTION: Another thread is already executing the function.
+ //
+ // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
+ // atomically.
+ state = Acquire_CompareAndSwap(
+ once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
+ if (state == ONCE_STATE_UNINITIALIZED) {
+ // We are the first thread to call this function, so we have to call the
+ // function.
+ init_func(arg);
+ Release_Store(once, ONCE_STATE_DONE);
+ } else {
+ // Another thread has already started executing the function. We need to
+ // wait until it completes the initialization.
+ while (state == ONCE_STATE_EXECUTING_FUNCTION) {
+#ifdef _WIN32
+ ::Sleep(0);
+#else
+ sched_yield();
+#endif
+ state = Acquire_Load(once);
+ }
+ }
+}
+
+} } // namespace v8::base
diff --git a/chromium/v8/src/once.h b/chromium/v8/src/base/once.h
index a44b8fafbf5..a8e8437afaa 100644
--- a/chromium/v8/src/once.h
+++ b/chromium/v8/src/base/once.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// emulates google3/base/once.h
//
@@ -72,19 +49,19 @@
// whatsoever to statically-initialize its synchronization primitives, so our
// only choice is to assume that dynamic initialization is single-threaded.
-#ifndef V8_ONCE_H_
-#define V8_ONCE_H_
+#ifndef V8_BASE_ONCE_H_
+#define V8_BASE_ONCE_H_
-#include "atomicops.h"
+#include "src/base/atomicops.h"
namespace v8 {
-namespace internal {
+namespace base {
typedef AtomicWord OnceType;
#define V8_ONCE_INIT 0
-#define V8_DECLARE_ONCE(NAME) ::v8::internal::OnceType NAME
+#define V8_DECLARE_ONCE(NAME) ::v8::base::OnceType NAME
enum {
ONCE_STATE_UNINITIALIZED = 0,
@@ -118,6 +95,6 @@ inline void CallOnce(OnceType* once,
}
}
-} } // namespace v8::internal
+} } // namespace v8::base
-#endif // V8_ONCE_H_
+#endif // V8_BASE_ONCE_H_
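CallOnce pairs with V8_DECLARE_ONCE; a minimal usage sketch in the google3 idiom the header emulates (InitTables and LookUp are hypothetical):

    #include "src/base/once.h"

    V8_DECLARE_ONCE(init_tables_once);
    static int g_squares[256];

    static void InitTables() {
      for (int i = 0; i < 256; i++) g_squares[i] = i * i;
    }

    int LookUp(int i) {
      // Racing callers block until InitTables has run exactly once.
      v8::base::CallOnce(&init_tables_once, &InitTables);
      return g_squares[i & 0xff];
    }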
diff --git a/chromium/v8/src/base/safe_conversions.h b/chromium/v8/src/base/safe_conversions.h
new file mode 100644
index 00000000000..0a1bd696469
--- /dev/null
+++ b/chromium/v8/src/base/safe_conversions.h
@@ -0,0 +1,67 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_CONVERSIONS_H_
+#define V8_BASE_SAFE_CONVERSIONS_H_
+
+#include <limits>
+
+#include "src/base/safe_conversions_impl.h"
+
+namespace v8 {
+namespace base {
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+inline bool IsValueInRangeForNumericType(Src value) {
+ return internal::DstRangeRelationToSrcRange<Dst>(value) ==
+ internal::RANGE_VALID;
+}
+
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. NaN source will always trigger a CHECK.
+template <typename Dst, typename Src>
+inline Dst checked_cast(Src value) {
+ CHECK(IsValueInRangeForNumericType<Dst>(value));
+ return static_cast<Dst>(value);
+}
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate rather than overflow or
+// underflow. NaN assignment to an integral will trigger a CHECK condition.
+template <typename Dst, typename Src>
+inline Dst saturated_cast(Src value) {
+ // Optimization for floating point values, which already saturate.
+ if (std::numeric_limits<Dst>::is_iec559)
+ return static_cast<Dst>(value);
+
+ switch (internal::DstRangeRelationToSrcRange<Dst>(value)) {
+ case internal::RANGE_VALID:
+ return static_cast<Dst>(value);
+
+ case internal::RANGE_UNDERFLOW:
+ return std::numeric_limits<Dst>::min();
+
+ case internal::RANGE_OVERFLOW:
+ return std::numeric_limits<Dst>::max();
+
+ // Should fail only on attempting to assign NaN to a saturated integer.
+ case internal::RANGE_INVALID:
+ CHECK(false);
+ return std::numeric_limits<Dst>::max();
+ }
+
+ UNREACHABLE();
+ return static_cast<Dst>(value);
+}
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_CONVERSIONS_H_
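The difference between the two casts in one sketch (values chosen to trip the int16_t bounds):

    #include <stdint.h>
    #include "src/base/safe_conversions.h"

    void Demo() {
      int big = 70000;
      // Saturates: 70000 exceeds INT16_MAX, so this yields 32767.
      int16_t clamped = v8::base::saturated_cast<int16_t>(big);
      // The checked flavour would CHECK-fail at runtime on the same input:
      //   int16_t boom = v8::base::checked_cast<int16_t>(big);
      int16_t ok = v8::base::checked_cast<int16_t>(123);  // In range; passes.
      (void)clamped;
      (void)ok;
    }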
diff --git a/chromium/v8/src/base/safe_conversions_impl.h b/chromium/v8/src/base/safe_conversions_impl.h
new file mode 100644
index 00000000000..2226f17aebd
--- /dev/null
+++ b/chromium/v8/src/base/safe_conversions_impl.h
@@ -0,0 +1,220 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_CONVERSIONS_IMPL_H_
+#define V8_BASE_SAFE_CONVERSIONS_IMPL_H_
+
+#include <limits>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+// The std library doesn't provide a binary max_exponent for integers, however
+// we can compute one by adding one to the number of non-sign bits. This allows
+// for accurate range comparisons between floating point and integer types.
+template <typename NumericType>
+struct MaxExponent {
+ static const int value = std::numeric_limits<NumericType>::is_iec559
+ ? std::numeric_limits<NumericType>::max_exponent
+ : (sizeof(NumericType) * 8 + 1 -
+ std::numeric_limits<NumericType>::is_signed);
+};
+
+enum IntegerRepresentation {
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED
+};
+
+// A range for a given numeric Src type is contained by a given numeric Dst
+// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
+// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true.
+// We implement this as template specializations rather than simple static
+// comparisons to ensure type correctness in our comparisons.
+enum NumericRangeRepresentation {
+ NUMERIC_RANGE_NOT_CONTAINED,
+ NUMERIC_RANGE_CONTAINED
+};
+
+// Helper templates to statically determine if our destination type can contain
+// maximum and minimum values represented by the source type.
+
+template <
+ typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign =
+ std::numeric_limits<Src>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED >
+struct StaticDstRangeRelationToSrcRange;
+
+// Same sign: Dst is guaranteed to contain Src only if its range is equal or
+// larger.
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value
+ ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED> {
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value > MaxExponent<Src>::value
+ ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED> {
+ static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+enum RangeConstraint {
+ RANGE_VALID = 0x0, // Value can be represented by the destination type.
+  RANGE_UNDERFLOW = 0x1,  // Value would underflow.
+  RANGE_OVERFLOW = 0x2,  // Value would overflow.
+ RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
+};
+
+// Helper function for coercing an int back to a RangeConstraint.
+inline RangeConstraint GetRangeConstraint(int integer_range_constraint) {
+ // TODO(jochen/jkummerow): Re-enable this when checks.h is available in base.
+ // ASSERT(integer_range_constraint >= RANGE_VALID &&
+ // integer_range_constraint <= RANGE_INVALID);
+ return static_cast<RangeConstraint>(integer_range_constraint);
+}
+
+// This function creates a RangeConstraint from an upper and lower bound
+// check by taking advantage of the fact that only NaN can be out of range in
+// both directions at once.
+inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
+ bool is_in_lower_bound) {
+ return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
+ (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
+}
+
+template <
+ typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ NumericRangeRepresentation DstRange =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value >
+struct DstRangeRelationToSrcRangeImpl;
+
+// The following templates are for ranges that must be verified at runtime. We
+// split them into checks based on signedness to avoid confusing casts and
+// compiler warnings on signed and unsigned comparisons.
+
+// Dst range is statically determined to contain Src: Nothing to check.
+template <typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign,
+ IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ DstSign,
+ SrcSign,
+ NUMERIC_RANGE_CONTAINED> {
+ static RangeConstraint Check(Src value) { return RANGE_VALID; }
+};
+
+// Signed to signed narrowing: Both the upper and lower boundaries may be
+// exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static RangeConstraint Check(Src value) {
+ return std::numeric_limits<Dst>::is_iec559
+ ? GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
+ value >= -std::numeric_limits<Dst>::max())
+ : GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
+ value >= std::numeric_limits<Dst>::min());
+ }
+};
+
+// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static RangeConstraint Check(Src value) {
+ return GetRangeConstraint(value <= std::numeric_limits<Dst>::max(), true);
+ }
+};
+
+// Unsigned to signed: The upper boundary may be exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static RangeConstraint Check(Src value) {
+ return sizeof(Dst) > sizeof(Src)
+ ? RANGE_VALID
+ : GetRangeConstraint(
+ value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
+ true);
+ }
+};
+
+// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
+// and any negative value exceeds the lower boundary.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static RangeConstraint Check(Src value) {
+ return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
+ ? GetRangeConstraint(true, value >= static_cast<Src>(0))
+ : GetRangeConstraint(
+ value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
+ value >= static_cast<Src>(0));
+ }
+};
+
+template <typename Dst, typename Src>
+inline RangeConstraint DstRangeRelationToSrcRange(Src value) {
+ // Both source and destination must be numeric.
+ STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
+ STATIC_ASSERT(std::numeric_limits<Dst>::is_specialized);
+ return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
+}
+
+} // namespace internal
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_CONVERSIONS_IMPL_H_
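The RangeConstraint values are a two-bit encoding of the bound checks, which is what lets the bool overload of GetRangeConstraint classify NaN for free; a worked sketch:

    #include "src/base/safe_conversions_impl.h"

    using v8::base::internal::GetRangeConstraint;
    using v8::base::internal::RangeConstraint;

    void RangeDemo() {
      // e.g. double 1e10 -> int32_t: only the upper bound check fails.
      RangeConstraint r1 = GetRangeConstraint(false, true);   // RANGE_OVERFLOW
      // NaN fails both bound checks at once: 0x1 | 0x2 == RANGE_INVALID.
      RangeConstraint r2 = GetRangeConstraint(false, false);  // RANGE_INVALID
      (void)r1;
      (void)r2;
    }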
diff --git a/chromium/v8/src/base/safe_math.h b/chromium/v8/src/base/safe_math.h
new file mode 100644
index 00000000000..62a2f723f2b
--- /dev/null
+++ b/chromium/v8/src/base/safe_math.h
@@ -0,0 +1,276 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_MATH_H_
+#define V8_BASE_SAFE_MATH_H_
+
+#include "src/base/safe_math_impl.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+// CheckedNumeric implements all the logic and operators for detecting integer
+// boundary conditions such as overflow, underflow, and invalid conversions.
+// The CheckedNumeric type implicitly converts from floating point and integer
+// data types, and contains overloads for basic arithmetic operations (i.e.: +,
+// -, *, /, %).
+//
+// The following methods convert from CheckedNumeric to standard numeric values:
+//  IsValid() - Returns true if the underlying numeric value is valid (i.e.
+//     it has not wrapped and is not the result of an invalid conversion).
+// ValueOrDie() - Returns the underlying value. If the state is not valid this
+// call will crash on a CHECK.
+// ValueOrDefault() - Returns the current value, or the supplied default if the
+// state is not valid.
+//  ValueFloating() - Returns the underlying floating point value (valid only
+//     for floating point CheckedNumeric types).
+//
+// Bitwise operations are explicitly not supported, because correct
+// handling of some cases (e.g. sign manipulation) is ambiguous. Comparison
+// operations are explicitly not supported because they could result in a crash
+// on a CHECK condition. You should use patterns like the following for these
+// operations:
+// Bitwise operation:
+// CheckedNumeric<int> checked_int = untrusted_input_value;
+// int x = checked_int.ValueOrDefault(0) | kFlagValues;
+// Comparison:
+//    CheckedNumeric<size_t> checked_size = untrusted_input_value;
+//    checked_size = checked_size + HEADER_LENGTH;
+// if (checked_size.IsValid() && checked_size.ValueOrDie() < buffer_size)
+// Do stuff...
+template <typename T>
+class CheckedNumeric {
+ public:
+ typedef T type;
+
+ CheckedNumeric() {}
+
+ // Copy constructor.
+ template <typename Src>
+ CheckedNumeric(const CheckedNumeric<Src>& rhs)
+ : state_(rhs.ValueUnsafe(), rhs.validity()) {}
+
+ template <typename Src>
+ CheckedNumeric(Src value, RangeConstraint validity)
+ : state_(value, validity) {}
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to CheckedNumerics to make them easier to use.
+ template <typename Src>
+ CheckedNumeric(Src value) // NOLINT
+ : state_(value) {
+ // Argument must be numeric.
+ STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
+ }
+
+ // IsValid() is the public API to test if a CheckedNumeric is currently valid.
+ bool IsValid() const { return validity() == RANGE_VALID; }
+
+  // ValueOrDie() - The primary accessor for the underlying value. If the
+  // current state is not valid it will CHECK and crash.
+ T ValueOrDie() const {
+ CHECK(IsValid());
+ return state_.value();
+ }
+
+  // ValueOrDefault(T default_value) - A convenience method that returns the
+ // current value if the state is valid, and the supplied default_value for
+ // any other state.
+ T ValueOrDefault(T default_value) const {
+ return IsValid() ? state_.value() : default_value;
+ }
+
+ // ValueFloating() - Since floating point values include their validity state,
+ // we provide an easy method for extracting them directly, without a risk of
+ // crashing on a CHECK.
+ T ValueFloating() const {
+ // Argument must be a floating-point value.
+ STATIC_ASSERT(std::numeric_limits<T>::is_iec559);
+ return CheckedNumeric<T>::cast(*this).ValueUnsafe();
+ }
+
+ // validity() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now for
+ // tests and to avoid a big matrix of friend operator overloads. But the
+ // values it returns are likely to change in the future.
+ // Returns: current validity state (i.e. valid, overflow, underflow, nan).
+ // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+ // saturation/wrapping so we can expose this state consistently and implement
+ // saturated arithmetic.
+ RangeConstraint validity() const { return state_.validity(); }
+
+ // ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now
+ // for tests and to avoid a big matrix of friend operator overloads. But the
+ // values it returns are likely to change in the future.
+ // Returns: the raw numeric value, regardless of the current state.
+ // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+ // saturation/wrapping so we can expose this state consistently and implement
+ // saturated arithmetic.
+ T ValueUnsafe() const { return state_.value(); }
+
+ // Prototypes for the supported arithmetic operator overloads.
+ template <typename Src> CheckedNumeric& operator+=(Src rhs);
+ template <typename Src> CheckedNumeric& operator-=(Src rhs);
+ template <typename Src> CheckedNumeric& operator*=(Src rhs);
+ template <typename Src> CheckedNumeric& operator/=(Src rhs);
+ template <typename Src> CheckedNumeric& operator%=(Src rhs);
+
+ CheckedNumeric operator-() const {
+ RangeConstraint validity;
+ T value = CheckedNeg(state_.value(), &validity);
+ // Negation is always valid for floating point.
+ if (std::numeric_limits<T>::is_iec559)
+ return CheckedNumeric<T>(value);
+
+ validity = GetRangeConstraint(state_.validity() | validity);
+ return CheckedNumeric<T>(value, validity);
+ }
+
+ CheckedNumeric Abs() const {
+ RangeConstraint validity;
+ T value = CheckedAbs(state_.value(), &validity);
+ // Absolute value is always valid for floating point.
+ if (std::numeric_limits<T>::is_iec559)
+ return CheckedNumeric<T>(value);
+
+ validity = GetRangeConstraint(state_.validity() | validity);
+ return CheckedNumeric<T>(value, validity);
+ }
+
+ CheckedNumeric& operator++() {
+ *this += 1;
+ return *this;
+ }
+
+ CheckedNumeric operator++(int) {
+ CheckedNumeric value = *this;
+ *this += 1;
+ return value;
+ }
+
+ CheckedNumeric& operator--() {
+ *this -= 1;
+ return *this;
+ }
+
+ CheckedNumeric operator--(int) {
+ CheckedNumeric value = *this;
+ *this -= 1;
+ return value;
+ }
+
+ // These static methods behave like a convenience cast operator targeting
+ // the desired CheckedNumeric type. As an optimization, a reference is
+ // returned when Src is the same type as T.
+ template <typename Src>
+ static CheckedNumeric<T> cast(
+ Src u,
+ typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type =
+ 0) {
+ return u;
+ }
+
+ template <typename Src>
+ static CheckedNumeric<T> cast(
+ const CheckedNumeric<Src>& u,
+ typename enable_if<!is_same<Src, T>::value, int>::type = 0) {
+ return u;
+ }
+
+ static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; }
+
+ private:
+ CheckedNumericState<T> state_;
+};
+
+// This is the boilerplate for the standard arithmetic operator overloads. A
+// macro isn't the prettiest solution, but it beats rewriting these five times.
+// Some details worth noting are:
+// * We apply the standard arithmetic promotions.
+// * We skip range checks for floating points.
+// * We skip range checks for destination integers with sufficient range.
+// TODO(jschuh): extract these out into templates.
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP) \
+ /* Binary arithmetic operator for CheckedNumerics of the same type. */ \
+ template <typename T> \
+ CheckedNumeric<typename ArithmeticPromotion<T>::type> operator OP( \
+ const CheckedNumeric<T>& lhs, const CheckedNumeric<T>& rhs) { \
+ typedef typename ArithmeticPromotion<T>::type Promotion; \
+ /* Floating point always takes the fast path */ \
+ if (std::numeric_limits<T>::is_iec559) \
+ return CheckedNumeric<T>(lhs.ValueUnsafe() OP rhs.ValueUnsafe()); \
+ if (IsIntegerArithmeticSafe<Promotion, T, T>::value) \
+ return CheckedNumeric<Promotion>( \
+ lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \
+ GetRangeConstraint(rhs.validity() | lhs.validity())); \
+ RangeConstraint validity = RANGE_VALID; \
+ T result = Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()), \
+ static_cast<Promotion>(rhs.ValueUnsafe()), \
+ &validity); \
+ return CheckedNumeric<Promotion>( \
+ result, \
+ GetRangeConstraint(validity | lhs.validity() | rhs.validity())); \
+ } \
+ /* Assignment arithmetic operator implementation from CheckedNumeric. */ \
+ template <typename T> \
+ template <typename Src> \
+ CheckedNumeric<T>& CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) { \
+ *this = CheckedNumeric<T>::cast(*this) OP CheckedNumeric<Src>::cast(rhs); \
+ return *this; \
+ } \
+ /* Binary arithmetic operator for CheckedNumeric of different type. */ \
+ template <typename T, typename Src> \
+ CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
+ const CheckedNumeric<Src>& lhs, const CheckedNumeric<T>& rhs) { \
+ typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
+ if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
+ return CheckedNumeric<Promotion>( \
+ lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \
+ GetRangeConstraint(rhs.validity() | lhs.validity())); \
+ return CheckedNumeric<Promotion>::cast(lhs) \
+ OP CheckedNumeric<Promotion>::cast(rhs); \
+ } \
+ /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
+ template <typename T, typename Src> \
+ CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
+ const CheckedNumeric<T>& lhs, Src rhs) { \
+ typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
+ if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
+ return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs, \
+ lhs.validity()); \
+ return CheckedNumeric<Promotion>::cast(lhs) \
+ OP CheckedNumeric<Promotion>::cast(rhs); \
+ } \
+ /* Binary arithmetic operator for right numeric and left CheckedNumeric. */ \
+ template <typename T, typename Src> \
+ CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
+ Src lhs, const CheckedNumeric<T>& rhs) { \
+ typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
+ if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
+ return CheckedNumeric<Promotion>(lhs OP rhs.ValueUnsafe(), \
+ rhs.validity()); \
+ return CheckedNumeric<Promotion>::cast(lhs) \
+ OP CheckedNumeric<Promotion>::cast(rhs); \
+ }
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, += )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %= )
+
+#undef BASE_NUMERIC_ARITHMETIC_OPERATORS
+
+} // namespace internal
+
+using internal::CheckedNumeric;
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_MATH_H_
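Putting the class to work on the classic count * size overflow check; a sketch under the API above (SafeBufferSize and the header size are made up):

    #include <stdint.h>
    #include "src/base/safe_math.h"

    bool SafeBufferSize(int32_t count, int32_t elem_size, int32_t* out) {
      v8::base::CheckedNumeric<int32_t> total = count;
      total *= elem_size;  // A wrap here is recorded, not undefined behavior.
      total += 8;          // Hypothetical fixed header size.
      if (!total.IsValid()) return false;
      *out = total.ValueOrDie();  // CHECK-guarded, but IsValid was tested.
      return true;
    }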
diff --git a/chromium/v8/src/base/safe_math_impl.h b/chromium/v8/src/base/safe_math_impl.h
new file mode 100644
index 00000000000..055e2a02750
--- /dev/null
+++ b/chromium/v8/src/base/safe_math_impl.h
@@ -0,0 +1,531 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_MATH_IMPL_H_
+#define V8_BASE_SAFE_MATH_IMPL_H_
+
+#include <stdint.h>
+
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+
+#include "src/base/macros.h"
+#include "src/base/safe_conversions.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+
+// From Chromium's base/template_util.h:
+
+template<class T, T v>
+struct integral_constant {
+ static const T value = v;
+ typedef T value_type;
+ typedef integral_constant<T, v> type;
+};
+
+template <class T, T v> const T integral_constant<T, v>::value;
+
+typedef integral_constant<bool, true> true_type;
+typedef integral_constant<bool, false> false_type;
+
+template <class T, class U> struct is_same : public false_type {};
+template <class T> struct is_same<T, T> : true_type {};
+
+template<bool B, class T = void>
+struct enable_if {};
+
+template<class T>
+struct enable_if<true, T> { typedef T type; };
+
+// </template_util.h>
+
+
+// Everything from here up to the floating point operations is portable C++,
+// but it may not be fast. This code could be split based on
+// platform/architecture and replaced with potentially faster implementations.
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForSizeAndSign;
+template <>
+struct IntegerForSizeAndSign<1, true> {
+ typedef int8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<1, false> {
+ typedef uint8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, true> {
+ typedef int16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, false> {
+ typedef uint16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, true> {
+ typedef int32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, false> {
+ typedef uint32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, true> {
+ typedef int64_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, false> {
+ typedef uint64_t type;
+};
+
+// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
+// support 128-bit math, then the ArithmeticPromotion template below will need
+// to be updated (or more likely replaced with a decltype expression).
+
+template <typename Integer>
+struct UnsignedIntegerForSize {
+ typedef typename enable_if<
+ std::numeric_limits<Integer>::is_integer,
+ typename IntegerForSizeAndSign<sizeof(Integer), false>::type>::type type;
+};
+
+template <typename Integer>
+struct SignedIntegerForSize {
+ typedef typename enable_if<
+ std::numeric_limits<Integer>::is_integer,
+ typename IntegerForSizeAndSign<sizeof(Integer), true>::type>::type type;
+};
+
+template <typename Integer>
+struct TwiceWiderInteger {
+ typedef typename enable_if<
+ std::numeric_limits<Integer>::is_integer,
+ typename IntegerForSizeAndSign<
+ sizeof(Integer) * 2,
+ std::numeric_limits<Integer>::is_signed>::type>::type type;
+};
+
+template <typename Integer>
+struct PositionOfSignBit {
+ static const typename enable_if<std::numeric_limits<Integer>::is_integer,
+ size_t>::type value = 8 * sizeof(Integer) - 1;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename T>
+bool HasSignBit(T x) {
+ // Cast to unsigned since right shift on signed is undefined.
+ return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >>
+ PositionOfSignBit<T>::value);
+}
+
+// This wrapper undoes the standard integer promotions.
+template <typename T>
+T BinaryComplement(T x) {
+ return ~x;
+}
+
+// Here are the actual portable checked integer math implementations.
+// TODO(jschuh): Break this code out from the enable_if pattern and find a clean
+// way to coalesce things into the CheckedNumericState specializations below.
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedAdd(T x, T y, RangeConstraint* validity) {
+ // Since the value of x+y is undefined if we have a signed type, we compute
+ // it using the unsigned type of the same size.
+ typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+ UnsignedDst ux = static_cast<UnsignedDst>(x);
+ UnsignedDst uy = static_cast<UnsignedDst>(y);
+ UnsignedDst uresult = ux + uy;
+ // Addition is valid if the sign of (x + y) is equal to either that of x or
+ // that of y.
+ if (std::numeric_limits<T>::is_signed) {
+ if (HasSignBit(BinaryComplement((uresult ^ ux) & (uresult ^ uy))))
+ *validity = RANGE_VALID;
+ else // Direction of wrap is inverse of result sign.
+ *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+
+ } else { // Unsigned is either valid or overflow.
+ *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW;
+ }
+ return static_cast<T>(uresult);
+}
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedSub(T x, T y, RangeConstraint* validity) {
+  // Since the value of x-y is undefined if we have a signed type, we compute
+ // it using the unsigned type of the same size.
+ typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+ UnsignedDst ux = static_cast<UnsignedDst>(x);
+ UnsignedDst uy = static_cast<UnsignedDst>(y);
+ UnsignedDst uresult = ux - uy;
+  // Subtraction is valid if either x and y have the same sign, or (x-y) and
+  // x have the same sign.
+ if (std::numeric_limits<T>::is_signed) {
+ if (HasSignBit(BinaryComplement((uresult ^ ux) & (ux ^ uy))))
+ *validity = RANGE_VALID;
+ else // Direction of wrap is inverse of result sign.
+ *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+
+ } else { // Unsigned is either valid or underflow.
+ *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW;
+ }
+ return static_cast<T>(uresult);
+}
+
+// Integer multiplication is a bit complicated. In the fast case we just
+// promote to a twice wider type and range check the result. In the
+// slow case we need to manually check that the result won't be truncated by
+// checking with division against the appropriate bound.
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && sizeof(T) * 2 <= sizeof(uintmax_t),
+ T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+ typedef typename TwiceWiderInteger<T>::type IntermediateType;
+ IntermediateType tmp =
+ static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y);
+ *validity = DstRangeRelationToSrcRange<T>(tmp);
+ return static_cast<T>(tmp);
+}
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<T>::is_signed &&
+ (sizeof(T) * 2 > sizeof(uintmax_t)),
+ T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  // If either side is zero then the result will be zero.
+  if (!(x || y)) {
+    *validity = RANGE_VALID;
+    return 0;
+
+ } else if (x > 0) {
+ if (y > 0)
+ *validity =
+ x <= std::numeric_limits<T>::max() / y ? RANGE_VALID : RANGE_OVERFLOW;
+ else
+ *validity = y >= std::numeric_limits<T>::min() / x ? RANGE_VALID
+ : RANGE_UNDERFLOW;
+
+ } else {
+ if (y > 0)
+ *validity = x >= std::numeric_limits<T>::min() / y ? RANGE_VALID
+ : RANGE_UNDERFLOW;
+ else
+ *validity =
+ y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW;
+ }
+
+ return x * y;
+}
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer &&
+ !std::numeric_limits<T>::is_signed &&
+ (sizeof(T) * 2 > sizeof(uintmax_t)),
+ T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+ *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y)
+ ? RANGE_VALID
+ : RANGE_OVERFLOW;
+ return x * y;
+}
+
+// Division just requires a check for an invalid negation on signed min/-1.
+template <typename T>
+T CheckedDiv(
+ T x,
+ T y,
+ RangeConstraint* validity,
+ typename enable_if<std::numeric_limits<T>::is_integer, int>::type = 0) {
+ if (std::numeric_limits<T>::is_signed && x == std::numeric_limits<T>::min() &&
+ y == static_cast<T>(-1)) {
+ *validity = RANGE_OVERFLOW;
+ return std::numeric_limits<T>::min();
+ }
+
+ *validity = RANGE_VALID;
+ return x / y;
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+ *validity = y > 0 ? RANGE_VALID : RANGE_INVALID;
+ return x % y;
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+ *validity = RANGE_VALID;
+ return x % y;
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+ *validity =
+ value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+ // The negation of signed min is min, so catch that one.
+ return -value;
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+ // The only legal unsigned negation is zero.
+ *validity = value ? RANGE_UNDERFLOW : RANGE_VALID;
+ return static_cast<T>(
+ -static_cast<typename SignedIntegerForSize<T>::type>(value));
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+ *validity =
+ value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+ return std::abs(value);
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+  // Absolute value of an unsigned value is just its identity.
+ *validity = RANGE_VALID;
+ return value;
+}
+
+// These are the floating point stubs that the compiler needs to see. Only the
+// negation operation is ever called.
+#define BASE_FLOAT_ARITHMETIC_STUBS(NAME) \
+ template <typename T> \
+ typename enable_if<std::numeric_limits<T>::is_iec559, T>::type \
+ Checked##NAME(T, T, RangeConstraint*) { \
+ UNREACHABLE(); \
+ return 0; \
+ }
+
+BASE_FLOAT_ARITHMETIC_STUBS(Add)
+BASE_FLOAT_ARITHMETIC_STUBS(Sub)
+BASE_FLOAT_ARITHMETIC_STUBS(Mul)
+BASE_FLOAT_ARITHMETIC_STUBS(Div)
+BASE_FLOAT_ARITHMETIC_STUBS(Mod)
+
+#undef BASE_FLOAT_ARITHMETIC_STUBS
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
+ T value,
+ RangeConstraint*) {
+ return -value;
+}
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
+ T value,
+ RangeConstraint*) {
+ return std::abs(value);
+}
+
+// Floats carry around their validity state with them, but integers do not. So,
+// we wrap the underlying value in a specialization in order to hide that detail
+// and expose an interface via accessors.
+enum NumericRepresentation {
+ NUMERIC_INTEGER,
+ NUMERIC_FLOATING,
+ NUMERIC_UNKNOWN
+};
+
+template <typename NumericType>
+struct GetNumericRepresentation {
+ static const NumericRepresentation value =
+ std::numeric_limits<NumericType>::is_integer
+ ? NUMERIC_INTEGER
+ : (std::numeric_limits<NumericType>::is_iec559 ? NUMERIC_FLOATING
+ : NUMERIC_UNKNOWN);
+};
+
+template <typename T, NumericRepresentation type =
+ GetNumericRepresentation<T>::value>
+class CheckedNumericState {};
+
+// Integrals require quite a bit of additional housekeeping to manage state.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_INTEGER> {
+ private:
+ T value_;
+ RangeConstraint validity_;
+
+ public:
+ template <typename Src, NumericRepresentation type>
+ friend class CheckedNumericState;
+
+ CheckedNumericState() : value_(0), validity_(RANGE_VALID) {}
+
+ template <typename Src>
+ CheckedNumericState(Src value, RangeConstraint validity)
+ : value_(value),
+ validity_(GetRangeConstraint(validity |
+ DstRangeRelationToSrcRange<T>(value))) {
+ // Argument must be numeric.
+ STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
+ }
+
+ // Copy constructor.
+ template <typename Src>
+ CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : value_(static_cast<T>(rhs.value())),
+ validity_(GetRangeConstraint(
+ rhs.validity() | DstRangeRelationToSrcRange<T>(rhs.value()))) {}
+
+ template <typename Src>
+ explicit CheckedNumericState(
+ Src value,
+ typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type =
+ 0)
+ : value_(static_cast<T>(value)),
+ validity_(DstRangeRelationToSrcRange<T>(value)) {}
+
+ RangeConstraint validity() const { return validity_; }
+ T value() const { return value_; }
+};
+
+// Floating points maintain their own validity, but need translation wrappers.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_FLOATING> {
+ private:
+ T value_;
+
+ public:
+ template <typename Src, NumericRepresentation type>
+ friend class CheckedNumericState;
+
+ CheckedNumericState() : value_(0.0) {}
+
+ template <typename Src>
+ CheckedNumericState(
+ Src value,
+ RangeConstraint validity,
+ typename enable_if<std::numeric_limits<Src>::is_integer, int>::type = 0) {
+ switch (DstRangeRelationToSrcRange<T>(value)) {
+ case RANGE_VALID:
+ value_ = static_cast<T>(value);
+ break;
+
+ case RANGE_UNDERFLOW:
+ value_ = -std::numeric_limits<T>::infinity();
+ break;
+
+ case RANGE_OVERFLOW:
+ value_ = std::numeric_limits<T>::infinity();
+ break;
+
+ case RANGE_INVALID:
+ value_ = std::numeric_limits<T>::quiet_NaN();
+ break;
+ }
+ }
+
+ template <typename Src>
+ explicit CheckedNumericState(
+ Src value,
+ typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type =
+ 0)
+ : value_(static_cast<T>(value)) {}
+
+ // Copy constructor.
+ template <typename Src>
+ CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : value_(static_cast<T>(rhs.value())) {}
+
+ RangeConstraint validity() const {
+ return GetRangeConstraint(value_ <= std::numeric_limits<T>::max(),
+ value_ >= -std::numeric_limits<T>::max());
+ }
+ T value() const { return value_; }
+};
+
+// For integers less than 128-bit and floats 32-bit or larger, we can distil
+// C/C++ arithmetic promotions down to two simple rules:
+// 1. The type with the larger maximum exponent always takes precedence.
+// 2. The resulting type must be promoted to at least an int.
+// The following template specializations implement that promotion logic.
+enum ArithmeticPromotionCategory {
+ LEFT_PROMOTION,
+ RIGHT_PROMOTION,
+ DEFAULT_PROMOTION
+};
+
+template <typename Lhs,
+ typename Rhs = Lhs,
+ ArithmeticPromotionCategory Promotion =
+ (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+ ? (MaxExponent<Lhs>::value > MaxExponent<int>::value
+ ? LEFT_PROMOTION
+ : DEFAULT_PROMOTION)
+ : (MaxExponent<Rhs>::value > MaxExponent<int>::value
+ ? RIGHT_PROMOTION
+ : DEFAULT_PROMOTION) >
+struct ArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ typedef Lhs type;
+};
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ typedef Rhs type;
+};
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, DEFAULT_PROMOTION> {
+ typedef int type;
+};
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs>
+struct IsIntegerArithmeticSafe {
+ static const bool value = !std::numeric_limits<T>::is_iec559 &&
+ StaticDstRangeRelationToSrcRange<T, Lhs>::value ==
+ NUMERIC_RANGE_CONTAINED &&
+ sizeof(T) >= (2 * sizeof(Lhs)) &&
+                            StaticDstRangeRelationToSrcRange<T, Rhs>::value ==
+ NUMERIC_RANGE_CONTAINED &&
+ sizeof(T) >= (2 * sizeof(Rhs));
+};
+
+} // namespace internal
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_MATH_IMPL_H_
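For intuition on the CheckedAdd sign trick above: signed addition wrapped exactly when the result's sign differs from the signs of both operands. A standalone 8-bit re-derivation (illustrative only, not part of the tree):

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      int8_t x = 100, y = 50;  // 150 > INT8_MAX, so this must overflow.
      uint8_t ux = static_cast<uint8_t>(x);
      uint8_t uy = static_cast<uint8_t>(y);
      uint8_t uresult = ux + uy;  // 0x96, i.e. -106 when reinterpreted.
      // Valid iff the result sign matches the sign of x or the sign of y.
      bool valid = !(((uresult ^ ux) & (uresult ^ uy)) >> 7);
      // Prints valid=0 sign=1: invalid, negative result => RANGE_OVERFLOW.
      printf("valid=%d sign=%d\n", valid, uresult >> 7);
      return 0;
    }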
diff --git a/chromium/v8/src/win32-headers.h b/chromium/v8/src/base/win32-headers.h
index 98b0120ea16..5432de165c3 100644
--- a/chromium/v8/src/win32-headers.h
+++ b/chromium/v8/src/base/win32-headers.h
@@ -1,32 +1,9 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#ifndef V8_WIN32_HEADERS_H_
-#define V8_WIN32_HEADERS_H_
+#ifndef V8_BASE_WIN32_HEADERS_H_
+#define V8_BASE_WIN32_HEADERS_H_
#ifndef WIN32_LEAN_AND_MEAN
// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
@@ -94,8 +71,9 @@
#undef NONE
#undef ANY
#undef IGNORE
+#undef STRICT
#undef GetObject
#undef CreateSemaphore
#undef Yield
-#endif // V8_WIN32_HEADERS_H_
+#endif // V8_BASE_WIN32_HEADERS_H_
diff --git a/chromium/v8/src/bignum-dtoa.cc b/chromium/v8/src/bignum-dtoa.cc
index c5ad4420c81..8860a9bddc7 100644
--- a/chromium/v8/src/bignum-dtoa.cc
+++ b/chromium/v8/src/bignum-dtoa.cc
@@ -1,40 +1,17 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <cmath>
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "utils.h"
+#include "include/v8stdint.h"
+#include "src/checks.h"
+#include "src/utils.h"
-#include "bignum-dtoa.h"
+#include "src/bignum-dtoa.h"
-#include "bignum.h"
-#include "double.h"
+#include "src/bignum.h"
+#include "src/double.h"
namespace v8 {
namespace internal {
@@ -394,7 +371,8 @@ static int EstimatePower(int exponent) {
// For doubles len(f) == 53 (don't forget the hidden bit).
const int kSignificandSize = 53;
- double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
+ double estimate =
+ std::ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
return static_cast<int>(estimate);
}
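The only behavioral content of this hunk is the std::ceil qualification; the estimate itself is unchanged. A worked instance, assuming k1Log10 is the 1/lg(10) ~ 0.30103 constant defined earlier in EstimatePower:

    // For v == 1.0 the significand is 2^52, so exponent == -52 and
    //   estimate = std::ceil((-52 + 53 - 1) * k1Log10 - 1e-10)
    //            = std::ceil(-1e-10)
    //            = 0,
    // i.e. 1.0 is written as f * 10^0. The -1e-10 nudge keeps inputs lying
    // exactly on a power of ten from being rounded one decimal order too
    // high by floating-point error in the multiplication.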
diff --git a/chromium/v8/src/bignum-dtoa.h b/chromium/v8/src/bignum-dtoa.h
index 93ec1f77061..fc160aecd4f 100644
--- a/chromium/v8/src/bignum-dtoa.h
+++ b/chromium/v8/src/bignum-dtoa.h
@@ -1,29 +1,6 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_BIGNUM_DTOA_H_
#define V8_BIGNUM_DTOA_H_
diff --git a/chromium/v8/src/bignum.cc b/chromium/v8/src/bignum.cc
index af0edde6d58..a44a6725865 100644
--- a/chromium/v8/src/bignum.cc
+++ b/chromium/v8/src/bignum.cc
@@ -1,33 +1,10 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "utils.h"
-#include "bignum.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8stdint.h"
+#include "src/utils.h"
+#include "src/bignum.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/bignum.h b/chromium/v8/src/bignum.h
index dcc4fa702aa..744768f874e 100644
--- a/chromium/v8/src/bignum.h
+++ b/chromium/v8/src/bignum.h
@@ -1,29 +1,6 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_BIGNUM_H_
#define V8_BIGNUM_H_
diff --git a/chromium/v8/src/bootstrapper.cc b/chromium/v8/src/bootstrapper.cc
index 2b2f7c71931..4d7ce52a127 100644
--- a/chromium/v8/src/bootstrapper.cc
+++ b/chromium/v8/src/bootstrapper.cc
@@ -1,56 +1,24 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "isolate-inl.h"
-#include "macro-assembler.h"
-#include "natives.h"
-#include "objects-visiting.h"
-#include "platform.h"
-#include "snapshot.h"
-#include "trig-table.h"
-#include "extensions/free-buffer-extension.h"
-#include "extensions/externalize-string-extension.h"
-#include "extensions/gc-extension.h"
-#include "extensions/statistics-extension.h"
-#include "code-stubs.h"
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bootstrapper.h"
+
+#include "src/accessors.h"
+#include "src/isolate-inl.h"
+#include "src/natives.h"
+#include "src/snapshot.h"
+#include "src/trig-table.h"
+#include "src/extensions/externalize-string-extension.h"
+#include "src/extensions/free-buffer-extension.h"
+#include "src/extensions/gc-extension.h"
+#include "src/extensions/statistics-extension.h"
+#include "src/extensions/trigger-failure-extension.h"
+#include "src/code-stubs.h"
namespace v8 {
namespace internal {
-
NativesExternalStringResource::NativesExternalStringResource(
Bootstrapper* bootstrapper,
const char* source,
@@ -85,8 +53,10 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
new NativesExternalStringResource(this,
source.start(),
source.length());
+ // We do not expect this to throw an exception. Change this if it does.
Handle<String> source_code =
- isolate_->factory()->NewExternalStringFromAscii(resource);
+ isolate_->factory()->NewExternalStringFromAscii(
+ resource).ToHandleChecked();
heap->natives_source_cache()->set(index, *source_code);
}
Handle<Object> cached_source(heap->natives_source_cache()->get(index),
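The ToHandleChecked() call is the visible edge of a wider API migration in this roll: factory methods now return MaybeHandle<T>, which is empty when allocation failed and an exception is pending. A sketch of the two consumption patterns, with simplified, assumed signatures:

    MaybeHandle<String> maybe =
        isolate->factory()->NewExternalStringFromAscii(resource);
    Handle<String> result;
    if (!maybe.ToHandle(&result)) {
      return MaybeHandle<String>();  // exception pending: propagate emptiness
    }
    // Where failure is considered impossible -- as the comment above states
    // for the natives source -- ToHandleChecked() unwraps or aborts:
    Handle<String> source_code = maybe.ToHandleChecked();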
@@ -100,13 +70,39 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
}
+static const char* GCFunctionName() {
+ bool flag_given = FLAG_expose_gc_as != NULL && strlen(FLAG_expose_gc_as) != 0;
+ return flag_given ? FLAG_expose_gc_as : "gc";
+}
+
+
+v8::Extension* Bootstrapper::free_buffer_extension_ = NULL;
+v8::Extension* Bootstrapper::gc_extension_ = NULL;
+v8::Extension* Bootstrapper::externalize_string_extension_ = NULL;
+v8::Extension* Bootstrapper::statistics_extension_ = NULL;
+v8::Extension* Bootstrapper::trigger_failure_extension_ = NULL;
+
+
void Bootstrapper::InitializeOncePerProcess() {
-#ifdef ADDRESS_SANITIZER
- FreeBufferExtension::Register();
-#endif
- GCExtension::Register();
- ExternalizeStringExtension::Register();
- StatisticsExtension::Register();
+ free_buffer_extension_ = new FreeBufferExtension;
+ v8::RegisterExtension(free_buffer_extension_);
+ gc_extension_ = new GCExtension(GCFunctionName());
+ v8::RegisterExtension(gc_extension_);
+ externalize_string_extension_ = new ExternalizeStringExtension;
+ v8::RegisterExtension(externalize_string_extension_);
+ statistics_extension_ = new StatisticsExtension;
+ v8::RegisterExtension(statistics_extension_);
+ trigger_failure_extension_ = new TriggerFailureExtension;
+ v8::RegisterExtension(trigger_failure_extension_);
+}
+
+
+void Bootstrapper::TearDownExtensions() {
+ delete free_buffer_extension_;
+ delete gc_extension_;
+ delete externalize_string_extension_;
+ delete statistics_extension_;
+ delete trigger_failure_extension_;
}
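Extensions change here from self-registering singletons (FreeBufferExtension::Register() and friends) to heap-allocated instances owned by Bootstrapper, which is what makes the new TearDownExtensions() possible; GCFunctionName() additionally lets the --expose_gc_as flag rename the injected gc() function. For context, a sketch of the embedder-side half of the same mechanism, using the public v8.h API of this era (treat names and signatures as assumptions):

    #include <v8.h>

    // A process-wide extension, opted into per context, mirroring how the
    // bootstrapper registers its built-in extensions above.
    class HelloExtension : public v8::Extension {
     public:
      HelloExtension()
          : v8::Extension("v8/hello", "native function hello();") {}
      virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
          v8::Isolate* isolate, v8::Handle<v8::String> name) {
        return v8::FunctionTemplate::New(isolate, Hello);
      }
      static void Hello(const v8::FunctionCallbackInfo<v8::Value>& info) {
        info.GetReturnValue().Set(42);
      }
    };

    // Usage: v8::RegisterExtension(new HelloExtension);
    //        const char* names[] = {"v8/hello"};
    //        v8::ExtensionConfiguration config(1, names);
    //        v8::Handle<v8::Context> ctx = v8::Context::New(isolate, &config);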
@@ -125,7 +121,7 @@ char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
void Bootstrapper::TearDown() {
if (delete_these_non_arrays_on_tear_down_ != NULL) {
int len = delete_these_non_arrays_on_tear_down_->length();
- ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
+ ASSERT(len < 24); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete delete_these_non_arrays_on_tear_down_->at(i);
delete_these_non_arrays_on_tear_down_->at(i) = NULL;
@@ -171,7 +167,9 @@ class Genesis BASE_EMBEDDED {
// Creates the empty function. Used for creating a context from scratch.
Handle<JSFunction> CreateEmptyFunction(Isolate* isolate);
// Creates the ThrowTypeError function. ECMA 5th Ed. 13.2.3
- Handle<JSFunction> GetThrowTypeErrorFunction();
+ Handle<JSFunction> GetStrictPoisonFunction();
+ // Poison for sloppy generator function arguments/callee.
+ Handle<JSFunction> GetGeneratorPoisonFunction();
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
@@ -204,14 +202,18 @@ class Genesis BASE_EMBEDDED {
// Installs the contents of the native .js files on the global objects.
// Used for creating a context from scratch.
void InstallNativeFunctions();
+ void InstallExperimentalBuiltinFunctionIds();
void InstallExperimentalNativeFunctions();
Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
const char* name,
ElementsKind elements_kind);
bool InstallNatives();
- Handle<JSFunction> InstallTypedArray(const char* name,
- ElementsKind elementsKind);
+ void InstallTypedArray(
+ const char* name,
+ ElementsKind elements_kind,
+ Handle<JSFunction>* fun,
+ Handle<Map>* external_map);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
@@ -236,13 +238,18 @@ class Genesis BASE_EMBEDDED {
// provided.
static bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
+ static bool InstallAutoExtensions(Isolate* isolate,
+ ExtensionStates* extension_states);
+ static bool InstallRequestedExtensions(Isolate* isolate,
+ v8::ExtensionConfiguration* extensions,
+ ExtensionStates* extension_states);
static bool InstallExtension(Isolate* isolate,
const char* name,
ExtensionStates* extension_states);
static bool InstallExtension(Isolate* isolate,
v8::RegisteredExtension* current,
ExtensionStates* extension_states);
- static void InstallSpecialObjects(Handle<Context> native_context);
+ static bool InstallSpecialObjects(Handle<Context> native_context);
bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
@@ -255,24 +262,32 @@ class Genesis BASE_EMBEDDED {
void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
- enum PrototypePropertyMode {
- DONT_ADD_PROTOTYPE,
- ADD_READONLY_PROTOTYPE,
- ADD_WRITEABLE_PROTOTYPE
+ enum FunctionMode {
+ // With prototype.
+ FUNCTION_WITH_WRITEABLE_PROTOTYPE,
+ FUNCTION_WITH_READONLY_PROTOTYPE,
+ // Without prototype.
+ FUNCTION_WITHOUT_PROTOTYPE,
+ BOUND_FUNCTION
};
- Handle<Map> CreateFunctionMap(PrototypePropertyMode prototype_mode);
+ static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
+ return (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
+ function_mode == FUNCTION_WITH_READONLY_PROTOTYPE);
+ }
+
+ Handle<Map> CreateFunctionMap(FunctionMode function_mode);
void SetFunctionInstanceDescriptor(Handle<Map> map,
- PrototypePropertyMode prototypeMode);
+ FunctionMode function_mode);
void MakeFunctionInstancePrototypeWritable();
- Handle<Map> CreateStrictModeFunctionMap(
- PrototypePropertyMode prototype_mode,
+ Handle<Map> CreateStrictFunctionMap(
+ FunctionMode function_mode,
Handle<JSFunction> empty_function);
void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
- PrototypePropertyMode propertyMode);
+ FunctionMode function_mode);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@@ -295,9 +310,10 @@ class Genesis BASE_EMBEDDED {
// prototype for the processing of JS builtins. Later the function maps are
// replaced in order to make prototype writable. These are the final, writable
// prototype maps.
- Handle<Map> function_map_writable_prototype_;
- Handle<Map> strict_mode_function_map_writable_prototype_;
- Handle<JSFunction> throw_type_error_function;
+ Handle<Map> sloppy_function_map_writable_prototype_;
+ Handle<Map> strict_function_map_writable_prototype_;
+ Handle<JSFunction> strict_poison_function;
+ Handle<JSFunction> generator_poison_function;
BootstrapperActive active_;
friend class Bootstrapper;
@@ -326,9 +342,8 @@ Handle<Context> Bootstrapper::CreateEnvironment(
static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
- Factory* factory = object->GetIsolate()->factory();
Handle<Map> old_to_map = Handle<Map>(object->map());
- Handle<Map> new_to_map = factory->CopyMap(old_to_map);
+ Handle<Map> new_to_map = Map::Copy(old_to_map);
new_to_map->set_prototype(*proto);
object->set_map(*new_to_map);
}
@@ -346,22 +361,17 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
const char* name,
InstanceType type,
int instance_size,
- Handle<JSObject> prototype,
- Builtins::Name call,
- bool install_initial_map,
- bool set_instance_class_name) {
+ MaybeHandle<JSObject> maybe_prototype,
+ Builtins::Name call) {
Isolate* isolate = target->GetIsolate();
Factory* factory = isolate->factory();
Handle<String> internalized_name = factory->InternalizeUtf8String(name);
Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
- Handle<JSFunction> function = prototype.is_null() ?
- factory->NewFunctionWithoutPrototype(internalized_name, call_code) :
- factory->NewFunctionWithPrototype(internalized_name,
- type,
- instance_size,
- prototype,
- call_code,
- install_initial_map);
+ Handle<JSObject> prototype;
+ Handle<JSFunction> function = maybe_prototype.ToHandle(&prototype)
+ ? factory->NewFunction(internalized_name, call_code, prototype,
+ type, instance_size)
+ : factory->NewFunctionWithoutPrototype(internalized_name, call_code);
PropertyAttributes attributes;
if (target->IsJSBuiltinsObject()) {
attributes =
@@ -369,10 +379,9 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
} else {
attributes = DONT_ENUM;
}
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- target, internalized_name, function, attributes));
- if (set_instance_class_name) {
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ target, internalized_name, function, attributes).Check();
+ if (target->IsJSGlobalObject()) {
function->shared()->set_instance_class_name(*internalized_name);
}
function->shared()->set_native(true);
@@ -381,54 +390,58 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
void Genesis::SetFunctionInstanceDescriptor(
- Handle<Map> map, PrototypePropertyMode prototypeMode) {
- int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
- Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size));
- DescriptorArray::WhitenessWitness witness(*descriptors);
-
- Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
- Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
- Handle<Foreign> args(factory()->NewForeign(&Accessors::FunctionArguments));
- Handle<Foreign> caller(factory()->NewForeign(&Accessors::FunctionCaller));
- Handle<Foreign> prototype;
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
- prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
- }
+ Handle<Map> map, FunctionMode function_mode) {
+ int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
+ Map::EnsureDescriptorSlack(map, size);
+
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
- map->set_instance_descriptors(*descriptors);
+ Handle<AccessorInfo> length =
+ Accessors::FunctionLengthInfo(isolate(), attribs);
{ // Add length.
- CallbacksDescriptor d(*factory()->length_string(), *length, attribs);
- map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(length->name())),
+ length, attribs);
+ map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> name =
+ Accessors::FunctionNameInfo(isolate(), attribs);
{ // Add name.
- CallbacksDescriptor d(*factory()->name_string(), *name, attribs);
- map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(name->name())),
+ name, attribs);
+ map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> args =
+ Accessors::FunctionArgumentsInfo(isolate(), attribs);
{ // Add arguments.
- CallbacksDescriptor d(*factory()->arguments_string(), *args, attribs);
- map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(args->name())),
+ args, attribs);
+ map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> caller =
+ Accessors::FunctionCallerInfo(isolate(), attribs);
{ // Add caller.
- CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs);
- map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(caller->name())),
+ caller, attribs);
+ map->AppendDescriptor(&d);
}
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
- // Add prototype.
- if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
+ if (IsFunctionModeWithPrototype(function_mode)) {
+ if (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
}
- CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs);
- map->AppendDescriptor(&d, witness);
+ Handle<AccessorInfo> prototype =
+ Accessors::FunctionPrototypeInfo(isolate(), attribs);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(prototype->name())),
+ prototype, attribs);
+ map->AppendDescriptor(&d);
}
}
-Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
+Handle<Map> Genesis::CreateFunctionMap(FunctionMode function_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetFunctionInstanceDescriptor(map, prototype_mode);
- map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
+ SetFunctionInstanceDescriptor(map, function_mode);
+ map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode));
return map;
}
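Each of the five descriptor blocks above replaces a raw Foreign pointer, installed under a DescriptorArray::WhitenessWitness, with a self-describing AccessorInfo whose name travels with it; Map::EnsureDescriptorSlack reserves the space up front. The repeated shape, written as a hypothetical helper that is not in the patch:

    static void AddCallbackDescriptor(Handle<Map> map,
                                      Handle<AccessorInfo> info,
                                      PropertyAttributes attribs) {
      CallbacksDescriptor d(Handle<Name>(Name::cast(info->name())),
                            info, attribs);
      map->AppendDescriptor(&d);  // the witness argument is gone here
    }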
@@ -440,31 +453,36 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Functions with this map will not have a 'prototype' property, and
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
- CreateFunctionMap(DONT_ADD_PROTOTYPE);
- native_context()->set_function_without_prototype_map(
+ CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
+ native_context()->set_sloppy_function_without_prototype_map(
*function_without_prototype_map);
// Allocate the function map. This map is temporary, used only for processing
// of builtins.
// Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
- native_context()->set_function_map(*function_map);
+ Handle<Map> function_map =
+ CreateFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE);
+ native_context()->set_sloppy_function_map(*function_map);
+ native_context()->set_sloppy_function_with_readonly_prototype_map(
+ *function_map);
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- function_map_writable_prototype_ = CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
+ sloppy_function_map_writable_prototype_ =
+ CreateFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE);
Factory* factory = isolate->factory();
Handle<String> object_name = factory->Object_string();
{ // --- O b j e c t ---
- Handle<JSFunction> object_fun =
- factory->NewFunction(object_name, factory->null_value());
+ Handle<JSFunction> object_fun = factory->NewFunction(object_name);
Handle<Map> object_function_map =
factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
object_fun->set_initial_map(*object_function_map);
object_function_map->set_constructor(*object_fun);
+ object_function_map->set_unused_property_fields(
+ JSObject::kInitialGlobalObjectUnusedPropertiesCount);
native_context()->set_object_function(*object_fun);
@@ -485,17 +503,12 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// 262 15.3.4.
Handle<String> empty_string =
factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
- Handle<JSFunction> empty_function =
- factory->NewFunctionWithoutPrototype(empty_string, CLASSIC_MODE);
+ Handle<Code> code(isolate->builtins()->builtin(Builtins::kEmptyFunction));
+ Handle<JSFunction> empty_function = factory->NewFunctionWithoutPrototype(
+ empty_string, code);
// --- E m p t y ---
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kEmptyFunction));
- empty_function->set_code(*code);
- empty_function->shared()->set_code(*code);
- Handle<String> source =
- factory->NewStringFromOneByte(STATIC_ASCII_VECTOR("() {}"));
+ Handle<String> source = factory->NewStringFromStaticAscii("() {}");
Handle<Script> script = factory->NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
empty_function->shared()->set_script(*script);
@@ -504,13 +517,14 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
empty_function->shared()->DontAdaptArguments();
// Set prototypes for the function maps.
- native_context()->function_map()->set_prototype(*empty_function);
- native_context()->function_without_prototype_map()->
+ native_context()->sloppy_function_map()->set_prototype(*empty_function);
+ native_context()->sloppy_function_without_prototype_map()->
set_prototype(*empty_function);
- function_map_writable_prototype_->set_prototype(*empty_function);
+ sloppy_function_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
- Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
+ Handle<Map> empty_function_map =
+ CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
empty_function_map->set_prototype(
native_context()->object_function()->prototype());
empty_function->set_map(*empty_function_map);
@@ -519,79 +533,102 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
void Genesis::SetStrictFunctionInstanceDescriptor(
- Handle<Map> map, PrototypePropertyMode prototypeMode) {
- int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
- Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size));
- DescriptorArray::WhitenessWitness witness(*descriptors);
+ Handle<Map> map, FunctionMode function_mode) {
+ int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
+ Map::EnsureDescriptorSlack(map, size);
- Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
- Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
Handle<AccessorPair> arguments(factory()->NewAccessorPair());
Handle<AccessorPair> caller(factory()->NewAccessorPair());
- Handle<Foreign> prototype;
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
- prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
- }
PropertyAttributes rw_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- map->set_instance_descriptors(*descriptors);
- { // Add length.
- CallbacksDescriptor d(*factory()->length_string(), *length, ro_attribs);
- map->AppendDescriptor(&d, witness);
- }
+ // Add length.
+ if (function_mode == BOUND_FUNCTION) {
+ Handle<String> length_string = isolate()->factory()->length_string();
+ FieldDescriptor d(length_string, 0, ro_attribs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ } else {
+ ASSERT(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
+ function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
+ function_mode == FUNCTION_WITHOUT_PROTOTYPE);
+ Handle<AccessorInfo> length =
+ Accessors::FunctionLengthInfo(isolate(), ro_attribs);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(length->name())),
+ length, ro_attribs);
+ map->AppendDescriptor(&d);
+ }
+ Handle<AccessorInfo> name =
+ Accessors::FunctionNameInfo(isolate(), ro_attribs);
{ // Add name.
- CallbacksDescriptor d(*factory()->name_string(), *name, rw_attribs);
- map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(name->name())),
+ name, ro_attribs);
+ map->AppendDescriptor(&d);
}
{ // Add arguments.
- CallbacksDescriptor d(*factory()->arguments_string(), *arguments,
+ CallbacksDescriptor d(factory()->arguments_string(), arguments,
rw_attribs);
- map->AppendDescriptor(&d, witness);
+ map->AppendDescriptor(&d);
}
{ // Add caller.
- CallbacksDescriptor d(*factory()->caller_string(), *caller, rw_attribs);
- map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(factory()->caller_string(), caller, rw_attribs);
+ map->AppendDescriptor(&d);
}
- if (prototypeMode != DONT_ADD_PROTOTYPE) {
+ if (IsFunctionModeWithPrototype(function_mode)) {
// Add prototype.
PropertyAttributes attribs =
- prototypeMode == ADD_WRITEABLE_PROTOTYPE ? rw_attribs : ro_attribs;
- CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs);
- map->AppendDescriptor(&d, witness);
+ function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ? rw_attribs
+ : ro_attribs;
+ Handle<AccessorInfo> prototype =
+ Accessors::FunctionPrototypeInfo(isolate(), attribs);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(prototype->name())),
+ prototype, attribs);
+ map->AppendDescriptor(&d);
}
}
// ECMAScript 5th Edition, 13.2.3
-Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
- if (throw_type_error_function.is_null()) {
+Handle<JSFunction> Genesis::GetStrictPoisonFunction() {
+ if (strict_poison_function.is_null()) {
Handle<String> name = factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("ThrowTypeError"));
- throw_type_error_function =
- factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
Handle<Code> code(isolate()->builtins()->builtin(
Builtins::kStrictModePoisonPill));
- throw_type_error_function->set_map(
- native_context()->function_map());
- throw_type_error_function->set_code(*code);
- throw_type_error_function->shared()->set_code(*code);
- throw_type_error_function->shared()->DontAdaptArguments();
+ strict_poison_function = factory()->NewFunctionWithoutPrototype(name, code);
+ strict_poison_function->set_map(native_context()->sloppy_function_map());
+ strict_poison_function->shared()->DontAdaptArguments();
+
+ JSObject::PreventExtensions(strict_poison_function).Assert();
+ }
+ return strict_poison_function;
+}
- JSObject::PreventExtensions(throw_type_error_function);
+
+Handle<JSFunction> Genesis::GetGeneratorPoisonFunction() {
+ if (generator_poison_function.is_null()) {
+ Handle<String> name = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("ThrowTypeError"));
+ Handle<Code> code(isolate()->builtins()->builtin(
+ Builtins::kGeneratorPoisonPill));
+ generator_poison_function = factory()->NewFunctionWithoutPrototype(
+ name, code);
+ generator_poison_function->set_map(native_context()->sloppy_function_map());
+ generator_poison_function->shared()->DontAdaptArguments();
+
+ JSObject::PreventExtensions(generator_poison_function).Assert();
}
- return throw_type_error_function;
+ return generator_poison_function;
}
-Handle<Map> Genesis::CreateStrictModeFunctionMap(
- PrototypePropertyMode prototype_mode,
+Handle<Map> Genesis::CreateStrictFunctionMap(
+ FunctionMode function_mode,
Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetStrictFunctionInstanceDescriptor(map, prototype_mode);
- map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
+ SetStrictFunctionInstanceDescriptor(map, function_mode);
+ map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode));
map->set_prototype(*empty_function);
return map;
}
@@ -599,28 +636,32 @@ Handle<Map> Genesis::CreateStrictModeFunctionMap(
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the prototype-less strict mode instances.
- Handle<Map> strict_mode_function_without_prototype_map =
- CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_without_prototype_map(
- *strict_mode_function_without_prototype_map);
+ Handle<Map> strict_function_without_prototype_map =
+ CreateStrictFunctionMap(FUNCTION_WITHOUT_PROTOTYPE, empty);
+ native_context()->set_strict_function_without_prototype_map(
+ *strict_function_without_prototype_map);
// Allocate map for the strict mode functions. This map is temporary, used
// only for processing of builtins.
// Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> strict_mode_function_map =
- CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_map(
- *strict_mode_function_map);
+ Handle<Map> strict_function_map =
+ CreateStrictFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE, empty);
+ native_context()->set_strict_function_map(*strict_function_map);
// The final map for the strict mode functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- strict_mode_function_map_writable_prototype_ =
- CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
+ strict_function_map_writable_prototype_ =
+ CreateStrictFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
+ // Special map for bound functions.
+ Handle<Map> bound_function_map =
+ CreateStrictFunctionMap(BOUND_FUNCTION, empty);
+ native_context()->set_bound_function_map(*bound_function_map);
// Complete the callbacks.
- PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map);
- PoisonArgumentsAndCaller(strict_mode_function_map);
- PoisonArgumentsAndCaller(strict_mode_function_map_writable_prototype_);
+ PoisonArgumentsAndCaller(strict_function_without_prototype_map);
+ PoisonArgumentsAndCaller(strict_function_map);
+ PoisonArgumentsAndCaller(strict_function_map_writable_prototype_);
+ PoisonArgumentsAndCaller(bound_function_map);
}
@@ -635,9 +676,20 @@ static void SetAccessors(Handle<Map> map,
}
+static void ReplaceAccessors(Handle<Map> map,
+ Handle<String> name,
+ PropertyAttributes attributes,
+ Handle<AccessorPair> accessor_pair) {
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int idx = descriptors->SearchWithCache(*name, *map);
+ CallbacksDescriptor descriptor(name, accessor_pair, attributes);
+ descriptors->Replace(idx, &descriptor);
+}
+
+
void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) {
- SetAccessors(map, factory()->arguments_string(), GetThrowTypeErrorFunction());
- SetAccessors(map, factory()->caller_string(), GetThrowTypeErrorFunction());
+ SetAccessors(map, factory()->arguments_string(), GetStrictPoisonFunction());
+ SetAccessors(map, factory()->caller_string(), GetStrictPoisonFunction());
}
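GetThrowTypeErrorFunction is split in two because sloppy generator functions now get their own poison pill (Builtins::kGeneratorPoisonPill), distinct from the strict-mode one; PoisonArgumentsAndCaller keeps wiring the strict variant into the function maps. The observable JavaScript effect, for illustration:

    // (function() { 'use strict'; }).caller     // throws TypeError
    // (function() { 'use strict'; }).arguments  // throws TypeError
    // Both accessors resolve to the poison function installed above, whose
    // body is the StrictModePoisonPill builtin.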
@@ -671,7 +723,7 @@ void Genesis::CreateRoots() {
// Allocate the message listeners object.
{
- v8::NeanderArray listeners;
+ v8::NeanderArray listeners(isolate());
native_context()->set_message_listeners(*listeners.value());
}
}
@@ -715,23 +767,22 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
Handle<String> name = Handle<String>(heap()->empty_string());
Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
Builtins::kIllegal));
- js_global_function =
- factory()->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
- JSGlobalObject::kSize, code, true);
+ js_global_function = factory()->NewFunction(
+ name, code, JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize);
// Change the constructor property of the prototype of the
// hidden global function to refer to the Object function.
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(js_global_function->instance_prototype()));
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- prototype, factory()->constructor_string(),
- isolate()->object_function(), NONE));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ prototype, factory()->constructor_string(),
+ isolate()->object_function(), NONE).Check();
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
FunctionTemplateInfo::cast(js_global_template->constructor()));
js_global_function =
factory()->CreateApiFunction(js_global_constructor,
+ factory()->the_hole_value(),
factory()->InnerGlobalObject);
}
@@ -749,9 +800,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
Handle<String> name = Handle<String>(heap()->empty_string());
Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
Builtins::kIllegal));
- global_proxy_function =
- factory()->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
- JSGlobalProxy::kSize, code, true);
+ global_proxy_function = factory()->NewFunction(
+ name, code, JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
} else {
Handle<ObjectTemplateInfo> data =
v8::Utils::OpenHandle(*global_template);
@@ -759,6 +809,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
FunctionTemplateInfo::cast(data->constructor()));
global_proxy_function =
factory()->CreateApiFunction(global_constructor,
+ factory()->the_hole_value(),
factory()->OuterGlobalObject);
}
@@ -770,15 +821,17 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
// Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
// Return the global proxy.
+ Handle<JSGlobalProxy> global_proxy;
if (global_object.location() != NULL) {
ASSERT(global_object->IsJSGlobalProxy());
- return ReinitializeJSGlobalProxy(
- global_proxy_function,
- Handle<JSGlobalProxy>::cast(global_object));
+ global_proxy = Handle<JSGlobalProxy>::cast(global_object);
+ factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
} else {
- return Handle<JSGlobalProxy>::cast(
+ global_proxy = Handle<JSGlobalProxy>::cast(
factory()->NewJSObject(global_proxy_function, TENURED));
+ global_proxy->set_hash(heap()->undefined_value());
}
+ return global_proxy;
}
@@ -802,11 +855,11 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
native_context()->set_security_token(*inner_global);
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- ForceSetProperty(builtins_global,
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("global")),
- inner_global,
- attributes);
+ Runtime::ForceSetObjectProperty(builtins_global,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("global")),
+ inner_global,
+ attributes).Assert();
// Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
TransferNamedProperties(inner_global_from_snapshot, inner_global);
@@ -836,22 +889,21 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Heap* heap = isolate->heap();
Handle<String> object_name = factory->Object_string();
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- inner_global, object_name,
- isolate->object_function(), DONT_ENUM));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ inner_global, object_name,
+ isolate->object_function(), DONT_ENUM).Check();
- Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
+ Handle<JSObject> global(native_context()->global_object());
// Install global Function object
InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- empty_function, Builtins::kIllegal, true, true);
+ empty_function, Builtins::kIllegal);
{ // --- A r r a y ---
Handle<JSFunction> array_function =
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
isolate->initial_object_prototype(),
- Builtins::kArrayCode, true, true);
+ Builtins::kArrayCode);
array_function->shared()->DontAdaptArguments();
array_function->shared()->set_function_data(Smi::FromInt(kArrayCode));
@@ -864,19 +916,18 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// This assert protects an optimization in
// HGraphBuilder::JSArrayBuilder::EmitMapCode()
ASSERT(initial_map->elements_kind() == GetInitialFastElementsKind());
+ Map::EnsureDescriptorSlack(initial_map, 1);
- Handle<DescriptorArray> array_descriptors(
- factory->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*array_descriptors);
-
- Handle<Foreign> array_length(factory->NewForeign(&Accessors::ArrayLength));
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
- initial_map->set_instance_descriptors(*array_descriptors);
+ Handle<AccessorInfo> array_length =
+ Accessors::ArrayLengthInfo(isolate, attribs);
{ // Add length.
- CallbacksDescriptor d(*factory->length_string(), *array_length, attribs);
- array_function->initial_map()->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(
+ Handle<Name>(Name::cast(array_length->name())),
+ array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d);
}
// array_function is used internally. JS code creating array object should
@@ -888,7 +939,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Cache the array maps, needed by ArrayConstructorStub
CacheInitialJSArrayMaps(native_context(), initial_map);
ArrayConstructorStub array_constructor_stub(isolate);
- Handle<Code> code = array_constructor_stub.GetCode(isolate);
+ Handle<Code> code = array_constructor_stub.GetCode();
array_function->shared()->set_construct_stub(*code);
}
@@ -896,7 +947,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> number_fun =
InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ Builtins::kIllegal);
native_context()->set_number_function(*number_fun);
}
@@ -904,7 +955,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> boolean_fun =
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ Builtins::kIllegal);
native_context()->set_boolean_function(*boolean_fun);
}
@@ -912,26 +963,23 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> string_fun =
InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ Builtins::kIllegal);
string_fun->shared()->set_construct_stub(
isolate->builtins()->builtin(Builtins::kStringConstructCode));
native_context()->set_string_function(*string_fun);
Handle<Map> string_map =
Handle<Map>(native_context()->string_function()->initial_map());
- Handle<DescriptorArray> string_descriptors(
- factory->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*string_descriptors);
+ Map::EnsureDescriptorSlack(string_map, 1);
- Handle<Foreign> string_length(
- factory->NewForeign(&Accessors::StringLength));
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
- string_map->set_instance_descriptors(*string_descriptors);
+ Handle<AccessorInfo> string_length(
+ Accessors::StringLengthInfo(isolate, attribs));
{ // Add length.
- CallbacksDescriptor d(*factory->length_string(), *string_length, attribs);
- string_map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(factory->length_string(), string_length, attribs);
+ string_map->AppendDescriptor(&d);
}
}
@@ -940,7 +988,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> date_fun =
InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ Builtins::kIllegal);
native_context()->set_date_function(*date_fun);
}
@@ -951,7 +999,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> regexp_fun =
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ Builtins::kIllegal);
native_context()->set_regexp_function(*regexp_fun);
ASSERT(regexp_fun->has_initial_map());
@@ -961,51 +1009,49 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 5);
- DescriptorArray::WhitenessWitness witness(*descriptors);
- initial_map->set_instance_descriptors(*descriptors);
+ Map::EnsureDescriptorSlack(initial_map, 5);
{
// ECMA-262, section 15.10.7.1.
- FieldDescriptor field(heap->source_string(),
+ FieldDescriptor field(factory->source_string(),
JSRegExp::kSourceFieldIndex,
final,
Representation::Tagged());
- initial_map->AppendDescriptor(&field, witness);
+ initial_map->AppendDescriptor(&field);
}
{
// ECMA-262, section 15.10.7.2.
- FieldDescriptor field(heap->global_string(),
+ FieldDescriptor field(factory->global_string(),
JSRegExp::kGlobalFieldIndex,
final,
Representation::Tagged());
- initial_map->AppendDescriptor(&field, witness);
+ initial_map->AppendDescriptor(&field);
}
{
// ECMA-262, section 15.10.7.3.
- FieldDescriptor field(heap->ignore_case_string(),
+ FieldDescriptor field(factory->ignore_case_string(),
JSRegExp::kIgnoreCaseFieldIndex,
final,
Representation::Tagged());
- initial_map->AppendDescriptor(&field, witness);
+ initial_map->AppendDescriptor(&field);
}
{
// ECMA-262, section 15.10.7.4.
- FieldDescriptor field(heap->multiline_string(),
+ FieldDescriptor field(factory->multiline_string(),
JSRegExp::kMultilineFieldIndex,
final,
Representation::Tagged());
- initial_map->AppendDescriptor(&field, witness);
+ initial_map->AppendDescriptor(&field);
}
{
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- FieldDescriptor field(heap->last_index_string(),
+ FieldDescriptor field(factory->last_index_string(),
JSRegExp::kLastIndexFieldIndex,
writable,
Representation::Tagged());
- initial_map->AppendDescriptor(&field, witness);
+ initial_map->AppendDescriptor(&field);
}
initial_map->set_inobject_properties(5);
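Each FieldDescriptor above binds a named RegExp property to a fixed in-object slot, which is why set_inobject_properties(5) follows immediately. The resulting layout, assuming the JSRegExp::k*FieldIndex constants run 0..4 in declaration order:

    // slot 0  source     DONT_ENUM | DONT_DELETE | READ_ONLY
    // slot 1  global     DONT_ENUM | DONT_DELETE | READ_ONLY
    // slot 2  ignoreCase DONT_ENUM | DONT_DELETE | READ_ONLY
    // slot 3  multiline  DONT_ENUM | DONT_DELETE | READ_ONLY
    // slot 4  lastIndex  DONT_ENUM | DONT_DELETE (writable)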
@@ -1016,7 +1062,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
// RegExp prototype object is itself a RegExp.
- Handle<Map> proto_map = factory->CopyMap(initial_map);
+ Handle<Map> proto_map = Map::Copy(initial_map);
proto_map->set_prototype(native_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
@@ -1038,114 +1084,98 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{ // -- J S O N
Handle<String> name = factory->InternalizeUtf8String("JSON");
- Handle<JSFunction> cons = factory->NewFunction(name,
- factory->the_hole_value());
+ Handle<JSFunction> cons = factory->NewFunction(name);
JSFunction::SetInstancePrototype(cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- global, name, json_object, DONT_ENUM));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ global, name, json_object, DONT_ENUM).Check();
native_context()->set_json_object(*json_object);
}
- { // -- A r r a y B u f f e r
+ { // -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun =
InstallFunction(
global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSizeWithInternalFields,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ Builtins::kIllegal);
native_context()->set_array_buffer_fun(*array_buffer_fun);
}
- { // -- T y p e d A r r a y s
- Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
- EXTERNAL_BYTE_ELEMENTS);
- native_context()->set_int8_array_fun(*int8_fun);
- Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
- native_context()->set_uint8_array_fun(*uint8_fun);
- Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
- EXTERNAL_SHORT_ELEMENTS);
- native_context()->set_int16_array_fun(*int16_fun);
- Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
- native_context()->set_uint16_array_fun(*uint16_fun);
- Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
- EXTERNAL_INT_ELEMENTS);
- native_context()->set_int32_array_fun(*int32_fun);
- Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
- EXTERNAL_UNSIGNED_INT_ELEMENTS);
- native_context()->set_uint32_array_fun(*uint32_fun);
- Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
- EXTERNAL_FLOAT_ELEMENTS);
- native_context()->set_float_array_fun(*float_fun);
- Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
- EXTERNAL_DOUBLE_ELEMENTS);
- native_context()->set_double_array_fun(*double_fun);
- Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
- EXTERNAL_PIXEL_ELEMENTS);
- native_context()->set_uint8c_array_fun(*uint8c_fun);
+ { // -- T y p e d A r r a y s
+#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ Handle<JSFunction> fun; \
+ Handle<Map> external_map; \
+ InstallTypedArray(#Type "Array", \
+ TYPE##_ELEMENTS, \
+ &fun, \
+ &external_map); \
+ native_context()->set_##type##_array_fun(*fun); \
+ native_context()->set_##type##_array_external_map(*external_map); \
+ }
+ TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
+#undef INSTALL_TYPED_ARRAY
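+// TYPED_ARRAYS is an X-macro defined elsewhere in v8 (objects.h in this
+// era); its shape, abridged and to be treated as a sketch:
+//   #define TYPED_ARRAYS(V)                     \
+//     V(Uint8, uint8, UINT8, uint8_t, 1)        \
+//     V(Int8, int8, INT8, int8_t, 1)            \
+//     ...
+//     V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
+// Each expansion of INSTALL_TYPED_ARRAY therefore installs one
+// "<Type>Array" constructor with <TYPE>_ELEMENTS and records both the
+// function and the external-array map on the native context, replacing
+// the nine hand-written installs deleted above.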
Handle<JSFunction> data_view_fun =
InstallFunction(
global, "DataView", JS_DATA_VIEW_TYPE,
JSDataView::kSizeWithInternalFields,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ Builtins::kIllegal);
native_context()->set_data_view_fun(*data_view_fun);
}
+ // -- W e a k M a p
+ InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ // -- W e a k S e t
+ InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+
{ // --- arguments_boilerplate_
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
Handle<String> arguments_string = factory->InternalizeOneByteString(
STATIC_ASCII_VECTOR("Arguments"));
- Handle<Code> code = Handle<Code>(
- isolate->builtins()->builtin(Builtins::kIllegal));
- Handle<JSObject> prototype =
- Handle<JSObject>(
- JSObject::cast(native_context()->object_function()->prototype()));
+ Handle<Code> code(isolate->builtins()->builtin(Builtins::kIllegal));
- Handle<JSFunction> function =
- factory->NewFunctionWithPrototype(arguments_string,
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- prototype,
- code,
- false);
+ Handle<JSFunction> function = factory->NewFunctionWithoutPrototype(
+ arguments_string, code);
ASSERT(!function->has_initial_map());
function->shared()->set_instance_class_name(*arguments_string);
function->shared()->set_expected_nof_properties(2);
+ function->set_prototype_or_initial_map(
+ native_context()->object_function()->prototype());
Handle<JSObject> result = factory->NewJSObject(function);
- native_context()->set_arguments_boilerplate(*result);
+ native_context()->set_sloppy_arguments_boilerplate(*result);
// Note: length must be added as the first property and
// callee must be added as the second property.
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->length_string(),
- factory->undefined_value(), DONT_ENUM,
- Object::FORCE_TAGGED, FORCE_FIELD));
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->callee_string(),
- factory->undefined_value(), DONT_ENUM,
- Object::FORCE_TAGGED, FORCE_FIELD));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ result, factory->length_string(),
+ factory->undefined_value(), DONT_ENUM,
+ Object::FORCE_TAGGED, FORCE_FIELD).Check();
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ result, factory->callee_string(),
+ factory->undefined_value(), DONT_ENUM,
+ Object::FORCE_TAGGED, FORCE_FIELD).Check();
#ifdef DEBUG
LookupResult lookup(isolate);
- result->LocalLookup(heap->callee_string(), &lookup);
+ result->LookupOwn(factory->callee_string(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex);
+ ASSERT(lookup.GetFieldIndex().property_index() ==
+ Heap::kArgumentsCalleeIndex);
- result->LocalLookup(heap->length_string(), &lookup);
+ result->LookupOwn(factory->length_string(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
+ ASSERT(lookup.GetFieldIndex().property_index() ==
+ Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1159,22 +1189,23 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{ // --- aliased_arguments_boilerplate_
// Set up a well-formed parameter map to make assertions happy.
Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set_map(heap->non_strict_arguments_elements_map());
+ elements->set_map(heap->sloppy_arguments_elements_map());
Handle<FixedArray> array;
array = factory->NewFixedArray(0);
elements->set(0, *array);
array = factory->NewFixedArray(0);
elements->set(1, *array);
- Handle<Map> old_map(native_context()->arguments_boilerplate()->map());
- Handle<Map> new_map = factory->CopyMap(old_map);
+ Handle<Map> old_map(
+ native_context()->sloppy_arguments_boilerplate()->map());
+ Handle<Map> new_map = Map::Copy(old_map);
new_map->set_pre_allocated_property_fields(2);
Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
// Set elements kind after allocating the object because
// NewJSObjectFromMap assumes a fast elements map.
- new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
+ new_map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
result->set_elements(*elements);
- ASSERT(result->HasNonStrictArgumentsElements());
+ ASSERT(result->HasSloppyArgumentsElements());
native_context()->set_aliased_arguments_boilerplate(*result);
}
@@ -1186,39 +1217,36 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<AccessorPair> callee = factory->NewAccessorPair();
Handle<AccessorPair> caller = factory->NewAccessorPair();
- Handle<JSFunction> throw_function =
- GetThrowTypeErrorFunction();
+ Handle<JSFunction> poison = GetStrictPoisonFunction();
// Install the ThrowTypeError functions.
- callee->set_getter(*throw_function);
- callee->set_setter(*throw_function);
- caller->set_getter(*throw_function);
- caller->set_setter(*throw_function);
+ callee->set_getter(*poison);
+ callee->set_setter(*poison);
+ caller->set_getter(*poison);
+ caller->set_setter(*poison);
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kArgumentsObjectSizeStrict);
+ Heap::kStrictArgumentsObjectSize);
// Create the descriptor array for the arguments object.
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 3);
- DescriptorArray::WhitenessWitness witness(*descriptors);
- map->set_instance_descriptors(*descriptors);
+ Map::EnsureDescriptorSlack(map, 3);
{ // length
FieldDescriptor d(
- *factory->length_string(), 0, DONT_ENUM, Representation::Tagged());
- map->AppendDescriptor(&d, witness);
+ factory->length_string(), 0, DONT_ENUM, Representation::Tagged());
+ map->AppendDescriptor(&d);
}
{ // callee
- CallbacksDescriptor d(*factory->callee_string(),
- *callee,
+ CallbacksDescriptor d(factory->callee_string(),
+ callee,
attributes);
- map->AppendDescriptor(&d, witness);
+ map->AppendDescriptor(&d);
}
{ // caller
- CallbacksDescriptor d(*factory->caller_string(),
- *caller,
+ CallbacksDescriptor d(factory->caller_string(),
+ caller,
attributes);
- map->AppendDescriptor(&d, witness);
+ map->AppendDescriptor(&d);
}
map->set_function_with_prototype(true);
@@ -1226,25 +1254,25 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
map->set_pre_allocated_property_fields(1);
map->set_inobject_properties(1);
- // Copy constructor from the non-strict arguments boilerplate.
+ // Copy constructor from the sloppy arguments boilerplate.
map->set_constructor(
- native_context()->arguments_boilerplate()->map()->constructor());
+ native_context()->sloppy_arguments_boilerplate()->map()->constructor());
// Allocate the arguments boilerplate object.
Handle<JSObject> result = factory->NewJSObjectFromMap(map);
- native_context()->set_strict_mode_arguments_boilerplate(*result);
+ native_context()->set_strict_arguments_boilerplate(*result);
// Add length property only for strict mode boilerplate.
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->length_string(),
- factory->undefined_value(), DONT_ENUM));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ result, factory->length_string(),
+ factory->undefined_value(), DONT_ENUM).Check();
#ifdef DEBUG
LookupResult lookup(isolate);
- result->LocalLookup(heap->length_string(), &lookup);
+ result->LookupOwn(factory->length_string(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
+ ASSERT(lookup.GetFieldIndex().property_index() ==
+ Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
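
A note on the idiom this hunk adopts: SetOwnPropertyIgnoreAttributes now returns a MaybeHandle, so must-succeed writes assert success with .Check() rather than going through CHECK_NOT_EMPTY_HANDLE, and must-succeed reads unwrap with ToHandleChecked(). A minimal sketch of both forms, assuming the declarations in this tree (holder and name are placeholders):

    // Must-succeed write: a null MaybeHandle (pending exception) is fatal.
    JSObject::SetOwnPropertyIgnoreAttributes(
        result, factory->length_string(),
        factory->undefined_value(), DONT_ENUM).Check();

    // Must-succeed read: unwrap the MaybeHandle or die.
    Handle<Object> value = Object::GetProperty(holder, name).ToHandleChecked();
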
@@ -1258,12 +1286,9 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Create a function for the context extension objects.
Handle<Code> code = Handle<Code>(
isolate->builtins()->builtin(Builtins::kIllegal));
- Handle<JSFunction> context_extension_fun =
- factory->NewFunction(factory->empty_string(),
- JS_CONTEXT_EXTENSION_OBJECT_TYPE,
- JSObject::kHeaderSize,
- code,
- true);
+ Handle<JSFunction> context_extension_fun = factory->NewFunction(
+ factory->empty_string(), code, JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+ JSObject::kHeaderSize);
Handle<String> name = factory->InternalizeOneByteString(
STATIC_ASCII_VECTOR("context_extension"));
@@ -1277,9 +1302,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Code> code =
Handle<Code>(isolate->builtins()->builtin(
Builtins::kHandleApiCallAsFunction));
- Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, code, true);
+ Handle<JSFunction> delegate = factory->NewFunction(
+ factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize);
native_context()->set_call_as_function_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
@@ -1289,34 +1313,38 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Code> code =
Handle<Code>(isolate->builtins()->builtin(
Builtins::kHandleApiCallAsConstructor));
- Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, code, true);
+ Handle<JSFunction> delegate = factory->NewFunction(
+ factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize);
native_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
- // Initialize the out of memory slot.
- native_context()->set_out_of_memory(heap->false_value());
-
// Initialize the embedder data slot.
- Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
+ Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
native_context()->set_embedder_data(*embedder_data);
}
-Handle<JSFunction> Genesis::InstallTypedArray(
- const char* name, ElementsKind elementsKind) {
+void Genesis::InstallTypedArray(
+ const char* name,
+ ElementsKind elements_kind,
+ Handle<JSFunction>* fun,
+ Handle<Map>* external_map) {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
- Handle<JSFunction> result = InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
- JSTypedArray::kSize, isolate()->initial_object_prototype(),
- Builtins::kIllegal, false, true);
+ Handle<JSFunction> result = InstallFunction(
+ global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
+ isolate()->initial_object_prototype(), Builtins::kIllegal);
Handle<Map> initial_map = isolate()->factory()->NewMap(
- JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithInternalFields, elementsKind);
+ JS_TYPED_ARRAY_TYPE,
+ JSTypedArray::kSizeWithInternalFields,
+ elements_kind);
result->set_initial_map(*initial_map);
initial_map->set_constructor(*result);
- return result;
+ *fun = result;
+
+ ElementsKind external_kind = GetNextTransitionElementsKind(elements_kind);
+ *external_map = Map::AsElementsKind(initial_map, external_kind);
}
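
InstallTypedArray now hands back both the constructor and the map for the external-elements variant through out parameters. A usage sketch; the array name and elements-kind constant here are illustrative, not taken from this patch:

    Handle<JSFunction> fun;
    Handle<Map> external_map;
    InstallTypedArray("Uint8Array", UINT8_ELEMENTS, &fun, &external_map);
    // fun is the installed constructor; external_map is the initial map
    // transitioned to the next (external) elements kind.
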
@@ -1328,33 +1356,38 @@ void Genesis::InitializeExperimentalGlobal() {
if (FLAG_harmony_symbols) {
// --- S y m b o l ---
- Handle<JSFunction> symbol_fun =
- InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ Handle<JSFunction> symbol_fun = InstallFunction(
+ global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
+ isolate()->initial_object_prototype(), Builtins::kIllegal);
native_context()->set_symbol_function(*symbol_fun);
}
if (FLAG_harmony_collections) {
- { // -- S e t
- InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- }
- { // -- M a p
- InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- }
- { // -- W e a k M a p
- InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ // -- M a p
+ InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
+ isolate()->initial_object_prototype(), Builtins::kIllegal);
+ // -- S e t
+ InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
+ isolate()->initial_object_prototype(), Builtins::kIllegal);
+ { // -- S e t I t e r a t o r
+ Handle<JSObject> builtins(native_context()->builtins());
+ Handle<JSFunction> set_iterator_function =
+ InstallFunction(builtins, "SetIterator", JS_SET_ITERATOR_TYPE,
+ JSSetIterator::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal);
+ native_context()->set_set_iterator_map(
+ set_iterator_function->initial_map());
}
- { // -- W e a k S e t
- InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
+ { // -- M a p I t e r a t o r
+ Handle<JSObject> builtins(native_context()->builtins());
+ Handle<JSFunction> map_iterator_function =
+ InstallFunction(builtins, "MapIterator", JS_MAP_ITERATOR_TYPE,
+ JSMapIterator::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal);
+ native_context()->set_map_iterator_map(
+ map_iterator_function->initial_map());
}
}
@@ -1363,73 +1396,93 @@ void Genesis::InitializeExperimentalGlobal() {
Handle<JSObject> builtins(native_context()->builtins());
Handle<JSObject> generator_object_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSFunction> generator_function_prototype =
- InstallFunction(builtins, "GeneratorFunctionPrototype",
- JS_FUNCTION_TYPE, JSFunction::kHeaderSize,
- generator_object_prototype, Builtins::kIllegal,
- false, false);
+ Handle<JSFunction> generator_function_prototype = InstallFunction(
+ builtins, "GeneratorFunctionPrototype", JS_FUNCTION_TYPE,
+ JSFunction::kHeaderSize, generator_object_prototype,
+ Builtins::kIllegal);
InstallFunction(builtins, "GeneratorFunction",
JS_FUNCTION_TYPE, JSFunction::kSize,
- generator_function_prototype, Builtins::kIllegal,
- false, false);
+ generator_function_prototype, Builtins::kIllegal);
// Create maps for generator functions and their prototypes. Store those
// maps in the native context.
- Handle<Map> function_map(native_context()->function_map());
- Handle<Map> generator_function_map = factory()->CopyMap(function_map);
+ Handle<Map> sloppy_function_map(native_context()->sloppy_function_map());
+ Handle<Map> generator_function_map = Map::Copy(sloppy_function_map);
generator_function_map->set_prototype(*generator_function_prototype);
- native_context()->set_generator_function_map(*generator_function_map);
-
- Handle<Map> strict_mode_function_map(
- native_context()->strict_mode_function_map());
- Handle<Map> strict_mode_generator_function_map = factory()->CopyMap(
- strict_mode_function_map);
- strict_mode_generator_function_map->set_prototype(
- *generator_function_prototype);
- native_context()->set_strict_mode_generator_function_map(
- *strict_mode_generator_function_map);
-
- Handle<Map> object_map(native_context()->object_function()->initial_map());
- Handle<Map> generator_object_prototype_map = factory()->CopyMap(
- object_map, 0);
+ native_context()->set_sloppy_generator_function_map(
+ *generator_function_map);
+
+ // The "arguments" and "caller" instance properties aren't specified, so
+ // technically we could leave them out. They make even less sense for
+ // generators than for functions. Still, the same argument that it makes
+ // sense to keep them around but poisoned in strict mode applies to
+ // generators as well. With poisoned accessors, naive callers can still
+ // iterate over the properties without accessing them.
+ //
+ // We can't use PoisonArgumentsAndCaller because that mutates accessor pairs
+ // in place, and the initial state of the generator function map shares the
+ // accessor pair with sloppy functions. Also the error message should be
+ // different. Also unhappily, we can't use the API accessors to implement
+ // poisoning, because API accessors present themselves as data properties,
+ // not accessor properties, and so getOwnPropertyDescriptor raises an
+ // exception as it tries to get the values. Sadness.
+ Handle<AccessorPair> poison_pair(factory()->NewAccessorPair());
+ PropertyAttributes rw_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ Handle<JSFunction> poison_function = GetGeneratorPoisonFunction();
+ poison_pair->set_getter(*poison_function);
+ poison_pair->set_setter(*poison_function);
+ ReplaceAccessors(generator_function_map, factory()->arguments_string(),
+ rw_attribs, poison_pair);
+ ReplaceAccessors(generator_function_map, factory()->caller_string(),
+ rw_attribs, poison_pair);
+
+ Handle<Map> strict_function_map(native_context()->strict_function_map());
+ Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map);
+ // "arguments" and "caller" already poisoned.
+ strict_generator_function_map->set_prototype(*generator_function_prototype);
+ native_context()->set_strict_generator_function_map(
+ *strict_generator_function_map);
+
+ Handle<JSFunction> object_function(native_context()->object_function());
+ Handle<Map> generator_object_prototype_map = Map::Create(
+ object_function, 0);
generator_object_prototype_map->set_prototype(
*generator_object_prototype);
native_context()->set_generator_object_prototype_map(
*generator_object_prototype_map);
+ }
- // Create a map for generator result objects.
- ASSERT(object_map->inobject_properties() == 0);
- STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
- Handle<Map> generator_result_map = factory()->CopyMap(object_map,
- JSGeneratorObject::kResultPropertyCount);
- ASSERT(generator_result_map->inobject_properties() ==
- JSGeneratorObject::kResultPropertyCount);
+ if (FLAG_harmony_collections || FLAG_harmony_generators) {
+ // Collection forEach uses an iterator result object.
+ // Generators return iterator result objects.
- Handle<DescriptorArray> descriptors = factory()->NewDescriptorArray(0,
+ STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
+ Handle<JSFunction> object_function(native_context()->object_function());
+ ASSERT(object_function->initial_map()->inobject_properties() == 0);
+ Handle<Map> iterator_result_map = Map::Create(
+ object_function, JSGeneratorObject::kResultPropertyCount);
+ ASSERT(iterator_result_map->inobject_properties() ==
JSGeneratorObject::kResultPropertyCount);
- DescriptorArray::WhitenessWitness witness(*descriptors);
- generator_result_map->set_instance_descriptors(*descriptors);
+ Map::EnsureDescriptorSlack(
+ iterator_result_map, JSGeneratorObject::kResultPropertyCount);
- Handle<String> value_string = factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("value"));
- FieldDescriptor value_descr(*value_string,
+ FieldDescriptor value_descr(isolate()->factory()->value_string(),
JSGeneratorObject::kResultValuePropertyIndex,
NONE,
Representation::Tagged());
- generator_result_map->AppendDescriptor(&value_descr, witness);
+ iterator_result_map->AppendDescriptor(&value_descr);
- Handle<String> done_string = factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("done"));
- FieldDescriptor done_descr(*done_string,
+ FieldDescriptor done_descr(isolate()->factory()->done_string(),
JSGeneratorObject::kResultDonePropertyIndex,
NONE,
Representation::Tagged());
- generator_result_map->AppendDescriptor(&done_descr, witness);
+ iterator_result_map->AppendDescriptor(&done_descr);
- generator_result_map->set_unused_property_fields(0);
+ iterator_result_map->set_unused_property_fields(0);
ASSERT_EQ(JSGeneratorObject::kResultSize,
- generator_result_map->instance_size());
- native_context()->set_generator_result_map(*generator_result_map);
+ iterator_result_map->instance_size());
+ native_context()->set_iterator_result_map(*iterator_result_map);
}
}
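
For orientation: the map built above describes the plain {value, done} objects that collection forEach and generators hand back. A sketch of allocating one from it, using only calls that appear in this file (the field stores themselves are elided):

    // Two in-object fields, no unused property slack.
    Handle<JSObject> result =
        factory()->NewJSObjectFromMap(iterator_result_map);
    // In-object field 0 is "value" (kResultValuePropertyIndex) and
    // field 1 is "done" (kResultDonePropertyIndex).
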
@@ -1445,9 +1498,12 @@ bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Vector<const char> name = ExperimentalNatives::GetScriptName(index);
Factory* factory = isolate->factory();
- Handle<String> source_code =
+ Handle<String> source_code;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, source_code,
factory->NewStringFromAscii(
- ExperimentalNatives::GetRawScriptSource(index));
+ ExperimentalNatives::GetRawScriptSource(index)),
+ false);
return CompileNative(isolate, name, source_code);
}
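
ASSIGN_RETURN_ON_EXCEPTION_VALUE packages the unwrap-or-bail pattern for MaybeHandle results. Roughly equivalent hand-written code, as a sketch rather than the macro's literal expansion:

    MaybeHandle<String> maybe_source = factory->NewStringFromAscii(
        ExperimentalNatives::GetRawScriptSource(index));
    Handle<String> source_code;
    if (!maybe_source.ToHandle(&source_code)) return false;  // the VALUE arg
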
@@ -1456,9 +1512,7 @@ bool Genesis::CompileNative(Isolate* isolate,
Vector<const char> name,
Handle<String> source) {
HandleScope scope(isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate->debugger()->set_compiling_natives(true);
-#endif
+ SuppressDebug compiling_natives(isolate->debug());
// During genesis, the boilerplate for stack overflow won't work until the
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
@@ -1474,9 +1528,6 @@ bool Genesis::CompileNative(Isolate* isolate,
true);
ASSERT(isolate->has_pending_exception() != result);
if (!result) isolate->clear_pending_exception();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate->debugger()->set_compiling_natives(false);
-#endif
return result;
}
@@ -1496,8 +1547,9 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
// function and insert it into the cache.
if (cache == NULL || !cache->Lookup(name, &function_info)) {
ASSERT(source->IsOneByteRepresentation());
- Handle<String> script_name = factory->NewStringFromUtf8(name);
- function_info = Compiler::Compile(
+ Handle<String> script_name =
+ factory->NewStringFromUtf8(name).ToHandleChecked();
+ function_info = Compiler::CompileScript(
source,
script_name,
0,
@@ -1506,7 +1558,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
top_context,
extension,
NULL,
- Handle<String>::null(),
+ NO_CACHED_DATA,
use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
if (function_info.is_null()) return false;
if (cache != NULL) cache->Add(name, function_info);
@@ -1530,25 +1582,23 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
? top_context->builtins()
: top_context->global_object(),
isolate);
- bool has_pending_exception;
- Execution::Call(isolate, fun, receiver, 0, NULL, &has_pending_exception);
- if (has_pending_exception) return false;
- return true;
+ return !Execution::Call(
+ isolate, fun, receiver, 0, NULL).is_null();
}
-#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = \
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \
- Object* var##_native = \
- native_context()->builtins()->GetPropertyNoExceptionThrown( \
- *var##_name); \
- native_context()->set_##var(Type::cast(var##_native));
+#define INSTALL_NATIVE(Type, name, var) \
+ Handle<String> var##_name = \
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \
+ Handle<Object> var##_native = Object::GetProperty( \
+ handle(native_context()->builtins()), var##_name).ToHandleChecked(); \
+ native_context()->set_##var(Type::cast(*var##_native));
void Genesis::InstallNativeFunctions() {
HandleScope scope(isolate());
INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
+
INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
INSTALL_NATIVE(JSFunction, "ToDetailString", to_detail_string_fun);
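
The reworked INSTALL_NATIVE goes through the exception-aware Object::GetProperty. Its expansion for one entry, spelled out as a sketch of what the preprocessor produces, modulo formatting:

    // INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun) becomes:
    Handle<String> to_number_fun_name =
        factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("ToNumber"));
    Handle<Object> to_number_fun_native = Object::GetProperty(
        handle(native_context()->builtins()),
        to_number_fun_name).ToHandleChecked();
    native_context()->set_to_number_fun(
        JSFunction::cast(*to_number_fun_native));
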
@@ -1556,6 +1606,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
+
INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
@@ -1564,24 +1615,40 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSObject, "functionCache", function_cache);
INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
to_complete_property_descriptor);
+
+ INSTALL_NATIVE(JSFunction, "IsPromise", is_promise);
+ INSTALL_NATIVE(JSFunction, "PromiseCreate", promise_create);
+ INSTALL_NATIVE(JSFunction, "PromiseResolve", promise_resolve);
+ INSTALL_NATIVE(JSFunction, "PromiseReject", promise_reject);
+ INSTALL_NATIVE(JSFunction, "PromiseChain", promise_chain);
+ INSTALL_NATIVE(JSFunction, "PromiseCatch", promise_catch);
+ INSTALL_NATIVE(JSFunction, "PromiseThen", promise_then);
+
+ INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
+ INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice);
+ INSTALL_NATIVE(JSFunction, "BeginPerformSplice",
+ observers_begin_perform_splice);
+ INSTALL_NATIVE(JSFunction, "EndPerformSplice",
+ observers_end_perform_splice);
+ INSTALL_NATIVE(JSFunction, "NativeObjectObserve",
+ native_object_observe);
+ INSTALL_NATIVE(JSFunction, "NativeObjectGetNotifier",
+ native_object_get_notifier);
+ INSTALL_NATIVE(JSFunction, "NativeObjectNotifierPerformChange",
+ native_object_notifier_perform_change);
}
void Genesis::InstallExperimentalNativeFunctions() {
- INSTALL_NATIVE(JSFunction, "RunMicrotasks", run_microtasks);
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
- if (FLAG_harmony_observation) {
- INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
- INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice);
- INSTALL_NATIVE(JSFunction, "BeginPerformSplice",
- observers_begin_perform_splice);
- INSTALL_NATIVE(JSFunction, "EndPerformSplice",
- observers_end_perform_splice);
+
+ if (FLAG_harmony_symbols) {
+ INSTALL_NATIVE(Symbol, "symbolIterator", iterator_symbol);
}
}
@@ -1598,43 +1665,34 @@ Handle<JSFunction> Genesis::InstallInternalArray(
// doesn't inherit from Object.prototype.
// To be used only for internal work by builtins. Instances
// must not be leaked to user code.
- Handle<JSFunction> array_function =
- InstallFunction(builtins,
- name,
- JS_ARRAY_TYPE,
- JSArray::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kInternalArrayCode,
- true, true);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- Accessors::FunctionSetPrototype(array_function, prototype);
+ Handle<JSFunction> array_function = InstallFunction(
+ builtins, name, JS_ARRAY_TYPE, JSArray::kSize,
+ prototype, Builtins::kInternalArrayCode);
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
- Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
+ Handle<Code> code = internal_array_constructor_stub.GetCode();
array_function->shared()->set_construct_stub(*code);
array_function->shared()->DontAdaptArguments();
Handle<Map> original_map(array_function->initial_map());
- Handle<Map> initial_map = factory()->CopyMap(original_map);
+ Handle<Map> initial_map = Map::Copy(original_map);
initial_map->set_elements_kind(elements_kind);
array_function->set_initial_map(*initial_map);
// Make "length" magic on instances.
- Handle<DescriptorArray> array_descriptors(
- factory()->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*array_descriptors);
+ Map::EnsureDescriptorSlack(initial_map, 1);
- Handle<Foreign> array_length(factory()->NewForeign(
- &Accessors::ArrayLength));
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
- initial_map->set_instance_descriptors(*array_descriptors);
+ Handle<AccessorInfo> array_length =
+ Accessors::ArrayLengthInfo(isolate(), attribs);
{ // Add length.
CallbacksDescriptor d(
- *factory()->length_string(), *array_length, attribs);
- array_function->initial_map()->AppendDescriptor(&d, witness);
+ Handle<Name>(Name::cast(array_length->name())), array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d);
}
return array_function;
@@ -1649,10 +1707,9 @@ bool Genesis::InstallNatives() {
// (itself) and a reference to the native_context directly in the object.
Handle<Code> code = Handle<Code>(
isolate()->builtins()->builtin(Builtins::kIllegal));
- Handle<JSFunction> builtins_fun =
- factory()->NewFunction(factory()->empty_string(),
- JS_BUILTINS_OBJECT_TYPE,
- JSBuiltinsObject::kSize, code, true);
+ Handle<JSFunction> builtins_fun = factory()->NewFunction(
+ factory()->empty_string(), code, JS_BUILTINS_OBJECT_TYPE,
+ JSBuiltinsObject::kSize);
Handle<String> name =
factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
@@ -1667,6 +1724,8 @@ bool Genesis::InstallNatives() {
builtins->set_native_context(*native_context());
builtins->set_global_context(*native_context());
builtins->set_global_receiver(*builtins);
+ builtins->set_global_receiver(native_context()->global_proxy());
+
// Set up the 'global' properties of the builtins object. The
// 'global' property that refers to the global object is the only
@@ -1677,18 +1736,19 @@ bool Genesis::InstallNatives() {
Handle<String> global_string =
factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("global"));
Handle<Object> global_obj(native_context()->global_object(), isolate());
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- builtins, global_string, global_obj, attributes));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ builtins, global_string, global_obj, attributes).Check();
+ Handle<String> builtins_string =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ builtins, builtins_string, builtins, attributes).Check();
// Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(native_context()->global_object())->
set_builtins(*builtins);
// Create a bridge function that has context in the native context.
- Handle<JSFunction> bridge =
- factory()->NewFunction(factory()->empty_string(),
- factory()->undefined_value());
+ Handle<JSFunction> bridge = factory()->NewFunction(factory()->empty_string());
ASSERT(bridge->context() == *isolate()->native_context());
// Allocate the builtins context.
@@ -1700,150 +1760,120 @@ bool Genesis::InstallNatives() {
{ // -- S c r i p t
// Builtin functions for Script.
- Handle<JSFunction> script_fun =
- InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, false, false);
+ Handle<JSFunction> script_fun = InstallFunction(
+ builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
+ isolate()->initial_object_prototype(), Builtins::kIllegal);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Accessors::FunctionSetPrototype(script_fun, prototype);
native_context()->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
+ Map::EnsureDescriptorSlack(script_map, 13);
- Handle<DescriptorArray> script_descriptors(
- factory()->NewDescriptorArray(0, 13));
- DescriptorArray::WhitenessWitness witness(*script_descriptors);
-
- Handle<Foreign> script_source(
- factory()->NewForeign(&Accessors::ScriptSource));
- Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName));
- Handle<String> id_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("id")));
- Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId));
- Handle<String> line_offset_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("line_offset")));
- Handle<Foreign> script_line_offset(
- factory()->NewForeign(&Accessors::ScriptLineOffset));
- Handle<String> column_offset_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("column_offset")));
- Handle<Foreign> script_column_offset(
- factory()->NewForeign(&Accessors::ScriptColumnOffset));
- Handle<String> data_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("data")));
- Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
- Handle<String> type_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("type")));
- Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
- Handle<String> compilation_type_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("compilation_type")));
- Handle<Foreign> script_compilation_type(
- factory()->NewForeign(&Accessors::ScriptCompilationType));
- Handle<String> line_ends_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("line_ends")));
- Handle<Foreign> script_line_ends(
- factory()->NewForeign(&Accessors::ScriptLineEnds));
- Handle<String> context_data_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("context_data")));
- Handle<Foreign> script_context_data(
- factory()->NewForeign(&Accessors::ScriptContextData));
- Handle<String> eval_from_script_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("eval_from_script")));
- Handle<Foreign> script_eval_from_script(
- factory()->NewForeign(&Accessors::ScriptEvalFromScript));
- Handle<String> eval_from_script_position_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("eval_from_script_position")));
- Handle<Foreign> script_eval_from_script_position(
- factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition));
- Handle<String> eval_from_function_name_string(
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("eval_from_function_name")));
- Handle<Foreign> script_eval_from_function_name(
- factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- script_map->set_instance_descriptors(*script_descriptors);
+ Handle<AccessorInfo> script_column =
+ Accessors::ScriptColumnOffsetInfo(isolate(), attribs);
{
- CallbacksDescriptor d(
- *factory()->source_string(), *script_source, attribs);
- script_map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(script_column->name())),
+ script_column, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_id =
+ Accessors::ScriptIdInfo(isolate(), attribs);
{
- CallbacksDescriptor d(*factory()->name_string(), *script_name, attribs);
- script_map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(script_id->name())),
+ script_id, attribs);
+ script_map->AppendDescriptor(&d);
}
- {
- CallbacksDescriptor d(*id_string, *script_id, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
+ Handle<AccessorInfo> script_name =
+ Accessors::ScriptNameInfo(isolate(), attribs);
{
- CallbacksDescriptor d(*line_offset_string, *script_line_offset, attribs);
- script_map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(script_name->name())),
+ script_name, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_line =
+ Accessors::ScriptLineOffsetInfo(isolate(), attribs);
{
- CallbacksDescriptor d(
- *column_offset_string, *script_column_offset, attribs);
- script_map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(script_line->name())),
+ script_line, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_source =
+ Accessors::ScriptSourceInfo(isolate(), attribs);
{
- CallbacksDescriptor d(*data_string, *script_data, attribs);
- script_map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(script_source->name())),
+ script_source, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_type =
+ Accessors::ScriptTypeInfo(isolate(), attribs);
{
- CallbacksDescriptor d(*type_string, *script_type, attribs);
- script_map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(script_type->name())),
+ script_type, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_compilation_type =
+ Accessors::ScriptCompilationTypeInfo(isolate(), attribs);
{
CallbacksDescriptor d(
- *compilation_type_string, *script_compilation_type, attribs);
- script_map->AppendDescriptor(&d, witness);
+ Handle<Name>(Name::cast(script_compilation_type->name())),
+ script_compilation_type, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_line_ends =
+ Accessors::ScriptLineEndsInfo(isolate(), attribs);
{
- CallbacksDescriptor d(*line_ends_string, *script_line_ends, attribs);
- script_map->AppendDescriptor(&d, witness);
+ CallbacksDescriptor d(Handle<Name>(Name::cast(script_line_ends->name())),
+ script_line_ends, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_context_data =
+ Accessors::ScriptContextDataInfo(isolate(), attribs);
{
CallbacksDescriptor d(
- *context_data_string, *script_context_data, attribs);
- script_map->AppendDescriptor(&d, witness);
+ Handle<Name>(Name::cast(script_context_data->name())),
+ script_context_data, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_eval_from_script =
+ Accessors::ScriptEvalFromScriptInfo(isolate(), attribs);
{
CallbacksDescriptor d(
- *eval_from_script_string, *script_eval_from_script, attribs);
- script_map->AppendDescriptor(&d, witness);
+ Handle<Name>(Name::cast(script_eval_from_script->name())),
+ script_eval_from_script, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_eval_from_script_position =
+ Accessors::ScriptEvalFromScriptPositionInfo(isolate(), attribs);
{
CallbacksDescriptor d(
- *eval_from_script_position_string,
- *script_eval_from_script_position,
- attribs);
- script_map->AppendDescriptor(&d, witness);
+ Handle<Name>(Name::cast(script_eval_from_script_position->name())),
+ script_eval_from_script_position, attribs);
+ script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_eval_from_function_name =
+ Accessors::ScriptEvalFromFunctionNameInfo(isolate(), attribs);
{
CallbacksDescriptor d(
- *eval_from_function_name_string,
- *script_eval_from_function_name,
- attribs);
- script_map->AppendDescriptor(&d, witness);
+ Handle<Name>(Name::cast(script_eval_from_function_name->name())),
+ script_eval_from_function_name, attribs);
+ script_map->AppendDescriptor(&d);
}
// Allocate the empty script.
@@ -1855,11 +1885,9 @@ bool Genesis::InstallNatives() {
// Builtin function for OpaqueReference -- a JSValue-based object,
// that keeps its field isolated from JavaScript code. It may store
// objects, that JavaScript code may not access.
- Handle<JSFunction> opaque_reference_fun =
- InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
- JSValue::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, false, false);
+ Handle<JSFunction> opaque_reference_fun = InstallFunction(
+ builtins, "OpaqueReference", JS_VALUE_TYPE, JSValue::kSize,
+ isolate()->initial_object_prototype(), Builtins::kIllegal);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Accessors::FunctionSetPrototype(opaque_reference_fun, prototype);
@@ -1910,22 +1938,18 @@ bool Genesis::InstallNatives() {
// Install Function.prototype.call and apply.
{ Handle<String> key = factory()->function_class_string();
Handle<JSFunction> function =
- Handle<JSFunction>::cast(
- GetProperty(isolate(), isolate()->global_object(), key));
+ Handle<JSFunction>::cast(Object::GetProperty(
+ isolate()->global_object(), key).ToHandleChecked());
Handle<JSObject> proto =
Handle<JSObject>(JSObject::cast(function->instance_prototype()));
// Install the call and the apply functions.
Handle<JSFunction> call =
InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- Handle<JSObject>::null(),
- Builtins::kFunctionCall,
- false, false);
+ MaybeHandle<JSObject>(), Builtins::kFunctionCall);
Handle<JSFunction> apply =
InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- Handle<JSObject>::null(),
- Builtins::kFunctionApply,
- false, false);
+ MaybeHandle<JSObject>(), Builtins::kFunctionApply);
// Make sure that Function.prototype.call appears to be compiled.
// The code will never be called, but inline caching for call will
@@ -1963,38 +1987,36 @@ bool Genesis::InstallNatives() {
initial_map->set_prototype(*array_prototype);
// Update map with length accessor from Array and add "index" and "input".
- Handle<DescriptorArray> reresult_descriptors =
- factory()->NewDescriptorArray(0, 3);
- DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
- initial_map->set_instance_descriptors(*reresult_descriptors);
+ Map::EnsureDescriptorSlack(initial_map, 3);
{
JSFunction* array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
array_function->initial_map()->instance_descriptors());
- String* length = heap()->length_string();
+ Handle<String> length = factory()->length_string();
int old = array_descriptors->SearchWithCache(
- length, array_function->initial_map());
+ *length, array_function->initial_map());
ASSERT(old != DescriptorArray::kNotFound);
CallbacksDescriptor desc(length,
- array_descriptors->GetValue(old),
+ handle(array_descriptors->GetValue(old),
+ isolate()),
array_descriptors->GetDetails(old).attributes());
- initial_map->AppendDescriptor(&desc, witness);
+ initial_map->AppendDescriptor(&desc);
}
{
- FieldDescriptor index_field(heap()->index_string(),
+ FieldDescriptor index_field(factory()->index_string(),
JSRegExpResult::kIndexIndex,
NONE,
Representation::Tagged());
- initial_map->AppendDescriptor(&index_field, witness);
+ initial_map->AppendDescriptor(&index_field);
}
{
- FieldDescriptor input_field(heap()->input_string(),
+ FieldDescriptor input_field(factory()->input_string(),
JSRegExpResult::kInputIndex,
NONE,
Representation::Tagged());
- initial_map->AppendDescriptor(&input_field, witness);
+ initial_map->AppendDescriptor(&input_field);
}
initial_map->set_inobject_properties(2);
@@ -2005,7 +2027,7 @@ bool Genesis::InstallNatives() {
}
#ifdef VERIFY_HEAP
- builtins->Verify();
+ builtins->ObjectVerify();
#endif
return true;
@@ -2027,8 +2049,7 @@ bool Genesis::InstallExperimentalNatives() {
INSTALL_EXPERIMENTAL_NATIVE(i, symbols, "symbol.js")
INSTALL_EXPERIMENTAL_NATIVE(i, proxies, "proxy.js")
INSTALL_EXPERIMENTAL_NATIVE(i, collections, "collection.js")
- INSTALL_EXPERIMENTAL_NATIVE(i, observation, "object-observe.js")
- INSTALL_EXPERIMENTAL_NATIVE(i, promises, "promise.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, collections, "collection-iterator.js")
INSTALL_EXPERIMENTAL_NATIVE(i, generators, "generator.js")
INSTALL_EXPERIMENTAL_NATIVE(i, iteration, "array-iterator.js")
INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
@@ -2037,7 +2058,7 @@ bool Genesis::InstallExperimentalNatives() {
}
InstallExperimentalNativeFunctions();
-
+ InstallExperimentalBuiltinFunctionIds();
return true;
}
@@ -2050,14 +2071,16 @@ static Handle<JSObject> ResolveBuiltinIdHolder(
Handle<GlobalObject> global(native_context->global_object());
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
- return Handle<JSObject>::cast(GetProperty(
- isolate, global, factory->InternalizeUtf8String(holder_expr)));
+ return Handle<JSObject>::cast(Object::GetPropertyOrElement(
+ global, factory->InternalizeUtf8String(holder_expr)).ToHandleChecked());
}
ASSERT_EQ(".prototype", period_pos);
Vector<const char> property(holder_expr,
static_cast<int>(period_pos - holder_expr));
+ Handle<String> property_string = factory->InternalizeUtf8String(property);
+ ASSERT(!property_string.is_null());
Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(isolate, global, factory->InternalizeUtf8String(property)));
+ Object::GetProperty(global, property_string).ToHandleChecked());
return Handle<JSObject>(JSObject::cast(function->prototype()));
}
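
ResolveBuiltinIdHolder accepts either a bare global name or a name ending in ".prototype", per the ASSERT above. A usage sketch; the second holder expression is illustrative:

    Handle<JSObject> math =
        ResolveBuiltinIdHolder(native_context(), "Math");
    Handle<JSObject> string_proto =
        ResolveBuiltinIdHolder(native_context(), "String.prototype");
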
@@ -2065,10 +2088,10 @@ static Handle<JSObject> ResolveBuiltinIdHolder(
static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
- Factory* factory = holder->GetIsolate()->factory();
- Handle<String> name = factory->InternalizeUtf8String(function_name);
- Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
- Handle<JSFunction> function(JSFunction::cast(function_object));
+ Isolate* isolate = holder->GetIsolate();
+ Handle<Object> function_object =
+ Object::GetProperty(isolate, holder, function_name).ToHandleChecked();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
function->shared()->set_function_data(Smi::FromInt(id));
}
@@ -2087,6 +2110,15 @@ void Genesis::InstallBuiltinFunctionIds() {
}
+void Genesis::InstallExperimentalBuiltinFunctionIds() {
+ HandleScope scope(isolate());
+ if (FLAG_harmony_maths) {
+ Handle<JSObject> holder = ResolveBuiltinIdHolder(native_context(), "Math");
+ InstallBuiltinFunctionId(holder, "clz32", kMathClz32);
+ }
+}
+
+
// Do not forget to update macros.py with named constant
// of cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \
@@ -2132,9 +2164,8 @@ void Genesis::InstallJSFunctionResultCaches() {
void Genesis::InitializeNormalizedMapCaches() {
- Handle<FixedArray> array(
- factory()->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
- native_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
+ Handle<NormalizedMapCache> cache = NormalizedMapCache::New(isolate());
+ native_context()->set_normalized_map_cache(*cache);
}
@@ -2143,13 +2174,12 @@ bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
BootstrapperActive active(this);
SaveContext saved_context(isolate_);
isolate_->set_context(*native_context);
- if (!Genesis::InstallExtensions(native_context, extensions)) return false;
- Genesis::InstallSpecialObjects(native_context);
- return true;
+ return Genesis::InstallExtensions(native_context, extensions) &&
+ Genesis::InstallSpecialObjects(native_context);
}
-void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
+bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Isolate* isolate = native_context->GetIsolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -2159,47 +2189,48 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives =
factory->InternalizeUtf8String(FLAG_expose_natives_as);
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- global, natives,
- Handle<JSObject>(global->builtins()),
- DONT_ENUM));
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ global, natives, Handle<JSObject>(global->builtins()), DONT_ENUM),
+ false);
}
- Handle<Object> Error = GetProperty(global, "Error");
+ Handle<Object> Error = Object::GetProperty(
+ isolate, global, "Error").ToHandleChecked();
if (Error->IsJSObject()) {
Handle<String> name = factory->InternalizeOneByteString(
STATIC_ASCII_VECTOR("stackTraceLimit"));
Handle<Smi> stack_trace_limit(
Smi::FromInt(FLAG_stack_trace_limit), isolate);
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- Handle<JSObject>::cast(Error), name,
- stack_trace_limit, NONE));
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ Handle<JSObject>::cast(Error), name, stack_trace_limit, NONE),
+ false);
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
- Debug* debug = isolate->debug();
// If loading fails we just bail out without installing the
// debugger but without tanking the whole context.
- if (!debug->Load()) return;
+ Debug* debug = isolate->debug();
+ if (!debug->Load()) return true;
+ Handle<Context> debug_context = debug->debug_context();
// Set the security token for the debugger context to the same as
// the shell native context to allow calling between these (otherwise
// exposing debug global object doesn't make much sense).
- debug->debug_context()->set_security_token(
- native_context->security_token());
-
+ debug_context->set_security_token(native_context->security_token());
Handle<String> debug_string =
factory->InternalizeUtf8String(FLAG_expose_debug_as);
- Handle<Object> global_proxy(
- debug->debug_context()->global_proxy(), isolate);
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- global, debug_string, global_proxy, DONT_ENUM));
+ Handle<Object> global_proxy(debug_context->global_proxy(), isolate);
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ global, debug_string, global_proxy, DONT_ENUM),
+ false);
}
-#endif
+ return true;
}
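
RETURN_ON_EXCEPTION_VALUE is the statement-position sibling of the ASSIGN_ variant used earlier in this file. Roughly, as a sketch rather than the literal expansion:

    if (JSObject::SetOwnPropertyIgnoreAttributes(
            global, natives, Handle<JSObject>(global->builtins()),
            DONT_ENUM).is_null()) {
      return false;  // the pending exception stays on the isolate
    }
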
@@ -2208,12 +2239,7 @@ static uint32_t Hash(RegisteredExtension* extension) {
}
-static bool MatchRegisteredExtensions(void* key1, void* key2) {
- return key1 == key2;
-}
-
-Genesis::ExtensionStates::ExtensionStates()
- : map_(MatchRegisteredExtensions, 8) { }
+Genesis::ExtensionStates::ExtensionStates() : map_(HashMap::PointersMatch, 8) {}
Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
RegisteredExtension* extension) {
@@ -2231,40 +2257,46 @@ void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
reinterpret_cast<void*>(static_cast<intptr_t>(state));
}
+
bool Genesis::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
Isolate* isolate = native_context->GetIsolate();
ExtensionStates extension_states; // All extensions have state UNVISITED.
- // Install auto extensions.
- v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
- while (current != NULL) {
- if (current->extension()->auto_enable())
- InstallExtension(isolate, current, &extension_states);
- current = current->next();
+ return InstallAutoExtensions(isolate, &extension_states) &&
+ (!FLAG_expose_free_buffer ||
+ InstallExtension(isolate, "v8/free-buffer", &extension_states)) &&
+ (!FLAG_expose_gc ||
+ InstallExtension(isolate, "v8/gc", &extension_states)) &&
+ (!FLAG_expose_externalize_string ||
+ InstallExtension(isolate, "v8/externalize", &extension_states)) &&
+ (!FLAG_track_gc_object_stats ||
+ InstallExtension(isolate, "v8/statistics", &extension_states)) &&
+ (!FLAG_expose_trigger_failure ||
+ InstallExtension(isolate, "v8/trigger-failure", &extension_states)) &&
+ InstallRequestedExtensions(isolate, extensions, &extension_states);
+}
+
+
+bool Genesis::InstallAutoExtensions(Isolate* isolate,
+ ExtensionStates* extension_states) {
+ for (v8::RegisteredExtension* it = v8::RegisteredExtension::first_extension();
+ it != NULL;
+ it = it->next()) {
+ if (it->extension()->auto_enable() &&
+ !InstallExtension(isolate, it, extension_states)) {
+ return false;
+ }
}
+ return true;
+}
-#ifdef ADDRESS_SANITIZER
- if (FLAG_expose_free_buffer) {
- InstallExtension(isolate, "v8/free-buffer", &extension_states);
- }
-#endif
- if (FLAG_expose_gc) InstallExtension(isolate, "v8/gc", &extension_states);
- if (FLAG_expose_externalize_string) {
- InstallExtension(isolate, "v8/externalize", &extension_states);
- }
- if (FLAG_track_gc_object_stats) {
- InstallExtension(isolate, "v8/statistics", &extension_states);
- }
- if (extensions == NULL) return true;
- // Install required extensions
- int count = v8::ImplementationUtilities::GetNameCount(extensions);
- const char** names = v8::ImplementationUtilities::GetNames(extensions);
- for (int i = 0; i < count; i++) {
- if (!InstallExtension(isolate, names[i], &extension_states))
- return false;
+bool Genesis::InstallRequestedExtensions(Isolate* isolate,
+ v8::ExtensionConfiguration* extensions,
+ ExtensionStates* extension_states) {
+ for (const char** it = extensions->begin(); it != extensions->end(); ++it) {
+ if (!InstallExtension(isolate, *it, extension_states)) return false;
}
-
return true;
}
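
The rewritten driver folds the flag checks into one short-circuiting conjunction, so the first failed install aborts bootstrapping. Its control flow, unrolled for two of the flags (the others follow the same shape):

    if (!InstallAutoExtensions(isolate, &extension_states)) return false;
    if (FLAG_expose_gc &&
        !InstallExtension(isolate, "v8/gc", &extension_states)) {
      return false;
    }
    if (FLAG_expose_externalize_string &&
        !InstallExtension(isolate, "v8/externalize", &extension_states)) {
      return false;
    }
    return InstallRequestedExtensions(isolate, extensions, &extension_states);
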
@@ -2274,19 +2306,16 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
bool Genesis::InstallExtension(Isolate* isolate,
const char* name,
ExtensionStates* extension_states) {
- v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
- // Loop until we find the relevant extension
- while (current != NULL) {
- if (strcmp(name, current->extension()->name()) == 0) break;
- current = current->next();
- }
- // Didn't find the extension; fail.
- if (current == NULL) {
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Cannot find required extension");
- return false;
+ for (v8::RegisteredExtension* it = v8::RegisteredExtension::first_extension();
+ it != NULL;
+ it = it->next()) {
+ if (strcmp(name, it->extension()->name()) == 0) {
+ return InstallExtension(isolate, it, extension_states);
+ }
}
- return InstallExtension(isolate, current, extension_states);
+ return Utils::ApiCheck(false,
+ "v8::Context::New()",
+ "Cannot find required extension");
}
@@ -2298,9 +2327,9 @@ bool Genesis::InstallExtension(Isolate* isolate,
if (extension_states->get_state(current) == INSTALLED) return true;
// The current node has already been visited so there must be a
// cycle in the dependency graph; fail.
- if (extension_states->get_state(current) == VISITED) {
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Circular extension dependency");
+ if (!Utils::ApiCheck(extension_states->get_state(current) != VISITED,
+ "v8::Context::New()",
+ "Circular extension dependency")) {
return false;
}
ASSERT(extension_states->get_state(current) == UNVISITED);
@@ -2314,8 +2343,10 @@ bool Genesis::InstallExtension(Isolate* isolate,
return false;
}
}
+ // We do not expect this to throw an exception. Change this if it does.
Handle<String> source_code =
- isolate->factory()->NewExternalStringFromAscii(extension->source());
+ isolate->factory()->NewExternalStringFromAscii(
+ extension->source()).ToHandleChecked();
bool result = CompileScriptCached(isolate,
CStrVector(extension->name()),
source_code,
@@ -2343,13 +2374,11 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
HandleScope scope(isolate());
for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<String> name =
- factory()->InternalizeUtf8String(Builtins::GetName(id));
- Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
- Handle<JSFunction> function
- = Handle<JSFunction>(JSFunction::cast(function_object));
+ Handle<Object> function_object = Object::GetProperty(
+ isolate(), builtins, Builtins::GetName(id)).ToHandleChecked();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
builtins->set_javascript_builtin(id, *function);
- if (!JSFunction::CompileLazy(function, CLEAR_EXCEPTION)) {
+ if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
builtins->set_javascript_builtin_code(id, function->shared()->code());
@@ -2396,10 +2425,10 @@ bool Genesis::ConfigureApiObject(Handle<JSObject> object,
ASSERT(FunctionTemplateInfo::cast(object_template->constructor())
->IsTemplateFor(object->map()));
- bool pending_exception = false;
- Handle<JSObject> obj =
- Execution::InstantiateObject(object_template, &pending_exception);
- if (pending_exception) {
+ MaybeHandle<JSObject> maybe_obj =
+ Execution::InstantiateObject(object_template);
+ Handle<JSObject> obj;
+ if (!maybe_obj.ToHandle(&obj)) {
ASSERT(isolate()->has_pending_exception());
isolate()->clear_pending_exception();
return false;
@@ -2420,33 +2449,31 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
case FIELD: {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
- int index = descs->GetFieldIndex(i);
+ FieldIndex index = FieldIndex::ForDescriptor(from->map(), i);
ASSERT(!descs->GetDetails(i).representation().IsDouble());
Handle<Object> value = Handle<Object>(from->RawFastPropertyAt(index),
isolate());
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- to, key, value, details.attributes()));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ to, key, value, details.attributes()).Check();
break;
}
case CONSTANT: {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
Handle<Object> constant(descs->GetConstant(i), isolate());
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- to, key, constant, details.attributes()));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ to, key, constant, details.attributes()).Check();
break;
}
case CALLBACKS: {
LookupResult result(isolate());
- to->LocalLookup(descs->GetKey(i), &result);
+ Handle<Name> key(Name::cast(descs->GetKey(i)), isolate());
+ to->LookupOwn(key, &result);
// If the property is already there we skip it
if (result.IsFound()) continue;
HandleScope inner(isolate());
ASSERT(!to->HasFastProperties());
// Add to dictionary.
- Handle<Name> key = Handle<Name>(descs->GetKey(i));
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
PropertyDetails d = PropertyDetails(
details.attributes(), CALLBACKS, i + 1);
@@ -2457,7 +2484,6 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// Do not occur since the from object has fast properties.
case HANDLER:
case INTERCEPTOR:
- case TRANSITION:
case NONEXISTENT:
// No element in instance descriptors have proxy or interceptor type.
UNREACHABLE();
@@ -2474,10 +2500,10 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
ASSERT(raw_key->IsName());
// If the property is already there we skip it.
LookupResult result(isolate());
- to->LocalLookup(Name::cast(raw_key), &result);
+ Handle<Name> key(Name::cast(raw_key));
+ to->LookupOwn(key, &result);
if (result.IsFound()) continue;
// Set the property.
- Handle<Name> key = Handle<Name>(Name::cast(raw_key));
Handle<Object> value = Handle<Object>(properties->ValueAt(i),
isolate());
ASSERT(!value->IsCell());
@@ -2486,9 +2512,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
isolate());
}
PropertyDetails details = properties->DetailsAt(i);
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- to, key, value, details.attributes()));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ to, key, value, details.attributes()).Check();
}
}
}
@@ -2516,7 +2541,7 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
// Transfer the prototype (new map is needed).
Handle<Map> old_to_map = Handle<Map>(to->map());
- Handle<Map> new_to_map = factory()->CopyMap(old_to_map);
+ Handle<Map> new_to_map = Map::Copy(old_to_map);
new_to_map->set_prototype(from->map()->prototype());
to->set_map(*new_to_map);
}
@@ -2526,22 +2551,44 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
// The maps with writable prototype are created in CreateEmptyFunction
// and CreateStrictModeFunctionMaps respectively. Initially the maps are
// created with read-only prototype for JS builtins processing.
- ASSERT(!function_map_writable_prototype_.is_null());
- ASSERT(!strict_mode_function_map_writable_prototype_.is_null());
+ ASSERT(!sloppy_function_map_writable_prototype_.is_null());
+ ASSERT(!strict_function_map_writable_prototype_.is_null());
// Replace function instance maps to make prototype writable.
- native_context()->set_function_map(*function_map_writable_prototype_);
- native_context()->set_strict_mode_function_map(
- *strict_mode_function_map_writable_prototype_);
+ native_context()->set_sloppy_function_map(
+ *sloppy_function_map_writable_prototype_);
+ native_context()->set_strict_function_map(
+ *strict_function_map_writable_prototype_);
}
+class NoTrackDoubleFieldsForSerializerScope {
+ public:
+ explicit NoTrackDoubleFieldsForSerializerScope(Isolate* isolate)
+ : flag_(FLAG_track_double_fields) {
+ if (isolate->serializer_enabled()) {
+ // Disable tracking double fields because heap numbers are treated as
+ // immutable by the serializer.
+ FLAG_track_double_fields = false;
+ }
+ }
+
+ ~NoTrackDoubleFieldsForSerializerScope() {
+ FLAG_track_double_fields = flag_;
+ }
+
+ private:
+ bool flag_;
+};
+
+
Genesis::Genesis(Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions)
: isolate_(isolate),
active_(isolate->bootstrapper()) {
+ NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
result_ = Handle<Context>::null();
// If V8 cannot be initialized, just return.
if (!V8::Initialize(NULL)) return;
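
NoTrackDoubleFieldsForSerializerScope is a plain RAII flag guard: the constructor saves FLAG_track_double_fields (clearing it when the serializer is enabled) and the destructor restores it, so every early return above leaves the flag consistent. Its shape in isolation, as a sketch:

    {
      NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
      // bootstrapping runs with double-field tracking off when serializing
    }  // flag restored here on any exit path
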
@@ -2576,6 +2623,8 @@ Genesis::Genesis(Isolate* isolate,
HookUpGlobalProxy(inner_global, global_proxy);
HookUpInnerGlobal(inner_global);
+ native_context()->builtins()->set_global_receiver(
+ native_context()->global_proxy());
if (!ConfigureGlobalObjects(global_template)) return;
} else {
@@ -2605,7 +2654,7 @@ Genesis::Genesis(Isolate* isolate,
// We can't (de-)serialize typed arrays currently, but we are lucky: The state
// of the random number generator needs no initialization during snapshot
// creation time and we don't need trigonometric functions then.
- if (!Serializer::enabled()) {
+ if (!isolate->serializer_enabled()) {
// Initially seed the per-context random number generator using the
// per-isolate random number generator.
const int num_elems = 2;
@@ -2621,11 +2670,11 @@ Genesis::Genesis(Isolate* isolate,
Utils::OpenHandle(*buffer)->set_should_be_freed(true);
v8::Local<v8::Uint32Array> ta = v8::Uint32Array::New(buffer, 0, num_elems);
Handle<JSBuiltinsObject> builtins(native_context()->builtins());
- ForceSetProperty(builtins,
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("rngstate")),
- Utils::OpenHandle(*ta),
- NONE);
+ Runtime::ForceSetObjectProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("rngstate")),
+ Utils::OpenHandle(*ta),
+ NONE).Assert();
// Initialize trigonometric lookup tables and constants.
const int table_num_bytes = TrigonometricLookupTable::table_num_bytes();
@@ -2640,28 +2689,31 @@ Genesis::Genesis(Isolate* isolate,
v8::Local<v8::Float64Array> cos_table = v8::Float64Array::New(
cos_buffer, 0, TrigonometricLookupTable::table_size());
- ForceSetProperty(builtins,
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("kSinTable")),
- Utils::OpenHandle(*sin_table),
- NONE);
- ForceSetProperty(builtins,
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("kCosXIntervalTable")),
- Utils::OpenHandle(*cos_table),
- NONE);
- ForceSetProperty(builtins,
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("kSamples")),
- factory()->NewHeapNumber(
- TrigonometricLookupTable::samples()),
- NONE);
- ForceSetProperty(builtins,
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("kIndexConvert")),
- factory()->NewHeapNumber(
- TrigonometricLookupTable::samples_over_pi_half()),
- NONE);
+ Runtime::ForceSetObjectProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kSinTable")),
+ Utils::OpenHandle(*sin_table),
+ NONE).Assert();
+ Runtime::ForceSetObjectProperty(
+ builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kCosXIntervalTable")),
+ Utils::OpenHandle(*cos_table),
+ NONE).Assert();
+ Runtime::ForceSetObjectProperty(
+ builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kSamples")),
+ factory()->NewHeapNumber(
+ TrigonometricLookupTable::samples()),
+ NONE).Assert();
+ Runtime::ForceSetObjectProperty(
+ builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kIndexConvert")),
+ factory()->NewHeapNumber(
+ TrigonometricLookupTable::samples_over_pi_half()),
+ NONE).Assert();
}
result_ = native_context();
@@ -2676,7 +2728,7 @@ int Bootstrapper::ArchiveSpacePerThread() {
}
-// Archive statics that are thread local.
+// Archive statics that are thread-local.
char* Bootstrapper::ArchiveState(char* to) {
*reinterpret_cast<NestingCounterType*>(to) = nesting_;
nesting_ = 0;
@@ -2684,7 +2736,7 @@ char* Bootstrapper::ArchiveState(char* to) {
}
-// Restore statics that are thread local.
+// Restore statics that are thread-local.
char* Bootstrapper::RestoreState(char* from) {
nesting_ = *reinterpret_cast<NestingCounterType*>(from);
return from + sizeof(NestingCounterType);
diff --git a/chromium/v8/src/bootstrapper.h b/chromium/v8/src/bootstrapper.h
index 4f63c87163f..598838278e4 100644
--- a/chromium/v8/src/bootstrapper.h
+++ b/chromium/v8/src/bootstrapper.h
@@ -1,46 +1,21 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_BOOTSTRAPPER_H_
#define V8_BOOTSTRAPPER_H_
-#include "allocation.h"
+#include "src/factory.h"
namespace v8 {
namespace internal {
-
// A SourceCodeCache uses a FixedArray to store pairs of
// (AsciiString*, JSFunction*), mapping names of native code files
 // (runtime.js, etc.) to precompiled functions. Instead of mapping
 // names to functions, it might make sense to let the JS2C tool
// generate an index for each native JS file.
-class SourceCodeCache BASE_EMBEDDED {
+class SourceCodeCache V8_FINAL BASE_EMBEDDED {
public:
explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
@@ -72,7 +47,9 @@ class SourceCodeCache BASE_EMBEDDED {
Handle<FixedArray> new_array = factory->NewFixedArray(length + 2, TENURED);
cache_->CopyTo(0, *new_array, 0, cache_->length());
cache_ = *new_array;
- Handle<String> str = factory->NewStringFromAscii(name, TENURED);
+ Handle<String> str =
+ factory->NewStringFromAscii(name, TENURED).ToHandleChecked();
+ ASSERT(!str.is_null());
cache_->set(length, *str);
cache_->set(length + 1, *shared);
Script::cast(shared->script())->set_type(Smi::FromInt(type_));
@@ -87,9 +64,10 @@ class SourceCodeCache BASE_EMBEDDED {
 // The Bootstrapper is the public interface for creating a JavaScript global
// context.
-class Bootstrapper {
+class Bootstrapper V8_FINAL {
public:
static void InitializeOncePerProcess();
+ static void TearDownExtensions();
// Requires: Heap::SetUp has been called.
void Initialize(bool create_heap_objects);
@@ -146,11 +124,17 @@ class Bootstrapper {
explicit Bootstrapper(Isolate* isolate);
+ static v8::Extension* free_buffer_extension_;
+ static v8::Extension* gc_extension_;
+ static v8::Extension* externalize_string_extension_;
+ static v8::Extension* statistics_extension_;
+ static v8::Extension* trigger_failure_extension_;
+
DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
};
-class BootstrapperActive BASE_EMBEDDED {
+class BootstrapperActive V8_FINAL BASE_EMBEDDED {
public:
explicit BootstrapperActive(Bootstrapper* bootstrapper)
: bootstrapper_(bootstrapper) {
@@ -168,20 +152,15 @@ class BootstrapperActive BASE_EMBEDDED {
};
-class NativesExternalStringResource
+class NativesExternalStringResource V8_FINAL
: public v8::String::ExternalAsciiStringResource {
public:
NativesExternalStringResource(Bootstrapper* bootstrapper,
const char* source,
size_t length);
+ virtual const char* data() const V8_OVERRIDE { return data_; }
+ virtual size_t length() const V8_OVERRIDE { return length_; }
- const char* data() const {
- return data_;
- }
-
- size_t length() const {
- return length_;
- }
private:
const char* data_;
size_t length_;
diff --git a/chromium/v8/src/builtins.cc b/chromium/v8/src/builtins.cc
index f9c2708ba12..503d9a6d962 100644
--- a/chromium/v8/src/builtins.cc
+++ b/chromium/v8/src/builtins.cc
@@ -1,43 +1,21 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "builtins.h"
-#include "cpu-profiler.h"
-#include "gdb-jit.h"
-#include "ic-inl.h"
-#include "heap-profiler.h"
-#include "mark-compact.h"
-#include "stub-cache.h"
-#include "vm-state-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/base/once.h"
+#include "src/bootstrapper.h"
+#include "src/builtins.h"
+#include "src/cpu-profiler.h"
+#include "src/gdb-jit.h"
+#include "src/ic-inl.h"
+#include "src/heap-profiler.h"
+#include "src/mark-compact.h"
+#include "src/stub-cache.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -127,34 +105,34 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
#ifdef DEBUG
#define BUILTIN(name) \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ MUST_USE_RESULT static Object* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate); \
- MUST_USE_RESULT static MaybeObject* Builtin_##name( \
+ MUST_USE_RESULT static Object* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
args.Verify(); \
return Builtin_Impl_##name(args, isolate); \
} \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ MUST_USE_RESULT static Object* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate)
#else // For release mode.
#define BUILTIN(name) \
- static MaybeObject* Builtin_impl##name( \
+ static Object* Builtin_impl##name( \
name##ArgumentsType args, Isolate* isolate); \
- static MaybeObject* Builtin_##name( \
+ static Object* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
return Builtin_impl##name(args, isolate); \
} \
- static MaybeObject* Builtin_impl##name( \
+ static Object* Builtin_impl##name( \
name##ArgumentsType args, Isolate* isolate)
#endif
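 // For reference, in release mode BUILTIN(ArrayPush) expands to three pieces:
 // a forward declaration of Builtin_implArrayPush, a Builtin_ArrayPush wrapper
 // that packs (args_length, args_object) into an ArrayPushArgumentsType, and
 // the header of Builtin_implArrayPush itself, whose body follows the macro
 // use. The debug variant additionally routes through args.Verify().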
-static inline bool CalledAsConstructor(Isolate* isolate) {
#ifdef DEBUG
+static inline bool CalledAsConstructor(Isolate* isolate) {
// Calculate the result using a full stack frame iterator and check
// that the state of the stack is as we assume it to be in the
// code below.
@@ -163,7 +141,6 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
it.Advance();
StackFrame* frame = it.frame();
bool reference_result = frame->is_construct();
-#endif
Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
// Because we know fp points to an exit frame we can use the relevant
// part of ExitFrame::ComputeCallerState directly.
@@ -180,6 +157,7 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
ASSERT_EQ(result, reference_result);
return result;
}
+#endif
// ----------------------------------------------------------------------------
@@ -195,34 +173,19 @@ BUILTIN(EmptyFunction) {
}
-static void MoveDoubleElements(FixedDoubleArray* dst,
- int dst_index,
- FixedDoubleArray* src,
- int src_index,
- int len) {
+static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index,
+ FixedDoubleArray* src, int src_index, int len) {
if (len == 0) return;
- OS::MemMove(dst->data_start() + dst_index,
- src->data_start() + src_index,
- len * kDoubleSize);
-}
-
-
-static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
- ASSERT(dst->map() != heap->fixed_cow_array_map());
- MemsetPointer(dst->data_start() + from, heap->the_hole_value(), to - from);
-}
-
-
-static void FillWithHoles(FixedDoubleArray* dst, int from, int to) {
- for (int i = from; i < to; i++) {
- dst->set_the_hole(i);
- }
+ MemMove(dst->data_start() + dst_index, src->data_start() + src_index,
+ len * kDoubleSize);
}
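 // MemMove rather than a plain memcpy matters here: shift and splice pass
 // overlapping source and destination ranges within the same backing store,
 // e.g. MoveDoubleElements(elms, 0, elms, 1, len - 1) in ArrayShift below.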
static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
FixedArrayBase* elms,
int to_trim) {
+ ASSERT(heap->CanMoveObjectStart(elms));
+
Map* map = elms->map();
int entry_size;
if (elms->IsFixedArray()) {
@@ -259,6 +222,8 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
// Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play it safe
   // we still do it.
+ // Since left trimming is only performed on pages which are not concurrently
+  // swept, creating a filler object does not require synchronization.
heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
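   // Schematically, for a FixedArray trimmed by two elements:
   //
   //   before: [ map | length   | e0 | e1 | e2 | ... ]
   //   after:  [ filler        ][ map | length-2 | e2 | ... ]
   //
   // The filler covers the first to_trim slots, and the array header is
   // rewritten at the new, higher start address computed below.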
int new_start_index = to_trim * (entry_size / kPointerSize);
@@ -268,13 +233,12 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
int size_delta = to_trim * entry_size;
- if (heap->marking()->TransferMark(elms->address(),
- elms->address() + size_delta)) {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
+ Address new_start = elms->address() + size_delta;
+ heap->marking()->TransferMark(elms->address(), new_start);
+ heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR);
- FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
- elms->address() + size_delta));
+ FixedArrayBase* new_elms =
+ FixedArrayBase::cast(HeapObject::FromAddress(new_start));
HeapProfiler* profiler = heap->isolate()->heap_profiler();
if (profiler->is_tracking_object_moves()) {
profiler->ObjectMoveEvent(elms->address(),
@@ -288,6 +252,7 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
static bool ArrayPrototypeHasNoElements(Heap* heap,
Context* native_context,
JSObject* array_proto) {
+ DisallowHeapAllocation no_gc;
   // This method depends on the non-writability of Object and Array prototype
   // fields.
if (array_proto->elements() != heap->empty_fixed_array()) return false;
@@ -301,54 +266,63 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
}
+// Returns an empty handle if not applicable.
MUST_USE_RESULT
-static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
- Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
- if (!receiver->IsJSArray()) return NULL;
- JSArray* array = JSArray::cast(receiver);
- if (array->map()->is_observed()) return NULL;
- if (!array->map()->is_extensible()) return NULL;
- HeapObject* elms = array->elements();
+static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
+ Isolate* isolate,
+ Handle<Object> receiver,
+ Arguments* args,
+ int first_added_arg) {
+ if (!receiver->IsJSArray()) return MaybeHandle<FixedArrayBase>();
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ // If there may be elements accessors in the prototype chain, the fast path
+ // cannot be used if there arguments to add to the array.
+ if (args != NULL && array->map()->DictionaryElementsInPrototypeChainOnly()) {
+ return MaybeHandle<FixedArrayBase>();
+ }
+ if (array->map()->is_observed()) return MaybeHandle<FixedArrayBase>();
+ if (!array->map()->is_extensible()) return MaybeHandle<FixedArrayBase>();
+ Handle<FixedArrayBase> elms(array->elements(), isolate);
+ Heap* heap = isolate->heap();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
} else if (map == heap->fixed_cow_array_map()) {
- MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
- if (args == NULL || array->HasFastObjectElements() ||
- !maybe_writable_result->To(&elms)) {
- return maybe_writable_result;
- }
+ elms = JSObject::EnsureWritableFastElements(array);
+ if (args == NULL || array->HasFastObjectElements()) return elms;
} else if (map == heap->fixed_double_array_map()) {
if (args == NULL) return elms;
} else {
- return NULL;
+ return MaybeHandle<FixedArrayBase>();
}
// Need to ensure that the arguments passed in args can be contained in
// the array.
int args_length = args->length();
- if (first_added_arg >= args_length) return array->elements();
+ if (first_added_arg >= args_length) return handle(array->elements(), isolate);
ElementsKind origin_kind = array->map()->elements_kind();
ASSERT(!IsFastObjectElementsKind(origin_kind));
ElementsKind target_kind = origin_kind;
- int arg_count = args->length() - first_added_arg;
- Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
- for (int i = 0; i < arg_count; i++) {
- Object* arg = arguments[i];
- if (arg->IsHeapObject()) {
- if (arg->IsHeapNumber()) {
- target_kind = FAST_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_ELEMENTS;
- break;
+ {
+ DisallowHeapAllocation no_gc;
+ int arg_count = args->length() - first_added_arg;
+ Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
+ for (int i = 0; i < arg_count; i++) {
+ Object* arg = arguments[i];
+ if (arg->IsHeapObject()) {
+ if (arg->IsHeapNumber()) {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_ELEMENTS;
+ break;
+ }
}
}
}
if (target_kind != origin_kind) {
- MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
- return array->elements();
+ JSObject::TransitionElementsKind(array, target_kind);
+ return handle(array->elements(), isolate);
}
return elms;
}
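 // Callers treat the empty MaybeHandle as the "not applicable" signal,
 // replacing the old NULL MaybeObject* convention:
 //
 //   Handle<FixedArrayBase> elms_obj;
 //   if (!maybe_elms_obj.ToHandle(&elms_obj)) {
 //     return CallJsBuiltin(isolate, "ArrayPush", args);  // generic path
 //   }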
@@ -357,6 +331,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
+ DisallowHeapAllocation no_gc;
Context* native_context = heap->isolate()->context()->native_context();
JSObject* array_proto =
JSObject::cast(native_context->array_function()->prototype());
@@ -365,54 +340,56 @@ static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
}
-MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
+MUST_USE_RESULT static Object* CallJsBuiltin(
Isolate* isolate,
const char* name,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
HandleScope handleScope(isolate);
- Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(isolate->native_context()->builtins()),
- name);
+ Handle<Object> js_builtin = Object::GetProperty(
+ isolate,
+ handle(isolate->native_context()->builtins(), isolate),
+ name).ToHandleChecked();
Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
int argc = args.length() - 1;
ScopedVector<Handle<Object> > argv(argc);
for (int i = 0; i < argc; ++i) {
argv[i] = args.at<Object>(i + 1);
}
- bool pending_exception;
- Handle<Object> result = Execution::Call(isolate,
- function,
- args.receiver(),
- argc,
- argv.start(),
- &pending_exception);
- if (pending_exception) return Failure::Exception();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate,
+ function,
+ args.receiver(),
+ argc,
+ argv.start()));
return *result;
}
BUILTIN(ArrayPush) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
- if (maybe_elms_obj == NULL) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ MaybeHandle<FixedArrayBase> maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
+ Handle<FixedArrayBase> elms_obj;
+ if (!maybe_elms_obj.ToHandle(&elms_obj)) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ int len = Smi::cast(array->length())->value();
+ int to_add = args.length() - 1;
+ if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
+ return CallJsBuiltin(isolate, "ArrayPush", args);
+ }
ASSERT(!array->map()->is_observed());
ElementsKind kind = array->GetElementsKind();
if (IsFastSmiOrObjectElementsKind(kind)) {
- FixedArray* elms = FixedArray::cast(elms_obj);
-
- int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
if (to_add == 0) {
return Smi::FromInt(len);
}
@@ -425,16 +402,13 @@ BUILTIN(ArrayPush) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
+ Handle<FixedArray> new_elms =
+ isolate->factory()->NewUninitializedFixedArray(capacity);
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ elms_obj, 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole);
elms = new_elms;
}
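     // The growth policy is roughly 1.5x plus a constant: pushing one element
     // onto a full store of length 100 gives new_length 101 and requests
     // capacity 101 + (101 >> 1) + 16 = 167, keeping repeated pushes
     // amortized O(1).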
@@ -446,18 +420,15 @@ BUILTIN(ArrayPush) {
elms->set(index + len, args[index + 1], mode);
}
- if (elms != array->elements()) {
- array->set_elements(elms);
+ if (*elms != array->elements()) {
+ array->set_elements(*elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
} else {
- int len = Smi::cast(array->length())->value();
int elms_len = elms_obj->length();
-
- int to_add = args.length() - 1;
if (to_add == 0) {
return Smi::FromInt(len);
}
@@ -467,25 +438,25 @@ BUILTIN(ArrayPush) {
int new_length = len + to_add;
- FixedDoubleArray* new_elms;
+ Handle<FixedDoubleArray> new_elms;
if (new_length > elms_len) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedDoubleArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
+ // Create new backing store; since capacity > 0, we can
+ // safely cast to FixedDoubleArray.
+ new_elms = Handle<FixedDoubleArray>::cast(
+ isolate->factory()->NewFixedDoubleArray(capacity));
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ elms_obj, 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole);
+
} else {
// to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
// empty_fixed_array.
- new_elms = FixedDoubleArray::cast(elms_obj);
+ new_elms = Handle<FixedDoubleArray>::cast(elms_obj);
}
// Add the provided values.
@@ -496,8 +467,8 @@ BUILTIN(ArrayPush) {
new_elms->set(index + len, arg->Number());
}
- if (new_elms != array->elements()) {
- array->set_elements(new_elms);
+ if (*new_elms != array->elements()) {
+ array->set_elements(*new_elms);
}
// Set the length.
@@ -508,50 +479,48 @@ BUILTIN(ArrayPush) {
BUILTIN(ArrayPop) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
- JSArray* array = JSArray::cast(receiver);
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ MaybeHandle<FixedArrayBase> maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+ Handle<FixedArrayBase> elms_obj;
+ if (!maybe_elms_obj.ToHandle(&elms_obj)) {
+ return CallJsBuiltin(isolate, "ArrayPop", args);
+ }
+
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
- if (len == 0) return heap->undefined_value();
+ if (len == 0) return isolate->heap()->undefined_value();
ElementsAccessor* accessor = array->GetElementsAccessor();
int new_length = len - 1;
- MaybeObject* maybe_result;
- if (accessor->HasElement(array, array, new_length, elms_obj)) {
- maybe_result = accessor->Get(array, array, new_length, elms_obj);
- } else {
- maybe_result = array->GetPrototype()->GetElement(isolate, len - 1);
+ Handle<Object> element =
+ accessor->Get(array, array, new_length, elms_obj).ToHandleChecked();
+ if (element->IsTheHole()) {
+ return CallJsBuiltin(isolate, "ArrayPop", args);
}
- if (maybe_result->IsFailure()) return maybe_result;
- MaybeObject* maybe_failure =
- accessor->SetLength(array, Smi::FromInt(new_length));
- if (maybe_failure->IsFailure()) return maybe_failure;
- return maybe_result;
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ accessor->SetLength(array, handle(Smi::FromInt(new_length), isolate)));
+ return *element;
}
BUILTIN(ArrayShift) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayShift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ Handle<Object> receiver = args.receiver();
+ MaybeHandle<FixedArrayBase> maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+ Handle<FixedArrayBase> elms_obj;
+ if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+ !IsJSArrayFastElementMovingAllowed(heap,
+ *Handle<JSArray>::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
@@ -559,25 +528,24 @@ BUILTIN(ArrayShift) {
// Get first element
ElementsAccessor* accessor = array->GetElementsAccessor();
- Object* first;
- MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
- if (!maybe_first->To(&first)) return maybe_first;
+ Handle<Object> first =
+ accessor->Get(array, array, 0, elms_obj).ToHandleChecked();
if (first->IsTheHole()) {
- first = heap->undefined_value();
+ return CallJsBuiltin(isolate, "ArrayShift", args);
}
- if (!heap->lo_space()->Contains(elms_obj)) {
- array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
+ if (heap->CanMoveObjectStart(*elms_obj)) {
+ array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
} else {
// Shift the elements.
if (elms_obj->IsFixedArray()) {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, 0, 1, len - 1);
+ heap->MoveElements(*elms, 0, 1, len - 1);
elms->set(len - 1, heap->the_hole_value());
} else {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, 0, elms, 1, len - 1);
+ Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, 0, *elms, 1, len - 1);
elms->set_the_hole(len - 1);
}
}
@@ -585,30 +553,27 @@ BUILTIN(ArrayShift) {
// Set the length.
array->set_length(Smi::FromInt(len - 1));
- return first;
+ return *first;
}
BUILTIN(ArrayUnshift) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ Handle<Object> receiver = args.receiver();
+ MaybeHandle<FixedArrayBase> maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+ Handle<FixedArrayBase> elms_obj;
+ if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+ !IsJSArrayFastElementMovingAllowed(heap,
+ *Handle<JSArray>::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
if (!array->HasFastSmiOrObjectElements()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
-
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
int new_length = len + to_add;
@@ -616,31 +581,32 @@ BUILTIN(ArrayUnshift) {
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(&args, 1, to_add,
- DONT_ALLOW_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
+ if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ }
+
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+
+ JSObject::EnsureCanContainElements(array, &args, 1, to_add,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_elms->To(&new_elms)) return maybe_elms;
+ Handle<FixedArray> new_elms =
+ isolate->factory()->NewUninitializedFixedArray(capacity);
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, to_add,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ elms, 0, kind, new_elms, to_add,
+ ElementsAccessor::kCopyToEndAndInitializeToHole);
elms = new_elms;
- array->set_elements(elms);
+ array->set_elements(*elms);
} else {
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, to_add, 0, len);
+ heap->MoveElements(*elms, to_add, 0, len);
}
// Add the provided values.
@@ -657,88 +623,98 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms;
+ Handle<Object> receiver = args.receiver();
int len = -1;
- if (receiver->IsJSArray()) {
- JSArray* array = JSArray::cast(receiver);
- if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
-
- if (array->HasFastElements()) {
- elms = array->elements();
- } else {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
+ int relative_start = 0;
+ int relative_end = 0;
+ {
+ DisallowHeapAllocation no_gc;
+ if (receiver->IsJSArray()) {
+ JSArray* array = JSArray::cast(*receiver);
+ if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
- len = Smi::cast(array->length())->value();
- } else {
- // Array.slice(arguments, ...) is quite a common idiom (notably more
- // than 50% of invocations in Web apps). Treat it in C++ as well.
- Map* arguments_map =
- isolate->context()->native_context()->arguments_boilerplate()->map();
-
- bool is_arguments_object_with_fast_elements =
- receiver->IsJSObject() &&
- JSObject::cast(receiver)->map() == arguments_map;
- if (!is_arguments_object_with_fast_elements) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- JSObject* object = JSObject::cast(receiver);
+ if (!array->HasFastElements()) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
- if (object->HasFastElements()) {
- elms = object->elements();
+ len = Smi::cast(array->length())->value();
} else {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
- if (!len_obj->IsSmi()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- len = Smi::cast(len_obj)->value();
- if (len > elms->length()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- }
-
- JSObject* object = JSObject::cast(receiver);
+ // Array.slice(arguments, ...) is quite a common idiom (notably more
+ // than 50% of invocations in Web apps). Treat it in C++ as well.
+ Map* arguments_map = isolate->context()->native_context()->
+ sloppy_arguments_boilerplate()->map();
+
+ bool is_arguments_object_with_fast_elements =
+ receiver->IsJSObject() &&
+ JSObject::cast(*receiver)->map() == arguments_map;
+ if (!is_arguments_object_with_fast_elements) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ JSObject* object = JSObject::cast(*receiver);
- ASSERT(len >= 0);
- int n_arguments = args.length() - 1;
+ if (!object->HasFastElements()) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
- // Note carefully choosen defaults---if argument is missing,
- // it's undefined which gets converted to 0 for relative_start
- // and to len for relative_end.
- int relative_start = 0;
- int relative_end = len;
- if (n_arguments > 0) {
- Object* arg1 = args[1];
- if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
- } else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
- if (start < kMinInt || start > kMaxInt) {
+ Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+ if (!len_obj->IsSmi()) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ len = Smi::cast(len_obj)->value();
+ if (len > object->elements()->length()) {
+ AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
- } else if (!arg1->IsUndefined()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
}
- if (n_arguments > 1) {
- Object* arg2 = args[2];
- if (arg2->IsSmi()) {
- relative_end = Smi::cast(arg2)->value();
- } else if (arg2->IsHeapNumber()) {
- double end = HeapNumber::cast(arg2)->value();
- if (end < kMinInt || end > kMaxInt) {
+
+ ASSERT(len >= 0);
+ int n_arguments = args.length() - 1;
+
+    // Note the carefully chosen defaults---if an argument is missing,
+    // it's undefined, which gets converted to 0 for relative_start
+    // and to len for relative_end.
+ relative_start = 0;
+ relative_end = len;
+ if (n_arguments > 0) {
+ Object* arg1 = args[1];
+ if (arg1->IsSmi()) {
+ relative_start = Smi::cast(arg1)->value();
+ } else if (arg1->IsHeapNumber()) {
+ double start = HeapNumber::cast(arg1)->value();
+ if (start < kMinInt || start > kMaxInt) {
+ AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- relative_end = std::isnan(end) ? 0 : static_cast<int>(end);
- } else if (!arg2->IsUndefined()) {
+ relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
+ } else if (!arg1->IsUndefined()) {
+ AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArraySlice", args);
}
+ if (n_arguments > 1) {
+ Object* arg2 = args[2];
+ if (arg2->IsSmi()) {
+ relative_end = Smi::cast(arg2)->value();
+ } else if (arg2->IsHeapNumber()) {
+ double end = HeapNumber::cast(arg2)->value();
+ if (end < kMinInt || end > kMaxInt) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ relative_end = std::isnan(end) ? 0 : static_cast<int>(end);
+ } else if (!arg2->IsUndefined()) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ }
}
}
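   // With these defaults, a.slice() copies the whole array (relative_start 0,
   // relative_end len), while a.slice(-2) stores -2 in relative_start here
   // and is clamped against len when k and final are computed below.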
@@ -753,8 +729,12 @@ BUILTIN(ArraySlice) {
   // Calculate the length of the result array.
int result_len = Max(final - k, 0);
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
+ Handle<FixedArrayBase> elms(object->elements(), isolate);
+
ElementsKind kind = object->GetElementsKind();
if (IsHoleyElementsKind(kind)) {
+ DisallowHeapAllocation no_gc;
bool packed = true;
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
for (int i = k; i < final; i++) {
@@ -766,44 +746,37 @@ BUILTIN(ArraySlice) {
if (packed) {
kind = GetPackedElementsKind(kind);
} else if (!receiver->IsJSArray()) {
+ AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
- JSArray* result_array;
- MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
- result_len,
- result_len);
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(kind, result_len, result_len);
DisallowHeapAllocation no_gc;
- if (result_len == 0) return maybe_array;
- if (!maybe_array->To(&result_array)) return maybe_array;
+ if (result_len == 0) return *result_array;
ElementsAccessor* accessor = object->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, k, kind, result_array->elements(), 0, result_len, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
- return result_array;
+ accessor->CopyElements(
+ elms, k, kind, handle(result_array->elements(), isolate), 0, result_len);
+ return *result_array;
}
BUILTIN(ArraySplice) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
- if (maybe_elms == NULL) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ Handle<Object> receiver = args.receiver();
+ MaybeHandle<FixedArrayBase> maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
+ Handle<FixedArrayBase> elms_obj;
+ if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+ !IsJSArrayFastElementMovingAllowed(heap,
+ *Handle<JSArray>::cast(receiver))) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
@@ -812,16 +785,19 @@ BUILTIN(ArraySplice) {
int relative_start = 0;
if (n_arguments > 0) {
+ DisallowHeapAllocation no_gc;
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
} else if (arg1->IsHeapNumber()) {
double start = HeapNumber::cast(arg1)->value();
if (start < kMinInt || start > kMaxInt) {
+ AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArraySplice", args);
}
relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
+ AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArraySplice", args);
}
}
@@ -840,10 +816,12 @@ BUILTIN(ArraySplice) {
} else {
int value = 0; // ToInteger(undefined) == 0
if (n_arguments > 1) {
+ DisallowHeapAllocation no_gc;
Object* arg2 = args[2];
if (arg2->IsSmi()) {
value = Smi::cast(arg2)->value();
} else {
+ AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArraySplice", args);
}
}
@@ -861,72 +839,83 @@ BUILTIN(ArraySplice) {
}
if (new_length == 0) {
- MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
elms_obj, elements_kind, actual_delete_count);
- if (maybe_array->IsFailure()) return maybe_array;
array->set_elements(heap->empty_fixed_array());
array->set_length(Smi::FromInt(0));
- return maybe_array;
+ return *result;
}
- JSArray* result_array = NULL;
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- actual_delete_count,
- actual_delete_count);
- if (!maybe_array->To(&result_array)) return maybe_array;
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(elements_kind,
+ actual_delete_count,
+ actual_delete_count);
if (actual_delete_count > 0) {
DisallowHeapAllocation no_gc;
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, actual_start, elements_kind, result_array->elements(),
- 0, actual_delete_count, elms_obj);
- // Cannot fail since the origin and target array are of the same elements
- // kind.
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ elms_obj, actual_start, elements_kind,
+ handle(result_array->elements(), isolate), 0, actual_delete_count);
}
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
+ const bool trim_array = !heap->lo_space()->Contains(*elms_obj) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, delta, elms, 0, actual_start);
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, delta, *elms, 0, actual_start);
} else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, delta, 0, actual_start);
+ heap->MoveElements(*elms, delta, 0, actual_start);
}
- elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
-
+ if (heap->CanMoveObjectStart(*elms_obj)) {
+ // On the fast path we move the start of the object in memory.
+ elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta), isolate);
+ } else {
+ // This is the slow path. We are going to move the elements to the left
+ // by copying them. For trimmed values we store the hole.
+ if (elms_obj->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
+ elms->FillWithHoles(len - delta, len);
+ } else {
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(*elms, 0, delta, len - delta);
+ elms->FillWithHoles(len - delta, len);
+ }
+ }
elms_changed = true;
} else {
if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, actual_start + item_count,
+ *elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
- FillWithHoles(elms, new_length, len);
+ elms->FillWithHoles(new_length, len);
} else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, actual_start + item_count,
+ heap->MoveElements(*elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
- FillWithHoles(heap, elms, new_length, len);
+ elms->FillWithHoles(new_length, len);
}
}
} else if (item_count > actual_delete_count) {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
@@ -935,9 +924,8 @@ BUILTIN(ArraySplice) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
+ Handle<FixedArray> new_elms =
+ isolate->factory()->NewUninitializedFixedArray(capacity);
DisallowHeapAllocation no_gc;
@@ -945,30 +933,26 @@ BUILTIN(ArraySplice) {
ElementsAccessor* accessor = array->GetElementsAccessor();
if (actual_start > 0) {
// Copy the part before actual_start as is.
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0, actual_start, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ elms, 0, kind, new_elms, 0, actual_start);
}
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, actual_start + actual_delete_count, kind, new_elms,
- actual_start + item_count,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ elms, actual_start + actual_delete_count, kind,
+ new_elms, actual_start + item_count,
+ ElementsAccessor::kCopyToEndAndInitializeToHole);
elms_obj = new_elms;
elms_changed = true;
} else {
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, actual_start + item_count,
+ heap->MoveElements(*elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
}
}
if (IsFastDoubleElementsKind(elements_kind)) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
for (int k = actual_start; k < actual_start + item_count; k++) {
Object* arg = args[3 + k - actual_start];
if (arg->IsSmi()) {
@@ -978,7 +962,7 @@ BUILTIN(ArraySplice) {
}
}
} else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
@@ -987,100 +971,104 @@ BUILTIN(ArraySplice) {
}
if (elms_changed) {
- array->set_elements(elms_obj);
+ array->set_elements(*elms_obj);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
- return result_array;
+ return *result_array;
}
BUILTIN(ArrayConcat) {
- Heap* heap = isolate->heap();
- Context* native_context = isolate->context()->native_context();
- JSObject* array_proto =
- JSObject::cast(native_context->array_function()->prototype());
- if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
- return CallJsBuiltin(isolate, "ArrayConcat", args);
- }
+ HandleScope scope(isolate);
- // Iterate through all the arguments performing checks
- // and calculating total length.
int n_arguments = args.length();
int result_len = 0;
ElementsKind elements_kind = GetInitialFastElementsKind();
bool has_double = false;
- bool is_holey = false;
- for (int i = 0; i < n_arguments; i++) {
- Object* arg = args[i];
- if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastElements() ||
- JSArray::cast(arg)->GetPrototype() != array_proto) {
- return CallJsBuiltin(isolate, "ArrayConcat", args);
+ {
+ DisallowHeapAllocation no_gc;
+ Heap* heap = isolate->heap();
+ Context* native_context = isolate->context()->native_context();
+ JSObject* array_proto =
+ JSObject::cast(native_context->array_function()->prototype());
+ if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArrayConcatJS", args);
}
- int len = Smi::cast(JSArray::cast(arg)->length())->value();
-
- // We shouldn't overflow when adding another len.
- const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
- STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
- USE(kHalfOfMaxInt);
- result_len += len;
- ASSERT(result_len >= 0);
- if (result_len > FixedDoubleArray::kMaxLength) {
- return CallJsBuiltin(isolate, "ArrayConcat", args);
- }
+ // Iterate through all the arguments performing checks
+ // and calculating total length.
+ bool is_holey = false;
+ for (int i = 0; i < n_arguments; i++) {
+ Object* arg = args[i];
+ if (!arg->IsJSArray() ||
+ !JSArray::cast(arg)->HasFastElements() ||
+ JSArray::cast(arg)->GetPrototype() != array_proto) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArrayConcatJS", args);
+ }
+ int len = Smi::cast(JSArray::cast(arg)->length())->value();
+
+ // We shouldn't overflow when adding another len.
+ const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+ STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
+ USE(kHalfOfMaxInt);
+ result_len += len;
+ ASSERT(result_len >= 0);
+
+ if (result_len > FixedDoubleArray::kMaxLength) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsBuiltin(isolate, "ArrayConcatJS", args);
+ }
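+      // Spelled out, the overflow argument above: with a 32-bit int,
+      // kHalfOfMaxInt is 2^30. result_len passed the kMaxLength check on
+      // every earlier iteration and each len is bounded by kMaxLength, so
+      // both operands of the addition stay below 2^30 and the signed sum
+      // cannot overflow.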
- ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
- has_double = has_double || IsFastDoubleElementsKind(arg_kind);
- is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
- if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
- elements_kind = arg_kind;
+ ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
+ has_double = has_double || IsFastDoubleElementsKind(arg_kind);
+ is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
+ if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
+ elements_kind = arg_kind;
+ }
}
+ if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
}
- if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
-
// If a double array is concatted into a fast elements array, the fast
// elements array needs to be initialized to contain proper holes, since
// boxing doubles may cause incremental marking.
ArrayStorageAllocationMode mode =
has_double && IsFastObjectElementsKind(elements_kind)
? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
- JSArray* result_array;
- // Allocate result.
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- result_len,
- result_len,
- mode);
- if (!maybe_array->To(&result_array)) return maybe_array;
- if (result_len == 0) return result_array;
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(elements_kind,
+ result_len,
+ result_len,
+ mode);
+ if (result_len == 0) return *result_array;
int j = 0;
- FixedArrayBase* storage = result_array->elements();
+ Handle<FixedArrayBase> storage(result_array->elements(), isolate);
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
for (int i = 0; i < n_arguments; i++) {
+ // TODO(ishell): It is crucial to keep |array| as a raw pointer to avoid
+ // performance degradation. Revisit this later.
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
ElementsKind from_kind = array->GetElementsKind();
if (len > 0) {
- MaybeObject* maybe_failure =
- accessor->CopyElements(array, 0, from_kind, storage, j, len);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ accessor->CopyElements(array, 0, from_kind, storage, j, len);
j += len;
}
}
ASSERT(j == result_len);
- return result_array;
+ return *result_array;
}
// -----------------------------------------------------------------------------
-// Strict mode poison pills
+// Generator and strict mode poison pills
BUILTIN(StrictModePoisonPill) {
@@ -1090,6 +1078,13 @@ BUILTIN(StrictModePoisonPill) {
}
+BUILTIN(GeneratorPoisonPill) {
+ HandleScope scope(isolate);
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "generator_poison_pill", HandleVector<Object>(NULL, 0)));
+}
+
+
// -----------------------------------------------------------------------------
//
@@ -1153,7 +1148,7 @@ static inline Object* TypeCheck(Heap* heap,
template <bool is_construct>
-MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
+MUST_USE_RESULT static Object* HandleApiCallHelper(
BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
ASSERT(is_construct == CalledAsConstructor(isolate));
Heap* heap = isolate->heap();
@@ -1162,18 +1157,25 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
Handle<JSFunction> function = args.called_function();
ASSERT(function->shared()->IsApiFunction());
- FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
+ Handle<FunctionTemplateInfo> fun_data(
+ function->shared()->get_api_func_data(), isolate);
if (is_construct) {
- Handle<FunctionTemplateInfo> desc(fun_data, isolate);
- bool pending_exception = false;
- isolate->factory()->ConfigureInstance(
- desc, Handle<JSObject>::cast(args.receiver()), &pending_exception);
- ASSERT(isolate->has_pending_exception() == pending_exception);
- if (pending_exception) return Failure::Exception();
- fun_data = *desc;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, fun_data,
+ isolate->factory()->ConfigureInstance(
+ fun_data, Handle<JSObject>::cast(args.receiver())));
+ }
+
+ SharedFunctionInfo* shared = function->shared();
+ if (shared->strict_mode() == SLOPPY && !shared->native()) {
+ Object* recv = args[0];
+ ASSERT(!recv->IsNull());
+ if (recv->IsUndefined()) {
+ args[0] = function->context()->global_object()->global_receiver();
+ }
}
- Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data);
+ Object* raw_holder = TypeCheck(heap, args.length(), &args[0], *fun_data);
if (raw_holder->IsNull()) {
// This function cannot be called with the given receiver. Abort!
@@ -1211,7 +1213,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
result->VerifyApiCallResultType();
}
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
if (!is_construct || result->IsJSObject()) return result;
}
@@ -1232,7 +1234,7 @@ BUILTIN(HandleApiCallConstruct) {
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
-MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
+MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
Isolate* isolate,
bool is_construct_call,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
@@ -1281,7 +1283,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
}
}
// Check for exceptions and return result.
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
@@ -1300,34 +1302,18 @@ BUILTIN(HandleApiCallAsConstructor) {
}
-static void Generate_LoadIC_Initialize(MacroAssembler* masm) {
- LoadIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_LoadIC_PreMonomorphic(MacroAssembler* masm) {
- LoadIC::GeneratePreMonomorphic(masm);
-}
-
-
static void Generate_LoadIC_Miss(MacroAssembler* masm) {
LoadIC::GenerateMiss(masm);
}
-static void Generate_LoadIC_Megamorphic(MacroAssembler* masm) {
- LoadIC::GenerateMegamorphic(masm);
-}
-
-
static void Generate_LoadIC_Normal(MacroAssembler* masm) {
LoadIC::GenerateNormal(masm);
}
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- LoadStubCompiler::GenerateLoadViaGetter(
- masm, LoadStubCompiler::registers()[0], Handle<JSFunction>());
+ LoadStubCompiler::GenerateLoadViaGetterForDeopt(masm);
}
@@ -1371,8 +1357,8 @@ static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
}
-static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
- KeyedLoadIC::GenerateNonStrictArguments(masm);
+static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateSloppyArguments(masm);
}
@@ -1381,26 +1367,6 @@ static void Generate_StoreIC_Slow(MacroAssembler* masm) {
}
-static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
- StoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
- StoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_StoreIC_PreMonomorphic(MacroAssembler* masm) {
- StoreIC::GeneratePreMonomorphic(masm);
-}
-
-
-static void Generate_StoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
- StoreIC::GeneratePreMonomorphic(masm);
-}
-
-
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
StoreIC::GenerateMiss(masm);
}
@@ -1411,40 +1377,18 @@ static void Generate_StoreIC_Normal(MacroAssembler* masm) {
}
-static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm,
- StoreIC::ComputeExtraICState(kNonStrictMode));
-}
-
-
-static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm,
- StoreIC::ComputeExtraICState(kStrictMode));
-}
-
-
static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
- StoreStubCompiler::GenerateStoreViaSetter(masm, Handle<JSFunction>());
-}
-
-
-static void Generate_StoreIC_Generic(MacroAssembler* masm) {
- StoreIC::GenerateRuntimeSetProperty(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_Generic_Strict(MacroAssembler* masm) {
- StoreIC::GenerateRuntimeSetProperty(masm, kStrictMode);
+ StoreStubCompiler::GenerateStoreViaSetterForDeopt(masm);
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
+ KeyedStoreIC::GenerateGeneric(masm, SLOPPY);
}
static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kStrictMode);
+ KeyedStoreIC::GenerateGeneric(masm, STRICT);
}
@@ -1478,78 +1422,75 @@ static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
}
-static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
- KeyedStoreIC::GenerateNonStrictArguments(masm);
+static void Generate_KeyedStoreIC_SloppyArguments(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateSloppyArguments(masm);
+}
+
+
+static void Generate_CallICStub_DebugBreak(MacroAssembler* masm) {
+ DebugCodegen::GenerateCallICStubDebugBreak(masm);
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateLoadICDebugBreak(masm);
+ DebugCodegen::GenerateLoadICDebugBreak(masm);
}
static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateStoreICDebugBreak(masm);
+ DebugCodegen::GenerateStoreICDebugBreak(masm);
}
static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateKeyedLoadICDebugBreak(masm);
+ DebugCodegen::GenerateKeyedLoadICDebugBreak(masm);
}
static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateKeyedStoreICDebugBreak(masm);
+ DebugCodegen::GenerateKeyedStoreICDebugBreak(masm);
}
static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateCompareNilICDebugBreak(masm);
+ DebugCodegen::GenerateCompareNilICDebugBreak(masm);
}
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateReturnDebugBreak(masm);
+ DebugCodegen::GenerateReturnDebugBreak(masm);
}
static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateCallFunctionStubDebugBreak(masm);
-}
-
-
-static void Generate_CallFunctionStub_Recording_DebugBreak(
- MacroAssembler* masm) {
- Debug::GenerateCallFunctionStubRecordDebugBreak(masm);
+ DebugCodegen::GenerateCallFunctionStubDebugBreak(masm);
}
static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateCallConstructStubDebugBreak(masm);
+ DebugCodegen::GenerateCallConstructStubDebugBreak(masm);
}
static void Generate_CallConstructStub_Recording_DebugBreak(
MacroAssembler* masm) {
- Debug::GenerateCallConstructStubRecordDebugBreak(masm);
+ DebugCodegen::GenerateCallConstructStubRecordDebugBreak(masm);
}
static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateSlotDebugBreak(masm);
+ DebugCodegen::GenerateSlotDebugBreak(masm);
}
static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
- Debug::GeneratePlainReturnLiveEdit(masm);
+ DebugCodegen::GeneratePlainReturnLiveEdit(masm);
}
static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
- Debug::GenerateFrameDropperLiveEdit(masm);
+ DebugCodegen::GenerateFrameDropperLiveEdit(masm);
}
-#endif
Builtins::Builtins() : initialized_(false) {
@@ -1594,11 +1535,11 @@ struct BuiltinDesc {
class BuiltinFunctionTable {
public:
BuiltinDesc* functions() {
- CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
+ base::CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
return functions_;
}
- OnceType once_;
+ base::OnceType once_;
BuiltinDesc functions_[Builtins::builtin_count + 1];
friend class Builtins;
@@ -1645,9 +1586,7 @@ void Builtins::InitBuiltinFunctionTable() {
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
- functions->flags = Code::ComputeFlags( \
- Code::HANDLER, MONOMORPHIC, kNoExtraICState, \
- Code::NORMAL, Code::kind); \
+ functions->flags = Code::ComputeHandlerFlags(Code::kind); \
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
@@ -1663,7 +1602,6 @@ void Builtins::InitBuiltinFunctionTable() {
void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
ASSERT(!initialized_);
- Heap* heap = isolate->heap();
// Create a scope for the handles in the builtins.
HandleScope scope(isolate);
@@ -1673,7 +1611,13 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
- union { int force_alignment; byte buffer[8*KB]; } u;
+#ifdef DEBUG
+ // We can generate a lot of debug code on Arm64.
+ const size_t buffer_size = 32*KB;
+#else
+ const size_t buffer_size = 8*KB;
+#endif
+ union { int force_alignment; byte buffer[buffer_size]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
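[Editor's note] Two things happen in this hunk: the scratch buffer grows to 32 KB in DEBUG builds (Arm64 debug code is much larger), and the union-with-int member keeps forcing the byte buffer to int alignment without any alignment macros. A standalone sketch of the alignment idiom, with hypothetical names:

// Sketch of the idiom above: the unused int member exists only to force
// the byte buffer to int alignment. In C++11 one would write alignas(int).
#include <stddef.h>

typedef unsigned char byte;
const size_t kBufferSize = 8 * 1024;

union AlignedBuffer {
  int force_alignment;      // never read; only aligns the union
  byte buffer[kBufferSize]; // code is emitted into this storage
};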
@@ -1692,32 +1636,18 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
CodeDesc desc;
masm.GetCode(&desc);
Code::Flags flags = functions[i].flags;
- Object* code = NULL;
- {
- // During startup it's OK to always allocate and defer GC to later.
- // This simplifies things because we don't need to retry.
- AlwaysAllocateScope __scope__;
- { MaybeObject* maybe_code =
- heap->CreateCode(desc, flags, masm.CodeObject());
- if (!maybe_code->ToObject(&code)) {
- v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
- }
- }
- }
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, flags, masm.CodeObject());
// Log the event and add the code to the builtins array.
PROFILE(isolate,
- CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code),
- functions[i].s_name));
- GDBJIT(AddCode(GDBJITInterface::BUILTIN,
- functions[i].s_name,
- Code::cast(code)));
- builtins_[i] = code;
+ CodeCreateEvent(Logger::BUILTIN_TAG, *code, functions[i].s_name));
+ GDBJIT(AddCode(GDBJITInterface::BUILTIN, functions[i].s_name, *code));
+ builtins_[i] = *code;
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
PrintF(trace_scope.file(), "Builtin: %s\n", functions[i].s_name);
- Code::cast(code)->Disassemble(functions[i].s_name, trace_scope.file());
+ code->Disassemble(functions[i].s_name, trace_scope.file());
PrintF(trace_scope.file(), "\n");
}
#endif
@@ -1758,12 +1688,12 @@ const char* Builtins::Lookup(byte* pc) {
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+ masm->TailCallRuntime(Runtime::kHiddenInterrupt, 0, 1);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+ masm->TailCallRuntime(Runtime::kHiddenStackGuard, 0, 1);
}
diff --git a/chromium/v8/src/builtins.h b/chromium/v8/src/builtins.h
index edc13f7511a..a2ed12db003 100644
--- a/chromium/v8/src/builtins.h
+++ b/chromium/v8/src/builtins.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_BUILTINS_H_
#define V8_BUILTINS_H_
@@ -82,15 +59,14 @@ enum BuiltinExtraArguments {
V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \
\
- V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS)
+ V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS) \
+ V(GeneratorPoisonPill, NO_EXTRA_ARGUMENTS)
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
- V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
+ V(InOptimizationQueue, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
@@ -100,11 +76,11 @@ enum BuiltinExtraArguments {
kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
- V(LazyCompile, BUILTIN, UNINITIALIZED, \
+ V(CompileUnoptimized, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
- V(LazyRecompile, BUILTIN, UNINITIALIZED, \
+ V(CompileOptimized, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
- V(ConcurrentRecompile, BUILTIN, UNINITIALIZED, \
+ V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
@@ -125,15 +101,8 @@ enum BuiltinExtraArguments {
kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
- V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
- kNoExtraICState) \
- V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
- kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
kNoExtraICState) \
- \
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
@@ -144,25 +113,9 @@ enum BuiltinExtraArguments {
kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
kNoExtraICState) \
- V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \
+ V(KeyedLoadIC_SloppyArguments, KEYED_LOAD_IC, MONOMORPHIC, \
kNoExtraICState) \
\
- V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
- kNoExtraICState) \
- V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
- kNoExtraICState) \
- V(StoreIC_Generic, STORE_IC, GENERIC, \
- kNoExtraICState) \
- V(StoreIC_Generic_Strict, STORE_IC, GENERIC, \
- StoreIC::kStrictModeState) \
- V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
- StoreIC::kStrictModeState) \
- V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
- StoreIC::kStrictModeState) \
- V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
- StoreIC::kStrictModeState) \
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
StoreIC::kStrictModeState) \
\
@@ -179,8 +132,8 @@ enum BuiltinExtraArguments {
StoreIC::kStrictModeState) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
StoreIC::kStrictModeState) \
- V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
- kNoExtraICState) \
+ V(KeyedStoreIC_SloppyArguments, KEYED_STORE_IC, MONOMORPHIC, \
+ kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
@@ -220,19 +173,18 @@ enum BuiltinExtraArguments {
V(LoadIC_Normal, LOAD_IC) \
V(StoreIC_Normal, STORE_IC)
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
V(Return_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
- V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
+ V(CallICStub_DebugBreak, CALL_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_STUB, \
DEBUG_BREAK) \
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_STUB, \
@@ -249,9 +201,6 @@ enum BuiltinExtraArguments {
DEBUG_BREAK) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK)
-#else
-#define BUILTIN_LIST_DEBUG_A(V)
-#endif
// Define list of builtins implemented in JavaScript.
#define BUILTINS_LIST_JS(V) \
@@ -283,7 +232,7 @@ enum BuiltinExtraArguments {
V(STRING_ADD_LEFT, 1) \
V(STRING_ADD_RIGHT, 1) \
V(APPLY_PREPARE, 1) \
- V(APPLY_OVERFLOW, 1)
+ V(STACK_OVERFLOW, 1)
class BuiltinFunctionTable;
class ObjectVisitor;
@@ -385,15 +334,14 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
- static void Generate_InRecompileQueue(MacroAssembler* masm);
- static void Generate_ConcurrentRecompile(MacroAssembler* masm);
- static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
+ static void Generate_CompileUnoptimized(MacroAssembler* masm);
+ static void Generate_InOptimizationQueue(MacroAssembler* masm);
+ static void Generate_CompileOptimized(MacroAssembler* masm);
+ static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
- static void Generate_LazyCompile(MacroAssembler* masm);
- static void Generate_LazyRecompile(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
diff --git a/chromium/v8/src/bytecodes-irregexp.h b/chromium/v8/src/bytecodes-irregexp.h
index c7cc66e5275..04b9740acbe 100644
--- a/chromium/v8/src/bytecodes-irregexp.h
+++ b/chromium/v8/src/bytecodes-irregexp.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_BYTECODES_IRREGEXP_H_
diff --git a/chromium/v8/src/cached-powers.cc b/chromium/v8/src/cached-powers.cc
index fbfaf26159a..f5720875e25 100644
--- a/chromium/v8/src/cached-powers.cc
+++ b/chromium/v8/src/cached-powers.cc
@@ -1,38 +1,15 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdarg.h>
#include <limits.h>
#include <cmath>
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "cached-powers.h"
+#include "include/v8stdint.h"
+#include "src/globals.h"
+#include "src/checks.h"
+#include "src/cached-powers.h"
namespace v8 {
namespace internal {
@@ -133,7 +110,10 @@ static const CachedPower kCachedPowers[] = {
{V8_2PART_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
};
+#ifdef DEBUG
static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
+#endif
+
static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
// Difference between the decimal exponents in the table above.
@@ -149,7 +129,7 @@ void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
int kQ = DiyFp::kSignificandSize;
// Some platforms return incorrect sign on 0 result. We can ignore that here,
// which means we can avoid depending on platform.h.
- double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
+ double k = std::ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
int foo = kCachedPowersOffset;
int index =
(foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
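[Editor's note] kD_1_LOG2_10 is 1/log2(10) ~= 0.30103, so k estimates the decimal exponent of 2^(min_exponent + kQ - 1); the std::ceil change merely qualifies the call now that <cmath> is used. A hedged worked example of the index computation; kQ = 64 (DiyFp's significand width) and the table spacing of 8 are assumptions taken from the upstream double-conversion sources, while the other constants appear in the hunk above:

// Sketch: reproduce the cached-powers index computation for one input.
#include <cmath>
#include <cstdio>

int main() {
  const double kD_1_LOG2_10 = 0.30102999566398114;  // 1 / lg(10)
  const int kCachedPowersOffset = 348;
  const int kDecimalExponentDistance = 8;  // assumed table spacing
  const int kQ = 64;                       // assumed DiyFp::kSignificandSize

  int min_exponent = -60;
  double k = std::ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);  // ceil(0.903) = 1
  int index =
      (kCachedPowersOffset + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
  std::printf("k = %.0f, index = %d\n", k, index);  // prints k = 1, index = 44
  return 0;
}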
diff --git a/chromium/v8/src/cached-powers.h b/chromium/v8/src/cached-powers.h
index 88df22260c3..dc681bfd80e 100644
--- a/chromium/v8/src/cached-powers.h
+++ b/chromium/v8/src/cached-powers.h
@@ -1,34 +1,11 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CACHED_POWERS_H_
#define V8_CACHED_POWERS_H_
-#include "diy-fp.h"
+#include "src/diy-fp.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/char-predicates-inl.h b/chromium/v8/src/char-predicates-inl.h
index dee9ccd3815..19d96db976b 100644
--- a/chromium/v8/src/char-predicates-inl.h
+++ b/chromium/v8/src/char-predicates-inl.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CHAR_PREDICATES_INL_H_
#define V8_CHAR_PREDICATES_INL_H_
-#include "char-predicates.h"
+#include "src/char-predicates.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/char-predicates.h b/chromium/v8/src/char-predicates.h
index 767ad6513af..b7c5d42320f 100644
--- a/chromium/v8/src/char-predicates.h
+++ b/chromium/v8/src/char-predicates.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CHAR_PREDICATES_H_
#define V8_CHAR_PREDICATES_H_
-#include "unicode.h"
+#include "src/unicode.h"
namespace v8 {
namespace internal {
@@ -66,6 +43,27 @@ struct IdentifierPart {
}
};
+
+// WhiteSpace according to ECMA-262 5.1, 7.2.
+struct WhiteSpace {
+ static inline bool Is(uc32 c) {
+ return c == 0x0009 || // <TAB>
+ c == 0x000B || // <VT>
+ c == 0x000C || // <FF>
+ c == 0xFEFF || // <BOM>
+ // \u0020 and \u00A0 are included in unibrow::WhiteSpace.
+ unibrow::WhiteSpace::Is(c);
+ }
+};
+
+
+// WhiteSpace and LineTerminator according to ECMA-262 5.1, 7.2 and 7.3.
+struct WhiteSpaceOrLineTerminator {
+ static inline bool Is(uc32 c) {
+ return WhiteSpace::Is(c) || unibrow::LineTerminator::Is(c);
+ }
+};
+
} } // namespace v8::internal
#endif // V8_CHAR_PREDICATES_H_
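[Editor's note] The new WhiteSpace predicate checks the JavaScript-specific code points (<TAB>, <VT>, <FF>, <BOM>) before falling back to the Unicode table lookup. A self-contained usage sketch; the unibrow tables are stubbed here (with only a few representative code points) since the real ones live in src/unicode.h:

// Sketch: the two predicates driving a tiny skip-whitespace loop,
// as a scanner might use them. unibrow stubs are illustrative only.
typedef unsigned int uc32;

namespace unibrow {
struct WhiteSpace {  // stub: the real table covers all Unicode Zs characters
  static bool Is(uc32 c) { return c == 0x0020 || c == 0x00A0; }
};
struct LineTerminator {  // stub: the real table also covers U+2028/U+2029
  static bool Is(uc32 c) { return c == 0x000A || c == 0x000D; }
};
}  // namespace unibrow

struct WhiteSpace {
  static inline bool Is(uc32 c) {
    return c == 0x0009 || c == 0x000B || c == 0x000C || c == 0xFEFF ||
           unibrow::WhiteSpace::Is(c);
  }
};

struct WhiteSpaceOrLineTerminator {
  static inline bool Is(uc32 c) {
    return WhiteSpace::Is(c) || unibrow::LineTerminator::Is(c);
  }
};

const char* SkipWhiteSpace(const char* p) {
  while (*p && WhiteSpaceOrLineTerminator::Is(static_cast<uc32>(*p))) ++p;
  return p;
}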
diff --git a/chromium/v8/src/checks.cc b/chromium/v8/src/checks.cc
index a4514bf9650..23c9305f8cd 100644
--- a/chromium/v8/src/checks.cc
+++ b/chromium/v8/src/checks.cc
@@ -1,77 +1,80 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "checks.h"
+#include "src/checks.h"
#if V8_LIBC_GLIBC || V8_OS_BSD
# include <cxxabi.h>
# include <execinfo.h>
+#elif V8_OS_QNX
+# include <backtrace.h>
#endif // V8_LIBC_GLIBC || V8_OS_BSD
#include <stdio.h>
-#include "platform.h"
-#include "v8.h"
+#include "src/platform.h"
+#include "src/v8.h"
+namespace v8 {
+namespace internal {
+
+intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
// Attempts to dump a backtrace (if supported).
-static V8_INLINE void DumpBacktrace() {
+void DumpBacktrace() {
#if V8_LIBC_GLIBC || V8_OS_BSD
void* trace[100];
int size = backtrace(trace, ARRAY_SIZE(trace));
char** symbols = backtrace_symbols(trace, size);
- i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
- i::OS::PrintError("(empty)\n");
+ OS::PrintError("(empty)\n");
} else if (symbols == NULL) {
- i::OS::PrintError("(no symbols)\n");
+ OS::PrintError("(no symbols)\n");
} else {
for (int i = 1; i < size; ++i) {
- i::OS::PrintError("%2d: ", i);
+ OS::PrintError("%2d: ", i);
char mangled[201];
if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
int status;
size_t length;
char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
- i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+ OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
free(demangled);
} else {
- i::OS::PrintError("??\n");
+ OS::PrintError("??\n");
}
}
}
free(symbols);
+#elif V8_OS_QNX
+ char out[1024];
+ bt_accessor_t acc;
+ bt_memmap_t memmap;
+ bt_init_accessor(&acc, BT_SELF);
+ bt_load_memmap(&acc, &memmap);
+ bt_sprn_memmap(&memmap, out, sizeof(out));
+ OS::PrintError(out);
+ bt_addr_t trace[100];
+ int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace));
+ OS::PrintError("\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ OS::PrintError("(empty)\n");
+ } else {
+ bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
+ out, sizeof(out), NULL);
+ OS::PrintError(out);
+ }
+ bt_unload_memmap(&memmap);
+ bt_release_accessor(&acc);
#endif // V8_LIBC_GLIBC || V8_OS_BSD
}
+} } // namespace v8::internal
+
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
- i::AllowHandleDereference allow_deref;
- i::AllowDeferredHandleDereference allow_deferred_deref;
fflush(stdout);
fflush(stderr);
i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
@@ -80,7 +83,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
- DumpBacktrace();
+ v8::internal::DumpBacktrace();
fflush(stderr);
i::OS::Abort();
}
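[Editor's note] V8_Fatal above uses the standard varargs-forwarding idiom: the printf-style arguments are captured with va_start and handed to a v-variant printer before aborting. A minimal sketch of that shape, with vfprintf standing in for OS::VPrintError (names hypothetical):

// Sketch of the varargs-forwarding pattern used by V8_Fatal.
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static void FatalError(const char* file, int line, const char* format, ...) {
  fflush(stdout);
  fflush(stderr);
  fprintf(stderr, "\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
  va_list arguments;
  va_start(arguments, format);
  vfprintf(stderr, format, arguments);  // the v-variant consumes the va_list
  va_end(arguments);
  fprintf(stderr, "\n#\n");
  fflush(stderr);
  abort();
}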
@@ -114,21 +117,3 @@ void CheckNonEqualsHelper(const char* file,
unexpected_source, value_source, *value_str);
}
}
-
-
-void API_Fatal(const char* location, const char* format, ...) {
- i::OS::PrintError("\n#\n# Fatal error in %s\n# ", location);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n\n");
- i::OS::Abort();
-}
-
-
-namespace v8 { namespace internal {
-
- intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
-
-} } // namespace v8::internal
diff --git a/chromium/v8/src/checks.h b/chromium/v8/src/checks.h
index f7b145fc8a8..dd7a3955fb1 100644
--- a/chromium/v8/src/checks.h
+++ b/chromium/v8/src/checks.h
@@ -1,39 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CHECKS_H_
#define V8_CHECKS_H_
#include <string.h>
-#include "../include/v8stdint.h"
+#include "include/v8stdint.h"
+#include "src/base/build_config.h"
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
#ifdef DEBUG
@@ -51,6 +30,24 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
#define UNREACHABLE() ((void) 0)
#endif
+// Simulator specific helpers.
+// We can't use USE_SIMULATOR here because it isn't defined yet.
+#if V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64
+ // TODO(all): If possible automatically prepend an indicator like
+ // UNIMPLEMENTED or LOCATION.
+ #define ASM_UNIMPLEMENTED(message) \
+ __ Debug(message, __LINE__, NO_PARAM)
+ #define ASM_UNIMPLEMENTED_BREAK(message) \
+ __ Debug(message, __LINE__, \
+ FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
+ #define ASM_LOCATION(message) \
+ __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+#else
+ #define ASM_UNIMPLEMENTED(message)
+ #define ASM_UNIMPLEMENTED_BREAK(message)
+ #define ASM_LOCATION(message)
+#endif
+
// The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts.
@@ -245,33 +242,6 @@ inline void CheckNonEqualsHelper(const char* file,
#define CHECK_LE(a, b) CHECK((a) <= (b))
-// Use C++11 static_assert if possible, which gives error
-// messages that are easier to understand on first sight.
-#if V8_HAS_CXX11_STATIC_ASSERT
-#define STATIC_CHECK(test) static_assert(test, #test)
-#else
-// This is inspired by the static assertion facility in boost. This
-// is pretty magical. If it causes you trouble on a platform you may
-// find a fix in the boost code.
-template <bool> class StaticAssertion;
-template <> class StaticAssertion<true> { };
-// This macro joins two tokens. If one of the tokens is a macro the
-// helper call causes it to be resolved before joining.
-#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
-#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
-// Causes an error during compilation of the condition is not
-// statically known to be true. It is formulated as a typedef so that
-// it can be used wherever a typedef can be used. Beware that this
-// actually causes each use to introduce a new defined type with a
-// name depending on the source line.
-template <int> class StaticAssertionHelper { };
-#define STATIC_CHECK(test) \
- typedef \
- StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED
-#endif
-
-
#ifdef DEBUG
#ifndef OPTIMIZED_DEBUG
#define ENABLE_SLOW_ASSERTS 1
@@ -288,8 +258,12 @@ extern bool FLAG_enable_slow_asserts;
#define SLOW_ASSERT(condition) ((void) 0)
const bool FLAG_enable_slow_asserts = false;
#endif
-} // namespace internal
-} // namespace v8
+
+// Exposed for making debugging easier (to see where your function is being
+// called, just add a call to DumpBacktrace).
+void DumpBacktrace();
+
+} } // namespace v8::internal
// The ASSERT macro is equivalent to CHECK except that it only
@@ -311,12 +285,6 @@ const bool FLAG_enable_slow_asserts = false;
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
#endif
-// Static asserts has no impact on runtime performance, so they can be
-// safely enabled in release mode. Moreover, the ((void) 0) expression
-// obeys different syntax rules than typedef's, e.g. it can't appear
-// inside class declaration, this leads to inconsistency between debug
-// and release compilation modes behavior.
-#define STATIC_ASSERT(test) STATIC_CHECK(test)
#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)
diff --git a/chromium/v8/src/circular-queue-inl.h b/chromium/v8/src/circular-queue-inl.h
index dfb70315781..2f06f6c496f 100644
--- a/chromium/v8/src/circular-queue-inl.h
+++ b/chromium/v8/src/circular-queue-inl.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CIRCULAR_QUEUE_INL_H_
#define V8_CIRCULAR_QUEUE_INL_H_
-#include "circular-queue.h"
+#include "src/circular-queue.h"
namespace v8 {
namespace internal {
@@ -47,8 +24,8 @@ SamplingCircularQueue<T, L>::~SamplingCircularQueue() {
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::Peek() {
- MemoryBarrier();
- if (Acquire_Load(&dequeue_pos_->marker) == kFull) {
+ base::MemoryBarrier();
+ if (base::Acquire_Load(&dequeue_pos_->marker) == kFull) {
return &dequeue_pos_->record;
}
return NULL;
@@ -57,15 +34,15 @@ T* SamplingCircularQueue<T, L>::Peek() {
template<typename T, unsigned L>
void SamplingCircularQueue<T, L>::Remove() {
- Release_Store(&dequeue_pos_->marker, kEmpty);
+ base::Release_Store(&dequeue_pos_->marker, kEmpty);
dequeue_pos_ = Next(dequeue_pos_);
}
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::StartEnqueue() {
- MemoryBarrier();
- if (Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
+ base::MemoryBarrier();
+ if (base::Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
return &enqueue_pos_->record;
}
return NULL;
@@ -74,7 +51,7 @@ T* SamplingCircularQueue<T, L>::StartEnqueue() {
template<typename T, unsigned L>
void SamplingCircularQueue<T, L>::FinishEnqueue() {
- Release_Store(&enqueue_pos_->marker, kFull);
+ base::Release_Store(&enqueue_pos_->marker, kFull);
enqueue_pos_ = Next(enqueue_pos_);
}
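[Editor's note] The marker field is the whole synchronization story in this queue: the producer publishes a record with a release store of kFull, and the consumer may read the record only after an acquire load observes kFull. A minimal single-producer/single-consumer sketch with C++11 std::atomic standing in for base::Acquire_Load/base::Release_Store (simplified: no cache-line alignment, modulo indexing instead of Next()):

// Sketch of the same acquire/release marker protocol using std::atomic.
#include <atomic>

enum Marker { kEmpty, kFull };

template <typename T, unsigned L>
class SpscQueue {
 public:
  T* StartEnqueue() {  // producer: claim the next slot if it is empty
    return entries_[enqueue_].marker.load(std::memory_order_acquire) == kEmpty
               ? &entries_[enqueue_].record
               : nullptr;
  }
  void FinishEnqueue() {  // producer: publish the record
    entries_[enqueue_].marker.store(kFull, std::memory_order_release);
    enqueue_ = (enqueue_ + 1) % L;
  }
  T* Peek() {  // consumer: record is safe to read only once kFull is seen
    return entries_[dequeue_].marker.load(std::memory_order_acquire) == kFull
               ? &entries_[dequeue_].record
               : nullptr;
  }
  void Remove() {  // consumer: hand the slot back to the producer
    entries_[dequeue_].marker.store(kEmpty, std::memory_order_release);
    dequeue_ = (dequeue_ + 1) % L;
  }

 private:
  struct Entry {
    T record;
    std::atomic<int> marker{kEmpty};
  };
  Entry entries_[L];
  unsigned enqueue_ = 0;
  unsigned dequeue_ = 0;
};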
diff --git a/chromium/v8/src/circular-queue.h b/chromium/v8/src/circular-queue.h
index 94bc89e7dfb..c312c597c67 100644
--- a/chromium/v8/src/circular-queue.h
+++ b/chromium/v8/src/circular-queue.h
@@ -1,34 +1,12 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CIRCULAR_QUEUE_H_
#define V8_CIRCULAR_QUEUE_H_
-#include "v8globals.h"
+#include "src/base/atomicops.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -72,7 +50,7 @@ class SamplingCircularQueue {
struct V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry {
Entry() : marker(kEmpty) {}
T record;
- Atomic32 marker;
+ base::Atomic32 marker;
};
Entry* Next(Entry* entry);
diff --git a/chromium/v8/src/code-stubs-hydrogen.cc b/chromium/v8/src/code-stubs-hydrogen.cc
index 96cfc378476..8342f9f7ea1 100644
--- a/chromium/v8/src/code-stubs-hydrogen.cc
+++ b/chromium/v8/src/code-stubs-hydrogen.cc
@@ -1,35 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "hydrogen.h"
-#include "lithium.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/code-stubs.h"
+#include "src/field-index.h"
+#include "src/hydrogen.h"
+#include "src/lithium.h"
namespace v8 {
namespace internal {
@@ -60,7 +38,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
arguments_length_(NULL),
info_(stub, isolate),
context_(NULL) {
- descriptor_ = stub->GetInterfaceDescriptor(isolate);
+ descriptor_ = stub->GetInterfaceDescriptor();
parameters_.Reset(new HParameter*[descriptor_->register_param_count_]);
}
virtual bool BuildGraph();
@@ -81,23 +59,8 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
- class ArrayContextChecker {
- public:
- ArrayContextChecker(HGraphBuilder* builder, HValue* constructor,
- HValue* array_function)
- : checker_(builder) {
- checker_.If<HCompareObjectEqAndBranch, HValue*>(constructor,
- array_function);
- checker_.Then();
- }
-
- ~ArrayContextChecker() {
- checker_.ElseDeopt("Array constructor called from different context");
- checker_.End();
- }
- private:
- IfBuilder checker_;
- };
+ HLoadNamedField* BuildLoadNamedField(HValue* object,
+ FieldIndex index);
enum ArgumentClass {
NONE,
@@ -106,15 +69,25 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
};
HValue* BuildArrayConstructor(ElementsKind kind,
- ContextCheckMode context_mode,
AllocationSiteOverrideMode override_mode,
ArgumentClass argument_class);
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
- void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
- HValue* code_object);
+ // BuildCheckAndInstallOptimizedCode emits code to install the optimized
+ // function found in the optimized code map at map_index in js_function, if
+ // the function at map_index matches the given native_context. Builder is
+ // left in the "Then()" state after the install.
+ void BuildCheckAndInstallOptimizedCode(HValue* js_function,
+ HValue* native_context,
+ IfBuilder* builder,
+ HValue* optimized_map,
+ HValue* map_index);
void BuildInstallCode(HValue* js_function, HValue* shared_info);
+
+ HInstruction* LoadFromOptimizedCodeMap(HValue* optimized_map,
+ HValue* iterator,
+ int field_offset);
void BuildInstallFromOptimizedCodeMap(HValue* js_function,
HValue* shared_info,
HValue* native_context);
@@ -153,9 +126,9 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
bool runtime_stack_params = descriptor_->stack_parameter_count_.is_valid();
HInstruction* stack_parameter_count = NULL;
for (int i = 0; i < param_count; ++i) {
- Representation r = descriptor_->IsParameterCountRegister(i)
- ? Representation::Integer32()
- : Representation::Tagged();
+ Representation r = descriptor_->register_param_representations_ == NULL
+ ? Representation::Tagged()
+ : descriptor_->register_param_representations_[i];
HParameter* param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
start_environment->Bind(i, param);
parameters_[i] = param;
@@ -241,15 +214,15 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
};
-Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
- Factory* factory = isolate->factory();
+Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode() {
+ Factory* factory = isolate()->factory();
// Generate the new code.
- MacroAssembler masm(isolate, NULL, 256);
+ MacroAssembler masm(isolate(), NULL, 256);
{
// Update the static counter each time a new code stub is generated.
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
// Generate the code for the stub.
masm.set_generating_stub(true);
@@ -266,8 +239,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(),
- GetStubFlags());
+ GetStubType());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -275,13 +247,14 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
template <class Stub>
-static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
+static Handle<Code> DoGenerateCode(Stub* stub) {
+ Isolate* isolate = stub->isolate();
CodeStub::Major major_key =
static_cast<HydrogenCodeStub*>(stub)->MajorKey();
CodeStubInterfaceDescriptor* descriptor =
isolate->code_stub_interface_descriptor(major_key);
if (descriptor->register_param_count_ < 0) {
- stub->InitializeInterfaceDescriptor(isolate, descriptor);
+ stub->InitializeInterfaceDescriptor(descriptor);
}
// If we are uninitialized we can use a light-weight stub to enter
@@ -289,7 +262,7 @@ static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
// stub-failure deopt mechanism.
if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
ASSERT(!descriptor->stack_parameter_count_.is_valid());
- return stub->GenerateLightweightMissCode(isolate);
+ return stub->GenerateLightweightMissCode();
}
ElapsedTimer timer;
if (FLAG_profile_hydrogen_code_stub_compilation) {
@@ -300,7 +273,8 @@ static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
Handle<Code> code = chunk->Codegen();
if (FLAG_profile_hydrogen_code_stub_compilation) {
double ms = timer.Elapsed().InMillisecondsF();
- PrintF("[Lazy compilation of %s took %0.3f ms]\n", *stub->GetName(), ms);
+ PrintF("[Lazy compilation of %s took %0.3f ms]\n",
+ stub->GetName().get(), ms);
}
return code;
}
@@ -323,7 +297,7 @@ HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
// Convert the parameter to number using the builtin.
HValue* function = AddLoadJSBuiltin(Builtins::TO_NUMBER);
- Add<HPushArgument>(value);
+ Add<HPushArguments>(value);
Push(Add<HInvokeFunction>(function, 1));
if_number.End();
@@ -332,8 +306,8 @@ HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
}
-Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> ToNumberStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -341,12 +315,12 @@ template <>
HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
info()->MarkAsSavesCallerDoubles();
HValue* number = GetParameter(NumberToStringStub::kNumber);
- return BuildNumberToString(number, handle(Type::Number(), isolate()));
+ return BuildNumberToString(number, Type::Number(zone()));
}
-Handle<Code> NumberToStringStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> NumberToStringStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -355,8 +329,10 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Factory* factory = isolate()->factory();
HValue* undefined = graph()->GetConstantUndefined();
AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
- FastCloneShallowArrayStub::Mode mode = casted_stub()->mode();
- int length = casted_stub()->length();
+
+  // This stub is very performance sensitive; the generated code must be tuned
+  // so that it doesn't build an eager frame.
+ info()->MarkMustNotHaveEagerFrame();
HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
GetParameter(1),
@@ -369,47 +345,42 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kTransitionInfoOffset);
- HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
- HValue* push_value;
- if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
- HValue* elements = AddLoadElements(boilerplate);
-
- IfBuilder if_fixed_cow(this);
- if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
- if_fixed_cow.Then();
- push_value = BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS,
- 0/*copy-on-write*/);
- environment()->Push(push_value);
- if_fixed_cow.Else();
-
- IfBuilder if_fixed(this);
- if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
- if_fixed.Then();
- push_value = BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS,
- length);
- environment()->Push(push_value);
- if_fixed.Else();
- push_value = BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_DOUBLE_ELEMENTS,
- length);
- environment()->Push(push_value);
- } else {
- ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
- push_value = BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- elements_kind,
- length);
- environment()->Push(push_value);
- }
+ HInstruction* boilerplate = Add<HLoadNamedField>(
+ allocation_site, static_cast<HValue*>(NULL), access);
+ HValue* elements = AddLoadElements(boilerplate);
+ HValue* capacity = AddLoadFixedArrayLength(elements);
+ IfBuilder zero_capacity(this);
+ zero_capacity.If<HCompareNumericAndBranch>(capacity, graph()->GetConstant0(),
+ Token::EQ);
+ zero_capacity.Then();
+ Push(BuildCloneShallowArrayEmpty(boilerplate,
+ allocation_site,
+ alloc_site_mode));
+ zero_capacity.Else();
+ IfBuilder if_fixed_cow(this);
+ if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
+ if_fixed_cow.Then();
+ Push(BuildCloneShallowArrayCow(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS));
+ if_fixed_cow.Else();
+ IfBuilder if_fixed(this);
+ if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
+ if_fixed.Then();
+ Push(BuildCloneShallowArrayNonEmpty(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS));
+
+ if_fixed.Else();
+ Push(BuildCloneShallowArrayNonEmpty(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_DOUBLE_ELEMENTS));
+ if_fixed.End();
+ if_fixed_cow.End();
+ zero_capacity.End();
checker.ElseDeopt("Uninitialized boilerplate literals");
checker.End();
@@ -418,8 +389,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
}
-Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -439,7 +410,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kTransitionInfoOffset);
- HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+ HInstruction* boilerplate = Add<HLoadNamedField>(
+ allocation_site, static_cast<HValue*>(NULL), access);
int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
int object_size = size;
@@ -448,9 +420,11 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
}
HValue* boilerplate_map = Add<HLoadNamedField>(
- boilerplate, HObjectAccess::ForMap());
+ boilerplate, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap());
HValue* boilerplate_size = Add<HLoadNamedField>(
- boilerplate_map, HObjectAccess::ForMapInstanceSize());
+ boilerplate_map, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMapInstanceSize());
HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
checker.If<HCompareNumericAndBranch>(boilerplate_size,
size_in_words, Token::EQ);
@@ -459,12 +433,13 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* size_in_bytes = Add<HConstant>(size);
HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), JS_OBJECT_TYPE);
+ NOT_TENURED, JS_OBJECT_TYPE);
for (int i = 0; i < object_size; i += kPointerSize) {
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(i);
- Add<HStoreNamedField>(object, access,
- Add<HLoadNamedField>(boilerplate, access));
+ HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(i);
+ Add<HStoreNamedField>(
+ object, access, Add<HLoadNamedField>(
+ boilerplate, static_cast<HValue*>(NULL), access));
}
ASSERT(FLAG_allocation_site_pretenuring || (size == object_size));
@@ -481,8 +456,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
}
-Handle<Code> FastCloneShallowObjectStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -509,26 +484,22 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
AllocationSite::kNestedSiteOffset),
graph()->GetConstant0());
- // Pretenuring calculation fields.
- Add<HStoreNamedField>(object,
- HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kMementoFoundCountOffset),
- graph()->GetConstant0());
-
+ // Pretenuring calculation field.
Add<HStoreNamedField>(object,
HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kMementoCreateCountOffset),
+ AllocationSite::kPretenureDataOffset),
graph()->GetConstant0());
+ // Pretenuring memento creation count field.
Add<HStoreNamedField>(object,
HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kPretenureDecisionOffset),
+ AllocationSite::kPretenureCreateCountOffset),
graph()->GetConstant0());
// Store an empty fixed array for the code dependency.
HConstant* empty_fixed_array =
Add<HConstant>(isolate()->factory()->empty_fixed_array());
- HStoreNamedField* store = Add<HStoreNamedField>(
+ Add<HStoreNamedField>(
object,
HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kDependentCodeOffset),
@@ -537,29 +508,31 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
// Link the object to the allocation site list
HValue* site_list = Add<HConstant>(
ExternalReference::allocation_sites_list_address(isolate()));
- HValue* site = Add<HLoadNamedField>(site_list,
- HObjectAccess::ForAllocationSiteList());
- store = Add<HStoreNamedField>(object,
+ HValue* site = Add<HLoadNamedField>(
+ site_list, static_cast<HValue*>(NULL),
+ HObjectAccess::ForAllocationSiteList());
+  // TODO(mvstanton): This is a store to a weak pointer, which we may want to
+  // mark as such in order to skip the write barrier, once we have a unified
+  // system for weakness. For now we decided to keep it like this because having
+  // an initial write-barrier-backed store makes this pointer strong until the
+  // next GC, and allocation sites are designed to survive several GCs anyway.
+ Add<HStoreNamedField>(
+ object,
HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset),
site);
- store->SkipWriteBarrier();
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object);
- // We use a hammer (SkipWriteBarrier()) to indicate that we know the input
- // cell is really a Cell, and so no write barrier is needed.
- // TODO(mvstanton): Add a debug_code check to verify the input cell is really
- // a cell. (perhaps with a new instruction, HAssert).
- HInstruction* cell = GetParameter(0);
- HObjectAccess access = HObjectAccess::ForCellValue();
- store = Add<HStoreNamedField>(cell, access, object);
- store->SkipWriteBarrier();
- return cell;
+ HInstruction* feedback_vector = GetParameter(0);
+ HInstruction* slot = GetParameter(1);
+ Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ INITIALIZING_STORE);
+ return feedback_vector;
}
-Handle<Code> CreateAllocationSiteStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> CreateAllocationSiteStub::GenerateCode() {
+ return DoGenerateCode(this);
}
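
Read as a pseudo-runtime routine, the rewritten stub does three stores; this is a hedged C++ sketch with stand-in types, not V8 API:

  // Hedged sketch, not V8 API: illustrative types only.
  struct AllocationSite {
    AllocationSite* weak_next;  // links sites into the isolate-wide list
    // transition info, nested site, pretenure data, create count, ...
  };

  struct Isolate { AllocationSite* allocation_sites_list; };

  // Mirror of the stub body: link the site into the global list, then do an
  // initializing store into the feedback vector slot and return the vector.
  void** InstallAllocationSite(Isolate* iso, AllocationSite* site,
                               void** feedback_vector, int slot) {
    site->weak_next = iso->allocation_sites_list;
    iso->allocation_sites_list = site;
    feedback_vector[slot] = site;  // corresponds to the HStoreKeyed above
    return feedback_vector;
  }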
@@ -568,70 +541,59 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
-Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
+ return DoGenerateCode(this);
}
-template<>
-HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
- Representation rep = casted_stub()->representation();
- HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
- HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddLoadNamedField(GetParameter(0), access);
-}
-
-
-Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
+ HValue* object, FieldIndex index) {
+ Representation representation = index.is_double()
+ ? Representation::Double()
+ : Representation::Tagged();
+ int offset = index.offset();
+ HObjectAccess access = index.is_inobject()
+ ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
+ : HObjectAccess::ForBackingStoreOffset(offset, representation);
+ if (index.is_double()) {
+ // Load the heap number.
+ object = Add<HLoadNamedField>(
+ object, static_cast<HValue*>(NULL),
+ access.WithRepresentation(Representation::Tagged()));
+ // Load the double value from it.
+ access = HObjectAccess::ForHeapNumberValue();
+ }
+ return Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
}
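
For orientation, a minimal C++ sketch of the two-step load this helper emits for double fields, with an illustrative layout:

  // Hedged sketch: a double field is reached through a tagged HeapNumber
  // box, so loading it takes two dependent loads instead of one.
  struct HeapNumber { double value; };
  struct Holder { HeapNumber* double_field; };  // stands in for the tagged slot

  double LoadDoubleField(const Holder* holder) {
    HeapNumber* box = holder->double_field;  // first load: the tagged box
    return box->value;                       // second load: the raw double
  }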
template<>
-HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
- Representation rep = casted_stub()->representation();
- HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
- HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddLoadNamedField(GetParameter(0), access);
+HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
+ return BuildLoadNamedField(GetParameter(0), casted_stub()->index());
}
-Handle<Code> KeyedLoadFieldStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> LoadFieldStub::GenerateCode() {
+ return DoGenerateCode(this);
}
template<>
-HValue* CodeStubGraphBuilder<KeyedArrayCallStub>::BuildCodeStub() {
- int argc = casted_stub()->argc() + 1;
- info()->set_parameter_count(argc);
-
- HValue* receiver = Add<HParameter>(1);
- BuildCheckHeapObject(receiver);
-
- // Load the expected initial array map from the context.
- JSArrayBuilder array_builder(this, casted_stub()->elements_kind());
- HValue* map = array_builder.EmitMapCode();
-
- HValue* checked_receiver = Add<HCheckMapValue>(receiver, map);
-
- HValue* function = BuildUncheckedMonomorphicElementAccess(
- checked_receiver, GetParameter(0),
- NULL, true, casted_stub()->elements_kind(),
- false, NEVER_RETURN_HOLE, STANDARD_STORE);
- return Add<HCallFunction>(function, argc, TAIL_CALL);
+HValue* CodeStubGraphBuilder<StringLengthStub>::BuildCodeStub() {
+ HValue* string = BuildLoadNamedField(GetParameter(0),
+ FieldIndex::ForInObjectOffset(JSValue::kValueOffset));
+ return BuildLoadNamedField(string,
+ FieldIndex::ForInObjectOffset(String::kLengthOffset));
}
-Handle<Code> KeyedArrayCallStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> StringLengthStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -640,14 +602,14 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2),
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
+ STORE, NEVER_RETURN_HOLE, casted_stub()->store_mode());
return GetParameter(2);
}
-Handle<Code> KeyedStoreFastElementStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> KeyedStoreFastElementStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -659,36 +621,30 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
GetParameter(1),
casted_stub()->from_kind(),
casted_stub()->to_kind(),
- true);
+ casted_stub()->is_js_array());
return GetParameter(0);
}
-Handle<Code> TransitionElementsKindStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> TransitionElementsKindStub::GenerateCode() {
+ return DoGenerateCode(this);
}
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
ElementsKind kind,
- ContextCheckMode context_mode,
AllocationSiteOverrideMode override_mode,
ArgumentClass argument_class) {
HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor);
- if (context_mode == CONTEXT_CHECK_REQUIRED) {
- HInstruction* array_function = BuildGetArrayFunction();
- ArrayContextChecker checker(this, constructor, array_function);
- }
-
- HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
- // Walk through the property cell to the AllocationSite
- HValue* alloc_site = Add<HLoadNamedField>(property_cell,
- HObjectAccess::ForCellValue());
+ HValue* alloc_site = GetParameter(ArrayConstructorStubBase::kAllocationSite);
JSArrayBuilder array_builder(this, kind, alloc_site, constructor,
override_mode);
HValue* result = NULL;
switch (argument_class) {
case NONE:
+      // This stub is very performance sensitive; the generated code must be
+      // tuned so that it doesn't build an eager frame.
+ info()->MarkMustNotHaveEagerFrame();
result = array_builder.AllocateEmptyArray();
break;
case SINGLE:
@@ -712,6 +668,9 @@ HValue* CodeStubGraphBuilderBase::BuildInternalArrayConstructor(
HValue* result = NULL;
switch (argument_class) {
case NONE:
+      // This stub is very performance sensitive; the generated code must be
+      // tuned so that it doesn't build an eager frame.
+ info()->MarkMustNotHaveEagerFrame();
result = array_builder.AllocateEmptyArray();
break;
case SINGLE:
@@ -762,6 +721,7 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
? JSArrayBuilder::FILL_WITH_HOLE
: JSArrayBuilder::DONT_FILL_WITH_HOLE;
HValue* new_object = array_builder->AllocateArray(checked_length,
+ max_alloc_length,
checked_length,
fill_mode);
HValue* elements = array_builder->GetElementsLocation();
@@ -786,14 +746,13 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
template <>
HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
- ContextCheckMode context_mode = casted_stub()->context_mode();
AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
- return BuildArrayConstructor(kind, context_mode, override_mode, NONE);
+ return BuildArrayConstructor(kind, override_mode, NONE);
}
-Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -801,29 +760,26 @@ template <>
HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
- ContextCheckMode context_mode = casted_stub()->context_mode();
AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
- return BuildArrayConstructor(kind, context_mode, override_mode, SINGLE);
+ return BuildArrayConstructor(kind, override_mode, SINGLE);
}
-Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode(
- Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
- ContextCheckMode context_mode = casted_stub()->context_mode();
AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
- return BuildArrayConstructor(kind, context_mode, override_mode, MULTIPLE);
+ return BuildArrayConstructor(kind, override_mode, MULTIPLE);
}
-Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -835,9 +791,8 @@ HValue* CodeStubGraphBuilder<InternalArrayNoArgumentConstructorStub>::
}
-Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode(
- Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -849,9 +804,8 @@ HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
}
-Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode(
- Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -863,9 +817,8 @@ HValue* CodeStubGraphBuilder<InternalArrayNArgumentsConstructorStub>::
}
-Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode(
- Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -875,7 +828,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
CompareNilICStub* stub = casted_stub();
HIfContinuation continuation;
Handle<Map> sentinel_map(isolate->heap()->meta_map());
- Handle<Type> type = stub->GetType(isolate, sentinel_map);
+ Type* type = stub->GetType(zone(), sentinel_map);
BuildCompareNil(GetParameter(0), type, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
@@ -890,8 +843,8 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
}
-Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> CompareNilICStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -902,14 +855,15 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
HValue* left = GetParameter(BinaryOpICStub::kLeft);
HValue* right = GetParameter(BinaryOpICStub::kRight);
- Handle<Type> left_type = state.GetLeftType(isolate());
- Handle<Type> right_type = state.GetRightType(isolate());
- Handle<Type> result_type = state.GetResultType(isolate());
+ Type* left_type = state.GetLeftType(zone());
+ Type* right_type = state.GetRightType(zone());
+ Type* result_type = state.GetResultType(zone());
ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
(state.HasSideEffects() || !result_type->Is(Type::None())));
HValue* result = NULL;
+ HAllocationMode allocation_mode(NOT_TENURED);
if (state.op() == Token::ADD &&
(left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
!left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
@@ -922,15 +876,16 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
{
Push(BuildBinaryOperation(
state.op(), left, right,
- handle(Type::String(), isolate()), right_type,
- result_type, state.fixed_right_arg()));
+ Type::String(zone()), right_type,
+ result_type, state.fixed_right_arg(),
+ allocation_mode));
}
if_leftisstring.Else();
{
Push(BuildBinaryOperation(
state.op(), left, right,
left_type, right_type, result_type,
- state.fixed_right_arg()));
+ state.fixed_right_arg(), allocation_mode));
}
if_leftisstring.End();
result = Pop();
@@ -941,15 +896,16 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
{
Push(BuildBinaryOperation(
state.op(), left, right,
- left_type, handle(Type::String(), isolate()),
- result_type, state.fixed_right_arg()));
+ left_type, Type::String(zone()),
+ result_type, state.fixed_right_arg(),
+ allocation_mode));
}
if_rightisstring.Else();
{
Push(BuildBinaryOperation(
state.op(), left, right,
left_type, right_type, result_type,
- state.fixed_right_arg()));
+ state.fixed_right_arg(), allocation_mode));
}
if_rightisstring.End();
result = Pop();
@@ -958,26 +914,12 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
result = BuildBinaryOperation(
state.op(), left, right,
left_type, right_type, result_type,
- state.fixed_right_arg());
+ state.fixed_right_arg(), allocation_mode);
}
// If we encounter a generic argument, the number conversion is
// observable, thus we cannot afford to bail out after the fact.
if (!state.HasSideEffects()) {
- if (result_type->Is(Type::Smi())) {
- if (state.op() == Token::SHR) {
- // TODO(olivf) Replace this by a SmiTagU Instruction.
- // 0x40000000: this number would convert to negative when interpreting
- // the register as signed value;
- IfBuilder if_of(this);
- if_of.IfNot<HCompareNumericAndBranch>(result,
- Add<HConstant>(static_cast<int>(SmiValuesAre32Bits()
- ? 0x80000000 : 0x40000000)), Token::EQ_STRICT);
- if_of.Then();
- if_of.ElseDeopt("UInt->Smi oveflow");
- if_of.End();
- }
- }
result = EnforceNumberType(result, result_type);
}
@@ -986,7 +928,7 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
if (state.CanReuseDoubleBox()) {
HValue* operand = (state.mode() == OVERWRITE_LEFT) ? left : right;
IfBuilder if_heap_number(this);
- if_heap_number.IfNot<HIsSmiAndBranch>(operand);
+ if_heap_number.If<HHasInstanceTypeAndBranch>(operand, HEAP_NUMBER_TYPE);
if_heap_number.Then();
Add<HStoreNamedField>(operand, HObjectAccess::ForHeapNumberValue(), result);
Push(operand);
@@ -1000,19 +942,44 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
}
-Handle<Code> BinaryOpICStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> BinaryOpICStub::GenerateCode() {
+ return DoGenerateCode(this);
}
template <>
-HValue* CodeStubGraphBuilder<NewStringAddStub>::BuildCodeInitializedStub() {
- NewStringAddStub* stub = casted_stub();
+HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
+ BinaryOpIC::State state = casted_stub()->state();
+
+ HValue* allocation_site = GetParameter(
+ BinaryOpWithAllocationSiteStub::kAllocationSite);
+ HValue* left = GetParameter(BinaryOpWithAllocationSiteStub::kLeft);
+ HValue* right = GetParameter(BinaryOpWithAllocationSiteStub::kRight);
+
+ Type* left_type = state.GetLeftType(zone());
+ Type* right_type = state.GetRightType(zone());
+ Type* result_type = state.GetResultType(zone());
+ HAllocationMode allocation_mode(allocation_site);
+
+ return BuildBinaryOperation(state.op(), left, right,
+ left_type, right_type, result_type,
+ state.fixed_right_arg(), allocation_mode);
+}
+
+
+Handle<Code> BinaryOpWithAllocationSiteStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<StringAddStub>::BuildCodeInitializedStub() {
+ StringAddStub* stub = casted_stub();
StringAddFlags flags = stub->flags();
PretenureFlag pretenure_flag = stub->pretenure_flag();
- HValue* left = GetParameter(NewStringAddStub::kLeft);
- HValue* right = GetParameter(NewStringAddStub::kRight);
+ HValue* left = GetParameter(StringAddStub::kLeft);
+ HValue* right = GetParameter(StringAddStub::kRight);
// Make sure that both arguments are strings if not known in advance.
if ((flags & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
@@ -1022,12 +989,12 @@ HValue* CodeStubGraphBuilder<NewStringAddStub>::BuildCodeInitializedStub() {
right = BuildCheckString(right);
}
- return BuildStringAdd(left, right, pretenure_flag);
+ return BuildStringAdd(left, right, HAllocationMode(pretenure_flag));
}
-Handle<Code> NewStringAddStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> StringAddStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -1045,8 +1012,8 @@ HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
}
-Handle<Code> ToBooleanStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> ToBooleanStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -1058,17 +1025,21 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Handle<PropertyCell> placeholder_cell =
isolate()->factory()->NewPropertyCell(placeholer_value);
- HParameter* receiver = GetParameter(0);
HParameter* value = GetParameter(2);
- // Check that the map of the global has not changed: use a placeholder map
- // that will be replaced later with the global object's map.
- Handle<Map> placeholder_map = isolate()->factory()->meta_map();
- Add<HCheckMaps>(receiver, placeholder_map, top_info());
+ if (stub->check_global()) {
+ // Check that the map of the global has not changed: use a placeholder map
+ // that will be replaced later with the global object's map.
+ Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+ HValue* global = Add<HConstant>(
+ StoreGlobalStub::global_placeholder(isolate()));
+ Add<HCheckMaps>(global, placeholder_map);
+ }
HValue* cell = Add<HConstant>(placeholder_cell);
HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
- HValue* cell_contents = Add<HLoadNamedField>(cell, access);
+ HValue* cell_contents = Add<HLoadNamedField>(
+ cell, static_cast<HValue*>(NULL), access);
if (stub->is_constant()) {
IfBuilder builder(this);
@@ -1094,8 +1065,8 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
}
-Handle<Code> StoreGlobalStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> StoreGlobalStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -1120,7 +1091,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(object, key, value,
casted_stub()->is_jsarray(),
casted_stub()->to_kind(),
- true, ALLOW_RETURN_HOLE,
+ STORE, ALLOW_RETURN_HOLE,
casted_stub()->store_mode());
}
@@ -1128,15 +1099,32 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
}
-Handle<Code> ElementsTransitionAndStoreStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
+ return DoGenerateCode(this);
}
-void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
+void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* js_function,
HValue* native_context,
- HValue* code_object) {
+ IfBuilder* builder,
+ HValue* optimized_map,
+ HValue* map_index) {
+ HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
+ HValue* context_slot = LoadFromOptimizedCodeMap(
+ optimized_map, map_index, SharedFunctionInfo::kContextOffset);
+ HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
+ optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
+ builder->If<HCompareObjectEqAndBranch>(native_context,
+ context_slot);
+ builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
+ builder->Then();
+ HValue* code_object = LoadFromOptimizedCodeMap(optimized_map,
+ map_index, SharedFunctionInfo::kCachedCodeOffset);
+  // ... and the literals from the same entry.
+ HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
+ map_index, SharedFunctionInfo::kLiteralsOffset);
+
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->fast_new_closure_install_optimized());
@@ -1144,9 +1132,12 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
// map and either unmangle them on marking or do nothing as the whole map is
// discarded on major GC anyway.
Add<HStoreCodeEntry>(js_function, code_object);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
+ literals);
// Now link a function into a list of optimized functions.
- HValue* optimized_functions_list = Add<HLoadNamedField>(native_context,
+ HValue* optimized_functions_list = Add<HLoadNamedField>(
+ native_context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
@@ -1156,6 +1147,8 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
Add<HStoreNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
js_function);
+
+ // The builder continues in the "then" after this function.
}
@@ -1164,19 +1157,38 @@ void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
graph()->GetConstantUndefined());
- HValue* code_object = Add<HLoadNamedField>(shared_info,
- HObjectAccess::ForCodeOffset());
+ HValue* code_object = Add<HLoadNamedField>(
+ shared_info, static_cast<HValue*>(NULL), HObjectAccess::ForCodeOffset());
Add<HStoreCodeEntry>(js_function, code_object);
}
+HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
+ HValue* optimized_map,
+ HValue* iterator,
+ int field_offset) {
+  // By making sure to express these loads in the form [<hvalue> + constant],
+  // the keyed load can be hoisted.
+ ASSERT(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength);
+ HValue* field_slot = iterator;
+ if (field_offset > 0) {
+ HValue* field_offset_value = Add<HConstant>(field_offset);
+ field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
+ }
+ HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
+ static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ return field_entry;
+}
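
The hoisting remark is about address arithmetic; a hedged C++ model of the map layout and the load helper, with illustrative offsets:

  #include <cstdint>
  #include <vector>

  // Hedged model of the optimized code map: a flat array of fixed-size
  // entries, [context, code, literals, osr_ast_id] repeated after a short
  // header. Offsets are illustrative, not V8's exact layout.
  const int kEntryLength = 4;
  const int kContextOffset = 0;
  const int kCachedCodeOffset = 1;
  const int kLiteralsOffset = 2;
  const int kOsrAstIdOffset = 3;

  // Mirror of LoadFromOptimizedCodeMap: the index is always expressed as
  // iterator + constant, so the add is a per-field loop invariant and the
  // keyed loads can be hoisted or combined.
  intptr_t LoadField(const std::vector<intptr_t>& map, int iterator,
                     int field_offset) {
    return map[iterator + field_offset];
  }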
+
+
void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* js_function,
HValue* shared_info,
HValue* native_context) {
Counters* counters = isolate()->counters();
IfBuilder is_optimized(this);
- HInstruction* optimized_map = Add<HLoadNamedField>(shared_info,
+ HInstruction* optimized_map = Add<HLoadNamedField>(
+ shared_info, static_cast<HValue*>(NULL),
HObjectAccess::ForOptimizedCodeMap());
HValue* null_constant = Add<HConstant>(0);
is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
@@ -1190,76 +1202,56 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
// optimized_map points to fixed array of 3-element entries
// (native context, optimized code, literals).
// Map must never be empty, so check the first elements.
- Label install_optimized;
- HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
- HObjectAccess::ForFirstContextSlot());
+ HValue* first_entry_index =
+ Add<HConstant>(SharedFunctionInfo::kEntriesStart);
IfBuilder already_in(this);
- already_in.If<HCompareObjectEqAndBranch>(native_context,
- first_context_slot);
- already_in.Then();
- {
- HValue* code_object = Add<HLoadNamedField>(optimized_map,
- HObjectAccess::ForFirstCodeSlot());
- BuildInstallOptimizedCode(js_function, native_context, code_object);
- }
+ BuildCheckAndInstallOptimizedCode(js_function, native_context, &already_in,
+ optimized_map, first_entry_index);
already_in.Else();
{
+    // Iterate through the rest of the map backwards. Do not double-check the
+    // first entry. After the loop, if no matching optimized code was found,
+    // install the unoptimized code.
+    // for (i = map.length() - SharedFunctionInfo::kEntryLength;
+    //      i > SharedFunctionInfo::kEntriesStart;
+    //      i -= SharedFunctionInfo::kEntryLength) { .. }
HValue* shared_function_entry_length =
Add<HConstant>(SharedFunctionInfo::kEntryLength);
LoopBuilder loop_builder(this,
context(),
LoopBuilder::kPostDecrement,
shared_function_entry_length);
- HValue* array_length = Add<HLoadNamedField>(optimized_map,
+ HValue* array_length = Add<HLoadNamedField>(
+ optimized_map, static_cast<HValue*>(NULL),
HObjectAccess::ForFixedArrayLength());
- HValue* key = loop_builder.BeginBody(array_length,
- graph()->GetConstant0(),
- Token::GT);
+ HValue* start_pos = AddUncasted<HSub>(array_length,
+ shared_function_entry_length);
+ HValue* slot_iterator = loop_builder.BeginBody(start_pos,
+ first_entry_index,
+ Token::GT);
{
- // Iterate through the rest of map backwards.
- // Do not double check first entry.
- HValue* second_entry_index =
- Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
- IfBuilder restore_check(this);
- restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
- Token::EQ);
- restore_check.Then();
- {
- // Store the unoptimized code
- BuildInstallCode(js_function, shared_info);
- loop_builder.Break();
- }
- restore_check.Else();
- {
- HValue* keyed_minus = AddUncasted<HSub>(
- key, shared_function_entry_length);
- HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
- keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
- IfBuilder done_check(this);
- done_check.If<HCompareObjectEqAndBranch>(native_context,
- keyed_lookup);
- done_check.Then();
- {
- // Hit: fetch the optimized code.
- HValue* keyed_plus = AddUncasted<HAdd>(
- keyed_minus, graph()->GetConstant1());
- HValue* code_object = Add<HLoadKeyed>(optimized_map,
- keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
- BuildInstallOptimizedCode(js_function, native_context, code_object);
-
- // Fall out of the loop
- loop_builder.Break();
- }
- done_check.Else();
- done_check.End();
- }
- restore_check.End();
+ IfBuilder done_check(this);
+ BuildCheckAndInstallOptimizedCode(js_function, native_context,
+ &done_check,
+ optimized_map,
+ slot_iterator);
+ // Fall out of the loop
+ loop_builder.Break();
}
loop_builder.EndBody();
+
+    // If slot_iterator equals the first entry index, then we failed to find
+    // and install optimized code.
+ IfBuilder no_optimized_code_check(this);
+ no_optimized_code_check.If<HCompareNumericAndBranch>(
+ slot_iterator, first_entry_index, Token::EQ);
+ no_optimized_code_check.Then();
+ {
+ // Store the unoptimized code
+ BuildInstallCode(js_function, shared_info);
+ }
}
- already_in.End();
}
- is_optimized.End();
}
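
Flattening the builder calls above into straight-line C++ may help; this hedged sketch folds the "first entry, then backwards scan" split into one loop and uses -1 as a stand-in for BailoutId::None():

  #include <cstdint>
  #include <vector>

  const int kEntriesStart = 1;  // illustrative header size
  const int kEntryLength = 4;   // [context, code, literals, osr_ast_id]

  // Returns the index of a matching entry, or -1 when the caller should
  // install the shared (unoptimized) code instead.
  int FindOptimizedEntry(const std::vector<intptr_t>& map,
                         intptr_t native_context) {
    for (int i = static_cast<int>(map.size()) - kEntryLength;
         i >= kEntriesStart; i -= kEntryLength) {
      bool context_matches = map[i + 0] == native_context;
      bool not_osr = map[i + 3] == -1;  // stand-in for BailoutId::None()
      if (context_matches && not_osr) {
        return i;  // hit: install code at i + 1 and literals at i + 2
      }
    }
    return -1;  // miss
  }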
@@ -1278,13 +1270,14 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
- int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
+ int map_index = Context::FunctionMapIndex(casted_stub()->strict_mode(),
casted_stub()->is_generator());
// Compute the function map in the current native context and set that
// as the map of the allocated object.
HInstruction* native_context = BuildGetNativeContext();
- HInstruction* map_slot_value = Add<HLoadNamedField>(native_context,
+ HInstruction* map_slot_value = Add<HLoadNamedField>(
+ native_context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(map_index));
Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
@@ -1316,8 +1309,63 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
}
-Handle<Code> FastNewClosureStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> FastNewClosureStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<FastNewContextStub>::BuildCodeStub() {
+ int length = casted_stub()->slots() + Context::MIN_CONTEXT_SLOTS;
+
+ // Get the function.
+ HParameter* function = GetParameter(FastNewContextStub::kFunction);
+
+ // Allocate the context in new space.
+ HAllocate* function_context = Add<HAllocate>(
+ Add<HConstant>(length * kPointerSize + FixedArray::kHeaderSize),
+ HType::HeapObject(), NOT_TENURED, FIXED_ARRAY_TYPE);
+
+ // Set up the object header.
+ AddStoreMapConstant(function_context,
+ isolate()->factory()->function_context_map());
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForFixedArrayLength(),
+ Add<HConstant>(length));
+
+ // Set up the fixed slots.
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(Context::CLOSURE_INDEX),
+ function);
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX),
+ context());
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX),
+ graph()->GetConstant0());
+
+ // Copy the global object from the previous context.
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(
+ Context::GLOBAL_OBJECT_INDEX),
+ global_object);
+
+ // Initialize the rest of the slots to undefined.
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; ++i) {
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(i),
+ graph()->GetConstantUndefined());
+ }
+
+ return function_context;
+}
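
A hedged summary of the object this stub builds and of its size computation (symbolic parameters, not V8 constants):

  // Hedged sketch of the FixedArray-shaped context the stub allocates:
  //   [map][length][CLOSURE][PREVIOUS][EXTENSION][GLOBAL_OBJECT][user slots]
  // MIN_CONTEXT_SLOTS covers the fixed slots; the rest start out undefined.
  // The allocation size matches the Add<HAllocate> argument above:
  int ContextAllocationSize(int stub_slots, int min_context_slots,
                            int pointer_size, int header_size) {
    int length = stub_slots + min_context_slots;
    return length * pointer_size + header_size;  // FixedArray::kHeaderSize
  }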
+
+
+Handle<Code> FastNewContextStub::GenerateCode() {
+ return DoGenerateCode(this);
}
@@ -1328,12 +1376,334 @@ HValue* CodeStubGraphBuilder<KeyedLoadDictionaryElementStub>::BuildCodeStub() {
Add<HCheckSmi>(key);
- return BuildUncheckedDictionaryElementLoad(receiver, key);
+ HValue* elements = AddLoadElements(receiver);
+
+ HValue* hash = BuildElementIndexHash(key);
+
+ return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash);
+}
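
BuildElementIndexHash is not shown in this diff; the sketch below assumes it emits the integer mix V8's runtime uses for number-dictionary lookups:

  #include <cstdint>

  // Hedged assumption: BuildElementIndexHash mirrors the runtime's integer
  // hash (a Thomas Wang style mix). Treat the exact constants as
  // illustrative rather than quoted from this V8 snapshot.
  uint32_t ElementIndexHash(uint32_t key) {
    uint32_t hash = key;
    hash = ~hash + (hash << 15);
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;
    hash = hash ^ (hash >> 16);
    return hash;
  }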
+
+
+Handle<Code> KeyedLoadDictionaryElementStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<RegExpConstructResultStub>::BuildCodeStub() {
+ // Determine the parameters.
+ HValue* length = GetParameter(RegExpConstructResultStub::kLength);
+ HValue* index = GetParameter(RegExpConstructResultStub::kIndex);
+ HValue* input = GetParameter(RegExpConstructResultStub::kInput);
+
+ info()->MarkMustNotHaveEagerFrame();
+
+ return BuildRegExpConstructResult(length, index, input);
+}
+
+
+Handle<Code> RegExpConstructResultStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+class CodeStubGraphBuilder<KeyedLoadGenericElementStub>
+ : public CodeStubGraphBuilderBase {
+ public:
+ CodeStubGraphBuilder(Isolate* isolate,
+ KeyedLoadGenericElementStub* stub)
+ : CodeStubGraphBuilderBase(isolate, stub) {}
+
+ protected:
+ virtual HValue* BuildCodeStub();
+
+ void BuildElementsKindLimitCheck(HGraphBuilder::IfBuilder* if_builder,
+ HValue* bit_field2,
+ ElementsKind kind);
+
+ void BuildFastElementLoad(HGraphBuilder::IfBuilder* if_builder,
+ HValue* receiver,
+ HValue* key,
+ HValue* instance_type,
+ HValue* bit_field2,
+ ElementsKind kind);
+
+ void BuildExternalElementLoad(HGraphBuilder::IfBuilder* if_builder,
+ HValue* receiver,
+ HValue* key,
+ HValue* instance_type,
+ HValue* bit_field2,
+ ElementsKind kind);
+
+ KeyedLoadGenericElementStub* casted_stub() {
+ return static_cast<KeyedLoadGenericElementStub*>(stub());
+ }
+};
+
+
+void CodeStubGraphBuilder<
+ KeyedLoadGenericElementStub>::BuildElementsKindLimitCheck(
+ HGraphBuilder::IfBuilder* if_builder,
+ HValue* bit_field2,
+ ElementsKind kind) {
+ ElementsKind next_kind = static_cast<ElementsKind>(kind + 1);
+ HValue* kind_limit = Add<HConstant>(
+ static_cast<int>(Map::ElementsKindBits::encode(next_kind)));
+
+ if_builder->If<HCompareNumericAndBranch>(bit_field2, kind_limit, Token::LT);
+ if_builder->Then();
+}
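
The limit check relies on where the kind is packed; a hedged illustration with a made-up shift:

  #include <cstdint>

  const int kElementsKindShift = 3;  // illustrative; V8 keeps the kind in the
                                     // topmost bits of Map::bit_field2

  // True iff the encoded elements kind is <= kind, in one unsigned compare:
  // because the kind occupies the high bits, the low flag bits can never
  // carry bit_field2 past the next kind's base value.
  bool ElementsKindAtMost(uint8_t bit_field2, int kind) {
    uint8_t limit = static_cast<uint8_t>((kind + 1) << kElementsKindShift);
    return bit_field2 < limit;
  }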
+
+
+void CodeStubGraphBuilder<KeyedLoadGenericElementStub>::BuildFastElementLoad(
+ HGraphBuilder::IfBuilder* if_builder,
+ HValue* receiver,
+ HValue* key,
+ HValue* instance_type,
+ HValue* bit_field2,
+ ElementsKind kind) {
+ ASSERT(!IsExternalArrayElementsKind(kind));
+
+ BuildElementsKindLimitCheck(if_builder, bit_field2, kind);
+
+ IfBuilder js_array_check(this);
+ js_array_check.If<HCompareNumericAndBranch>(
+ instance_type, Add<HConstant>(JS_ARRAY_TYPE), Token::EQ);
+ js_array_check.Then();
+ Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
+ true, kind,
+ LOAD, NEVER_RETURN_HOLE,
+ STANDARD_STORE));
+ js_array_check.Else();
+ Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
+ false, kind,
+ LOAD, NEVER_RETURN_HOLE,
+ STANDARD_STORE));
+ js_array_check.End();
+}
+
+
+void CodeStubGraphBuilder<
+ KeyedLoadGenericElementStub>::BuildExternalElementLoad(
+ HGraphBuilder::IfBuilder* if_builder,
+ HValue* receiver,
+ HValue* key,
+ HValue* instance_type,
+ HValue* bit_field2,
+ ElementsKind kind) {
+ ASSERT(IsExternalArrayElementsKind(kind));
+
+ BuildElementsKindLimitCheck(if_builder, bit_field2, kind);
+
+ Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
+ false, kind,
+ LOAD, NEVER_RETURN_HOLE,
+ STANDARD_STORE));
+}
+
+
+HValue* CodeStubGraphBuilder<KeyedLoadGenericElementStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(0);
+ HValue* key = GetParameter(1);
+
+  // Split into a smi/integer case and a unique string case.
+ HIfContinuation index_name_split_continuation(graph()->CreateBasicBlock(),
+ graph()->CreateBasicBlock());
+
+ BuildKeyedIndexCheck(key, &index_name_split_continuation);
+
+ IfBuilder index_name_split(this, &index_name_split_continuation);
+ index_name_split.Then();
+ {
+ // Key is an index (number)
+ key = Pop();
+
+ int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor);
+ BuildJSObjectCheck(receiver, bit_field_mask);
+
+ HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap());
+
+ HValue* instance_type =
+ Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMapInstanceType());
+
+ HValue* bit_field2 = Add<HLoadNamedField>(map,
+ static_cast<HValue*>(NULL),
+ HObjectAccess::ForMapBitField2());
+
+ IfBuilder kind_if(this);
+ BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ FAST_HOLEY_ELEMENTS);
+
+ kind_if.Else();
+ {
+ BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ FAST_HOLEY_DOUBLE_ELEMENTS);
+ }
+ kind_if.Else();
+
+ // The DICTIONARY_ELEMENTS check generates a "kind_if.Then"
+ BuildElementsKindLimitCheck(&kind_if, bit_field2, DICTIONARY_ELEMENTS);
+ {
+ HValue* elements = AddLoadElements(receiver);
+
+ HValue* hash = BuildElementIndexHash(key);
+
+ Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash));
+ }
+ kind_if.Else();
+
+ // The SLOPPY_ARGUMENTS_ELEMENTS check generates a "kind_if.Then"
+ BuildElementsKindLimitCheck(&kind_if, bit_field2,
+ SLOPPY_ARGUMENTS_ELEMENTS);
+ // Non-strict elements are not handled.
+ Add<HDeoptimize>("non-strict elements in KeyedLoadGenericElementStub",
+ Deoptimizer::EAGER);
+ Push(graph()->GetConstant0());
+
+ kind_if.Else();
+ BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ EXTERNAL_INT8_ELEMENTS);
+
+ kind_if.Else();
+ BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ EXTERNAL_UINT8_ELEMENTS);
+
+ kind_if.Else();
+ BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ EXTERNAL_INT16_ELEMENTS);
+
+ kind_if.Else();
+ BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ EXTERNAL_UINT16_ELEMENTS);
+
+ kind_if.Else();
+ BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ EXTERNAL_INT32_ELEMENTS);
+
+ kind_if.Else();
+ BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ EXTERNAL_UINT32_ELEMENTS);
+
+ kind_if.Else();
+ BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ EXTERNAL_FLOAT32_ELEMENTS);
+
+ kind_if.Else();
+ BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ EXTERNAL_FLOAT64_ELEMENTS);
+
+ kind_if.Else();
+ BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
+ EXTERNAL_UINT8_CLAMPED_ELEMENTS);
+
+ kind_if.ElseDeopt("ElementsKind unhandled in KeyedLoadGenericElementStub");
+
+ kind_if.End();
+ }
+ index_name_split.Else();
+ {
+ // Key is a unique string.
+ key = Pop();
+
+ int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor);
+ BuildJSObjectCheck(receiver, bit_field_mask);
+
+ HIfContinuation continuation;
+ BuildTestForDictionaryProperties(receiver, &continuation);
+ IfBuilder if_dict_properties(this, &continuation);
+ if_dict_properties.Then();
+ {
+ // Key is string, properties are dictionary mode
+ BuildNonGlobalObjectCheck(receiver);
+
+ HValue* properties = Add<HLoadNamedField>(
+ receiver, static_cast<HValue*>(NULL),
+ HObjectAccess::ForPropertiesPointer());
+
+ HValue* hash =
+ Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
+ HObjectAccess::ForNameHashField());
+
+ HValue* value = BuildUncheckedDictionaryElementLoad(receiver,
+ properties,
+ key,
+ hash);
+ Push(value);
+ }
+ if_dict_properties.Else();
+ {
+ // Key is string, properties are fast mode
+ HValue* hash = BuildKeyedLookupCacheHash(receiver, key);
+
+ ExternalReference cache_keys_ref =
+ ExternalReference::keyed_lookup_cache_keys(isolate());
+ HValue* cache_keys = Add<HConstant>(cache_keys_ref);
+
+ HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap());
+ HValue* base_index = AddUncasted<HMul>(hash, Add<HConstant>(2));
+ base_index->ClearFlag(HValue::kCanOverflow);
+
+ IfBuilder lookup_if(this);
+ for (int probe = 0; probe < KeyedLookupCache::kEntriesPerBucket;
+ ++probe) {
+ int probe_base = probe * KeyedLookupCache::kEntryLength;
+ HValue* map_index = AddUncasted<HAdd>(base_index,
+ Add<HConstant>(probe_base + KeyedLookupCache::kMapIndex));
+ map_index->ClearFlag(HValue::kCanOverflow);
+ HValue* key_index = AddUncasted<HAdd>(base_index,
+ Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
+ key_index->ClearFlag(HValue::kCanOverflow);
+ HValue* map_to_check = Add<HLoadKeyed>(cache_keys,
+ map_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS,
+ NEVER_RETURN_HOLE, 0);
+ lookup_if.If<HCompareObjectEqAndBranch>(map_to_check, map);
+ lookup_if.And();
+ HValue* key_to_check = Add<HLoadKeyed>(cache_keys,
+ key_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS,
+ NEVER_RETURN_HOLE, 0);
+ lookup_if.If<HCompareObjectEqAndBranch>(key_to_check, key);
+ lookup_if.Then();
+ {
+ ExternalReference cache_field_offsets_ref =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate());
+ HValue* cache_field_offsets = Add<HConstant>(cache_field_offsets_ref);
+ HValue* index = AddUncasted<HAdd>(hash,
+ Add<HConstant>(probe));
+ index->ClearFlag(HValue::kCanOverflow);
+ HValue* property_index = Add<HLoadKeyed>(cache_field_offsets,
+ index,
+ static_cast<HValue*>(NULL),
+ EXTERNAL_INT32_ELEMENTS,
+ NEVER_RETURN_HOLE, 0);
+ Push(property_index);
+ }
+ lookup_if.Else();
+ }
+ Add<HDeoptimize>("KeyedLoad fall-back", Deoptimizer::EAGER);
+ Push(graph()->GetConstant0());
+ lookup_if.End();
+ Push(Add<HLoadFieldByIndex>(receiver, Pop()));
+ }
+ if_dict_properties.End();
+ }
+ index_name_split.End();
+
+ return Pop();
}
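
The fast-properties branch above probes the keyed lookup cache; a hedged C++ model of that probe sequence with invented table sizes:

  #include <cstdint>

  const int kEntriesPerBucket = 2;  // probes per bucket (illustrative)
  const int kEntryLength = 2;       // one map slot + one key slot

  struct KeyedLookupCache {
    const void* keys[512];    // interleaved [map0, key0, map1, key1, ...]
    int field_offsets[256];   // parallel array of cached property indices
  };

  // Mirror of the generated probe loop; hash is assumed to be pre-masked to
  // the bucket range. On a hit the property index feeds HLoadFieldByIndex.
  int Probe(const KeyedLookupCache& c, uint32_t hash,
            const void* map, const void* key) {
    int base_index = static_cast<int>(hash) * 2;
    for (int probe = 0; probe < kEntriesPerBucket; ++probe) {
      int probe_base = probe * kEntryLength;
      if (c.keys[base_index + probe_base + 0] == map &&
          c.keys[base_index + probe_base + 1] == key) {
        return c.field_offsets[hash + probe];  // hit
      }
    }
    return -1;  // miss: the stub deoptimizes ("KeyedLoad fall-back")
  }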
-Handle<Code> KeyedLoadDictionaryElementStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
+Handle<Code> KeyedLoadGenericElementStub::GenerateCode() {
+ return DoGenerateCode(this);
}
diff --git a/chromium/v8/src/code-stubs.cc b/chromium/v8/src/code-stubs.cc
index 5f6a1147a15..acd877d4635 100644
--- a/chromium/v8/src/code-stubs.cc
+++ b/chromium/v8/src/code-stubs.cc
@@ -1,39 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "cpu-profiler.h"
-#include "stub-cache.h"
-#include "factory.h"
-#include "gdb-jit.h"
-#include "macro-assembler.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
+#include "src/stub-cache.h"
+#include "src/factory.h"
+#include "src/gdb-jit.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -43,22 +20,17 @@ CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
: register_param_count_(-1),
stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
- continuation_type_(NORMAL_CONTINUATION),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
+ register_param_representations_(NULL),
deoptimization_handler_(NULL),
handler_arguments_mode_(DONT_PASS_ARGUMENTS),
miss_handler_(),
has_miss_handler_(false) { }
-void CodeStub::GenerateStubsRequiringBuiltinsAheadOfTime(Isolate* isolate) {
- StubFailureTailCallTrampolineStub::GenerateAheadOfTime(isolate);
-}
-
-
-bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
- UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
+bool CodeStub::FindCodeInCache(Code** code_out) {
+ UnseededNumberDictionary* stubs = isolate()->heap()->code_stubs();
int index = stubs->FindEntry(GetKey());
if (index != UnseededNumberDictionary::kNotFound) {
*code_out = Code::cast(stubs->ValueAt(index));
@@ -78,11 +50,12 @@ SmartArrayPointer<const char> CodeStub::GetName() {
}
-void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
+void CodeStub::RecordCodeGeneration(Handle<Code> code) {
+ IC::RegisterWeakMapDependency(code);
SmartArrayPointer<const char> name = GetName();
- PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
- GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
- Counters* counters = isolate->counters();
+ PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, name.get()));
+ GDBJIT(AddCode(GDBJITInterface::STUB, name.get(), *code));
+ Counters* counters = isolate()->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
}
@@ -92,23 +65,24 @@ Code::Kind CodeStub::GetCodeKind() const {
}
-Handle<Code> CodeStub::GetCodeCopyFromTemplate(Isolate* isolate) {
- Handle<Code> ic = GetCode(isolate);
- ic = isolate->factory()->CopyCode(ic);
- RecordCodeGeneration(*ic, isolate);
+Handle<Code> CodeStub::GetCodeCopy(const Code::FindAndReplacePattern& pattern) {
+ Handle<Code> ic = GetCode();
+ ic = isolate()->factory()->CopyCode(ic);
+ ic->FindAndReplace(pattern);
+ RecordCodeGeneration(ic);
return ic;
}
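
GetCodeCopy (replacing GetCodeCopyFromTemplate) adds a find-and-replace pass over the copied code; a hedged sketch of the pattern object, operating on an array of embedded pointers rather than the real Code layout:

  // Hedged sketch of the copy-and-patch scheme: a stub is compiled once
  // against placeholder objects (e.g. the meta map, or the placeholder cell
  // in StoreGlobalStub), then each use site copies the code and rewrites the
  // embedded placeholders.
  struct FindAndReplacePattern {
    static const int kMaxCount = 4;  // illustrative capacity
    const void* find[kMaxCount];
    const void* replace[kMaxCount];
    int count;
    FindAndReplacePattern() : count(0) {}
    void Add(const void* from, const void* to) {
      find[count] = from;
      replace[count] = to;
      ++count;
    }
  };

  void FindAndReplace(const void** embedded, int len,
                      const FindAndReplacePattern& p) {
    for (int i = 0; i < len; ++i) {
      for (int j = 0; j < p.count; ++j) {
        if (embedded[i] == p.find[j]) embedded[i] = p.replace[j];
      }
    }
  }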
-Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
- Factory* factory = isolate->factory();
+Handle<Code> PlatformCodeStub::GenerateCode() {
+ Factory* factory = isolate()->factory();
// Generate the new code.
- MacroAssembler masm(isolate, NULL, 256);
+ MacroAssembler masm(isolate(), NULL, 256);
{
// Update the static counter each time a new code stub is generated.
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
// Generate the code for the stub.
masm.set_generating_stub(true);
@@ -125,46 +99,35 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(),
- GetStubFlags());
+ GetStubType());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
}
-void CodeStub::VerifyPlatformFeatures(Isolate* isolate) {
- ASSERT(CpuFeatures::VerifyCrossCompiling());
-}
-
-
-Handle<Code> CodeStub::GetCode(Isolate* isolate) {
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
+Handle<Code> CodeStub::GetCode() {
+ Heap* heap = isolate()->heap();
Code* code;
if (UseSpecialCache()
- ? FindCodeInSpecialCache(&code, isolate)
- : FindCodeInCache(&code, isolate)) {
+ ? FindCodeInSpecialCache(&code)
+ : FindCodeInCache(&code)) {
ASSERT(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
-#ifdef DEBUG
- VerifyPlatformFeatures(isolate);
-#endif
-
{
- HandleScope scope(isolate);
+ HandleScope scope(isolate());
- Handle<Code> new_object = GenerateCode(isolate);
+ Handle<Code> new_object = GenerateCode();
new_object->set_major_key(MajorKey());
FinishCode(new_object);
- RecordCodeGeneration(*new_object, isolate);
+ RecordCodeGeneration(new_object);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
- CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
- new_object->Disassemble(*GetName(), trace_scope.file());
+ CodeTracer::Scope trace_scope(isolate()->GetCodeTracer());
+ new_object->Disassemble(GetName().get(), trace_scope.file());
PrintF(trace_scope.file(), "\n");
}
#endif
@@ -174,7 +137,7 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
} else {
// Update the dictionary and the root in Heap.
Handle<UnseededNumberDictionary> dict =
- factory->DictionaryAtNumberPut(
+ UnseededNumberDictionary::AtNumberPut(
Handle<UnseededNumberDictionary>(heap->code_stubs()),
GetKey(),
new_object);
@@ -187,7 +150,7 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
ASSERT(!NeedsImmovableCode() ||
heap->lo_space()->Contains(code) ||
heap->code_space()->FirstPage()->Contains(code->address()));
- return Handle<Code>(code, isolate);
+ return Handle<Code>(code, isolate());
}
@@ -223,9 +186,10 @@ void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
// Generate the uninitialized versions of the stub.
for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
for (int mode = NO_OVERWRITE; mode <= OVERWRITE_RIGHT; ++mode) {
- BinaryOpICStub stub(static_cast<Token::Value>(op),
+ BinaryOpICStub stub(isolate,
+ static_cast<Token::Value>(op),
static_cast<OverwriteMode>(mode));
- stub.GetCode(isolate);
+ stub.GetCode();
}
}
@@ -242,13 +206,35 @@ void BinaryOpICStub::PrintState(StringStream* stream) {
// static
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
const BinaryOpIC::State& state) {
- BinaryOpICStub stub(state);
- stub.GetCode(isolate);
+ BinaryOpICStub stub(isolate, state);
+ stub.GetCode();
+}
+
+
+// static
+void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
+ // Generate special versions of the stub.
+ BinaryOpIC::State::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
}
-void NewStringAddStub::PrintBaseName(StringStream* stream) {
- stream->Add("NewStringAddStub");
+void BinaryOpICWithAllocationSiteStub::PrintState(StringStream* stream) {
+ state_.Print(stream);
+}
+
+
+// static
+void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(
+ Isolate* isolate, const BinaryOpIC::State& state) {
+ if (state.CouldCreateAllocationMementos()) {
+ BinaryOpICWithAllocationSiteStub stub(isolate, state);
+ stub.GetCode();
+ }
+}
+
+
+void StringAddStub::PrintBaseName(StringStream* stream) {
+ stream->Add("StringAddStub");
if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
stream->Add("_CheckBoth");
} else if ((flags() & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
@@ -295,8 +281,8 @@ void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
}
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
- Factory* factory = isolate->factory();
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
+ Factory* factory = isolate()->factory();
Code::Flags flags = Code::ComputeFlags(
GetCodeKind(),
UNINITIALIZED);
@@ -307,7 +293,7 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
*factory->strict_compare_ic_string() :
*factory->compare_ic_string(),
flags),
- isolate);
+ isolate());
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
#ifdef DEBUG
@@ -454,38 +440,44 @@ void CompareNilICStub::State::Print(StringStream* stream) const {
}
-Handle<Type> CompareNilICStub::GetType(
- Isolate* isolate,
- Handle<Map> map) {
+Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
if (state_.Contains(CompareNilICStub::GENERIC)) {
- return handle(Type::Any(), isolate);
+ return Type::Any(zone);
}
- Handle<Type> result(Type::None(), isolate);
+ Type* result = Type::None(zone);
if (state_.Contains(CompareNilICStub::UNDEFINED)) {
- result = handle(Type::Union(result, handle(Type::Undefined(), isolate)),
- isolate);
+ result = Type::Union(result, Type::Undefined(zone), zone);
}
if (state_.Contains(CompareNilICStub::NULL_TYPE)) {
- result = handle(Type::Union(result, handle(Type::Null(), isolate)),
- isolate);
+ result = Type::Union(result, Type::Null(zone), zone);
}
if (state_.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
- Type* type = map.is_null() ? Type::Detectable() : Type::Class(map);
- result = handle(Type::Union(result, handle(type, isolate)), isolate);
+ Type* type =
+ map.is_null() ? Type::Detectable(zone) : Type::Class(map, zone);
+ result = Type::Union(result, type, zone);
}
return result;
}
-Handle<Type> CompareNilICStub::GetInputType(
- Isolate* isolate,
- Handle<Map> map) {
- Handle<Type> output_type = GetType(isolate, map);
- Handle<Type> nil_type = handle(nil_value_ == kNullValue
- ? Type::Null() : Type::Undefined(), isolate);
- return handle(Type::Union(output_type, nil_type), isolate);
+Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
+ Type* output_type = GetType(zone, map);
+ Type* nil_type =
+ nil_value_ == kNullValue ? Type::Null(zone) : Type::Undefined(zone);
+ return Type::Union(output_type, nil_type, zone);
+}
+
+
+void CallIC_ArrayStub::PrintState(StringStream* stream) {
+ state_.Print(stream);
+ stream->Add(" (Array)");
+}
+
+
+void CallICStub::PrintState(StringStream* stream) {
+ state_.Print(stream);
}
@@ -527,8 +519,8 @@ void KeyedLoadDictionaryElementPlatformStub::Generate(
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
- CreateAllocationSiteStub stub;
- stub.GetCode(isolate);
+ CreateAllocationSiteStub stub(isolate);
+ stub.GetCode();
}
@@ -540,21 +532,18 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
UNREACHABLE();
break;
case DICTIONARY_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
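
The TYPED_ARRAYS X-macro replaces the hand-written run of case labels; a hedged, abbreviated reconstruction of how it expands:

  // Hedged reconstruction; the real TYPED_ARRAYS list covers all nine
  // typed-array types with this same row shape.
  #define TYPED_ARRAYS(V)              \
    V(Uint8, uint8, UINT8, uint8_t, 1) \
    V(Int8, int8, INT8, int8_t, 1)     /* ...Float64, Uint8Clamped, etc. */

  #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
    case EXTERNAL_##TYPE##_ELEMENTS:                       \
    case TYPE##_ELEMENTS:

  // Inside the switch, TYPED_ARRAYS(TYPED_ARRAY_CASE) expands to:
  //   case EXTERNAL_UINT8_ELEMENTS: case UINT8_ELEMENTS:
  //   case EXTERNAL_INT8_ELEMENTS:  case INT8_ELEMENTS:  ...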
@@ -565,8 +554,8 @@ void ArgumentsAccessStub::PrintName(StringStream* stream) {
stream->Add("ArgumentsAccessStub_");
switch (type_) {
case READ_ELEMENT: stream->Add("ReadElement"); break;
- case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
- case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
+ case NEW_SLOPPY_FAST: stream->Add("NewSloppyFast"); break;
+ case NEW_SLOPPY_SLOW: stream->Add("NewSloppySlow"); break;
case NEW_STRICT: stream->Add("NewStrict"); break;
}
}
@@ -574,8 +563,6 @@ void ArgumentsAccessStub::PrintName(StringStream* stream) {
void CallFunctionStub::PrintName(StringStream* stream) {
stream->Add("CallFunctionStub_Args%d", argc_);
- if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
- if (RecordCallTarget()) stream->Add("_Recording");
}
@@ -585,6 +572,28 @@ void CallConstructStub::PrintName(StringStream* stream) {
}
+void ArrayConstructorStub::PrintName(StringStream* stream) {
+ stream->Add("ArrayConstructorStub");
+ switch (argument_count_) {
+ case ANY: stream->Add("_Any"); break;
+ case NONE: stream->Add("_None"); break;
+ case ONE: stream->Add("_One"); break;
+ case MORE_THAN_ONE: stream->Add("_More_Than_One"); break;
+ }
+}
+
+
+void ArrayConstructorStubBase::BasePrintName(const char* name,
+ StringStream* stream) {
+ stream->Add(name);
+ stream->Add("_");
+ stream->Add(ElementsKindToString(elements_kind()));
+ if (override_mode() == DISABLE_ALLOCATION_SITES) {
+ stream->Add("_DISABLE_ALLOCATION_SITES");
+ }
+}
+
+
bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
Types old_types(types_);
bool to_boolean_value = types_.UpdateStatus(object);
@@ -665,16 +674,10 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
- StubFailureTrampolineStub stub1(NOT_JS_FUNCTION_STUB_MODE);
- StubFailureTrampolineStub stub2(JS_FUNCTION_STUB_MODE);
- stub1.GetCode(isolate);
- stub2.GetCode(isolate);
-}
-
-
-void StubFailureTailCallTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
- StubFailureTailCallTrampolineStub stub;
- stub.GetCode(isolate);
+ StubFailureTrampolineStub stub1(isolate, NOT_JS_FUNCTION_STUB_MODE);
+ StubFailureTrampolineStub stub2(isolate, JS_FUNCTION_STUB_MODE);
+ stub1.GetCode();
+ stub2.GetCode();
}
@@ -692,55 +695,91 @@ static void InstallDescriptor(Isolate* isolate, HydrogenCodeStub* stub) {
CodeStubInterfaceDescriptor* descriptor =
isolate->code_stub_interface_descriptor(major_key);
if (!descriptor->initialized()) {
- stub->InitializeInterfaceDescriptor(isolate, descriptor);
+ stub->InitializeInterfaceDescriptor(descriptor);
}
}
void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
- ArrayNoArgumentConstructorStub stub1(GetInitialFastElementsKind());
+ ArrayNoArgumentConstructorStub stub1(isolate, GetInitialFastElementsKind());
InstallDescriptor(isolate, &stub1);
- ArraySingleArgumentConstructorStub stub2(GetInitialFastElementsKind());
+ ArraySingleArgumentConstructorStub stub2(isolate,
+ GetInitialFastElementsKind());
InstallDescriptor(isolate, &stub2);
- ArrayNArgumentsConstructorStub stub3(GetInitialFastElementsKind());
+ ArrayNArgumentsConstructorStub stub3(isolate, GetInitialFastElementsKind());
InstallDescriptor(isolate, &stub3);
}
void NumberToStringStub::InstallDescriptors(Isolate* isolate) {
- NumberToStringStub stub;
+ NumberToStringStub stub(isolate);
InstallDescriptor(isolate, &stub);
}
void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
- FastNewClosureStub stub(STRICT_MODE, false);
+ FastNewClosureStub stub(isolate, STRICT, false);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+void FastNewContextStub::InstallDescriptors(Isolate* isolate) {
+ FastNewContextStub stub(isolate, FastNewContextStub::kMaximumSlots);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
+void FastCloneShallowArrayStub::InstallDescriptors(Isolate* isolate) {
+ FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE);
InstallDescriptor(isolate, &stub);
}
// static
void BinaryOpICStub::InstallDescriptors(Isolate* isolate) {
- BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
+ BinaryOpICStub stub(isolate, Token::ADD, NO_OVERWRITE);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
+void BinaryOpWithAllocationSiteStub::InstallDescriptors(Isolate* isolate) {
+ BinaryOpWithAllocationSiteStub stub(isolate, Token::ADD, NO_OVERWRITE);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
+void StringAddStub::InstallDescriptors(Isolate* isolate) {
+ StringAddStub stub(isolate, STRING_ADD_CHECK_NONE, NOT_TENURED);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
+void RegExpConstructResultStub::InstallDescriptors(Isolate* isolate) {
+ RegExpConstructResultStub stub(isolate);
InstallDescriptor(isolate, &stub);
}
// static
-void NewStringAddStub::InstallDescriptors(Isolate* isolate) {
- NewStringAddStub stub(STRING_ADD_CHECK_NONE, NOT_TENURED);
+void KeyedLoadGenericElementStub::InstallDescriptors(Isolate* isolate) {
+ KeyedLoadGenericElementStub stub(isolate);
InstallDescriptor(isolate, &stub);
}
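All of these InstallDescriptors helpers funnel through InstallDescriptor(), which initializes each stub kind's interface descriptor at most once per isolate. A standalone sketch of that idempotent guard, with simplified stand-ins for the V8 types:

#include <map>

// Simplified stand-in: -1 means "not yet initialized".
struct CodeStubInterfaceDescriptor {
  CodeStubInterfaceDescriptor() : register_param_count_(-1) {}
  bool initialized() const { return register_param_count_ >= 0; }
  int register_param_count_;
};

struct Isolate {
  CodeStubInterfaceDescriptor* code_stub_interface_descriptor(int major_key) {
    return &descriptors_[major_key];  // default-constructs on first access
  }
  std::map<int, CodeStubInterfaceDescriptor> descriptors_;
};

struct HydrogenCodeStub {
  virtual ~HydrogenCodeStub() {}
  virtual int MajorKey() = 0;
  virtual void InitializeInterfaceDescriptor(
      CodeStubInterfaceDescriptor* descriptor) = 0;
};

static void InstallDescriptor(Isolate* isolate, HydrogenCodeStub* stub) {
  CodeStubInterfaceDescriptor* descriptor =
      isolate->code_stub_interface_descriptor(stub->MajorKey());
  if (!descriptor->initialized()) {  // idempotent: a second call is a no-op
    stub->InitializeInterfaceDescriptor(descriptor);
  }
}

struct ExampleStub : HydrogenCodeStub {
  virtual int MajorKey() { return 1; }
  virtual void InitializeInterfaceDescriptor(
      CodeStubInterfaceDescriptor* descriptor) {
    descriptor->register_param_count_ = 2;  // now initialized()
  }
};

int main() {
  Isolate isolate;
  ExampleStub stub;
  InstallDescriptor(&isolate, &stub);  // initializes the descriptor
  InstallDescriptor(&isolate, &stub);  // skipped by the guard
}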
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
- : argument_count_(ANY) {
+ : PlatformCodeStub(isolate), argument_count_(ANY) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
- int argument_count) {
+ int argument_count)
+ : PlatformCodeStub(isolate) {
if (argument_count == 0) {
argument_count_ = NONE;
} else if (argument_count == 1) {
@@ -755,16 +794,16 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
void InternalArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
- InternalArrayNoArgumentConstructorStub stub1(FAST_ELEMENTS);
+ InternalArrayNoArgumentConstructorStub stub1(isolate, FAST_ELEMENTS);
InstallDescriptor(isolate, &stub1);
- InternalArraySingleArgumentConstructorStub stub2(FAST_ELEMENTS);
+ InternalArraySingleArgumentConstructorStub stub2(isolate, FAST_ELEMENTS);
InstallDescriptor(isolate, &stub2);
- InternalArrayNArgumentsConstructorStub stub3(FAST_ELEMENTS);
+ InternalArrayNArgumentsConstructorStub stub3(isolate, FAST_ELEMENTS);
InstallDescriptor(isolate, &stub3);
}
InternalArrayConstructorStub::InternalArrayConstructorStub(
- Isolate* isolate) {
+ Isolate* isolate) : PlatformCodeStub(isolate) {
InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
diff --git a/chromium/v8/src/code-stubs.h b/chromium/v8/src/code-stubs.h
index bedf295d2f9..b243b560931 100644
--- a/chromium/v8/src/code-stubs.h
+++ b/chromium/v8/src/code-stubs.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CODE_STUBS_H_
#define V8_CODE_STUBS_H_
-#include "allocation.h"
-#include "assembler.h"
-#include "codegen.h"
-#include "globals.h"
-#include "macro-assembler.h"
+#include "src/allocation.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/globals.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -42,21 +19,21 @@ namespace internal {
V(CallFunction) \
V(CallConstruct) \
V(BinaryOpIC) \
+ V(BinaryOpICWithAllocationSite) \
+ V(BinaryOpWithAllocationSite) \
V(StringAdd) \
- V(NewStringAdd) \
V(SubString) \
V(StringCompare) \
V(Compare) \
V(CompareIC) \
V(CompareNilIC) \
V(MathPow) \
- V(StringLength) \
+ V(CallIC) \
+ V(CallIC_Array) \
V(FunctionPrototype) \
- V(StoreArrayLength) \
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
- V(TranscendentalCache) \
V(Instanceof) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
@@ -64,7 +41,6 @@ namespace internal {
V(Interrupt) \
V(FastNewClosure) \
V(FastNewContext) \
- V(FastNewBlockContext) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CreateAllocationSite) \
@@ -77,6 +53,7 @@ namespace internal {
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
+ V(KeyedLoadGeneric) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -90,17 +67,19 @@ namespace internal {
V(TransitionElementsKind) \
V(StoreArrayLiteralElement) \
V(StubFailureTrampoline) \
- V(StubFailureTailCallTrampoline) \
V(ArrayConstructor) \
V(InternalArrayConstructor) \
V(ProfileEntryHook) \
V(StoreGlobal) \
+ V(CallApiFunction) \
+ V(CallApiGetter) \
/* IC Handler stubs */ \
V(LoadField) \
V(KeyedLoadField) \
- V(KeyedArrayCall)
+ V(StringLength) \
+ V(KeyedStringLength)
-// List of code stubs only used on ARM platforms.
+// List of code stubs only used on 32-bit ARM platforms.
#if V8_TARGET_ARCH_ARM
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
@@ -111,11 +90,26 @@ namespace internal {
#define CODE_STUB_LIST_ARM(V)
#endif
+// List of code stubs only used on 64-bit ARM platforms.
+#if V8_TARGET_ARCH_ARM64
+#define CODE_STUB_LIST_ARM64(V) \
+ V(GetProperty) \
+ V(SetProperty) \
+ V(InvokeBuiltin) \
+ V(DirectCEntry) \
+ V(StoreRegistersState) \
+ V(RestoreRegistersState)
+#else
+#define CODE_STUB_LIST_ARM64(V)
+#endif
+
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry) \
- V(DirectCEntry)
+ V(DirectCEntry) \
+ V(StoreRegistersState) \
+ V(RestoreRegistersState)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
@@ -124,6 +118,7 @@ namespace internal {
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V) \
+ CODE_STUB_LIST_ARM64(V) \
CODE_STUB_LIST_MIPS(V)
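CODE_STUB_LIST is an X-macro: one list is expanded several times with different per-entry macros so that the Major enum, name strings, and other tables stay in sync, and the platform sublists (ARM, the new ARM64, MIPS) splice in extra entries conditionally. A minimal standalone demonstration of the pattern with three made-up entries:

#include <cstdio>

// The list macro takes the per-entry macro V as its parameter.
#define DEMO_STUB_LIST(V) \
  V(CallFunction)         \
  V(BinaryOpIC)           \
  V(KeyedLoadGeneric)

// Expansion 1: an enum of stub kinds.
enum Major {
#define DEF_ENUM(name) name,
  DEMO_STUB_LIST(DEF_ENUM)
#undef DEF_ENUM
  NUMBER_OF_STUBS
};

// Expansion 2: a parallel table of names, guaranteed to line up.
static const char* kMajorNames[] = {
#define DEF_NAME(name) #name,
  DEMO_STUB_LIST(DEF_NAME)
#undef DEF_NAME
};

int main() {
  // Prints "KeyedLoadGeneric".
  std::printf("%s\n", kMajorNames[KeyedLoadGeneric]);
}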
// CodeStub is the base class of all stubs.
@@ -139,10 +134,11 @@ class CodeStub BASE_EMBEDDED {
};
// Retrieve the code for the stub. Generate the code if needed.
- Handle<Code> GetCode(Isolate* isolate);
+ Handle<Code> GetCode();
// Retrieve the code for the stub, make and return a copy of the code.
- Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate);
+ Handle<Code> GetCodeCopy(const Code::FindAndReplacePattern& pattern);
+
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
@@ -157,10 +153,10 @@ class CodeStub BASE_EMBEDDED {
static const char* MajorName(Major major_key, bool allow_unknown_keys);
+ explicit CodeStub(Isolate* isolate) : isolate_(isolate) { }
virtual ~CodeStub() {}
static void GenerateStubsAheadOfTime(Isolate* isolate);
- static void GenerateStubsRequiringBuiltinsAheadOfTime(Isolate* isolate);
static void GenerateFPStubs(Isolate* isolate);
// Some stubs put untagged junk on the stack that cannot be scanned by the
@@ -172,7 +168,7 @@ class CodeStub BASE_EMBEDDED {
virtual bool SometimesSetsUpAFrame() { return true; }
// Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out, Isolate* isolate);
+ bool FindCodeInCache(Code** code_out);
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
@@ -187,22 +183,17 @@ class CodeStub BASE_EMBEDDED {
virtual Code::StubType GetStubType() {
return Code::NORMAL;
}
- virtual int GetStubFlags() {
- return -1;
- }
virtual void PrintName(StringStream* stream);
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
- protected:
- static bool CanUseFPRegisters();
+ Isolate* isolate() const { return isolate_; }
+ protected:
// Generates the assembler code for the stub.
- virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
-
- virtual void VerifyPlatformFeatures(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() = 0;
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
@@ -214,7 +205,7 @@ class CodeStub BASE_EMBEDDED {
private:
// Perform bookkeeping required after code generation when stub code is
// initially generated.
- void RecordCodeGeneration(Code* code, Isolate* isolate);
+ void RecordCodeGeneration(Handle<Code> code);
// Finish the code object after it has been generated.
virtual void FinishCode(Handle<Code> code) { }
@@ -232,7 +223,7 @@ class CodeStub BASE_EMBEDDED {
virtual void AddToSpecialCache(Handle<Code> new_object) { }
// Find code in a specialized cache, work is delegated to the specific stub.
- virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
+ virtual bool FindCodeInSpecialCache(Code** code_out) {
return false;
}
@@ -252,13 +243,17 @@ class CodeStub BASE_EMBEDDED {
kStubMajorKeyBits, kStubMinorKeyBits> {}; // NOLINT
friend class BreakPointIterator;
+
+ Isolate* isolate_;
};
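MajorKeyBits and MinorKeyBits above are instances of V8's BitField template, which packs several small values into one integer key; the same template backs the many FooBits classes elsewhere in this file. A standalone sketch of such a template and a major/minor key layout (the widths here are illustrative, not V8's actual constants):

#include <cstdint>
#include <cstdio>

// Packs a value of type T into bits [shift, shift + size) of a uint32_t.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static uint32_t update(uint32_t field, T value) {  // cf. IsConstantBits::update
    return (field & ~kMask) | encode(value);
  }
};

// Illustrative layout: minor key in the low bits, major key above it.
typedef BitField<int, 0, 24> MinorKeyBits;
typedef BitField<int, 24, 8> MajorKeyBits;

int main() {
  uint32_t key = MajorKeyBits::encode(7) | MinorKeyBits::encode(42);
  // Prints "major=7 minor=42".
  std::printf("major=%d minor=%d\n",
              MajorKeyBits::decode(key), MinorKeyBits::decode(key));
}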
class PlatformCodeStub : public CodeStub {
public:
+ explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) { }
+
// Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
@@ -271,9 +266,6 @@ class PlatformCodeStub : public CodeStub {
enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };
-enum ContinuationType { NORMAL_CONTINUATION, TAIL_CALL_CONTINUATION };
-
-
struct CodeStubInterfaceDescriptor {
CodeStubInterfaceDescriptor();
int register_param_count_;
@@ -282,19 +274,19 @@ struct CodeStubInterfaceDescriptor {
// if hint_stack_parameter_count_ > 0, the code stub can optimize the
// return sequence. Default value is -1, which means it is ignored.
int hint_stack_parameter_count_;
- ContinuationType continuation_type_;
StubFunctionMode function_mode_;
Register* register_params_;
+  // Specifies the Representations for the stub's parameters. Points to an
+  // array of Representations with the same length as the number of
+  // parameters to the stub; if NULL (the default value), the Representation
+  // of each parameter is assumed to be Tagged().
+ Representation* register_param_representations_;
Address deoptimization_handler_;
HandlerArgumentsMode handler_arguments_mode_;
bool initialized() const { return register_param_count_ >= 0; }
- bool HasTailCallContinuation() const {
- return continuation_type_ == TAIL_CALL_CONTINUATION;
- }
-
int environment_length() const {
return register_param_count_;
}
@@ -316,7 +308,7 @@ struct CodeStubInterfaceDescriptor {
return has_miss_handler_;
}
- Register GetParameterRegister(int index) {
+ Register GetParameterRegister(int index) const {
return register_params_[index];
}
@@ -338,6 +330,41 @@ struct CodeStubInterfaceDescriptor {
};
+struct PlatformCallInterfaceDescriptor;
+
+
+struct CallInterfaceDescriptor {
+ CallInterfaceDescriptor()
+ : register_param_count_(-1),
+ register_params_(NULL),
+ param_representations_(NULL),
+ platform_specific_descriptor_(NULL) { }
+
+ bool initialized() const { return register_param_count_ >= 0; }
+
+ int environment_length() const {
+ return register_param_count_;
+ }
+
+ Representation GetParameterRepresentation(int index) const {
+ return param_representations_[index];
+ }
+
+ Register GetParameterRegister(int index) const {
+ return register_params_[index];
+ }
+
+ PlatformCallInterfaceDescriptor* platform_specific_descriptor() const {
+ return platform_specific_descriptor_;
+ }
+
+ int register_param_count_;
+ Register* register_params_;
+ Representation* param_representations_;
+ PlatformCallInterfaceDescriptor* platform_specific_descriptor_;
+};
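CallInterfaceDescriptor is deliberately a plain struct: a register_param_count_ of -1 doubles as the "not yet initialized" sentinel, so initialized() can distinguish an untouched descriptor from one with zero parameters. A standalone sketch with stand-in Register and Representation types:

#include <cstddef>

struct Register { int code; };          // stand-in for the V8 type
struct Representation { int kind; };    // stand-in for the V8 type
struct PlatformCallInterfaceDescriptor;

struct CallInterfaceDescriptor {
  CallInterfaceDescriptor()
      : register_param_count_(-1),      // sentinel: not initialized
        register_params_(NULL),
        param_representations_(NULL),
        platform_specific_descriptor_(NULL) {}

  bool initialized() const { return register_param_count_ >= 0; }
  int environment_length() const { return register_param_count_; }

  int register_param_count_;
  Register* register_params_;
  Representation* param_representations_;
  PlatformCallInterfaceDescriptor* platform_specific_descriptor_;
};

int main() {
  static Register regs[2] = { {0}, {1} };
  static Representation reps[2] = { {0}, {0} };

  CallInterfaceDescriptor desc;
  bool before = desc.initialized();     // false: sentinel still in place
  desc.register_param_count_ = 2;
  desc.register_params_ = regs;
  desc.param_representations_ = reps;
  bool after = desc.initialized();      // true: even 0 would count
  (void)before; (void)after;
  return desc.environment_length() == 2 ? 0 : 1;
}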
+
+
class HydrogenCodeStub : public CodeStub {
public:
enum InitializationState {
@@ -345,14 +372,15 @@ class HydrogenCodeStub : public CodeStub {
INITIALIZED
};
- explicit HydrogenCodeStub(InitializationState state = INITIALIZED) {
+ HydrogenCodeStub(Isolate* isolate, InitializationState state = INITIALIZED)
+ : CodeStub(isolate) {
is_uninitialized_ = (state == UNINITIALIZED);
}
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
- CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate) {
- return isolate->code_stub_interface_descriptor(MajorKey());
+ CodeStubInterfaceDescriptor* GetInterfaceDescriptor() {
+ return isolate()->code_stub_interface_descriptor(MajorKey());
}
bool IsUninitialized() { return is_uninitialized_; }
@@ -364,15 +392,14 @@ class HydrogenCodeStub : public CodeStub {
}
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) = 0;
// Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
+ virtual Handle<Code> GenerateCode() = 0;
virtual int NotMissMinorKey() = 0;
- Handle<Code> GenerateLightweightMissCode(Isolate* isolate);
+ Handle<Code> GenerateLightweightMissCode();
template<class StateType>
void TraceTransition(StateType from, StateType to);
@@ -408,29 +435,20 @@ class RuntimeCallHelper {
};
-// TODO(bmeurer): Move to the StringAddStub declaration once we're
-// done with the translation to a hydrogen code stub.
-enum StringAddFlags {
- // Omit both parameter checks.
- STRING_ADD_CHECK_NONE = 0,
- // Check left parameter.
- STRING_ADD_CHECK_LEFT = 1 << 0,
- // Check right parameter.
- STRING_ADD_CHECK_RIGHT = 1 << 1,
- // Check both parameters.
- STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
-};
-
} } // namespace v8::internal
#if V8_TARGET_ARCH_IA32
-#include "ia32/code-stubs-ia32.h"
+#include "src/ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/code-stubs-x64.h"
+#include "src/x64/code-stubs-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/code-stubs-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/code-stubs-arm.h"
+#include "src/arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/code-stubs-mips.h"
+#include "src/mips/code-stubs-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/code-stubs-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -464,13 +482,18 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
class ToNumberStub: public HydrogenCodeStub {
public:
- ToNumberStub() { }
+ explicit ToNumberStub(Isolate* isolate) : HydrogenCodeStub(isolate) { }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate) {
+ ToNumberStub stub(isolate);
+ stub.InitializeInterfaceDescriptor(
+ isolate->code_stub_interface_descriptor(CodeStub::ToNumber));
+ }
private:
Major MajorKey() { return ToNumber; }
@@ -480,12 +503,11 @@ class ToNumberStub: public HydrogenCodeStub {
class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
public:
- NumberToStringStub() {}
+ explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
- virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
static void InstallDescriptors(Isolate* isolate);
@@ -501,19 +523,21 @@ class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
class FastNewClosureStub : public HydrogenCodeStub {
public:
- explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
- : language_mode_(language_mode),
- is_generator_(is_generator) { }
+ FastNewClosureStub(Isolate* isolate,
+ StrictMode strict_mode,
+ bool is_generator)
+ : HydrogenCodeStub(isolate),
+ strict_mode_(strict_mode),
+ is_generator_(is_generator) { }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
static void InstallDescriptors(Isolate* isolate);
- LanguageMode language_mode() const { return language_mode_; }
+ StrictMode strict_mode() const { return strict_mode_; }
bool is_generator() const { return is_generator_; }
private:
@@ -522,117 +546,70 @@ class FastNewClosureStub : public HydrogenCodeStub {
Major MajorKey() { return FastNewClosure; }
int NotMissMinorKey() {
- return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
+ return StrictModeBits::encode(strict_mode_ == STRICT) |
IsGeneratorBits::encode(is_generator_);
}
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
bool is_generator_;
};
-class FastNewContextStub : public PlatformCodeStub {
+class FastNewContextStub V8_FINAL : public HydrogenCodeStub {
public:
static const int kMaximumSlots = 64;
- explicit FastNewContextStub(int slots) : slots_(slots) {
+ FastNewContextStub(Isolate* isolate, int slots)
+ : HydrogenCodeStub(isolate), slots_(slots) {
ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
- void Generate(MacroAssembler* masm);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
- private:
- int slots_;
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
- Major MajorKey() { return FastNewContext; }
- int MinorKey() { return slots_; }
-};
+ static void InstallDescriptors(Isolate* isolate);
+ int slots() const { return slots_; }
-class FastNewBlockContextStub : public PlatformCodeStub {
- public:
- static const int kMaximumSlots = 64;
+ virtual Major MajorKey() V8_OVERRIDE { return FastNewContext; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return slots_; }
- explicit FastNewBlockContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
- }
-
- void Generate(MacroAssembler* masm);
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kFunction = 0;
private:
int slots_;
-
- Major MajorKey() { return FastNewBlockContext; }
- int MinorKey() { return slots_; }
};
+
class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
- // Maximum length of copied elements array.
- static const int kMaximumClonedLength = 8;
- enum Mode {
- CLONE_ELEMENTS,
- CLONE_DOUBLE_ELEMENTS,
- COPY_ON_WRITE_ELEMENTS,
- CLONE_ANY_ELEMENTS,
- LAST_CLONE_MODE = CLONE_ANY_ELEMENTS
- };
-
- static const int kFastCloneModeCount = LAST_CLONE_MODE + 1;
-
- FastCloneShallowArrayStub(Mode mode,
- AllocationSiteMode allocation_site_mode,
- int length)
- : mode_(mode),
- allocation_site_mode_(allocation_site_mode),
- length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
- ASSERT_GE(length_, 0);
- ASSERT_LE(length_, kMaximumClonedLength);
- }
+ FastCloneShallowArrayStub(Isolate* isolate,
+ AllocationSiteMode allocation_site_mode)
+ : HydrogenCodeStub(isolate),
+ allocation_site_mode_(allocation_site_mode) {}
- Mode mode() const { return mode_; }
- int length() const { return length_; }
AllocationSiteMode allocation_site_mode() const {
return allocation_site_mode_;
}
- ElementsKind ComputeElementsKind() const {
- switch (mode()) {
- case CLONE_ELEMENTS:
- case COPY_ON_WRITE_ELEMENTS:
- return FAST_ELEMENTS;
- case CLONE_DOUBLE_ELEMENTS:
- return FAST_DOUBLE_ELEMENTS;
- case CLONE_ANY_ELEMENTS:
- /*fall-through*/;
- }
- UNREACHABLE();
- return LAST_ELEMENTS_KIND;
- }
-
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
private:
- Mode mode_;
AllocationSiteMode allocation_site_mode_;
- int length_;
class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
- class ModeBits: public BitField<Mode, 1, 4> {};
- class LengthBits: public BitField<int, 5, 4> {};
// Ensure data fits within available bits.
- STATIC_ASSERT(LAST_ALLOCATION_SITE_MODE == 1);
- STATIC_ASSERT(kFastCloneModeCount < 16);
- STATIC_ASSERT(kMaximumClonedLength < 16);
Major MajorKey() { return FastCloneShallowArray; }
int NotMissMinorKey() {
- return AllocationSiteModeBits::encode(allocation_site_mode_)
- | ModeBits::encode(mode_)
- | LengthBits::encode(length_);
+ return AllocationSiteModeBits::encode(allocation_site_mode_);
}
};
@@ -642,19 +619,18 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
- explicit FastCloneShallowObjectStub(int length)
- : length_(length) {
+ FastCloneShallowObjectStub(Isolate* isolate, int length)
+ : HydrogenCodeStub(isolate), length_(length) {
ASSERT_GE(length_, 0);
ASSERT_LE(length_, kMaximumClonedProperties);
}
int length() const { return length_; }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
int length_;
@@ -668,15 +644,15 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
class CreateAllocationSiteStub : public HydrogenCodeStub {
public:
- explicit CreateAllocationSiteStub() { }
+ explicit CreateAllocationSiteStub(Isolate* isolate)
+ : HydrogenCodeStub(isolate) { }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
static void GenerateAheadOfTime(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
Major MajorKey() { return CreateAllocationSite; }
@@ -695,7 +671,8 @@ class InstanceofStub: public PlatformCodeStub {
kReturnTrueFalseObject = 1 << 2
};
- explicit InstanceofStub(Flags flags) : flags_(flags) { }
+ InstanceofStub(Isolate* isolate, Flags flags)
+ : PlatformCodeStub(isolate), flags_(flags) { }
static Register left();
static Register right();
@@ -742,6 +719,7 @@ class ArrayConstructorStub: public PlatformCodeStub {
private:
void GenerateDispatchToArrayStub(MacroAssembler* masm,
AllocationSiteOverrideMode mode);
+ virtual void PrintName(StringStream* stream);
virtual CodeStub::Major MajorKey() { return ArrayConstructor; }
virtual int MinorKey() { return argument_count_; }
@@ -768,8 +746,8 @@ class MathPowStub: public PlatformCodeStub {
public:
enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
- explicit MathPowStub(ExponentType exponent_type)
- : exponent_type_(exponent_type) { }
+ MathPowStub(Isolate* isolate, ExponentType exponent_type)
+ : PlatformCodeStub(isolate), exponent_type_(exponent_type) { }
virtual void Generate(MacroAssembler* masm);
private:
@@ -782,7 +760,8 @@ class MathPowStub: public PlatformCodeStub {
class ICStub: public PlatformCodeStub {
public:
- explicit ICStub(Code::Kind kind) : kind_(kind) { }
+ ICStub(Isolate* isolate, Code::Kind kind)
+ : PlatformCodeStub(isolate), kind_(kind) { }
virtual Code::Kind GetCodeKind() const { return kind_; }
virtual InlineCacheState GetICState() { return MONOMORPHIC; }
@@ -806,31 +785,76 @@ class ICStub: public PlatformCodeStub {
};
-class FunctionPrototypeStub: public ICStub {
+class CallICStub: public PlatformCodeStub {
public:
- explicit FunctionPrototypeStub(Code::Kind kind) : ICStub(kind) { }
+ CallICStub(Isolate* isolate, const CallIC::State& state)
+ : PlatformCodeStub(isolate), state_(state) {}
+
+ bool CallAsMethod() const { return state_.CallAsMethod(); }
+
+ int arg_count() const { return state_.arg_count(); }
+
+ static int ExtractArgcFromMinorKey(int minor_key) {
+ CallIC::State state((ExtraICState) minor_key);
+ return state.arg_count();
+ }
+
virtual void Generate(MacroAssembler* masm);
- private:
- virtual CodeStub::Major MajorKey() { return FunctionPrototype; }
+ virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
+ return Code::CALL_IC;
+ }
+
+ virtual InlineCacheState GetICState() V8_FINAL V8_OVERRIDE {
+ return state_.GetICState();
+ }
+
+ virtual ExtraICState GetExtraICState() V8_FINAL V8_OVERRIDE {
+ return state_.GetExtraICState();
+ }
+
+ protected:
+ virtual int MinorKey() { return GetExtraICState(); }
+ virtual void PrintState(StringStream* stream) V8_OVERRIDE;
+
+ virtual CodeStub::Major MajorKey() { return CallIC; }
+
+ // Code generation helpers.
+ void GenerateMiss(MacroAssembler* masm, IC::UtilityId id);
+
+ const CallIC::State state_;
};
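For CallICStub the minor key is the whole extra IC state, so ExtractArgcFromMinorKey can rebuild a CallIC::State from it and read the argument count back out. A standalone sketch of that round trip, using an illustrative bit layout rather than V8's real one:

#include <cassert>

typedef int ExtraICState;

// Toy stand-in for CallIC::State: argc in the high bits, one flag bit low.
class State {
 public:
  State(int argc, bool call_as_method)
      : argc_(argc), call_as_method_(call_as_method) {}
  explicit State(ExtraICState state)
      : argc_(state >> 1), call_as_method_((state & 1) != 0) {}
  ExtraICState GetExtraICState() const {
    return (argc_ << 1) | (call_as_method_ ? 1 : 0);
  }
  int arg_count() const { return argc_; }
 private:
  int argc_;
  bool call_as_method_;
};

// Mirrors CallICStub::ExtractArgcFromMinorKey above.
static int ExtractArgcFromMinorKey(int minor_key) {
  return State(static_cast<ExtraICState>(minor_key)).arg_count();
}

int main() {
  State state(3, true);
  int minor_key = state.GetExtraICState();  // what MinorKey() would return
  assert(ExtractArgcFromMinorKey(minor_key) == 3);
}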
-class StringLengthStub: public ICStub {
+class CallIC_ArrayStub: public CallICStub {
public:
- explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
+ CallIC_ArrayStub(Isolate* isolate, const CallIC::State& state_in)
+ : CallICStub(isolate, state_in) {}
+
+ virtual void Generate(MacroAssembler* masm);
+
+ protected:
+ virtual void PrintState(StringStream* stream) V8_OVERRIDE;
+
+ virtual CodeStub::Major MajorKey() { return CallIC_Array; }
+};
+
+
+class FunctionPrototypeStub: public ICStub {
+ public:
+ FunctionPrototypeStub(Isolate* isolate, Code::Kind kind)
+ : ICStub(isolate, kind) { }
virtual void Generate(MacroAssembler* masm);
private:
- STATIC_ASSERT(KindBits::kSize == 4);
- virtual CodeStub::Major MajorKey() { return StringLength; }
+ virtual CodeStub::Major MajorKey() { return FunctionPrototype; }
};
class StoreICStub: public ICStub {
public:
- StoreICStub(Code::Kind kind, StrictModeFlag strict_mode)
- : ICStub(kind), strict_mode_(strict_mode) { }
+ StoreICStub(Isolate* isolate, Code::Kind kind, StrictMode strict_mode)
+ : ICStub(isolate, kind), strict_mode_(strict_mode) { }
protected:
virtual ExtraICState GetExtraICState() {
@@ -844,23 +868,13 @@ class StoreICStub: public ICStub {
return KindBits::encode(kind()) | StrictModeBits::encode(strict_mode_);
}
- StrictModeFlag strict_mode_;
-};
-
-
-class StoreArrayLengthStub: public StoreICStub {
- public:
- explicit StoreArrayLengthStub(Code::Kind kind, StrictModeFlag strict_mode)
- : StoreICStub(kind, strict_mode) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- virtual CodeStub::Major MajorKey() { return StoreArrayLength; }
+ StrictMode strict_mode_;
};
class HICStub: public HydrogenCodeStub {
public:
+ explicit HICStub(Isolate* isolate) : HydrogenCodeStub(isolate) { }
virtual Code::Kind GetCodeKind() const { return kind(); }
virtual InlineCacheState GetICState() { return MONOMORPHIC; }
@@ -873,10 +887,10 @@ class HICStub: public HydrogenCodeStub {
class HandlerStub: public HICStub {
public:
virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
- virtual int GetStubFlags() { return kind(); }
+ virtual ExtraICState GetExtraICState() { return kind(); }
protected:
- HandlerStub() : HICStub() { }
+ explicit HandlerStub(Isolate* isolate) : HICStub(isolate) { }
virtual int NotMissMinorKey() { return bit_field_; }
int bit_field_;
};
@@ -884,15 +898,15 @@ class HandlerStub: public HICStub {
class LoadFieldStub: public HandlerStub {
public:
- LoadFieldStub(bool inobject, int index, Representation representation) {
- Initialize(Code::LOAD_IC, inobject, index, representation);
+ LoadFieldStub(Isolate* isolate, FieldIndex index)
+ : HandlerStub(isolate), index_(index) {
+ Initialize(Code::LOAD_IC);
}
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
Representation representation() {
if (unboxed_double()) return Representation::Double();
@@ -903,77 +917,109 @@ class LoadFieldStub: public HandlerStub {
return KindBits::decode(bit_field_);
}
- bool is_inobject() {
- return InobjectBits::decode(bit_field_);
- }
-
- int offset() {
- int index = IndexBits::decode(bit_field_);
- int offset = index * kPointerSize;
- if (is_inobject()) return offset;
- return FixedArray::kHeaderSize + offset;
- }
+ FieldIndex index() const { return index_; }
bool unboxed_double() {
- return UnboxedDoubleBits::decode(bit_field_);
+ return index_.is_double();
}
virtual Code::StubType GetStubType() { return Code::FAST; }
protected:
- LoadFieldStub() : HandlerStub() { }
+ explicit LoadFieldStub(Isolate* isolate);
- void Initialize(Code::Kind kind,
- bool inobject,
- int index,
- Representation representation) {
- bool unboxed_double = FLAG_track_double_fields && representation.IsDouble();
- bit_field_ = KindBits::encode(kind)
- | InobjectBits::encode(inobject)
- | IndexBits::encode(index)
- | UnboxedDoubleBits::encode(unboxed_double);
+ void Initialize(Code::Kind kind) {
+ int property_index_key = index_.GetLoadFieldStubKey();
+ // Save a copy of the essence of the property index into the bit field to
+    // make sure that hashing of unique stubs works correctly.
+ bit_field_ = KindBits::encode(kind) |
+ EncodedLoadFieldByIndexBits::encode(property_index_key);
}
private:
STATIC_ASSERT(KindBits::kSize == 4);
- class InobjectBits: public BitField<bool, 4, 1> {};
- class IndexBits: public BitField<int, 5, 11> {};
- class UnboxedDoubleBits: public BitField<bool, 16, 1> {};
+ class EncodedLoadFieldByIndexBits: public BitField<int, 4, 13> {};
virtual CodeStub::Major MajorKey() { return LoadField; }
+ FieldIndex index_;
+};
+
+
+class StringLengthStub: public HandlerStub {
+ public:
+ explicit StringLengthStub(Isolate* isolate) : HandlerStub(isolate) {
+ Initialize(Code::LOAD_IC);
+ }
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ protected:
+ virtual Code::Kind kind() const {
+ return KindBits::decode(bit_field_);
+ }
+
+ void Initialize(Code::Kind kind) {
+ bit_field_ = KindBits::encode(kind);
+ }
+
+ private:
+ virtual CodeStub::Major MajorKey() { return StringLength; }
+};
+
+
+class KeyedStringLengthStub: public StringLengthStub {
+ public:
+ explicit KeyedStringLengthStub(Isolate* isolate) : StringLengthStub(isolate) {
+ Initialize(Code::KEYED_LOAD_IC);
+ }
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ private:
+ virtual CodeStub::Major MajorKey() { return KeyedStringLength; }
};
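The handler stubs encode the IC kind into bit_field_ via Initialize(); KeyedStringLengthStub reuses StringLengthStub wholesale and only re-runs Initialize() with KEYED_LOAD_IC. A standalone sketch of that pattern:

#include <cassert>

enum Kind { LOAD_IC, KEYED_LOAD_IC };

class StringLengthStub {
 public:
  StringLengthStub() { Initialize(LOAD_IC); }
  Kind kind() const { return static_cast<Kind>(bit_field_); }
 protected:
  void Initialize(Kind kind) { bit_field_ = kind; }  // encodes the kind
  int bit_field_;
};

class KeyedStringLengthStub : public StringLengthStub {
 public:
  // Same code paths as the base class; only the encoded kind differs.
  KeyedStringLengthStub() { Initialize(KEYED_LOAD_IC); }
};

int main() {
  StringLengthStub plain;
  KeyedStringLengthStub keyed;
  assert(plain.kind() == LOAD_IC);
  assert(keyed.kind() == KEYED_LOAD_IC);
}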
class StoreGlobalStub : public HandlerStub {
public:
- explicit StoreGlobalStub(bool is_constant) {
- bit_field_ = IsConstantBits::encode(is_constant);
+ StoreGlobalStub(Isolate* isolate, bool is_constant, bool check_global)
+ : HandlerStub(isolate) {
+ bit_field_ = IsConstantBits::encode(is_constant) |
+ CheckGlobalBits::encode(check_global);
+ }
+
+ static Handle<HeapObject> global_placeholder(Isolate* isolate) {
+ return isolate->factory()->uninitialized_value();
}
- Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
- Map* receiver_map,
- PropertyCell* cell) {
- Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
- // Replace the placeholder cell and global object map with the actual global
- // cell and receiver map.
- Map* cell_map = isolate->heap()->global_property_cell_map();
- code->ReplaceNthObject(1, cell_map, cell);
- code->ReplaceNthObject(1, isolate->heap()->meta_map(), receiver_map);
- return code;
+ Handle<Code> GetCodeCopyFromTemplate(Handle<GlobalObject> global,
+ Handle<PropertyCell> cell) {
+ if (check_global()) {
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(Handle<Map>(global_placeholder(isolate())->map()), global);
+ pattern.Add(isolate()->factory()->meta_map(), Handle<Map>(global->map()));
+ pattern.Add(isolate()->factory()->global_property_cell_map(), cell);
+ return CodeStub::GetCodeCopy(pattern);
+ } else {
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate()->factory()->global_property_cell_map(), cell);
+ return CodeStub::GetCodeCopy(pattern);
+ }
}
virtual Code::Kind kind() const { return Code::STORE_IC; }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- virtual ExtraICState GetExtraICState() { return bit_field_; }
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
- bool is_constant() {
+ bool is_constant() const {
return IsConstantBits::decode(bit_field_);
}
+ bool check_global() const {
+ return CheckGlobalBits::decode(bit_field_);
+ }
void set_is_constant(bool value) {
bit_field_ = IsConstantBits::update(bit_field_, value);
}
@@ -986,91 +1032,84 @@ class StoreGlobalStub : public HandlerStub {
}
private:
- virtual int NotMissMinorKey() { return GetExtraICState(); }
Major MajorKey() { return StoreGlobal; }
class IsConstantBits: public BitField<bool, 0, 1> {};
class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
-
- int bit_field_;
+ class CheckGlobalBits: public BitField<bool, 9, 1> {};
DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
};
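GetCodeCopyFromTemplate now goes through Code::FindAndReplacePattern: the template code embeds well-known placeholder objects (the uninitialized value, the meta map, the property cell map), and a fresh copy is patched by substituting the real global, map, and cell for each placeholder. A standalone sketch of the find-and-replace idea, with ints standing in for maps and heap objects:

#include <cassert>
#include <utility>
#include <vector>

typedef int Map;     // stand-in: the "type marker" of an embedded object
typedef int Object;  // stand-in: an embedded heap object

struct FindAndReplacePattern {
  void Add(Map map, Object replacement) {
    entries.push_back(std::make_pair(map, replacement));
  }
  std::vector<std::pair<Map, Object> > entries;
};

// Stand-in for patching the embedded-object table of a copied Code object:
// every object whose map matches a pattern entry is swapped out.
static void ApplyPattern(std::vector<Object>* embedded,
                         const std::vector<Map>& maps,
                         const FindAndReplacePattern& pattern) {
  for (size_t i = 0; i < embedded->size(); ++i) {
    for (size_t j = 0; j < pattern.entries.size(); ++j) {
      if (maps[i] == pattern.entries[j].first) {
        (*embedded)[i] = pattern.entries[j].second;  // patch the placeholder
      }
    }
  }
}

int main() {
  enum { kPropertyCellMap = 1 };
  std::vector<Object> embedded;  // placeholder objects in the template copy
  std::vector<Map> maps;         // their maps
  embedded.push_back(0);
  maps.push_back(kPropertyCellMap);

  FindAndReplacePattern pattern;
  pattern.Add(kPropertyCellMap, 42);  // 42 plays the real property cell
  ApplyPattern(&embedded, maps, pattern);
  assert(embedded[0] == 42);
}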
-class KeyedLoadFieldStub: public LoadFieldStub {
+class CallApiFunctionStub : public PlatformCodeStub {
public:
- KeyedLoadFieldStub(bool inobject, int index, Representation representation)
- : LoadFieldStub() {
- Initialize(Code::KEYED_LOAD_IC, inobject, index, representation);
+ CallApiFunctionStub(Isolate* isolate,
+ bool is_store,
+ bool call_data_undefined,
+ int argc) : PlatformCodeStub(isolate) {
+ bit_field_ =
+ IsStoreBits::encode(is_store) |
+ CallDataUndefinedBits::encode(call_data_undefined) |
+ ArgumentBits::encode(argc);
+ ASSERT(!is_store || argc == 1);
}
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ private:
+ virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
+ virtual Major MajorKey() V8_OVERRIDE { return CallApiFunction; }
+ virtual int MinorKey() V8_OVERRIDE { return bit_field_; }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ class IsStoreBits: public BitField<bool, 0, 1> {};
+ class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
+ class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {};
- private:
- virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
+ int bit_field_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallApiFunctionStub);
};
-class KeyedArrayCallStub: public HICStub {
+class CallApiGetterStub : public PlatformCodeStub {
public:
- KeyedArrayCallStub(bool holey, int argc) : HICStub(), argc_(argc) {
- bit_field_ = ContextualBits::encode(false) | HoleyBits::encode(holey);
- }
+ explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- virtual Code::Kind kind() const { return Code::KEYED_CALL_IC; }
- virtual ExtraICState GetExtraICState() { return bit_field_; }
+ private:
+ virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
+ virtual Major MajorKey() V8_OVERRIDE { return CallApiGetter; }
+ virtual int MinorKey() V8_OVERRIDE { return 0; }
- ElementsKind elements_kind() {
- return HoleyBits::decode(bit_field_) ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- }
+ DISALLOW_COPY_AND_ASSIGN(CallApiGetterStub);
+};
- int argc() { return argc_; }
- virtual int GetStubFlags() { return argc(); }
- static bool IsHoley(Handle<Code> code) {
- ExtraICState state = code->extra_ic_state();
- return HoleyBits::decode(state);
+class KeyedLoadFieldStub: public LoadFieldStub {
+ public:
+ KeyedLoadFieldStub(Isolate* isolate, FieldIndex index)
+ : LoadFieldStub(isolate, index) {
+ Initialize(Code::KEYED_LOAD_IC);
}
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
- virtual int NotMissMinorKey() {
- return GetExtraICState() | ArgcBits::encode(argc_);
- }
-
- class ContextualBits: public BitField<bool, 0, 1> {};
- STATIC_ASSERT(CallICBase::Contextual::kShift == ContextualBits::kShift);
- STATIC_ASSERT(CallICBase::Contextual::kSize == ContextualBits::kSize);
- class HoleyBits: public BitField<bool, 1, 1> {};
- STATIC_ASSERT(Code::kArgumentsBits <= kStubMinorKeyBits - 2);
- class ArgcBits: public BitField<int, 2, Code::kArgumentsBits> {};
- virtual CodeStub::Major MajorKey() { return KeyedArrayCall; }
- int bit_field_;
- int argc_;
+ virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
};
-class BinaryOpICStub V8_FINAL : public HydrogenCodeStub {
+class BinaryOpICStub : public HydrogenCodeStub {
public:
- BinaryOpICStub(Token::Value op, OverwriteMode mode)
- : HydrogenCodeStub(UNINITIALIZED), state_(op, mode) {}
+ BinaryOpICStub(Isolate* isolate, Token::Value op, OverwriteMode mode)
+ : HydrogenCodeStub(isolate, UNINITIALIZED), state_(isolate, op, mode) {}
- explicit BinaryOpICStub(const BinaryOpIC::State& state) : state_(state) {}
+ BinaryOpICStub(Isolate* isolate, const BinaryOpIC::State& state)
+ : HydrogenCodeStub(isolate), state_(state) {}
static void GenerateAheadOfTime(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
static void InstallDescriptors(Isolate* isolate);
@@ -1078,23 +1117,24 @@ class BinaryOpICStub V8_FINAL : public HydrogenCodeStub {
return Code::BINARY_OP_IC;
}
- virtual InlineCacheState GetICState() V8_OVERRIDE {
+ virtual InlineCacheState GetICState() V8_FINAL V8_OVERRIDE {
return state_.GetICState();
}
- virtual ExtraICState GetExtraICState() V8_OVERRIDE {
+ virtual ExtraICState GetExtraICState() V8_FINAL V8_OVERRIDE {
return state_.GetExtraICState();
}
- virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
- ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
- }
-
- virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
const BinaryOpIC::State& state() const { return state_; }
- virtual void PrintState(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintState(StringStream* stream) V8_FINAL V8_OVERRIDE;
+
+ virtual Major MajorKey() V8_OVERRIDE { return BinaryOpIC; }
+ virtual int NotMissMinorKey() V8_FINAL V8_OVERRIDE {
+ return GetExtraICState();
+ }
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kLeft = 0;
@@ -1104,20 +1144,109 @@ class BinaryOpICStub V8_FINAL : public HydrogenCodeStub {
static void GenerateAheadOfTime(Isolate* isolate,
const BinaryOpIC::State& state);
- virtual Major MajorKey() V8_OVERRIDE { return BinaryOpIC; }
- virtual int NotMissMinorKey() V8_OVERRIDE { return GetExtraICState(); }
-
BinaryOpIC::State state_;
DISALLOW_COPY_AND_ASSIGN(BinaryOpICStub);
};
-// TODO(bmeurer): Rename to StringAddStub once we dropped the old StringAddStub.
-class NewStringAddStub V8_FINAL : public HydrogenCodeStub {
+// TODO(bmeurer): Merge this into the BinaryOpICStub once we have proper tail
+// call support for stubs in Hydrogen.
+class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {
+ public:
+ BinaryOpICWithAllocationSiteStub(Isolate* isolate,
+ const BinaryOpIC::State& state)
+ : PlatformCodeStub(isolate), state_(state) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ Handle<Code> GetCodeCopyFromTemplate(Handle<AllocationSite> allocation_site) {
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate()->factory()->undefined_map(), allocation_site);
+ return CodeStub::GetCodeCopy(pattern);
+ }
+
+ virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
+ return Code::BINARY_OP_IC;
+ }
+
+ virtual InlineCacheState GetICState() V8_OVERRIDE {
+ return state_.GetICState();
+ }
+
+ virtual ExtraICState GetExtraICState() V8_OVERRIDE {
+ return state_.GetExtraICState();
+ }
+
+ virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
+
+ virtual void PrintState(StringStream* stream) V8_OVERRIDE;
+
+ virtual Major MajorKey() V8_OVERRIDE { return BinaryOpICWithAllocationSite; }
+ virtual int MinorKey() V8_OVERRIDE { return GetExtraICState(); }
+
+ private:
+ static void GenerateAheadOfTime(Isolate* isolate,
+ const BinaryOpIC::State& state);
+
+ BinaryOpIC::State state_;
+
+ DISALLOW_COPY_AND_ASSIGN(BinaryOpICWithAllocationSiteStub);
+};
+
+
+class BinaryOpWithAllocationSiteStub V8_FINAL : public BinaryOpICStub {
public:
- NewStringAddStub(StringAddFlags flags, PretenureFlag pretenure_flag)
- : bit_field_(StringAddFlagsBits::encode(flags) |
+ BinaryOpWithAllocationSiteStub(Isolate* isolate,
+ Token::Value op,
+ OverwriteMode mode)
+ : BinaryOpICStub(isolate, op, mode) {}
+
+ BinaryOpWithAllocationSiteStub(Isolate* isolate,
+ const BinaryOpIC::State& state)
+ : BinaryOpICStub(isolate, state) {}
+
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ virtual Code::Kind GetCodeKind() const V8_FINAL V8_OVERRIDE {
+ return Code::STUB;
+ }
+
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+
+ virtual Major MajorKey() V8_OVERRIDE {
+ return BinaryOpWithAllocationSite;
+ }
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kAllocationSite = 0;
+ static const int kLeft = 1;
+ static const int kRight = 2;
+};
+
+
+enum StringAddFlags {
+ // Omit both parameter checks.
+ STRING_ADD_CHECK_NONE = 0,
+ // Check left parameter.
+ STRING_ADD_CHECK_LEFT = 1 << 0,
+ // Check right parameter.
+ STRING_ADD_CHECK_RIGHT = 1 << 1,
+ // Check both parameters.
+ STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
+};
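StringAddFlags, moved here from its temporary home, is a plain bit set: the two single-sided checks occupy separate bits and STRING_ADD_CHECK_BOTH is just their bitwise OR, so callers can test each side independently. A standalone copy of the enum with the membership tests spelled out:

#include <cassert>

enum StringAddFlags {
  STRING_ADD_CHECK_NONE = 0,
  STRING_ADD_CHECK_LEFT = 1 << 0,
  STRING_ADD_CHECK_RIGHT = 1 << 1,
  STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
};

int main() {
  int flags = STRING_ADD_CHECK_BOTH;
  assert((flags & STRING_ADD_CHECK_LEFT) != 0);   // left check requested
  assert((flags & STRING_ADD_CHECK_RIGHT) != 0);  // right check requested
  assert(STRING_ADD_CHECK_NONE == 0);             // no bits set
}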
+
+
+class StringAddStub V8_FINAL : public HydrogenCodeStub {
+ public:
+ StringAddStub(Isolate* isolate,
+ StringAddFlags flags,
+ PretenureFlag pretenure_flag)
+ : HydrogenCodeStub(isolate),
+ bit_field_(StringAddFlagsBits::encode(flags) |
PretenureFlagBits::encode(pretenure_flag)) {}
StringAddFlags flags() const {
@@ -1128,10 +1257,9 @@ class NewStringAddStub V8_FINAL : public HydrogenCodeStub {
return PretenureFlagBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
static void InstallDescriptors(Isolate* isolate);
@@ -1145,22 +1273,24 @@ class NewStringAddStub V8_FINAL : public HydrogenCodeStub {
class PretenureFlagBits: public BitField<PretenureFlag, 2, 1> {};
uint32_t bit_field_;
- virtual Major MajorKey() V8_OVERRIDE { return NewStringAdd; }
+ virtual Major MajorKey() V8_OVERRIDE { return StringAdd; }
virtual int NotMissMinorKey() V8_OVERRIDE { return bit_field_; }
virtual void PrintBaseName(StringStream* stream) V8_OVERRIDE;
- DISALLOW_COPY_AND_ASSIGN(NewStringAddStub);
+ DISALLOW_COPY_AND_ASSIGN(StringAddStub);
};
class ICCompareStub: public PlatformCodeStub {
public:
- ICCompareStub(Token::Value op,
+ ICCompareStub(Isolate* isolate,
+ Token::Value op,
CompareIC::State left,
CompareIC::State right,
CompareIC::State handler)
- : op_(op),
+ : PlatformCodeStub(isolate),
+ op_(op),
left_(left),
right_(right),
state_(handler) {
@@ -1177,10 +1307,6 @@ class ICCompareStub: public PlatformCodeStub {
CompareIC::State* handler_state,
Token::Value* op);
- static CompareIC::State CompareState(int minor_key) {
- return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
- }
-
virtual InlineCacheState GetICState();
private:
@@ -1212,7 +1338,7 @@ class ICCompareStub: public PlatformCodeStub {
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
virtual void AddToSpecialCache(Handle<Code> new_object);
- virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate);
+ virtual bool FindCodeInSpecialCache(Code** code_out);
virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECT; }
Token::Value op_;
@@ -1225,31 +1351,31 @@ class ICCompareStub: public PlatformCodeStub {
class CompareNilICStub : public HydrogenCodeStub {
public:
- Handle<Type> GetType(Isolate* isolate, Handle<Map> map = Handle<Map>());
- Handle<Type> GetInputType(Isolate* isolate, Handle<Map> map);
+ Type* GetType(Zone* zone, Handle<Map> map = Handle<Map>());
+ Type* GetInputType(Zone* zone, Handle<Map> map);
- explicit CompareNilICStub(NilValue nil) : nil_value_(nil) { }
+ CompareNilICStub(Isolate* isolate, NilValue nil)
+ : HydrogenCodeStub(isolate), nil_value_(nil) { }
- CompareNilICStub(ExtraICState ic_state,
+ CompareNilICStub(Isolate* isolate,
+ ExtraICState ic_state,
InitializationState init_state = INITIALIZED)
- : HydrogenCodeStub(init_state),
+ : HydrogenCodeStub(isolate, init_state),
nil_value_(NilValueField::decode(ic_state)),
state_(State(TypesField::decode(ic_state))) {
}
static Handle<Code> GetUninitialized(Isolate* isolate,
NilValue nil) {
- return CompareNilICStub(nil, UNINITIALIZED).GetCode(isolate);
+ return CompareNilICStub(isolate, nil, UNINITIALIZED).GetCode();
}
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
- static void InitializeForIsolate(Isolate* isolate) {
- CompareNilICStub compare_stub(kNullValue, UNINITIALIZED);
+ static void InstallDescriptors(Isolate* isolate) {
+ CompareNilICStub compare_stub(isolate, kNullValue, UNINITIALIZED);
compare_stub.InitializeInterfaceDescriptor(
- isolate,
isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
}
@@ -1265,7 +1391,7 @@ class CompareNilICStub : public HydrogenCodeStub {
virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) |
@@ -1305,8 +1431,10 @@ class CompareNilICStub : public HydrogenCodeStub {
void Print(StringStream* stream) const;
};
- CompareNilICStub(NilValue nil, InitializationState init_state)
- : HydrogenCodeStub(init_state), nil_value_(nil) { }
+ CompareNilICStub(Isolate* isolate,
+ NilValue nil,
+ InitializationState init_state)
+ : HydrogenCodeStub(isolate, init_state), nil_value_(nil) { }
class NilValueField : public BitField<NilValue, 0, 1> {};
class TypesField : public BitField<byte, 1, NUMBER_OF_TYPES> {};
@@ -1323,9 +1451,12 @@ class CompareNilICStub : public HydrogenCodeStub {
class CEntryStub : public PlatformCodeStub {
public:
- explicit CEntryStub(int result_size,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs)
- : result_size_(result_size), save_doubles_(save_doubles) { }
+ CEntryStub(Isolate* isolate,
+ int result_size,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs)
+ : PlatformCodeStub(isolate),
+ result_size_(result_size),
+ save_doubles_(save_doubles) { }
void Generate(MacroAssembler* masm);
@@ -1335,21 +1466,8 @@ class CEntryStub : public PlatformCodeStub {
// can generate both variants ahead of time.
static void GenerateAheadOfTime(Isolate* isolate);
- protected:
- virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
- ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
- };
-
private:
- void GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope);
-
// Number of pointers/values returned.
- Isolate* isolate_;
const int result_size_;
SaveFPRegsMode save_doubles_;
@@ -1362,7 +1480,7 @@ class CEntryStub : public PlatformCodeStub {
class JSEntryStub : public PlatformCodeStub {
public:
- JSEntryStub() { }
+ explicit JSEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
@@ -1381,7 +1499,7 @@ class JSEntryStub : public PlatformCodeStub {
class JSConstructEntryStub : public JSEntryStub {
public:
- JSConstructEntryStub() { }
+ explicit JSConstructEntryStub(Isolate* isolate) : JSEntryStub(isolate) { }
void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
@@ -1398,12 +1516,13 @@ class ArgumentsAccessStub: public PlatformCodeStub {
public:
enum Type {
READ_ELEMENT,
- NEW_NON_STRICT_FAST,
- NEW_NON_STRICT_SLOW,
+ NEW_SLOPPY_FAST,
+ NEW_SLOPPY_SLOW,
NEW_STRICT
};
- explicit ArgumentsAccessStub(Type type) : type_(type) { }
+ ArgumentsAccessStub(Isolate* isolate, Type type)
+ : PlatformCodeStub(isolate), type_(type) { }
private:
Type type_;
@@ -1414,8 +1533,8 @@ class ArgumentsAccessStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewStrict(MacroAssembler* masm);
- void GenerateNewNonStrictFast(MacroAssembler* masm);
- void GenerateNewNonStrictSlow(MacroAssembler* masm);
+ void GenerateNewSloppyFast(MacroAssembler* masm);
+ void GenerateNewSloppySlow(MacroAssembler* masm);
virtual void PrintName(StringStream* stream);
};
@@ -1423,7 +1542,7 @@ class ArgumentsAccessStub: public PlatformCodeStub {
class RegExpExecStub: public PlatformCodeStub {
public:
- RegExpExecStub() { }
+ explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
private:
Major MajorKey() { return RegExpExec; }
@@ -1433,29 +1552,38 @@ class RegExpExecStub: public PlatformCodeStub {
};
-class RegExpConstructResultStub: public PlatformCodeStub {
+class RegExpConstructResultStub V8_FINAL : public HydrogenCodeStub {
public:
- RegExpConstructResultStub() { }
+ explicit RegExpConstructResultStub(Isolate* isolate)
+ : HydrogenCodeStub(isolate) { }
- private:
- Major MajorKey() { return RegExpConstructResult; }
- int MinorKey() { return 0; }
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
- void Generate(MacroAssembler* masm);
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ virtual Major MajorKey() V8_OVERRIDE { return RegExpConstructResult; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return 0; }
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kLength = 0;
+ static const int kIndex = 1;
+ static const int kInput = 2;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RegExpConstructResultStub);
};
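Like the other Hydrogen stubs in this patch, RegExpConstructResultStub publishes fixed positional indices (kLength, kIndex, kInput) that CodeStubGraphBuilder::GetParameter() uses to fetch the stub's inputs. A standalone sketch of that convention, with everything reduced to stand-ins:

#include <cassert>

struct Parameter { const char* name; };  // stand-in for a graph value

class CodeStubGraphBuilder {
 public:
  CodeStubGraphBuilder(Parameter* params, int count)
      : params_(params), count_(count) {}
  Parameter* GetParameter(int index) {
    assert(index >= 0 && index < count_);  // indices come from the stub class
    return &params_[index];
  }
 private:
  Parameter* params_;
  int count_;
};

int main() {
  // Mirrors the constants the stub declares above.
  static const int kLength = 0, kIndex = 1, kInput = 2;
  Parameter params[3] = { {"length"}, {"index"}, {"input"} };
  CodeStubGraphBuilder builder(params, 3);
  assert(builder.GetParameter(kInput)->name[0] == 'i');
  (void)kLength; (void)kIndex;
}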
class CallFunctionStub: public PlatformCodeStub {
public:
- CallFunctionStub(int argc, CallFunctionFlags flags)
- : argc_(argc), flags_(flags) { }
+ CallFunctionStub(Isolate* isolate, int argc, CallFunctionFlags flags)
+ : PlatformCodeStub(isolate), argc_(argc), flags_(flags) { }
void Generate(MacroAssembler* masm);
- virtual void FinishCode(Handle<Code> code) {
- code->set_has_function_cache(RecordCallTarget());
- }
-
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
@@ -1476,19 +1604,20 @@ class CallFunctionStub: public PlatformCodeStub {
return FlagBits::encode(flags_) | ArgcBits::encode(argc_);
}
- bool ReceiverMightBeImplicit() {
- return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
+ bool CallAsMethod() {
+ return flags_ == CALL_AS_METHOD || flags_ == WRAP_AND_CALL;
}
- bool RecordCallTarget() {
- return (flags_ & RECORD_CALL_TARGET) != 0;
+ bool NeedsChecks() {
+ return flags_ != WRAP_AND_CALL;
}
};
class CallConstructStub: public PlatformCodeStub {
public:
- explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
+ CallConstructStub(Isolate* isolate, CallConstructorFlags flags)
+ : PlatformCodeStub(isolate), flags_(flags) {}
void Generate(MacroAssembler* masm);
@@ -1497,7 +1626,7 @@ class CallConstructStub: public PlatformCodeStub {
}
private:
- CallFunctionFlags flags_;
+ CallConstructorFlags flags_;
virtual void PrintName(StringStream* stream);
@@ -1505,7 +1634,7 @@ class CallConstructStub: public PlatformCodeStub {
int MinorKey() { return flags_; }
bool RecordCallTarget() {
- return (flags_ & RECORD_CALL_TARGET) != 0;
+ return (flags_ & RECORD_CONSTRUCTOR_TARGET) != 0;
}
};
@@ -1686,12 +1815,12 @@ class StringCharAtGenerator {
class KeyedLoadDictionaryElementStub : public HydrogenCodeStub {
public:
- KeyedLoadDictionaryElementStub() {}
+ explicit KeyedLoadDictionaryElementStub(Isolate* isolate)
+ : HydrogenCodeStub(isolate) {}
- virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
@@ -1704,7 +1833,8 @@ class KeyedLoadDictionaryElementStub : public HydrogenCodeStub {
class KeyedLoadDictionaryElementPlatformStub : public PlatformCodeStub {
public:
- KeyedLoadDictionaryElementPlatformStub() {}
+ explicit KeyedLoadDictionaryElementPlatformStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
void Generate(MacroAssembler* masm);
@@ -1716,30 +1846,52 @@ class KeyedLoadDictionaryElementPlatformStub : public PlatformCodeStub {
};
+class KeyedLoadGenericElementStub : public HydrogenCodeStub {
+ public:
+  explicit KeyedLoadGenericElementStub(Isolate* isolate)
+ : HydrogenCodeStub(isolate) {}
+
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ virtual Code::Kind GetCodeKind() const { return Code::KEYED_LOAD_IC; }
+ virtual InlineCacheState GetICState() { return GENERIC; }
+
+ private:
+ Major MajorKey() { return KeyedLoadGeneric; }
+ int NotMissMinorKey() { return 0; }
+
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadGenericElementStub);
+};
+
+
class DoubleToIStub : public PlatformCodeStub {
public:
- DoubleToIStub(Register source,
+ DoubleToIStub(Isolate* isolate,
+ Register source,
Register destination,
int offset,
bool is_truncating,
- bool skip_fastpath = false) : bit_field_(0) {
- bit_field_ = SourceRegisterBits::encode(source.code_) |
- DestinationRegisterBits::encode(destination.code_) |
+ bool skip_fastpath = false)
+ : PlatformCodeStub(isolate), bit_field_(0) {
+ bit_field_ = SourceRegisterBits::encode(source.code()) |
+ DestinationRegisterBits::encode(destination.code()) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath) |
- SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
- CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
+ SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0);
}
Register source() {
- Register result = { SourceRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(SourceRegisterBits::decode(bit_field_));
}
Register destination() {
- Register result = { DestinationRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(DestinationRegisterBits::decode(bit_field_));
}
bool is_truncating() {
@@ -1758,11 +1910,6 @@ class DoubleToIStub : public PlatformCodeStub {
virtual bool SometimesSetsUpAFrame() { return false; }
- protected:
- virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
- ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
- }
-
private:
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
@@ -1777,8 +1924,8 @@ class DoubleToIStub : public PlatformCodeStub {
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
class SkipFastPathBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
- class SSEBits:
- public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT
+ class SSE3Bits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 5, 1> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
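
A minimal standalone sketch of the BitField packing idiom the stubs above rely
on: every stub parameter is encoded into a single integer so that MinorKey()
alone identifies the stub variant. The template below mirrors v8's BitField in
simplified form; the field layout is a hypothetical analogue of DoubleToIStub's,
not the exact one.

#include <cassert>
#include <cstdint>

template <class T, int kShift, int kSize>
struct BitField {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  // Pack |value| into its slot; callers OR the slots together.
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  // Recover the value of this slot from a packed key.
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> kShift);
  }
};

// Hypothetical layout: two 6-bit register codes, a 3-bit offset, one flag.
typedef BitField<int, 0, 6> SourceBits;
typedef BitField<int, 6, 6> DestinationBits;
typedef BitField<int, 12, 3> OffsetBits;
typedef BitField<bool, 15, 1> TruncatingBits;

int main() {
  uint32_t key = SourceBits::encode(3) | DestinationBits::encode(7) |
                 OffsetBits::encode(4) | TruncatingBits::encode(true);
  assert(SourceBits::decode(key) == 3);
  assert(DestinationBits::decode(key) == 7);
  assert(OffsetBits::decode(key) == 4);
  assert(TruncatingBits::decode(key));
  return 0;
}
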
@@ -1791,7 +1938,10 @@ class DoubleToIStub : public PlatformCodeStub {
class KeyedLoadFastElementStub : public HydrogenCodeStub {
public:
- KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
+ KeyedLoadFastElementStub(Isolate* isolate,
+ bool is_js_array,
+ ElementsKind elements_kind)
+ : HydrogenCodeStub(isolate) {
bit_field_ = ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array);
}
@@ -1804,11 +1954,10 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
return ElementsKindBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
@@ -1824,9 +1973,11 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
class KeyedStoreFastElementStub : public HydrogenCodeStub {
public:
- KeyedStoreFastElementStub(bool is_js_array,
+ KeyedStoreFastElementStub(Isolate* isolate,
+ bool is_js_array,
ElementsKind elements_kind,
- KeyedAccessStoreMode mode) {
+ KeyedAccessStoreMode mode)
+ : HydrogenCodeStub(isolate) {
bit_field_ = ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array) |
StoreModeBits::encode(mode);
@@ -1844,11 +1995,10 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
return StoreModeBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
@@ -1865,10 +2015,13 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
class TransitionElementsKindStub : public HydrogenCodeStub {
public:
- TransitionElementsKindStub(ElementsKind from_kind,
- ElementsKind to_kind) {
+ TransitionElementsKindStub(Isolate* isolate,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ bool is_js_array) : HydrogenCodeStub(isolate) {
bit_field_ = FromKindBits::encode(from_kind) |
- ToKindBits::encode(to_kind);
+ ToKindBits::encode(to_kind) |
+ IsJSArrayBits::encode(is_js_array);
}
ElementsKind from_kind() const {
@@ -1879,15 +2032,19 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
return ToKindBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ bool is_js_array() const {
+ return IsJSArrayBits::decode(bit_field_);
+ }
+
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
class FromKindBits: public BitField<ElementsKind, 8, 8> {};
class ToKindBits: public BitField<ElementsKind, 0, 8> {};
+ class IsJSArrayBits: public BitField<bool, 16, 1> {};
uint32_t bit_field_;
Major MajorKey() { return TransitionElementsKind; }
@@ -1897,26 +2054,19 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
};
-enum ContextCheckMode {
- CONTEXT_CHECK_REQUIRED,
- CONTEXT_CHECK_NOT_REQUIRED,
- LAST_CONTEXT_CHECK_MODE = CONTEXT_CHECK_NOT_REQUIRED
-};
-
-
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
- ArrayConstructorStubBase(ElementsKind kind, ContextCheckMode context_mode,
- AllocationSiteOverrideMode override_mode) {
+ ArrayConstructorStubBase(Isolate* isolate,
+ ElementsKind kind,
+ AllocationSiteOverrideMode override_mode)
+ : HydrogenCodeStub(isolate) {
// It only makes sense to override local allocation site behavior
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
- ASSERT(!(FLAG_track_allocation_sites &&
- override_mode == DISABLE_ALLOCATION_SITES) ||
+ ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
- AllocationSiteOverrideModeBits::encode(override_mode) |
- ContextCheckModeBits::encode(context_mode);
+ AllocationSiteOverrideModeBits::encode(override_mode);
}
ElementsKind elements_kind() const {
@@ -1927,28 +2077,25 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
return AllocationSiteOverrideModeBits::decode(bit_field_);
}
- ContextCheckMode context_mode() const {
- return ContextCheckModeBits::decode(bit_field_);
- }
-
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kConstructor = 0;
- static const int kPropertyCell = 1;
+ static const int kAllocationSite = 1;
+
+ protected:
+ void BasePrintName(const char* name, StringStream* stream);
private:
int NotMissMinorKey() { return bit_field_; }
// Ensure data fits within available bits.
STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
- STATIC_ASSERT(LAST_CONTEXT_CHECK_MODE == 1);
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
class AllocationSiteOverrideModeBits: public
BitField<AllocationSiteOverrideMode, 8, 1> {}; // NOLINT
- class ContextCheckModeBits: public BitField<ContextCheckMode, 9, 1> {};
uint32_t bit_field_;
DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
@@ -1958,21 +2105,24 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNoArgumentConstructorStub(
+ Isolate* isolate,
ElementsKind kind,
- ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : ArrayConstructorStubBase(kind, context_mode, override_mode) {
+ : ArrayConstructorStubBase(isolate, kind, override_mode) {
}
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
Major MajorKey() { return ArrayNoArgumentConstructor; }
+ virtual void PrintName(StringStream* stream) {
+ BasePrintName("ArrayNoArgumentConstructorStub", stream);
+ }
+
DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
};
@@ -1980,21 +2130,24 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArraySingleArgumentConstructorStub(
+ Isolate* isolate,
ElementsKind kind,
- ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : ArrayConstructorStubBase(kind, context_mode, override_mode) {
+ : ArrayConstructorStubBase(isolate, kind, override_mode) {
}
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
Major MajorKey() { return ArraySingleArgumentConstructor; }
+ virtual void PrintName(StringStream* stream) {
+ BasePrintName("ArraySingleArgumentConstructorStub", stream);
+ }
+
DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
};
@@ -2002,28 +2155,32 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNArgumentsConstructorStub(
+ Isolate* isolate,
ElementsKind kind,
- ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : ArrayConstructorStubBase(kind, context_mode, override_mode) {
+ : ArrayConstructorStubBase(isolate, kind, override_mode) {
}
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
Major MajorKey() { return ArrayNArgumentsConstructor; }
+ virtual void PrintName(StringStream* stream) {
+ BasePrintName("ArrayNArgumentsConstructorStub", stream);
+ }
+
DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
};
class InternalArrayConstructorStubBase : public HydrogenCodeStub {
public:
- explicit InternalArrayConstructorStubBase(ElementsKind kind) {
+ InternalArrayConstructorStubBase(Isolate* isolate, ElementsKind kind)
+ : HydrogenCodeStub(isolate) {
kind_ = kind;
}
@@ -2047,14 +2204,14 @@ class InternalArrayConstructorStubBase : public HydrogenCodeStub {
class InternalArrayNoArgumentConstructorStub : public
InternalArrayConstructorStubBase {
public:
- explicit InternalArrayNoArgumentConstructorStub(ElementsKind kind)
- : InternalArrayConstructorStubBase(kind) { }
+ InternalArrayNoArgumentConstructorStub(Isolate* isolate,
+ ElementsKind kind)
+ : InternalArrayConstructorStubBase(isolate, kind) { }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
Major MajorKey() { return InternalArrayNoArgumentConstructor; }
@@ -2066,14 +2223,14 @@ class InternalArrayNoArgumentConstructorStub : public
class InternalArraySingleArgumentConstructorStub : public
InternalArrayConstructorStubBase {
public:
- explicit InternalArraySingleArgumentConstructorStub(ElementsKind kind)
- : InternalArrayConstructorStubBase(kind) { }
+ InternalArraySingleArgumentConstructorStub(Isolate* isolate,
+ ElementsKind kind)
+ : InternalArrayConstructorStubBase(isolate, kind) { }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
Major MajorKey() { return InternalArraySingleArgumentConstructor; }
@@ -2085,14 +2242,13 @@ class InternalArraySingleArgumentConstructorStub : public
class InternalArrayNArgumentsConstructorStub : public
InternalArrayConstructorStubBase {
public:
- explicit InternalArrayNArgumentsConstructorStub(ElementsKind kind)
- : InternalArrayConstructorStubBase(kind) { }
+ InternalArrayNArgumentsConstructorStub(Isolate* isolate, ElementsKind kind)
+ : InternalArrayConstructorStubBase(isolate, kind) { }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
Major MajorKey() { return InternalArrayNArgumentsConstructor; }
@@ -2103,20 +2259,20 @@ class InternalArrayNArgumentsConstructorStub : public
class KeyedStoreElementStub : public PlatformCodeStub {
public:
- KeyedStoreElementStub(bool is_js_array,
+ KeyedStoreElementStub(Isolate* isolate,
+ bool is_js_array,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode)
- : is_js_array_(is_js_array),
+ : PlatformCodeStub(isolate),
+ is_js_array_(is_js_array),
elements_kind_(elements_kind),
- store_mode_(store_mode),
- fp_registers_(CanUseFPRegisters()) { }
+ store_mode_(store_mode) { }
Major MajorKey() { return KeyedStoreElement; }
int MinorKey() {
return ElementsKindBits::encode(elements_kind_) |
IsJSArrayBits::encode(is_js_array_) |
- StoreModeBits::encode(store_mode_) |
- FPRegisters::encode(fp_registers_);
+ StoreModeBits::encode(store_mode_);
}
void Generate(MacroAssembler* masm);
@@ -2125,12 +2281,10 @@ class KeyedStoreElementStub : public PlatformCodeStub {
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {};
class IsJSArrayBits: public BitField<bool, 12, 1> {};
- class FPRegisters: public BitField<bool, 13, 1> {};
bool is_js_array_;
ElementsKind elements_kind_;
KeyedAccessStoreMode store_mode_;
- bool fp_registers_;
DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
};
@@ -2169,33 +2323,31 @@ class ToBooleanStub: public HydrogenCodeStub {
static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
};
- explicit ToBooleanStub(Types types = Types())
- : types_(types) { }
- explicit ToBooleanStub(ExtraICState state)
- : types_(static_cast<byte>(state)) { }
+ ToBooleanStub(Isolate* isolate, Types types = Types())
+ : HydrogenCodeStub(isolate), types_(types) { }
+ ToBooleanStub(Isolate* isolate, ExtraICState state)
+ : HydrogenCodeStub(isolate), types_(static_cast<byte>(state)) { }
bool UpdateStatus(Handle<Object> object);
Types GetTypes() { return types_; }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; }
virtual void PrintState(StringStream* stream);
virtual bool SometimesSetsUpAFrame() { return false; }
- static void InitializeForIsolate(Isolate* isolate) {
- ToBooleanStub stub;
+ static void InstallDescriptors(Isolate* isolate) {
+ ToBooleanStub stub(isolate);
stub.InitializeInterfaceDescriptor(
- isolate,
isolate->code_stub_interface_descriptor(CodeStub::ToBoolean));
}
static Handle<Code> GetUninitialized(Isolate* isolate) {
- return ToBooleanStub(UNINITIALIZED).GetCode(isolate);
+ return ToBooleanStub(isolate, UNINITIALIZED).GetCode();
}
virtual ExtraICState GetExtraICState() {
@@ -2214,8 +2366,8 @@ class ToBooleanStub: public HydrogenCodeStub {
Major MajorKey() { return ToBoolean; }
int NotMissMinorKey() { return GetExtraICState(); }
- explicit ToBooleanStub(InitializationState init_state) :
- HydrogenCodeStub(init_state) {}
+ ToBooleanStub(Isolate* isolate, InitializationState init_state) :
+ HydrogenCodeStub(isolate, init_state) {}
Types types_;
};
@@ -2223,11 +2375,13 @@ class ToBooleanStub: public HydrogenCodeStub {
class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
public:
- ElementsTransitionAndStoreStub(ElementsKind from_kind,
+ ElementsTransitionAndStoreStub(Isolate* isolate,
+ ElementsKind from_kind,
ElementsKind to_kind,
bool is_jsarray,
KeyedAccessStoreMode store_mode)
- : from_kind_(from_kind),
+ : HydrogenCodeStub(isolate),
+ from_kind_(from_kind),
to_kind_(to_kind),
is_jsarray_(is_jsarray),
store_mode_(store_mode) {}
@@ -2237,11 +2391,10 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
bool is_jsarray() const { return is_jsarray_; }
KeyedAccessStoreMode store_mode() const { return store_mode_; }
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE;
- void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
class FromBits: public BitField<ElementsKind, 0, 8> {};
@@ -2268,71 +2421,46 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
class StoreArrayLiteralElementStub : public PlatformCodeStub {
public:
- StoreArrayLiteralElementStub()
- : fp_registers_(CanUseFPRegisters()) { }
+ explicit StoreArrayLiteralElementStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) { }
private:
- class FPRegisters: public BitField<bool, 0, 1> {};
-
Major MajorKey() { return StoreArrayLiteralElement; }
- int MinorKey() { return FPRegisters::encode(fp_registers_); }
+ int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
- bool fp_registers_;
-
DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
};
class StubFailureTrampolineStub : public PlatformCodeStub {
public:
- explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
- : fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {}
+ StubFailureTrampolineStub(Isolate* isolate, StubFunctionMode function_mode)
+ : PlatformCodeStub(isolate),
+ function_mode_(function_mode) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
- class FPRegisters: public BitField<bool, 0, 1> {};
- class FunctionModeField: public BitField<StubFunctionMode, 1, 1> {};
+ class FunctionModeField: public BitField<StubFunctionMode, 0, 1> {};
Major MajorKey() { return StubFailureTrampoline; }
int MinorKey() {
- return FPRegisters::encode(fp_registers_) |
- FunctionModeField::encode(function_mode_);
+ return FunctionModeField::encode(function_mode_);
}
void Generate(MacroAssembler* masm);
- bool fp_registers_;
StubFunctionMode function_mode_;
DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);
};
-class StubFailureTailCallTrampolineStub : public PlatformCodeStub {
- public:
- StubFailureTailCallTrampolineStub() : fp_registers_(CanUseFPRegisters()) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- class FPRegisters: public BitField<bool, 0, 1> {};
- Major MajorKey() { return StubFailureTailCallTrampoline; }
- int MinorKey() { return FPRegisters::encode(fp_registers_); }
-
- void Generate(MacroAssembler* masm);
-
- bool fp_registers_;
-
- DISALLOW_COPY_AND_ASSIGN(StubFailureTailCallTrampolineStub);
-};
-
-
class ProfileEntryHookStub : public PlatformCodeStub {
public:
- explicit ProfileEntryHookStub() {}
+ explicit ProfileEntryHookStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
// The profile entry hook function is not allowed to cause a GC.
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -2353,6 +2481,12 @@ class ProfileEntryHookStub : public PlatformCodeStub {
DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
};
+
+class CallDescriptors {
+ public:
+ static void InitializeForIsolate(Isolate* isolate);
+};
+
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/chromium/v8/src/code.h b/chromium/v8/src/code.h
index 791420cf399..2b821c6512d 100644
--- a/chromium/v8/src/code.h
+++ b/chromium/v8/src/code.h
@@ -1,36 +1,13 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CODE_H_
#define V8_CODE_H_
-#include "allocation.h"
-#include "handles.h"
-#include "objects.h"
+#include "src/allocation.h"
+#include "src/handles.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/codegen.cc b/chromium/v8/src/codegen.cc
index 28f7d6c099e..c039e40c933 100644
--- a/chromium/v8/src/codegen.cc
+++ b/chromium/v8/src/codegen.cc
@@ -1,45 +1,81 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "prettyprinter.h"
-#include "rewriter.h"
-#include "runtime.h"
-#include "stub-cache.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/prettyprinter.h"
+#include "src/rewriter.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
+
+#if defined(_WIN64)
+typedef double (*ModuloFunction)(double, double);
+static ModuloFunction modulo_function = NULL;
+// Defined in codegen-x64.cc.
+ModuloFunction CreateModuloFunction();
+
+void init_modulo_function() {
+ modulo_function = CreateModuloFunction();
+}
+
+
+double modulo(double x, double y) {
+ // Note: here we rely on dependent reads being ordered. This is true
+ // on all architectures we currently support.
+ return (*modulo_function)(x, y);
+}
+#elif defined(_WIN32)
+
+double modulo(double x, double y) {
+ // Work around MS fmod bugs. ECMA-262 says:
+ // dividend is finite and divisor is an infinity => result equals dividend
+ // dividend is a zero and divisor is nonzero finite => result equals dividend
+ if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
+ !(x == 0 && (y != 0 && std::isfinite(y)))) {
+ x = fmod(x, y);
+ }
+ return x;
+}
+#else // POSIX
+
+double modulo(double x, double y) {
+ return std::fmod(x, y);
+}
+#endif // defined(_WIN64)
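
The Win32 branch above guards exactly the two ECMA-262 modulo edge cases its
comment lists, because older MS fmod implementations get them wrong. A portable
standalone sketch of the required semantics (not the patched function itself):

#include <cassert>
#include <cmath>

static double ecma_modulo(double x, double y) {
  // Finite dividend, infinite divisor: result equals the dividend.
  if (std::isfinite(x) && std::isinf(y)) return x;
  // Zero dividend, nonzero finite divisor: result equals the dividend.
  if (x == 0 && y != 0 && std::isfinite(y)) return x;
  // Everything else matches the C library's fmod.
  return std::fmod(x, y);
}

int main() {
  assert(ecma_modulo(5.0, INFINITY) == 5.0);
  assert(ecma_modulo(0.0, 3.0) == 0.0);
  assert(std::isnan(ecma_modulo(INFINITY, 3.0)));  // infinite dividend => NaN
  return 0;
}
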
+
+
+#define UNARY_MATH_FUNCTION(name, generator) \
+static UnaryMathFunction fast_##name##_function = NULL; \
+void init_fast_##name##_function() { \
+ fast_##name##_function = generator; \
+} \
+double fast_##name(double x) { \
+ return (*fast_##name##_function)(x); \
+}
+
+UNARY_MATH_FUNCTION(exp, CreateExpFunction())
+UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
+
+#undef UNARY_MATH_FUNCTION
+
+
+void lazily_initialize_fast_exp() {
+ if (fast_exp_function == NULL) {
+ init_fast_exp_function();
+ }
+}
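
For reference, UNARY_MATH_FUNCTION(exp, CreateExpFunction()) above expands by
hand to the following (relying on the UnaryMathFunction typedef and the
CreateExpFunction() declaration from codegen.h):

static UnaryMathFunction fast_exp_function = NULL;
void init_fast_exp_function() {
  fast_exp_function = CreateExpFunction();  // install the generated code
}
double fast_exp(double x) {
  return (*fast_exp_function)(x);  // call through the installed pointer
}

lazily_initialize_fast_exp() then simply performs this installation on the
first call, as shown above.
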
+
+
#define __ ACCESS_MASM(masm_)
#ifdef DEBUG
@@ -81,7 +117,7 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
CodeStub::MajorName(info->code_stub()->MajorKey(), true);
PrintF("%s", name == NULL ? "<unknown>" : name);
} else {
- PrintF("%s", *info->function()->debug_name()->ToCString());
+ PrintF("%s", info->function()->debug_name()->ToCString().get());
}
PrintF("]\n");
}
@@ -89,12 +125,12 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
#ifdef DEBUG
if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(info->isolate()).PrintProgram(info->function()));
+ PrettyPrinter(info->zone()).PrintProgram(info->function()));
}
if (!info->IsStub() && print_ast) {
PrintF("--- AST ---\n%s\n",
- AstPrinter(info->isolate()).PrintProgram(info->function()));
+ AstPrinter(info->zone()).PrintProgram(info->function()));
}
#endif // DEBUG
}
@@ -114,7 +150,8 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
false, is_crankshafted,
- info->prologue_offset());
+ info->prologue_offset(),
+ info->is_debug() && !is_crankshafted);
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
@@ -162,9 +199,11 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
if (FLAG_print_unopt_code) {
PrintF(tracing_scope.file(), "--- Unoptimized code ---\n");
info->closure()->shared()->code()->Disassemble(
- *function->debug_name()->ToCString(), tracing_scope.file());
+ function->debug_name()->ToCString().get(), tracing_scope.file());
}
PrintF(tracing_scope.file(), "--- Optimized code ---\n");
+ PrintF(tracing_scope.file(),
+ "optimization_id = %d\n", info->optimization_id());
} else {
PrintF(tracing_scope.file(), "--- Code ---\n");
}
@@ -177,7 +216,7 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
code->Disassemble(CodeStub::MajorName(major_key, false),
tracing_scope.file());
} else {
- code->Disassemble(*function->debug_name()->ToCString(),
+ code->Disassemble(function->debug_name()->ToCString().get(),
tracing_scope.file());
}
PrintF(tracing_scope.file(), "--- End code ---\n");
@@ -186,21 +225,6 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
}
-bool CodeGenerator::ShouldGenerateLog(Isolate* isolate, Expression* type) {
- ASSERT(type != NULL);
- if (!isolate->logger()->is_logging() &&
- !isolate->cpu_profiler()->is_profiling()) {
- return false;
- }
- Handle<String> name = Handle<String>::cast(type->AsLiteral()->value());
- if (FLAG_log_regexp) {
- if (name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("regexp")))
- return true;
- }
- return false;
-}
-
-
bool CodeGenerator::RecordPositions(MacroAssembler* masm,
int pos,
bool right_here) {
@@ -220,11 +244,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
case READ_ELEMENT:
GenerateReadElement(masm);
break;
- case NEW_NON_STRICT_FAST:
- GenerateNewNonStrictFast(masm);
+ case NEW_SLOPPY_FAST:
+ GenerateNewSloppyFast(masm);
break;
- case NEW_NON_STRICT_SLOW:
- GenerateNewNonStrictSlow(masm);
+ case NEW_SLOPPY_SLOW:
+ GenerateNewSloppySlow(masm);
break;
case NEW_STRICT:
GenerateNewStrict(masm);
diff --git a/chromium/v8/src/codegen.h b/chromium/v8/src/codegen.h
index 33672a2b341..ec36b150d80 100644
--- a/chromium/v8/src/codegen.h
+++ b/chromium/v8/src/codegen.h
@@ -1,36 +1,12 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CODEGEN_H_
#define V8_CODEGEN_H_
-#include "code-stubs.h"
-#include "runtime.h"
-#include "type-info.h"
+#include "src/code-stubs.h"
+#include "src/runtime.h"
// Include the declaration of the architecture defined class CodeGenerator.
// The contract to the shared code is that the CodeGenerator is a subclass
@@ -70,13 +46,17 @@
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#if V8_TARGET_ARCH_IA32
-#include "ia32/codegen-ia32.h"
+#include "src/ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/codegen-x64.h"
+#include "src/x64/codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/codegen-arm.h"
+#include "src/arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/codegen-mips.h"
+#include "src/mips/codegen-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/codegen-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -84,16 +64,53 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
namespace v8 {
namespace internal {
+
+class CompilationInfo;
+
+
+class CodeGenerator {
+ public:
+ // Printing of AST, etc. as requested by flags.
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
+
+ // Allocate and install the code.
+ static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
+ Code::Flags flags,
+ CompilationInfo* info);
+
+ // Print the code after compiling it.
+ static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
+ static bool RecordPositions(MacroAssembler* masm,
+ int pos,
+ bool right_here = false);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
// Results of the library implementation of transcendental functions may differ
// from the ones we use in our generated code. Therefore we use the same
// generated code in both runtime and compiled code.
typedef double (*UnaryMathFunction)(double x);
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
UnaryMathFunction CreateExpFunction();
UnaryMathFunction CreateSqrtFunction();
+double modulo(double x, double y);
+
+// Custom implementation of math functions.
+double fast_exp(double input);
+double fast_sqrt(double input);
+#ifdef _WIN64
+void init_modulo_function();
+#endif
+void lazily_initialize_fast_exp();
+void init_fast_sqrt_function();
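
A hypothetical caller sketch (not from this patch) of the lazy-initialization
contract declared above: the runtime runs the initializer once before the
first fast_exp call, after which fast_exp dispatches through the installed
generated code.

double RuntimeMathExp(double input) {
  lazily_initialize_fast_exp();  // no-op after the first call
  return fast_exp(input);
}
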
+
+
class ElementsTransitionGenerator : public AllStatic {
public:
// If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
@@ -115,6 +132,33 @@ class ElementsTransitionGenerator : public AllStatic {
static const int kNumberDictionaryProbes = 4;
+class CodeAgingHelper {
+ public:
+ CodeAgingHelper();
+
+ uint32_t young_sequence_length() const { return young_sequence_.length(); }
+ bool IsYoung(byte* candidate) const {
+ return memcmp(candidate,
+ young_sequence_.start(),
+ young_sequence_.length()) == 0;
+ }
+ void CopyYoungSequenceTo(byte* new_buffer) const {
+ CopyBytes(new_buffer, young_sequence_.start(), young_sequence_.length());
+ }
+
+#ifdef DEBUG
+ bool IsOld(byte* candidate) const;
+#endif
+
+ protected:
+ const EmbeddedVector<byte, kNoCodeAgeSequenceLength> young_sequence_;
+#ifdef DEBUG
+#ifdef V8_TARGET_ARCH_ARM64
+ const EmbeddedVector<byte, kNoCodeAgeSequenceLength> old_sequence_;
+#endif
+#endif
+};
+
} } // namespace v8::internal
#endif // V8_CODEGEN_H_
diff --git a/chromium/v8/src/collection-iterator.js b/chromium/v8/src/collection-iterator.js
new file mode 100644
index 00000000000..2436a931e28
--- /dev/null
+++ b/chromium/v8/src/collection-iterator.js
@@ -0,0 +1,162 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+
+// This file relies on the fact that the following declarations have been
+// made in runtime.js:
+// var $Set = global.Set;
+// var $Map = global.Map;
+
+
+function SetIteratorConstructor(set, kind) {
+ %SetIteratorInitialize(this, set, kind);
+}
+
+
+function SetIteratorNextJS() {
+ if (!IS_SET_ITERATOR(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set Iterator.prototype.next', this]);
+ }
+ return %SetIteratorNext(this);
+}
+
+
+function SetIteratorSymbolIterator() {
+ return this;
+}
+
+
+function SetEntries() {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.entries', this]);
+ }
+ return new SetIterator(this, ITERATOR_KIND_ENTRIES);
+}
+
+
+function SetValues() {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.values', this]);
+ }
+ return new SetIterator(this, ITERATOR_KIND_VALUES);
+}
+
+
+function SetUpSetIterator() {
+ %CheckIsBootstrapping();
+
+ %SetCode(SetIterator, SetIteratorConstructor);
+ %FunctionSetPrototype(SetIterator, new $Object());
+ %FunctionSetInstanceClassName(SetIterator, 'Set Iterator');
+ InstallFunctions(SetIterator.prototype, DONT_ENUM, $Array(
+ 'next', SetIteratorNextJS
+ ));
+
+ %FunctionSetName(SetIteratorSymbolIterator, '[Symbol.iterator]');
+ %SetProperty(SetIterator.prototype, symbolIterator,
+ SetIteratorSymbolIterator, DONT_ENUM);
+}
+
+SetUpSetIterator();
+
+
+function ExtendSetPrototype() {
+ %CheckIsBootstrapping();
+
+ InstallFunctions($Set.prototype, DONT_ENUM, $Array(
+ 'entries', SetEntries,
+ 'values', SetValues
+ ));
+
+ %SetProperty($Set.prototype, symbolIterator, SetValues,
+ DONT_ENUM);
+}
+
+ExtendSetPrototype();
+
+
+
+function MapIteratorConstructor(map, kind) {
+ %MapIteratorInitialize(this, map, kind);
+}
+
+
+function MapIteratorSymbolIterator() {
+ return this;
+}
+
+
+function MapIteratorNextJS() {
+ if (!IS_MAP_ITERATOR(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map Iterator.prototype.next', this]);
+ }
+ return %MapIteratorNext(this);
+}
+
+
+function MapEntries() {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.entries', this]);
+ }
+ return new MapIterator(this, ITERATOR_KIND_ENTRIES);
+}
+
+
+function MapKeys() {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.keys', this]);
+ }
+ return new MapIterator(this, ITERATOR_KIND_KEYS);
+}
+
+
+function MapValues() {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.values', this]);
+ }
+ return new MapIterator(this, ITERATOR_KIND_VALUES);
+}
+
+
+function SetUpMapIterator() {
+ %CheckIsBootstrapping();
+
+ %SetCode(MapIterator, MapIteratorConstructor);
+ %FunctionSetPrototype(MapIterator, new $Object());
+ %FunctionSetInstanceClassName(MapIterator, 'Map Iterator');
+ InstallFunctions(MapIterator.prototype, DONT_ENUM, $Array(
+ 'next', MapIteratorNextJS
+ ));
+
+ %FunctionSetName(MapIteratorSymbolIterator, '[Symbol.iterator]');
+ %SetProperty(MapIterator.prototype, symbolIterator,
+ MapIteratorSymbolIterator, DONT_ENUM);
+}
+
+SetUpMapIterator();
+
+
+function ExtendMapPrototype() {
+ %CheckIsBootstrapping();
+
+ InstallFunctions($Map.prototype, DONT_ENUM, $Array(
+ 'entries', MapEntries,
+ 'keys', MapKeys,
+ 'values', MapValues
+ ));
+
+ %SetProperty($Map.prototype, symbolIterator, MapEntries,
+ DONT_ENUM);
+}
+
+ExtendMapPrototype();
diff --git a/chromium/v8/src/collection.js b/chromium/v8/src/collection.js
index 01537e87b09..0d8dd77f7bd 100644
--- a/chromium/v8/src/collection.js
+++ b/chromium/v8/src/collection.js
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
"use strict";
@@ -33,12 +10,7 @@
var $Set = global.Set;
var $Map = global.Map;
-var $WeakMap = global.WeakMap;
-var $WeakSet = global.WeakSet;
-// Global sentinel to be used instead of undefined keys, which are not
-// supported internally but required for Harmony sets and maps.
-var undefined_sentinel = {};
// -------------------------------------------------------------------
// Harmony Set
@@ -52,48 +24,34 @@ function SetConstructor() {
}
-function SetAdd(key) {
+function SetAddJS(key) {
if (!IS_SET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.add', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
return %SetAdd(this, key);
}
-function SetHas(key) {
+function SetHasJS(key) {
if (!IS_SET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.has', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
return %SetHas(this, key);
}
-function SetDelete(key) {
+function SetDeleteJS(key) {
if (!IS_SET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.delete', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- if (%SetHas(this, key)) {
- %SetDelete(this, key);
- return true;
- } else {
- return false;
- }
+ return %SetDelete(this, key);
}
-function SetGetSize() {
+function SetGetSizeJS() {
if (!IS_SET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.size', this]);
@@ -102,13 +60,32 @@ function SetGetSize() {
}
-function SetClear() {
+function SetClearJS() {
if (!IS_SET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.clear', this]);
}
- // Replace the internal table with a new empty table.
- %SetInitialize(this);
+ %SetClear(this);
+}
+
+
+function SetForEach(f, receiver) {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.forEach', this]);
+ }
+
+ if (!IS_SPEC_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [f]);
+ }
+
+ var iterator = new SetIterator(this, ITERATOR_KIND_VALUES);
+ var entry;
+ var stepping = %_DebugCallbackSupportsStepping(f);
+ while (!(entry = %SetIteratorNext(iterator)).done) {
+ if (stepping) %DebugPrepareStepInIfStepping(f);
+ %_CallFunction(receiver, entry.value, entry.value, this, f);
+ }
}
@@ -121,13 +98,16 @@ function SetUpSet() {
%FunctionSetPrototype($Set, new $Object());
%SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
+ %FunctionSetLength(SetForEach, 1);
+
// Set up the non-enumerable functions on the Set prototype object.
- InstallGetter($Set.prototype, "size", SetGetSize);
+ InstallGetter($Set.prototype, "size", SetGetSizeJS);
InstallFunctions($Set.prototype, DONT_ENUM, $Array(
- "add", SetAdd,
- "has", SetHas,
- "delete", SetDelete,
- "clear", SetClear
+ "add", SetAddJS,
+ "has", SetHasJS,
+ "delete", SetDeleteJS,
+ "clear", SetClearJS,
+ "forEach", SetForEach
));
}
@@ -146,55 +126,43 @@ function MapConstructor() {
}
-function MapGet(key) {
+function MapGetJS(key) {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.get', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
return %MapGet(this, key);
}
-function MapSet(key, value) {
+function MapSetJS(key, value) {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.set', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
return %MapSet(this, key, value);
}
-function MapHas(key) {
+function MapHasJS(key) {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.has', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
return %MapHas(this, key);
}
-function MapDelete(key) {
+function MapDeleteJS(key) {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.delete', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
return %MapDelete(this, key);
}
-function MapGetSize() {
+function MapGetSizeJS() {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.size', this]);
@@ -203,205 +171,56 @@ function MapGetSize() {
}
-function MapClear() {
+function MapClearJS() {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.clear', this]);
}
- // Replace the internal table with a new empty table.
- %MapInitialize(this);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpMap() {
- %CheckIsBootstrapping();
-
- %SetCode($Map, MapConstructor);
- %FunctionSetPrototype($Map, new $Object());
- %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
-
- // Set up the non-enumerable functions on the Map prototype object.
- InstallGetter($Map.prototype, "size", MapGetSize);
- InstallFunctions($Map.prototype, DONT_ENUM, $Array(
- "get", MapGet,
- "set", MapSet,
- "has", MapHas,
- "delete", MapDelete,
- "clear", MapClear
- ));
-}
-
-SetUpMap();
-
-
-// -------------------------------------------------------------------
-// Harmony WeakMap
-
-function WeakMapConstructor() {
- if (%_IsConstructCall()) {
- %WeakCollectionInitialize(this);
- } else {
- throw MakeTypeError('constructor_not_function', ['WeakMap']);
- }
-}
-
-
-function WeakMapGet(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.get', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionGet(this, key);
-}
-
-
-function WeakMapSet(key, value) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.set', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionSet(this, key, value);
+ %MapClear(this);
}
-function WeakMapHas(key) {
- if (!IS_WEAKMAP(this)) {
+function MapForEach(f, receiver) {
+ if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.has', this]);
+ ['Map.prototype.forEach', this]);
}
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionHas(this, key);
-}
-
-function WeakMapDelete(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.delete', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ if (!IS_SPEC_FUNCTION(f)) {
+ throw MakeTypeError('called_non_callable', [f]);
}
- return %WeakCollectionDelete(this, key);
-}
-
-function WeakMapClear() {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.clear', this]);
+ var iterator = new MapIterator(this, ITERATOR_KIND_ENTRIES);
+ var entry;
+ var stepping = %_DebugCallbackSupportsStepping(f);
+ while (!(entry = %MapIteratorNext(iterator)).done) {
+ if (stepping) %DebugPrepareStepInIfStepping(f);
+ %_CallFunction(receiver, entry.value[1], entry.value[0], this, f);
}
- // Replace the internal table with a new empty table.
- %WeakCollectionInitialize(this);
}
// -------------------------------------------------------------------
-function SetUpWeakMap() {
+function SetUpMap() {
%CheckIsBootstrapping();
- %SetCode($WeakMap, WeakMapConstructor);
- %FunctionSetPrototype($WeakMap, new $Object());
- %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
-
- // Set up the non-enumerable functions on the WeakMap prototype object.
- InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
- "get", WeakMapGet,
- "set", WeakMapSet,
- "has", WeakMapHas,
- "delete", WeakMapDelete,
- "clear", WeakMapClear
- ));
-}
-
-SetUpWeakMap();
-
-
-// -------------------------------------------------------------------
-// Harmony WeakSet
-
-function WeakSetConstructor() {
- if (%_IsConstructCall()) {
- %WeakCollectionInitialize(this);
- } else {
- throw MakeTypeError('constructor_not_function', ['WeakSet']);
- }
-}
-
-
-function WeakSetAdd(value) {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.add', this]);
- }
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionSet(this, value, true);
-}
-
-
-function WeakSetHas(value) {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.has', this]);
- }
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionHas(this, value);
-}
-
-
-function WeakSetDelete(value) {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.delete', this]);
- }
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionDelete(this, value);
-}
-
-
-function WeakSetClear() {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.clear', this]);
- }
- // Replace the internal table with a new empty table.
- %WeakCollectionInitialize(this);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpWeakSet() {
- %CheckIsBootstrapping();
+ %SetCode($Map, MapConstructor);
+ %FunctionSetPrototype($Map, new $Object());
+ %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
- %SetCode($WeakSet, WeakSetConstructor);
- %FunctionSetPrototype($WeakSet, new $Object());
- %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
+ %FunctionSetLength(MapForEach, 1);
- // Set up the non-enumerable functions on the WeakSet prototype object.
- InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
- "add", WeakSetAdd,
- "has", WeakSetHas,
- "delete", WeakSetDelete,
- "clear", WeakSetClear
+ // Set up the non-enumerable functions on the Map prototype object.
+ InstallGetter($Map.prototype, "size", MapGetSizeJS);
+ InstallFunctions($Map.prototype, DONT_ENUM, $Array(
+ "get", MapGetJS,
+ "set", MapSetJS,
+ "has", MapHasJS,
+ "delete", MapDeleteJS,
+ "clear", MapClearJS,
+ "forEach", MapForEach
));
}
-SetUpWeakSet();
+SetUpMap();
diff --git a/chromium/v8/src/compilation-cache.cc b/chromium/v8/src/compilation-cache.cc
index fffe5da71da..6b22bf24283 100644
--- a/chromium/v8/src/compilation-cache.cc
+++ b/chromium/v8/src/compilation-cache.cc
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "assembler.h"
-#include "compilation-cache.h"
-#include "serialize.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/compilation-cache.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -65,18 +42,11 @@ CompilationCache::CompilationCache(Isolate* isolate)
CompilationCache::~CompilationCache() {}
-static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
- CALL_HEAP_FUNCTION(isolate,
- CompilationCacheTable::Allocate(isolate->heap(), size),
- CompilationCacheTable);
-}
-
-
Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
ASSERT(generation < generations_);
Handle<CompilationCacheTable> result;
if (tables_[generation]->IsUndefined()) {
- result = AllocateTable(isolate(), kInitialCacheSize);
+ result = CompilationCacheTable::New(isolate(), kInitialCacheSize);
tables_[generation] = *result;
} else {
CompilationCacheTable* table =
@@ -161,7 +131,8 @@ bool CompilationCacheScript::HasOrigin(
// Were both scripts tagged by the embedder as being shared cross-origin?
if (is_shared_cross_origin != script->is_shared_cross_origin()) return false;
// Compare the two name strings for equality.
- return String::cast(*name)->Equals(String::cast(script->name()));
+ return String::Equals(Handle<String>::cast(name),
+ Handle<String>(String::cast(script->name())));
}
@@ -184,7 +155,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
{ HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- Handle<Object> probe(table->Lookup(*source, *context), isolate());
+ Handle<Object> probe = table->Lookup(source, context);
if (probe->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> function_info =
Handle<SharedFunctionInfo>::cast(probe);
@@ -239,153 +210,93 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
}
-MaybeObject* CompilationCacheScript::TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
- Handle<CompilationCacheTable> table = GetFirstTable();
- return table->Put(*source, *context, *function_info);
-}
-
-
-Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, context, function_info),
- CompilationCacheTable);
-}
-
-
void CompilationCacheScript::Put(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
HandleScope scope(isolate());
- SetFirstTable(TablePut(source, context, function_info));
+ Handle<CompilationCacheTable> table = GetFirstTable();
+ SetFirstTable(
+ CompilationCacheTable::Put(table, source, context, function_info));
}
-Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
+MaybeHandle<SharedFunctionInfo> CompilationCacheEval::Lookup(
Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
+ HandleScope scope(isolate());
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
- Object* result = NULL;
+ Handle<Object> result = isolate()->factory()->undefined_value();
int generation;
- { HandleScope scope(isolate());
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupEval(
- *source, *context, language_mode, scope_position);
- if (result->IsSharedFunctionInfo()) {
- break;
- }
- }
+ for (generation = 0; generation < generations(); generation++) {
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ result = table->LookupEval(source, context, strict_mode, scope_position);
+ if (result->IsSharedFunctionInfo()) break;
}
if (result->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo>
- function_info(SharedFunctionInfo::cast(result), isolate());
+ Handle<SharedFunctionInfo> function_info =
+ Handle<SharedFunctionInfo>::cast(result);
if (generation != 0) {
Put(source, context, function_info, scope_position);
}
isolate()->counters()->compilation_cache_hits()->Increment();
- return function_info;
+ return scope.CloseAndEscape(function_info);
} else {
isolate()->counters()->compilation_cache_misses()->Increment();
- return Handle<SharedFunctionInfo>::null();
+ return MaybeHandle<SharedFunctionInfo>();
}
}
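
The lookup above illustrates the compilation cache's generational pattern:
probe generation 0 (the newest table) first, and on a hit in an older
generation re-Put the entry so that hot entries survive aging. A minimal
sketch of that pattern, under the assumed simplification that tables are
plain maps and aging rotates them:

#include <map>
#include <string>
#include <vector>

class GenerationalCache {
 public:
  explicit GenerationalCache(int generations) : tables_(generations) {}

  // Aging drops the oldest table and opens a fresh generation 0.
  void Age() {
    tables_.pop_back();
    tables_.insert(tables_.begin(), Table());
  }

  bool Lookup(const std::string& key, std::string* value) {
    for (size_t generation = 0; generation < tables_.size(); ++generation) {
      Table::iterator it = tables_[generation].find(key);
      if (it == tables_[generation].end()) continue;
      *value = it->second;
      // Promote hits from older generations, like the Put() call above.
      if (generation != 0) Put(key, *value);
      return true;
    }
    return false;
  }

  void Put(const std::string& key, const std::string& value) {
    tables_[0][key] = value;  // new and promoted entries go to generation 0
  }

 private:
  typedef std::map<std::string, std::string> Table;
  std::vector<Table> tables_;
};
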
-MaybeObject* CompilationCacheEval::TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position) {
- Handle<CompilationCacheTable> table = GetFirstTable();
- return table->PutEval(*source, *context, *function_info, scope_position);
-}
-
-
-Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position) {
- CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(
- source, context, function_info, scope_position),
- CompilationCacheTable);
-}
-
-
void CompilationCacheEval::Put(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info,
int scope_position) {
HandleScope scope(isolate());
- SetFirstTable(TablePut(source, context, function_info, scope_position));
+ Handle<CompilationCacheTable> table = GetFirstTable();
+ table = CompilationCacheTable::PutEval(table, source, context,
+ function_info, scope_position);
+ SetFirstTable(table);
}
-Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
- JSRegExp::Flags flags) {
+MaybeHandle<FixedArray> CompilationCacheRegExp::Lookup(
+ Handle<String> source,
+ JSRegExp::Flags flags) {
+ HandleScope scope(isolate());
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
- Object* result = NULL;
+ Handle<Object> result = isolate()->factory()->undefined_value();
int generation;
- { HandleScope scope(isolate());
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupRegExp(*source, flags);
- if (result->IsFixedArray()) {
- break;
- }
- }
+ for (generation = 0; generation < generations(); generation++) {
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ result = table->LookupRegExp(source, flags);
+ if (result->IsFixedArray()) break;
}
if (result->IsFixedArray()) {
- Handle<FixedArray> data(FixedArray::cast(result), isolate());
+ Handle<FixedArray> data = Handle<FixedArray>::cast(result);
if (generation != 0) {
Put(source, flags, data);
}
isolate()->counters()->compilation_cache_hits()->Increment();
- return data;
+ return scope.CloseAndEscape(data);
} else {
isolate()->counters()->compilation_cache_misses()->Increment();
- return Handle<FixedArray>::null();
+ return MaybeHandle<FixedArray>();
}
}
-MaybeObject* CompilationCacheRegExp::TryTablePut(
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- Handle<CompilationCacheTable> table = GetFirstTable();
- return table->PutRegExp(*source, flags, *data);
-}
-
-
-Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data) {
- CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, flags, data),
- CompilationCacheTable);
-}
-
-
void CompilationCacheRegExp::Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
HandleScope scope(isolate());
- SetFirstTable(TablePut(source, flags, data));
+ Handle<CompilationCacheTable> table = GetFirstTable();
+ SetFirstTable(CompilationCacheTable::PutRegExp(table, source, flags, data));
}
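All three sub-caches share the generation scheme visible in the lookups above: a hit found in generation N > 0 is re-Put into generation 0 so it survives the next aging rotation. A compact stand-alone model (two generations, plain containers):

    #include <array>
    #include <optional>
    #include <string>
    #include <unordered_map>

    struct SubCache {
      std::array<std::unordered_map<std::string, int>, 2> generations;

      std::optional<int> Lookup(const std::string& key) {
        for (size_t g = 0; g < generations.size(); ++g) {
          auto it = generations[g].find(key);
          if (it == generations[g].end()) continue;
          if (g != 0) generations[0][key] = it->second;  // promote on hit
          return it->second;
        }
        return std::nullopt;  // miss, like an empty MaybeHandle
      }
    };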
@@ -398,54 +309,43 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
}
-Handle<SharedFunctionInfo> CompilationCache::LookupScript(
+MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset,
bool is_shared_cross_origin,
Handle<Context> context) {
- if (!IsEnabled()) {
- return Handle<SharedFunctionInfo>::null();
- }
+ if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
- return script_.Lookup(source,
- name,
- line_offset,
- column_offset,
- is_shared_cross_origin,
- context);
+ return script_.Lookup(source, name, line_offset, column_offset,
+ is_shared_cross_origin, context);
}
-Handle<SharedFunctionInfo> CompilationCache::LookupEval(
+MaybeHandle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<String> source,
Handle<Context> context,
- bool is_global,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
- if (!IsEnabled()) {
- return Handle<SharedFunctionInfo>::null();
- }
+ if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
- Handle<SharedFunctionInfo> result;
- if (is_global) {
+ MaybeHandle<SharedFunctionInfo> result;
+ if (context->IsNativeContext()) {
result = eval_global_.Lookup(
- source, context, language_mode, scope_position);
+ source, context, strict_mode, scope_position);
} else {
ASSERT(scope_position != RelocInfo::kNoPosition);
result = eval_contextual_.Lookup(
- source, context, language_mode, scope_position);
+ source, context, strict_mode, scope_position);
}
return result;
}
-Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
+MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
JSRegExp::Flags flags) {
- if (!IsEnabled()) {
- return Handle<FixedArray>::null();
- }
+ if (!IsEnabled()) return MaybeHandle<FixedArray>();
return reg_exp_.Lookup(source, flags);
}
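With the public lookups returning MaybeHandle, callers move from is_null() tests to an explicit ToHandle check; an empty MaybeHandle now plays the role the null handle used to. A compilable approximation of the calling convention (the MaybeHandle below is a stand-in, not V8's class):

    #include <cassert>
    #include <optional>
    #include <string>

    template <typename T>
    struct MaybeHandle {
      std::optional<T> value;
      bool ToHandle(T* out) const {  // mirrors MaybeHandle<T>::ToHandle
        if (!value) return false;
        *out = *value;
        return true;
      }
    };

    MaybeHandle<std::string> LookupRegExp(bool enabled) {
      if (!enabled) return {};  // was: return Handle<FixedArray>::null()
      return {std::string("cached-regexp-data")};
    }

    int main() {
      std::string result;
      bool hit = LookupRegExp(true).ToHandle(&result);
      assert(hit && result == "cached-regexp-data");
    }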
@@ -454,9 +354,7 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
void CompilationCache::PutScript(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) {
- return;
- }
+ if (!IsEnabled()) return;
script_.Put(source, context, function_info);
}
@@ -464,15 +362,12 @@ void CompilationCache::PutScript(Handle<String> source,
void CompilationCache::PutEval(Handle<String> source,
Handle<Context> context,
- bool is_global,
Handle<SharedFunctionInfo> function_info,
int scope_position) {
- if (!IsEnabled()) {
- return;
- }
+ if (!IsEnabled()) return;
HandleScope scope(isolate());
- if (is_global) {
+ if (context->IsNativeContext()) {
eval_global_.Put(source, context, function_info, scope_position);
} else {
ASSERT(scope_position != RelocInfo::kNoPosition);
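LookupEval and PutEval drop the is_global parameter: whether an eval is global is now derived from the context itself via context->IsNativeContext(), so the flag can never disagree with the context it travels with. In outline (stand-in types, not V8's):

    struct Context {
      bool is_native;
      bool IsNativeContext() const { return is_native; }
    };

    // Before: PutEval(source, context, /*is_global=*/true, info, pos);
    // After: the caller passes only the context and the flag is recomputed.
    bool IsGlobalEval(const Context& context) {
      return context.IsNativeContext();
    }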
diff --git a/chromium/v8/src/compilation-cache.h b/chromium/v8/src/compilation-cache.h
index 414e09e655c..baa53fb45d5 100644
--- a/chromium/v8/src/compilation-cache.h
+++ b/chromium/v8/src/compilation-cache.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_COMPILATION_CACHE_H_
#define V8_COMPILATION_CACHE_H_
@@ -106,17 +83,6 @@ class CompilationCacheScript : public CompilationSubCache {
Handle<SharedFunctionInfo> function_info);
private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<Object> name,
int line_offset,
@@ -136,10 +102,9 @@ class CompilationCacheScript : public CompilationSubCache {
// entries:
// 1. The source string.
// 2. The shared function info of the calling function.
-// 3. Whether the source should be compiled as strict code or as non-strict
-// code.
+// 3. Whether the source should be compiled as strict code or as sloppy code.
// Note: Currently there are clients of CompileEval that always compile
-// non-strict code even if the calling function is a strict mode function.
+// sloppy code even if the calling function is a strict mode function.
// More specifically these are the CompileString, DebugEvaluate and
// DebugEvaluateGlobal runtime functions.
// 4. The start position of the calling scope.
@@ -148,10 +113,10 @@ class CompilationCacheEval: public CompilationSubCache {
CompilationCacheEval(Isolate* isolate, int generations)
: CompilationSubCache(isolate, generations) { }
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Context> context,
- LanguageMode language_mode,
- int scope_position);
+ MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source,
+ Handle<Context> context,
+ StrictMode strict_mode,
+ int scope_position);
void Put(Handle<String> source,
Handle<Context> context,
@@ -159,19 +124,6 @@ class CompilationCacheEval: public CompilationSubCache {
int scope_position);
private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
@@ -182,21 +134,12 @@ class CompilationCacheRegExp: public CompilationSubCache {
CompilationCacheRegExp(Isolate* isolate, int generations)
: CompilationSubCache(isolate, generations) { }
- Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
+ MaybeHandle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
void Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
private:
- MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
@@ -210,26 +153,21 @@ class CompilationCache {
// Finds the script shared function info for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
- Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context);
+ MaybeHandle<SharedFunctionInfo> LookupScript(
+ Handle<String> source, Handle<Object> name, int line_offset,
+ int column_offset, bool is_shared_cross_origin, Handle<Context> context);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
// contain a script for the given source string.
- Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- LanguageMode language_mode,
- int scope_position);
+ MaybeHandle<SharedFunctionInfo> LookupEval(
+ Handle<String> source, Handle<Context> context, StrictMode strict_mode,
+ int scope_position);
// Returns the regexp data associated with the given regexp if it
  // is in the cache, otherwise an empty handle.
- Handle<FixedArray> LookupRegExp(Handle<String> source,
- JSRegExp::Flags flags);
+ MaybeHandle<FixedArray> LookupRegExp(
+ Handle<String> source, JSRegExp::Flags flags);
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
@@ -241,7 +179,6 @@ class CompilationCache {
// with the shared function info. This may overwrite an existing mapping.
void PutEval(Handle<String> source,
Handle<Context> context,
- bool is_global,
Handle<SharedFunctionInfo> function_info,
int scope_position);
diff --git a/chromium/v8/src/compiler-intrinsics.h b/chromium/v8/src/compiler-intrinsics.h
index b73e8ac750d..f31895e2d37 100644
--- a/chromium/v8/src/compiler-intrinsics.h
+++ b/chromium/v8/src/compiler-intrinsics.h
@@ -1,29 +1,6 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_COMPILER_INTRINSICS_H_
#define V8_COMPILER_INTRINSICS_H_
diff --git a/chromium/v8/src/compiler.cc b/chromium/v8/src/compiler.cc
index 83f9ab2daff..0d3f146ab00 100644
--- a/chromium/v8/src/compiler.cc
+++ b/chromium/v8/src/compiler.cc
@@ -1,54 +1,31 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "compiler.h"
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "gdb-jit.h"
-#include "typing.h"
-#include "hydrogen.h"
-#include "isolate-inl.h"
-#include "lithium.h"
-#include "liveedit.h"
-#include "parser.h"
-#include "rewriter.h"
-#include "runtime-profiler.h"
-#include "scanner-character-streams.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-#include "vm-state-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler.h"
+
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/compilation-cache.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/gdb-jit.h"
+#include "src/typing.h"
+#include "src/hydrogen.h"
+#include "src/isolate-inl.h"
+#include "src/lithium.h"
+#include "src/liveedit.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/runtime-profiler.h"
+#include "src/scanner-character-streams.h"
+#include "src/scopeinfo.h"
+#include "src/scopes.h"
+#include "src/vm-state-inl.h"
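The include rewrite is mechanical: every internal header is now addressed from the repository root, so the bare names gain a src/ prefix. In miniature:

    // Before (include path rooted at src/):
    //   #include "compiler.h"
    // After (single include root at the repository top level):
    //   #include "src/compiler.h"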
namespace v8 {
namespace internal {
@@ -56,37 +33,40 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE)),
+ : flags_(StrictModeField::encode(SLOPPY)),
script_(script),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0),
- parameter_count_(0) {
+ parameter_count_(0),
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
+ : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0),
- parameter_count_(0) {
+ parameter_count_(0),
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
+ : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0),
- parameter_count_(0) {
+ parameter_count_(0),
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -94,11 +74,11 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
Isolate* isolate,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) |
- IsLazy::encode(true)),
+ : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0),
- parameter_count_(0) {
+ parameter_count_(0),
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@@ -112,7 +92,8 @@ void CompilationInfo::Initialize(Isolate* isolate,
scope_ = NULL;
global_scope_ = NULL;
extension_ = NULL;
- pre_parse_data_ = NULL;
+ cached_data_ = NULL;
+ cached_data_mode_ = NO_CACHED_DATA;
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
@@ -129,14 +110,21 @@ void CompilationInfo::Initialize(Isolate* isolate,
}
mode_ = mode;
abort_due_to_dependency_ = false;
- if (script_->type()->value() == Script::TYPE_NATIVE) {
- MarkAsNative();
- }
+ if (script_->type()->value() == Script::TYPE_NATIVE) MarkAsNative();
+ if (isolate_->debug()->is_active()) MarkAsDebug();
+
if (!shared_info_.is_null()) {
- ASSERT(language_mode() == CLASSIC_MODE);
- SetLanguageMode(shared_info_->language_mode());
+ ASSERT(strict_mode() == SLOPPY);
+ SetStrictMode(shared_info_->strict_mode());
}
set_bailout_reason(kUnknown);
+
+ if (!shared_info().is_null() && shared_info()->is_compiled()) {
+ // We should initialize the CompilationInfo feedback vector from the
+ // passed in shared info, rather than creating a new one.
+ feedback_vector_ = Handle<FixedArray>(shared_info()->feedback_vector(),
+ isolate);
+ }
}
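The new tail of Initialize() adopts the feedback vector from an already-compiled SharedFunctionInfo instead of leaving it to be allocated later, so type feedback gathered by earlier executions survives recompilation. A toy rendering of that branch (plain C++ stand-ins):

    #include <memory>
    #include <vector>

    using FeedbackVector = std::vector<int>;

    struct SharedFunctionInfo {
      bool is_compiled = false;
      std::shared_ptr<FeedbackVector> feedback_vector;
    };

    // Reuse the existing vector when the function was compiled before;
    // otherwise leave it empty for PrepareForCompilation to allocate.
    std::shared_ptr<FeedbackVector> InitFeedback(
        const SharedFunctionInfo* shared) {
      if (shared != nullptr && shared->is_compiled) {
        return shared->feedback_vector;
      }
      return nullptr;
    }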
@@ -211,8 +199,7 @@ Code::Flags CompilationInfo::flags() const {
return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(),
code_stub()->GetExtraICState(),
- code_stub()->GetStubType(),
- code_stub()->GetStubFlags());
+ code_stub()->GetStubType());
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@@ -225,7 +212,7 @@ void CompilationInfo::DisableOptimization() {
FLAG_optimize_closures &&
closure_.is_null() &&
!scope_->HasTrivialOuterContext() &&
- !scope_->outer_scope_calls_non_strict_eval() &&
+ !scope_->outer_scope_calls_sloppy_eval() &&
!scope_->inside_with();
SetMode(is_optimizable_closure ? BASE : NONOPT);
}
@@ -235,8 +222,7 @@ void CompilationInfo::DisableOptimization() {
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
bool CompilationInfo::ShouldSelfOptimize() {
- return FLAG_self_optimization &&
- FLAG_crankshaft &&
+ return FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->dont_optimize() &&
function()->scope()->AllowsLazyCompilation() &&
@@ -244,83 +230,16 @@ bool CompilationInfo::ShouldSelfOptimize() {
}
-// Determine whether to use the full compiler for all code. If the flag
-// --always-full-compiler is specified this is the case. For the virtual frame
-// based compiler the full compiler is also used if a debugger is connected, as
-// the code from the full compiler supports more precise break points. For the
-// crankshaft adaptive compiler, debugging the optimized code is not possible at
-// all. However, crankshaft supports recompilation of functions, so in this case
-// the full compiler need not be used if a debugger is attached, but only if
-// break points have actually been set.
-static bool IsDebuggerActive(Isolate* isolate) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- return isolate->use_crankshaft() ?
- isolate->debug()->has_break_points() :
- isolate->debugger()->IsDebuggerActive();
-#else
- return false;
-#endif
-}
-
-
-static bool AlwaysFullCompiler(Isolate* isolate) {
- return FLAG_always_full_compiler || IsDebuggerActive(isolate);
-}
-
+void CompilationInfo::PrepareForCompilation(Scope* scope) {
+ ASSERT(scope_ == NULL);
+ scope_ = scope;
-void RecompileJob::RecordOptimizationStats() {
- Handle<JSFunction> function = info()->closure();
- if (!function->IsOptimized()) {
- // Concurrent recompilation and OSR may race. Increment only once.
- int opt_count = function->shared()->opt_count();
- function->shared()->set_opt_count(opt_count + 1);
- }
- double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
- double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
- double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
- if (FLAG_trace_opt) {
- PrintF("[optimizing ");
- function->ShortPrint();
- PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
- ms_codegen);
- }
- if (FLAG_trace_opt_stats) {
- static double compilation_time = 0.0;
- static int compiled_functions = 0;
- static int code_size = 0;
-
- compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
- compiled_functions++;
- code_size += function->shared()->SourceSize();
- PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
- compiled_functions,
- code_size,
- compilation_time);
- }
- if (FLAG_hydrogen_stats) {
- isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
- time_taken_to_optimize_,
- time_taken_to_codegen_);
+ int length = function()->slot_count();
+ if (feedback_vector_.is_null()) {
+ // Allocate the feedback vector too.
+ feedback_vector_ = isolate()->factory()->NewTypeFeedbackVector(length);
}
-}
-
-
-// A return value of true indicates the compilation pipeline is still
-// going, not necessarily that we optimized the code.
-static bool MakeCrankshaftCode(CompilationInfo* info) {
- RecompileJob job(info);
- RecompileJob::Status status = job.CreateGraph();
-
- if (status != RecompileJob::SUCCEEDED) {
- return status != RecompileJob::FAILED;
- }
- status = job.OptimizeGraph();
- if (status != RecompileJob::SUCCEEDED) {
- status = job.AbortOptimization();
- return status != RecompileJob::FAILED;
- }
- status = job.GenerateAndInstallCode();
- return status != RecompileJob::FAILED;
+ ASSERT(feedback_vector_->length() == length);
}
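PrepareForCompilation (replacing the old SetScope) is now also where the feedback vector is allocated, sized from the AST's slot count, unless Initialize() already adopted one of exactly that size. The invariant, in stand-alone form:

    #include <cassert>
    #include <memory>
    #include <vector>

    using FeedbackVector = std::vector<int>;

    // Allocate lazily, and insist an adopted vector matches the slot count.
    std::shared_ptr<FeedbackVector> Prepare(
        std::shared_ptr<FeedbackVector> vector, int slot_count) {
      if (!vector) {
        vector = std::make_shared<FeedbackVector>(slot_count);
      }
      assert(static_cast<int>(vector->size()) == slot_count);
      return vector;
    }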
@@ -356,12 +275,11 @@ class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
}
MODULE_NODE_LIST(DEF_VISIT)
DECLARATION_NODE_LIST(DEF_VISIT)
- AUXILIARY_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
};
-RecompileJob::Status RecompileJob::CreateGraph() {
+OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
@@ -377,9 +295,11 @@ RecompileJob::Status RecompileJob::CreateGraph() {
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
- if (AlwaysFullCompiler(isolate())) {
- info()->AbortOptimization();
- return SetLastStatus(BAILED_OUT);
+ if (FLAG_always_full_compiler) return AbortOptimization();
+
+ // Do not use crankshaft if we need to be able to set break points.
+ if (isolate()->DebuggerHasBreakPoints()) {
+ return AbortOptimization(kDebuggerHasBreakPoints);
}
  // Limit the number of times we re-compile a function with
@@ -387,8 +307,7 @@ RecompileJob::Status RecompileJob::CreateGraph() {
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
if (info()->opt_count() > kMaxOptCount) {
- info()->set_bailout_reason(kOptimizedTooManyTimes);
- return AbortOptimization();
+ return AbortAndDisableOptimization(kOptimizedTooManyTimes);
}
// Due to an encoding limit on LUnallocated operands in the Lithium
@@ -401,21 +320,22 @@ RecompileJob::Status RecompileJob::CreateGraph() {
const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
Scope* scope = info()->scope();
if ((scope->num_parameters() + 1) > parameter_limit) {
- info()->set_bailout_reason(kTooManyParameters);
- return AbortOptimization();
+ return AbortAndDisableOptimization(kTooManyParameters);
}
const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
if (info()->is_osr() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
- info()->set_bailout_reason(kTooManyParametersLocals);
- return AbortOptimization();
+ return AbortAndDisableOptimization(kTooManyParametersLocals);
+ }
+
+ if (scope->HasIllegalRedeclaration()) {
+ return AbortAndDisableOptimization(kFunctionWithIllegalRedeclaration);
}
// Take --hydrogen-filter into account.
if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
- info()->AbortOptimization();
- return SetLastStatus(BAILED_OUT);
+ return AbortOptimization(kHydrogenFilter);
}
// Recompile the unoptimized version of the code if the current version
@@ -432,7 +352,7 @@ RecompileJob::Status RecompileJob::CreateGraph() {
// Note that we use the same AST that we will use for generating the
// optimized code.
unoptimized.SetFunction(info()->function());
- unoptimized.SetScope(info()->scope());
+ unoptimized.PrepareForCompilation(info()->scope());
unoptimized.SetContext(info()->context());
if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
@@ -460,22 +380,22 @@ RecompileJob::Status RecompileJob::CreateGraph() {
if (FLAG_trace_hydrogen) {
Handle<String> name = info()->function()->debug_name();
PrintF("-----------------------------------------------------------\n");
- PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
+ PrintF("Compiling method %s using hydrogen\n", name->ToCString().get());
isolate()->GetHTracer()->TraceCompilation(info());
}
// Type-check the function.
AstTyper::Run(info());
- graph_builder_ = FLAG_emit_opt_code_positions
+ graph_builder_ = FLAG_hydrogen_track_positions
? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
+ info()->set_this_has_uses(false);
graph_ = graph_builder_->CreateGraph();
if (isolate()->has_pending_exception()) {
- info()->SetCode(Handle<Code>::null());
return SetLastStatus(FAILED);
}
@@ -485,24 +405,21 @@ RecompileJob::Status RecompileJob::CreateGraph() {
ASSERT(!graph_builder_->inline_bailout() || graph_ == NULL);
if (graph_ == NULL) {
if (graph_builder_->inline_bailout()) {
- info_->AbortOptimization();
- return SetLastStatus(BAILED_OUT);
- } else {
return AbortOptimization();
+ } else {
+ return AbortAndDisableOptimization();
}
}
if (info()->HasAbortedDueToDependencyChange()) {
- info_->set_bailout_reason(kBailedOutDueToDependencyChange);
- info_->AbortOptimization();
- return SetLastStatus(BAILED_OUT);
+ return AbortOptimization(kBailedOutDueToDependencyChange);
}
return SetLastStatus(SUCCEEDED);
}
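Besides the rename to OptimizedCompileJob, CreateGraph now distinguishes two failure modes that the old code expressed with ad-hoc set_bailout_reason/AbortOptimization pairs: AbortOptimization(reason) is a transient bailout (fall back this time, a later attempt may succeed), while AbortAndDisableOptimization(reason) also marks the function so Crankshaft never retries it. A sketch of the split (method names mirror the diff; the struct is a stand-in):

    #include <string>

    enum Status { BAILED_OUT };

    struct Job {
      std::string bailout_reason;
      bool optimization_disabled = false;

      // Transient: e.g. the function fails --hydrogen-filter.
      Status AbortOptimization(const std::string& why = "") {
        if (!why.empty()) bailout_reason = why;
        return BAILED_OUT;
      }
      // Permanent: e.g. kTooManyParameters can never succeed later.
      Status AbortAndDisableOptimization(const std::string& why = "") {
        optimization_disabled = true;
        return AbortOptimization(why);
      }
    };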
-RecompileJob::Status RecompileJob::OptimizeGraph() {
+OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -512,20 +429,19 @@ RecompileJob::Status RecompileJob::OptimizeGraph() {
Timer t(this, &time_taken_to_optimize_);
ASSERT(graph_ != NULL);
BailoutReason bailout_reason = kNoReason;
- if (!graph_->Optimize(&bailout_reason)) {
- if (bailout_reason != kNoReason) graph_builder_->Bailout(bailout_reason);
- return SetLastStatus(BAILED_OUT);
- } else {
+
+ if (graph_->Optimize(&bailout_reason)) {
chunk_ = LChunk::NewChunk(graph_);
- if (chunk_ == NULL) {
- return SetLastStatus(BAILED_OUT);
- }
+ if (chunk_ != NULL) return SetLastStatus(SUCCEEDED);
+ } else if (bailout_reason != kNoReason) {
+ graph_builder_->Bailout(bailout_reason);
}
- return SetLastStatus(SUCCEEDED);
+
+ return AbortOptimization();
}
-RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
+OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
ASSERT(last_status() == SUCCEEDED);
ASSERT(!info()->HasAbortedDueToDependencyChange());
DisallowCodeDependencyChange no_dependency_change;
@@ -541,9 +457,23 @@ RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
if (info()->bailout_reason() == kNoReason) {
- info()->set_bailout_reason(kCodeGenerationFailed);
+ info_->set_bailout_reason(kCodeGenerationFailed);
+ } else if (info()->bailout_reason() == kMapBecameDeprecated) {
+ if (FLAG_trace_opt) {
+ PrintF("[aborted optimizing ");
+ info()->closure()->ShortPrint();
+ PrintF(" because a map became deprecated]\n");
+ }
+ return AbortOptimization();
+ } else if (info()->bailout_reason() == kMapBecameUnstable) {
+ if (FLAG_trace_opt) {
+ PrintF("[aborted optimizing ");
+ info()->closure()->ShortPrint();
+ PrintF(" because a map became unstable]\n");
+ }
+ return AbortOptimization();
}
- return AbortOptimization();
+ return AbortAndDisableOptimization();
}
info()->SetCode(optimized_code);
}
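The new GenerateCode() tail applies the same transient/permanent split to codegen failures: kMapBecameDeprecated and kMapBecameUnstable abort only this attempt, since the offending maps have migrated and a retry can succeed, while any other codegen failure disables optimization for good. Schematically (a stand-in classifier, not V8 code):

    #include <string>

    // Returns whether optimization should stay enabled after the failure.
    bool KeepOptimizationEnabled(const std::string& bailout_reason) {
      // Map churn is retryable; the next attempt sees the migrated maps.
      return bailout_reason == "kMapBecameDeprecated" ||
             bailout_reason == "kMapBecameUnstable";
    }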
@@ -554,63 +484,46 @@ RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
}
-static bool GenerateCode(CompilationInfo* info) {
- bool is_optimizing = info->isolate()->use_crankshaft() &&
- !info->IsCompilingForDebugging() &&
- info->IsOptimizing();
- if (is_optimizing) {
- Logger::TimerEventScope timer(
- info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
- return MakeCrankshaftCode(info);
- } else {
- if (info->IsOptimizing()) {
- // Have the CompilationInfo decide if the compilation should be
- // BASE or NONOPT.
- info->DisableOptimization();
- }
- Logger::TimerEventScope timer(
- info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
- return FullCodeGenerator::MakeCode(info);
+void OptimizedCompileJob::RecordOptimizationStats() {
+ Handle<JSFunction> function = info()->closure();
+ if (!function->IsOptimized()) {
+ // Concurrent recompilation and OSR may race. Increment only once.
+ int opt_count = function->shared()->opt_count();
+ function->shared()->set_opt_count(opt_count + 1);
}
-}
-
-
-static bool MakeCode(CompilationInfo* info) {
- // Precondition: code has been parsed. Postcondition: the code field in
- // the compilation info is set if compilation succeeded.
- ASSERT(info->function() != NULL);
- return Rewriter::Rewrite(info) && Scope::Analyze(info) && GenerateCode(info);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
- // Precondition: code has been parsed. Postcondition: the code field in
- // the compilation info is set if compilation succeeded.
- bool succeeded = MakeCode(info);
- if (!info->shared_info().is_null()) {
- Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
- info->zone());
- info->shared_info()->set_scope_info(*scope_info);
+ double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
+ double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
+ double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
+ if (FLAG_trace_opt) {
+ PrintF("[optimizing ");
+ function->ShortPrint();
+ PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
+ ms_codegen);
}
- return succeeded;
-}
-#endif
-
+ if (FLAG_trace_opt_stats) {
+ static double compilation_time = 0.0;
+ static int compiled_functions = 0;
+ static int code_size = 0;
-static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
- bool allow_lazy_without_ctx = false) {
- return LiveEditFunctionTracker::IsActive(info->isolate()) ||
- (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
+ compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
+ compiled_functions++;
+ code_size += function->shared()->SourceSize();
+ PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
+ compiled_functions,
+ code_size,
+ compilation_time);
+ }
+ if (FLAG_hydrogen_stats) {
+ isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
+ time_taken_to_optimize_,
+ time_taken_to_codegen_);
+ }
}
// Sets the expected number of properties based on an estimate from the compiler.
void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
int estimate) {
- // See the comment in SetExpectedNofProperties.
- if (shared->live_objects_may_exist()) return;
-
// If no properties are added in the constructor, they are more likely
// to be added later.
if (estimate == 0) estimate = 2;
@@ -618,7 +531,7 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
// TODO(yangguo): check whether those heuristics are still up-to-date.
// We do not shrink objects that go into a snapshot (yet), so we adjust
// the estimate conservatively.
- if (Serializer::enabled()) {
+ if (shared->GetIsolate()->serializer_enabled()) {
estimate += 2;
} else if (FLAG_clever_optimizations) {
// Inobject slack tracking will reclaim redundant inobject space later,
@@ -632,54 +545,264 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
}
-static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
+static void UpdateSharedFunctionInfo(CompilationInfo* info) {
+ // Update the shared function info with the compiled code and the
+ // scope info. Please note, that the order of the shared function
+ // info initialization is important since set_scope_info might
+ // trigger a GC, causing the ASSERT below to be invalid if the code
+ // was flushed. By setting the code object last we avoid this.
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info->scope(), info->zone());
+ shared->set_scope_info(*scope_info);
+
+ Handle<Code> code = info->code();
+ CHECK(code->kind() == Code::FUNCTION);
+ shared->ReplaceCode(*code);
+ if (shared->optimization_disabled()) code->set_optimizable(false);
+
+ shared->set_feedback_vector(*info->feedback_vector());
+
+ // Set the expected number of properties for instances.
+ FunctionLiteral* lit = info->function();
+ int expected = lit->expected_property_count();
+ SetExpectedNofPropertiesFromEstimate(shared, expected);
+
+ // Check the function has compiled code.
+ ASSERT(shared->is_compiled());
+ shared->set_dont_optimize_reason(lit->dont_optimize_reason());
+ shared->set_dont_inline(lit->flags()->Contains(kDontInline));
+ shared->set_ast_node_count(lit->ast_node_count());
+ shared->set_strict_mode(lit->strict_mode());
+}
+
+
+// Sets the function info on a function.
+// The start_position points to the first '(' character after the function name
+// in the full script source. When counting characters in the script source,
+// the first character is number 0 (not 1).
+static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
+ FunctionLiteral* lit,
+ bool is_toplevel,
+ Handle<Script> script) {
+ function_info->set_length(lit->parameter_count());
+ function_info->set_formal_parameter_count(lit->parameter_count());
+ function_info->set_script(*script);
+ function_info->set_function_token_position(lit->function_token_position());
+ function_info->set_start_position(lit->start_position());
+ function_info->set_end_position(lit->end_position());
+ function_info->set_is_expression(lit->is_expression());
+ function_info->set_is_anonymous(lit->is_anonymous());
+ function_info->set_is_toplevel(is_toplevel);
+ function_info->set_inferred_name(*lit->inferred_name());
+ function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+ function_info->set_allows_lazy_compilation_without_context(
+ lit->AllowsLazyCompilationWithoutContext());
+ function_info->set_strict_mode(lit->strict_mode());
+ function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
+ function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
+ function_info->set_ast_node_count(lit->ast_node_count());
+ function_info->set_is_function(lit->is_function());
+ function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
+ function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
+ function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
+ function_info->set_is_generator(lit->is_generator());
+}
+
+
+static bool CompileUnoptimizedCode(CompilationInfo* info) {
+ ASSERT(info->function() != NULL);
+ if (!Rewriter::Rewrite(info)) return false;
+ if (!Scope::Analyze(info)) return false;
+ ASSERT(info->scope() != NULL);
+
+ if (!FullCodeGenerator::MakeCode(info)) {
+ Isolate* isolate = info->isolate();
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return false;
+ }
+ return true;
+}
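CompileUnoptimizedCode pins down the phase order that used to be spread across MakeCode/GenerateCode: rewrite the AST, analyze scopes, then run the full code generator, converting a silent failure into a stack-overflow exception. The shape of the driver, abstractly:

    #include <functional>
    #include <vector>

    // Each phase returns false on failure and the driver stops there
    // (models the early returns in CompileUnoptimizedCode above).
    bool RunPipeline(const std::vector<std::function<bool()>>& phases) {
      for (const auto& phase : phases) {
        if (!phase()) return false;
      }
      return true;
    }
    // Usage sketch: RunPipeline({Rewrite, AnalyzeScopes, GenerateFullCode});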
+
+
+MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
+ CompilationInfo* info) {
+ VMState<COMPILER> state(info->isolate());
+ PostponeInterruptsScope postpone(info->isolate());
+ if (!Parser::Parse(info)) return MaybeHandle<Code>();
+ info->SetStrictMode(info->function()->strict_mode());
+
+ if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
+ Compiler::RecordFunctionCompilation(
+ Logger::LAZY_COMPILE_TAG, info, info->shared_info());
+ UpdateSharedFunctionInfo(info);
+ ASSERT_EQ(Code::FUNCTION, info->code()->kind());
+ return info->code();
+}
+
+
+MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
+ ASSERT(!function->GetIsolate()->has_pending_exception());
+ ASSERT(!function->is_compiled());
+ if (function->shared()->is_compiled()) {
+ return Handle<Code>(function->shared()->code());
+ }
+
+ CompilationInfoWithZone info(function);
+ Handle<Code> result;
+ ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result,
+ GetUnoptimizedCodeCommon(&info),
+ Code);
+
+ if (FLAG_always_opt &&
+ info.isolate()->use_crankshaft() &&
+ !info.shared_info()->optimization_disabled() &&
+ !info.isolate()->DebuggerHasBreakPoints()) {
+ Handle<Code> opt_code;
+ if (Compiler::GetOptimizedCode(
+ function, result,
+ Compiler::NOT_CONCURRENT).ToHandle(&opt_code)) {
+ result = opt_code;
+ }
+ }
+
+ return result;
+}
+
+
+MaybeHandle<Code> Compiler::GetUnoptimizedCode(
+ Handle<SharedFunctionInfo> shared) {
+ ASSERT(!shared->GetIsolate()->has_pending_exception());
+ ASSERT(!shared->is_compiled());
+
+ CompilationInfoWithZone info(shared);
+ return GetUnoptimizedCodeCommon(&info);
+}
+
+
+bool Compiler::EnsureCompiled(Handle<JSFunction> function,
+ ClearExceptionFlag flag) {
+ if (function->is_compiled()) return true;
+ MaybeHandle<Code> maybe_code = Compiler::GetUnoptimizedCode(function);
+ Handle<Code> code;
+ if (!maybe_code.ToHandle(&code)) {
+ if (flag == CLEAR_EXCEPTION) {
+ function->GetIsolate()->clear_pending_exception();
+ }
+ return false;
+ }
+ function->ReplaceCode(*code);
+ ASSERT(function->is_compiled());
+ return true;
+}
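EnsureCompiled wraps GetUnoptimizedCode for callers that only care whether the function ends up compiled, with the ClearExceptionFlag deciding whether a failure leaves the pending exception in place. A toy version of the flag handling (stand-in types, not V8's Isolate):

    #include <cassert>

    enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };

    struct Isolate {
      bool has_pending_exception = false;
    };

    // Models the failure path above: optionally swallow the exception.
    bool EnsureCompiled(Isolate* isolate, bool compile_ok,
                        ClearExceptionFlag flag) {
      if (compile_ok) return true;
      isolate->has_pending_exception = true;  // set by the failed compile
      if (flag == CLEAR_EXCEPTION) isolate->has_pending_exception = false;
      return false;
    }

    int main() {
      Isolate isolate;
      assert(!EnsureCompiled(&isolate, false, CLEAR_EXCEPTION));
      assert(!isolate.has_pending_exception);
    }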
+
+
+// Compile full code for debugging. This code will have debug break slots
+// and deoptimization information. Deoptimization information is required
+// in case an optimized version of this function is still activated on
+// the stack. It will also make sure that the full code is compiled with
+// the same flags as the previous version, that is, flags which can change
+// the generated code. The current method of mapping from already compiled
+// full code without debug break slots to full code with debug break slots
+// depends on the generated code being otherwise exactly the same.
+// If compilation fails, just keep the existing code.
+MaybeHandle<Code> Compiler::GetCodeForDebugging(Handle<JSFunction> function) {
+ CompilationInfoWithZone info(function);
+ Isolate* isolate = info.isolate();
+ VMState<COMPILER> state(isolate);
+
+ info.MarkAsDebug();
+
+ ASSERT(!isolate->has_pending_exception());
+ Handle<Code> old_code(function->shared()->code());
+ ASSERT(old_code->kind() == Code::FUNCTION);
+ ASSERT(!old_code->has_debug_break_slots());
+
+ info.MarkCompilingForDebugging();
+ if (old_code->is_compiled_optimizable()) {
+ info.EnableDeoptimizationSupport();
+ } else {
+ info.MarkNonOptimizable();
+ }
+ MaybeHandle<Code> maybe_new_code = GetUnoptimizedCodeCommon(&info);
+ Handle<Code> new_code;
+ if (!maybe_new_code.ToHandle(&new_code)) {
+ isolate->clear_pending_exception();
+ } else {
+ ASSERT_EQ(old_code->is_compiled_optimizable(),
+ new_code->is_compiled_optimizable());
+ }
+ return maybe_new_code;
+}
+
+
+void Compiler::CompileForLiveEdit(Handle<Script> script) {
+ // TODO(635): support extensions.
+ CompilationInfoWithZone info(script);
+ PostponeInterruptsScope postpone(info.isolate());
+ VMState<COMPILER> state(info.isolate());
+
+ info.MarkAsGlobal();
+ if (!Parser::Parse(&info)) return;
+ info.SetStrictMode(info.function()->strict_mode());
+
+ LiveEditFunctionTracker tracker(info.isolate(), info.function());
+ if (!CompileUnoptimizedCode(&info)) return;
+ if (!info.shared_info().is_null()) {
+ Handle<ScopeInfo> scope_info = ScopeInfo::Create(info.scope(),
+ info.zone());
+ info.shared_info()->set_scope_info(*scope_info);
+ }
+ tracker.RecordRootFunctionInfo(info.code());
+}
+
+
+static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
+ bool allow_lazy_without_ctx = false) {
+ return LiveEditFunctionTracker::IsActive(info->isolate()) ||
+ (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
+}
+
+
+static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
-
ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
+
// TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
FixedArray* array = isolate->native_context()->embedder_data();
script->set_context_data(array->get(0));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (info->is_eval()) {
- script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
- // For eval scripts add information on the function from which eval was
- // called.
- if (info->is_eval()) {
- StackTraceFrameIterator it(isolate);
- if (!it.done()) {
- script->set_eval_from_shared(it.frame()->function()->shared());
- Code* code = it.frame()->LookupCode();
- int offset = static_cast<int>(
- it.frame()->pc() - code->instruction_start());
- script->set_eval_from_instructions_offset(Smi::FromInt(offset));
- }
- }
+ isolate->debug()->OnBeforeCompile(script);
+
+ ASSERT(info->is_eval() || info->is_global());
+
+ bool parse_allow_lazy =
+ (info->cached_data_mode() == CONSUME_CACHED_DATA ||
+ String::cast(script->source())->length() > FLAG_min_preparse_length) &&
+ !DebuggerWantsEagerCompilation(info);
+
+ if (!parse_allow_lazy && info->cached_data_mode() != NO_CACHED_DATA) {
+ // We are going to parse eagerly, but we either 1) have cached data produced
+ // by lazy parsing or 2) are asked to generate cached data. We cannot use
+ // the existing data, since it won't contain all the symbols we need for
+ // eager parsing. In addition, it doesn't make sense to produce the data
+ // when parsing eagerly. That data would contain all symbols, but no
+ // functions, so it cannot be used to aid lazy parsing later.
+ info->SetCachedData(NULL, NO_CACHED_DATA);
}
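The block above makes the lazy-parse decision once, up front: lazy parsing is worthwhile when cached data can be consumed or the source is long enough, unless the debugger forces eager compilation, and an eager parse invalidates any plan to consume or produce cached data. As a stand-alone predicate:

    #include <cstddef>

    enum CachedDataMode {
      NO_CACHED_DATA, PRODUCE_CACHED_DATA, CONSUME_CACHED_DATA
    };

    // Mirrors the decision in the diff; an eager parse drops the cached-data
    // plan because data from lazy parsing lacks the symbols eager parsing
    // needs, and producing data during an eager parse is pointless.
    bool DecideLazyParse(CachedDataMode* mode, size_t source_length,
                         size_t min_preparse_length,
                         bool debugger_wants_eager) {
      bool lazy = (*mode == CONSUME_CACHED_DATA ||
                   source_length > min_preparse_length) &&
                  !debugger_wants_eager;
      if (!lazy && *mode != NO_CACHED_DATA) *mode = NO_CACHED_DATA;
      return lazy;
    }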
- // Notify debugger
- isolate->debugger()->OnBeforeCompile(script);
-#endif
+ Handle<SharedFunctionInfo> result;
- // Only allow non-global compiles for eval.
- ASSERT(info->is_eval() || info->is_global());
- {
- Parser parser(info);
- if ((info->pre_parse_data() != NULL ||
- String::cast(script->source())->length() > FLAG_min_preparse_length) &&
- !DebuggerWantsEagerCompilation(info))
- parser.set_allow_lazy(true);
- if (!parser.Parse()) {
+ { VMState<COMPILER> state(info->isolate());
+ if (!Parser::Parse(info, parse_allow_lazy)) {
return Handle<SharedFunctionInfo>::null();
}
- }
- FunctionLiteral* lit = info->function();
- LiveEditFunctionTracker live_edit_tracker(isolate, lit);
- Handle<SharedFunctionInfo> result;
- {
+ FunctionLiteral* lit = info->function();
+ LiveEditFunctionTracker live_edit_tracker(isolate, lit);
+
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
@@ -689,48 +812,33 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
HistogramTimerScope timer(rate);
// Compile the code.
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ if (!CompileUnoptimizedCode(info)) {
return Handle<SharedFunctionInfo>::null();
}
// Allocate function.
ASSERT(!info->code().is_null());
- result =
- isolate->factory()->NewSharedFunctionInfo(
- lit->name(),
- lit->materialized_literal_count(),
- lit->is_generator(),
- info->code(),
- ScopeInfo::Create(info->scope(), info->zone()));
+ result = isolate->factory()->NewSharedFunctionInfo(
+ lit->name(),
+ lit->materialized_literal_count(),
+ lit->is_generator(),
+ info->code(),
+ ScopeInfo::Create(info->scope(), info->zone()),
+ info->feedback_vector());
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- Compiler::SetFunctionInfo(result, lit, true, script);
-
- if (script->name()->IsString()) {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- info,
- String::cast(script->name())));
- GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
- script,
- info->code(),
- info));
- } else {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- info,
- isolate->heap()->empty_string()));
- GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
- }
+ SetFunctionInfo(result, lit, true, script);
+
+ Handle<String> script_name = script->name()->IsString()
+ ? Handle<String>(String::cast(script->name()))
+ : isolate->factory()->empty_string();
+ Logger::LogEventsAndTags log_tag = info->is_eval()
+ ? Logger::EVAL_TAG
+ : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
+
+ PROFILE(isolate, CodeCreateEvent(
+ log_tag, *info->code(), *result, info, *script_name));
+ GDBJIT(AddCode(script_name, script, info->code(), info));
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
@@ -739,60 +847,106 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
lit->expected_property_count());
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- }
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger
- isolate->debugger()->OnAfterCompile(
- script, Debugger::NO_AFTER_COMPILE_FLAGS);
-#endif
+ live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
+ }
- live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
+ isolate->debug()->OnAfterCompile(script, Debug::NO_AFTER_COMPILE_FLAGS);
return result;
}
-Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag natives) {
+MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
+ Handle<String> source,
+ Handle<Context> context,
+ StrictMode strict_mode,
+ ParseRestriction restriction,
+ int scope_position) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
- isolate->counters()->total_load_size()->Increment(source_length);
+ isolate->counters()->total_eval_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
- // The VM is in the COMPILER state until exiting this function.
- VMState<COMPILER> state(isolate);
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ MaybeHandle<SharedFunctionInfo> maybe_shared_info =
+ compilation_cache->LookupEval(source, context, strict_mode,
+ scope_position);
+ Handle<SharedFunctionInfo> shared_info;
+
+ if (!maybe_shared_info.ToHandle(&shared_info)) {
+ Handle<Script> script = isolate->factory()->NewScript(source);
+ CompilationInfoWithZone info(script);
+ info.MarkAsEval();
+ if (context->IsNativeContext()) info.MarkAsGlobal();
+ info.SetStrictMode(strict_mode);
+ info.SetParseRestriction(restriction);
+ info.SetContext(context);
+
+ Debug::RecordEvalCaller(script);
+
+ shared_info = CompileToplevel(&info);
+
+ if (shared_info.is_null()) {
+ return MaybeHandle<JSFunction>();
+ } else {
+ // Explicitly disable optimization for eval code. We're not yet prepared
+ // to handle eval-code in the optimizing compiler.
+ shared_info->DisableOptimization(kEval);
+
+ // If caller is strict mode, the result must be in strict mode as well.
+ ASSERT(strict_mode == SLOPPY || shared_info->strict_mode() == STRICT);
+ if (!shared_info->dont_cache()) {
+ compilation_cache->PutEval(
+ source, context, shared_info, scope_position);
+ }
+ }
+ } else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
+ shared_info->ResetForNewContext(isolate->heap()->global_ic_age());
+ }
+
+ return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared_info, context, NOT_TENURED);
+}
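GetFunctionFromEval replaces the old Compiler::CompileEval and keeps its lookup-or-compile shape: a cache hit is reused (after an IC-age reset if needed), a miss compiles via CompileToplevel, disables optimization for eval code, and caches the result unless it is marked dont_cache. The flow, reduced to plain containers:

    #include <map>
    #include <string>

    // Toy lookup-or-compile (shape only; V8 keys on context, strict mode
    // and scope position as well, not just the source string).
    std::string GetEvalCode(std::map<std::string, std::string>* cache,
                            const std::string& source) {
      auto it = cache->find(source);
      if (it != cache->end()) return it->second;  // LookupEval hit
      std::string code = "code(" + source + ")";  // CompileToplevel
      (*cache)[source] = code;                    // PutEval unless dont_cache
      return code;
    }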
+
+
+Handle<SharedFunctionInfo> Compiler::CompileScript(
+ Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ bool is_shared_cross_origin,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptData** cached_data,
+ CachedDataMode cached_data_mode,
+ NativesFlag natives) {
+ if (cached_data_mode == NO_CACHED_DATA) {
+ cached_data = NULL;
+ } else if (cached_data_mode == PRODUCE_CACHED_DATA) {
+ ASSERT(cached_data && !*cached_data);
+ } else {
+ ASSERT(cached_data_mode == CONSUME_CACHED_DATA);
+ ASSERT(cached_data && *cached_data);
+ }
+ Isolate* isolate = source->GetIsolate();
+ int source_length = source->length();
+ isolate->counters()->total_load_size()->Increment(source_length);
+ isolate->counters()->total_compile_size()->Increment(source_length);
CompilationCache* compilation_cache = isolate->compilation_cache();
// Do a lookup in the compilation cache but not for extensions.
+ MaybeHandle<SharedFunctionInfo> maybe_result;
Handle<SharedFunctionInfo> result;
if (extension == NULL) {
- result = compilation_cache->LookupScript(source,
- script_name,
- line_offset,
- column_offset,
- is_shared_cross_origin,
- context);
+ maybe_result = compilation_cache->LookupScript(
+ source, script_name, line_offset, column_offset,
+ is_shared_cross_origin, context);
}
- if (result.is_null()) {
- // No cache entry found. Do pre-parsing, if it makes sense, and compile
- // the script.
- // Building preparse data that is only used immediately after is only a
- // saving if we might skip building the AST for lazily compiled functions.
- // I.e., preparse data isn't relevant when the lazy flag is off, and
- // for small sources, odds are that there aren't many functions
- // that would be compiled lazily anyway, so we skip the preparse step
- // in that case too.
+ if (!maybe_result.ToHandle(&result)) {
+ // No cache entry found. Compile the script.
// Create a script object describing the script to be compiled.
Handle<Script> script = isolate->factory()->NewScript(source);
@@ -806,157 +960,107 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
}
script->set_is_shared_cross_origin(is_shared_cross_origin);
- script->set_data(script_data.is_null() ? isolate->heap()->undefined_value()
- : *script_data);
-
// Compile the function and add it to the cache.
CompilationInfoWithZone info(script);
info.MarkAsGlobal();
info.SetExtension(extension);
- info.SetPreParseData(pre_data);
+ info.SetCachedData(cached_data, cached_data_mode);
info.SetContext(context);
- if (FLAG_use_strict) {
- info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
- }
- result = MakeFunctionInfo(&info);
+ if (FLAG_use_strict) info.SetStrictMode(STRICT);
+ result = CompileToplevel(&info);
if (extension == NULL && !result.is_null() && !result->dont_cache()) {
compilation_cache->PutScript(source, context, result);
}
- } else {
- if (result->ic_age() != isolate->heap()->global_ic_age()) {
+ if (result.is_null()) isolate->ReportPendingMessages();
+ } else if (result->ic_age() != isolate->heap()->global_ic_age()) {
result->ResetForNewContext(isolate->heap()->global_ic_age());
- }
}
-
- if (result.is_null()) isolate->ReportPendingMessages();
return result;
}
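A hypothetical call into the renamed CompileScript, shown only to illustrate the new cached-data plumbing (the argument order follows the signature above; NOT_NATIVES_CODE is assumed to be the non-natives value of NativesFlag):

    // ScriptData* data = NULL;
    // Handle<SharedFunctionInfo> shared = Compiler::CompileScript(
    //     source, name, 0 /* line */, 0 /* column */,
    //     false /* is_shared_cross_origin */, context, NULL /* extension */,
    //     &data, PRODUCE_CACHED_DATA, NOT_NATIVES_CODE);
    // // On success, `data` can be fed back to a later call with
    // // CONSUME_CACHED_DATA to skip preparsing the same source.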
-Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- LanguageMode language_mode,
- ParseRestriction restriction,
- int scope_position) {
- Isolate* isolate = source->GetIsolate();
- int source_length = source->length();
- isolate->counters()->total_eval_size()->Increment(source_length);
- isolate->counters()->total_compile_size()->Increment(source_length);
-
- // The VM is in the COMPILER state until exiting this function.
- VMState<COMPILER> state(isolate);
+Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
+ Handle<Script> script) {
+ // Precondition: code has been parsed and scopes have been analyzed.
+ CompilationInfoWithZone info(script);
+ info.SetFunction(literal);
+ info.PrepareForCompilation(literal->scope());
+ info.SetStrictMode(literal->scope()->strict_mode());
- // Do a lookup in the compilation cache; if the entry is not there, invoke
- // the compiler and add the result to the cache.
- Handle<SharedFunctionInfo> result;
- CompilationCache* compilation_cache = isolate->compilation_cache();
- result = compilation_cache->LookupEval(source,
- context,
- is_global,
- language_mode,
- scope_position);
+ Isolate* isolate = info.isolate();
+ Factory* factory = isolate->factory();
+ LiveEditFunctionTracker live_edit_tracker(isolate, literal);
+ // Determine if the function can be lazily compiled. This is necessary to
+ // allow some of our builtin JS files to be lazily compiled. These
+ // builtins cannot be handled lazily by the parser, since we have to know
+ // if a function uses the special natives syntax, which is something the
+ // parser records.
+ // If the debugger requests compilation for break points, we cannot be
+ // aggressive about lazy compilation, because it might trigger compilation
+ // of functions without an outer context when setting a breakpoint through
+ // Debug::FindSharedFunctionInfoInScript.
+ bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
+ bool allow_lazy = literal->AllowsLazyCompilation() &&
+ !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
- if (result.is_null()) {
- // Create a script object describing the script to be compiled.
- Handle<Script> script = isolate->factory()->NewScript(source);
- CompilationInfoWithZone info(script);
- info.MarkAsEval();
- if (is_global) info.MarkAsGlobal();
- info.SetLanguageMode(language_mode);
- info.SetParseRestriction(restriction);
- info.SetContext(context);
- result = MakeFunctionInfo(&info);
- if (!result.is_null()) {
- // Explicitly disable optimization for eval code. We're not yet prepared
- // to handle eval-code in the optimizing compiler.
- result->DisableOptimization(kEval);
-
- // If caller is strict mode, the result must be in strict mode or
- // extended mode as well, but not the other way around. Consider:
- // eval("'use strict'; ...");
- ASSERT(language_mode != STRICT_MODE || !result->is_classic_mode());
- // If caller is in extended mode, the result must also be in
- // extended mode.
- ASSERT(language_mode != EXTENDED_MODE ||
- result->is_extended_mode());
- if (!result->dont_cache()) {
- compilation_cache->PutEval(
- source, context, is_global, result, scope_position);
- }
- }
+ // Generate code
+ Handle<ScopeInfo> scope_info;
+ if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
+ Handle<Code> code = isolate->builtins()->CompileUnoptimized();
+ info.SetCode(code);
+ scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
+ } else if (FullCodeGenerator::MakeCode(&info)) {
+ ASSERT(!info.code().is_null());
+ scope_info = ScopeInfo::Create(info.scope(), info.zone());
} else {
- if (result->ic_age() != isolate->heap()->global_ic_age()) {
- result->ResetForNewContext(isolate->heap()->global_ic_age());
- }
+ return Handle<SharedFunctionInfo>::null();
}
+ // Create a shared function info object.
+ Handle<SharedFunctionInfo> result =
+ factory->NewSharedFunctionInfo(literal->name(),
+ literal->materialized_literal_count(),
+ literal->is_generator(),
+ info.code(),
+ scope_info,
+ info.feedback_vector());
+ SetFunctionInfo(result, literal, false, script);
+ RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
+ result->set_allows_lazy_compilation(allow_lazy);
+ result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
+
+ // Set the expected number of properties for instances and return
+ // the resulting function.
+ SetExpectedNofPropertiesFromEstimate(result,
+ literal->expected_property_count());
+ live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
return result;
}
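
BuildFunctionInfo now either installs the CompileUnoptimized builtin as a placeholder (lazy path, empty ScopeInfo) or runs the full code generator eagerly. A condensed restatement of the decision above; the note about parenthesized literals being likely immediately-invoked functions is an assumption about the heuristic's intent:

    // Restates the logic above; not new behavior.
    bool allow_lazy = literal->AllowsLazyCompilation() &&
        !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
    if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
      // Placeholder code; real compilation happens on first call.
      info.SetCode(isolate->builtins()->CompileUnoptimized());
    } else {
      // Eager: generate full code and a real ScopeInfo now.
      FullCodeGenerator::MakeCode(&info);
    }
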
-static bool InstallFullCode(CompilationInfo* info) {
- // Update the shared function info with the compiled code and the
-  // scope info. Please note that the order of the shared function
- // info initialization is important since set_scope_info might
- // trigger a GC, causing the ASSERT below to be invalid if the code
- // was flushed. By setting the code object last we avoid this.
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<Code> code = info->code();
- CHECK(code->kind() == Code::FUNCTION);
- Handle<JSFunction> function = info->closure();
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info->scope(), info->zone());
- shared->set_scope_info(*scope_info);
- shared->ReplaceCode(*code);
- if (!function.is_null()) {
- function->ReplaceCode(*code);
- ASSERT(!function->IsOptimized());
- }
-
- // Set the expected number of properties for instances.
- FunctionLiteral* lit = info->function();
- int expected = lit->expected_property_count();
- SetExpectedNofPropertiesFromEstimate(shared, expected);
-
- // Check the function has compiled code.
- ASSERT(shared->is_compiled());
- shared->set_dont_optimize_reason(lit->dont_optimize_reason());
- shared->set_dont_inline(lit->flags()->Contains(kDontInline));
- shared->set_ast_node_count(lit->ast_node_count());
-
- if (info->isolate()->use_crankshaft() &&
- !function.is_null() &&
- !shared->optimization_disabled()) {
- // If we're asked to always optimize, we compile the optimized
- // version of the function right away - unless the debugger is
- // active as it makes no sense to compile optimized code then.
- if (FLAG_always_opt &&
- !info->isolate()->DebuggerHasBreakPoints()) {
- CompilationInfoWithZone optimized(function);
- optimized.SetOptimizing(BailoutId::None());
- return Compiler::CompileLazy(&optimized);
+MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
+ Handle<JSFunction> function,
+ BailoutId osr_ast_id) {
+ if (FLAG_cache_optimized_code) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ DisallowHeapAllocation no_gc;
+ int index = shared->SearchOptimizedCodeMap(
+ function->context()->native_context(), osr_ast_id);
+ if (index > 0) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ if (!osr_ast_id.IsNone()) {
+ PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+ }
+ PrintF("]\n");
+ }
+ FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
+ if (literals != NULL) function->set_literals(literals);
+ return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index));
}
}
- return true;
-}
-
-
-static void InstallCodeCommon(CompilationInfo* info) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<Code> code = info->code();
- ASSERT(!code.is_null());
-
- // Set optimizable to false if this is disallowed by the shared
- // function info, e.g., we might have flushed the code and must
- // reset this bit when lazy compiling the code again.
- if (shared->optimization_disabled()) code->set_optimizable(false);
-
- if (shared->code() == *code) {
- // Do not send compilation event for the same code twice.
- return;
- }
- Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+ return MaybeHandle<Code>();
}
@@ -964,329 +1068,174 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
- // Cache non-OSR optimized code.
- if (FLAG_cache_optimized_code && !info->is_osr()) {
+ // Cache optimized code.
+ if (FLAG_cache_optimized_code) {
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<FixedArray> literals(function->literals());
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(
- shared, native_context, code, literals);
+ shared, native_context, code, literals, info->osr_ast_id());
}
}
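
The optimized code map is now keyed by (native context, OSR AST id), so OSR code is cached too instead of being skipped. The lookup and insert sides pair up as follows, using only calls visible in this patch:

    // Lookup (GetCodeFromOptimizedCodeMap above): a positive index means
    // a hit for this exact context and OSR entry point.
    int index = shared->SearchOptimizedCodeMap(
        function->context()->native_context(), osr_ast_id);

    // Insert (InsertCodeIntoOptimizedCodeMap above): the same key parts,
    // plus the literals array the cached code expects.
    SharedFunctionInfo::AddToOptimizedCodeMap(
        shared, native_context, code, literals, info->osr_ast_id());
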
-static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
- if (!info->IsOptimizing()) return false; // Nothing to look up.
+static bool CompileOptimizedPrologue(CompilationInfo* info) {
+ if (!Parser::Parse(info)) return false;
+ info->SetStrictMode(info->function()->strict_mode());
- // Lookup non-OSR optimized code.
- if (FLAG_cache_optimized_code && !info->is_osr()) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<JSFunction> function = info->closure();
- ASSERT(!function.is_null());
- Handle<Context> native_context(function->context()->native_context());
- int index = shared->SearchOptimizedCodeMap(*native_context);
- if (index > 0) {
- if (FLAG_trace_opt) {
- PrintF("[found optimized code for ");
- function->ShortPrint();
- PrintF("]\n");
- }
- // Caching of optimized code enabled and optimized code found.
- shared->InstallFromOptimizedCodeMap(*function, index);
- return true;
- }
- }
- return false;
+ if (!Rewriter::Rewrite(info)) return false;
+ if (!Scope::Analyze(info)) return false;
+ ASSERT(info->scope() != NULL);
+ return true;
}
-bool Compiler::CompileLazy(CompilationInfo* info) {
- Isolate* isolate = info->isolate();
-
- // The VM is in the COMPILER state until exiting this function.
- VMState<COMPILER> state(isolate);
-
- PostponeInterruptsScope postpone(isolate);
-
- Handle<SharedFunctionInfo> shared = info->shared_info();
- int compiled_size = shared->end_position() - shared->start_position();
- isolate->counters()->total_compile_size()->Increment(compiled_size);
-
- if (InstallCodeFromOptimizedCodeMap(info)) return true;
+static bool GetOptimizedCodeNow(CompilationInfo* info) {
+ if (!CompileOptimizedPrologue(info)) return false;
- // Generate the AST for the lazily compiled function.
- if (Parser::Parse(info)) {
- // Measure how long it takes to do the lazy compilation; only take the
- // rest of the function into account to avoid overlap with the lazy
- // parsing statistics.
- HistogramTimerScope timer(isolate->counters()->compile_lazy());
+ Logger::TimerEventScope timer(
+ info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
+
+ OptimizedCompileJob job(info);
+ if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+ if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+ if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false;
+
+ // Success!
+ ASSERT(!info->isolate()->has_pending_exception());
+ InsertCodeIntoOptimizedCodeMap(info);
+ Compiler::RecordFunctionCompilation(
+ Logger::LAZY_COMPILE_TAG, info, info->shared_info());
+ return true;
+}
- // After parsing we know the function's language mode. Remember it.
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
- shared->set_language_mode(language_mode);
- // Compile the code.
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) {
- isolate->StackOverflow();
- }
- } else {
- InstallCodeCommon(info);
-
- if (info->IsOptimizing()) {
- // Optimized code successfully created.
- Handle<Code> code = info->code();
- ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
- // TODO(titzer): Only replace the code if it was not an OSR compile.
- info->closure()->ReplaceCode(*code);
- InsertCodeIntoOptimizedCodeMap(info);
- return true;
- } else if (!info->is_osr()) {
- // Compilation failed. Replace with full code if not OSR compile.
- return InstallFullCode(info);
- }
+static bool GetOptimizedCodeLater(CompilationInfo* info) {
+ Isolate* isolate = info->isolate();
+ if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Compilation queue full, will retry optimizing ");
+ info->closure()->PrintName();
+ PrintF(" later.\n");
}
+ return false;
}
- ASSERT(info->code().is_null());
- return false;
-}
-
-
-bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
- uint32_t osr_pc_offset) {
- bool compiling_for_osr = (osr_pc_offset != 0);
+ CompilationHandleScope handle_scope(info);
+ if (!CompileOptimizedPrologue(info)) return false;
+ info->SaveHandles(); // Copy handles to the compilation handle scope.
- Isolate* isolate = closure->GetIsolate();
- // Here we prepare compile data for the concurrent recompilation thread, but
- // this still happens synchronously and interrupts execution.
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
- if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Compilation queue full, will retry optimizing ");
- closure->PrintName();
- PrintF(" on next run.\n");
+ OptimizedCompileJob* job = new(info->zone()) OptimizedCompileJob(info);
+ OptimizedCompileJob::Status status = job->CreateGraph();
+ if (status != OptimizedCompileJob::SUCCEEDED) return false;
+ isolate->optimizing_compiler_thread()->QueueForOptimization(job);
+
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Queued ");
+ info->closure()->PrintName();
+ if (info->is_osr()) {
+ PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
+ } else {
+ PrintF(" for concurrent optimization.\n");
}
- return false;
}
+ return true;
+}
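
GetOptimizedCodeNow and GetOptimizedCodeLater split the same three-phase OptimizedCompileJob across threads differently. A sketch of the phase protocol as it appears in this patch:

    // Synchronous path: all three phases run on the main thread.
    OptimizedCompileJob job(info);
    if (job.CreateGraph()   != OptimizedCompileJob::SUCCEEDED) return false;
    if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
    if (job.GenerateCode()  != OptimizedCompileJob::SUCCEEDED) return false;

    // Concurrent path: CreateGraph here, then QueueForOptimization() hands
    // OptimizeGraph to the optimizing compiler thread; GenerateCode runs
    // later on the main thread in GetConcurrentlyOptimizedCode below.
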
- SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
- Handle<SharedFunctionInfo> shared = info->shared_info();
- if (compiling_for_osr) {
- BailoutId osr_ast_id =
- shared->code()->TranslatePcOffsetToAstId(osr_pc_offset);
- ASSERT(!osr_ast_id.IsNone());
- info->SetOptimizing(osr_ast_id);
- info->set_osr_pc_offset(osr_pc_offset);
-
- if (FLAG_trace_osr) {
- PrintF("[COSR - attempt to queue ");
- closure->PrintName();
- PrintF(" at AST id %d]\n", osr_ast_id.ToInt());
- }
- } else {
- info->SetOptimizing(BailoutId::None());
+MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
+ Handle<Code> current_code,
+ ConcurrencyMode mode,
+ BailoutId osr_ast_id) {
+ Handle<Code> cached_code;
+ if (GetCodeFromOptimizedCodeMap(
+ function, osr_ast_id).ToHandle(&cached_code)) {
+ return cached_code;
}
+ SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function));
+ Isolate* isolate = info->isolate();
VMState<COMPILER> state(isolate);
+ ASSERT(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ ASSERT_NE(ScopeInfo::Empty(isolate), shared->scope_info());
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
+ current_code->set_profiler_ticks(0);
- {
- CompilationHandleScope handle_scope(*info);
+ info->SetOptimizing(osr_ast_id, current_code);
- if (!compiling_for_osr && InstallCodeFromOptimizedCodeMap(*info)) {
- return true;
+ if (mode == CONCURRENT) {
+ if (GetOptimizedCodeLater(info.get())) {
+ info.Detach(); // The background recompile job owns this now.
+ return isolate->builtins()->InOptimizationQueue();
}
+ } else {
+ if (GetOptimizedCodeNow(info.get())) return info->code();
+ }
- if (Parser::Parse(*info)) {
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
- shared->set_language_mode(language_mode);
- info->SaveHandles();
-
- if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
- RecompileJob* job = new(info->zone()) RecompileJob(*info);
- RecompileJob::Status status = job->CreateGraph();
- if (status == RecompileJob::SUCCEEDED) {
- info.Detach();
- shared->code()->set_profiler_ticks(0);
- isolate->optimizing_compiler_thread()->QueueForOptimization(job);
- ASSERT(!isolate->has_pending_exception());
- return true;
- } else if (status == RecompileJob::BAILED_OUT) {
- isolate->clear_pending_exception();
- InstallFullCode(*info);
- }
- }
- }
+ // Failed.
+ if (FLAG_trace_opt) {
+ PrintF("[failed to optimize ");
+ function->PrintName();
+ PrintF(": %s]\n", GetBailoutReason(info->bailout_reason()));
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
- return false;
+ return MaybeHandle<Code>();
}
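
A caller now receives real optimized code, the InOptimizationQueue builtin (while a concurrent job is pending), or an empty MaybeHandle on failure. An illustrative caller, not part of the patch:

    // Sketch: request concurrent optimization, install whatever comes back.
    Handle<Code> code;
    if (Compiler::GetOptimizedCode(function,
                                   handle(function->shared()->code()),
                                   Compiler::CONCURRENT).ToHandle(&code)) {
      // May be the InOptimizationQueue builtin until the job completes.
      function->ReplaceCode(*code);
    } else {
      // Bailed out or failed; keep executing the unoptimized code.
    }
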
-Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
+Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
+ // Take ownership of compilation info. Deleting compilation info
+ // also tears down the zone and the recompile job.
SmartPointer<CompilationInfo> info(job->info());
- // The function may have already been optimized by OSR. Simply continue.
- // Except when OSR already disabled optimization for some reason.
- if (info->shared_info()->optimization_disabled()) {
- info->AbortOptimization();
- InstallFullCode(*info);
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** aborting optimization for ");
- info->closure()->PrintName();
- PrintF(" as it has been disabled.\n");
- }
- ASSERT(!info->closure()->IsInRecompileQueue());
- return Handle<Code>::null();
- }
-
Isolate* isolate = info->isolate();
+
VMState<COMPILER> state(isolate);
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
-  // If crankshaft succeeded, install the optimized code; otherwise install
- // the unoptimized code.
- RecompileJob::Status status = job->last_status();
- if (info->HasAbortedDueToDependencyChange()) {
- info->set_bailout_reason(kBailedOutDueToDependencyChange);
- status = job->AbortOptimization();
- } else if (status != RecompileJob::SUCCEEDED) {
- info->set_bailout_reason(kFailedBailedOutLastTime);
- status = job->AbortOptimization();
- } else if (isolate->DebuggerHasBreakPoints()) {
- info->set_bailout_reason(kDebuggerIsActive);
- status = job->AbortOptimization();
- } else {
- status = job->GenerateAndInstallCode();
- ASSERT(status == RecompileJob::SUCCEEDED ||
- status == RecompileJob::BAILED_OUT);
- }
- InstallCodeCommon(*info);
- if (status == RecompileJob::SUCCEEDED) {
- Handle<Code> code = info->code();
- ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
- info->closure()->ReplaceCode(*code);
- if (info->shared_info()->SearchOptimizedCodeMap(
- info->closure()->context()->native_context()) == -1) {
- InsertCodeIntoOptimizedCodeMap(*info);
- }
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Optimized code for ");
- info->closure()->PrintName();
- PrintF(" installed.\n");
- }
- } else {
- info->AbortOptimization();
- InstallFullCode(*info);
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ shared->code()->set_profiler_ticks(0);
+
+ // 1) Optimization may have failed.
+ // 2) The function may have already been optimized by OSR. Simply continue.
+ // Except when OSR already disabled optimization for some reason.
+ // 3) The code may have already been invalidated due to dependency change.
+ // 4) Debugger may have been activated.
+
+ if (job->last_status() != OptimizedCompileJob::SUCCEEDED ||
+ shared->optimization_disabled() ||
+ info->HasAbortedDueToDependencyChange() ||
+ isolate->DebuggerHasBreakPoints()) {
+ return Handle<Code>::null();
}
- // Optimized code is finally replacing unoptimized code. Reset the latter's
- // profiler ticks to prevent too soon re-opt after a deopt.
- info->shared_info()->code()->set_profiler_ticks(0);
- ASSERT(!info->closure()->IsInRecompileQueue());
- return (status == RecompileJob::SUCCEEDED) ? info->code()
- : Handle<Code>::null();
-}
-
-
-Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
- Handle<Script> script) {
- // Precondition: code has been parsed and scopes have been analyzed.
- CompilationInfoWithZone info(script);
- info.SetFunction(literal);
- info.SetScope(literal->scope());
- info.SetLanguageMode(literal->scope()->language_mode());
-
- Isolate* isolate = info.isolate();
- Factory* factory = isolate->factory();
- LiveEditFunctionTracker live_edit_tracker(isolate, literal);
- // Determine if the function can be lazily compiled. This is necessary to
- // allow some of our builtin JS files to be lazily compiled. These
- // builtins cannot be handled lazily by the parser, since we have to know
- // if a function uses the special natives syntax, which is something the
- // parser records.
- // If the debugger requests compilation for break points, we cannot be
- // aggressive about lazy compilation, because it might trigger compilation
- // of functions without an outer context when setting a breakpoint through
- // Debug::FindSharedFunctionInfoInScript.
- bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
- bool allow_lazy = literal->AllowsLazyCompilation() &&
- !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
-
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate));
- // Generate code
- if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
- Handle<Code> code = isolate->builtins()->LazyCompile();
- info.SetCode(code);
- } else if (GenerateCode(&info)) {
- ASSERT(!info.code().is_null());
- scope_info = ScopeInfo::Create(info.scope(), info.zone());
- } else {
- return Handle<SharedFunctionInfo>::null();
+ if (job->GenerateCode() != OptimizedCompileJob::SUCCEEDED) {
+ return Handle<Code>::null();
}
- // Create a shared function info object.
- Handle<SharedFunctionInfo> result =
- factory->NewSharedFunctionInfo(literal->name(),
- literal->materialized_literal_count(),
- literal->is_generator(),
- info.code(),
- scope_info);
- SetFunctionInfo(result, literal, false, script);
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
- result->set_allows_lazy_compilation(allow_lazy);
- result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
-
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(result,
- literal->expected_property_count());
- live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
- return result;
-}
+ Compiler::RecordFunctionCompilation(
+ Logger::LAZY_COMPILE_TAG, info.get(), shared);
+ if (info->shared_info()->SearchOptimizedCodeMap(
+ info->context()->native_context(), info->osr_ast_id()) == -1) {
+ InsertCodeIntoOptimizedCodeMap(info.get());
+ }
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Optimized code for ");
+ info->closure()->PrintName();
+ PrintF(" generated.\n");
+ }
-// Sets the function info on a function.
-// The start_position points to the first '(' character after the function name
-// in the full script source. When counting characters in the script source,
-// the first character is number 0 (not 1).
-void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script) {
- function_info->set_length(lit->parameter_count());
- function_info->set_formal_parameter_count(lit->parameter_count());
- function_info->set_script(*script);
- function_info->set_function_token_position(lit->function_token_position());
- function_info->set_start_position(lit->start_position());
- function_info->set_end_position(lit->end_position());
- function_info->set_is_expression(lit->is_expression());
- function_info->set_is_anonymous(lit->is_anonymous());
- function_info->set_is_toplevel(is_toplevel);
- function_info->set_inferred_name(*lit->inferred_name());
- function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
- function_info->set_allows_lazy_compilation_without_context(
- lit->AllowsLazyCompilationWithoutContext());
- function_info->set_language_mode(lit->language_mode());
- function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
- function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
- function_info->set_ast_node_count(lit->ast_node_count());
- function_info->set_is_function(lit->is_function());
- function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
- function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
- function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
- function_info->set_is_generator(lit->is_generator());
+ return Handle<Code>(*info->code());
}
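
When a background job finishes, this function decides on the main thread whether its code may still be installed. The four numbered bail-out conditions above collapse to a single predicate; restated as a sketch, with short-circuit order preserved:

    // Equivalent install decision (restatement, not new logic):
    bool install =
        job->last_status() == OptimizedCompileJob::SUCCEEDED &&   // 1)
        !shared->optimization_disabled() &&                       // 2)
        !info->HasAbortedDueToDependencyChange() &&               // 3)
        !isolate->DebuggerHasBreakPoints() &&                     // 4)
        job->GenerateCode() == OptimizedCompileJob::SUCCEEDED;
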
@@ -1303,31 +1252,19 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
info->isolate()->cpu_profiler()->is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
- if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
+ if (code.is_identical_to(
+ info->isolate()->builtins()->CompileUnoptimized())) {
return;
- int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
- int column_num =
- GetScriptColumnNumber(script, shared->start_position()) + 1;
- USE(line_num);
- if (script->name()->IsString()) {
- PROFILE(info->isolate(),
- CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
- *code,
- *shared,
- info,
- String::cast(script->name()),
- line_num,
- column_num));
- } else {
- PROFILE(info->isolate(),
- CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
- *code,
- *shared,
- info,
- info->isolate()->heap()->empty_string(),
- line_num,
- column_num));
}
+ int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
+ int column_num =
+ Script::GetColumnNumber(script, shared->start_position()) + 1;
+ String* script_name = script->name()->IsString()
+ ? String::cast(script->name())
+ : info->isolate()->heap()->empty_string();
+ Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
+ PROFILE(info->isolate(), CodeCreateEvent(
+ log_tag, *code, *shared, info, script_name, line_num, column_num));
}
GDBJIT(AddCode(Handle<String>(shared->DebugName()),
diff --git a/chromium/v8/src/compiler.h b/chromium/v8/src/compiler.h
index 080907e390c..6531474a17d 100644
--- a/chromium/v8/src/compiler.h
+++ b/chromium/v8/src/compiler.h
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_COMPILER_H_
#define V8_COMPILER_H_
-#include "allocation.h"
-#include "ast.h"
-#include "zone.h"
+#include "src/allocation.h"
+#include "src/ast.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
-class ScriptDataImpl;
+class ScriptData;
class HydrogenCodeStub;
// ParseRestriction is used to restrict the set of valid statements in a
@@ -45,6 +22,12 @@ enum ParseRestriction {
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
+enum CachedDataMode {
+ NO_CACHED_DATA,
+ CONSUME_CACHED_DATA,
+ PRODUCE_CACHED_DATA
+};
+
struct OffsetRange {
OffsetRange(int from, int to) : from(from), to(to) {}
int from;
@@ -66,12 +49,7 @@ class CompilationInfo {
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
- bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
- bool is_extended_mode() const { return language_mode() == EXTENDED_MODE; }
- LanguageMode language_mode() const {
- return LanguageModeField::decode(flags_);
- }
- bool is_in_loop() const { return IsInLoop::decode(flags_); }
+ StrictMode strict_mode() const { return StrictModeField::decode(flags_); }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
Scope* global_scope() const { return global_scope_; }
@@ -81,10 +59,13 @@ class CompilationInfo {
Handle<Script> script() const { return script_; }
HydrogenCodeStub* code_stub() const {return code_stub_; }
v8::Extension* extension() const { return extension_; }
- ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
+ ScriptData** cached_data() const { return cached_data_; }
+ CachedDataMode cached_data_mode() const {
+ return cached_data_mode_;
+ }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
- uint32_t osr_pc_offset() const { return osr_pc_offset_; }
+ Handle<Code> unoptimized_code() const { return unoptimized_code_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_heap_slots() const;
@@ -102,15 +83,16 @@ class CompilationInfo {
ASSERT(IsStub());
parameter_count_ = parameter_count;
}
- void SetLanguageMode(LanguageMode language_mode) {
- ASSERT(this->language_mode() == CLASSIC_MODE ||
- this->language_mode() == language_mode ||
- language_mode == EXTENDED_MODE);
- flags_ = LanguageModeField::update(flags_, language_mode);
+
+  void set_this_has_uses(bool has_uses) {
+    this_has_uses_ = has_uses;
+ }
+ bool this_has_uses() {
+ return this_has_uses_;
}
- void MarkAsInLoop() {
- ASSERT(is_lazy());
- flags_ |= IsInLoop::encode(true);
+ void SetStrictMode(StrictMode strict_mode) {
+ ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
+ flags_ = StrictModeField::update(flags_, strict_mode);
}
void MarkAsNative() {
flags_ |= IsNative::encode(true);
@@ -156,6 +138,26 @@ class CompilationInfo {
return RequiresFrame::decode(flags_);
}
+ void MarkMustNotHaveEagerFrame() {
+ flags_ |= MustNotHaveEagerFrame::encode(true);
+ }
+
+ bool GetMustNotHaveEagerFrame() const {
+ return MustNotHaveEagerFrame::decode(flags_);
+ }
+
+ void MarkAsDebug() {
+ flags_ |= IsDebug::encode(true);
+ }
+
+ bool is_debug() const {
+ return IsDebug::decode(flags_);
+ }
+
+ bool IsCodePreAgingActive() const {
+ return FLAG_optimize_for_size && FLAG_age_code && !is_debug();
+ }
+
void SetParseRestriction(ParseRestriction restriction) {
flags_ = ParseRestricitonField::update(flags_, restriction);
}
@@ -168,39 +170,42 @@ class CompilationInfo {
ASSERT(function_ == NULL);
function_ = literal;
}
- void SetScope(Scope* scope) {
- ASSERT(scope_ == NULL);
- scope_ = scope;
- }
+ void PrepareForCompilation(Scope* scope);
void SetGlobalScope(Scope* global_scope) {
ASSERT(global_scope_ == NULL);
global_scope_ = global_scope;
}
+ Handle<FixedArray> feedback_vector() const {
+ return feedback_vector_;
+ }
void SetCode(Handle<Code> code) { code_ = code; }
void SetExtension(v8::Extension* extension) {
ASSERT(!is_lazy());
extension_ = extension;
}
- void SetPreParseData(ScriptDataImpl* pre_parse_data) {
- ASSERT(!is_lazy());
- pre_parse_data_ = pre_parse_data;
+ void SetCachedData(ScriptData** cached_data,
+ CachedDataMode cached_data_mode) {
+ cached_data_mode_ = cached_data_mode;
+ if (cached_data_mode == NO_CACHED_DATA) {
+ cached_data_ = NULL;
+ } else {
+ ASSERT(!is_lazy());
+ cached_data_ = cached_data;
+ }
}
void SetContext(Handle<Context> context) {
context_ = context;
}
- void MarkCompilingForDebugging(Handle<Code> current_code) {
- ASSERT(mode_ != OPTIMIZE);
- ASSERT(current_code->kind() == Code::FUNCTION);
+
+ void MarkCompilingForDebugging() {
flags_ |= IsCompilingForDebugging::encode(true);
- if (current_code->is_compiled_optimizable()) {
- EnableDeoptimizationSupport();
- } else {
- mode_ = CompilationInfo::NONOPT;
- }
}
bool IsCompilingForDebugging() {
return IsCompilingForDebugging::decode(flags_);
}
+ void MarkNonOptimizable() {
+ SetMode(CompilationInfo::NONOPT);
+ }
bool ShouldTrapOnDeopt() const {
return (FLAG_trap_on_deopt && IsOptimizing()) ||
@@ -220,9 +225,12 @@ class CompilationInfo {
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
bool IsStub() const { return mode_ == STUB; }
- void SetOptimizing(BailoutId osr_ast_id) {
+ void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
+ ASSERT(!shared_info_.is_null());
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
+ unoptimized_code_ = unoptimized;
+ optimization_id_ = isolate()->NextOptimizationId();
}
void DisableOptimization();
@@ -238,11 +246,6 @@ class CompilationInfo {
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
- // Reset code to the unoptimized version when optimization is aborted.
- void AbortOptimization() {
- SetCode(handle(shared_info()->code()));
- }
-
void set_deferred_handles(DeferredHandles* deferred_handles) {
ASSERT(deferred_handles_ == NULL);
deferred_handles_ = deferred_handles;
@@ -265,6 +268,7 @@ class CompilationInfo {
SaveHandle(&shared_info_);
SaveHandle(&context_);
SaveHandle(&script_);
+ SaveHandle(&unoptimized_code_);
}
BailoutReason bailout_reason() const { return bailout_reason_; }
@@ -311,13 +315,11 @@ class CompilationInfo {
return abort_due_to_dependency_;
}
- void set_osr_pc_offset(uint32_t pc_offset) {
- osr_pc_offset_ = pc_offset;
+ bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
+ return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_);
}
- bool HasSameOsrEntry(Handle<JSFunction> function, uint32_t pc_offset) {
- return osr_pc_offset_ == pc_offset && function.is_identical_to(closure_);
- }
+ int optimization_id() const { return optimization_id_; }
protected:
CompilationInfo(Handle<Script> script,
@@ -358,29 +360,31 @@ class CompilationInfo {
// Flags that can be set for eager compilation.
class IsEval: public BitField<bool, 1, 1> {};
class IsGlobal: public BitField<bool, 2, 1> {};
- // Flags that can be set for lazy compilation.
- class IsInLoop: public BitField<bool, 3, 1> {};
+ // If the function is being compiled for the debugger.
+ class IsDebug: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
- class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
+ class StrictModeField: public BitField<StrictMode, 4, 1> {};
// Is this a function from our natives.
- class IsNative: public BitField<bool, 6, 1> {};
+ class IsNative: public BitField<bool, 5, 1> {};
   // Is this code being compiled with support for deoptimization.
- class SupportsDeoptimization: public BitField<bool, 7, 1> {};
+ class SupportsDeoptimization: public BitField<bool, 6, 1> {};
   // If compiling for debugging, produce just full code matching the
// initial mode setting.
- class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
+ class IsCompilingForDebugging: public BitField<bool, 7, 1> {};
// If the compiled code contains calls that require building a frame
- class IsCalling: public BitField<bool, 9, 1> {};
+ class IsCalling: public BitField<bool, 8, 1> {};
   // If the compiled code contains deferred calls that require building a frame
- class IsDeferredCalling: public BitField<bool, 10, 1> {};
+ class IsDeferredCalling: public BitField<bool, 9, 1> {};
   // If the compiled code contains non-deferred calls that require building a frame
- class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
+ class IsNonDeferredCalling: public BitField<bool, 10, 1> {};
// If the compiled code saves double caller registers that it clobbers.
- class SavesCallerDoubles: public BitField<bool, 12, 1> {};
+ class SavesCallerDoubles: public BitField<bool, 11, 1> {};
// If the set of valid statements is restricted.
- class ParseRestricitonField: public BitField<ParseRestriction, 13, 1> {};
+ class ParseRestricitonField: public BitField<ParseRestriction, 12, 1> {};
// If the function requires a frame (for unspecified reasons)
- class RequiresFrame: public BitField<bool, 14, 1> {};
+ class RequiresFrame: public BitField<bool, 13, 1> {};
+ // If the function cannot build a frame (for unspecified reasons)
+ class MustNotHaveEagerFrame: public BitField<bool, 14, 1> {};
unsigned flags_;
@@ -404,18 +408,23 @@ class CompilationInfo {
// Fields possibly needed for eager compilation, NULL by default.
v8::Extension* extension_;
- ScriptDataImpl* pre_parse_data_;
+ ScriptData** cached_data_;
+ CachedDataMode cached_data_mode_;
// The context of the caller for eval code, and the global context for a
// global script. Will be a null handle otherwise.
Handle<Context> context_;
+ // Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
+ Handle<FixedArray> feedback_vector_;
+
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
BailoutId osr_ast_id_;
- // The pc_offset corresponding to osr_ast_id_ in unoptimized code.
- // We can look this up in the back edge table, but cache it for quick access.
- uint32_t osr_pc_offset_;
+ // The unoptimized code we patched for OSR may not be the shared code
+ // afterwards, since we may need to compile it again to include deoptimization
+  // data. Keep track of which code we patched.
+ Handle<Code> unoptimized_code_;
// Flag whether compilation needs to be aborted due to dependency change.
bool abort_due_to_dependency_;
@@ -449,8 +458,12 @@ class CompilationInfo {
// Number of parameters used for compilation of stubs that require arguments.
int parameter_count_;
+ bool this_has_uses_;
+
Handle<Foreign> object_wrapper_;
+ int optimization_id_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
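
Every flag above shares the single unsigned flags_ word through BitField<Type, shift, size>. A minimal sketch of the encode/decode pattern, mirroring StrictModeField (bit 4, width 1) as used by SetStrictMode and strict_mode() above:

    unsigned flags = 0;
    flags = StrictModeField::update(flags, STRICT);    // write bit 4
    StrictMode mode = StrictModeField::decode(flags);  // read it back
    ASSERT(mode == STRICT);
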
@@ -511,9 +524,9 @@ class LChunk;
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
-class RecompileJob: public ZoneObject {
+class OptimizedCompileJob: public ZoneObject {
public:
- explicit RecompileJob(CompilationInfo* info)
+ explicit OptimizedCompileJob(CompilationInfo* info)
: info_(info),
graph_builder_(NULL),
graph_(NULL),
@@ -527,14 +540,23 @@ class RecompileJob: public ZoneObject {
MUST_USE_RESULT Status CreateGraph();
MUST_USE_RESULT Status OptimizeGraph();
- MUST_USE_RESULT Status GenerateAndInstallCode();
+ MUST_USE_RESULT Status GenerateCode();
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
- MUST_USE_RESULT Status AbortOptimization() {
- info_->AbortOptimization();
+ MUST_USE_RESULT Status AbortOptimization(
+ BailoutReason reason = kNoReason) {
+ if (reason != kNoReason) info_->set_bailout_reason(reason);
+ return SetLastStatus(BAILED_OUT);
+ }
+
+ MUST_USE_RESULT Status AbortAndDisableOptimization(
+ BailoutReason reason = kNoReason) {
+ if (reason != kNoReason) info_->set_bailout_reason(reason);
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
info_->shared_info()->DisableOptimization(info_->bailout_reason());
return SetLastStatus(BAILED_OUT);
}
@@ -564,7 +586,7 @@ class RecompileJob: public ZoneObject {
void RecordOptimizationStats();
struct Timer {
- Timer(RecompileJob* job, TimeDelta* location)
+ Timer(OptimizedCompileJob* job, TimeDelta* location)
: job_(job), location_(location) {
ASSERT(location_ != NULL);
timer_.Start();
@@ -574,7 +596,7 @@ class RecompileJob: public ZoneObject {
*location_ += timer_.Elapsed();
}
- RecompileJob* job_;
+ OptimizedCompileJob* job_;
ElapsedTimer timer_;
TimeDelta* location_;
};
@@ -594,56 +616,56 @@ class RecompileJob: public ZoneObject {
class Compiler : public AllStatic {
public:
- // Call count before primitive functions trigger their own optimization.
- static const int kCallsUntilPrimitiveOpt = 200;
-
- // All routines return a SharedFunctionInfo.
- // If an error occurs an exception is raised and the return handle
- // contains NULL.
+ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
+ Handle<JSFunction> function);
+ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
+ Handle<SharedFunctionInfo> shared);
+ static bool EnsureCompiled(Handle<JSFunction> function,
+ ClearExceptionFlag flag);
+ MUST_USE_RESULT static MaybeHandle<Code> GetCodeForDebugging(
+ Handle<JSFunction> function);
+
+ static void CompileForLiveEdit(Handle<Script> script);
+
+ // Compile a String source within a context for eval.
+ MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
+ Handle<String> source,
+ Handle<Context> context,
+ StrictMode strict_mode,
+ ParseRestriction restriction,
+ int scope_position);
// Compile a String source within a context.
- static Handle<SharedFunctionInfo> Compile(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag is_natives_code);
-
- // Compile a String source within a context for Eval.
- static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- LanguageMode language_mode,
- ParseRestriction restriction,
- int scope_position);
-
- // Compile from function info (used for lazy compilation). Returns true on
- // success and false if the compilation resulted in a stack overflow.
- static bool CompileLazy(CompilationInfo* info);
-
- static bool RecompileConcurrent(Handle<JSFunction> function,
- uint32_t osr_pc_offset = 0);
-
- // Compile a shared function info object (the function is possibly lazily
- // compiled).
+ static Handle<SharedFunctionInfo> CompileScript(
+ Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ bool is_shared_cross_origin,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptData** cached_data,
+ CachedDataMode cached_data_mode,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object (the code may be lazily compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
Handle<Script> script);
- // Set the function info for a newly compiled function.
- static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
+ enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
- static Handle<Code> InstallOptimizedCode(RecompileJob* job);
+ // Generate and return optimized code or start a concurrent optimization job.
+ // In the latter case, return the InOptimizationQueue builtin. On failure,
+ // return the empty handle.
+ MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCode(
+ Handle<JSFunction> function,
+ Handle<Code> current_code,
+ ConcurrencyMode mode,
+ BailoutId osr_ast_id = BailoutId::None());
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static bool MakeCodeForLiveEdit(CompilationInfo* info);
-#endif
+  // Generate and return code from a previously queued optimization job.
+ // On failure, return the empty handle.
+ static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CompilationInfo* info,
diff --git a/chromium/v8/src/contexts.cc b/chromium/v8/src/contexts.cc
index 710d30aa8ec..cb5e852d7d6 100644
--- a/chromium/v8/src/contexts.cc
+++ b/chromium/v8/src/contexts.cc
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "debug.h"
-#include "scopeinfo.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/bootstrapper.h"
+#include "src/debug.h"
+#include "src/scopeinfo.h"
namespace v8 {
namespace internal {
@@ -131,9 +108,9 @@ Handle<Object> Context::Lookup(Handle<String> name,
// to only do a local lookup for context extension objects.
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
object->IsJSContextExtensionObject()) {
- *attributes = object->GetLocalPropertyAttribute(*name);
+ *attributes = JSReceiver::GetOwnPropertyAttributes(object, name);
} else {
- *attributes = object->GetPropertyAttribute(*name);
+ *attributes = JSReceiver::GetPropertyAttributes(object, name);
}
if (isolate->has_pending_exception()) return Handle<Object>();
@@ -160,7 +137,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
VariableMode mode;
InitializationFlag init_flag;
- int slot_index = scope_info->ContextSlotIndex(*name, &mode, &init_flag);
+ int slot_index =
+ ScopeInfo::ContextSlotIndex(scope_info, name, &mode, &init_flag);
ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
if (FLAG_trace_contexts) {
@@ -185,12 +163,12 @@ Handle<Object> Context::Lookup(Handle<String> name,
*binding_flags = (init_flag == kNeedsInitialization)
? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
break;
- case CONST:
+ case CONST_LEGACY:
*attributes = READ_ONLY;
*binding_flags = (init_flag == kNeedsInitialization)
? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
break;
- case CONST_HARMONY:
+ case CONST:
*attributes = READ_ONLY;
*binding_flags = (init_flag == kNeedsInitialization)
? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
@@ -222,8 +200,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
*index = function_index;
*attributes = READ_ONLY;
- ASSERT(mode == CONST || mode == CONST_HARMONY);
- *binding_flags = (mode == CONST)
+ ASSERT(mode == CONST_LEGACY || mode == CONST);
+ *binding_flags = (mode == CONST_LEGACY)
? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
return context;
}
@@ -231,7 +209,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
} else if (context->IsCatchContext()) {
// Catch contexts have the variable name in the extension slot.
- if (name->Equals(String::cast(context->extension()))) {
+ if (String::Equals(name, handle(String::cast(context->extension())))) {
if (FLAG_trace_contexts) {
PrintF("=> found in catch context\n");
}
@@ -365,11 +343,11 @@ Object* Context::DeoptimizedCodeListHead() {
Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
- Handle<Object> result(error_message_for_code_gen_from_strings(),
- GetIsolate());
+ Isolate* isolate = GetIsolate();
+ Handle<Object> result(error_message_for_code_gen_from_strings(), isolate);
if (!result->IsUndefined()) return result;
- return GetIsolate()->factory()->NewStringFromAscii(i::CStrVector(
- "Code generation from strings disallowed for this context"));
+ return isolate->factory()->NewStringFromStaticAscii(
+ "Code generation from strings disallowed for this context");
}
diff --git a/chromium/v8/src/contexts.h b/chromium/v8/src/contexts.h
index 1b857c30207..1ee5a6f8913 100644
--- a/chromium/v8/src/contexts.h
+++ b/chromium/v8/src/contexts.h
@@ -1,35 +1,12 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_
-#include "heap.h"
-#include "objects.h"
+#include "src/heap.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
@@ -131,21 +108,36 @@ enum BindingFlags {
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
- V(FLOAT_ARRAY_FUN_INDEX, JSFunction, float_array_fun) \
- V(DOUBLE_ARRAY_FUN_INDEX, JSFunction, double_array_fun) \
- V(UINT8C_ARRAY_FUN_INDEX, JSFunction, uint8c_array_fun) \
+ V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
+ V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
+ V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
+ V(INT8_ARRAY_EXTERNAL_MAP_INDEX, Map, int8_array_external_map) \
+ V(UINT8_ARRAY_EXTERNAL_MAP_INDEX, Map, uint8_array_external_map) \
+ V(INT16_ARRAY_EXTERNAL_MAP_INDEX, Map, int16_array_external_map) \
+ V(UINT16_ARRAY_EXTERNAL_MAP_INDEX, Map, uint16_array_external_map) \
+ V(INT32_ARRAY_EXTERNAL_MAP_INDEX, Map, int32_array_external_map) \
+ V(UINT32_ARRAY_EXTERNAL_MAP_INDEX, Map, uint32_array_external_map) \
+ V(FLOAT32_ARRAY_EXTERNAL_MAP_INDEX, Map, float32_array_external_map) \
+ V(FLOAT64_ARRAY_EXTERNAL_MAP_INDEX, Map, float64_array_external_map) \
+ V(UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX, Map, \
+ uint8_clamped_array_external_map) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
- V(FUNCTION_MAP_INDEX, Map, function_map) \
- V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
- V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
- V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
- strict_mode_function_without_prototype_map) \
+ V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
+ V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
+ sloppy_function_with_readonly_prototype_map) \
+ V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
+ V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ sloppy_function_without_prototype_map) \
+ V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ strict_function_without_prototype_map) \
+ V(BOUND_FUNCTION_MAP_INDEX, Map, bound_function_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
- V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
+ V(SLOPPY_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+ sloppy_arguments_boilerplate) \
V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
aliased_arguments_boilerplate) \
- V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
- strict_mode_arguments_boilerplate) \
+ V(STRICT_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+ strict_arguments_boilerplate) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
@@ -160,13 +152,18 @@ enum BindingFlags {
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
- V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
- V(RUN_MICROTASKS_INDEX, JSFunction, run_microtasks) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
+ V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
+ V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
+ V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
@@ -179,12 +176,20 @@ enum BindingFlags {
observers_begin_perform_splice) \
V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \
observers_end_perform_splice) \
- V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
- V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
- strict_mode_generator_function_map) \
+ V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, \
+ native_object_observe) \
+ V(NATIVE_OBJECT_GET_NOTIFIER_INDEX, JSFunction, \
+ native_object_get_notifier) \
+ V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction, \
+ native_object_notifier_perform_change) \
+ V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
+ V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
generator_object_prototype_map) \
- V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map)
+ V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
+ V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
+ V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
+ V(ITERATOR_SYMBOL_INDEX, Symbol, iterator_symbol)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@@ -225,8 +230,11 @@ enum BindingFlags {
// In addition, function contexts may have statically allocated context slots
// to store local variables/functions that are accessed from inner functions
// (via static context addresses) or through 'eval' (dynamic context lookups).
-// Finally, the native context contains additional slots for fast access to
-// native properties.
+// The native context contains additional slots for fast access to native
+// properties.
+//
+// Finally, with Harmony scoping, the JSFunction representing a top level
+// script will have the GlobalContext rather than a FunctionContext.
class Context: public FixedArray {
public:
@@ -255,14 +263,16 @@ class Context: public FixedArray {
// These slots are only in native contexts.
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
- ARGUMENTS_BOILERPLATE_INDEX,
+ SLOPPY_ARGUMENTS_BOILERPLATE_INDEX,
ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
- STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
+ STRICT_ARGUMENTS_BOILERPLATE_INDEX,
REGEXP_RESULT_MAP_INDEX,
- FUNCTION_MAP_INDEX,
- STRICT_MODE_FUNCTION_MAP_INDEX,
- FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ SLOPPY_FUNCTION_MAP_INDEX,
+ SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX,
+ STRICT_FUNCTION_MAP_INDEX,
+ SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ BOUND_FUNCTION_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
INITIAL_ARRAY_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
@@ -296,9 +306,18 @@ class Context: public FixedArray {
INT16_ARRAY_FUN_INDEX,
UINT32_ARRAY_FUN_INDEX,
INT32_ARRAY_FUN_INDEX,
- FLOAT_ARRAY_FUN_INDEX,
- DOUBLE_ARRAY_FUN_INDEX,
- UINT8C_ARRAY_FUN_INDEX,
+ FLOAT32_ARRAY_FUN_INDEX,
+ FLOAT64_ARRAY_FUN_INDEX,
+ UINT8_CLAMPED_ARRAY_FUN_INDEX,
+ INT8_ARRAY_EXTERNAL_MAP_INDEX,
+ UINT8_ARRAY_EXTERNAL_MAP_INDEX,
+ INT16_ARRAY_EXTERNAL_MAP_INDEX,
+ UINT16_ARRAY_EXTERNAL_MAP_INDEX,
+ INT32_ARRAY_EXTERNAL_MAP_INDEX,
+ UINT32_ARRAY_EXTERNAL_MAP_INDEX,
+ FLOAT32_ARRAY_EXTERNAL_MAP_INDEX,
+ FLOAT64_ARRAY_EXTERNAL_MAP_INDEX,
+ UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX,
DATA_VIEW_FUN_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
@@ -318,6 +337,14 @@ class Context: public FixedArray {
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
RUN_MICROTASKS_INDEX,
+ ENQUEUE_MICROTASK_INDEX,
+ IS_PROMISE_INDEX,
+ PROMISE_CREATE_INDEX,
+ PROMISE_RESOLVE_INDEX,
+ PROMISE_REJECT_INDEX,
+ PROMISE_CHAIN_INDEX,
+ PROMISE_CATCH_INDEX,
+ PROMISE_THEN_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
@@ -327,10 +354,16 @@ class Context: public FixedArray {
OBSERVERS_ENQUEUE_SPLICE_INDEX,
OBSERVERS_BEGIN_SPLICE_INDEX,
OBSERVERS_END_SPLICE_INDEX,
- GENERATOR_FUNCTION_MAP_INDEX,
- STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
+ NATIVE_OBJECT_OBSERVE_INDEX,
+ NATIVE_OBJECT_GET_NOTIFIER_INDEX,
+ NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE,
+ SLOPPY_GENERATOR_FUNCTION_MAP_INDEX,
+ STRICT_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
- GENERATOR_RESULT_MAP_INDEX,
+ ITERATOR_RESULT_MAP_INDEX,
+ MAP_ITERATOR_MAP_INDEX,
+ SET_ITERATOR_MAP_INDEX,
+ ITERATOR_SYMBOL_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
@@ -422,11 +455,10 @@ class Context: public FixedArray {
return map == map->GetHeap()->global_context_map();
}
- // Tells whether the native context is marked with out of memory.
- inline bool has_out_of_memory();
-
- // Mark the native context with out of memory.
- inline void mark_out_of_memory();
+ bool HasSameSecurityTokenAs(Context* that) {
+ return this->global_object()->native_context()->security_token() ==
+ that->global_object()->native_context()->security_token();
+ }
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
@@ -488,14 +520,14 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
- static int FunctionMapIndex(LanguageMode language_mode, bool is_generator) {
+ static int FunctionMapIndex(StrictMode strict_mode, bool is_generator) {
return is_generator
- ? (language_mode == CLASSIC_MODE
- ? GENERATOR_FUNCTION_MAP_INDEX
- : STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX)
- : (language_mode == CLASSIC_MODE
- ? FUNCTION_MAP_INDEX
- : STRICT_MODE_FUNCTION_MAP_INDEX);
+ ? (strict_mode == SLOPPY
+ ? SLOPPY_GENERATOR_FUNCTION_MAP_INDEX
+ : STRICT_GENERATOR_FUNCTION_MAP_INDEX)
+ : (strict_mode == SLOPPY
+ ? SLOPPY_FUNCTION_MAP_INDEX
+ : STRICT_FUNCTION_MAP_INDEX);
}
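
To make the selector concrete, here is a hedged sketch of how a caller would pick a map out of the native context (Context inherits get() from FixedArray per the class declaration above; `native_context` is assumed to be a live Context*):

    // Strict-mode, non-generator closure -> STRICT_FUNCTION_MAP_INDEX.
    int index = Context::FunctionMapIndex(STRICT, false);
    Map* function_map = Map::cast(native_context->get(index));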
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
@@ -519,8 +551,8 @@ class Context: public FixedArray {
static bool IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object);
#endif
- STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
- STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
+ STATIC_ASSERT(kHeaderSize == Internals::kContextHeaderSize);
+ STATIC_ASSERT(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/conversions-inl.h b/chromium/v8/src/conversions-inl.h
index 7ba19ba0f1d..27fed95eb59 100644
--- a/chromium/v8/src/conversions-inl.h
+++ b/chromium/v8/src/conversions-inl.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CONVERSIONS_INL_H_
#define V8_CONVERSIONS_INL_H_
@@ -32,16 +9,16 @@
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
#include <stdarg.h>
#include <cmath>
-#include "globals.h" // Required for V8_INFINITY
+#include "src/globals.h" // Required for V8_INFINITY
// ----------------------------------------------------------------------------
// Extra POSIX/ANSI functions for Win32/MSVC.
-#include "conversions.h"
-#include "double.h"
-#include "platform.h"
-#include "scanner.h"
-#include "strtod.h"
+#include "src/conversions.h"
+#include "src/double.h"
+#include "src/platform.h"
+#include "src/scanner.h"
+#include "src/strtod.h"
namespace v8 {
namespace internal {
@@ -75,9 +52,13 @@ inline unsigned int FastD2UI(double x) {
if (x < k2Pow52) {
x += k2Pow52;
uint32_t result;
+#ifndef V8_TARGET_BIG_ENDIAN
Address mantissa_ptr = reinterpret_cast<Address>(&x);
+#else
+ Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
+#endif
// Copy least significant 32 bits of mantissa.
- OS::MemCopy(&result, mantissa_ptr, sizeof(result));
+ memcpy(&result, mantissa_ptr, sizeof(result));
return negative ? ~result + 1 : result;
}
// Large number (outside uint32 range), Infinity or NaN.
@@ -88,7 +69,7 @@ inline unsigned int FastD2UI(double x) {
inline double DoubleToInteger(double x) {
if (std::isnan(x)) return 0;
if (!std::isfinite(x) || x == 0) return x;
- return (x >= 0) ? floor(x) : ceil(x);
+ return (x >= 0) ? std::floor(x) : std::ceil(x);
}
@@ -128,7 +109,7 @@ inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
Iterator* current,
EndMark end) {
while (*current != end) {
- if (!unicode_cache->IsWhiteSpace(**current)) return true;
+ if (!unicode_cache->IsWhiteSpaceOrLineTerminator(**current)) return true;
++*current;
}
return false;
@@ -233,7 +214,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
}
ASSERT(number != 0);
- return ldexp(static_cast<double>(negative ? -number : number), exponent);
+ return std::ldexp(static_cast<double>(negative ? -number : number), exponent);
}
diff --git a/chromium/v8/src/conversions.cc b/chromium/v8/src/conversions.cc
index 397f3c57fb8..4efe9039048 100644
--- a/chromium/v8/src/conversions.cc
+++ b/chromium/v8/src/conversions.cc
@@ -1,39 +1,21 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdarg.h>
#include <limits.h>
#include <cmath>
-#include "conversions-inl.h"
-#include "dtoa.h"
-#include "list-inl.h"
-#include "strtod.h"
-#include "utils.h"
+#include "src/v8.h"
+
+#include "src/assert-scope.h"
+#include "src/conversions.h"
+#include "src/conversions-inl.h"
+#include "src/dtoa.h"
+#include "src/factory.h"
+#include "src/list-inl.h"
+#include "src/strtod.h"
+#include "src/utils.h"
#ifndef _STLP_VENDOR_CSTD
// STLPort doesn't import fpclassify into the std namespace.
@@ -44,6 +26,47 @@ namespace v8 {
namespace internal {
+namespace {
+
+// C++-style iterator adaptor for StringCharacterStream
+// (unlike C++ iterators, the end marker has a different type).
+class StringCharacterStreamIterator {
+ public:
+ class EndMarker {};
+
+ explicit StringCharacterStreamIterator(StringCharacterStream* stream);
+
+ uint16_t operator*() const;
+ void operator++();
+ bool operator==(EndMarker const&) const { return end_; }
+  bool operator!=(EndMarker const&) const { return !end_; }
+
+ private:
+ StringCharacterStream* const stream_;
+ uint16_t current_;
+ bool end_;
+};
+
+
+StringCharacterStreamIterator::StringCharacterStreamIterator(
+ StringCharacterStream* stream) : stream_(stream) {
+ ++(*this);
+}
+
+uint16_t StringCharacterStreamIterator::operator*() const {
+ return current_;
+}
+
+
+void StringCharacterStreamIterator::operator++() {
+ end_ = !stream_->HasMore();
+ if (!end_) {
+ current_ = stream_->GetNext();
+ }
+}
+} // End anonymous namespace.
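
The adaptor is consumed as the comment above describes: iteration ends when the iterator compares equal to a value of the distinct EndMarker type, not to a second iterator. A minimal usage sketch (assuming a live StringCharacterStream* stream; Process() is a hypothetical consumer):

    StringCharacterStreamIterator it(stream);
    while (it != StringCharacterStreamIterator::EndMarker()) {
      uint16_t code_unit = *it;  // current UTF-16 code unit
      Process(code_unit);        // hypothetical consumer
      ++it;
    }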
+
+
double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
// We cast to const uint8_t* here to avoid instantiating the
@@ -56,7 +79,7 @@ double StringToDouble(UnicodeCache* unicode_cache,
double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const char> str,
+ Vector<const uint8_t> str,
int flags,
double empty_string_val) {
// We cast to const uint8_t* here to avoid instantiating the
@@ -78,6 +101,23 @@ double StringToDouble(UnicodeCache* unicode_cache,
}
+// Converts a string into an integer.
+double StringToInt(UnicodeCache* unicode_cache,
+ Vector<const uint8_t> vector,
+ int radix) {
+ return InternalStringToInt(
+ unicode_cache, vector.start(), vector.start() + vector.length(), radix);
+}
+
+
+double StringToInt(UnicodeCache* unicode_cache,
+ Vector<const uc16> vector,
+ int radix) {
+ return InternalStringToInt(
+ unicode_cache, vector.start(), vector.start() + vector.length(), radix);
+}
+
+
const char* DoubleToCString(double v, Vector<char> buffer) {
switch (fpclassify(v)) {
case FP_NAN: return "NaN";
@@ -256,7 +296,6 @@ static char* CreateExponentialRepresentation(char* decimal_rep,
}
-
char* DoubleToExponentialCString(double value, int f) {
const int kMaxDigitsAfterPoint = 20;
// f might be -1 to signal that f was undefined in JavaScript.
@@ -394,14 +433,14 @@ char* DoubleToRadixCString(double value, int radix) {
if (is_negative) value = -value;
// Get the integer part and the decimal part.
- double integer_part = floor(value);
+ double integer_part = std::floor(value);
double decimal_part = value - integer_part;
// Convert the integer part starting from the back. Always generate
// at least one digit.
int integer_pos = kBufferSize - 2;
do {
- double remainder = fmod(integer_part, radix);
+ double remainder = std::fmod(integer_part, radix);
integer_buffer[integer_pos--] = chars[static_cast<int>(remainder)];
integer_part -= remainder;
integer_part /= radix;
@@ -424,8 +463,8 @@ char* DoubleToRadixCString(double value, int radix) {
while ((decimal_part > 0.0) && (decimal_pos < kBufferSize - 1)) {
decimal_part *= radix;
decimal_buffer[decimal_pos++] =
- chars[static_cast<int>(floor(decimal_part))];
- decimal_part -= floor(decimal_part);
+ chars[static_cast<int>(std::floor(decimal_part))];
+ decimal_part -= std::floor(decimal_part);
}
decimal_buffer[decimal_pos] = '\0';
@@ -443,4 +482,22 @@ char* DoubleToRadixCString(double value, int radix) {
return builder.Finalize();
}
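
A worked pass through the two loops above, converting 10.5 to radix 2 (plain arithmetic tracing the code, not new code):

    integer part = 10:
      fmod(10, 2) = 0 -> digit '0', integer part -> 5
      fmod(5, 2)  = 1 -> digit '1', integer part -> 2
      fmod(2, 2)  = 0 -> digit '0', integer part -> 1
      fmod(1, 2)  = 1 -> digit '1', integer part -> 0
      digits were written back-to-front, giving "1010"
    decimal part = 0.5:
      0.5 * 2 = 1.0 -> digit '1', remainder 0.0 -> loop ends
    result: "1010.1"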
+
+double StringToDouble(UnicodeCache* unicode_cache,
+ String* string,
+ int flags,
+ double empty_string_val) {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = string->GetFlatContent();
+ // ECMA-262 section 15.1.2.3, empty string is NaN
+ if (flat.IsAscii()) {
+ return StringToDouble(
+ unicode_cache, flat.ToOneByteVector(), flags, empty_string_val);
+ } else {
+ return StringToDouble(
+ unicode_cache, flat.ToUC16Vector(), flags, empty_string_val);
+ }
+}
+
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/conversions.h b/chromium/v8/src/conversions.h
index 7aa2d3fb3a8..a23ea90fb12 100644
--- a/chromium/v8/src/conversions.h
+++ b/chromium/v8/src/conversions.h
@@ -1,34 +1,16 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_
-#include "utils.h"
+#include <limits>
+
+#include "src/checks.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -72,7 +54,7 @@ inline int FastD2IChecked(double x) {
// The result is unspecified if x is infinite or NaN, or if the rounded
// integer value is outside the range of type int.
inline int FastD2I(double x) {
- return static_cast<int>(x);
+ return static_cast<int32_t>(x);
}
inline unsigned int FastD2UI(double x);
@@ -122,7 +104,7 @@ enum ConversionFlags {
// Converts a string into a double value according to ECMA-262 9.3.1
double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const char> str,
+ Vector<const uint8_t> str,
int flags,
double empty_string_val = 0);
double StringToDouble(UnicodeCache* unicode_cache,
@@ -135,6 +117,16 @@ double StringToDouble(UnicodeCache* unicode_cache,
int flags,
double empty_string_val = 0);
+// Converts a string into an integer.
+double StringToInt(UnicodeCache* unicode_cache,
+ Vector<const uint8_t> vector,
+ int radix);
+
+
+double StringToInt(UnicodeCache* unicode_cache,
+ Vector<const uc16> vector,
+ int radix);
+
const int kDoubleToCStringMinBufferSize = 100;
// Converts a double to a string value according to ECMA-262 9.8.1.
@@ -153,6 +145,88 @@ char* DoubleToExponentialCString(double value, int f);
char* DoubleToPrecisionCString(double value, int f);
char* DoubleToRadixCString(double value, int radix);
+
+static inline bool IsMinusZero(double value) {
+ static const DoubleRepresentation minus_zero(-0.0);
+ return DoubleRepresentation(value) == minus_zero;
+}
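
The bit-level comparison is needed because IEEE-754 equality cannot tell the two zeros apart: -0.0 == 0.0 evaluates to true, while the stored patterns differ.

    //  0.0 -> 0x0000000000000000
    // -0.0 -> 0x8000000000000000
    // Hence IsMinusZero(-0.0) is true and IsMinusZero(0.0) is false,
    // assuming DoubleRepresentation compares the raw 64-bit pattern.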
+
+
+// Integer32 is an integer that can be represented as a signed 32-bit
+// integer. It has to be in the range [-2^31, 2^31 - 1].
+// We also have to check for negative 0 as it is not an Integer32.
+static inline bool IsInt32Double(double value) {
+ return !IsMinusZero(value) &&
+ value >= kMinInt &&
+ value <= kMaxInt &&
+ value == FastI2D(FastD2I(value));
+}
+
+
+// UInteger32 is an integer that can be represented as an unsigned 32-bit
+// integer. It has to be in the range [0, 2^32 - 1].
+// We also have to check for negative 0 as it is not a UInteger32.
+static inline bool IsUint32Double(double value) {
+ return !IsMinusZero(value) &&
+ value >= 0 &&
+ value <= kMaxUInt32 &&
+ value == FastUI2D(FastD2UI(value));
+}
+
+
+// Convert from Number object to C integer.
+inline int32_t NumberToInt32(Object* number) {
+ if (number->IsSmi()) return Smi::cast(number)->value();
+ return DoubleToInt32(number->Number());
+}
+
+
+inline uint32_t NumberToUint32(Object* number) {
+ if (number->IsSmi()) return Smi::cast(number)->value();
+ return DoubleToUint32(number->Number());
+}
+
+
+double StringToDouble(UnicodeCache* unicode_cache,
+ String* string,
+ int flags,
+ double empty_string_val = 0.0);
+
+
+inline bool TryNumberToSize(Isolate* isolate,
+ Object* number, size_t* result) {
+ SealHandleScope shs(isolate);
+ if (number->IsSmi()) {
+ int value = Smi::cast(number)->value();
+ ASSERT(static_cast<unsigned>(Smi::kMaxValue)
+ <= std::numeric_limits<size_t>::max());
+ if (value >= 0) {
+ *result = static_cast<size_t>(value);
+ return true;
+ }
+ return false;
+ } else {
+ ASSERT(number->IsHeapNumber());
+ double value = HeapNumber::cast(number)->value();
+ if (value >= 0 &&
+ value <= std::numeric_limits<size_t>::max()) {
+ *result = static_cast<size_t>(value);
+ return true;
+ } else {
+ return false;
+ }
+ }
+}
+
+// Converts a number into size_t.
+inline size_t NumberToSize(Isolate* isolate,
+ Object* number) {
+ size_t result = 0;
+ bool is_valid = TryNumberToSize(isolate, number, &result);
+ CHECK(is_valid);
+ return result;
+}
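
Callers that must not hit the CHECK use the Try variant and handle failure themselves. A hedged usage sketch (`byte_length` stands in for any Number-valued object; ThrowRangeError is a hypothetical error path):

    size_t length = 0;
    if (!TryNumberToSize(isolate, byte_length, &length)) {
      return ThrowRangeError(isolate);  // hypothetical
    }
    // NumberToSize(isolate, byte_length) would CHECK-fail here instead.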
+
} } // namespace v8::internal
#endif // V8_CONVERSIONS_H_
diff --git a/chromium/v8/src/counters.cc b/chromium/v8/src/counters.cc
index e0a6a60a0a4..cdff8877da2 100644
--- a/chromium/v8/src/counters.cc
+++ b/chromium/v8/src/counters.cc
@@ -1,35 +1,12 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "counters.h"
-#include "isolate.h"
-#include "platform.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/counters.h"
+#include "src/isolate.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
@@ -62,9 +39,7 @@ void HistogramTimer::Start() {
if (Enabled()) {
timer_.Start();
}
- if (FLAG_log_internal_timer_events) {
- LOG(isolate(), TimerEvent(Logger::START, name()));
- }
+ isolate()->event_logger()(name(), Logger::START);
}
@@ -75,9 +50,109 @@ void HistogramTimer::Stop() {
AddSample(static_cast<int>(timer_.Elapsed().InMilliseconds()));
timer_.Stop();
}
- if (FLAG_log_internal_timer_events) {
- LOG(isolate(), TimerEvent(Logger::END, name()));
- }
+ isolate()->event_logger()(name(), Logger::END);
+}
+
+
+Counters::Counters(Isolate* isolate) {
+#define HT(name, caption) \
+ name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate);
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define HP(name, caption) \
+ name##_ = Histogram(#caption, 0, 101, 100, isolate);
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) \
+ name##_ = Histogram(#caption, 1000, 500000, 50, isolate);
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+
+#define SC(name, caption) \
+ name##_ = StatsCounter(isolate, "c:" #caption);
+
+ STATS_COUNTER_LIST_1(SC)
+ STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+#define SC(name) \
+ count_of_##name##_ = StatsCounter(isolate, "c:" "V8.CountOf_" #name); \
+ size_of_##name##_ = StatsCounter(isolate, "c:" "V8.SizeOf_" #name);
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ count_of_CODE_TYPE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.CountOf_CODE_TYPE-" #name); \
+ size_of_CODE_TYPE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.SizeOf_CODE_TYPE-" #name);
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ count_of_FIXED_ARRAY_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.CountOf_FIXED_ARRAY-" #name); \
+ size_of_FIXED_ARRAY_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name);
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ count_of_CODE_AGE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \
+ size_of_CODE_AGE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name);
+ CODE_AGE_LIST_COMPLETE(SC)
+#undef SC
+}
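
The constructor above relies on the X-macro pattern: each *_LIST macro is invoked with a caller-supplied macro that stamps out one declaration or statement per entry. A minimal standalone sketch of the technique (the two-entry list and its expansions are illustrative, not V8's actual lists):

    #define TIMER_LIST(HT)    \
      HT(parse, V8.Parse)     \
      HT(compile, V8.Compile)

    // Declaration site: one member per entry.
    #define HT(name, caption) HistogramTimer name##_;
    TIMER_LIST(HT)  // -> HistogramTimer parse_; HistogramTimer compile_;
    #undef HT

    // Initialization site: one assignment per entry.
    #define HT(name, caption) \
      name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate);
    TIMER_LIST(HT)
    #undef HT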
+
+
+void Counters::ResetCounters() {
+#define SC(name, caption) name##_.Reset();
+ STATS_COUNTER_LIST_1(SC)
+ STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+#define SC(name) \
+ count_of_##name##_.Reset(); \
+ size_of_##name##_.Reset();
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ count_of_CODE_TYPE_##name##_.Reset(); \
+ size_of_CODE_TYPE_##name##_.Reset();
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ count_of_FIXED_ARRAY_##name##_.Reset(); \
+ size_of_FIXED_ARRAY_##name##_.Reset();
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ count_of_CODE_AGE_##name##_.Reset(); \
+ size_of_CODE_AGE_##name##_.Reset();
+ CODE_AGE_LIST_COMPLETE(SC)
+#undef SC
+}
+
+
+void Counters::ResetHistograms() {
+#define HT(name, caption) name##_.Reset();
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define HP(name, caption) name##_.Reset();
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) name##_.Reset();
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
}
} } // namespace v8::internal
diff --git a/chromium/v8/src/counters.h b/chromium/v8/src/counters.h
index 821c25f8cec..a7d00dcc80f 100644
--- a/chromium/v8/src/counters.h
+++ b/chromium/v8/src/counters.h
@@ -1,35 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_COUNTERS_H_
#define V8_COUNTERS_H_
-#include "../include/v8.h"
-#include "allocation.h"
+#include "include/v8.h"
+#include "src/allocation.h"
+#include "src/globals.h"
+#include "src/objects.h"
+#include "src/platform/elapsed-timer.h"
namespace v8 {
namespace internal {
@@ -163,6 +143,9 @@ class StatsCounter {
return loc;
}
+ // Reset the cached internal pointer.
+ void Reset() { lookup_done_ = false; }
+
protected:
// Returns the cached address of this counter location.
int* GetPtr() {
@@ -285,11 +268,12 @@ class HistogramTimerScope BASE_EMBEDDED {
} else {
timer_->Start();
}
+ }
#else
: timer_(timer) {
timer_->Start();
-#endif
}
+#endif
~HistogramTimerScope() {
#ifdef DEBUG
if (!skipped_timer_start_) {
@@ -299,6 +283,7 @@ class HistogramTimerScope BASE_EMBEDDED {
timer_->Stop();
#endif
}
+
private:
HistogramTimer* timer_;
#ifdef DEBUG
@@ -307,6 +292,402 @@ class HistogramTimerScope BASE_EMBEDDED {
};
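
HistogramTimerScope drives Start()/Stop() RAII-style, so a sample is recorded even on early returns. A hedged usage sketch (`isolate->counters()->compile()` follows the accessor pattern generated below; DoCompile() is a hypothetical workload):

    {
      HistogramTimerScope timer_scope(isolate->counters()->compile());
      DoCompile();  // hypothetical work being measured
    }  // destructor calls Stop() and records the elapsed time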
+#define HISTOGRAM_TIMER_LIST(HT) \
+ /* Garbage collection timers. */ \
+ HT(gc_compactor, V8.GCCompactor) \
+ HT(gc_scavenger, V8.GCScavenger) \
+ HT(gc_context, V8.GCContext) /* GC context cleanup time */ \
+ /* Parsing timers. */ \
+ HT(parse, V8.Parse) \
+ HT(parse_lazy, V8.ParseLazy) \
+ HT(pre_parse, V8.PreParse) \
+ /* Total compilation times. */ \
+ HT(compile, V8.Compile) \
+ HT(compile_eval, V8.CompileEval) \
+ HT(compile_lazy, V8.CompileLazy)
+
+#define HISTOGRAM_PERCENTAGE_LIST(HP) \
+ /* Heap fragmentation. */ \
+ HP(external_fragmentation_total, \
+ V8.MemoryExternalFragmentationTotal) \
+ HP(external_fragmentation_old_pointer_space, \
+ V8.MemoryExternalFragmentationOldPointerSpace) \
+ HP(external_fragmentation_old_data_space, \
+ V8.MemoryExternalFragmentationOldDataSpace) \
+ HP(external_fragmentation_code_space, \
+ V8.MemoryExternalFragmentationCodeSpace) \
+ HP(external_fragmentation_map_space, \
+ V8.MemoryExternalFragmentationMapSpace) \
+ HP(external_fragmentation_cell_space, \
+ V8.MemoryExternalFragmentationCellSpace) \
+ HP(external_fragmentation_property_cell_space, \
+ V8.MemoryExternalFragmentationPropertyCellSpace) \
+ HP(external_fragmentation_lo_space, \
+ V8.MemoryExternalFragmentationLoSpace) \
+ /* Percentages of heap committed to each space. */ \
+ HP(heap_fraction_new_space, \
+ V8.MemoryHeapFractionNewSpace) \
+ HP(heap_fraction_old_pointer_space, \
+ V8.MemoryHeapFractionOldPointerSpace) \
+ HP(heap_fraction_old_data_space, \
+ V8.MemoryHeapFractionOldDataSpace) \
+ HP(heap_fraction_code_space, \
+ V8.MemoryHeapFractionCodeSpace) \
+ HP(heap_fraction_map_space, \
+ V8.MemoryHeapFractionMapSpace) \
+ HP(heap_fraction_cell_space, \
+ V8.MemoryHeapFractionCellSpace) \
+ HP(heap_fraction_property_cell_space, \
+ V8.MemoryHeapFractionPropertyCellSpace) \
+ HP(heap_fraction_lo_space, \
+ V8.MemoryHeapFractionLoSpace) \
+ /* Percentage of crankshafted codegen. */ \
+ HP(codegen_fraction_crankshaft, \
+ V8.CodegenFractionCrankshaft) \
+
+
+#define HISTOGRAM_MEMORY_LIST(HM) \
+ HM(heap_sample_total_committed, V8.MemoryHeapSampleTotalCommitted) \
+ HM(heap_sample_total_used, V8.MemoryHeapSampleTotalUsed) \
+ HM(heap_sample_map_space_committed, \
+ V8.MemoryHeapSampleMapSpaceCommitted) \
+ HM(heap_sample_cell_space_committed, \
+ V8.MemoryHeapSampleCellSpaceCommitted) \
+ HM(heap_sample_property_cell_space_committed, \
+ V8.MemoryHeapSamplePropertyCellSpaceCommitted) \
+ HM(heap_sample_code_space_committed, \
+ V8.MemoryHeapSampleCodeSpaceCommitted) \
+ HM(heap_sample_maximum_committed, \
+ V8.MemoryHeapSampleMaximumCommitted) \
+
+
+// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
+// Intellisense to crash. It was broken into two macros (each of length 40
+// lines) rather than one macro (of length about 80 lines) to work around
+// this problem. Please avoid using recursive macros of this length when
+// possible.
+#define STATS_COUNTER_LIST_1(SC) \
+ /* Global Handle Count*/ \
+ SC(global_handles, V8.GlobalHandles) \
+ /* OS Memory allocated */ \
+ SC(memory_allocated, V8.OsMemoryAllocated) \
+ SC(normalized_maps, V8.NormalizedMaps) \
+ SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
+ SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
+ SC(alive_after_last_gc, V8.AliveAfterLastGC) \
+ SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
+ SC(objs_since_last_full, V8.ObjsSinceLastFull) \
+ SC(string_table_capacity, V8.StringTableCapacity) \
+ SC(number_of_symbols, V8.NumberOfSymbols) \
+ SC(script_wrappers, V8.ScriptWrappers) \
+ SC(call_initialize_stubs, V8.CallInitializeStubs) \
+ SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
+ SC(call_normal_stubs, V8.CallNormalStubs) \
+ SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
+ SC(inlined_copied_elements, V8.InlinedCopiedElements) \
+ SC(arguments_adaptors, V8.ArgumentsAdaptors) \
+ SC(compilation_cache_hits, V8.CompilationCacheHits) \
+ SC(compilation_cache_misses, V8.CompilationCacheMisses) \
+ SC(string_ctor_calls, V8.StringConstructorCalls) \
+ SC(string_ctor_conversions, V8.StringConstructorConversions) \
+ SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \
+ SC(string_ctor_string_value, V8.StringConstructorStringValue) \
+ SC(string_ctor_gc_required, V8.StringConstructorGCRequired) \
+ /* Amount of evaled source code. */ \
+ SC(total_eval_size, V8.TotalEvalSize) \
+ /* Amount of loaded source code. */ \
+ SC(total_load_size, V8.TotalLoadSize) \
+ /* Amount of parsed source code. */ \
+ SC(total_parse_size, V8.TotalParseSize) \
+ /* Amount of source code skipped over using preparsing. */ \
+ SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
+ /* Number of symbol lookups skipped using preparsing */ \
+ SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \
+ /* Amount of compiled source code. */ \
+ SC(total_compile_size, V8.TotalCompileSize) \
+ /* Amount of source code compiled with the full codegen. */ \
+ SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) \
+ /* Number of contexts created from scratch. */ \
+ SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
+ /* Number of contexts created by partial snapshot. */ \
+ SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
+ /* Number of code objects found from pc. */ \
+ SC(pc_to_code, V8.PcToCode) \
+ SC(pc_to_code_cached, V8.PcToCodeCached) \
+ /* The store-buffer implementation of the write barrier. */ \
+ SC(store_buffer_compactions, V8.StoreBufferCompactions) \
+ SC(store_buffer_overflows, V8.StoreBufferOverflows)
+
+
+#define STATS_COUNTER_LIST_2(SC) \
+ /* Number of code stubs. */ \
+ SC(code_stubs, V8.CodeStubs) \
+ /* Amount of stub code. */ \
+ SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
+ /* Amount of (JS) compiled code. */ \
+ SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
+ SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
+ SC(gc_compactor_caused_by_promoted_data, \
+ V8.GCCompactorCausedByPromotedData) \
+ SC(gc_compactor_caused_by_oldspace_exhaustion, \
+ V8.GCCompactorCausedByOldspaceExhaustion) \
+ SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
+ SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
+ /* How is the generic keyed-load stub used? */ \
+ SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
+ SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
+ SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
+ SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
+ SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs) \
+ SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
+ /* How is the generic keyed-call stub used? */ \
+ SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
+ SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
+ SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
+ SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
+ SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
+ SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
+ SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
+ SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
+ SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
+ SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \
+ SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
+ SC(store_normal_miss, V8.StoreNormalMiss) \
+ SC(store_normal_hit, V8.StoreNormalHit) \
+ SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \
+ SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
+ SC(cow_arrays_converted, V8.COWArraysConverted) \
+ SC(call_miss, V8.CallMiss) \
+ SC(keyed_call_miss, V8.KeyedCallMiss) \
+ SC(load_miss, V8.LoadMiss) \
+ SC(keyed_load_miss, V8.KeyedLoadMiss) \
+ SC(call_const, V8.CallConst) \
+ SC(call_const_fast_api, V8.CallConstFastApi) \
+ SC(call_const_interceptor, V8.CallConstInterceptor) \
+ SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
+ SC(call_global_inline, V8.CallGlobalInline) \
+ SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
+ SC(constructed_objects, V8.ConstructedObjects) \
+ SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
+ SC(negative_lookups, V8.NegativeLookups) \
+ SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
+ SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
+ SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \
+ SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \
+ SC(array_function_runtime, V8.ArrayFunctionRuntime) \
+ SC(array_function_native, V8.ArrayFunctionNative) \
+ SC(for_in, V8.ForIn) \
+ SC(enum_cache_hits, V8.EnumCacheHits) \
+ SC(enum_cache_misses, V8.EnumCacheMisses) \
+ SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
+ SC(fast_new_closure_total, V8.FastNewClosureTotal) \
+ SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
+ SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
+ SC(string_add_runtime, V8.StringAddRuntime) \
+ SC(string_add_native, V8.StringAddNative) \
+ SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \
+ SC(sub_string_runtime, V8.SubStringRuntime) \
+ SC(sub_string_native, V8.SubStringNative) \
+ SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \
+ SC(string_compare_native, V8.StringCompareNative) \
+ SC(string_compare_runtime, V8.StringCompareRuntime) \
+ SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
+ SC(regexp_entry_native, V8.RegExpEntryNative) \
+ SC(number_to_string_native, V8.NumberToStringNative) \
+ SC(number_to_string_runtime, V8.NumberToStringRuntime) \
+ SC(math_acos, V8.MathAcos) \
+ SC(math_asin, V8.MathAsin) \
+ SC(math_atan, V8.MathAtan) \
+ SC(math_atan2, V8.MathAtan2) \
+ SC(math_exp, V8.MathExp) \
+ SC(math_floor, V8.MathFloor) \
+ SC(math_log, V8.MathLog) \
+ SC(math_pow, V8.MathPow) \
+ SC(math_round, V8.MathRound) \
+ SC(math_sqrt, V8.MathSqrt) \
+ SC(stack_interrupts, V8.StackInterrupts) \
+ SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
+ SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \
+ SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \
+ SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
+ SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \
+ SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
+ /* Number of write barriers in generated code. */ \
+ SC(write_barriers_dynamic, V8.WriteBarriersDynamic) \
+ SC(write_barriers_static, V8.WriteBarriersStatic) \
+ SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
+ SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
+ SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
+ SC(old_pointer_space_bytes_available, \
+ V8.MemoryOldPointerSpaceBytesAvailable) \
+ SC(old_pointer_space_bytes_committed, \
+ V8.MemoryOldPointerSpaceBytesCommitted) \
+ SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \
+ SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \
+ SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \
+ SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed) \
+ SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable) \
+ SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted) \
+ SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed) \
+ SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable) \
+ SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted) \
+ SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \
+ SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
+ SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
+ SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
+ SC(property_cell_space_bytes_available, \
+ V8.MemoryPropertyCellSpaceBytesAvailable) \
+ SC(property_cell_space_bytes_committed, \
+ V8.MemoryPropertyCellSpaceBytesCommitted) \
+ SC(property_cell_space_bytes_used, \
+ V8.MemoryPropertyCellSpaceBytesUsed) \
+ SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
+ SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
+ SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
+
+
+// This file contains all the v8 counters that are in use.
+class Counters {
+ public:
+#define HT(name, caption) \
+ HistogramTimer* name() { return &name##_; }
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define HP(name, caption) \
+ Histogram* name() { return &name##_; }
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) \
+ Histogram* name() { return &name##_; }
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+
+#define SC(name, caption) \
+ StatsCounter* name() { return &name##_; }
+ STATS_COUNTER_LIST_1(SC)
+ STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter* count_of_##name() { return &count_of_##name##_; } \
+ StatsCounter* size_of_##name() { return &size_of_##name##_; }
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter* count_of_CODE_TYPE_##name() \
+ { return &count_of_CODE_TYPE_##name##_; } \
+ StatsCounter* size_of_CODE_TYPE_##name() \
+ { return &size_of_CODE_TYPE_##name##_; }
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter* count_of_FIXED_ARRAY_##name() \
+ { return &count_of_FIXED_ARRAY_##name##_; } \
+ StatsCounter* size_of_FIXED_ARRAY_##name() \
+ { return &size_of_FIXED_ARRAY_##name##_; }
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter* count_of_CODE_AGE_##name() \
+ { return &count_of_CODE_AGE_##name##_; } \
+ StatsCounter* size_of_CODE_AGE_##name() \
+ { return &size_of_CODE_AGE_##name##_; }
+ CODE_AGE_LIST_COMPLETE(SC)
+#undef SC
+
+ enum Id {
+#define RATE_ID(name, caption) k_##name,
+ HISTOGRAM_TIMER_LIST(RATE_ID)
+#undef RATE_ID
+#define PERCENTAGE_ID(name, caption) k_##name,
+ HISTOGRAM_PERCENTAGE_LIST(PERCENTAGE_ID)
+#undef PERCENTAGE_ID
+#define MEMORY_ID(name, caption) k_##name,
+ HISTOGRAM_MEMORY_LIST(MEMORY_ID)
+#undef MEMORY_ID
+#define COUNTER_ID(name, caption) k_##name,
+ STATS_COUNTER_LIST_1(COUNTER_ID)
+ STATS_COUNTER_LIST_2(COUNTER_ID)
+#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOf##name, kSizeOf##name,
+ INSTANCE_TYPE_LIST(COUNTER_ID)
+#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfCODE_TYPE_##name, \
+ kSizeOfCODE_TYPE_##name,
+ CODE_KIND_LIST(COUNTER_ID)
+#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfFIXED_ARRAY__##name, \
+ kSizeOfFIXED_ARRAY__##name,
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
+#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \
+ kSizeOfCODE_AGE__##name,
+ CODE_AGE_LIST_COMPLETE(COUNTER_ID)
+#undef COUNTER_ID
+ stats_counter_count
+ };
+
+ void ResetCounters();
+ void ResetHistograms();
+
+ private:
+#define HT(name, caption) \
+ HistogramTimer name##_;
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define HP(name, caption) \
+ Histogram name##_;
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) \
+ Histogram name##_;
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+
+#define SC(name, caption) \
+ StatsCounter name##_;
+ STATS_COUNTER_LIST_1(SC)
+ STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter size_of_##name##_; \
+ StatsCounter count_of_##name##_;
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter size_of_CODE_TYPE_##name##_; \
+ StatsCounter count_of_CODE_TYPE_##name##_;
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter size_of_FIXED_ARRAY_##name##_; \
+ StatsCounter count_of_FIXED_ARRAY_##name##_;
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter size_of_CODE_AGE_##name##_; \
+ StatsCounter count_of_CODE_AGE_##name##_;
+ CODE_AGE_LIST_COMPLETE(SC)
+#undef SC
+
+ friend class Isolate;
+
+ explicit Counters(Isolate* isolate);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
+};
+
} } // namespace v8::internal
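
This is ES-style truncation toward zero, floor for positive values and ceil for negative ones; for example (values follow directly from the definition above):

    DoubleToInteger(2.7);   // -> 2.0
    DoubleToInteger(-2.7);  // -> -2.0
    DoubleToInteger(NAN);   // -> 0.0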
#endif // V8_COUNTERS_H_
diff --git a/chromium/v8/src/cpu-profiler-inl.h b/chromium/v8/src/cpu-profiler-inl.h
index 7bfbf5c57cb..c63a9c3cc25 100644
--- a/chromium/v8/src/cpu-profiler-inl.h
+++ b/chromium/v8/src/cpu-profiler-inl.h
@@ -1,39 +1,16 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CPU_PROFILER_INL_H_
#define V8_CPU_PROFILER_INL_H_
-#include "cpu-profiler.h"
+#include "src/cpu-profiler.h"
#include <new>
-#include "circular-queue-inl.h"
-#include "profile-generator-inl.h"
-#include "unbound-queue-inl.h"
+#include "src/circular-queue-inl.h"
+#include "src/profile-generator-inl.h"
+#include "src/unbound-queue-inl.h"
namespace v8 {
namespace internal {
@@ -51,6 +28,14 @@ void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
+void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ CodeEntry* entry = code_map->FindEntry(start);
+ if (entry != NULL) {
+ entry->set_bailout_reason(bailout_reason);
+ }
+}
+
+
void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->MoveCode(from, to);
}
diff --git a/chromium/v8/src/cpu-profiler.cc b/chromium/v8/src/cpu-profiler.cc
index b1af621cccc..49a1d7a2e95 100644
--- a/chromium/v8/src/cpu-profiler.cc
+++ b/chromium/v8/src/cpu-profiler.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cpu-profiler-inl.h"
-
-#include "compiler.h"
-#include "frames-inl.h"
-#include "hashmap.h"
-#include "log-inl.h"
-#include "vm-state-inl.h"
-
-#include "../include/v8-profiler.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/cpu-profiler-inl.h"
+
+#include "src/compiler.h"
+#include "src/frames-inl.h"
+#include "src/hashmap.h"
+#include "src/log-inl.h"
+#include "src/vm-state-inl.h"
+
+#include "include/v8-profiler.h"
namespace v8 {
namespace internal {
@@ -156,6 +133,16 @@ void ProfilerEventsProcessor::Run() {
}
+void* ProfilerEventsProcessor::operator new(size_t size) {
+ return AlignedAlloc(size, V8_ALIGNOF(ProfilerEventsProcessor));
+}
+
+
+void ProfilerEventsProcessor::operator delete(void* ptr) {
+ AlignedFree(ptr);
+}
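
The two operators must stay paired: anything allocated through the aligned new has to be released through the matching delete. A short sketch of the same class-specific pattern (AlignedAlloc/AlignedFree are the V8 helpers used above; the 64-byte alignment is illustrative):

    class AlignedThing {
     public:
      void* operator new(size_t size) {
        return AlignedAlloc(size, 64);  // e.g. cache-line alignment
      }
      void operator delete(void* ptr) { AlignedFree(ptr); }
    };
    // 'new AlignedThing' now routes through AlignedAlloc, and 'delete p'
    // through AlignedFree, keeping allocation and release matched.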
+
+
int CpuProfiler::GetProfilesCount() {
// The count of profiles doesn't depend on a security token.
return profiles_->profiles()->length();
@@ -176,6 +163,10 @@ void CpuProfiler::DeleteAllProfiles() {
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
profiles_->RemoveProfile(profile);
delete profile;
+ if (profiles_->profiles()->is_empty() && !is_profiling_) {
+ // If this was the last profile, clean up all accessory data as well.
+ ResetProfiles();
+ }
}
@@ -313,6 +304,15 @@ void CpuProfiler::CodeMoveEvent(Address from, Address to) {
}
+void CpuProfiler::CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
+ CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
+ rec->start = code->address();
+ rec->bailout_reason = GetBailoutReason(shared->DisableOptimizationReason());
+ processor_->Enqueue(evt_rec);
+}
+
+
void CpuProfiler::CodeDeleteEvent(Address from) {
}
@@ -376,7 +376,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate)
sampling_interval_(TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(new CpuProfilesCollection(isolate->heap())),
- next_profile_uid_(1),
generator_(NULL),
processor_(NULL),
is_profiling_(false) {
@@ -391,7 +390,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
sampling_interval_(TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
- next_profile_uid_(1),
generator_(test_generator),
processor_(test_processor),
is_profiling_(false) {
@@ -417,10 +415,9 @@ void CpuProfiler::ResetProfiles() {
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
- if (profiles_->StartProfiling(title, next_profile_uid_++, record_samples)) {
+ if (profiles_->StartProfiling(title, record_samples)) {
StartProcessorIfNotStarted();
}
- processor_->AddCurrentStack(isolate_);
}
@@ -430,39 +427,32 @@ void CpuProfiler::StartProfiling(String* title, bool record_samples) {
void CpuProfiler::StartProcessorIfNotStarted() {
- if (processor_ == NULL) {
- Logger* logger = isolate_->logger();
- // Disable logging when using the new implementation.
- saved_is_logging_ = logger->is_logging_;
- logger->is_logging_ = false;
- generator_ = new ProfileGenerator(profiles_);
- Sampler* sampler = logger->sampler();
-#if V8_CC_MSVC && (_MSC_VER >= 1800)
- // VS2013 reports "warning C4316: 'v8::internal::ProfilerEventsProcessor'
- // : object allocated on the heap may not be aligned 64". We need to
- // figure out if this is a legitimate warning or a compiler bug.
- #pragma warning(push)
- #pragma warning(disable:4316)
-#endif
- processor_ = new ProfilerEventsProcessor(
- generator_, sampler, sampling_interval_);
-#if V8_CC_MSVC && (_MSC_VER >= 1800)
- #pragma warning(pop)
-#endif
- is_profiling_ = true;
- // Enumerate stuff we already have in the heap.
- ASSERT(isolate_->heap()->HasBeenSetUp());
- if (!FLAG_prof_browser_mode) {
- logger->LogCodeObjects();
- }
- logger->LogCompiledFunctions();
- logger->LogAccessorCallbacks();
- LogBuiltins();
- // Enable stack sampling.
- sampler->SetHasProcessingThread(true);
- sampler->IncreaseProfilingDepth();
- processor_->StartSynchronously();
+ if (processor_ != NULL) {
+ processor_->AddCurrentStack(isolate_);
+ return;
}
+ Logger* logger = isolate_->logger();
+ // Disable logging when using the new implementation.
+ saved_is_logging_ = logger->is_logging_;
+ logger->is_logging_ = false;
+ generator_ = new ProfileGenerator(profiles_);
+ Sampler* sampler = logger->sampler();
+ processor_ = new ProfilerEventsProcessor(
+ generator_, sampler, sampling_interval_);
+ is_profiling_ = true;
+ // Enumerate stuff we already have in the heap.
+ ASSERT(isolate_->heap()->HasBeenSetUp());
+ if (!FLAG_prof_browser_mode) {
+ logger->LogCodeObjects();
+ }
+ logger->LogCompiledFunctions();
+ logger->LogAccessorCallbacks();
+ LogBuiltins();
+ // Enable stack sampling.
+ sampler->SetHasProcessingThread(true);
+ sampler->IncreaseProfilingDepth();
+ processor_->AddCurrentStack(isolate_);
+ processor_->StartSynchronously();
}
diff --git a/chromium/v8/src/cpu-profiler.h b/chromium/v8/src/cpu-profiler.h
index fcb9a67ddf7..f5f2014bd5e 100644
--- a/chromium/v8/src/cpu-profiler.h
+++ b/chromium/v8/src/cpu-profiler.h
@@ -1,39 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_
-#include "allocation.h"
-#include "atomicops.h"
-#include "circular-queue.h"
-#include "platform/time.h"
-#include "sampler.h"
-#include "unbound-queue.h"
+#include "src/allocation.h"
+#include "src/base/atomicops.h"
+#include "src/circular-queue.h"
+#include "src/platform/time.h"
+#include "src/sampler.h"
+#include "src/unbound-queue.h"
namespace v8 {
namespace internal {
@@ -49,6 +26,7 @@ class ProfileGenerator;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
V(CODE_MOVE, CodeMoveEventRecord) \
+ V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
V(REPORT_BUILTIN, ReportBuiltinEventRecord)
@@ -88,6 +66,15 @@ class CodeMoveEventRecord : public CodeEventRecord {
};
+class CodeDisableOptEventRecord : public CodeEventRecord {
+ public:
+ Address start;
+ const char* bailout_reason;
+
+ INLINE(void UpdateCodeMap(CodeMap* code_map));
+};
+
+
class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
public:
Address from;
@@ -158,6 +145,11 @@ class ProfilerEventsProcessor : public Thread {
inline TickSample* StartTickSample();
inline void FinishTickSample();
+ // SamplingCircularQueue has stricter alignment requirements than a normal new
+ // can fulfil, so we need to provide our own new/delete here.
+ void* operator new(size_t size);
+ void operator delete(void* ptr);
+
private:
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
@@ -243,6 +235,7 @@ class CpuProfiler : public CodeEventListener {
Code* code, int args_count);
virtual void CodeMovingGCEvent() {}
virtual void CodeMoveEvent(Address from, Address to);
+ virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
virtual void CodeDeleteEvent(Address from);
virtual void GetterCallbackEvent(Name* name, Address entry_point);
virtual void RegExpCodeCreateEvent(Code* code, String* source);
@@ -268,7 +261,6 @@ class CpuProfiler : public CodeEventListener {
Isolate* isolate_;
TimeDelta sampling_interval_;
CpuProfilesCollection* profiles_;
- unsigned next_profile_uid_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
bool saved_is_logging_;
diff --git a/chromium/v8/src/cpu.cc b/chromium/v8/src/cpu.cc
index 2bf51a7f6c0..01c1036db26 100644
--- a/chromium/v8/src/cpu.cc
+++ b/chromium/v8/src/cpu.cc
@@ -1,49 +1,29 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "cpu.h"
-
-#if V8_CC_MSVC
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/cpu.h"
+
+#if V8_LIBC_MSVCRT
#include <intrin.h> // __cpuid()
#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
#endif
+#if V8_OS_QNX
+#include <sys/syspage.h> // cpuinfo
+#endif
+#include <ctype.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
#include <algorithm>
-#include <cctype>
-#include <climits>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include "checks.h"
+#include "src/checks.h"
#if V8_OS_WIN
-#include "win32-headers.h"
+#include "src/base/win32-headers.h"
#endif
namespace v8 {
@@ -51,8 +31,8 @@ namespace internal {
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-// Define __cpuid() for non-MSVC compilers.
-#if !V8_CC_MSVC
+// Define __cpuid() for non-MSVC libraries.
+#if !V8_LIBC_MSVCRT
static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#if defined(__i386__) && defined(__pic__)
@@ -74,9 +54,11 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // defined(__i386__) && defined(__pic__)
}
-#endif // !V8_CC_MSVC
+#endif // !V8_LIBC_MSVCRT
+
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS
-#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+#if V8_OS_LINUX
#if V8_HOST_ARCH_ARM
@@ -224,6 +206,7 @@ class CPUInfo V8_FINAL BASE_EMBEDDED {
size_t datalen_;
};
+#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
// Checks that a space-separated list of items contains one given 'item'.
static bool HasListItem(const char* list, const char* item) {
@@ -249,6 +232,10 @@ static bool HasListItem(const char* list, const char* item) {
return false;
}
+#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+
+#endif // V8_OS_LINUX
+
#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
CPU::CPU() : stepping_(0),
@@ -272,7 +259,7 @@ CPU::CPU() : stepping_(0),
has_sse42_(false),
has_idiva_(false),
has_neon_(false),
- has_thumbee_(false),
+ has_thumb2_(false),
has_vfp_(false),
has_vfp3_(false),
has_vfp3_d32_(false) {
@@ -313,6 +300,10 @@ CPU::CPU() : stepping_(0),
has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
}
+#if V8_HOST_ARCH_IA32
+ // SAHF is always available in compat/legacy mode.
+ has_sahf_ = true;
+#else
// Query extended IDs.
__cpuid(cpu_info, 0x80000000);
unsigned num_ext_ids = cpu_info[0];
@@ -320,15 +311,15 @@ CPU::CPU() : stepping_(0),
// Interpret extended CPU feature information.
if (num_ext_ids > 0x80000000) {
__cpuid(cpu_info, 0x80000001);
- // SAHF is always available in compat/legacy mode,
- // but must be probed in long mode.
-#if V8_HOST_ARCH_IA32
- has_sahf_ = true;
-#else
+ // SAHF must be probed in long mode.
has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
-#endif
}
+#endif
+
#elif V8_HOST_ARCH_ARM
+
+#if V8_OS_LINUX
+
CPUInfo cpu_info;
// Extract implementor from the "CPU implementer" field.
@@ -392,7 +383,6 @@ CPU::CPU() : stepping_(0),
if (hwcaps != 0) {
has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
has_neon_ = (hwcaps & HWCAP_NEON) != 0;
- has_thumbee_ = (hwcaps & HWCAP_THUMBEE) != 0;
has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
@@ -402,13 +392,13 @@ CPU::CPU() : stepping_(0),
char* features = cpu_info.ExtractField("Features");
has_idiva_ = HasListItem(features, "idiva");
has_neon_ = HasListItem(features, "neon");
- has_thumbee_ = HasListItem(features, "thumbee");
+ has_thumb2_ = HasListItem(features, "thumb2");
has_vfp_ = HasListItem(features, "vfp");
- if (HasListItem(features, "vfpv3")) {
+ if (HasListItem(features, "vfpv3d16")) {
has_vfp3_ = true;
- has_vfp3_d32_ = true;
- } else if (HasListItem(features, "vfpv3d16")) {
+ } else if (HasListItem(features, "vfpv3")) {
has_vfp3_ = true;
+ has_vfp3_d32_ = true;
}
delete[] features;
}
@@ -426,19 +416,46 @@ CPU::CPU() : stepping_(0),
architecture_ = 7;
}
- // ARMv7 implies ThumbEE.
+ // ARMv7 implies Thumb2.
if (architecture_ >= 7) {
- has_thumbee_ = true;
+ has_thumb2_ = true;
}
- // The earliest architecture with ThumbEE is ARMv6T2.
- if (has_thumbee_ && architecture_ < 6) {
+ // The earliest architecture with Thumb2 is ARMv6T2.
+ if (has_thumb2_ && architecture_ < 6) {
architecture_ = 6;
}
// We don't support any FPUs other than VFP.
has_fpu_ = has_vfp_;
+
+#elif V8_OS_QNX
+
+ uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags;
+ if (cpu_flags & ARM_CPU_FLAG_V7) {
+ architecture_ = 7;
+ has_thumb2_ = true;
+ } else if (cpu_flags & ARM_CPU_FLAG_V6) {
+ architecture_ = 6;
+ // QNX doesn't say if Thumb2 is available.
+ // Assume false for architectures older than ARMv7.
+ }
+ ASSERT(architecture_ >= 6);
+ has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
+ has_vfp_ = has_fpu_;
+ if (cpu_flags & ARM_CPU_FLAG_NEON) {
+ has_neon_ = true;
+ has_vfp3_ = has_vfp_;
+#ifdef ARM_CPU_FLAG_VFP_D32
+ has_vfp3_d32_ = (cpu_flags & ARM_CPU_FLAG_VFP_D32) != 0;
+#endif
+ }
+ has_idiva_ = (cpu_flags & ARM_CPU_FLAG_IDIV) != 0;
+
+#endif // V8_OS_LINUX
+
#elif V8_HOST_ARCH_MIPS
+
// Simple detection of FPU at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to MIPS (early 2010), no similar
@@ -448,18 +465,33 @@ CPU::CPU() : stepping_(0),
char* cpu_model = cpu_info.ExtractField("cpu model");
has_fpu_ = HasListItem(cpu_model, "FPU");
delete[] cpu_model;
-#endif
-}
+#elif V8_HOST_ARCH_ARM64
+
+ CPUInfo cpu_info;
+
+ // Extract implementor from the "CPU implementer" field.
+ char* implementer = cpu_info.ExtractField("CPU implementer");
+ if (implementer != NULL) {
+ char* end;
+ implementer_ = strtol(implementer, &end, 0);
+ if (end == implementer) {
+ implementer_ = 0;
+ }
+ delete[] implementer;
+ }
+
+ // Extract part number from the "CPU part" field.
+ char* part = cpu_info.ExtractField("CPU part");
+ if (part != NULL) {
+ char* end;
+ part_ = strtol(part, &end, 0);
+ if (end == part) {
+ part_ = 0;
+ }
+ delete[] part;
+ }
-// static
-int CPU::NumberOfProcessorsOnline() {
-#if V8_OS_WIN
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- return info.dwNumberOfProcessors;
-#else
- return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
#endif
}
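The new ARM64 branch above reads the "CPU implementer" and "CPU part" fields with strtol's end-pointer protocol: base 0 auto-detects the 0x prefix that /proc/cpuinfo uses, and end == input signals that no digits were parsed. The idiom in isolation (a sketch; ParseFieldOrZero is an invented name, not V8 API):

#include <cstdlib>

static int ParseFieldOrZero(const char* field) {
  char* end;
  long value = strtol(field, &end, 0);  // Base 0 accepts 0x41, 0x4e, ...
  return (end == field) ? 0 : static_cast<int>(value);  // No digits -> 0.
}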
diff --git a/chromium/v8/src/cpu.h b/chromium/v8/src/cpu.h
index b2e9f7da7ee..ac8ee982a44 100644
--- a/chromium/v8/src/cpu.h
+++ b/chromium/v8/src/cpu.h
@@ -1,29 +1,6 @@
// Copyright 2006-2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This module contains the architecture-specific code. This make the rest of
// the code less dependent on differences between different processor
@@ -36,7 +13,7 @@
#ifndef V8_CPU_H_
#define V8_CPU_H_
-#include "allocation.h"
+#include "src/allocation.h"
namespace v8 {
namespace internal {
@@ -67,6 +44,7 @@ class CPU V8_FINAL BASE_EMBEDDED {
// arm implementer/part information
int implementer() const { return implementer_; }
static const int ARM = 0x41;
+ static const int NVIDIA = 0x4e;
static const int QUALCOMM = 0x51;
int architecture() const { return architecture_; }
int part() const { return part_; }
@@ -94,19 +72,11 @@ class CPU V8_FINAL BASE_EMBEDDED {
// arm features
bool has_idiva() const { return has_idiva_; }
bool has_neon() const { return has_neon_; }
- bool has_thumbee() const { return has_thumbee_; }
+ bool has_thumb2() const { return has_thumb2_; }
bool has_vfp() const { return has_vfp_; }
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
- // Returns the number of processors online.
- static int NumberOfProcessorsOnline();
-
- // Initializes the cpu architecture support. Called once at VM startup.
- static void SetUp();
-
- static bool SupportsCrankshaft();
-
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
@@ -133,7 +103,7 @@ class CPU V8_FINAL BASE_EMBEDDED {
bool has_sse42_;
bool has_idiva_;
bool has_neon_;
- bool has_thumbee_;
+ bool has_thumb2_;
bool has_vfp_;
bool has_vfp3_;
bool has_vfp3_d32_;
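For the /proc/cpuinfo "Features" matching that the cpu.cc hunks above now compile only under V8_OS_LINUX: HasListItem treats the feature string as space-delimited tokens, so "vfp" must not match inside "vfpv3". A sketch of that contract (illustrative, not the cpu.cc implementation):

#include <cstring>

static bool HasListItemSketch(const char* list, const char* item) {
  const size_t item_len = strlen(item);
  while (list != nullptr && *list != '\0') {
    const char* hit = strstr(list, item);
    if (hit == nullptr) return false;
    // Count the hit only when bounded by spaces or the string's ends.
    const bool starts = (hit == list) || (hit[-1] == ' ');
    const bool ends = (hit[item_len] == '\0') || (hit[item_len] == ' ');
    if (starts && ends) return true;
    list = strchr(hit + 1, ' ');  // Resume scanning after the failed hit.
  }
  return false;
}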
diff --git a/chromium/v8/src/d8-debug.cc b/chromium/v8/src/d8-debug.cc
index 6c297d73969..71e006c48e6 100644
--- a/chromium/v8/src/d8-debug.cc
+++ b/chromium/v8/src/d8-debug.cc
@@ -1,54 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-#include "d8.h"
-#include "d8-debug.h"
-#include "debug-agent.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "src/d8.h"
+#include "src/d8-debug.h"
namespace v8 {
-static bool was_running = true;
-
void PrintPrompt(bool is_running) {
const char* prompt = is_running ? "> " : "dbg> ";
- was_running = is_running;
printf("%s", prompt);
fflush(stdout);
}
-void PrintPrompt() {
- PrintPrompt(was_running);
-}
-
-
void HandleDebugEvent(const Debug::EventDetails& event_details) {
// TODO(svenpanne) There should be a way to retrieve this in the callback.
Isolate* isolate = Isolate::GetCurrent();
@@ -164,208 +129,4 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
}
}
-
-void RunRemoteDebugger(Isolate* isolate, int port) {
- RemoteDebugger debugger(isolate, port);
- debugger.Run();
-}
-
-
-void RemoteDebugger::Run() {
- bool ok;
-
- // Connect to the debugger agent.
- conn_ = new i::Socket;
- static const int kPortStrSize = 6;
- char port_str[kPortStrSize];
- i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_);
- ok = conn_->Connect("localhost", port_str);
- if (!ok) {
- printf("Unable to connect to debug agent %d\n", i::Socket::GetLastError());
- return;
- }
-
- // Start the receiver thread.
- ReceiverThread receiver(this);
- receiver.Start();
-
- // Start the keyboard thread.
- KeyboardThread keyboard(this);
- keyboard.Start();
- PrintPrompt();
-
- // Process events received from debugged VM and from the keyboard.
- bool terminate = false;
- while (!terminate) {
- event_available_.Wait();
- RemoteDebuggerEvent* event = GetEvent();
- switch (event->type()) {
- case RemoteDebuggerEvent::kMessage:
- HandleMessageReceived(event->data());
- break;
- case RemoteDebuggerEvent::kKeyboard:
- HandleKeyboardCommand(event->data());
- break;
- case RemoteDebuggerEvent::kDisconnect:
- terminate = true;
- break;
-
- default:
- UNREACHABLE();
- }
- delete event;
- }
-
- // Wait for the receiver thread to end.
- receiver.Join();
-}
-
-
-void RemoteDebugger::MessageReceived(i::SmartArrayPointer<char> message) {
- RemoteDebuggerEvent* event =
- new RemoteDebuggerEvent(RemoteDebuggerEvent::kMessage, message);
- AddEvent(event);
-}
-
-
-void RemoteDebugger::KeyboardCommand(i::SmartArrayPointer<char> command) {
- RemoteDebuggerEvent* event =
- new RemoteDebuggerEvent(RemoteDebuggerEvent::kKeyboard, command);
- AddEvent(event);
-}
-
-
-void RemoteDebugger::ConnectionClosed() {
- RemoteDebuggerEvent* event =
- new RemoteDebuggerEvent(RemoteDebuggerEvent::kDisconnect,
- i::SmartArrayPointer<char>());
- AddEvent(event);
-}
-
-
-void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
- i::LockGuard<i::Mutex> lock_guard(&event_access_);
- if (head_ == NULL) {
- ASSERT(tail_ == NULL);
- head_ = event;
- tail_ = event;
- } else {
- ASSERT(tail_ != NULL);
- tail_->set_next(event);
- tail_ = event;
- }
- event_available_.Signal();
-}
-
-
-RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
- i::LockGuard<i::Mutex> lock_guard(&event_access_);
- ASSERT(head_ != NULL);
- RemoteDebuggerEvent* result = head_;
- head_ = head_->next();
- if (head_ == NULL) {
- ASSERT(tail_ == result);
- tail_ = NULL;
- }
- return result;
-}
-
-
-void RemoteDebugger::HandleMessageReceived(char* message) {
- Locker lock(isolate_);
- HandleScope scope(isolate_);
-
- // Print the event details.
- TryCatch try_catch;
- Handle<Object> details = Shell::DebugMessageDetails(
- isolate_, Handle<String>::Cast(String::NewFromUtf8(isolate_, message)));
- if (try_catch.HasCaught()) {
- Shell::ReportException(isolate_, &try_catch);
- PrintPrompt();
- return;
- }
- String::Utf8Value str(details->Get(String::NewFromUtf8(isolate_, "text")));
- if (str.length() == 0) {
- // Empty string is used to signal not to process this event.
- return;
- }
- if (*str != NULL) {
- printf("%s\n", *str);
- } else {
- printf("???\n");
- }
-
- bool is_running = details->Get(String::NewFromUtf8(isolate_, "running"))
- ->ToBoolean()
- ->Value();
- PrintPrompt(is_running);
-}
-
-
-void RemoteDebugger::HandleKeyboardCommand(char* command) {
- Locker lock(isolate_);
- HandleScope scope(isolate_);
-
- // Convert the debugger command to a JSON debugger request.
- TryCatch try_catch;
- Handle<Value> request = Shell::DebugCommandToJSONRequest(
- isolate_, String::NewFromUtf8(isolate_, command));
- if (try_catch.HasCaught()) {
- Shell::ReportException(isolate_, &try_catch);
- PrintPrompt();
- return;
- }
-
- // If undefined is returned the command was handled internally and there is
- // no JSON to send.
- if (request->IsUndefined()) {
- PrintPrompt();
- return;
- }
-
- // Send the JSON debugger request.
- i::DebuggerAgentUtil::SendMessage(conn_, Handle<String>::Cast(request));
-}
-
-
-void ReceiverThread::Run() {
- // Receive the connect message (with empty body).
- i::SmartArrayPointer<char> message =
- i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
- ASSERT(*message == NULL);
-
- while (true) {
- // Receive a message.
- i::SmartArrayPointer<char> message =
- i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
- if (*message == NULL) {
- remote_debugger_->ConnectionClosed();
- return;
- }
-
- // Pass the message to the main thread.
- remote_debugger_->MessageReceived(message);
- }
-}
-
-
-void KeyboardThread::Run() {
- static const int kBufferSize = 256;
- while (true) {
- // read keyboard input.
- char command[kBufferSize];
- char* str = fgets(command, kBufferSize, stdin);
- if (str == NULL) {
- break;
- }
-
- // Pass the keyboard command to the main thread.
- remote_debugger_->KeyboardCommand(
- i::SmartArrayPointer<char>(i::StrDup(command)));
- }
-}
-
-
} // namespace v8
-
-#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/chromium/v8/src/d8-debug.h b/chromium/v8/src/d8-debug.h
index 55876229a32..1a693cc86d2 100644
--- a/chromium/v8/src/d8-debug.h
+++ b/chromium/v8/src/d8-debug.h
@@ -1,154 +1,19 @@
// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_D8_DEBUG_H_
#define V8_D8_DEBUG_H_
-#include "d8.h"
-#include "debug.h"
+#include "src/d8.h"
+#include "src/debug.h"
namespace v8 {
-
void HandleDebugEvent(const Debug::EventDetails& event_details);
-// Start the remote debugger connecting to a V8 debugger agent on the specified
-// port.
-void RunRemoteDebugger(Isolate* isolate, int port);
-
-// Forward declarations.
-class RemoteDebuggerEvent;
-class ReceiverThread;
-
-
-// Remote debugging class.
-class RemoteDebugger {
- public:
- explicit RemoteDebugger(Isolate* isolate, int port)
- : isolate_(isolate),
- port_(port),
- event_available_(0),
- head_(NULL), tail_(NULL) {}
- void Run();
-
- // Handle events from the subordinate threads.
- void MessageReceived(i::SmartArrayPointer<char> message);
- void KeyboardCommand(i::SmartArrayPointer<char> command);
- void ConnectionClosed();
-
- private:
- // Add new debugger event to the list.
- void AddEvent(RemoteDebuggerEvent* event);
- // Read next debugger event from the list.
- RemoteDebuggerEvent* GetEvent();
-
- // Handle a message from the debugged V8.
- void HandleMessageReceived(char* message);
- // Handle a keyboard command.
- void HandleKeyboardCommand(char* command);
-
- // Get connection to agent in debugged V8.
- i::Socket* conn() { return conn_; }
-
- Isolate* isolate_;
- int port_; // Port used to connect to debugger V8.
- i::Socket* conn_; // Connection to debugger agent in debugged V8.
-
- // Linked list of events from debugged V8 and from keyboard input. Access to
- // the list is guarded by a mutex and a semaphore signals new items in the
- // list.
- i::Mutex event_access_;
- i::Semaphore event_available_;
- RemoteDebuggerEvent* head_;
- RemoteDebuggerEvent* tail_;
-
- friend class ReceiverThread;
-};
-
-
-// Thread reading from debugged V8 instance.
-class ReceiverThread: public i::Thread {
- public:
- explicit ReceiverThread(RemoteDebugger* remote_debugger)
- : Thread("d8:ReceiverThrd"),
- remote_debugger_(remote_debugger) {}
- ~ReceiverThread() {}
-
- void Run();
-
- private:
- RemoteDebugger* remote_debugger_;
-};
-
-
-// Thread reading keyboard input.
-class KeyboardThread: public i::Thread {
- public:
- explicit KeyboardThread(RemoteDebugger* remote_debugger)
- : Thread("d8:KeyboardThrd"),
- remote_debugger_(remote_debugger) {}
- ~KeyboardThread() {}
-
- void Run();
-
- private:
- RemoteDebugger* remote_debugger_;
-};
-
-
-// Events processed by the main debugger thread.
-class RemoteDebuggerEvent {
- public:
- RemoteDebuggerEvent(int type, i::SmartArrayPointer<char> data)
- : type_(type), data_(data), next_(NULL) {
- ASSERT(type == kMessage || type == kKeyboard || type == kDisconnect);
- }
-
- static const int kMessage = 1;
- static const int kKeyboard = 2;
- static const int kDisconnect = 3;
-
- int type() { return type_; }
- char* data() { return *data_; }
-
- private:
- void set_next(RemoteDebuggerEvent* event) { next_ = event; }
- RemoteDebuggerEvent* next() { return next_; }
-
- int type_;
- i::SmartArrayPointer<char> data_;
- RemoteDebuggerEvent* next_;
-
- friend class RemoteDebugger;
-};
-
-
} // namespace v8
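The RemoteDebugger removed above pumped events through a hand-rolled queue: a mutex-guarded linked list with a semaphore signalling new items (see the AddEvent/GetEvent pair). The same producer/consumer shape in portable standard C++, for readers who want the pattern without the V8 plumbing (a sketch; the class name is invented):

#include <condition_variable>
#include <mutex>
#include <queue>
#include <string>

class EventQueue {
 public:
  void Push(std::string event) {
    std::lock_guard<std::mutex> lock(mutex_);
    events_.push(std::move(event));
    available_.notify_one();  // Plays the role of the semaphore's Signal().
  }
  std::string Pop() {  // Blocks, like the Wait()/GetEvent() pair above.
    std::unique_lock<std::mutex> lock(mutex_);
    available_.wait(lock, [this] { return !events_.empty(); });
    std::string event = std::move(events_.front());
    events_.pop();
    return event;
  }

 private:
  std::mutex mutex_;
  std::condition_variable available_;
  std::queue<std::string> events_;
};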
diff --git a/chromium/v8/src/d8-posix.cc b/chromium/v8/src/d8-posix.cc
index 25f79a4be6b..8851ce85f99 100644
--- a/chromium/v8/src/d8-posix.cc
+++ b/chromium/v8/src/d8-posix.cc
@@ -1,29 +1,6 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdlib.h>
@@ -38,9 +15,9 @@
#include <signal.h>
-#include "d8.h"
-#include "d8-debug.h"
-#include "debug.h"
+#include "src/d8.h"
+#include "src/d8-debug.h"
+#include "src/debug.h"
namespace v8 {
@@ -106,7 +83,7 @@ static int LengthWithoutIncompleteUtf8(char* buffer, int len) {
static bool WaitOnFD(int fd,
int read_timeout,
int total_timeout,
- struct timeval& start_time) {
+ const struct timeval& start_time) {
fd_set readfds, writefds, exceptfds;
struct timeval timeout;
int gone = 0;
@@ -202,7 +179,7 @@ class ExecArgs {
exec_args_[0] = c_arg;
int i = 1;
for (unsigned j = 0; j < command_args->Length(); i++, j++) {
- Handle<Value> arg(command_args->Get(Integer::New(j)));
+ Handle<Value> arg(command_args->Get(Integer::New(isolate, j)));
String::Utf8Value utf8_arg(arg);
if (*utf8_arg == NULL) {
exec_args_[i] = NULL; // Consistent state for destructor.
@@ -229,8 +206,8 @@ class ExecArgs {
}
}
static const unsigned kMaxArgs = 1000;
- char** arg_array() { return exec_args_; }
- char* arg0() { return exec_args_[0]; }
+ char* const* arg_array() const { return exec_args_; }
+ const char* arg0() const { return exec_args_[0]; }
private:
char* exec_args_[kMaxArgs + 1];
@@ -272,7 +249,7 @@ static const int kWriteFD = 1;
// It only returns if an error occurred.
static void ExecSubprocess(int* exec_error_fds,
int* stdout_fds,
- ExecArgs& exec_args) {
+ const ExecArgs& exec_args) {
close(exec_error_fds[kReadFD]); // Don't need this in the child.
close(stdout_fds[kReadFD]); // Don't need this in the child.
close(1); // Close stdout.
@@ -311,10 +288,10 @@ static bool ChildLaunchedOK(Isolate* isolate, int* exec_error_fds) {
// succeeded or false if an exception was thrown.
static Handle<Value> GetStdout(Isolate* isolate,
int child_fd,
- struct timeval& start_time,
+ const struct timeval& start_time,
int read_timeout,
int total_timeout) {
- Handle<String> accumulator = String::Empty();
+ Handle<String> accumulator = String::Empty(isolate);
int fullness = 0;
static const int kStdoutReadBufferSize = 4096;
@@ -383,8 +360,8 @@ static Handle<Value> GetStdout(Isolate* isolate,
// Get exit status of child.
static bool WaitForChild(Isolate* isolate,
int pid,
- ZombieProtector& child_waiter,
- struct timeval& start_time,
+ ZombieProtector& child_waiter, // NOLINT
+ const struct timeval& start_time,
int read_timeout,
int total_timeout) {
#ifdef HAS_WAITID
@@ -723,19 +700,19 @@ void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::AddOSMethods(Isolate* isolate, Handle<ObjectTemplate> os_templ) {
os_templ->Set(String::NewFromUtf8(isolate, "system"),
- FunctionTemplate::New(System));
+ FunctionTemplate::New(isolate, System));
os_templ->Set(String::NewFromUtf8(isolate, "chdir"),
- FunctionTemplate::New(ChangeDirectory));
+ FunctionTemplate::New(isolate, ChangeDirectory));
os_templ->Set(String::NewFromUtf8(isolate, "setenv"),
- FunctionTemplate::New(SetEnvironment));
+ FunctionTemplate::New(isolate, SetEnvironment));
os_templ->Set(String::NewFromUtf8(isolate, "unsetenv"),
- FunctionTemplate::New(UnsetEnvironment));
+ FunctionTemplate::New(isolate, UnsetEnvironment));
os_templ->Set(String::NewFromUtf8(isolate, "umask"),
- FunctionTemplate::New(SetUMask));
+ FunctionTemplate::New(isolate, SetUMask));
os_templ->Set(String::NewFromUtf8(isolate, "mkdirp"),
- FunctionTemplate::New(MakeDirectory));
+ FunctionTemplate::New(isolate, MakeDirectory));
os_templ->Set(String::NewFromUtf8(isolate, "rmdir"),
- FunctionTemplate::New(RemoveDirectory));
+ FunctionTemplate::New(isolate, RemoveDirectory));
}
} // namespace v8
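Several d8-posix.cc signatures above gained const, and arg_array() now returns char* const* — precisely the argv type the exec family takes, so the const accessor needs no cast at the execvp call site. A self-contained sketch of that call shape (Launch is an invented name):

#include <unistd.h>

// execvp's second parameter is char* const argv[]: writable strings, but
// the callee will not reseat the pointers.
static void Launch(const char* path, char* const* argv) {
  execvp(path, argv);  // Returns only on error.
}

int main() {
  char arg0[] = "echo";
  char arg1[] = "hello";
  char* argv[] = {arg0, arg1, nullptr};
  Launch("echo", argv);
  return 1;  // Reached only if execvp failed.
}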
diff --git a/chromium/v8/src/d8-readline.cc b/chromium/v8/src/d8-readline.cc
index 15b13617279..39c93d35de5 100644
--- a/chromium/v8/src/d8-readline.cc
+++ b/chromium/v8/src/d8-readline.cc
@@ -1,31 +1,8 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <cstdio> // NOLINT
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h> // NOLINT
#include <string.h> // NOLINT
#include <readline/readline.h> // NOLINT
#include <readline/history.h> // NOLINT
@@ -33,7 +10,7 @@
// The readline includes leave RETURN defined, which breaks V8 compilation.
#undef RETURN
-#include "d8.h"
+#include "src/d8.h"
// There are incompatibilities between different versions and different
// implementations of readline. This smooths out one known incompatibility.
@@ -105,10 +82,7 @@ bool ReadLineEditor::Close() {
Handle<String> ReadLineEditor::Prompt(const char* prompt) {
char* result = NULL;
- { // Release lock for blocking input.
- Unlocker unlock(Isolate::GetCurrent());
- result = readline(prompt);
- }
+ result = readline(prompt);
if (result == NULL) return Handle<String>();
AddHistory(result);
return String::NewFromUtf8(isolate_, result);
@@ -146,7 +120,6 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
static unsigned current_index;
static Persistent<Array> current_completions;
Isolate* isolate = read_line_editor.isolate_;
- Locker lock(isolate);
HandleScope scope(isolate);
Handle<Array> completions;
if (state == 0) {
@@ -163,7 +136,7 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
completions = Local<Array>::New(isolate, current_completions);
}
if (current_index < completions->Length()) {
- Handle<Integer> index = Integer::New(current_index);
+ Handle<Integer> index = Integer::New(isolate, current_index);
Handle<Value> str_obj = completions->Get(index);
current_index++;
String::Utf8Value str(str_obj);
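CompletionGenerator above follows GNU readline's generator protocol: the callback is invoked with state == 0 to start a new scan and then repeatedly until it returns NULL, each returned string being allocated for readline to free. A self-contained sketch against a fixed word list (assumes GNU readline; the names are illustrative):

#include <readline/readline.h>
#include <cstdlib>
#include <cstring>

static char* Generator(const char* text, int state) {
  static const char* kWords[] = {"load", "print", "quit", nullptr};
  static int index;
  if (state == 0) index = 0;  // state == 0 restarts the scan.
  while (kWords[index] != nullptr) {
    const char* word = kWords[index++];
    if (strncmp(word, text, strlen(text)) == 0) {
      return strdup(word);  // readline takes ownership and frees it.
    }
  }
  return nullptr;  // No more matches; readline stops calling.
}

// Installed via: rl_attempted_completion_function = Complete;
static char** Complete(const char* text, int /*start*/, int /*end*/) {
  return rl_completion_matches(text, Generator);
}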
diff --git a/chromium/v8/src/d8-windows.cc b/chromium/v8/src/d8-windows.cc
index edf5085d498..b519407a980 100644
--- a/chromium/v8/src/d8-windows.cc
+++ b/chromium/v8/src/d8-windows.cc
@@ -1,35 +1,12 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "d8.h"
-#include "d8-debug.h"
-#include "debug.h"
-#include "api.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "src/d8.h"
+#include "src/d8-debug.h"
+#include "src/debug.h"
+#include "src/api.h"
namespace v8 {
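The d8.cc hunks below migrate script execution from Script::New/Run to the two-phase ScriptCompiler API: compile once to a context-independent UnboundScript, then bind to whichever context is current. The flow, extracted into one helper (a sketch built from the calls in the diff; the function name and error handling are assumptions):

// Assumes the V8 headers of this era, i.e. #include "v8.h".
void CompileAndRun(v8::Isolate* isolate, v8::Handle<v8::String> source,
                   v8::Handle<v8::Value> name) {
  v8::ScriptOrigin origin(name);
  v8::ScriptCompiler::Source script_source(source, origin);
  v8::Handle<v8::UnboundScript> script =
      v8::ScriptCompiler::CompileUnbound(isolate, &script_source);
  if (script.IsEmpty()) return;  // Compile error; exception is pending.
  script->BindToCurrentContext()->Run();
}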
diff --git a/chromium/v8/src/d8.cc b/chromium/v8/src/d8.cc
index 7c5df463d33..661307f0dfe 100644
--- a/chromium/v8/src/d8.cc
+++ b/chromium/v8/src/d8.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Defined when linking against shared lib on Windows.
@@ -49,25 +26,25 @@
#endif // !V8_SHARED
#ifdef V8_SHARED
-#include "../include/v8-testing.h"
+#include "include/v8-testing.h"
#endif // V8_SHARED
#ifdef ENABLE_VTUNE_JIT_INTERFACE
-#include "third_party/vtune/v8-vtune.h"
+#include "src/third_party/vtune/v8-vtune.h"
#endif
-#include "d8.h"
+#include "src/d8.h"
#ifndef V8_SHARED
-#include "api.h"
-#include "checks.h"
-#include "cpu.h"
-#include "d8-debug.h"
-#include "debug.h"
-#include "natives.h"
-#include "platform.h"
-#include "v8.h"
-#endif // V8_SHARED
+#include "src/api.h"
+#include "src/checks.h"
+#include "src/cpu.h"
+#include "src/d8-debug.h"
+#include "src/debug.h"
+#include "src/natives.h"
+#include "src/platform.h"
+#include "src/v8.h"
+#endif // !V8_SHARED
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
@@ -119,6 +96,8 @@ class PerIsolateData {
Persistent<Context>* realms_;
Persistent<Value> realm_shared_;
+ int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
+ int arg_offset);
int RealmFind(Handle<Context> context);
};
@@ -161,23 +140,22 @@ CounterCollection* Shell::counters_ = &local_counters_;
i::Mutex Shell::context_mutex_;
const i::TimeTicks Shell::kInitialTicks = i::TimeTicks::HighResolutionNow();
Persistent<Context> Shell::utility_context_;
-#endif // V8_SHARED
+#endif // !V8_SHARED
Persistent<Context> Shell::evaluation_context_;
ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";
+#ifndef V8_SHARED
const int MB = 1024 * 1024;
-
-#ifndef V8_SHARED
bool CounterMap::Match(void* key1, void* key2) {
const char* name1 = reinterpret_cast<const char*>(key1);
const char* name2 = reinterpret_cast<const char*>(key2);
return strcmp(name1, name2) == 0;
}
-#endif // V8_SHARED
+#endif // !V8_SHARED
// Converts a V8 value to a C string.
@@ -192,11 +170,11 @@ bool Shell::ExecuteString(Isolate* isolate,
Handle<Value> name,
bool print_result,
bool report_exceptions) {
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+#ifndef V8_SHARED
bool FLAG_debugger = i::FLAG_debugger;
#else
bool FLAG_debugger = false;
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+#endif // !V8_SHARED
HandleScope handle_scope(isolate);
TryCatch try_catch;
options.script_executed = true;
@@ -204,7 +182,10 @@ bool Shell::ExecuteString(Isolate* isolate,
// When debugging make exceptions appear to be uncaught.
try_catch.SetVerbose(true);
}
- Handle<Script> script = Script::New(source, name);
+ ScriptOrigin origin(name);
+ ScriptCompiler::Source script_source(source, origin);
+ Handle<UnboundScript> script =
+ ScriptCompiler::CompileUnbound(isolate, &script_source);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions && !FLAG_debugger)
@@ -215,7 +196,7 @@ bool Shell::ExecuteString(Isolate* isolate,
Local<Context> realm =
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
realm->Enter();
- Handle<Value> result = script->Run();
+ Handle<Value> result = script->BindToCurrentContext()->Run();
realm->Exit();
data->realm_current_ = data->realm_switch_;
if (result.IsEmpty()) {
@@ -289,13 +270,31 @@ int PerIsolateData::RealmFind(Handle<Context> context) {
}
+int PerIsolateData::RealmIndexOrThrow(
+ const v8::FunctionCallbackInfo<v8::Value>& args,
+ int arg_offset) {
+ if (args.Length() < arg_offset || !args[arg_offset]->IsNumber()) {
+ Throw(args.GetIsolate(), "Invalid argument");
+ return -1;
+ }
+ int index = args[arg_offset]->Int32Value();
+ if (index < 0 ||
+ index >= realm_count_ ||
+ realms_[index].IsEmpty()) {
+ Throw(args.GetIsolate(), "Invalid realm index");
+ return -1;
+ }
+ return index;
+}
+
+
#ifndef V8_SHARED
// performance.now() returns a time stamp as a double, measured in milliseconds.
void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
args.GetReturnValue().Set(delta.InMillisecondsF());
}
-#endif // V8_SHARED
+#endif // !V8_SHARED
// Realm.current() returns the index of the currently active realm.
@@ -326,15 +325,8 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
// (Note that properties of global objects cannot be read/written cross-realm.)
void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());
- if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
- return;
- }
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
- return;
- }
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
args.GetReturnValue().Set(
Local<Context>::New(args.GetIsolate(), data->realms_[index])->Global());
}
@@ -362,13 +354,9 @@ void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
- return;
- }
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty() ||
- index == 0 ||
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
+ if (index == 0 ||
index == data->realm_current_ || index == data->realm_switch_) {
Throw(args.GetIsolate(), "Invalid realm index");
return;
@@ -381,15 +369,8 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
- return;
- }
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
- return;
- }
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
data->realm_switch_ = index;
}
@@ -398,20 +379,19 @@ void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) {
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
+ if (args.Length() < 2 || !args[1]->IsString()) {
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
- return;
- }
- Handle<Script> script = Script::New(args[1]->ToString());
+ ScriptCompiler::Source script_source(args[1]->ToString());
+ Handle<UnboundScript> script = ScriptCompiler::CompileUnbound(
+ isolate, &script_source);
if (script.IsEmpty()) return;
Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
- Handle<Value> result = script->Run();
+ Handle<Value> result = script->BindToCurrentContext()->Run();
realm->Exit();
args.GetReturnValue().Set(result);
}
@@ -492,10 +472,7 @@ Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
// not been fully read into the buffer yet (does not end with '\n').
// If fgets gets an error, just give up.
char* input = NULL;
- { // Release lock for blocking input.
- Unlocker unlock(isolate);
- input = fgets(buffer, kBufferSize, stdin);
- }
+ input = fgets(buffer, kBufferSize, stdin);
if (input == NULL) return Handle<String>();
length = static_cast<int>(strlen(buffer));
if (length == 0) {
@@ -558,14 +535,14 @@ void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+#ifndef V8_SHARED
Handle<Context> utility_context;
bool enter_context = !isolate->InContext();
if (enter_context) {
utility_context = Local<Context>::New(isolate, utility_context_);
utility_context->Enter();
}
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+#endif // !V8_SHARED
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
Handle<Message> message = try_catch->Message();
@@ -600,9 +577,9 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
}
}
printf("\n");
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+#ifndef V8_SHARED
if (enter_context) utility_context->Exit();
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+#endif // !V8_SHARED
}
@@ -610,26 +587,25 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
Handle<Array> Shell::GetCompletions(Isolate* isolate,
Handle<String> text,
Handle<String> full) {
- HandleScope handle_scope(isolate);
+ EscapableHandleScope handle_scope(isolate);
v8::Local<v8::Context> utility_context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(utility_context);
Handle<Object> global = utility_context->Global();
- Handle<Value> fun =
+ Local<Value> fun =
global->Get(String::NewFromUtf8(isolate, "GetCompletions"));
static const int kArgc = 3;
v8::Local<v8::Context> evaluation_context =
v8::Local<v8::Context>::New(isolate, evaluation_context_);
Handle<Value> argv[kArgc] = { evaluation_context->Global(), text, full };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return handle_scope.Close(Handle<Array>::Cast(val));
+ Local<Value> val = Local<Function>::Cast(fun)->Call(global, kArgc, argv);
+ return handle_scope.Escape(Local<Array>::Cast(val));
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
- Handle<String> message) {
- HandleScope handle_scope(isolate);
+Local<Object> Shell::DebugMessageDetails(Isolate* isolate,
+ Handle<String> message) {
+ EscapableHandleScope handle_scope(isolate);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
@@ -639,13 +615,13 @@ Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { message };
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return Handle<Object>::Cast(val);
+ return handle_scope.Escape(Local<Object>(Handle<Object>::Cast(val)));
}
-Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
- Handle<String> command) {
- HandleScope handle_scope(isolate);
+Local<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command) {
+ EscapableHandleScope handle_scope(isolate);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
@@ -655,23 +631,10 @@ Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { command };
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return val;
+ return handle_scope.Escape(Local<Value>(val));
}
-void Shell::DispatchDebugMessages() {
- Isolate* isolate = v8::Isolate::GetCurrent();
- HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(isolate, Shell::evaluation_context_);
- v8::Context::Scope context_scope(context);
- v8::Debug::ProcessDebugMessages();
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-#endif // V8_SHARED
-
-
-#ifndef V8_SHARED
int32_t* Counter::Bind(const char* name, bool is_histogram) {
int i;
for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
@@ -702,7 +665,7 @@ Counter* CounterCollection::GetNextCounter() {
}
-void Shell::MapCounters(const char* name) {
+void Shell::MapCounters(v8::Isolate* isolate, const char* name) {
counters_file_ = i::OS::MemoryMappedFile::create(
name, sizeof(CounterCollection), &local_counters_);
void* memory = (counters_file_ == NULL) ?
@@ -712,9 +675,9 @@ void Shell::MapCounters(const char* name) {
Exit(1);
}
counters_ = static_cast<CounterCollection*>(memory);
- V8::SetCounterFunction(LookupCounter);
- V8::SetCreateHistogramFunction(CreateHistogram);
- V8::SetAddHistogramSampleFunction(AddHistogramSample);
+ isolate->SetCounterFunction(LookupCounter);
+ isolate->SetCreateHistogramFunction(CreateHistogram);
+ isolate->SetAddHistogramSampleFunction(AddHistogramSample);
}
@@ -771,7 +734,6 @@ void Shell::AddHistogramSample(void* histogram, int sample) {
void Shell::InstallUtilityScript(Isolate* isolate) {
- Locker lock(isolate);
HandleScope scope(isolate);
// If we use the utility context, we have to set the security tokens so that
// utility, evaluation and debug context can all access each other.
@@ -783,18 +745,17 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
evaluation_context->SetSecurityToken(Undefined(isolate));
v8::Context::Scope context_scope(utility_context);
-#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope
i::Debug* debug = reinterpret_cast<i::Isolate*>(isolate)->debug();
debug->Load();
+ i::Handle<i::Context> debug_context = debug->debug_context();
i::Handle<i::JSObject> js_debug
- = i::Handle<i::JSObject>(debug->debug_context()->global_object());
+ = i::Handle<i::JSObject>(debug_context->global_object());
utility_context->Global()->Set(String::NewFromUtf8(isolate, "$debug"),
Utils::ToLocal(js_debug));
- debug->debug_context()->set_security_token(
+ debug_context->set_security_token(
reinterpret_cast<i::Isolate*>(isolate)->heap()->undefined_value());
-#endif // ENABLE_DEBUGGER_SUPPORT
// Run the d8 shell utility script in the utility context
int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
@@ -808,7 +769,8 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
Handle<String> name =
String::NewFromUtf8(isolate, shell_source_name.start(),
String::kNormalString, shell_source_name.length());
- Handle<Script> script = Script::Compile(source, name);
+ ScriptOrigin origin(name);
+ Handle<Script> script = Script::Compile(source, &origin);
script->Run();
// Mark the d8 shell script as native to avoid it showing up as normal source
// in the debugger.
@@ -820,14 +782,10 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
i::SharedFunctionInfo::cast(*compiled_script)->script()));
script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Start the in-process debugger if requested.
- if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
- v8::Debug::SetDebugEventListener2(HandleDebugEvent);
- }
-#endif // ENABLE_DEBUGGER_SUPPORT
+ if (i::FLAG_debugger) v8::Debug::SetDebugEventListener(HandleDebugEvent);
}
-#endif // V8_SHARED
+#endif // !V8_SHARED
#ifdef COMPRESS_STARTUP_DATA_BZ2
@@ -859,57 +817,57 @@ class BZip2Decompressor : public v8::StartupDataDecompressor {
Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
- Handle<ObjectTemplate> global_template = ObjectTemplate::New();
+ Handle<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
global_template->Set(String::NewFromUtf8(isolate, "print"),
- FunctionTemplate::New(Print));
+ FunctionTemplate::New(isolate, Print));
global_template->Set(String::NewFromUtf8(isolate, "write"),
- FunctionTemplate::New(Write));
+ FunctionTemplate::New(isolate, Write));
global_template->Set(String::NewFromUtf8(isolate, "read"),
- FunctionTemplate::New(Read));
+ FunctionTemplate::New(isolate, Read));
global_template->Set(String::NewFromUtf8(isolate, "readbuffer"),
- FunctionTemplate::New(ReadBuffer));
+ FunctionTemplate::New(isolate, ReadBuffer));
global_template->Set(String::NewFromUtf8(isolate, "readline"),
- FunctionTemplate::New(ReadLine));
+ FunctionTemplate::New(isolate, ReadLine));
global_template->Set(String::NewFromUtf8(isolate, "load"),
- FunctionTemplate::New(Load));
+ FunctionTemplate::New(isolate, Load));
global_template->Set(String::NewFromUtf8(isolate, "quit"),
- FunctionTemplate::New(Quit));
+ FunctionTemplate::New(isolate, Quit));
global_template->Set(String::NewFromUtf8(isolate, "version"),
- FunctionTemplate::New(Version));
+ FunctionTemplate::New(isolate, Version));
// Bind the Realm object.
- Handle<ObjectTemplate> realm_template = ObjectTemplate::New();
+ Handle<ObjectTemplate> realm_template = ObjectTemplate::New(isolate);
realm_template->Set(String::NewFromUtf8(isolate, "current"),
- FunctionTemplate::New(RealmCurrent));
+ FunctionTemplate::New(isolate, RealmCurrent));
realm_template->Set(String::NewFromUtf8(isolate, "owner"),
- FunctionTemplate::New(RealmOwner));
+ FunctionTemplate::New(isolate, RealmOwner));
realm_template->Set(String::NewFromUtf8(isolate, "global"),
- FunctionTemplate::New(RealmGlobal));
+ FunctionTemplate::New(isolate, RealmGlobal));
realm_template->Set(String::NewFromUtf8(isolate, "create"),
- FunctionTemplate::New(RealmCreate));
+ FunctionTemplate::New(isolate, RealmCreate));
realm_template->Set(String::NewFromUtf8(isolate, "dispose"),
- FunctionTemplate::New(RealmDispose));
+ FunctionTemplate::New(isolate, RealmDispose));
realm_template->Set(String::NewFromUtf8(isolate, "switch"),
- FunctionTemplate::New(RealmSwitch));
+ FunctionTemplate::New(isolate, RealmSwitch));
realm_template->Set(String::NewFromUtf8(isolate, "eval"),
- FunctionTemplate::New(RealmEval));
+ FunctionTemplate::New(isolate, RealmEval));
realm_template->SetAccessor(String::NewFromUtf8(isolate, "shared"),
RealmSharedGet, RealmSharedSet);
global_template->Set(String::NewFromUtf8(isolate, "Realm"), realm_template);
#ifndef V8_SHARED
- Handle<ObjectTemplate> performance_template = ObjectTemplate::New();
+ Handle<ObjectTemplate> performance_template = ObjectTemplate::New(isolate);
performance_template->Set(String::NewFromUtf8(isolate, "now"),
- FunctionTemplate::New(PerformanceNow));
+ FunctionTemplate::New(isolate, PerformanceNow));
global_template->Set(String::NewFromUtf8(isolate, "performance"),
performance_template);
-#endif // V8_SHARED
+#endif // !V8_SHARED
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
- Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
+ Handle<ObjectTemplate> os_templ = ObjectTemplate::New(isolate);
AddOSMethods(isolate, os_templ);
global_template->Set(String::NewFromUtf8(isolate, "os"), os_templ);
-#endif // V8_SHARED
+#endif // !V8_SHARED && !_WIN32 && !_WIN64
return global_template;
}
@@ -929,33 +887,24 @@ void Shell::Initialize(Isolate* isolate) {
Shell::counter_map_ = new CounterMap();
// Set up counters
if (i::StrLength(i::FLAG_map_counters) != 0)
- MapCounters(i::FLAG_map_counters);
+ MapCounters(isolate, i::FLAG_map_counters);
if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
V8::SetCounterFunction(LookupCounter);
V8::SetCreateHistogramFunction(CreateHistogram);
V8::SetAddHistogramSampleFunction(AddHistogramSample);
}
-#endif // V8_SHARED
+#endif // !V8_SHARED
}
void Shell::InitializeDebugger(Isolate* isolate) {
if (options.test_shell) return;
#ifndef V8_SHARED
- Locker lock(isolate);
HandleScope scope(isolate);
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
utility_context_.Reset(isolate,
Context::New(isolate, NULL, global_template));
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Start the debugger agent if requested.
- if (i::FLAG_debugger_agent) {
- v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
- v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
- }
-#endif // ENABLE_DEBUGGER_SUPPORT
-#endif // V8_SHARED
+#endif // !V8_SHARED
}
@@ -963,10 +912,10 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
i::LockGuard<i::Mutex> lock_guard(&context_mutex_);
-#endif // V8_SHARED
+#endif // !V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- HandleScope handle_scope(isolate);
+ EscapableHandleScope handle_scope(isolate);
Local<Context> context = Context::New(isolate, NULL, global_template);
ASSERT(!context.IsEmpty());
Context::Scope scope(context);
@@ -978,15 +927,15 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
factory->NewFixedArray(js_args.argc);
for (int j = 0; j < js_args.argc; j++) {
i::Handle<i::String> arg =
- factory->NewStringFromUtf8(i::CStrVector(js_args[j]));
+ factory->NewStringFromUtf8(i::CStrVector(js_args[j])).ToHandleChecked();
arguments_array->set(j, *arg);
}
i::Handle<i::JSArray> arguments_jsarray =
factory->NewJSArrayWithElements(arguments_array);
context->Global()->Set(String::NewFromUtf8(isolate, "arguments"),
Utils::ToLocal(arguments_jsarray));
-#endif // V8_SHARED
- return handle_scope.Close(context);
+#endif // !V8_SHARED
+ return handle_scope.Escape(context);
}
@@ -1009,7 +958,7 @@ struct CounterAndKey {
inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) {
return strcmp(lhs.key, rhs.key) < 0;
}
-#endif // V8_SHARED
+#endif // !V8_SHARED
void Shell::OnExit() {
@@ -1050,7 +999,7 @@ void Shell::OnExit() {
}
delete counters_file_;
delete counter_map_;
-#endif // V8_SHARED
+#endif // !V8_SHARED
}
@@ -1077,8 +1026,6 @@ static FILE* FOpen(const char* path, const char* mode) {
static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
- // Release the V8 lock while reading files.
- v8::Unlocker unlocker(isolate);
FILE* file = FOpen(name, "rb");
if (file == NULL) return NULL;
@@ -1097,16 +1044,22 @@ static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
return chars;
}
-static void ReadBufferWeakCallback(v8::Isolate* isolate,
- Persistent<ArrayBuffer>* array_buffer,
- uint8_t* data) {
- size_t byte_length =
- Local<ArrayBuffer>::New(isolate, *array_buffer)->ByteLength();
- isolate->AdjustAmountOfExternalAllocatedMemory(
+
+struct DataAndPersistent {
+ uint8_t* data;
+ Persistent<ArrayBuffer> handle;
+};
+
+
+static void ReadBufferWeakCallback(
+ const v8::WeakCallbackData<ArrayBuffer, DataAndPersistent>& data) {
+ size_t byte_length = data.GetValue()->ByteLength();
+ data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
-static_cast<intptr_t>(byte_length));
- delete[] data;
- array_buffer->Reset();
+ delete[] data.GetParameter()->data;
+ data.GetParameter()->handle.Reset();
+ delete data.GetParameter();
}
@@ -1120,45 +1073,25 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Isolate* isolate = args.GetIsolate();
- uint8_t* data = reinterpret_cast<uint8_t*>(
+ DataAndPersistent* data = new DataAndPersistent;
+ data->data = reinterpret_cast<uint8_t*>(
ReadChars(args.GetIsolate(), *filename, &length));
- if (data == NULL) {
+ if (data->data == NULL) {
+ delete data;
Throw(args.GetIsolate(), "Error reading file");
return;
}
- Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(isolate, data, length);
- v8::Persistent<v8::ArrayBuffer> weak_handle(isolate, buffer);
- weak_handle.MakeWeak(data, ReadBufferWeakCallback);
- weak_handle.MarkIndependent();
+ Handle<v8::ArrayBuffer> buffer =
+ ArrayBuffer::New(isolate, data->data, length);
+ data->handle.Reset(isolate, buffer);
+ data->handle.SetWeak(data, ReadBufferWeakCallback);
+ data->handle.MarkIndependent();
isolate->AdjustAmountOfExternalAllocatedMemory(length);
args.GetReturnValue().Set(buffer);
}
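ReadBuffer is ported from the old Persistent::MakeWeak(parameter, callback) interface to the WeakCallbackData-based SetWeak. The new callback receives a single parameter pointer, so the patch bundles the raw byte array and the persistent handle into a heap-allocated DataAndPersistent that the callback itself frees. (The lone "-static_cast<intptr_t>(byte_length));" line above is a continuation line with a unary minus, not a deletion: the external-memory accounting is decremented when the buffer dies.) A condensed sketch of the resulting ownership dance (Attach and OnBufferDead are illustrative names):

  struct DataAndPersistent {
    uint8_t* data;
    v8::Persistent<v8::ArrayBuffer> handle;
  };

  static void OnBufferDead(
      const v8::WeakCallbackData<v8::ArrayBuffer, DataAndPersistent>& info) {
    DataAndPersistent* wrapper = info.GetParameter();
    delete[] wrapper->data;   // free the externalized backing store
    wrapper->handle.Reset();  // drop the persistent handle itself
    delete wrapper;           // then the wrapper that owned both
  }

  void Attach(v8::Isolate* isolate, uint8_t* bytes, size_t length) {
    DataAndPersistent* wrapper = new DataAndPersistent;
    wrapper->data = bytes;
    v8::Handle<v8::ArrayBuffer> buffer =
        v8::ArrayBuffer::New(isolate, bytes, length);
    wrapper->handle.Reset(isolate, buffer);
    wrapper->handle.SetWeak(wrapper, OnBufferDead);
    wrapper->handle.MarkIndependent();
  }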
-#ifndef V8_SHARED
-static char* ReadToken(char* data, char token) {
- char* next = i::OS::StrChr(data, token);
- if (next != NULL) {
- *next = '\0';
- return (next + 1);
- }
-
- return NULL;
-}
-
-
-static char* ReadLine(char* data) {
- return ReadToken(data, '\n');
-}
-
-
-static char* ReadWord(char* data) {
- return ReadToken(data, ' ');
-}
-#endif // V8_SHARED
-
-
// Reads a file into a v8 string.
Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
int size = 0;
@@ -1172,7 +1105,6 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
void Shell::RunShell(Isolate* isolate) {
- Locker locker(isolate);
HandleScope outer_scope(isolate);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, evaluation_context_);
@@ -1192,76 +1124,11 @@ void Shell::RunShell(Isolate* isolate) {
}
-#ifndef V8_SHARED
-class ShellThread : public i::Thread {
- public:
- // Takes ownership of the underlying char array of |files|.
- ShellThread(Isolate* isolate, char* files)
- : Thread("d8:ShellThread"),
- isolate_(isolate), files_(files) { }
-
- ~ShellThread() {
- delete[] files_;
- }
-
- virtual void Run();
- private:
- Isolate* isolate_;
- char* files_;
-};
-
-
-void ShellThread::Run() {
- char* ptr = files_;
- while ((ptr != NULL) && (*ptr != '\0')) {
- // For each newline-separated line.
- char* next_line = ReadLine(ptr);
-
- if (*ptr == '#') {
- // Skip comment lines.
- ptr = next_line;
- continue;
- }
-
- // Prepare the context for this thread.
- Locker locker(isolate_);
- HandleScope outer_scope(isolate_);
- Local<Context> thread_context =
- Shell::CreateEvaluationContext(isolate_);
- Context::Scope context_scope(thread_context);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_));
-
- while ((ptr != NULL) && (*ptr != '\0')) {
- HandleScope inner_scope(isolate_);
- char* filename = ptr;
- ptr = ReadWord(ptr);
-
- // Skip empty strings.
- if (strlen(filename) == 0) {
- continue;
- }
-
- Handle<String> str = Shell::ReadFile(isolate_, filename);
- if (str.IsEmpty()) {
- printf("File '%s' not found\n", filename);
- Shell::Exit(1);
- }
-
- Shell::ExecuteString(
- isolate_, str, String::NewFromUtf8(isolate_, filename), false, false);
- }
-
- ptr = next_line;
- }
-}
-#endif // V8_SHARED
-
-
SourceGroup::~SourceGroup() {
#ifndef V8_SHARED
delete thread_;
thread_ = NULL;
-#endif // V8_SHARED
+#endif // !V8_SHARED
}
@@ -1329,7 +1196,6 @@ void SourceGroup::ExecuteInThread() {
next_semaphore_.Wait();
{
Isolate::Scope iscope(isolate);
- Locker lock(isolate);
{
HandleScope scope(isolate);
PerIsolateData data(isolate);
@@ -1345,9 +1211,16 @@ void SourceGroup::ExecuteInThread() {
V8::ContextDisposedNotification();
V8::IdleNotification(kLongIdlePauseInMs);
}
+ if (Shell::options.invoke_weak_callbacks) {
+ // By sending a low memory notification, we will try hard to collect
+ // all garbage and will therefore also invoke all weak callbacks of
+ // actually unreachable persistent handles.
+ V8::LowMemoryNotification();
+ }
}
done_semaphore_.Signal();
} while (!Shell::options.last_run);
+
isolate->Dispose();
}
@@ -1369,10 +1242,16 @@ void SourceGroup::WaitForThread() {
done_semaphore_.Wait();
}
}
-#endif // V8_SHARED
+#endif // !V8_SHARED
+
+
+void SetFlagsFromString(const char* flags) {
+ v8::V8::SetFlagsFromString(flags, static_cast<int>(strlen(flags)));
+}
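This small helper just saves callers from spelling out the length argument of V8::SetFlagsFromString; d8 uses it below for the --nologfile_per_isolate default and, in Shell::Main, for the hydrogen/code trace files:

  // Equivalent to v8::V8::SetFlagsFromString(flags, strlen(flags)):
  SetFlagsFromString("--nologfile_per_isolate");
  SetFlagsFromString("--trace-hydrogen-file=hydrogen.cfg");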
bool Shell::SetOptions(int argc, char* argv[]) {
+ bool logfile_per_isolate = false;
for (int i = 0; i < argc; i++) {
if (strcmp(argv[i], "--stress-opt") == 0) {
options.stress_opt = true;
@@ -1390,6 +1269,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
// No support for stressing if we can't use --always-opt.
options.stress_opt = false;
options.stress_deopt = false;
+ } else if (strcmp(argv[i], "--logfile-per-isolate") == 0) {
+ logfile_per_isolate = true;
+ argv[i] = NULL;
} else if (strcmp(argv[i], "--shell") == 0) {
options.interactive_shell = true;
argv[i] = NULL;
@@ -1399,6 +1281,11 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--send-idle-notification") == 0) {
options.send_idle_notification = true;
argv[i] = NULL;
+ } else if (strcmp(argv[i], "--invoke-weak-callbacks") == 0) {
+ options.invoke_weak_callbacks = true;
+ // TODO(jochen) See issue 3351
+ options.send_idle_notification = true;
+ argv[i] = NULL;
} else if (strcmp(argv[i], "-f") == 0) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
@@ -1409,13 +1296,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
return false;
#endif // V8_SHARED
options.num_isolates++;
- } else if (strcmp(argv[i], "-p") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- options.num_parallel_files++;
-#endif // V8_SHARED
} else if (strcmp(argv[i], "--dump-heap-constants") == 0) {
#ifdef V8_SHARED
printf("D8 with shared library does not support constant dumping\n");
@@ -1423,45 +1303,23 @@ bool Shell::SetOptions(int argc, char* argv[]) {
#else
options.dump_heap_constants = true;
argv[i] = NULL;
-#endif
+#endif // V8_SHARED
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
argv[i] = NULL;
- }
+ } else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
+ options.icu_data_file = argv[i] + 16;
+ argv[i] = NULL;
#ifdef V8_SHARED
- else if (strcmp(argv[i], "--dump-counters") == 0) {
+ } else if (strcmp(argv[i], "--dump-counters") == 0) {
printf("D8 with shared library does not include counters\n");
return false;
} else if (strcmp(argv[i], "--debugger") == 0) {
printf("Javascript debugger not included\n");
return false;
- }
#endif // V8_SHARED
- }
-
-#ifndef V8_SHARED
- // Run parallel threads if we are not using --isolate
- options.parallel_files = new char*[options.num_parallel_files];
- int parallel_files_set = 0;
- for (int i = 1; i < argc; i++) {
- if (argv[i] == NULL) continue;
- if (strcmp(argv[i], "-p") == 0 && i + 1 < argc) {
- if (options.num_isolates > 1) {
- printf("-p is not compatible with --isolate\n");
- return false;
- }
- argv[i] = NULL;
- i++;
- options.parallel_files[parallel_files_set] = argv[i];
- parallel_files_set++;
- argv[i] = NULL;
}
}
- if (parallel_files_set != options.num_parallel_files) {
- printf("-p requires a file containing a list of files as parameter\n");
- return false;
- }
-#endif // V8_SHARED
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
@@ -1481,93 +1339,61 @@ bool Shell::SetOptions(int argc, char* argv[]) {
}
current->End(argc);
+ if (!logfile_per_isolate && options.num_isolates) {
+ SetFlagsFromString("--nologfile_per_isolate");
+ }
+
return true;
}
int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#ifndef V8_SHARED
- i::List<i::Thread*> threads(1);
- if (options.parallel_files != NULL) {
- for (int i = 0; i < options.num_parallel_files; i++) {
- char* files = NULL;
- { Locker lock(isolate);
- int size = 0;
- files = ReadChars(isolate, options.parallel_files[i], &size);
- }
- if (files == NULL) {
- printf("File list '%s' not found\n", options.parallel_files[i]);
- Exit(1);
- }
- ShellThread* thread = new ShellThread(isolate, files);
- thread->Start();
- threads.Add(thread);
- }
- }
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].StartExecuteInThread();
}
-#endif // V8_SHARED
- { // NOLINT
- Locker lock(isolate);
- {
- HandleScope scope(isolate);
- Local<Context> context = CreateEvaluationContext(isolate);
- if (options.last_run) {
- // Keep using the same context in the interactive shell.
- evaluation_context_.Reset(isolate, context);
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // If the interactive debugger is enabled make sure to activate
- // it before running the files passed on the command line.
- if (i::FLAG_debugger) {
- InstallUtilityScript(isolate);
- }
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- }
- {
- Context::Scope cscope(context);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- options.isolate_sources[0].Execute(isolate);
+#endif // !V8_SHARED
+ {
+ HandleScope scope(isolate);
+ Local<Context> context = CreateEvaluationContext(isolate);
+ if (options.last_run && options.use_interactive_shell()) {
+ // Keep using the same context in the interactive shell.
+ evaluation_context_.Reset(isolate, context);
+#ifndef V8_SHARED
+ // If the interactive debugger is enabled make sure to activate
+ // it before running the files passed on the command line.
+ if (i::FLAG_debugger) {
+ InstallUtilityScript(isolate);
}
+#endif // !V8_SHARED
}
- if (!options.last_run) {
- if (options.send_idle_notification) {
- const int kLongIdlePauseInMs = 1000;
- V8::ContextDisposedNotification();
- V8::IdleNotification(kLongIdlePauseInMs);
- }
+ {
+ Context::Scope cscope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
+ options.isolate_sources[0].Execute(isolate);
}
}
+ if (options.send_idle_notification) {
+ const int kLongIdlePauseInMs = 1000;
+ V8::ContextDisposedNotification();
+ V8::IdleNotification(kLongIdlePauseInMs);
+ }
+ if (options.invoke_weak_callbacks) {
+ // By sending a low memory notification, we will try hard to collect all
+ // garbage and will therefore also invoke all weak callbacks of actually
+ // unreachable persistent handles.
+ V8::LowMemoryNotification();
+ }
#ifndef V8_SHARED
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].WaitForThread();
}
-
- for (int i = 0; i < threads.length(); i++) {
- i::Thread* thread = threads[i];
- thread->Join();
- delete thread;
- }
-#endif // V8_SHARED
+#endif // !V8_SHARED
return 0;
}
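With the Locker gone (each shell thread now owns its own isolate, so no cross-thread isolate sharing remains), RunMain flattens into a single scope, and the idle notification runs after every pass instead of only on non-final runs. The new --invoke-weak-callbacks flag, which per the TODO above also implies --send-idle-notification, relies on V8::LowMemoryNotification forcing the most aggressive collection available, so weak callbacks for genuinely unreachable persistent handles get a chance to fire. The per-run epilogue, consolidated:

  if (options.send_idle_notification) {
    const int kLongIdlePauseInMs = 1000;
    v8::V8::ContextDisposedNotification();
    v8::V8::IdleNotification(kLongIdlePauseInMs);
  }
  if (options.invoke_weak_callbacks) {
    // A low memory notification triggers a full collection, so weak
    // callbacks of actually unreachable persistent handles are invoked.
    v8::V8::LowMemoryNotification();
  }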
-#ifdef V8_SHARED
-static void SetStandaloneFlagsViaCommandLine() {
- int fake_argc = 2;
- char **fake_argv = new char*[2];
- fake_argv[0] = NULL;
- fake_argv[1] = strdup("--trace-hydrogen-file=hydrogen.cfg");
- fake_argv[2] = strdup("--redirect-code-traces-to=code.asm");
- v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false);
- free(fake_argv[1]);
- delete[] fake_argv;
-}
-#endif
-
-
#ifndef V8_SHARED
static void DumpHeapConstants(i::Isolate* isolate) {
i::Heap* heap = isolate->heap();
@@ -1622,26 +1448,17 @@ static void DumpHeapConstants(i::Isolate* isolate) {
printf("}\n");
#undef ROOT_LIST_CASE
}
-#endif // V8_SHARED
+#endif // !V8_SHARED
class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
- void* result = malloc(length);
- memset(result, 0, length);
- return result;
- }
- virtual void* AllocateUninitialized(size_t length) {
- return malloc(length);
+ void* data = AllocateUninitialized(length);
+ return data == NULL ? data : memset(data, 0, length);
}
+ virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
- // TODO(dslomov): Remove when v8:2823 is fixed.
- virtual void Free(void* data) {
-#ifndef V8_SHARED
- UNREACHABLE();
-#endif
- }
};
@@ -1653,20 +1470,17 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
virtual void* AllocateUninitialized(size_t length) V8_OVERRIDE {
return malloc(0);
}
- virtual void Free(void*, size_t) V8_OVERRIDE {
+ virtual void Free(void* p, size_t) V8_OVERRIDE {
+ free(p);
}
};
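Two allocator fixes land together here: ShellArrayBufferAllocator::Allocate now routes through AllocateUninitialized and only zeroes on success, so a failed malloc propagates as NULL instead of being memset; and MockArrayBufferAllocator::Free actually frees its zero-length allocations instead of leaking them. The stop-gap Free(void*) overload from v8:2823 is gone. Registration is unchanged:

  ShellArrayBufferAllocator array_buffer_allocator;
  v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);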
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
- v8::V8::InitializeICU();
-#ifndef V8_SHARED
- i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
- i::FLAG_redirect_code_traces_to = "code.asm";
-#else
- SetStandaloneFlagsViaCommandLine();
-#endif
+ v8::V8::InitializeICU(options.icu_data_file);
+ SetFlagsFromString("--trace-hydrogen-file=hydrogen.cfg");
+ SetFlagsFromString("--redirect-code-traces-to=code.asm");
ShellArrayBufferAllocator array_buffer_allocator;
MockArrayBufferAllocator mock_arraybuffer_allocator;
if (options.mock_arraybuffer_allocator) {
@@ -1675,15 +1489,17 @@ int Shell::Main(int argc, char* argv[]) {
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
}
int result = 0;
- Isolate* isolate = Isolate::GetCurrent();
+ Isolate* isolate = Isolate::New();
#ifndef V8_SHARED
v8::ResourceConstraints constraints;
constraints.ConfigureDefaults(i::OS::TotalPhysicalMemory(),
- i::CPU::NumberOfProcessorsOnline());
+ i::OS::MaxVirtualMemory(),
+ i::OS::NumberOfProcessorsOnline());
v8::SetResourceConstraints(isolate, &constraints);
#endif
DumbLineEditor dumb_line_editor(isolate);
{
+ Isolate::Scope scope(isolate);
Initialize(isolate);
#ifdef ENABLE_VTUNE_JIT_INTERFACE
vTune::InitializeVtuneForV8();
@@ -1724,29 +1540,18 @@ int Shell::Main(int argc, char* argv[]) {
result = RunMain(isolate, argc, argv);
}
-
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // Run remote debugger if requested, but never on --test
- if (i::FLAG_remote_debugger && !options.test_shell) {
- InstallUtilityScript(isolate);
- RunRemoteDebugger(isolate, i::FLAG_debugger_port);
- return 0;
- }
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
-
// Run interactive shell if explicitly requested or if no script has been
// executed, but never on --test
-
- if (( options.interactive_shell || !options.script_executed )
- && !options.test_shell ) {
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+ if (options.use_interactive_shell()) {
+#ifndef V8_SHARED
if (!i::FLAG_debugger) {
InstallUtilityScript(isolate);
}
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+#endif // !V8_SHARED
RunShell(isolate);
}
}
+ isolate->Dispose();
V8::Dispose();
OnExit();
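Shell::Main stops piggy-backing on the implicitly created default isolate (Isolate::GetCurrent) and instead creates, enters, and disposes its own; ResourceConstraints::ConfigureDefaults also grows a max-virtual-memory argument in this version. The matching lifecycle in miniature (a sketch, not the full function):

  v8::Isolate* isolate = v8::Isolate::New();
  {
    v8::Isolate::Scope scope(isolate);  // enter for the whole run
    // ... Initialize(isolate), RunMain, RunShell ...
  }                                     // exit before disposal
  isolate->Dispose();  // an entered isolate must not be disposed
  v8::V8::Dispose();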
diff --git a/chromium/v8/src/d8.gyp b/chromium/v8/src/d8.gyp
index 097abc04652..b353eb01369 100644
--- a/chromium/v8/src/d8.gyp
+++ b/chromium/v8/src/d8.gyp
@@ -32,6 +32,7 @@
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
'v8_enable_i18n_support%': 1,
+ 'v8_toolset_for_d8%': 'target',
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
@@ -43,12 +44,15 @@
],
# Generated source files need this explicitly:
'include_dirs+': [
- '../src',
+ '..',
],
'sources': [
'd8.cc',
],
'conditions': [
+ [ 'want_separate_host_toolset==1', {
+ 'toolsets': [ '<(v8_toolset_for_d8)', ],
+ }],
[ 'console=="readline"', {
'libraries': [ '-lreadline', ],
'sources': [ 'd8-readline.cc' ],
@@ -66,7 +70,8 @@
],
}],
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
- or OS=="openbsd" or OS=="solaris" or OS=="android")', {
+ or OS=="openbsd" or OS=="solaris" or OS=="android" \
+ or OS=="qnx")', {
'sources': [ 'd8-posix.cc', ]
}],
[ 'OS=="win"', {
diff --git a/chromium/v8/src/d8.h b/chromium/v8/src/d8.h
index 39352000fd9..143eabb8abc 100644
--- a/chromium/v8/src/d8.h
+++ b/chromium/v8/src/d8.h
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_D8_H_
#define V8_D8_H_
#ifndef V8_SHARED
-#include "allocation.h"
-#include "hashmap.h"
-#include "smart-pointers.h"
-#include "v8.h"
+#include "src/allocation.h"
+#include "src/hashmap.h"
+#include "src/smart-pointers.h"
+#include "src/v8.h"
#else
-#include "../include/v8.h"
-#endif // V8_SHARED
+#include "include/v8.h"
+#endif // !V8_SHARED
namespace v8 {
@@ -113,7 +90,7 @@ class CounterMap {
static bool Match(void* key1, void* key2);
i::HashMap hash_map_;
};
-#endif // V8_SHARED
+#endif // !V8_SHARED
class LineEditor {
@@ -143,7 +120,7 @@ class SourceGroup {
next_semaphore_(0),
done_semaphore_(0),
thread_(NULL),
-#endif // V8_SHARED
+#endif // !V8_SHARED
argv_(NULL),
begin_offset_(0),
end_offset_(0) {}
@@ -183,7 +160,7 @@ class SourceGroup {
i::Semaphore next_semaphore_;
i::Semaphore done_semaphore_;
i::Thread* thread_;
-#endif // V8_SHARED
+#endif // !V8_SHARED
void ExitShell(int exit_code);
Handle<String> ReadFile(Isolate* isolate, const char* name);
@@ -218,13 +195,10 @@ class BinaryResource : public v8::String::ExternalAsciiStringResource {
class ShellOptions {
public:
ShellOptions() :
-#ifndef V8_SHARED
- num_parallel_files(0),
- parallel_files(NULL),
-#endif // V8_SHARED
script_executed(false),
last_run(true),
send_idle_notification(false),
+ invoke_weak_callbacks(false),
stress_opt(false),
stress_deopt(false),
interactive_shell(false),
@@ -233,22 +207,21 @@ class ShellOptions {
expected_to_throw(false),
mock_arraybuffer_allocator(false),
num_isolates(1),
- isolate_sources(NULL) { }
+ isolate_sources(NULL),
+ icu_data_file(NULL) { }
~ShellOptions() {
-#ifndef V8_SHARED
- delete[] parallel_files;
-#endif // V8_SHARED
delete[] isolate_sources;
}
-#ifndef V8_SHARED
- int num_parallel_files;
- char** parallel_files;
-#endif // V8_SHARED
+ bool use_interactive_shell() {
+ return (interactive_shell || !script_executed) && !test_shell;
+ }
+
bool script_executed;
bool last_run;
bool send_idle_notification;
+ bool invoke_weak_callbacks;
bool stress_opt;
bool stress_deopt;
bool interactive_shell;
@@ -258,6 +231,7 @@ class ShellOptions {
bool mock_arraybuffer_allocator;
int num_isolates;
SourceGroup* isolate_sources;
+ const char* icu_data_file;
};
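The condition previously spelled inline in Shell::Main, (interactive_shell || !script_executed) && !test_shell, is hoisted into the options object as use_interactive_shell(), so the REPL decision lives in one place. Call sites reduce to:

  if (options.last_run && options.use_interactive_shell()) {
    // keep the evaluation context alive for the interactive shell
  }
  // ... and later, after running the sources:
  if (options.use_interactive_shell()) {
    RunShell(isolate);
  }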
#ifdef V8_SHARED
@@ -291,18 +265,15 @@ class Shell : public i::AllStatic {
int max,
size_t buckets);
static void AddHistogramSample(void* histogram, int sample);
- static void MapCounters(const char* name);
+ static void MapCounters(v8::Isolate* isolate, const char* name);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static Handle<Object> DebugMessageDetails(Isolate* isolate,
- Handle<String> message);
- static Handle<Value> DebugCommandToJSONRequest(Isolate* isolate,
- Handle<String> command);
- static void DispatchDebugMessages();
-#endif // ENABLE_DEBUGGER_SUPPORT
+ static Local<Object> DebugMessageDetails(Isolate* isolate,
+ Handle<String> message);
+ static Local<Value> DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command);
static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args);
-#endif // V8_SHARED
+#endif // !V8_SHARED
static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -398,7 +369,7 @@ class Shell : public i::AllStatic {
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
-#endif // V8_SHARED
+#endif // !V8_SHARED
static void Initialize(Isolate* isolate);
static void InitializeDebugger(Isolate* isolate);
static void RunShell(Isolate* isolate);
diff --git a/chromium/v8/src/d8.js b/chromium/v8/src/d8.js
index 35b61d54ee7..3f7832dd00b 100644
--- a/chromium/v8/src/d8.js
+++ b/chromium/v8/src/d8.js
@@ -1,29 +1,6 @@
// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
"use strict";
@@ -526,7 +503,7 @@ RequestPacket.prototype.toJSONProtocol = function() {
json += '"seq":' + this.seq;
json += ',"type":"' + this.type + '"';
if (this.command) {
- json += ',"command":' + StringToJSON_(this.command);
+ json += ',"command":' + JSON.stringify(this.command);
}
if (this.arguments) {
json += ',"arguments":';
@@ -534,7 +511,7 @@ RequestPacket.prototype.toJSONProtocol = function() {
if (this.arguments.toJSONProtocol) {
json += this.arguments.toJSONProtocol();
} else {
- json += SimpleObjectToJSON_(this.arguments);
+ json += JSON.stringify(this.arguments);
}
}
json += '}';
@@ -1988,214 +1965,6 @@ ProtocolReference.prototype.handle = function() {
};
-function MakeJSONPair_(name, value) {
- return '"' + name + '":' + value;
-}
-
-
-function ArrayToJSONObject_(content) {
- return '{' + content.join(',') + '}';
-}
-
-
-function ArrayToJSONArray_(content) {
- return '[' + content.join(',') + ']';
-}
-
-
-function BooleanToJSON_(value) {
- return String(value);
-}
-
-
-function NumberToJSON_(value) {
- return String(value);
-}
-
-
-// Mapping of some control characters to avoid the \uXXXX syntax for most
-// commonly used control cahracters.
-var ctrlCharMap_ = {
- '\b': '\\b',
- '\t': '\\t',
- '\n': '\\n',
- '\f': '\\f',
- '\r': '\\r',
- '"' : '\\"',
- '\\': '\\\\'
-};
-
-
-// Regular expression testing for ", \ and control characters (0x00 - 0x1F).
-var ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
-
-
-// Regular expression matching ", \ and control characters (0x00 - 0x1F)
-// globally.
-var ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
-
-
-/**
- * Convert a String to its JSON representation (see http://www.json.org/). To
- * avoid depending on the String object this method calls the functions in
- * string.js directly and not through the value.
- * @param {String} value The String value to format as JSON
- * @return {string} JSON formatted String value
- */
-function StringToJSON_(value) {
- // Check for" , \ and control characters (0x00 - 0x1F). No need to call
- // RegExpTest as ctrlchar is constructed using RegExp.
- if (ctrlCharTest_.test(value)) {
- // Replace ", \ and control characters (0x00 - 0x1F).
- return '"' +
- value.replace(ctrlCharMatch_, function (char) {
- // Use charmap if possible.
- var mapped = ctrlCharMap_[char];
- if (mapped) return mapped;
- mapped = char.charCodeAt();
- // Convert control character to unicode escape sequence.
- return '\\u00' +
- '0' + // TODO %NumberToRadixString(Math.floor(mapped / 16), 16) +
- '0'; // TODO %NumberToRadixString(mapped % 16, 16)
- })
- + '"';
- }
-
- // Simple string with no special characters.
- return '"' + value + '"';
-}
-
-
-/**
- * Convert a Date to ISO 8601 format. To avoid depending on the Date object
- * this method calls the functions in date.js directly and not through the
- * value.
- * @param {Date} value The Date value to format as JSON
- * @return {string} JSON formatted Date value
- */
-function DateToISO8601_(value) {
- var f = function(n) {
- return n < 10 ? '0' + n : n;
- };
- var g = function(n) {
- return n < 10 ? '00' + n : n < 100 ? '0' + n : n;
- };
- return builtins.GetUTCFullYearFrom(value) + '-' +
- f(builtins.GetUTCMonthFrom(value) + 1) + '-' +
- f(builtins.GetUTCDateFrom(value)) + 'T' +
- f(builtins.GetUTCHoursFrom(value)) + ':' +
- f(builtins.GetUTCMinutesFrom(value)) + ':' +
- f(builtins.GetUTCSecondsFrom(value)) + '.' +
- g(builtins.GetUTCMillisecondsFrom(value)) + 'Z';
-}
-
-
-/**
- * Convert a Date to ISO 8601 format. To avoid depending on the Date object
- * this method calls the functions in date.js directly and not through the
- * value.
- * @param {Date} value The Date value to format as JSON
- * @return {string} JSON formatted Date value
- */
-function DateToJSON_(value) {
- return '"' + DateToISO8601_(value) + '"';
-}
-
-
-/**
- * Convert an Object to its JSON representation (see http://www.json.org/).
- * This implementation simply runs through all string property names and adds
- * each property to the JSON representation for some predefined types. For type
- * "object" the function calls itself recursively unless the object has the
- * function property "toJSONProtocol" in which case that is used. This is not
- * a general implementation but sufficient for the debugger. Note that circular
- * structures will cause infinite recursion.
- * @param {Object} object The object to format as JSON
- * @return {string} JSON formatted object value
- */
-function SimpleObjectToJSON_(object) {
- var content = [];
- for (var key in object) {
- // Only consider string keys.
- if (typeof key == 'string') {
- var property_value = object[key];
-
- // Format the value based on its type.
- var property_value_json;
- switch (typeof property_value) {
- case 'object':
- if (IS_NULL(property_value)) {
- property_value_json = 'null';
- } else if (typeof property_value.toJSONProtocol == 'function') {
- property_value_json = property_value.toJSONProtocol(true);
- } else if (property_value.constructor.name == 'Array'){
- property_value_json = SimpleArrayToJSON_(property_value);
- } else {
- property_value_json = SimpleObjectToJSON_(property_value);
- }
- break;
-
- case 'boolean':
- property_value_json = BooleanToJSON_(property_value);
- break;
-
- case 'number':
- property_value_json = NumberToJSON_(property_value);
- break;
-
- case 'string':
- property_value_json = StringToJSON_(property_value);
- break;
-
- default:
- property_value_json = null;
- }
-
- // Add the property if relevant.
- if (property_value_json) {
- content.push(StringToJSON_(key) + ':' + property_value_json);
- }
- }
- }
-
- // Make JSON object representation.
- return '{' + content.join(',') + '}';
-}
-
-
-/**
- * Convert an array to its JSON representation. This is a VERY simple
- * implementation just to support what is needed for the debugger.
- * @param {Array} arrya The array to format as JSON
- * @return {string} JSON formatted array value
- */
-function SimpleArrayToJSON_(array) {
- // Make JSON array representation.
- var json = '[';
- for (var i = 0; i < array.length; i++) {
- if (i != 0) {
- json += ',';
- }
- var elem = array[i];
- if (elem.toJSONProtocol) {
- json += elem.toJSONProtocol(true);
- } else if (typeof(elem) === 'object') {
- json += SimpleObjectToJSON_(elem);
- } else if (typeof(elem) === 'boolean') {
- json += BooleanToJSON_(elem);
- } else if (typeof(elem) === 'number') {
- json += NumberToJSON_(elem);
- } else if (typeof(elem) === 'string') {
- json += StringToJSON_(elem);
- } else {
- json += elem;
- }
- }
- json += ']';
- return json;
-}
-
-
// A more universal stringify that supports more types than JSON.
// Used by the d8 shell to output results.
var stringifyDepthLimit = 4; // To avoid crashing on cyclic objects
diff --git a/chromium/v8/src/data-flow.cc b/chromium/v8/src/data-flow.cc
index 6a3b05cc86d..e591778fa1c 100644
--- a/chromium/v8/src/data-flow.cc
+++ b/chromium/v8/src/data-flow.cc
@@ -1,34 +1,11 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "data-flow.h"
-#include "scopes.h"
+#include "src/data-flow.h"
+#include "src/scopes.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/data-flow.h b/chromium/v8/src/data-flow.h
index 8ceccf67c54..5c214ae8319 100644
--- a/chromium/v8/src/data-flow.h
+++ b/chromium/v8/src/data-flow.h
@@ -1,39 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DATAFLOW_H_
#define V8_DATAFLOW_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "allocation.h"
-#include "ast.h"
-#include "compiler.h"
-#include "zone-inl.h"
+#include "src/allocation.h"
+#include "src/ast.h"
+#include "src/compiler.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/date.cc b/chromium/v8/src/date.cc
index a377451770f..3425ce24aac 100644
--- a/chromium/v8/src/date.cc
+++ b/chromium/v8/src/date.cc
@@ -1,42 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "date.h"
-
-#include "v8.h"
-
-#include "objects.h"
-#include "objects-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/date.h"
+
+#include "src/v8.h"
+
+#include "src/objects.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
static const int kDaysIn4Years = 4 * 365 + 1;
static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
@@ -50,9 +26,10 @@ static const char kDaysInMonths[] =
void DateCache::ResetDateCache() {
static const int kMaxStamp = Smi::kMaxValue;
- stamp_ = Smi::FromInt(stamp_->value() + 1);
- if (stamp_->value() > kMaxStamp) {
+ if (stamp_->value() >= kMaxStamp) {
stamp_ = Smi::FromInt(0);
+ } else {
+ stamp_ = Smi::FromInt(stamp_->value() + 1);
}
ASSERT(stamp_ != Smi::FromInt(kInvalidStamp));
for (int i = 0; i < kDSTSize; ++i) {
@@ -63,6 +40,7 @@ void DateCache::ResetDateCache() {
after_ = &dst_[1];
local_offset_ms_ = kInvalidLocalOffsetInMs;
ymd_valid_ = false;
+ OS::ClearTimezoneCache(tz_cache_);
}
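The reordered stamp logic fixes a latent overflow: the old code incremented first and only then compared with >, so Smi::FromInt(stamp_->value() + 1) could be asked to materialize kMaxStamp + 1, which no longer fits in a Smi. Testing >= kMaxStamp before incrementing wraps to zero without ever leaving the Smi range; the reset now also clears the new per-cache timezone data. In sketch form:

  // stamp_ is a Smi and kMaxStamp == Smi::kMaxValue.
  if (stamp_->value() >= kMaxStamp) {
    stamp_ = Smi::FromInt(0);  // wrap before overflowing the Smi
  } else {
    stamp_ = Smi::FromInt(stamp_->value() + 1);
  }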
diff --git a/chromium/v8/src/date.h b/chromium/v8/src/date.h
index fcd61db0467..89ae641c19a 100644
--- a/chromium/v8/src/date.h
+++ b/chromium/v8/src/date.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DATE_H_
#define V8_DATE_H_
-#include "allocation.h"
-#include "globals.h"
-#include "platform.h"
+#include "src/allocation.h"
+#include "src/globals.h"
+#include "src/platform.h"
namespace v8 {
@@ -62,11 +39,14 @@ class DateCache {
// It is an invariant of DateCache that cache stamp is non-negative.
static const int kInvalidStamp = -1;
- DateCache() : stamp_(0) {
+ DateCache() : stamp_(0), tz_cache_(OS::CreateTimezoneCache()) {
ResetDateCache();
}
- virtual ~DateCache() {}
+ virtual ~DateCache() {
+ OS::DisposeTimezoneCache(tz_cache_);
+ tz_cache_ = NULL;
+ }
// Clears cached timezone information and increments the cache stamp.
@@ -113,7 +93,7 @@ class DateCache {
if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
time_ms = EquivalentTime(time_ms);
}
- return OS::LocalTimezone(static_cast<double>(time_ms));
+ return OS::LocalTimezone(static_cast<double>(time_ms), tz_cache_);
}
// ECMA 262 - 15.9.5.26
@@ -182,11 +162,11 @@ class DateCache {
// These functions are virtual so that we can override them when testing.
virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
double time_ms = static_cast<double>(time_sec * 1000);
- return static_cast<int>(OS::DaylightSavingsOffset(time_ms));
+ return static_cast<int>(OS::DaylightSavingsOffset(time_ms, tz_cache_));
}
virtual int GetLocalOffsetFromOS() {
- double offset = OS::LocalTimeOffset();
+ double offset = OS::LocalTimeOffset(tz_cache_);
ASSERT(offset < kInvalidLocalOffsetInMs);
return static_cast<int>(offset);
}
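Timezone state moves out of process-global OS functions into an explicit TimezoneCache owned by each DateCache: created in the constructor, threaded through LocalTimezone, DaylightSavingsOffset, and LocalTimeOffset on every query, cleared on reset, and disposed in the destructor. A sketch of the ownership contract (the method name is illustrative), assuming the OS-level API shown in these hunks:

  class DateCache {
   public:
    DateCache() : tz_cache_(OS::CreateTimezoneCache()) {}
    virtual ~DateCache() {
      OS::DisposeTimezoneCache(tz_cache_);
      tz_cache_ = NULL;
    }
    const char* TimezoneName(double time_ms) {
      // Every OS timezone query is now parameterized by the cache.
      return OS::LocalTimezone(time_ms, tz_cache_);
    }
   private:
    TimezoneCache* tz_cache_;
  };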
@@ -253,6 +233,8 @@ class DateCache {
int ymd_year_;
int ymd_month_;
int ymd_day_;
+
+ TimezoneCache* tz_cache_;
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/date.js b/chromium/v8/src/date.js
index f3d4af244f9..c58903cc29c 100644
--- a/chromium/v8/src/date.js
+++ b/chromium/v8/src/date.js
@@ -1,29 +1,8 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
// This file relies on the fact that the following declarations have been made
// in v8natives.js:
@@ -46,6 +25,7 @@ var timezone_cache_timezone;
function LocalTimezone(t) {
if (NUMBER_IS_NAN(t)) return "";
+ CheckDateCacheCurrent();
if (t == timezone_cache_time) {
return timezone_cache_timezone;
}
@@ -156,6 +136,7 @@ function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
} else if (IS_STRING(year)) {
// Probe the Date cache. If we already have a time value for the
// given time, we re-use that instead of parsing the string again.
+ CheckDateCacheCurrent();
var cache = Date_cache;
if (cache.string === year) {
value = cache.time;
@@ -743,15 +724,26 @@ function DateToJSON(key) {
}
-function ResetDateCache() {
+var date_cache_version_holder;
+var date_cache_version = NAN;
+
+
+function CheckDateCacheCurrent() {
+ if (!date_cache_version_holder) {
+ date_cache_version_holder = %DateCacheVersion();
+ }
+ if (date_cache_version_holder[0] == date_cache_version) {
+ return;
+ }
+ date_cache_version = date_cache_version_holder[0];
+
// Reset the timezone cache:
timezone_cache_time = NAN;
- timezone_cache_timezone = undefined;
+ timezone_cache_timezone = UNDEFINED;
// Reset the date cache:
- cache = Date_cache;
- cache.time = NAN;
- cache.string = null;
+ Date_cache.time = NAN;
+ Date_cache.string = null;
}
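On the JavaScript side the push-style ResetDateCache hook is replaced by pull-style invalidation: the script lazily grabs a version holder from the runtime (%DateCacheVersion()) and compares its first element against the last version it observed before trusting any cached timezone or parsed-date value. The same idiom in C++ terms, as a hedged sketch (all names hypothetical; this illustrates the version-stamp pattern, not the V8 runtime function):

  static int g_cache_version = 0;  // bumped by whoever invalidates

  struct CachedTimezone {
    int seen_version;
    double time;
    const char* name;

    const char* Lookup(double t, const char* (*compute)(double)) {
      if (seen_version != g_cache_version) {
        name = NULL;  // stale: the producer bumped the version
        seen_version = g_cache_version;
      }
      if (name != NULL && t == time) return name;  // fresh cache hit
      time = t;
      name = compute(t);
      return name;
    }
  };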
diff --git a/chromium/v8/src/dateparser-inl.h b/chromium/v8/src/dateparser-inl.h
index 3cb36fa4339..c16812b55ac 100644
--- a/chromium/v8/src/dateparser-inl.h
+++ b/chromium/v8/src/dateparser-inl.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DATEPARSER_INL_H_
#define V8_DATEPARSER_INL_H_
-#include "dateparser.h"
+#include "src/dateparser.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/dateparser.cc b/chromium/v8/src/dateparser.cc
index 3964e811780..71809114a04 100644
--- a/chromium/v8/src/dateparser.cc
+++ b/chromium/v8/src/dateparser.cc
@@ -1,33 +1,10 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "dateparser.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/dateparser.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/dateparser.h b/chromium/v8/src/dateparser.h
index 27584ce39ef..0a0a6f0d9de 100644
--- a/chromium/v8/src/dateparser.h
+++ b/chromium/v8/src/dateparser.h
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DATEPARSER_H_
#define V8_DATEPARSER_H_
-#include "allocation.h"
-#include "char-predicates-inl.h"
+#include "src/allocation.h"
+#include "src/char-predicates-inl.h"
namespace v8 {
namespace internal {
@@ -122,7 +99,7 @@ class DateParser : public AllStatic {
}
bool SkipWhiteSpace() {
- if (unicode_cache_->IsWhiteSpace(ch_)) {
+ if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
Next();
return true;
}
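Date parsing now also skips line terminators, not just WhiteSpace proper, matching the whitespace notion used elsewhere in the scanner (so a '\n' between date components no longer aborts the parse). The predicate swap in isolation:

  bool SkipWhiteSpace() {
    // Was: unicode_cache_->IsWhiteSpace(ch_)
    if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
      Next();
      return true;
    }
    return false;
  }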
diff --git a/chromium/v8/src/debug-agent.cc b/chromium/v8/src/debug-agent.cc
deleted file mode 100644
index 51823aaf24c..00000000000
--- a/chromium/v8/src/debug-agent.cc
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-#include "v8.h"
-#include "debug.h"
-#include "debug-agent.h"
-#include "platform/socket.h"
-
-namespace v8 {
-namespace internal {
-
-// Public V8 debugger API message handler function. This function just delegates
-// to the debugger agent through it's data parameter.
-void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
- Isolate* isolate = reinterpret_cast<Isolate*>(message.GetIsolate());
- DebuggerAgent* agent = isolate->debugger_agent_instance();
- ASSERT(agent != NULL);
- agent->DebuggerMessage(message);
-}
-
-
-DebuggerAgent::DebuggerAgent(Isolate* isolate, const char* name, int port)
- : Thread(name),
- isolate_(isolate),
- name_(StrDup(name)),
- port_(port),
- server_(new Socket),
- terminate_(false),
- session_(NULL),
- terminate_now_(0),
- listening_(0) {
- ASSERT(isolate_->debugger_agent_instance() == NULL);
- isolate_->set_debugger_agent_instance(this);
-}
-
-
-DebuggerAgent::~DebuggerAgent() {
- isolate_->set_debugger_agent_instance(NULL);
- delete server_;
-}
-
-
-// Debugger agent main thread.
-void DebuggerAgent::Run() {
- // Allow this socket to reuse port even if still in TIME_WAIT.
- server_->SetReuseAddress(true);
-
- // First bind the socket to the requested port.
- bool bound = false;
- while (!bound && !terminate_) {
- bound = server_->Bind(port_);
-
- // If an error occurred wait a bit before retrying. The most common error
- // would be that the port is already in use so this avoids a busy loop and
- // make the agent take over the port when it becomes free.
- if (!bound) {
- const TimeDelta kTimeout = TimeDelta::FromSeconds(1);
- PrintF("Failed to open socket on port %d, "
- "waiting %d ms before retrying\n", port_,
- static_cast<int>(kTimeout.InMilliseconds()));
- if (!terminate_now_.WaitFor(kTimeout)) {
- if (terminate_) return;
- }
- }
- }
-
- // Accept connections on the bound port.
- while (!terminate_) {
- bool ok = server_->Listen(1);
- listening_.Signal();
- if (ok) {
- // Accept the new connection.
- Socket* client = server_->Accept();
- ok = client != NULL;
- if (ok) {
- // Create and start a new session.
- CreateSession(client);
- }
- }
- }
-}
-
-
-void DebuggerAgent::Shutdown() {
- // Set the termination flag.
- terminate_ = true;
-
- // Signal termination and make the server exit either its listen call or its
- // binding loop. This makes sure that no new sessions can be established.
- terminate_now_.Signal();
- server_->Shutdown();
- Join();
-
- // Close existing session if any.
- CloseSession();
-}
-
-
-void DebuggerAgent::WaitUntilListening() {
- listening_.Wait();
-}
-
-static const char* kCreateSessionMessage =
- "Remote debugging session already active\r\n";
-
-void DebuggerAgent::CreateSession(Socket* client) {
- LockGuard<RecursiveMutex> session_access_guard(&session_access_);
-
- // If another session is already established terminate this one.
- if (session_ != NULL) {
- int len = StrLength(kCreateSessionMessage);
- int res = client->Send(kCreateSessionMessage, len);
- delete client;
- USE(res);
- return;
- }
-
- // Create a new session and hook up the debug message handler.
- session_ = new DebuggerAgentSession(this, client);
- isolate_->debugger()->SetMessageHandler(DebuggerAgentMessageHandler);
- session_->Start();
-}
-
-
-void DebuggerAgent::CloseSession() {
- LockGuard<RecursiveMutex> session_access_guard(&session_access_);
-
- // Terminate the session.
- if (session_ != NULL) {
- session_->Shutdown();
- session_->Join();
- delete session_;
- session_ = NULL;
- }
-}
-
-
-void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
- LockGuard<RecursiveMutex> session_access_guard(&session_access_);
-
- // Forward the message handling to the session.
- if (session_ != NULL) {
- v8::String::Value val(message.GetJSON());
- session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(*val),
- val.length()));
- }
-}
-
-
-void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
- // Don't do anything during termination.
- if (terminate_) {
- return;
- }
-
- // Terminate the session.
- LockGuard<RecursiveMutex> session_access_guard(&session_access_);
- ASSERT(session == session_);
- if (session == session_) {
- session_->Shutdown();
- delete session_;
- session_ = NULL;
- }
-}
-
-
-void DebuggerAgentSession::Run() {
- // Send the hello message.
- bool ok = DebuggerAgentUtil::SendConnectMessage(client_, *agent_->name_);
- if (!ok) return;
-
- while (true) {
- // Read data from the debugger front end.
- SmartArrayPointer<char> message =
- DebuggerAgentUtil::ReceiveMessage(client_);
-
- const char* msg = *message;
- bool is_closing_session = (msg == NULL);
-
- if (msg == NULL) {
- // If we lost the connection, then simulate a disconnect msg:
- msg = "{\"seq\":1,\"type\":\"request\",\"command\":\"disconnect\"}";
-
- } else {
- // Check if we're getting a disconnect request:
- const char* disconnectRequestStr =
- "\"type\":\"request\",\"command\":\"disconnect\"}";
- const char* result = strstr(msg, disconnectRequestStr);
- if (result != NULL) {
- is_closing_session = true;
- }
- }
-
- // Convert UTF-8 to UTF-16.
- unibrow::Utf8Decoder<128> decoder(msg, StrLength(msg));
- int utf16_length = decoder.Utf16Length();
- ScopedVector<uint16_t> temp(utf16_length + 1);
- decoder.WriteUtf16(temp.start(), utf16_length);
-
- // Send the request received to the debugger.
- v8::Debug::SendCommand(temp.start(),
- utf16_length,
- NULL,
- reinterpret_cast<v8::Isolate*>(agent_->isolate()));
-
- if (is_closing_session) {
- // Session is closed.
- agent_->OnSessionClosed(this);
- return;
- }
- }
-}
-
-
-void DebuggerAgentSession::DebuggerMessage(Vector<uint16_t> message) {
- DebuggerAgentUtil::SendMessage(client_, message);
-}
-
-
-void DebuggerAgentSession::Shutdown() {
- // Shutdown the socket to end the blocking receive.
- client_->Shutdown();
-}
-
-
-const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-
-
-SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(Socket* conn) {
- int received;
-
- // Read header.
- int content_length = 0;
- while (true) {
- const int kHeaderBufferSize = 80;
- char header_buffer[kHeaderBufferSize];
- int header_buffer_position = 0;
- char c = '\0'; // One character receive buffer.
- char prev_c = '\0'; // Previous character.
-
- // Read until CRLF.
- while (!(c == '\n' && prev_c == '\r')) {
- prev_c = c;
- received = conn->Receive(&c, 1);
- if (received == 0) {
- PrintF("Error %d\n", Socket::GetLastError());
- return SmartArrayPointer<char>();
- }
-
- // Add character to header buffer.
- if (header_buffer_position < kHeaderBufferSize) {
- header_buffer[header_buffer_position++] = c;
- }
- }
-
- // Check for end of header (empty header line).
- if (header_buffer_position == 2) { // Receive buffer contains CRLF.
- break;
- }
-
- // Terminate header.
- ASSERT(header_buffer_position > 1); // At least CRLF is received.
- ASSERT(header_buffer_position <= kHeaderBufferSize);
- header_buffer[header_buffer_position - 2] = '\0';
-
- // Split header.
- char* key = header_buffer;
- char* value = NULL;
- for (int i = 0; header_buffer[i] != '\0'; i++) {
- if (header_buffer[i] == ':') {
- header_buffer[i] = '\0';
- value = header_buffer + i + 1;
- while (*value == ' ') {
- value++;
- }
- break;
- }
- }
-
- // Check that key is Content-Length.
- if (strcmp(key, kContentLength) == 0) {
- // Get the content length value if present and within a sensible range.
- if (value == NULL || strlen(value) > 7) {
- return SmartArrayPointer<char>();
- }
- for (int i = 0; value[i] != '\0'; i++) {
- // Bail out if illegal data.
- if (value[i] < '0' || value[i] > '9') {
- return SmartArrayPointer<char>();
- }
- content_length = 10 * content_length + (value[i] - '0');
- }
- } else {
-      // For now just print all headers other than Content-Length.
- PrintF("%s: %s\n", key, value != NULL ? value : "(no value)");
- }
- }
-
- // Return now if no body.
- if (content_length == 0) {
- return SmartArrayPointer<char>();
- }
-
- // Read body.
- char* buffer = NewArray<char>(content_length + 1);
- received = ReceiveAll(conn, buffer, content_length);
- if (received < content_length) {
- PrintF("Error %d\n", Socket::GetLastError());
- return SmartArrayPointer<char>();
- }
- buffer[content_length] = '\0';
-
- return SmartArrayPointer<char>(buffer);
-}
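
ReceiveMessage() implements a minimal HTTP-like framing: CRLF-terminated header lines, an empty line, then a body whose size comes from Content-Length. A sketch of the same framing applied to a complete in-memory buffer, in standard C++, with the socket reads and digit validation of the original elided:

```cpp
#include <optional>
#include <string>

// Parse one "Header: value\r\n ... \r\n\r\n<body>" frame from a buffer.
// Returns the body, or std::nullopt if the frame is incomplete. Mirrors
// DebuggerAgentUtil::ReceiveMessage() minus the socket I/O; the original
// additionally rejects non-digit or overlong Content-Length values.
std::optional<std::string> ParseFrame(const std::string& data) {
  static const std::string kHeaderEnd = "\r\n\r\n";
  size_t header_end = data.find(kHeaderEnd);
  if (header_end == std::string::npos) return std::nullopt;

  size_t content_length = 0;
  size_t pos = 0;
  while (pos < header_end) {
    size_t eol = data.find("\r\n", pos);
    std::string line = data.substr(pos, eol - pos);
    size_t colon = line.find(':');
    if (colon != std::string::npos &&
        line.compare(0, colon, "Content-Length") == 0) {
      content_length = std::stoul(line.substr(colon + 1));  // Skips blanks.
    }
    pos = eol + 2;
  }

  size_t body_start = header_end + kHeaderEnd.size();
  if (data.size() - body_start < content_length) return std::nullopt;
  return data.substr(body_start, content_length);
}
```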
-
-
-bool DebuggerAgentUtil::SendConnectMessage(Socket* conn,
- const char* embedding_host) {
- static const int kBufferSize = 80;
- char buffer[kBufferSize]; // Sending buffer.
- bool ok;
- int len;
-
- // Send the header.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Type: connect\r\n");
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "V8-Version: %s\r\n", v8::V8::GetVersion());
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Protocol-Version: 1\r\n");
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- if (embedding_host != NULL) {
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Embedding-Host: %s\r\n", embedding_host);
- ok = conn->Send(buffer, len);
- if (!ok) return false;
- }
-
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "%s: 0\r\n", kContentLength);
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- // Terminate header with empty line.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- ok = conn->Send(buffer, len);
- if (!ok) return false;
-
- // No body for connect message.
-
- return true;
-}
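
Put together, the SNPrintF calls above produce a header-only handshake. Reconstructed byte-for-byte, with placeholders where the real output depends on the build and the embedder:

```cpp
// Byte-for-byte shape of the handshake emitted by SendConnectMessage().
// "3.x.x" and "d8" are placeholders for the actual V8 version and
// embedding host; the Embedding-Host line is omitted when no host name
// was supplied. The empty line ends the header, and there is no body.
const char kConnectFrame[] =
    "Type: connect\r\n"
    "V8-Version: 3.x.x\r\n"
    "Protocol-Version: 1\r\n"
    "Embedding-Host: d8\r\n"
    "Content-Length: 0\r\n"
    "\r\n";
```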
-
-
-bool DebuggerAgentUtil::SendMessage(Socket* conn,
- const Vector<uint16_t> message) {
- static const int kBufferSize = 80;
- char buffer[kBufferSize]; // Sending buffer both for header and body.
-
- // Calculate the message size in UTF-8 encoding.
- int utf8_len = 0;
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = 0; i < message.length(); i++) {
- uint16_t character = message[i];
- utf8_len += unibrow::Utf8::Length(character, previous);
- previous = character;
- }
-
- // Send the header.
- int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "%s: %d\r\n", kContentLength, utf8_len);
- if (conn->Send(buffer, len) < len) {
- return false;
- }
-
- // Terminate header with empty line.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- if (conn->Send(buffer, len) < len) {
- return false;
- }
-
- // Send message body as UTF-8.
- int buffer_position = 0; // Current buffer position.
- previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = 0; i < message.length(); i++) {
- // Write next UTF-8 encoded character to buffer.
- uint16_t character = message[i];
- buffer_position +=
- unibrow::Utf8::Encode(buffer + buffer_position, character, previous);
- ASSERT(buffer_position <= kBufferSize);
-
- // Send buffer if full or last character is encoded.
- if (kBufferSize - buffer_position <
- unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit ||
- i == message.length() - 1) {
- if (unibrow::Utf16::IsLeadSurrogate(character)) {
- const int kEncodedSurrogateLength =
- unibrow::Utf16::kUtf8BytesToCodeASurrogate;
- ASSERT(buffer_position >= kEncodedSurrogateLength);
- len = buffer_position - kEncodedSurrogateLength;
- if (conn->Send(buffer, len) < len) {
- return false;
- }
- for (int i = 0; i < kEncodedSurrogateLength; i++) {
- buffer[i] = buffer[buffer_position + i];
- }
- buffer_position = kEncodedSurrogateLength;
- } else {
- len = buffer_position;
- if (conn->Send(buffer, len) < len) {
- return false;
- }
- buffer_position = 0;
- }
- }
- previous = character;
- }
-
- return true;
-}
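
The length pre-pass exists because Content-Length must go out before any body bytes, and UTF-16 code units do not map 1:1 to UTF-8 bytes: a surrogate pair becomes a single four-byte sequence, which is why the loop tracks the previous code unit. A standalone sketch of the same computation without unibrow, equivalent in spirit to the Length() loop above:

```cpp
#include <cstddef>
#include <cstdint>

// Compute the UTF-8 byte length of a UTF-16 string, pairing surrogates.
size_t Utf8LengthOfUtf16(const uint16_t* s, size_t n) {
  size_t bytes = 0;
  for (size_t i = 0; i < n; i++) {
    uint16_t c = s[i];
    if (c < 0x80) {
      bytes += 1;  // ASCII.
    } else if (c < 0x800) {
      bytes += 2;  // Two-byte sequence.
    } else if (c >= 0xD800 && c < 0xDC00 && i + 1 < n &&
               s[i + 1] >= 0xDC00 && s[i + 1] < 0xE000) {
      bytes += 4;  // Valid surrogate pair: one supplementary code point.
      i++;         // The trail surrogate is consumed with the lead.
    } else {
      bytes += 3;  // Other BMP code unit, or an unpaired surrogate.
    }
  }
  return bytes;
}
```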
-
-
-bool DebuggerAgentUtil::SendMessage(Socket* conn,
- const v8::Handle<v8::String> request) {
- static const int kBufferSize = 80;
- char buffer[kBufferSize]; // Sending buffer both for header and body.
-
- // Convert the request to UTF-8 encoding.
- v8::String::Utf8Value utf8_request(request);
-
- // Send the header.
- int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Content-Length: %d\r\n", utf8_request.length());
- if (conn->Send(buffer, len) < len) {
- return false;
- }
-
- // Terminate header with empty line.
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- if (conn->Send(buffer, len) < len) {
- return false;
- }
-
- // Send message body as UTF-8.
- len = utf8_request.length();
- if (conn->Send(*utf8_request, len) < len) {
- return false;
- }
-
- return true;
-}
-
-
-// Receive the full buffer before returning unless an error occurs.
-int DebuggerAgentUtil::ReceiveAll(Socket* conn, char* data, int len) {
- int total_received = 0;
- while (total_received < len) {
- int received = conn->Receive(data + total_received, len - total_received);
- if (received == 0) {
- return total_received;
- }
- total_received += received;
- }
- return total_received;
-}
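
Note that the Send() calls in this file treat a short send as failure rather than retrying. The send-side counterpart of ReceiveAll() would loop until the buffer is drained; an illustrative POSIX sketch, again not V8's Socket wrapper:

```cpp
#include <sys/socket.h>
#include <sys/types.h>

// Send-side counterpart of DebuggerAgentUtil::ReceiveAll(): keep calling
// send() until the whole buffer is written or the connection fails.
int SendAll(int fd, const char* data, int len) {
  int total_sent = 0;
  while (total_sent < len) {
    ssize_t sent = send(fd, data + total_sent, len - total_sent, 0);
    if (sent <= 0) return total_sent;  // Error or peer closed.
    total_sent += static_cast<int>(sent);
  }
  return total_sent;
}
```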
-
-} } // namespace v8::internal
-
-#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/chromium/v8/src/debug-agent.h b/chromium/v8/src/debug-agent.h
deleted file mode 100644
index 138e51acc60..00000000000
--- a/chromium/v8/src/debug-agent.h
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DEBUG_AGENT_H_
-#define V8_DEBUG_AGENT_H_
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#include "../include/v8-debug.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class DebuggerAgentSession;
-class Socket;
-
-
-// Debugger agent which starts a socket listener on the debugger port and
-// handles connections from a remote debugger.
-class DebuggerAgent: public Thread {
- public:
- DebuggerAgent(Isolate* isolate, const char* name, int port);
- ~DebuggerAgent();
-
- void Shutdown();
- void WaitUntilListening();
-
- Isolate* isolate() { return isolate_; }
-
- private:
- void Run();
- void CreateSession(Socket* socket);
- void DebuggerMessage(const v8::Debug::Message& message);
- void CloseSession();
- void OnSessionClosed(DebuggerAgentSession* session);
-
- Isolate* isolate_;
- SmartArrayPointer<const char> name_; // Name of the embedding application.
- int port_; // Port to use for the agent.
- Socket* server_; // Server socket for listen/accept.
- bool terminate_; // Termination flag.
- RecursiveMutex session_access_; // Mutex guarding access to session_.
- DebuggerAgentSession* session_; // Current active session if any.
- Semaphore terminate_now_; // Semaphore to signal termination.
- Semaphore listening_;
-
- friend class DebuggerAgentSession;
- friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
-
- DISALLOW_COPY_AND_ASSIGN(DebuggerAgent);
-};
-
-
-// Debugger agent session. The session receives requests from the remote
-// debugger and sends debugger events/responses to the remote debugger.
-class DebuggerAgentSession: public Thread {
- public:
- DebuggerAgentSession(DebuggerAgent* agent, Socket* client)
- : Thread("v8:DbgAgntSessn"),
- agent_(agent), client_(client) {}
-
- void DebuggerMessage(Vector<uint16_t> message);
- void Shutdown();
-
- private:
- void Run();
-
- void DebuggerMessage(Vector<char> message);
-
- DebuggerAgent* agent_;
- Socket* client_;
-
- DISALLOW_COPY_AND_ASSIGN(DebuggerAgentSession);
-};
-
-
-// Utility methods factored out to be used by the D8 shell as well.
-class DebuggerAgentUtil {
- public:
- static const char* const kContentLength;
-
- static SmartArrayPointer<char> ReceiveMessage(Socket* conn);
- static bool SendConnectMessage(Socket* conn, const char* embedding_host);
- static bool SendMessage(Socket* conn, const Vector<uint16_t> message);
- static bool SendMessage(Socket* conn, const v8::Handle<v8::String> message);
- static int ReceiveAll(Socket* conn, char* data, int len);
-};
-
-} } // namespace v8::internal
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-#endif // V8_DEBUG_AGENT_H_
diff --git a/chromium/v8/src/debug-debugger.js b/chromium/v8/src/debug-debugger.js
index b159ae3b298..660ea790389 100644
--- a/chromium/v8/src/debug-debugger.js
+++ b/chromium/v8/src/debug-debugger.js
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Default number of frames to include in the response to backtrace request.
var kDefaultBacktraceLength = 10;
@@ -1093,15 +1070,16 @@ BreakEvent.prototype.toJSONProtocol = function() {
};
-function MakeExceptionEvent(exec_state, exception, uncaught) {
- return new ExceptionEvent(exec_state, exception, uncaught);
+function MakeExceptionEvent(exec_state, exception, uncaught, promise) {
+ return new ExceptionEvent(exec_state, exception, uncaught, promise);
}
-function ExceptionEvent(exec_state, exception, uncaught) {
+function ExceptionEvent(exec_state, exception, uncaught, promise) {
this.exec_state_ = exec_state;
this.exception_ = exception;
this.uncaught_ = uncaught;
+ this.promise_ = promise;
}
@@ -1125,6 +1103,11 @@ ExceptionEvent.prototype.uncaught = function() {
};
+ExceptionEvent.prototype.promise = function() {
+ return this.promise_;
+};
+
+
ExceptionEvent.prototype.func = function() {
return this.exec_state_.frame(0).func();
};
@@ -1217,31 +1200,6 @@ CompileEvent.prototype.toJSONProtocol = function() {
};
-function MakeNewFunctionEvent(func) {
- return new NewFunctionEvent(func);
-}
-
-
-function NewFunctionEvent(func) {
- this.func = func;
-}
-
-
-NewFunctionEvent.prototype.eventType = function() {
- return Debug.DebugEvent.NewFunction;
-};
-
-
-NewFunctionEvent.prototype.name = function() {
- return this.func.name;
-};
-
-
-NewFunctionEvent.prototype.setBreakPoint = function(p) {
- Debug.setBreakPoint(this.func, p || 0);
-};
-
-
function MakeScriptCollectedEvent(exec_state, id) {
return new ScriptCollectedEvent(exec_state, id);
}
@@ -1430,63 +1388,10 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
}
}
- if (request.command == 'continue') {
- this.continueRequest_(request, response);
- } else if (request.command == 'break') {
- this.breakRequest_(request, response);
- } else if (request.command == 'setbreakpoint') {
- this.setBreakPointRequest_(request, response);
- } else if (request.command == 'changebreakpoint') {
- this.changeBreakPointRequest_(request, response);
- } else if (request.command == 'clearbreakpoint') {
- this.clearBreakPointRequest_(request, response);
- } else if (request.command == 'clearbreakpointgroup') {
- this.clearBreakPointGroupRequest_(request, response);
- } else if (request.command == 'disconnect') {
- this.disconnectRequest_(request, response);
- } else if (request.command == 'setexceptionbreak') {
- this.setExceptionBreakRequest_(request, response);
- } else if (request.command == 'listbreakpoints') {
- this.listBreakpointsRequest_(request, response);
- } else if (request.command == 'backtrace') {
- this.backtraceRequest_(request, response);
- } else if (request.command == 'frame') {
- this.frameRequest_(request, response);
- } else if (request.command == 'scopes') {
- this.scopesRequest_(request, response);
- } else if (request.command == 'scope') {
- this.scopeRequest_(request, response);
- } else if (request.command == 'setVariableValue') {
- this.setVariableValueRequest_(request, response);
- } else if (request.command == 'evaluate') {
- this.evaluateRequest_(request, response);
- } else if (request.command == 'lookup') {
- this.lookupRequest_(request, response);
- } else if (request.command == 'references') {
- this.referencesRequest_(request, response);
- } else if (request.command == 'source') {
- this.sourceRequest_(request, response);
- } else if (request.command == 'scripts') {
- this.scriptsRequest_(request, response);
- } else if (request.command == 'threads') {
- this.threadsRequest_(request, response);
- } else if (request.command == 'suspend') {
- this.suspendRequest_(request, response);
- } else if (request.command == 'version') {
- this.versionRequest_(request, response);
- } else if (request.command == 'changelive') {
- this.changeLiveRequest_(request, response);
- } else if (request.command == 'restartframe') {
- this.restartFrameRequest_(request, response);
- } else if (request.command == 'flags') {
- this.debuggerFlagsRequest_(request, response);
- } else if (request.command == 'v8flags') {
- this.v8FlagsRequest_(request, response);
-
- // GC tools:
- } else if (request.command == 'gc') {
- this.gcRequest_(request, response);
-
+ var key = request.command.toLowerCase();
+ var handler = DebugCommandProcessor.prototype.dispatch_[key];
+ if (IS_FUNCTION(handler)) {
+ %_CallFunction(this, request, response, handler);
} else {
throw new Error('Unknown command "' + request.command + '" in request');
}
@@ -2534,6 +2439,40 @@ DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
};
+DebugCommandProcessor.prototype.dispatch_ = (function() {
+ var proto = DebugCommandProcessor.prototype;
+ return {
+ "continue": proto.continueRequest_,
+ "break" : proto.breakRequest_,
+ "setbreakpoint" : proto.setBreakPointRequest_,
+ "changebreakpoint": proto.changeBreakPointRequest_,
+ "clearbreakpoint": proto.clearBreakPointRequest_,
+ "clearbreakpointgroup": proto.clearBreakPointGroupRequest_,
+ "disconnect": proto.disconnectRequest_,
+ "setexceptionbreak": proto.setExceptionBreakRequest_,
+ "listbreakpoints": proto.listBreakpointsRequest_,
+ "backtrace": proto.backtraceRequest_,
+ "frame": proto.frameRequest_,
+ "scopes": proto.scopesRequest_,
+ "scope": proto.scopeRequest_,
+ "setvariablevalue": proto.setVariableValueRequest_,
+ "evaluate": proto.evaluateRequest_,
+ "lookup": proto.lookupRequest_,
+ "references": proto.referencesRequest_,
+ "source": proto.sourceRequest_,
+ "scripts": proto.scriptsRequest_,
+ "threads": proto.threadsRequest_,
+ "suspend": proto.suspendRequest_,
+ "version": proto.versionRequest_,
+ "changelive": proto.changeLiveRequest_,
+ "restartframe": proto.restartFrameRequest_,
+ "flags": proto.debuggerFlagsRequest_,
+ "v8flag": proto.v8FlagsRequest_,
+ "gc": proto.gcRequest_,
+ };
+})();
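
This replaces the long if/else chain with a table keyed by the lower-cased command name, so adding a command becomes a one-line change. The same table-dispatch pattern expressed in C++ with pointers to members (class and handler names here are illustrative, not V8 API):

```cpp
#include <string>
#include <unordered_map>

// Table-driven command dispatch: the C++ analogue of the dispatch_ map
// introduced above.
class CommandProcessor {
 public:
  bool Dispatch(const std::string& command) {  // Already lower-cased.
    static const std::unordered_map<std::string,
                                    void (CommandProcessor::*)()> kTable = {
        {"continue", &CommandProcessor::Continue},
        {"break", &CommandProcessor::Break},
        {"disconnect", &CommandProcessor::Disconnect},
    };
    auto it = kTable.find(command);
    if (it == kTable.end()) return false;  // Unknown command; caller reports.
    (this->*it->second)();
    return true;
  }

 private:
  void Continue() {}
  void Break() {}
  void Disconnect() {}
};
```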
+
+
// Check whether the previously processed command caused the VM to become
// running.
DebugCommandProcessor.prototype.isRunning = function() {
@@ -2546,17 +2485,6 @@ DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
};
-function NumberToHex8Str(n) {
- var r = "";
- for (var i = 0; i < 8; ++i) {
- var c = hexCharArray[n & 0x0F]; // hexCharArray is defined in uri.js
- r = c + r;
- n = n >>> 4;
- }
- return r;
-}
-
-
/**
* Convert an Object to its debugger protocol representation. The representation
 * may be serialized to a JSON object using JSON.stringify().
diff --git a/chromium/v8/src/debug.cc b/chromium/v8/src/debug.cc
index 25be003f707..a710f0ba558 100644
--- a/chromium/v8/src/debug.cc
+++ b/chromium/v8/src/debug.cc
@@ -1,95 +1,54 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "compiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "full-codegen.h"
-#include "global-handles.h"
-#include "ic.h"
-#include "ic-inl.h"
-#include "isolate-inl.h"
-#include "list.h"
-#include "messages.h"
-#include "natives.h"
-#include "stub-cache.h"
-#include "log.h"
-
-#include "../include/v8-debug.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compilation-cache.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/execution.h"
+#include "src/full-codegen.h"
+#include "src/global-handles.h"
+#include "src/ic.h"
+#include "src/ic-inl.h"
+#include "src/isolate-inl.h"
+#include "src/list.h"
+#include "src/messages.h"
+#include "src/natives.h"
+#include "src/stub-cache.h"
+#include "src/log.h"
+
+#include "include/v8-debug.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-
Debug::Debug(Isolate* isolate)
- : has_break_points_(false),
- script_cache_(NULL),
- debug_info_list_(NULL),
- disable_break_(false),
+ : debug_context_(Handle<Context>()),
+ event_listener_(Handle<Object>()),
+ event_listener_data_(Handle<Object>()),
+ message_handler_(NULL),
+ command_received_(0),
+ command_queue_(isolate->logger(), kQueueInitialSize),
+ event_command_queue_(isolate->logger(), kQueueInitialSize),
+ is_active_(false),
+ is_suppressed_(false),
+ live_edit_enabled_(true), // TODO(yangguo): set to false by default.
+ has_break_points_(false),
+ break_disabled_(false),
break_on_exception_(false),
break_on_uncaught_exception_(false),
- debug_break_return_(NULL),
- debug_break_slot_(NULL),
+ script_cache_(NULL),
+ debug_info_list_(NULL),
isolate_(isolate) {
- memset(registers_, 0, sizeof(JSCallerSavedBuffer));
-}
-
-
-Debug::~Debug() {
-}
-
-
-static void PrintLn(v8::Local<v8::Value> value) {
- v8::Local<v8::String> s = value->ToString();
- ScopedVector<char> data(s->Utf8Length() + 1);
- if (data.start() == NULL) {
- V8::FatalProcessOutOfMemory("PrintLn");
- return;
- }
- s->WriteUtf8(data.start());
- PrintF("%s\n", data.start());
-}
-
-
-static Handle<Code> ComputeCallDebugPrepareStepIn(Isolate* isolate,
- int argc,
- Code::Kind kind) {
- return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
+ ThreadInit();
}
@@ -121,6 +80,22 @@ BreakLocationIterator::~BreakLocationIterator() {
}
+// Check whether a code stub with the specified major key is a possible break
+// point location when looking for source break locations.
+static bool IsSourceBreakStub(Code* code) {
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
+ return major_key == CodeStub::CallFunction;
+}
+
+
+// Check whether a code stub with the specified major key is a possible break
+// location.
+static bool IsBreakStub(Code* code) {
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
+ return major_key == CodeStub::CallFunction;
+}
+
+
void BreakLocationIterator::Next() {
DisallowHeapAllocation no_gc;
ASSERT(!RinfoDone());
@@ -170,15 +145,14 @@ void BreakLocationIterator::Next() {
if (IsDebuggerStatement()) {
break_point_++;
return;
- }
- if (type_ == ALL_BREAK_LOCATIONS) {
- if (Debug::IsBreakStub(code)) {
+ } else if (type_ == ALL_BREAK_LOCATIONS) {
+ if (IsBreakStub(code)) {
break_point_++;
return;
}
} else {
ASSERT(type_ == SOURCE_BREAK_LOCATIONS);
- if (Debug::IsSourceBreakStub(code)) {
+ if (IsSourceBreakStub(code)) {
break_point_++;
return;
}
@@ -298,9 +272,7 @@ bool BreakLocationIterator::Done() const {
void BreakLocationIterator::SetBreakPoint(Handle<Object> break_point_object) {
   // If there is not already a real break point here, patch the code with a
   // debug break.
- if (!HasBreakPoint()) {
- SetDebugBreak();
- }
+ if (!HasBreakPoint()) SetDebugBreak();
ASSERT(IsDebugBreak() || IsDebuggerStatement());
// Set the break point information.
DebugInfo::SetBreakPoint(debug_info_, code_position(),
@@ -322,9 +294,7 @@ void BreakLocationIterator::ClearBreakPoint(Handle<Object> break_point_object) {
void BreakLocationIterator::SetOneShot() {
// Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
+ if (IsDebuggerStatement()) return;
// If there is a real break point here no more to do.
if (HasBreakPoint()) {
@@ -339,9 +309,7 @@ void BreakLocationIterator::SetOneShot() {
void BreakLocationIterator::ClearOneShot() {
// Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
+ if (IsDebuggerStatement()) return;
// If there is a real break point here no more to do.
if (HasBreakPoint()) {
@@ -357,17 +325,13 @@ void BreakLocationIterator::ClearOneShot() {
void BreakLocationIterator::SetDebugBreak() {
// Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
+ if (IsDebuggerStatement()) return;
// If there is already a break point here just return. This might happen if
// the same code is flooded with break points twice. Flooding the same
   // function twice can happen when stepping into a function with an exception
   // handler, because the handler and the function are the same.
- if (IsDebugBreak()) {
- return;
- }
+ if (IsDebugBreak()) return;
if (RelocInfo::IsJSReturn(rmode())) {
// Patch the frame exit code with a break point.
@@ -385,9 +349,7 @@ void BreakLocationIterator::SetDebugBreak() {
void BreakLocationIterator::ClearDebugBreak() {
// Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) {
- return;
- }
+ if (IsDebuggerStatement()) return;
if (RelocInfo::IsJSReturn(rmode())) {
// Restore the frame exit code.
@@ -413,59 +375,42 @@ bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
if (target_code->kind() == Code::STUB) {
return target_code->major_key() == CodeStub::CallFunction;
}
- return target_code->is_call_stub() || target_code->is_keyed_call_stub();
- } else {
- return false;
+ return target_code->is_call_stub();
}
+ return false;
}
void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
+#ifdef DEBUG
HandleScope scope(isolate);
-
// Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call.
Address target = rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
- if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
- // Step in through IC call is handled by the runtime system. Therefore make
- // sure that the any current IC is cleared and the runtime system is
- // called. If the executing code has a debug break at the location change
- // the call in the original code as it is the code there that will be
- // executed in place of the debug break call.
- Handle<Code> stub = ComputeCallDebugPrepareStepIn(
- isolate, target_code->arguments_count(), target_code->kind());
- if (IsDebugBreak()) {
- original_rinfo()->set_target_address(stub->entry());
- } else {
- rinfo()->set_target_address(stub->entry());
- }
- } else {
-#ifdef DEBUG
- // All the following stuff is needed only for assertion checks so the code
- // is wrapped in ifdef.
- Handle<Code> maybe_call_function_stub = target_code;
- if (IsDebugBreak()) {
- Address original_target = original_rinfo()->target_address();
- maybe_call_function_stub =
- Handle<Code>(Code::GetCodeFromTargetAddress(original_target));
- }
- bool is_call_function_stub =
- (maybe_call_function_stub->kind() == Code::STUB &&
- maybe_call_function_stub->major_key() == CodeStub::CallFunction);
-
- // Step in through construct call requires no changes to the running code.
- // Step in through getters/setters should already be prepared as well
- // because caller of this function (Debug::PrepareStep) is expected to
- // flood the top frame's function with one shot breakpoints.
- // Step in through CallFunction stub should also be prepared by caller of
- // this function (Debug::PrepareStep) which should flood target function
- // with breakpoints.
- ASSERT(RelocInfo::IsConstructCall(rmode()) ||
- target_code->is_inline_cache_stub() ||
- is_call_function_stub);
+ // All the following stuff is needed only for assertion checks so the code
+ // is wrapped in ifdef.
+ Handle<Code> maybe_call_function_stub = target_code;
+ if (IsDebugBreak()) {
+ Address original_target = original_rinfo()->target_address();
+ maybe_call_function_stub =
+ Handle<Code>(Code::GetCodeFromTargetAddress(original_target));
+ }
+ bool is_call_function_stub =
+ (maybe_call_function_stub->kind() == Code::STUB &&
+ maybe_call_function_stub->major_key() == CodeStub::CallFunction);
+
+ // Step in through construct call requires no changes to the running code.
+ // Step in through getters/setters should already be prepared as well
+ // because caller of this function (Debug::PrepareStep) is expected to
+ // flood the top frame's function with one shot breakpoints.
+ // Step in through CallFunction stub should also be prepared by caller of
+ // this function (Debug::PrepareStep) which should flood target function
+ // with breakpoints.
+ ASSERT(RelocInfo::IsConstructCall(rmode()) ||
+ target_code->is_inline_cache_stub() ||
+ is_call_function_stub);
#endif
- }
}
@@ -492,6 +437,53 @@ bool BreakLocationIterator::IsDebugBreak() {
}
+// Find the builtin to use for invoking the debug break.
+static Handle<Code> DebugBreakForIC(Handle<Code> code, RelocInfo::Mode mode) {
+ Isolate* isolate = code->GetIsolate();
+
+ // Find the builtin debug break function matching the calling convention
+ // used by the call site.
+ if (code->is_inline_cache_stub()) {
+ switch (code->kind()) {
+ case Code::CALL_IC:
+ return isolate->builtins()->CallICStub_DebugBreak();
+
+ case Code::LOAD_IC:
+ return isolate->builtins()->LoadIC_DebugBreak();
+
+ case Code::STORE_IC:
+ return isolate->builtins()->StoreIC_DebugBreak();
+
+ case Code::KEYED_LOAD_IC:
+ return isolate->builtins()->KeyedLoadIC_DebugBreak();
+
+ case Code::KEYED_STORE_IC:
+ return isolate->builtins()->KeyedStoreIC_DebugBreak();
+
+ case Code::COMPARE_NIL_IC:
+ return isolate->builtins()->CompareNilIC_DebugBreak();
+
+ default:
+ UNREACHABLE();
+ }
+ }
+ if (RelocInfo::IsConstructCall(mode)) {
+ if (code->has_function_cache()) {
+ return isolate->builtins()->CallConstructStub_Recording_DebugBreak();
+ } else {
+ return isolate->builtins()->CallConstructStub_DebugBreak();
+ }
+ }
+ if (code->kind() == Code::STUB) {
+ ASSERT(code->major_key() == CodeStub::CallFunction);
+ return isolate->builtins()->CallFunctionStub_DebugBreak();
+ }
+
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+
void BreakLocationIterator::SetDebugBreakAtIC() {
// Patch the original code with the current address as the current address
// might have changed by the inline caching since the code was copied.
@@ -504,7 +496,7 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
- Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
+ Handle<Code> dbgbrk_code = DebugBreakForIC(target_code, mode);
rinfo()->set_target_address(dbgbrk_code->entry());
}
}
@@ -572,66 +564,56 @@ void Debug::ThreadInit() {
thread_local_.queued_step_count_ = 0;
thread_local_.step_into_fp_ = 0;
thread_local_.step_out_fp_ = 0;
- thread_local_.after_break_target_ = 0;
// TODO(isolates): frames_are_dropped_?
- thread_local_.debugger_entry_ = NULL;
- thread_local_.pending_interrupts_ = 0;
+ thread_local_.current_debug_scope_ = NULL;
thread_local_.restarter_frame_function_pointer_ = NULL;
+ thread_local_.promise_on_stack_ = NULL;
}
char* Debug::ArchiveDebug(char* storage) {
char* to = storage;
- OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
- to += sizeof(ThreadLocal);
- OS::MemCopy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
+ MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
ThreadInit();
- ASSERT(to <= storage + ArchiveSpacePerThread());
return storage + ArchiveSpacePerThread();
}
char* Debug::RestoreDebug(char* storage) {
char* from = storage;
- OS::MemCopy(
- reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- from += sizeof(ThreadLocal);
- OS::MemCopy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
- ASSERT(from <= storage + ArchiveSpacePerThread());
+ MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
return storage + ArchiveSpacePerThread();
}
int Debug::ArchiveSpacePerThread() {
- return sizeof(ThreadLocal) + sizeof(JSCallerSavedBuffer);
+ return sizeof(ThreadLocal);
}
-// Frame structure (conforms to the InternalFrame structure):
-// -- code
-// -- SMI maker
-// -- function (slot is called "context")
-// -- frame base
-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- ASSERT(bottom_js_frame->is_java_script());
-
- Address fp = bottom_js_frame->fp();
+ScriptCache::ScriptCache(Isolate* isolate) : HashMap(HashMap::PointersMatch),
+ isolate_(isolate),
+ collected_scripts_(10) {
+ Heap* heap = isolate_->heap();
+ HandleScope scope(isolate_);
- // Move function pointer into "context" slot.
- Memory::Object_at(fp + StandardFrameConstants::kContextOffset) =
- Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset);
+ // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
+ // rid of all the cached script wrappers and the second gets rid of the
+ // scripts which are no longer referenced.
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "ScriptCache");
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "ScriptCache");
- Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
- Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
- Smi::FromInt(StackFrame::INTERNAL);
+ // Scan heap for Script objects.
+ HeapIterator iterator(heap);
+ DisallowHeapAllocation no_allocation;
- return reinterpret_cast<Object**>(&Memory::Object_at(
- fp + StandardFrameConstants::kContextOffset));
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
+ Add(Handle<Script>(Script::cast(obj)));
+ }
+ }
}
-const int Debug::kFrameDropperFrameSize = 4;
-
void ScriptCache::Add(Handle<Script> script) {
GlobalHandles* global_handles = isolate_->global_handles();
@@ -646,11 +628,10 @@ void ScriptCache::Add(Handle<Script> script) {
// Globalize the script object, make it weak and use the location of the
// global handle as the value in the hash map.
Handle<Script> script_ =
- Handle<Script>::cast(
- (global_handles->Create(*script)));
- global_handles->MakeWeak(reinterpret_cast<Object**>(script_.location()),
- this,
- ScriptCache::HandleWeakScript);
+ Handle<Script>::cast(global_handles->Create(*script));
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
+ this,
+ ScriptCache::HandleWeakScript);
entry->value = script_.location();
}
@@ -671,68 +652,54 @@ Handle<FixedArray> ScriptCache::GetScripts() {
void ScriptCache::ProcessCollectedScripts() {
- Debugger* debugger = isolate_->debugger();
+ Debug* debug = isolate_->debug();
for (int i = 0; i < collected_scripts_.length(); i++) {
- debugger->OnScriptCollected(collected_scripts_[i]);
+ debug->OnScriptCollected(collected_scripts_[i]);
}
collected_scripts_.Clear();
}
void ScriptCache::Clear() {
- GlobalHandles* global_handles = isolate_->global_handles();
// Iterate the script cache to get rid of all the weak handles.
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry != NULL);
Object** location = reinterpret_cast<Object**>(entry->value);
ASSERT((*location)->IsScript());
- global_handles->ClearWeakness(location);
- global_handles->Destroy(location);
+ GlobalHandles::ClearWeakness(location);
+ GlobalHandles::Destroy(location);
}
// Clear the content of the hash map.
HashMap::Clear();
}
-void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data) {
- ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
- // Find the location of the global handle.
- Script** location =
- reinterpret_cast<Script**>(Utils::OpenPersistent(*obj).location());
- ASSERT((*location)->IsScript());
+void ScriptCache::HandleWeakScript(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ // Retrieve the script identifier.
+ Handle<Object> object = Utils::OpenHandle(*data.GetValue());
+ int id = Handle<Script>::cast(object)->id()->value();
+ void* key = reinterpret_cast<void*>(id);
+ uint32_t hash = Hash(id);
- // Remove the entry from the cache.
- int id = (*location)->id()->value();
- script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
+ // Remove the corresponding entry from the cache.
+ ScriptCache* script_cache =
+ reinterpret_cast<ScriptCache*>(data.GetParameter());
+ HashMap::Entry* entry = script_cache->Lookup(key, hash, false);
+ Object** location = reinterpret_cast<Object**>(entry->value);
+ script_cache->Remove(key, hash);
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
- obj->Reset();
-}
-
-
-void Debug::SetUp(bool create_heap_objects) {
- ThreadInit();
- if (create_heap_objects) {
- // Get code to handle debug break on return.
- debug_break_return_ =
- isolate_->builtins()->builtin(Builtins::kReturn_DebugBreak);
- ASSERT(debug_break_return_->IsCode());
- // Get code to handle debug break in debug break slots.
- debug_break_slot_ =
- isolate_->builtins()->builtin(Builtins::kSlot_DebugBreak);
- ASSERT(debug_break_slot_->IsCode());
- }
+ GlobalHandles::Destroy(location);
}
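
The cache now uses the WeakCallbackData-style callback instead of the older three-argument form. For embedders, the corresponding public API of this V8 line looks roughly like the following; the CacheEntry type and function names are illustrative:

```cpp
#include <v8.h>

// Embedder-side shape of the weak-callback mechanism used above.
struct CacheEntry {
  v8::Persistent<v8::Object> handle;
};

void OnWeak(const v8::WeakCallbackData<v8::Object, CacheEntry>& data) {
  CacheEntry* entry = data.GetParameter();
  // The object is only weakly reachable: remove it from any side table,
  // then dispose of the global handle, as HandleWeakScript() does.
  entry->handle.Reset();
  delete entry;
}

void Track(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
  CacheEntry* entry = new CacheEntry;
  entry->handle.Reset(isolate, obj);
  entry->handle.SetWeak(entry, OnWeak);  // Fires when only weakly reachable.
}
```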
-void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data) {
- Debug* debug = reinterpret_cast<Isolate*>(isolate)->debug();
- DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
+void Debug::HandleWeakDebugInfo(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ Debug* debug = reinterpret_cast<Isolate*>(data.GetIsolate())->debug();
+ DebugInfoListNode* node =
+ reinterpret_cast<DebugInfoListNode*>(data.GetParameter());
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
// the function will live in the heap until next gc, and can be found by
@@ -741,29 +708,27 @@ void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
it.ClearAllDebugBreak();
debug->RemoveDebugInfo(node->debug_info());
#ifdef DEBUG
- node = debug->debug_info_list_;
- while (node != NULL) {
- ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
- node = node->next();
+ for (DebugInfoListNode* n = debug->debug_info_list_;
+ n != NULL;
+ n = n->next()) {
+ ASSERT(n != node);
}
#endif
}
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
- GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
// Globalize the request debug info object and make it weak.
- debug_info_ = Handle<DebugInfo>::cast(
- (global_handles->Create(debug_info)));
- global_handles->MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
- this,
- Debug::HandleWeakDebugInfo);
+ GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
+ debug_info_ = Handle<DebugInfo>::cast(global_handles->Create(debug_info));
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
+ this,
+ Debug::HandleWeakDebugInfo);
}
DebugInfoListNode::~DebugInfoListNode() {
- debug_info_->GetIsolate()->global_handles()->Destroy(
- reinterpret_cast<Object**>(debug_info_.location()));
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_.location()));
}
@@ -772,27 +737,24 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
HandleScope scope(isolate);
// Bail out if the index is invalid.
- if (index == -1) {
- return false;
- }
+ if (index == -1) return false;
// Find source and name for the requested script.
Handle<String> source_code =
isolate->bootstrapper()->NativesSourceLookup(index);
Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> script_name = factory->NewStringFromAscii(name);
+ Handle<String> script_name =
+ factory->NewStringFromAscii(name).ToHandleChecked();
Handle<Context> context = isolate->native_context();
// Compile the script.
Handle<SharedFunctionInfo> function_info;
- function_info = Compiler::Compile(source_code,
- script_name,
- 0, 0,
- false,
- context,
- NULL, NULL,
- Handle<String>::null(),
- NATIVES_CODE);
+ function_info = Compiler::CompileScript(source_code,
+ script_name, 0, 0,
+ false,
+ context,
+ NULL, NULL, NO_CACHED_DATA,
+ NATIVES_CODE);
// Silently ignore stack overflows during compilation.
if (function_info.is_null()) {
@@ -802,25 +764,25 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
}
// Execute the shared function in the debugger context.
- bool caught_exception;
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
- Handle<Object> exception =
+ Handle<Object> exception;
+ MaybeHandle<Object> result =
Execution::TryCall(function,
Handle<Object>(context->global_object(), isolate),
0,
NULL,
- &caught_exception);
+ &exception);
// Check for caught exceptions.
- if (caught_exception) {
+ if (result.is_null()) {
ASSERT(!isolate->has_pending_exception());
MessageLocation computed_location;
isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
isolate, "error_loading_debugger", &computed_location,
- Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
+ Vector<Handle<Object> >::empty(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
@@ -839,29 +801,26 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
bool Debug::Load() {
// Return if debugger is already loaded.
- if (IsLoaded()) return true;
-
- Debugger* debugger = isolate_->debugger();
+ if (is_loaded()) return true;
// Bail out if we're already in the process of compiling the native
// JavaScript source code for the debugger.
- if (debugger->compiling_natives() ||
- debugger->is_loading_debugger())
- return false;
- debugger->set_loading_debugger(true);
+ if (is_suppressed_) return false;
+ SuppressDebug while_loading(this);
// Disable breakpoints and interrupts while compiling and running the
// debugger scripts including the context creation code.
- DisableBreak disable(isolate_, true);
+ DisableBreak disable(this, true);
PostponeInterruptsScope postpone(isolate_);
// Create the debugger context.
HandleScope scope(isolate_);
+ ExtensionConfiguration no_extensions;
Handle<Context> context =
isolate_->bootstrapper()->CreateEnvironment(
Handle<Object>::null(),
v8::Handle<ObjectTemplate>(),
- NULL);
+ &no_extensions);
// Fail if no context could be created.
if (context.is_null()) return false;
@@ -873,18 +832,16 @@ bool Debug::Load() {
// Expose the builtins object in the debugger context.
Handle<String> key = isolate_->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("builtins"));
- Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object());
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ Handle<GlobalObject> global =
+ Handle<GlobalObject>(context->global_object(), isolate_);
+ Handle<JSBuiltinsObject> builtin =
+ Handle<JSBuiltinsObject>(global->builtins(), isolate_);
+ RETURN_ON_EXCEPTION_VALUE(
isolate_,
- JSReceiver::SetProperty(global,
- key,
- Handle<Object>(global->builtins(), isolate_),
- NONE,
- kNonStrictMode),
+ JSReceiver::SetProperty(global, key, builtin, NONE, SLOPPY),
false);
// Compile the JavaScript for the debugger in the debugger context.
- debugger->set_compiling_natives(true);
bool caught_exception =
!CompileDebuggerScript(isolate_, Natives::GetIndex("mirror")) ||
!CompileDebuggerScript(isolate_, Natives::GetIndex("debug"));
@@ -893,75 +850,51 @@ bool Debug::Load() {
caught_exception = caught_exception ||
!CompileDebuggerScript(isolate_, Natives::GetIndex("liveedit"));
}
-
- debugger->set_compiling_natives(false);
-
- // Make sure we mark the debugger as not loading before we might
- // return.
- debugger->set_loading_debugger(false);
-
// Check for caught exceptions.
if (caught_exception) return false;
- // Debugger loaded, create debugger context global handle.
debug_context_ = Handle<Context>::cast(
isolate_->global_handles()->Create(*context));
-
return true;
}
void Debug::Unload() {
+ ClearAllBreakPoints();
+ ClearStepping();
+
+ // Match unmatched PromiseHandlePrologue calls.
+ while (thread_local_.promise_on_stack_) PromiseHandleEpilogue();
+
   // Return if the debugger is not loaded.
- if (!IsLoaded()) {
- return;
- }
+ if (!is_loaded()) return;
// Clear the script cache.
- DestroyScriptCache();
+ if (script_cache_ != NULL) {
+ delete script_cache_;
+ script_cache_ = NULL;
+ }
// Clear debugger context global handle.
- isolate_->global_handles()->Destroy(
- reinterpret_cast<Object**>(debug_context_.location()));
+ GlobalHandles::Destroy(Handle<Object>::cast(debug_context_).location());
debug_context_ = Handle<Context>();
}
-// Set the flag indicating that preemption happened during debugging.
-void Debug::PreemptionWhileInDebugger() {
- ASSERT(InDebugger());
- Debug::set_interrupts_pending(PREEMPT);
-}
-
-
-void Debug::Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**>(&(debug_break_return_)));
- v->VisitPointer(BitCast<Object**>(&(debug_break_slot_)));
-}
-
-
-Object* Debug::Break(Arguments args) {
+void Debug::Break(Arguments args, JavaScriptFrame* frame) {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
ASSERT(args.length() == 0);
- thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
-
- // Get the top-most JavaScript frame.
- JavaScriptFrameIterator it(isolate_);
- JavaScriptFrame* frame = it.frame();
+ // Initialize LiveEdit.
+ LiveEdit::InitializeThreadLocal(this);
// Just continue if breaks are disabled or debugger cannot be loaded.
- if (disable_break() || !Load()) {
- SetAfterBreakTarget(frame);
- return heap->undefined_value();
- }
+ if (break_disabled_) return;
// Enter the debugger.
- EnterDebugger debugger(isolate_);
- if (debugger.FailedToEnter()) {
- return heap->undefined_value();
- }
+ DebugScope debug_scope(this);
+ if (debug_scope.failed()) return;
// Postpone interrupt during breakpoint processing.
PostponeInterruptsScope postpone(isolate_);
@@ -997,7 +930,8 @@ Object* Debug::Break(Arguments args) {
   // If step out is active, skip everything until the frame where we need to
   // step out to is reached, unless a real breakpoint is hit.
- if (StepOutActive() && frame->fp() != step_out_fp() &&
+ if (StepOutActive() &&
+ frame->fp() != thread_local_.step_out_fp_ &&
break_points_hit->IsUndefined() ) {
// Step count should always be 0 for StepOut.
ASSERT(thread_local_.step_count_ == 0);
@@ -1020,7 +954,7 @@ Object* Debug::Break(Arguments args) {
PrepareStep(StepNext, step_count, StackFrame::NO_ID);
} else {
// Notify the debug event listeners.
- isolate_->debugger()->OnDebugBreak(break_points_hit, false);
+ OnDebugBreak(break_points_hit, false);
}
} else if (thread_local_.last_step_action_ != StepNone) {
// Hold on to last step action as it is cleared by the call to
@@ -1057,40 +991,15 @@ Object* Debug::Break(Arguments args) {
// Set up for the remaining steps.
PrepareStep(step_action, step_count, StackFrame::NO_ID);
}
-
- if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
- SetAfterBreakTarget(frame);
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_IC_CALL) {
- // We must have been calling IC stub. Do not go there anymore.
- Code* plain_return = isolate_->builtins()->builtin(
- Builtins::kPlainReturn_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
- // Debug break slot stub does not return normally, instead it manually
- // cleans the stack and jumps. We should patch the jump address.
- Code* plain_return = isolate_->builtins()->builtin(
- Builtins::kFrameDropper_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_DIRECT_CALL) {
- // Nothing to do, after_break_target is not used here.
- } else if (thread_local_.frame_drop_mode_ ==
- FRAME_DROPPED_IN_RETURN_CALL) {
- Code* plain_return = isolate_->builtins()->builtin(
- Builtins::kFrameDropper_LiveEdit);
- thread_local_.after_break_target_ = plain_return->entry();
- } else {
- UNREACHABLE();
- }
-
- return heap->undefined_value();
}
-RUNTIME_FUNCTION(Object*, Debug_Break) {
- return isolate->debug()->Break(args);
+RUNTIME_FUNCTION(Debug_Break) {
+ // Get the top-most JavaScript frame.
+ JavaScriptFrameIterator it(isolate);
+ isolate->debug()->Break(args, it.frame());
+ isolate->debug()->SetAfterBreakTarget(it.frame());
+ return isolate->heap()->undefined_value();
}
@@ -1144,31 +1053,26 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
Handle<String> is_break_point_triggered_string =
factory->InternalizeOneByteString(
STATIC_ASCII_VECTOR("IsBreakPointTriggered"));
+ Handle<GlobalObject> debug_global(debug_context()->global_object());
Handle<JSFunction> check_break_point =
- Handle<JSFunction>(JSFunction::cast(
- debug_context()->global_object()->GetPropertyNoExceptionThrown(
- *is_break_point_triggered_string)));
+ Handle<JSFunction>::cast(Object::GetProperty(
+ debug_global, is_break_point_triggered_string).ToHandleChecked());
// Get the break id as an object.
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
   // Call IsBreakPointTriggered.
- bool caught_exception;
Handle<Object> argv[] = { break_id, break_point_object };
- Handle<Object> result = Execution::TryCall(check_break_point,
- isolate_->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
-
- // If exception or non boolean result handle as not triggered
- if (caught_exception || !result->IsBoolean()) {
+ Handle<Object> result;
+ if (!Execution::TryCall(check_break_point,
+ isolate_->js_builtins_object(),
+ ARRAY_SIZE(argv),
+ argv).ToHandle(&result)) {
return false;
}
// Return whether the break point is triggered.
- ASSERT(!result.is_null());
- return (*result)->IsTrue();
+ return result->IsTrue();
}
@@ -1186,7 +1090,7 @@ Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
}
-void Debug::SetBreakPoint(Handle<JSFunction> function,
+bool Debug::SetBreakPoint(Handle<JSFunction> function,
Handle<Object> break_point_object,
int* source_position) {
HandleScope scope(isolate_);
@@ -1197,7 +1101,7 @@ void Debug::SetBreakPoint(Handle<JSFunction> function,
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if retrieving debug info failed.
- return;
+ return true;
}
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
@@ -1212,7 +1116,7 @@ void Debug::SetBreakPoint(Handle<JSFunction> function,
*source_position = it.position();
// At least one active break point now.
- ASSERT(debug_info->GetBreakPointCount() > 0);
+ return debug_info->GetBreakPointCount() > 0;
}
@@ -1333,7 +1237,7 @@ void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
isolate_);
if (!bindee.is_null() && bindee->IsJSFunction() &&
- !JSFunction::cast(*bindee)->IsBuiltin()) {
+ !JSFunction::cast(*bindee)->IsNative()) {
Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
Debug::FloodWithOneShot(bindee_function);
}
@@ -1376,6 +1280,66 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
+PromiseOnStack::PromiseOnStack(Isolate* isolate,
+ PromiseOnStack* prev,
+ Handle<JSFunction> getter)
+ : isolate_(isolate), prev_(prev) {
+ handler_ = StackHandler::FromAddress(
+ Isolate::handler(isolate->thread_local_top()));
+ getter_ = Handle<JSFunction>::cast(
+ isolate->global_handles()->Create(*getter));
+}
+
+
+PromiseOnStack::~PromiseOnStack() {
+ isolate_->global_handles()->Destroy(Handle<Object>::cast(getter_).location());
+}
+
+
+void Debug::PromiseHandlePrologue(Handle<JSFunction> promise_getter) {
+ PromiseOnStack* prev = thread_local_.promise_on_stack_;
+ thread_local_.promise_on_stack_ =
+ new PromiseOnStack(isolate_, prev, promise_getter);
+}
+
+
+void Debug::PromiseHandleEpilogue() {
+ if (thread_local_.promise_on_stack_ == NULL) return;
+ PromiseOnStack* prev = thread_local_.promise_on_stack_->prev();
+ delete thread_local_.promise_on_stack_;
+ thread_local_.promise_on_stack_ = prev;
+}
+
+
+Handle<Object> Debug::GetPromiseForUncaughtException() {
+ Handle<Object> undefined = isolate_->factory()->undefined_value();
+ if (thread_local_.promise_on_stack_ == NULL) return undefined;
+ Handle<JSFunction> promise_getter = thread_local_.promise_on_stack_->getter();
+ StackHandler* promise_catch = thread_local_.promise_on_stack_->handler();
+ // Find the top-most try-catch handler.
+ StackHandler* handler = StackHandler::FromAddress(
+ Isolate::handler(isolate_->thread_local_top()));
+ while (handler != NULL && !handler->is_catch()) {
+ handler = handler->next();
+ }
+#ifdef DEBUG
+ // Make sure that our promise catch handler is in the list of handlers,
+ // even if it's not the top-most try-catch handler.
+ StackHandler* temp = handler;
+ while (temp != promise_catch && !temp->is_catch()) {
+ temp = temp->next();
+ CHECK(temp != NULL);
+ }
+#endif // DEBUG
+
+ if (handler == promise_catch) {
+ return Execution::Call(
+ isolate_, promise_getter, undefined, 0, NULL).ToHandleChecked();
+ }
+ return undefined;
+}
+
+
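
Taken together, the constructor, prologue and epilogue above maintain a thread-local linked stack of promise getters, and GetPromiseForUncaughtException attributes an exception to the tracked promise only when the nearest catch handler on the stack is the promise's own. A standalone model of that attribution, assuming plain structs in place of V8's StackHandler and global-handle plumbing:

#include <cstdio>

struct StackHandler {
  bool is_catch;
  StackHandler* next;  // next (outer) handler on the chain
};

// Models the entries pushed by Debug::PromiseHandlePrologue.
struct PromiseOnStack {
  StackHandler* handler;  // top-most handler when the entry was pushed
  PromiseOnStack* prev;
};

// Mirrors GetPromiseForUncaughtException: the exception belongs to the
// tracked promise only if the nearest catch handler is the promise's own.
bool BelongsToTrackedPromise(StackHandler* top, PromiseOnStack* promises) {
  if (promises == nullptr) return false;
  StackHandler* handler = top;
  while (handler != nullptr && !handler->is_catch) handler = handler->next;
  return handler == promises->handler;
}

int main() {
  StackHandler promise_catch{true, nullptr};
  StackHandler finally_block{false, &promise_catch};
  PromiseOnStack entry{&promise_catch, nullptr};
  // Nearest catch is the promise's own: report through the promise.
  std::printf("%d\n", BelongsToTrackedPromise(&finally_block, &entry));
  // A user try-catch sits in between: not the promise's exception.
  StackHandler user_catch{true, &promise_catch};
  std::printf("%d\n", BelongsToTrackedPromise(&user_catch, &entry));
  return 0;
}
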
void Debug::PrepareStep(StepAction step_action,
int step_count,
StackFrame::Id frame_id) {
@@ -1383,7 +1347,7 @@ void Debug::PrepareStep(StepAction step_action,
PrepareForBreakPoints();
- ASSERT(Debug::InDebugger());
+ ASSERT(in_debug_scope());
// Remember this step action and count.
thread_local_.last_step_action_ = step_action;
@@ -1453,7 +1417,7 @@ void Debug::PrepareStep(StepAction step_action,
bool is_call_target = false;
Address target = it.rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
+ if (code->is_call_stub()) {
is_call_target = true;
}
if (code->is_inline_cache_stub()) {
@@ -1470,8 +1434,9 @@ void Debug::PrepareStep(StepAction step_action,
maybe_call_function_stub =
Code::GetCodeFromTargetAddress(original_target);
}
- if (maybe_call_function_stub->kind() == Code::STUB &&
- maybe_call_function_stub->major_key() == CodeStub::CallFunction) {
+ if ((maybe_call_function_stub->kind() == Code::STUB &&
+ maybe_call_function_stub->major_key() == CodeStub::CallFunction) ||
+ maybe_call_function_stub->kind() == Code::CALL_IC) {
      // Save a reference to the code; we may need it to find out the
      // argument count for 'step in' later.
call_function_stub = Handle<Code>(maybe_call_function_stub);
@@ -1493,7 +1458,7 @@ void Debug::PrepareStep(StepAction step_action,
frames_it.Advance();
}
// Skip builtin functions on the stack.
- while (!frames_it.done() && frames_it.frame()->function()->IsBuiltin()) {
+ while (!frames_it.done() && frames_it.frame()->function()->IsNative()) {
frames_it.Advance();
}
// Step out: If there is a JavaScript caller frame, we need to
@@ -1527,6 +1492,7 @@ void Debug::PrepareStep(StepAction step_action,
} else if (!call_function_stub.is_null()) {
    // If it's a CallFunction stub, ensure the target function is compiled
    // and flood it with one-shot breakpoints.
+ bool is_call_ic = call_function_stub->kind() == Code::CALL_IC;
// Find out number of arguments from the stub minor key.
// Reverse lookup required as the minor key cannot be retrieved
@@ -1542,11 +1508,13 @@ void Debug::PrepareStep(StepAction step_action,
uint32_t key = Smi::cast(*obj)->value();
// Argc in the stub is the number of arguments passed - not the
// expected arguments of the called function.
- int call_function_arg_count =
- CallFunctionStub::ExtractArgcFromMinorKey(
+ int call_function_arg_count = is_call_ic
+ ? CallICStub::ExtractArgcFromMinorKey(CodeStub::MinorKeyFromKey(key))
+ : CallFunctionStub::ExtractArgcFromMinorKey(
CodeStub::MinorKeyFromKey(key));
- ASSERT(call_function_stub->major_key() ==
- CodeStub::MajorKeyFromKey(key));
+
+ ASSERT(is_call_ic ||
+ call_function_stub->major_key() == CodeStub::MajorKeyFromKey(key));
// Find target function on the expression stack.
// Expression stack looks like this (top to bottom):
@@ -1559,11 +1527,25 @@ void Debug::PrepareStep(StepAction step_action,
ASSERT(expressions_count - 2 - call_function_arg_count >= 0);
Object* fun = frame->GetExpression(
expressions_count - 2 - call_function_arg_count);
+
+ // Flood the actual target of call/apply.
+ if (fun->IsJSFunction()) {
+ Isolate* isolate = JSFunction::cast(fun)->GetIsolate();
+ Code* apply = isolate->builtins()->builtin(Builtins::kFunctionApply);
+ Code* call = isolate->builtins()->builtin(Builtins::kFunctionCall);
+ while (fun->IsJSFunction()) {
+ Code* code = JSFunction::cast(fun)->shared()->code();
+ if (code != apply && code != call) break;
+ fun = frame->GetExpression(
+ expressions_count - 1 - call_function_arg_count);
+ }
+ }
+
if (fun->IsJSFunction()) {
Handle<JSFunction> js_function(JSFunction::cast(fun));
if (js_function->shared()->bound()) {
Debug::FloodBoundFunctionWithOneShot(js_function);
- } else if (!js_function->IsBuiltin()) {
+ } else if (!js_function->IsNative()) {
// Don't step into builtins.
        // It will also compile the target function if it's not compiled yet.
FloodWithOneShot(js_function);
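
The new flooding block above exists because stepping into f.call(...) or f.apply(...) would otherwise stop inside the builtin wrapper rather than in f. A hedged sketch of the unwrapping follows; the enum and index arithmetic are stand-ins for the expression-stack slots and the builtin-code comparison, and the sketch shows a single hop where the real loop re-tests the slot:

#include <cstdio>
#include <vector>

enum class Fun { kFunctionCall, kFunctionApply, kOrdinary, kNotAFunction };

// stack[fun_index] models frame->GetExpression(expressions_count - 2 - argc);
// the slot one index higher models expressions_count - 1 - argc.
Fun ResolveStepInTarget(const std::vector<Fun>& stack, int fun_index) {
  Fun fun = stack[fun_index];
  if (fun == Fun::kFunctionCall || fun == Fun::kFunctionApply) {
    // For f.call(...) / f.apply(...) the actual callee sits one slot
    // higher on the expression stack, so flood that function instead.
    fun = stack[fun_index + 1];
  }
  return fun;
}

int main() {
  std::vector<Fun> stack = {Fun::kFunctionApply, Fun::kOrdinary};
  Fun target = ResolveStepInTarget(stack, 0);
  std::printf("step into ordinary function: %d\n", target == Fun::kOrdinary);
  return 0;
}
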
@@ -1636,73 +1618,7 @@ bool Debug::IsDebugBreak(Address addr) {
}
-// Check whether a code stub with the specified major key is a possible break
-// point location when looking for source break locations.
-bool Debug::IsSourceBreakStub(Code* code) {
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- return major_key == CodeStub::CallFunction;
-}
-
-
-// Check whether a code stub with the specified major key is a possible break
-// location.
-bool Debug::IsBreakStub(Code* code) {
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- return major_key == CodeStub::CallFunction;
-}
-
-
-// Find the builtin to use for invoking the debug break
-Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
- Isolate* isolate = code->GetIsolate();
- // Find the builtin debug break function matching the calling convention
- // used by the call site.
- if (code->is_inline_cache_stub()) {
- switch (code->kind()) {
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- return isolate->stub_cache()->ComputeCallDebugBreak(
- code->arguments_count(), code->kind());
-
- case Code::LOAD_IC:
- return isolate->builtins()->LoadIC_DebugBreak();
-
- case Code::STORE_IC:
- return isolate->builtins()->StoreIC_DebugBreak();
-
- case Code::KEYED_LOAD_IC:
- return isolate->builtins()->KeyedLoadIC_DebugBreak();
-
- case Code::KEYED_STORE_IC:
- return isolate->builtins()->KeyedStoreIC_DebugBreak();
-
- case Code::COMPARE_NIL_IC:
- return isolate->builtins()->CompareNilIC_DebugBreak();
-
- default:
- UNREACHABLE();
- }
- }
- if (RelocInfo::IsConstructCall(mode)) {
- if (code->has_function_cache()) {
- return isolate->builtins()->CallConstructStub_Recording_DebugBreak();
- } else {
- return isolate->builtins()->CallConstructStub_DebugBreak();
- }
- }
- if (code->kind() == Code::STUB) {
- ASSERT(code->major_key() == CodeStub::CallFunction);
- if (code->has_function_cache()) {
- return isolate->builtins()->CallFunctionStub_Recording_DebugBreak();
- } else {
- return isolate->builtins()->CallFunctionStub_DebugBreak();
- }
- }
-
- UNREACHABLE();
- return Handle<Code>::null();
-}
// Simple function for returning the source positions for active break points.
@@ -1747,18 +1663,6 @@ Handle<Object> Debug::GetSourceBreakLocations(
}
-void Debug::NewBreak(StackFrame::Id break_frame_id) {
- thread_local_.break_frame_id_ = break_frame_id;
- thread_local_.break_id_ = ++thread_local_.break_count_;
-}
-
-
-void Debug::SetBreak(StackFrame::Id break_frame_id, int break_id) {
- thread_local_.break_frame_id_ = break_frame_id;
- thread_local_.break_id_ = break_id;
-}
-
-
// Handle stepping into a function.
void Debug::HandleStepIn(Handle<JSFunction> function,
Handle<Object> holder,
@@ -1779,11 +1683,11 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// Flood the function with one-shot break points if it is called from where
// step into was requested.
- if (fp == step_in_fp()) {
+ if (fp == thread_local_.step_into_fp_) {
if (function->shared()->bound()) {
// Handle Function.prototype.bind
Debug::FloodBoundFunctionWithOneShot(function);
- } else if (!function->IsBuiltin()) {
+ } else if (!function->IsNative()) {
// Don't allow step into functions in the native context.
if (function->shared()->code() ==
isolate->builtins()->builtin(Builtins::kFunctionApply) ||
@@ -1795,7 +1699,7 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// function.
if (!holder.is_null() && holder->IsJSFunction()) {
Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
- if (!js_function->IsBuiltin()) {
+ if (!js_function->IsNative()) {
Debug::FloodWithOneShot(js_function);
} else if (js_function->shared()->bound()) {
// Handle Function.prototype.bind
@@ -1871,41 +1775,6 @@ void Debug::ClearStepNext() {
}
-// Helper function to compile full code for debugging. This code will
-// have debug break slots and deoptimization information. Deoptimization
-// information is required in case that an optimized version of this
-// function is still activated on the stack. It will also make sure that
-// the full code is compiled with the same flags as the previous version,
-// that is flags which can change the code generated. The current method
-// of mapping from already compiled full code without debug break slots
-// to full code with debug break slots depends on the generated code being
-// otherwise exactly the same.
-static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
- Handle<Code> current_code) {
- ASSERT(!current_code->has_debug_break_slots());
-
- CompilationInfoWithZone info(function);
- info.MarkCompilingForDebugging(current_code);
- ASSERT(!info.shared_info()->is_compiled());
- ASSERT(!info.isolate()->has_pending_exception());
-
- // Use compile lazy which will end up compiling the full code in the
- // configuration configured above.
- bool result = Compiler::CompileLazy(&info);
- ASSERT(result != info.isolate()->has_pending_exception());
- info.isolate()->clear_pending_exception();
-#if DEBUG
- if (result) {
- Handle<Code> new_code(function->shared()->code());
- ASSERT(new_code->has_debug_break_slots());
- ASSERT(current_code->is_compiled_optimizable() ==
- new_code->is_compiled_optimizable());
- }
-#endif
- return result;
-}
-
-
static void CollectActiveFunctionsFromThread(
Isolate* isolate,
ThreadLocalTop* top,
@@ -1935,6 +1804,59 @@ static void CollectActiveFunctionsFromThread(
}
+// Figure out how many bytes of "pc_offset" correspond to actual code by
+// subtracting off the bytes that correspond to constant/veneer pools. See
+// Assembler::CheckConstPool() and Assembler::CheckVeneerPool(). Note that this
+// is only useful for architectures using constant pools or veneer pools.
+static int ComputeCodeOffsetFromPcOffset(Code *code, int pc_offset) {
+ ASSERT_EQ(code->kind(), Code::FUNCTION);
+ ASSERT(!code->has_debug_break_slots());
+ ASSERT_LE(0, pc_offset);
+ ASSERT_LT(pc_offset, code->instruction_end() - code->instruction_start());
+
+ int mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
+ RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ byte *pc = code->instruction_start() + pc_offset;
+ int code_offset = pc_offset;
+ for (RelocIterator it(code, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->pc() >= pc) break;
+ ASSERT(RelocInfo::IsConstPool(info->rmode()));
+ code_offset -= static_cast<int>(info->data());
+ ASSERT_LE(0, code_offset);
+ }
+
+ return code_offset;
+}
+
+
+// The inverse of ComputeCodeOffsetFromPcOffset.
+static int ComputePcOffsetFromCodeOffset(Code *code, int code_offset) {
+ ASSERT_EQ(code->kind(), Code::FUNCTION);
+
+ int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+ RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
+ RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ int reloc = 0;
+ for (RelocIterator it(code, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->pc() - code->instruction_start() - reloc >= code_offset) break;
+ if (RelocInfo::IsDebugBreakSlot(info->rmode())) {
+ reloc += Assembler::kDebugBreakSlotLength;
+ } else {
+ ASSERT(RelocInfo::IsConstPool(info->rmode()));
+ reloc += static_cast<int>(info->data());
+ }
+ }
+
+ int pc_offset = code_offset + reloc;
+
+ ASSERT_LT(code->instruction_start() + pc_offset, code->instruction_end());
+
+ return pc_offset;
+}
+
+
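
Both helpers implement the same bookkeeping: a pc offset counts every emitted byte, while a code offset counts only instruction bytes, so pool bytes (and, in code carrying them, debug-break-slot bytes) must be subtracted or re-added while scanning relocation entries in address order. A standalone toy version, with (position, size) pairs standing in for RelocInfo entries and made-up numbers:

#include <cassert>
#include <vector>

struct Chunk { int pc; int size; };  // non-instruction bytes at this pc

// Models ComputeCodeOffsetFromPcOffset: drop pool bytes that precede pc.
int CodeOffsetFromPcOffset(const std::vector<Chunk>& chunks, int pc_offset) {
  int code_offset = pc_offset;
  for (const Chunk& c : chunks) {
    if (c.pc >= pc_offset) break;
    code_offset -= c.size;
  }
  return code_offset;
}

// Models ComputePcOffsetFromCodeOffset: re-add pool/slot bytes passed over.
int PcOffsetFromCodeOffset(const std::vector<Chunk>& chunks, int code_offset) {
  int reloc = 0;
  for (const Chunk& c : chunks) {
    if (c.pc - reloc >= code_offset) break;
    reloc += c.size;
  }
  return code_offset + reloc;
}

int main() {
  // Old code: one 8-byte constant pool at pc 16.
  std::vector<Chunk> old_chunks = {{16, 8}};
  int code_offset = CodeOffsetFromPcOffset(old_chunks, 28);  // 28 - 8
  assert(code_offset == 20);
  // New code: a 5-byte debug break slot plus the pool, now shifted to 21.
  std::vector<Chunk> new_chunks = {{10, 5}, {21, 8}};
  assert(PcOffsetFromCodeOffset(new_chunks, code_offset) == 33);  // 20+5+8
  return 0;
}

This round trip is what lets a suspended generator's continuation be stored as a layout-independent code offset during the heap walk below and rebased onto the recompiled code afterwards.
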
static void RedirectActivationsToRecompiledCodeOnThread(
Isolate* isolate,
ThreadLocalTop* top) {
@@ -1956,47 +1878,13 @@ static void RedirectActivationsToRecompiledCodeOnThread(
continue;
}
- // Iterate over the RelocInfo in the original code to compute the sum of the
- // constant pools sizes. (See Assembler::CheckConstPool())
- // Note that this is only useful for architectures using constant pools.
- int constpool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL);
- int frame_const_pool_size = 0;
- for (RelocIterator it(*frame_code, constpool_mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->pc() >= frame->pc()) break;
- frame_const_pool_size += static_cast<int>(info->data());
- }
- intptr_t frame_offset =
- frame->pc() - frame_code->instruction_start() - frame_const_pool_size;
-
- // Iterate over the RelocInfo for new code to find the number of bytes
- // generated for debug slots and constant pools.
- int debug_break_slot_bytes = 0;
- int new_code_const_pool_size = 0;
- int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::CONST_POOL);
- for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
- // Check if the pc in the new code with debug break
- // slots is before this slot.
- RelocInfo* info = it.rinfo();
- intptr_t new_offset = info->pc() - new_code->instruction_start() -
- new_code_const_pool_size - debug_break_slot_bytes;
- if (new_offset >= frame_offset) {
- break;
- }
-
- if (RelocInfo::IsDebugBreakSlot(info->rmode())) {
- debug_break_slot_bytes += Assembler::kDebugBreakSlotLength;
- } else {
- ASSERT(RelocInfo::IsConstPool(info->rmode()));
- // The size of the constant pool is encoded in the data.
- new_code_const_pool_size += static_cast<int>(info->data());
- }
- }
+ int old_pc_offset =
+ static_cast<int>(frame->pc() - frame_code->instruction_start());
+ int code_offset = ComputeCodeOffsetFromPcOffset(*frame_code, old_pc_offset);
+ int new_pc_offset = ComputePcOffsetFromCodeOffset(*new_code, code_offset);
// Compute the equivalent pc in the new code.
- byte* new_pc = new_code->instruction_start() + frame_offset +
- debug_break_slot_bytes + new_code_const_pool_size;
+ byte* new_pc = new_code->instruction_start() + new_pc_offset;
if (FLAG_trace_deopt) {
PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
@@ -2052,6 +1940,39 @@ class ActiveFunctionsRedirector : public ThreadVisitor {
};
+static void EnsureFunctionHasDebugBreakSlots(Handle<JSFunction> function) {
+ if (function->code()->kind() == Code::FUNCTION &&
+ function->code()->has_debug_break_slots()) {
+ // Nothing to do. Function code already had debug break slots.
+ return;
+ }
+ // Make sure that the shared full code is compiled with debug
+ // break slots.
+ if (!function->shared()->code()->has_debug_break_slots()) {
+ MaybeHandle<Code> code = Compiler::GetCodeForDebugging(function);
+ // Recompilation can fail. In that case leave the code as it was.
+ if (!code.is_null()) function->ReplaceCode(*code.ToHandleChecked());
+ } else {
+ // Simply use shared code if it has debug break slots.
+ function->ReplaceCode(function->shared()->code());
+ }
+}
+
+
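
EnsureFunctionHasDebugBreakSlots collapses the old inline recompilation logic into a three-way decision. A condensed standalone model of that decision tree, using booleans in place of the real Code objects and compiler outcome:

#include <cstdio>

struct FunctionState {
  bool own_code_has_slots;     // function->code()->has_debug_break_slots()
  bool shared_code_has_slots;  // shared code already debug-ready?
  bool recompile_succeeds;     // Compiler::GetCodeForDebugging outcome
};

const char* EnsureDebugBreakSlots(const FunctionState& f) {
  if (f.own_code_has_slots) return "nothing to do";
  if (!f.shared_code_has_slots) {
    // Recompilation can fail; on failure the old code is kept.
    return f.recompile_succeeds ? "install freshly compiled code"
                                : "keep old code";
  }
  return "reuse shared code";
}

int main() {
  std::printf("%s\n", EnsureDebugBreakSlots({false, false, true}));
  std::printf("%s\n", EnsureDebugBreakSlots({false, true, false}));
  return 0;
}
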
+static void RecompileAndRelocateSuspendedGenerators(
+ const List<Handle<JSGeneratorObject> > &generators) {
+ for (int i = 0; i < generators.length(); i++) {
+ Handle<JSFunction> fun(generators[i]->function());
+
+ EnsureFunctionHasDebugBreakSlots(fun);
+
+ int code_offset = generators[i]->continuation();
+ int pc_offset = ComputePcOffsetFromCodeOffset(fun->code(), code_offset);
+ generators[i]->set_continuation(pc_offset);
+ }
+}
+
+
void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
@@ -2062,8 +1983,7 @@ void Debug::PrepareForBreakPoints() {
Deoptimizer::DeoptimizeAll(isolate_);
- Handle<Code> lazy_compile =
- Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
+ Handle<Code> lazy_compile = isolate_->builtins()->CompileUnoptimized();
// There will be at least one break point when we are done.
has_break_points_ = true;
@@ -2072,12 +1992,28 @@ void Debug::PrepareForBreakPoints() {
// is used both in GC and non-GC code.
List<Handle<JSFunction> > active_functions(100);
+ // A list of all suspended generators.
+ List<Handle<JSGeneratorObject> > suspended_generators;
+
+ // A list of all generator functions. We need to recompile all functions,
+ // but we don't know until after visiting the whole heap which generator
+ // functions have suspended activations and which do not. As in the case of
+ // functions with activations on the stack, we need to be careful with
+ // generator functions with suspended activations because although they
+ // should be recompiled, recompilation can fail, and we need to avoid
+ // leaving the heap in an inconsistent state.
+ //
+ // We could perhaps avoid this list and instead re-use the GC metadata
+ // links.
+ List<Handle<JSFunction> > generator_functions;
+
{
    // We are going to iterate the heap to find all functions without
// debug break slots.
Heap* heap = isolate_->heap();
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"preparing for breakpoints");
+ HeapIterator iterator(heap);
    // Ensure no GC in this scope as we are going to use the gc_metadata
// field in the Code object to mark active functions.
@@ -2097,7 +2033,6 @@ void Debug::PrepareForBreakPoints() {
// Scan the heap for all non-optimized functions which have no
// debug break slots and are not active or inlined into an active
// function and mark them for lazy compilation.
- HeapIterator iterator(heap);
HeapObject* obj = NULL;
while (((obj = iterator.next()) != NULL)) {
if (obj->IsJSFunction()) {
@@ -2106,28 +2041,51 @@ void Debug::PrepareForBreakPoints() {
if (!shared->allows_lazy_compilation()) continue;
if (!shared->script()->IsScript()) continue;
- if (function->IsBuiltin()) continue;
+ if (function->IsNative()) continue;
if (shared->code()->gc_metadata() == active_code_marker) continue;
+ if (shared->is_generator()) {
+ generator_functions.Add(Handle<JSFunction>(function, isolate_));
+ continue;
+ }
+
Code::Kind kind = function->code()->kind();
if (kind == Code::FUNCTION &&
!function->code()->has_debug_break_slots()) {
- function->set_code(*lazy_compile);
- function->shared()->set_code(*lazy_compile);
+ function->ReplaceCode(*lazy_compile);
+ function->shared()->ReplaceCode(*lazy_compile);
} else if (kind == Code::BUILTIN &&
- (function->IsInRecompileQueue() ||
- function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForConcurrentRecompilation())) {
+ (function->IsInOptimizationQueue() ||
+ function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization())) {
// Abort in-flight compilation.
Code* shared_code = function->shared()->code();
if (shared_code->kind() == Code::FUNCTION &&
shared_code->has_debug_break_slots()) {
- function->set_code(shared_code);
+ function->ReplaceCode(shared_code);
} else {
- function->set_code(*lazy_compile);
- function->shared()->set_code(*lazy_compile);
+ function->ReplaceCode(*lazy_compile);
+ function->shared()->ReplaceCode(*lazy_compile);
}
}
+ } else if (obj->IsJSGeneratorObject()) {
+ JSGeneratorObject* gen = JSGeneratorObject::cast(obj);
+ if (!gen->is_suspended()) continue;
+
+ JSFunction* fun = gen->function();
+ ASSERT_EQ(fun->code()->kind(), Code::FUNCTION);
+ if (fun->code()->has_debug_break_slots()) continue;
+
+ int pc_offset = gen->continuation();
+ ASSERT_LT(0, pc_offset);
+
+ int code_offset =
+ ComputeCodeOffsetFromPcOffset(fun->code(), pc_offset);
+
+ // This will be fixed after we recompile the functions.
+ gen->set_continuation(code_offset);
+
+ suspended_generators.Add(Handle<JSGeneratorObject>(gen, isolate_));
}
}
@@ -2138,47 +2096,35 @@ void Debug::PrepareForBreakPoints() {
}
}
+ // Recompile generator functions that have suspended activations, and
+ // relocate those activations.
+ RecompileAndRelocateSuspendedGenerators(suspended_generators);
+
+ // Mark generator functions that didn't have suspended activations for lazy
+ // recompilation. Note that this set does not include any active functions.
+ for (int i = 0; i < generator_functions.length(); i++) {
+ Handle<JSFunction> &function = generator_functions[i];
+ if (function->code()->kind() != Code::FUNCTION) continue;
+ if (function->code()->has_debug_break_slots()) continue;
+ function->ReplaceCode(*lazy_compile);
+ function->shared()->ReplaceCode(*lazy_compile);
+ }
+
  // Now recompile all functions with activation frames and
- // patch the return address to run in the new compiled code.
+ // patch the return address to run in the new compiled code. It could be
+ // that some active functions were recompiled already by the suspended
+ // generator recompilation pass above; a generator with suspended
+ // activations could also have active activations. That's fine.
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
Handle<SharedFunctionInfo> shared(function->shared());
- if (function->code()->kind() == Code::FUNCTION &&
- function->code()->has_debug_break_slots()) {
- // Nothing to do. Function code already had debug break slots.
- continue;
- }
-
// If recompilation is not possible just skip it.
- if (shared->is_toplevel() ||
- !shared->allows_lazy_compilation() ||
- shared->code()->kind() == Code::BUILTIN) {
- continue;
- }
+ if (shared->is_toplevel()) continue;
+ if (!shared->allows_lazy_compilation()) continue;
+ if (shared->code()->kind() == Code::BUILTIN) continue;
- // Make sure that the shared full code is compiled with debug
- // break slots.
- if (!shared->code()->has_debug_break_slots()) {
- // Try to compile the full code with debug break slots. If it
- // fails just keep the current code.
- Handle<Code> current_code(function->shared()->code());
- shared->set_code(*lazy_compile);
- bool prev_force_debugger_active =
- isolate_->debugger()->force_debugger_active();
- isolate_->debugger()->set_force_debugger_active(true);
- ASSERT(current_code->kind() == Code::FUNCTION);
- CompileFullCodeForDebugging(function, current_code);
- isolate_->debugger()->set_force_debugger_active(
- prev_force_debugger_active);
- if (!shared->is_compiled()) {
- shared->set_code(*current_code);
- continue;
- }
- }
-
- // Keep function code in sync with shared function info.
- function->set_code(shared->code());
+ EnsureFunctionHasDebugBreakSlots(function);
}
RedirectActivationsToRecompiledCodeOnThread(isolate_,
@@ -2211,9 +2157,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
Handle<SharedFunctionInfo> target;
Heap* heap = isolate_->heap();
while (!done) {
- { // Extra scope for iterator and no-allocation.
- heap->EnsureHeapIsIterable();
- DisallowHeapAllocation no_alloc_during_heap_iteration;
+ { // Extra scope for iterator.
HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next();
obj != NULL; obj = iterator.next()) {
@@ -2287,11 +2231,10 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// will compile all inner functions that cannot be compiled without a
// context, because Compiler::BuildFunctionInfo checks whether the
// debugger is active.
- if (target_function.is_null()) {
- SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
- } else {
- JSFunction::CompileLazy(target_function, KEEP_EXCEPTION);
- }
+ MaybeHandle<Code> maybe_result = target_function.is_null()
+ ? Compiler::GetUnoptimizedCode(target)
+ : Compiler::GetUnoptimizedCode(target_function);
+ if (maybe_result.is_null()) return isolate_->heap()->undefined_value();
}
} // End while loop.
@@ -2315,7 +2258,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
// Ensure function is compiled. Return false if this failed.
if (!function.is_null() &&
- !JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION)) {
+ !Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
@@ -2363,8 +2306,11 @@ void Debug::RemoveDebugInfo(Handle<DebugInfo> debug_info) {
void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
- HandleScope scope(isolate_);
+ after_break_target_ = NULL;
+ if (LiveEdit::SetAfterBreakTarget(this)) return; // LiveEdit did the job.
+
+ HandleScope scope(isolate_);
PrepareForBreakPoints();
// Get the executing function in which the debug break occurred.
@@ -2413,19 +2359,18 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// place in the original code. If not the break point was removed during
// break point processing.
if (break_at_js_return_active) {
- addr += original_code->instruction_start() - code->instruction_start();
+ addr += original_code->instruction_start() - code->instruction_start();
}
// Move back to where the call instruction sequence started.
- thread_local_.after_break_target_ =
- addr - Assembler::kPatchReturnSequenceAddressOffset;
+ after_break_target_ = addr - Assembler::kPatchReturnSequenceAddressOffset;
} else if (at_debug_break_slot) {
// Address of where the debug break slot starts.
addr = addr - Assembler::kPatchDebugBreakSlotAddressOffset;
// Continue just after the slot.
- thread_local_.after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
- } else if (IsDebugBreak(Assembler::target_address_at(addr))) {
+ after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
+ } else if (IsDebugBreak(Assembler::target_address_at(addr, *code))) {
// We now know that there is still a debug break call at the target address,
// so the break point is still there and the original code will hold the
// address to jump to in order to complete the call which is replaced by a
@@ -2436,13 +2381,13 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Install jump to the call address in the original code. This will be the
// call which was overwritten by the call to DebugBreakXXX.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
+ after_break_target_ = Assembler::target_address_at(addr, *original_code);
} else {
// There is no longer a break point present. Don't try to look in the
// original code as the running code will have the right address. This takes
// care of the case where the last break point is removed from the function
// and therefore no "original code" is available.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
+ after_break_target_ = Assembler::target_address_at(addr, *code);
}
}
@@ -2491,9 +2436,9 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- FrameDropMode mode,
+ LiveEdit::FrameDropMode mode,
Object** restarter_frame_function_pointer) {
- if (mode != CURRENTLY_SET_MODE) {
+ if (mode != LiveEdit::CURRENTLY_SET_MODE) {
thread_local_.frame_drop_mode_ = mode;
}
thread_local_.break_frame_id_ = new_break_frame_id;
@@ -2502,94 +2447,33 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
}
-const int Debug::FramePaddingLayout::kInitialSize = 1;
-
-
-// Any even value bigger than kInitialSize as needed for stack scanning.
-const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
-
-
bool Debug::IsDebugGlobal(GlobalObject* global) {
- return IsLoaded() && global == debug_context()->global_object();
+ return is_loaded() && global == debug_context()->global_object();
}
void Debug::ClearMirrorCache() {
PostponeInterruptsScope postpone(isolate_);
HandleScope scope(isolate_);
- ASSERT(isolate_->context() == *Debug::debug_context());
-
- // Clear the mirror cache.
- Handle<String> function_name = isolate_->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("ClearMirrorCache"));
- Handle<Object> fun(
- isolate_->global_object()->GetPropertyNoExceptionThrown(*function_name),
- isolate_);
- ASSERT(fun->IsJSFunction());
- bool caught_exception;
- Execution::TryCall(Handle<JSFunction>::cast(fun),
- Handle<JSObject>(Debug::debug_context()->global_object()),
- 0, NULL, &caught_exception);
-}
-
-
-void Debug::CreateScriptCache() {
- Heap* heap = isolate_->heap();
- HandleScope scope(isolate_);
-
- // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
- // rid of all the cached script wrappers and the second gets rid of the
- // scripts which are no longer referenced. The second also sweeps precisely,
- // which saves us doing yet another GC to make the heap iterable.
- heap->CollectAllGarbage(Heap::kNoGCFlags, "Debug::CreateScriptCache");
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "Debug::CreateScriptCache");
-
- ASSERT(script_cache_ == NULL);
- script_cache_ = new ScriptCache(isolate_);
-
- // Scan heap for Script objects.
- int count = 0;
- HeapIterator iterator(heap);
- DisallowHeapAllocation no_allocation;
-
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
- script_cache_->Add(Handle<Script>(Script::cast(obj)));
- count++;
- }
- }
-}
-
-
-void Debug::DestroyScriptCache() {
- // Get rid of the script cache if it was created.
- if (script_cache_ != NULL) {
- delete script_cache_;
- script_cache_ = NULL;
- }
-}
-
-
-void Debug::AddScriptToScriptCache(Handle<Script> script) {
- if (script_cache_ != NULL) {
- script_cache_->Add(script);
- }
+ AssertDebugContext();
+ Factory* factory = isolate_->factory();
+ JSObject::SetProperty(isolate_->global_object(),
+ factory->NewStringFromAsciiChecked("next_handle_"),
+ handle(Smi::FromInt(0), isolate_),
+ NONE,
+ SLOPPY).Check();
+ JSObject::SetProperty(isolate_->global_object(),
+ factory->NewStringFromAsciiChecked("mirror_cache_"),
+ factory->NewJSArray(0, FAST_ELEMENTS),
+ NONE,
+ SLOPPY).Check();
}
Handle<FixedArray> Debug::GetLoadedScripts() {
  // Create and fill the script cache when the loaded scripts are requested for
// the first time.
- if (script_cache_ == NULL) {
- CreateScriptCache();
- }
-
- // If the script cache is not active just return an empty array.
- ASSERT(script_cache_ != NULL);
- if (script_cache_ == NULL) {
- isolate_->factory()->NewFixedArray(0);
- }
+ if (script_cache_ == NULL) script_cache_ = new ScriptCache(isolate_);
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
@@ -2601,6 +2485,21 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
}
+void Debug::RecordEvalCaller(Handle<Script> script) {
+ script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
+ // For eval scripts add information on the function from which eval was
+ // called.
+ StackTraceFrameIterator it(script->GetIsolate());
+ if (!it.done()) {
+ script->set_eval_from_shared(it.frame()->function()->shared());
+ Code* code = it.frame()->LookupCode();
+ int offset = static_cast<int>(
+ it.frame()->pc() - code->instruction_start());
+ script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+ }
+}
+
+
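
RecordEvalCaller stores just enough on the script to reconstruct the eval site later: the calling function's shared info and the pc offset of the call inside the caller's instructions. A sketch with plain values standing in for the Script and frame objects:

#include <cstdint>
#include <cstdio>

// Models the two fields RecordEvalCaller stores on the Script.
struct EvalOrigin {
  const char* eval_from;    // stands in for eval_from_shared
  int instructions_offset;  // stands in for eval_from_instructions_offset
};

EvalOrigin RecordEvalCaller(const char* caller_name, uintptr_t frame_pc,
                            uintptr_t instruction_start) {
  // Offset of the eval call site within the caller's instruction stream.
  return EvalOrigin{caller_name,
                    static_cast<int>(frame_pc - instruction_start)};
}

int main() {
  EvalOrigin origin = RecordEvalCaller("outer", 0x4010, 0x4000);
  std::printf("eval called from %s at offset %d\n",
              origin.eval_from, origin.instructions_offset);
  return 0;
}
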
void Debug::AfterGarbageCollection() {
// Generate events for collected scripts.
if (script_cache_ != NULL) {
@@ -2609,175 +2508,104 @@ void Debug::AfterGarbageCollection() {
}
-Debugger::Debugger(Isolate* isolate)
- : debugger_access_(isolate->debugger_access()),
- event_listener_(Handle<Object>()),
- event_listener_data_(Handle<Object>()),
- compiling_natives_(false),
- is_loading_debugger_(false),
- live_edit_enabled_(true),
- never_unload_debugger_(false),
- force_debugger_active_(false),
- message_handler_(NULL),
- debugger_unload_pending_(false),
- host_dispatch_handler_(NULL),
- debug_message_dispatch_handler_(NULL),
- message_dispatch_helper_thread_(NULL),
- host_dispatch_period_(TimeDelta::FromMilliseconds(100)),
- agent_(NULL),
- command_queue_(isolate->logger(), kQueueInitialSize),
- command_received_(0),
- event_command_queue_(isolate->logger(), kQueueInitialSize),
- isolate_(isolate) {
-}
-
-
-Debugger::~Debugger() {}
-
-
-Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
- int argc,
- Handle<Object> argv[],
- bool* caught_exception) {
- ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
-
+MaybeHandle<Object> Debug::MakeJSObject(const char* constructor_name,
+ int argc,
+ Handle<Object> argv[]) {
+ AssertDebugContext();
  // Look up the constructor function for the requested event object.
- Handle<String> constructor_str =
- isolate_->factory()->InternalizeUtf8String(constructor_name);
- Handle<Object> constructor(
- isolate_->global_object()->GetPropertyNoExceptionThrown(*constructor_str),
- isolate_);
+ Handle<Object> constructor = Object::GetProperty(
+ isolate_, isolate_->global_object(), constructor_name).ToHandleChecked();
ASSERT(constructor->IsJSFunction());
- if (!constructor->IsJSFunction()) {
- *caught_exception = true;
- return isolate_->factory()->undefined_value();
- }
- Handle<Object> js_object = Execution::TryCall(
- Handle<JSFunction>::cast(constructor),
- Handle<JSObject>(isolate_->debug()->debug_context()->global_object()),
- argc,
- argv,
- caught_exception);
- return js_object;
+ if (!constructor->IsJSFunction()) return MaybeHandle<Object>();
+  // We do not handle interrupts here, in particular termination interrupts.
+ PostponeInterruptsScope no_interrupts(isolate_);
+ return Execution::TryCall(Handle<JSFunction>::cast(constructor),
+ Handle<JSObject>(debug_context()->global_object()),
+ argc,
+ argv);
}
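
All of the Make*Event helpers funnel through this one pattern: look up a constructor by name on the debug context's global object, invoke it, and let an empty result propagate failure. A hedged sketch of that dispatch shape, with a name-to-callback map standing in for the global object and a bool standing in for the empty MaybeHandle:

#include <cstdio>
#include <functional>
#include <map>
#include <string>

// Stand-ins: an "event object" and a registry that models the debug
// context's global holding MakeExecutionState, MakeBreakEvent, ...
struct Event { std::string kind; };
using Ctor = std::function<bool(Event*)>;  // false models an exception

bool MakeJSObject(const std::map<std::string, Ctor>& global,
                  const std::string& name, Event* out) {
  auto it = global.find(name);
  if (it == global.end()) return false;  // constructor missing: fail
  return it->second(out);                // empty-handle analogue on false
}

int main() {
  std::map<std::string, Ctor> global = {
      {"MakeBreakEvent", [](Event* e) { e->kind = "break"; return true; }}};
  Event event;
  if (MakeJSObject(global, "MakeBreakEvent", &event)) {
    std::printf("event: %s\n", event.kind.c_str());
  }
  return 0;
}
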
-Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
+MaybeHandle<Object> Debug::MakeExecutionState() {
// Create the execution state object.
- Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
- isolate_->debug()->break_id());
- Handle<Object> argv[] = { break_id };
- return MakeJSObject(CStrVector("MakeExecutionState"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
+ Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()) };
+ return MakeJSObject("MakeExecutionState", ARRAY_SIZE(argv), argv);
}
-Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
- Handle<Object> break_points_hit,
- bool* caught_exception) {
+MaybeHandle<Object> Debug::MakeBreakEvent(Handle<Object> break_points_hit) {
+ Handle<Object> exec_state;
+ if (!MakeExecutionState().ToHandle(&exec_state)) return MaybeHandle<Object>();
// Create the new break event object.
Handle<Object> argv[] = { exec_state, break_points_hit };
- return MakeJSObject(CStrVector("MakeBreakEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
+ return MakeJSObject("MakeBreakEvent", ARRAY_SIZE(argv), argv);
}
-Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
- Handle<Object> exception,
- bool uncaught,
- bool* caught_exception) {
- Factory* factory = isolate_->factory();
+MaybeHandle<Object> Debug::MakeExceptionEvent(Handle<Object> exception,
+ bool uncaught,
+ Handle<Object> promise) {
+ Handle<Object> exec_state;
+ if (!MakeExecutionState().ToHandle(&exec_state)) return MaybeHandle<Object>();
// Create the new exception event object.
Handle<Object> argv[] = { exec_state,
exception,
- factory->ToBoolean(uncaught) };
- return MakeJSObject(CStrVector("MakeExceptionEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
-}
-
-
-Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
- bool* caught_exception) {
- // Create the new function event object.
- Handle<Object> argv[] = { function };
- return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
+ isolate_->factory()->ToBoolean(uncaught),
+ promise };
+ return MakeJSObject("MakeExceptionEvent", ARRAY_SIZE(argv), argv);
}
-Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
- bool before,
- bool* caught_exception) {
- Factory* factory = isolate_->factory();
+MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
+ bool before) {
+ Handle<Object> exec_state;
+ if (!MakeExecutionState().ToHandle(&exec_state)) return MaybeHandle<Object>();
// Create the compile event object.
- Handle<Object> exec_state = MakeExecutionState(caught_exception);
- Handle<Object> script_wrapper = GetScriptWrapper(script);
+ Handle<Object> script_wrapper = Script::GetWrapper(script);
Handle<Object> argv[] = { exec_state,
script_wrapper,
- factory->ToBoolean(before) };
- return MakeJSObject(CStrVector("MakeCompileEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
+ isolate_->factory()->ToBoolean(before) };
+ return MakeJSObject("MakeCompileEvent", ARRAY_SIZE(argv), argv);
}
-Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
- bool* caught_exception) {
+MaybeHandle<Object> Debug::MakeScriptCollectedEvent(int id) {
+ Handle<Object> exec_state;
+ if (!MakeExecutionState().ToHandle(&exec_state)) return MaybeHandle<Object>();
// Create the script collected event object.
- Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id), isolate_);
Handle<Object> argv[] = { exec_state, id_object };
-
- return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
+ return MakeJSObject("MakeScriptCollectedEvent", ARRAY_SIZE(argv), argv);
}
-void Debugger::OnException(Handle<Object> exception, bool uncaught) {
- HandleScope scope(isolate_);
- Debug* debug = isolate_->debug();
+void Debug::OnException(Handle<Object> exception, bool uncaught) {
+ if (in_debug_scope() || ignore_events()) return;
- // Bail out based on state or if there is no listener for this event
- if (debug->InDebugger()) return;
- if (!Debugger::EventActive(v8::Exception)) return;
+ HandleScope scope(isolate_);
+ Handle<Object> promise = GetPromiseForUncaughtException();
+ uncaught |= !promise->IsUndefined();
// Bail out if exception breaks are not active
if (uncaught) {
    // Uncaught exceptions are reported if either flag is set.
- if (!(debug->break_on_uncaught_exception() ||
- debug->break_on_exception())) return;
+ if (!(break_on_uncaught_exception_ || break_on_exception_)) return;
} else {
    // Caught exceptions are reported if break-on-exception is activated.
- if (!debug->break_on_exception()) return;
+ if (!break_on_exception_) return;
}
- // Enter the debugger.
- EnterDebugger debugger(isolate_);
- if (debugger.FailedToEnter()) return;
+ DebugScope debug_scope(this);
+ if (debug_scope.failed()) return;
// Clear all current stepping setup.
- debug->ClearStepping();
+ ClearStepping();
+
// Create the event data object.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
Handle<Object> event_data;
- if (!caught_exception) {
- event_data = MakeExceptionEvent(exec_state, exception, uncaught,
- &caught_exception);
- }
  // Bail out and don't call the debugger if an exception occurred.
- if (caught_exception) {
+ if (!MakeExceptionEvent(
+ exception, uncaught, promise).ToHandle(&event_data)) {
return;
}
@@ -2787,31 +2615,18 @@ void Debugger::OnException(Handle<Object> exception, bool uncaught) {
}
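
The net effect of the promise lookup and flag checks above is a small decision function. A standalone model of that gating; the booleans mirror the fields and calls used in OnException:

#include <cstdio>

// Models Debug::OnException's gating: an exception whose only catcher
// would be a tracked promise's reject handler is promoted to "uncaught",
// then the break-on-exception flags decide whether the debugger is entered.
bool ShouldEnterDebugger(bool uncaught, bool caught_by_tracked_promise,
                         bool break_on_exception, bool break_on_uncaught) {
  uncaught |= caught_by_tracked_promise;  // uncaught |= !promise->IsUndefined()
  if (uncaught) return break_on_exception || break_on_uncaught;
  return break_on_exception;
}

int main() {
  // Only a promise reject handler would catch it: treated as uncaught.
  std::printf("%d\n", ShouldEnterDebugger(false, true, false, true));   // 1
  // Genuinely caught, and break-on-exception is off: no debugger entry.
  std::printf("%d\n", ShouldEnterDebugger(false, false, false, true));  // 0
  return 0;
}
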
-void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
+void Debug::OnDebugBreak(Handle<Object> break_points_hit,
bool auto_continue) {
- HandleScope scope(isolate_);
-
- // Debugger has already been entered by caller.
- ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
-
+  // The caller is expected to have created a DebugScope.
+ AssertDebugContext();
// Bail out if there is no listener for this event
- if (!Debugger::EventActive(v8::Break)) return;
-
- // Debugger must be entered in advance.
- ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
+ if (ignore_events()) return;
+ HandleScope scope(isolate_);
// Create the event data object.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
Handle<Object> event_data;
- if (!caught_exception) {
- event_data = MakeBreakEvent(exec_state, break_points_hit,
- &caught_exception);
- }
  // Bail out and don't call the debugger if an exception occurred.
- if (caught_exception) {
- return;
- }
+ if (!MakeBreakEvent(break_points_hit).ToHandle(&event_data)) return;
// Process debug event.
ProcessDebugEvent(v8::Break,
@@ -2820,25 +2635,17 @@ void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
}
-void Debugger::OnBeforeCompile(Handle<Script> script) {
- HandleScope scope(isolate_);
-
- // Bail out based on state or if there is no listener for this event
- if (isolate_->debug()->InDebugger()) return;
- if (compiling_natives()) return;
- if (!EventActive(v8::BeforeCompile)) return;
+void Debug::OnBeforeCompile(Handle<Script> script) {
+ if (in_debug_scope() || ignore_events()) return;
- // Enter the debugger.
- EnterDebugger debugger(isolate_);
- if (debugger.FailedToEnter()) return;
+ HandleScope scope(isolate_);
+ DebugScope debug_scope(this);
+ if (debug_scope.failed()) return;
// Create the event data object.
- bool caught_exception = false;
- Handle<Object> event_data = MakeCompileEvent(script, true, &caught_exception);
+ Handle<Object> event_data;
  // Bail out and don't call the debugger if an exception occurred.
- if (caught_exception) {
- return;
- }
+ if (!MakeCompileEvent(script, true).ToHandle(&event_data)) return;
// Process debug event.
ProcessDebugEvent(v8::BeforeCompile,
@@ -2848,26 +2655,20 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
// Handle debugger actions when a new script is compiled.
-void Debugger::OnAfterCompile(Handle<Script> script,
- AfterCompileFlags after_compile_flags) {
- HandleScope scope(isolate_);
- Debug* debug = isolate_->debug();
-
+void Debug::OnAfterCompile(Handle<Script> script,
+ AfterCompileFlags after_compile_flags) {
// Add the newly compiled script to the script cache.
- debug->AddScriptToScriptCache(script);
+ if (script_cache_ != NULL) script_cache_->Add(script);
// No more to do if not debugging.
- if (!IsDebuggerActive()) return;
-
- // No compile events while compiling natives.
- if (compiling_natives()) return;
+ if (in_debug_scope() || ignore_events()) return;
+ HandleScope scope(isolate_);
  // Record whether we were already in a debug scope before entering one.
- bool in_debugger = debug->InDebugger();
+ bool was_in_scope = in_debug_scope();
- // Enter the debugger.
- EnterDebugger debugger(isolate_);
- if (debugger.FailedToEnter()) return;
+ DebugScope debug_scope(this);
+ if (debug_scope.failed()) return;
// If debugging there might be script break points registered for this
// script. Make sure that these break points are set.
@@ -2876,11 +2677,10 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Handle<String> update_script_break_points_string =
isolate_->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("UpdateScriptBreakPoints"));
+ Handle<GlobalObject> debug_global(debug_context()->global_object());
Handle<Object> update_script_break_points =
- Handle<Object>(
- debug->debug_context()->global_object()->GetPropertyNoExceptionThrown(
- *update_script_break_points_string),
- isolate_);
+ Object::GetProperty(
+ debug_global, update_script_break_points_string).ToHandleChecked();
if (!update_script_break_points->IsJSFunction()) {
return;
}
@@ -2888,58 +2688,40 @@ void Debugger::OnAfterCompile(Handle<Script> script,
// Wrap the script object in a proper JS object before passing it
// to JavaScript.
- Handle<JSValue> wrapper = GetScriptWrapper(script);
+ Handle<Object> wrapper = Script::GetWrapper(script);
  // Call UpdateScriptBreakPoints, expecting no exceptions.
- bool caught_exception;
Handle<Object> argv[] = { wrapper };
- Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
- isolate_->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
- if (caught_exception) {
+ if (Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
+ isolate_->js_builtins_object(),
+ ARRAY_SIZE(argv),
+ argv).is_null()) {
return;
}
// Bail out based on state or if there is no listener for this event
- if (in_debugger && (after_compile_flags & SEND_WHEN_DEBUGGING) == 0) return;
- if (!Debugger::EventActive(v8::AfterCompile)) return;
+ if (was_in_scope && (after_compile_flags & SEND_WHEN_DEBUGGING) == 0) return;
// Create the compile state object.
- Handle<Object> event_data = MakeCompileEvent(script,
- false,
- &caught_exception);
+ Handle<Object> event_data;
  // Bail out and don't call the debugger if an exception occurred.
- if (caught_exception) {
- return;
- }
+ if (!MakeCompileEvent(script, false).ToHandle(&event_data)) return;
+
// Process debug event.
- ProcessDebugEvent(v8::AfterCompile,
- Handle<JSObject>::cast(event_data),
- true);
+ ProcessDebugEvent(v8::AfterCompile, Handle<JSObject>::cast(event_data), true);
}
-void Debugger::OnScriptCollected(int id) {
- HandleScope scope(isolate_);
-
- // No more to do if not debugging.
- if (isolate_->debug()->InDebugger()) return;
- if (!IsDebuggerActive()) return;
- if (!Debugger::EventActive(v8::ScriptCollected)) return;
+void Debug::OnScriptCollected(int id) {
+ if (in_debug_scope() || ignore_events()) return;
- // Enter the debugger.
- EnterDebugger debugger(isolate_);
- if (debugger.FailedToEnter()) return;
+ HandleScope scope(isolate_);
+ DebugScope debug_scope(this);
+ if (debug_scope.failed()) return;
// Create the script collected state object.
- bool caught_exception = false;
- Handle<Object> event_data = MakeScriptCollectedEvent(id,
- &caught_exception);
+ Handle<Object> event_data;
  // Bail out and don't call the debugger if an exception occurred.
- if (caught_exception) {
- return;
- }
+ if (!MakeScriptCollectedEvent(id).ToHandle(&event_data)) return;
// Process debug event.
ProcessDebugEvent(v8::ScriptCollected,
@@ -2948,22 +2730,16 @@ void Debugger::OnScriptCollected(int id) {
}
-void Debugger::ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- bool auto_continue) {
+void Debug::ProcessDebugEvent(v8::DebugEvent event,
+ Handle<JSObject> event_data,
+ bool auto_continue) {
HandleScope scope(isolate_);
- // Clear any pending debug break if this is a real break.
- if (!auto_continue) {
- isolate_->debug()->clear_interrupt_pending(DEBUGBREAK);
- }
-
// Create the execution state.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- if (caught_exception) {
- return;
- }
+ Handle<Object> exec_state;
+  // Bail out and don't call the debugger if an exception occurred.
+ if (!MakeExecutionState().ToHandle(&exec_state)) return;
+
// First notify the message handler if any.
if (message_handler_ != NULL) {
NotifyMessageHandler(event,
@@ -2993,89 +2769,53 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
}
-void Debugger::CallEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data) {
+void Debug::CallEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data) {
if (event_listener_->IsForeign()) {
- CallCEventCallback(event, exec_state, event_data, client_data);
+ // Invoke the C debug event listener.
+ v8::Debug::EventCallback callback =
+ FUNCTION_CAST<v8::Debug::EventCallback>(
+ Handle<Foreign>::cast(event_listener_)->foreign_address());
+ EventDetailsImpl event_details(event,
+ Handle<JSObject>::cast(exec_state),
+ Handle<JSObject>::cast(event_data),
+ event_listener_data_,
+ client_data);
+ callback(event_details);
+ ASSERT(!isolate_->has_scheduled_exception());
} else {
- CallJSEventCallback(event, exec_state, event_data);
+ // Invoke the JavaScript debug event listener.
+ ASSERT(event_listener_->IsJSFunction());
+ Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_),
+ exec_state,
+ event_data,
+ event_listener_data_ };
+ Execution::TryCall(Handle<JSFunction>::cast(event_listener_),
+ isolate_->global_object(),
+ ARRAY_SIZE(argv),
+ argv);
}
}
-void Debugger::CallCEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data) {
- Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
- v8::Debug::EventCallback2 callback =
- FUNCTION_CAST<v8::Debug::EventCallback2>(
- callback_obj->foreign_address());
- EventDetailsImpl event_details(
- event,
- Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data),
- event_listener_data_,
- client_data);
- callback(event_details);
-}
-
-
-void Debugger::CallJSEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data) {
- ASSERT(event_listener_->IsJSFunction());
- Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
-
- // Invoke the JavaScript debug event listener.
- Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_),
- exec_state,
- event_data,
- event_listener_data_ };
- bool caught_exception;
- Execution::TryCall(fun,
- isolate_->global_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
- // Silently ignore exceptions from debug event listeners.
-}
-
-
-Handle<Context> Debugger::GetDebugContext() {
- never_unload_debugger_ = true;
- EnterDebugger debugger(isolate_);
- return isolate_->debug()->debug_context();
-}
-
-
-void Debugger::UnloadDebugger() {
- Debug* debug = isolate_->debug();
-
- // Make sure that there are no breakpoints left.
- debug->ClearAllBreakPoints();
-
- // Unload the debugger if feasible.
- if (!never_unload_debugger_) {
- debug->Unload();
- }
-
- // Clear the flag indicating that the debugger should be unloaded.
- debugger_unload_pending_ = false;
+Handle<Context> Debug::GetDebugContext() {
+ DebugScope debug_scope(this);
+ // The global handle may be destroyed soon after. Return it reboxed.
+ return handle(*debug_context(), isolate_);
}
-void Debugger::NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue) {
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+void Debug::NotifyMessageHandler(v8::DebugEvent event,
+ Handle<JSObject> exec_state,
+ Handle<JSObject> event_data,
+ bool auto_continue) {
+ // Prevent other interrupts from triggering, for example API callbacks,
+ // while dispatching message handler callbacks.
+ PostponeInterruptsScope no_interrupts(isolate_);
+ ASSERT(is_active_);
HandleScope scope(isolate_);
-
- if (!isolate_->debug()->Load()) return;
-
// Process the individual events.
bool sendEventMessage = false;
switch (event) {
@@ -3103,8 +2843,8 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// The debug command interrupt flag might have been set when the command was
// added. It should be enough to clear the flag only once while we are in the
// debugger.
- ASSERT(isolate_->debug()->InDebugger());
- isolate_->stack_guard()->Continue(DEBUGCOMMAND);
+ ASSERT(in_debug_scope());
+ isolate_->stack_guard()->ClearDebugCommand();
// Notify the debugger that a debug event has occurred unless auto continue is
  // active, in which case no event is sent.
@@ -3121,216 +2861,137 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// in the queue if any. For script collected events don't even process
// messages in the queue as the execution state might not be what is expected
// by the client.
- if ((auto_continue && !HasCommands()) || event == v8::ScriptCollected) {
+ if ((auto_continue && !has_commands()) || event == v8::ScriptCollected) {
return;
}
- v8::TryCatch try_catch;
-
// DebugCommandProcessor goes here.
- v8::Local<v8::Object> cmd_processor;
- {
- v8::Local<v8::Object> api_exec_state =
- v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
- v8::Local<v8::String> fun_name = v8::String::NewFromUtf8(
- isolate, "debugCommandProcessor");
- v8::Local<v8::Function> fun =
- v8::Local<v8::Function>::Cast(api_exec_state->Get(fun_name));
-
- v8::Handle<v8::Boolean> running = v8::Boolean::New(isolate, auto_continue);
- static const int kArgc = 1;
- v8::Handle<Value> argv[kArgc] = { running };
- cmd_processor = v8::Local<v8::Object>::Cast(
- fun->Call(api_exec_state, kArgc, argv));
- if (try_catch.HasCaught()) {
- PrintLn(try_catch.Exception());
- return;
- }
- }
-
bool running = auto_continue;
+ Handle<Object> cmd_processor_ctor = Object::GetProperty(
+ isolate_, exec_state, "debugCommandProcessor").ToHandleChecked();
+ Handle<Object> ctor_args[] = { isolate_->factory()->ToBoolean(running) };
+ Handle<Object> cmd_processor = Execution::Call(
+ isolate_, cmd_processor_ctor, exec_state, 1, ctor_args).ToHandleChecked();
+ Handle<JSFunction> process_debug_request = Handle<JSFunction>::cast(
+ Object::GetProperty(
+ isolate_, cmd_processor, "processDebugRequest").ToHandleChecked());
+ Handle<Object> is_running = Object::GetProperty(
+ isolate_, cmd_processor, "isRunning").ToHandleChecked();
+
// Process requests from the debugger.
- while (true) {
+ do {
// Wait for new command in the queue.
- if (Debugger::host_dispatch_handler_) {
- // In case there is a host dispatch - do periodic dispatches.
- if (!command_received_.WaitFor(host_dispatch_period_)) {
-      // Timeout expired, do the dispatch.
- Debugger::host_dispatch_handler_();
- continue;
- }
- } else {
- // In case there is no host dispatch - just wait.
- command_received_.Wait();
- }
+ command_received_.Wait();
// Get the command from the queue.
CommandMessage command = command_queue_.Get();
isolate_->logger()->DebugTag(
"Got request from command queue, in interactive loop.");
- if (!Debugger::IsDebuggerActive()) {
+ if (!is_active()) {
// Delete command text and user data.
command.Dispose();
return;
}
- // Invoke JavaScript to process the debug request.
- v8::Local<v8::String> fun_name;
- v8::Local<v8::Function> fun;
- v8::Local<v8::Value> request;
- v8::TryCatch try_catch;
- fun_name = v8::String::NewFromUtf8(isolate, "processDebugRequest");
- fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
-
- request = v8::String::NewFromTwoByte(isolate, command.text().start(),
- v8::String::kNormalString,
- command.text().length());
- static const int kArgc = 1;
- v8::Handle<Value> argv[kArgc] = { request };
- v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv);
-
- // Get the response.
- v8::Local<v8::String> response;
- if (!try_catch.HasCaught()) {
- // Get response string.
- if (!response_val->IsUndefined()) {
- response = v8::Local<v8::String>::Cast(response_val);
+ Vector<const uc16> command_text(
+ const_cast<const uc16*>(command.text().start()),
+ command.text().length());
+ Handle<String> request_text = isolate_->factory()->NewStringFromTwoByte(
+ command_text).ToHandleChecked();
+ Handle<Object> request_args[] = { request_text };
+ Handle<Object> exception;
+ Handle<Object> answer_value;
+ Handle<String> answer;
+ MaybeHandle<Object> maybe_result = Execution::TryCall(
+ process_debug_request, cmd_processor, 1, request_args, &exception);
+
+ if (maybe_result.ToHandle(&answer_value)) {
+ if (answer_value->IsUndefined()) {
+ answer = isolate_->factory()->empty_string();
} else {
- response = v8::String::NewFromUtf8(isolate, "");
+ answer = Handle<String>::cast(answer_value);
}
// Log the JSON request/response.
if (FLAG_trace_debug_json) {
- PrintLn(request);
- PrintLn(response);
+ PrintF("%s\n", request_text->ToCString().get());
+ PrintF("%s\n", answer->ToCString().get());
}
- // Get the running state.
- fun_name = v8::String::NewFromUtf8(isolate, "isRunning");
- fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
- static const int kArgc = 1;
- v8::Handle<Value> argv[kArgc] = { response };
- v8::Local<v8::Value> running_val = fun->Call(cmd_processor, kArgc, argv);
- if (!try_catch.HasCaught()) {
- running = running_val->ToBoolean()->Value();
- }
+ Handle<Object> is_running_args[] = { answer };
+ maybe_result = Execution::Call(
+ isolate_, is_running, cmd_processor, 1, is_running_args);
+ running = maybe_result.ToHandleChecked()->IsTrue();
} else {
- // In case of failure the result text is the exception text.
- response = try_catch.Exception()->ToString();
+ answer = Handle<String>::cast(
+ Execution::ToString(isolate_, exception).ToHandleChecked());
}
// Return the result.
MessageImpl message = MessageImpl::NewResponse(
- event,
- running,
- Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data),
- Handle<String>(Utils::OpenHandle(*response)),
- command.client_data());
+ event, running, exec_state, event_data, answer, command.client_data());
InvokeMessageHandler(message);
command.Dispose();
// Return from debug event processing if either the VM is put into the
// running state (through a continue command) or auto continue is active
// and there are no more commands queued.
- if (running && !HasCommands()) {
- return;
- }
- }
+ } while (!running || has_commands());
}
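
The rewritten message pump is now a single do/while: block for a command, process it, and leave only once the VM is running again and the queue is drained. A minimal standalone model of that loop shape; the queue, the request processor and the "continue" command are illustrative stand-ins, and a non-blocking check replaces command_received_.Wait():

#include <cstdio>
#include <queue>
#include <string>

struct DebugSession {
  std::queue<std::string> commands;

  bool ProcessDebugRequest(const std::string& cmd) {
    // A real processor parses JSON and replies; "continue" resumes the VM.
    std::printf("request: %s\n", cmd.c_str());
    return cmd == "continue";  // returns the new "running" state
  }

  void PumpCommands(bool auto_continue) {
    bool running = auto_continue;
    do {
      if (commands.empty()) break;  // stands in for command_received_.Wait()
      std::string cmd = commands.front();
      commands.pop();
      running = ProcessDebugRequest(cmd);
      // Exit once running with no queued commands, as in the real loop.
    } while (!running || !commands.empty());
  }
};

int main() {
  DebugSession session;
  session.commands.push("evaluate 1+2");
  session.commands.push("continue");
  session.PumpCommands(false);  // processes both, exits after "continue"
  return 0;
}
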
-void Debugger::SetEventListener(Handle<Object> callback,
- Handle<Object> data) {
- HandleScope scope(isolate_);
+void Debug::SetEventListener(Handle<Object> callback,
+ Handle<Object> data) {
GlobalHandles* global_handles = isolate_->global_handles();
- // Clear the global handles for the event listener and the event listener data
- // object.
- if (!event_listener_.is_null()) {
- global_handles->Destroy(
- reinterpret_cast<Object**>(event_listener_.location()));
- event_listener_ = Handle<Object>();
- }
- if (!event_listener_data_.is_null()) {
- global_handles->Destroy(
- reinterpret_cast<Object**>(event_listener_data_.location()));
- event_listener_data_ = Handle<Object>();
- }
+ // Remove existing entry.
+ GlobalHandles::Destroy(event_listener_.location());
+ event_listener_ = Handle<Object>();
+ GlobalHandles::Destroy(event_listener_data_.location());
+ event_listener_data_ = Handle<Object>();
- // If there is a new debug event listener register it together with its data
- // object.
+ // Set new entry.
if (!callback->IsUndefined() && !callback->IsNull()) {
- event_listener_ = Handle<Object>::cast(
- global_handles->Create(*callback));
- if (data.is_null()) {
- data = isolate_->factory()->undefined_value();
- }
- event_listener_data_ = Handle<Object>::cast(
- global_handles->Create(*data));
+ event_listener_ = global_handles->Create(*callback);
+ if (data.is_null()) data = isolate_->factory()->undefined_value();
+ event_listener_data_ = global_handles->Create(*data);
}
- ListenersChanged();
+ UpdateState();
}
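// Sketch (illustrative, not part of this patch): how this setter is reached
// through the public API; the callback type is assumed to match this
// version's v8::Debug::EventCallback.
//
//   static void OnDebugEvent(const v8::Debug::EventDetails& details) {
//     if (details.GetEvent() == v8::Break) {
//       // Inspect details.GetExecutionState() / details.GetEventData().
//     }
//   }
//   v8::Debug::SetDebugEventListener(&OnDebugEvent);  // register
//   v8::Debug::SetDebugEventListener(NULL);           // clear again
// (end sketch)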
-void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
- LockGuard<RecursiveMutex> with(debugger_access_);
-
+void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
message_handler_ = handler;
- ListenersChanged();
- if (handler == NULL) {
+ UpdateState();
+ if (handler == NULL && in_debug_scope()) {
// Send an empty command to the debugger if in a break to make JavaScript
// run again if the debugger is closed.
- if (isolate_->debug()->InDebugger()) {
- ProcessCommand(Vector<const uint16_t>::empty());
- }
+ EnqueueCommandMessage(Vector<const uint16_t>::empty());
}
}
-void Debugger::ListenersChanged() {
- if (IsDebuggerActive()) {
- // Disable the compilation cache when the debugger is active.
+
+void Debug::UpdateState() {
+ is_active_ = message_handler_ != NULL || !event_listener_.is_null();
+ if (is_active_ || in_debug_scope()) {
+ // Note that the debug context could have already been loaded to
+ // bootstrap test cases.
isolate_->compilation_cache()->Disable();
- debugger_unload_pending_ = false;
- } else {
+ is_active_ = Load();
+ } else if (is_loaded()) {
isolate_->compilation_cache()->Enable();
- // Unload the debugger if event listener and message handler cleared.
- // Schedule this for later, because we may be in a non-V8 thread.
- debugger_unload_pending_ = true;
- }
-}
-
-
-void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- TimeDelta period) {
- host_dispatch_handler_ = handler;
- host_dispatch_period_ = period;
-}
-
-
-void Debugger::SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
- LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
- debug_message_dispatch_handler_ = handler;
-
- if (provide_locker && message_dispatch_helper_thread_ == NULL) {
- message_dispatch_helper_thread_ = new MessageDispatchHelperThread(isolate_);
- message_dispatch_helper_thread_->Start();
+ Unload();
}
}
// Calls the registered debug message handler. This callback is part of the
// public API.
-void Debugger::InvokeMessageHandler(MessageImpl message) {
- LockGuard<RecursiveMutex> with(debugger_access_);
-
- if (message_handler_ != NULL) {
- message_handler_(message);
- }
+void Debug::InvokeMessageHandler(MessageImpl message) {
+ if (message_handler_ != NULL) message_handler_(message);
}
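// Sketch (illustrative, not part of this patch): the embedder-side handler
// invoked here, registered through the public API. Assumes this version's
// v8::Debug::MessageHandler signature.
//
//   static void PrintJSONHandler(const v8::Debug::Message& message) {
//     v8::String::Utf8Value json(message.GetJSON());  // protocol JSON text
//     printf("debugger: %s\n", *json);
//   }
//   v8::Debug::SetMessageHandler(&PrintJSONHandler);
// (end sketch)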
@@ -3338,8 +2999,8 @@ void Debugger::InvokeMessageHandler(MessageImpl message) {
// a copy of the command string managed by the debugger. Up to this
// point, the command data was managed by the API client. Called
// by the API client thread.
-void Debugger::ProcessCommand(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data) {
+void Debug::EnqueueCommandMessage(Vector<const uint16_t> command,
+ v8::Debug::ClientData* client_data) {
// Need to cast away const.
CommandMessage message = CommandMessage::New(
Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
@@ -3350,219 +3011,136 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
command_received_.Signal();
// Set the debug command break flag to have the command processed.
- if (!isolate_->debug()->InDebugger()) {
- isolate_->stack_guard()->DebugCommand();
- }
-
- MessageDispatchHelperThread* dispatch_thread;
- {
- LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
- dispatch_thread = message_dispatch_helper_thread_;
- }
-
- if (dispatch_thread == NULL) {
- CallMessageDispatchHandler();
- } else {
- dispatch_thread->Schedule();
- }
+ if (!in_debug_scope()) isolate_->stack_guard()->RequestDebugCommand();
}
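// Sketch (illustrative, not part of this patch): a client thread feeding this
// queue through the public API, which is assumed to forward here.
//
//   void SendContinueCommand(v8::Isolate* isolate) {
//     const char* json =
//         "{\"seq\":1,\"type\":\"request\",\"command\":\"continue\"}";
//     uint16_t command[64];
//     int length = 0;
//     while (json[length] != '\0') { command[length] = json[length]; length++; }
//     v8::Debug::SendCommand(isolate, command, length);  // off-thread safe
//   }
// (end sketch)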
-bool Debugger::HasCommands() {
- return !command_queue_.IsEmpty();
-}
-
-
-void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
+void Debug::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
CommandMessage message = CommandMessage::New(Vector<uint16_t>(), client_data);
event_command_queue_.Put(message);
// Set the debug command break flag to have the command processed.
- if (!isolate_->debug()->InDebugger()) {
- isolate_->stack_guard()->DebugCommand();
- }
-}
-
-
-bool Debugger::IsDebuggerActive() {
- LockGuard<RecursiveMutex> with(debugger_access_);
-
- return message_handler_ != NULL ||
- !event_listener_.is_null() ||
- force_debugger_active_;
+ if (!in_debug_scope()) isolate_->stack_guard()->RequestDebugCommand();
}
-Handle<Object> Debugger::Call(Handle<JSFunction> fun,
- Handle<Object> data,
- bool* pending_exception) {
- // When calling functions in the debugger, prevent it from being unloaded.
- Debugger::never_unload_debugger_ = true;
-
- // Enter the debugger.
- EnterDebugger debugger(isolate_);
- if (debugger.FailedToEnter()) {
- return isolate_->factory()->undefined_value();
- }
+MaybeHandle<Object> Debug::Call(Handle<JSFunction> fun, Handle<Object> data) {
+ DebugScope debug_scope(this);
+ if (debug_scope.failed()) return isolate_->factory()->undefined_value();
// Create the execution state.
- bool caught_exception = false;
- Handle<Object> exec_state = MakeExecutionState(&caught_exception);
- if (caught_exception) {
+ Handle<Object> exec_state;
+ if (!MakeExecutionState().ToHandle(&exec_state)) {
return isolate_->factory()->undefined_value();
}
Handle<Object> argv[] = { exec_state, data };
- Handle<Object> result = Execution::Call(
+ return Execution::Call(
isolate_,
fun,
- Handle<Object>(isolate_->debug()->debug_context_->global_proxy(),
- isolate_),
+ Handle<Object>(debug_context()->global_proxy(), isolate_),
ARRAY_SIZE(argv),
- argv,
- pending_exception);
- return result;
+ argv);
}
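// Sketch (illustrative, not part of this patch): the public wrapper around
// this method; `fun` receives the execution state and `data` as arguments.
//
//   v8::Handle<v8::Function> fun = ...;  // e.g. compiled from script
//   v8::Handle<v8::Value> result = v8::Debug::Call(fun /*, data */);
// (end sketch)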
-static void StubMessageHandler2(const v8::Debug::Message& message) {
- // Simply ignore message.
-}
+void Debug::HandleDebugBreak() {
+ // Ignore debug break during bootstrapping.
+ if (isolate_->bootstrapper()->IsActive()) return;
+ // Just continue if breaks are disabled.
+ if (break_disabled_) return;
+ // Ignore debug break if debugger is not active.
+ if (!is_active()) return;
+ StackLimitCheck check(isolate_);
+ if (check.HasOverflowed()) return;
-bool Debugger::StartAgent(const char* name, int port,
- bool wait_for_connection) {
- if (wait_for_connection) {
- // Suspend V8 if it is already running or set V8 to suspend whenever
- // it starts.
- // Provide a stub message handler; V8 auto-continues each suspend
- // when there is no message handler, so we don't need a real one.
- // Once suspended, V8 will stay suspended indefinitely until a remote
- // debugger connects and issues a "continue" command.
- Debugger::message_handler_ = StubMessageHandler2;
- v8::Debug::DebugBreak();
+ { JavaScriptFrameIterator it(isolate_);
+ ASSERT(!it.done());
+ Object* fun = it.frame()->function();
+ if (fun && fun->IsJSFunction()) {
+ // Don't stop in builtin functions.
+ if (JSFunction::cast(fun)->IsBuiltin()) return;
+ GlobalObject* global = JSFunction::cast(fun)->context()->global_object();
+ // Don't stop in debugger functions.
+ if (IsDebugGlobal(global)) return;
+ }
}
- if (agent_ == NULL) {
- agent_ = new DebuggerAgent(isolate_, name, port);
- agent_->Start();
- }
- return true;
-}
+ // Collect the break state before clearing the flags.
+ bool debug_command_only = isolate_->stack_guard()->CheckDebugCommand() &&
+ !isolate_->stack_guard()->CheckDebugBreak();
+ isolate_->stack_guard()->ClearDebugBreak();
-void Debugger::StopAgent() {
- if (agent_ != NULL) {
- agent_->Shutdown();
- agent_->Join();
- delete agent_;
- agent_ = NULL;
- }
+ ProcessDebugMessages(debug_command_only);
}
-void Debugger::WaitForAgent() {
- if (agent_ != NULL)
- agent_->WaitUntilListening();
-}
+void Debug::ProcessDebugMessages(bool debug_command_only) {
+ isolate_->stack_guard()->ClearDebugCommand();
+ StackLimitCheck check(isolate_);
+ if (check.HasOverflowed()) return;
-void Debugger::CallMessageDispatchHandler() {
- v8::Debug::DebugMessageDispatchHandler handler;
- {
- LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
- handler = Debugger::debug_message_dispatch_handler_;
- }
- if (handler != NULL) {
- handler();
- }
-}
+ HandleScope scope(isolate_);
+ DebugScope debug_scope(this);
+ if (debug_scope.failed()) return;
+ // Notify the debug event listeners. Indicate auto continue if the break was
+ // a debug command break.
+ OnDebugBreak(isolate_->factory()->undefined_value(), debug_command_only);
+}
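// Control-flow sketch (condensed from this file, not part of the patch):
//   API thread:  EnqueueCommandMessage()
//                  -> stack_guard()->RequestDebugCommand()
//   JS thread:   stack-guard interrupt fires
//                  -> HandleDebugBreak() collects flags, clears DebugBreak
//                  -> ProcessDebugMessages() clears DebugCommand
//                  -> DebugScope + OnDebugBreak(), which ends up in
//                     NotifyMessageHandler() draining the command queue
// (end sketch)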
-EnterDebugger::EnterDebugger(Isolate* isolate)
- : isolate_(isolate),
- prev_(isolate_->debug()->debugger_entry()),
- it_(isolate_),
- has_js_frames_(!it_.done()),
- save_(isolate_) {
- Debug* debug = isolate_->debug();
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
+DebugScope::DebugScope(Debug* debug) : debug_(debug),
+ prev_(debug->debugger_entry()),
+ save_(debug_->isolate_) {
// Link recursive debugger entry.
- debug->set_debugger_entry(this);
+ debug_->thread_local_.current_debug_scope_ = this;
// Store the previous break id and frame id.
- break_id_ = debug->break_id();
- break_frame_id_ = debug->break_frame_id();
+ break_id_ = debug_->break_id();
+ break_frame_id_ = debug_->break_frame_id();
// Create the new break info. If there are no JavaScript frames there is no
// break frame id.
- if (has_js_frames_) {
- debug->NewBreak(it_.frame()->id());
- } else {
- debug->NewBreak(StackFrame::NO_ID);
- }
+ JavaScriptFrameIterator it(isolate());
+ bool has_js_frames = !it.done();
+ debug_->thread_local_.break_frame_id_ = has_js_frames ? it.frame()->id()
+ : StackFrame::NO_ID;
+ debug_->SetNextBreakId();
+ debug_->UpdateState();
// Make sure that debugger is loaded and enter the debugger context.
- load_failed_ = !debug->Load();
- if (!load_failed_) {
- // NOTE the member variable save which saves the previous context before
- // this change.
- isolate_->set_context(*debug->debug_context());
- }
+ // The previous context is kept in save_.
+ failed_ = !debug_->is_loaded();
+ if (!failed_) isolate()->set_context(*debug->debug_context());
}
-EnterDebugger::~EnterDebugger() {
- Debug* debug = isolate_->debug();
-
- // Restore to the previous break state.
- debug->SetBreak(break_frame_id_, break_id_);
- // Check for leaving the debugger.
- if (!load_failed_ && prev_ == NULL) {
+DebugScope::~DebugScope() {
+ if (!failed_ && prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
- if (!isolate_->has_pending_exception()) {
- // Try to avoid any pending debug break breaking in the clear mirror
- // cache JavaScript code.
- if (isolate_->stack_guard()->IsDebugBreak()) {
- debug->set_interrupts_pending(DEBUGBREAK);
- isolate_->stack_guard()->Continue(DEBUGBREAK);
- }
- debug->ClearMirrorCache();
- }
-
- // Request preemption and debug break when leaving the last debugger entry
- // if any of these where recorded while debugging.
- if (debug->is_interrupt_pending(PREEMPT)) {
- // This re-scheduling of preemption is to avoid starvation in some
- // debugging scenarios.
- debug->clear_interrupt_pending(PREEMPT);
- isolate_->stack_guard()->Preempt();
- }
- if (debug->is_interrupt_pending(DEBUGBREAK)) {
- debug->clear_interrupt_pending(DEBUGBREAK);
- isolate_->stack_guard()->DebugBreak();
- }
+ if (!isolate()->has_pending_exception()) debug_->ClearMirrorCache();
// If there are commands in the queue when leaving the debugger request
// that these commands are processed.
- if (isolate_->debugger()->HasCommands()) {
- isolate_->stack_guard()->DebugCommand();
- }
-
- // If leaving the debugger with the debugger no longer active unload it.
- if (!isolate_->debugger()->IsDebuggerActive()) {
- isolate_->debugger()->UnloadDebugger();
- }
+ if (debug_->has_commands()) isolate()->stack_guard()->RequestDebugCommand();
}
// Leaving this debugger entry.
- debug->set_debugger_entry(prev_);
+ debug_->thread_local_.current_debug_scope_ = prev_;
+
+ // Restore to the previous break state.
+ debug_->thread_local_.break_frame_id_ = break_frame_id_;
+ debug_->thread_local_.break_id_ = break_id_;
+
+ debug_->UpdateState();
}
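// Usage sketch (mirrors the call sites in this file, not part of the patch):
//
//   {
//     DebugScope debug_scope(isolate->debug());
//     if (debug_scope.failed()) return;  // debug context failed to load
//     // ... runs inside the debug context with a fresh break state ...
//   }  // previous context and break state restored here
// (end sketch)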
@@ -3640,20 +3218,21 @@ v8::Handle<v8::Object> MessageImpl::GetEventData() const {
v8::Handle<v8::String> MessageImpl::GetJSON() const {
- v8::EscapableHandleScope scope(
- reinterpret_cast<v8::Isolate*>(event_data_->GetIsolate()));
+ Isolate* isolate = event_data_->GetIsolate();
+ v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate));
if (IsEvent()) {
// Call toJSONProtocol on the debug event object.
- Handle<Object> fun = GetProperty(event_data_, "toJSONProtocol");
+ Handle<Object> fun = Object::GetProperty(
+ isolate, event_data_, "toJSONProtocol").ToHandleChecked();
if (!fun->IsJSFunction()) {
return v8::Handle<v8::String>();
}
- bool caught_exception;
- Handle<Object> json = Execution::TryCall(Handle<JSFunction>::cast(fun),
- event_data_,
- 0, NULL, &caught_exception);
- if (caught_exception || !json->IsString()) {
+
+ MaybeHandle<Object> maybe_json =
+ Execution::TryCall(Handle<JSFunction>::cast(fun), event_data_, 0, NULL);
+ Handle<Object> json;
+ if (!maybe_json.ToHandle(&json) || !json->IsString()) {
return v8::Handle<v8::String>();
}
return scope.Escape(v8::Utils::ToLocal(Handle<String>::cast(json)));
@@ -3731,10 +3310,6 @@ CommandMessage::CommandMessage(const Vector<uint16_t>& text,
}
-CommandMessage::~CommandMessage() {
-}
-
-
void CommandMessage::Dispose() {
text_.Dispose();
delete client_data_;
@@ -3755,10 +3330,7 @@ CommandMessageQueue::CommandMessageQueue(int size) : start_(0), end_(0),
CommandMessageQueue::~CommandMessageQueue() {
- while (!IsEmpty()) {
- CommandMessage m = Get();
- m.Dispose();
- }
+ while (!IsEmpty()) Get().Dispose();
DeleteArray(messages_);
}
@@ -3824,40 +3396,4 @@ void LockingCommandMessageQueue::Clear() {
queue_.Clear();
}
-
-MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
- : Thread("v8:MsgDispHelpr"),
- isolate_(isolate), sem_(0),
- already_signalled_(false) {
-}
-
-
-void MessageDispatchHelperThread::Schedule() {
- {
- LockGuard<Mutex> lock_guard(&mutex_);
- if (already_signalled_) {
- return;
- }
- already_signalled_ = true;
- }
- sem_.Signal();
-}
-
-
-void MessageDispatchHelperThread::Run() {
- while (true) {
- sem_.Wait();
- {
- LockGuard<Mutex> lock_guard(&mutex_);
- already_signalled_ = false;
- }
- {
- Locker locker(reinterpret_cast<v8::Isolate*>(isolate_));
- isolate_->debugger()->CallMessageDispatchHandler();
- }
- }
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/debug.h b/chromium/v8/src/debug.h
index 8e71ea67052..7f9b1a2eb02 100644
--- a/chromium/v8/src/debug.h
+++ b/chromium/v8/src/debug.h
@@ -1,56 +1,31 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DEBUG_H_
#define V8_DEBUG_H_
-#include "allocation.h"
-#include "arguments.h"
-#include "assembler.h"
-#include "debug-agent.h"
-#include "execution.h"
-#include "factory.h"
-#include "flags.h"
-#include "frames-inl.h"
-#include "hashmap.h"
-#include "platform.h"
-#include "platform/socket.h"
-#include "string-stream.h"
-#include "v8threads.h"
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#include "../include/v8-debug.h"
+#include "src/allocation.h"
+#include "src/arguments.h"
+#include "src/assembler.h"
+#include "src/execution.h"
+#include "src/factory.h"
+#include "src/flags.h"
+#include "src/frames-inl.h"
+#include "src/hashmap.h"
+#include "src/liveedit.h"
+#include "src/platform.h"
+#include "src/string-stream.h"
+#include "src/v8threads.h"
+
+#include "include/v8-debug.h"
namespace v8 {
namespace internal {
// Forward declarations.
-class EnterDebugger;
+class DebugScope;
// Step actions. NOTE: These values are in macros.py as well.
@@ -175,8 +150,7 @@ class BreakLocationIterator {
// the cache is the script id.
class ScriptCache : private HashMap {
public:
- explicit ScriptCache(Isolate* isolate)
- : HashMap(ScriptMatch), isolate_(isolate), collected_scripts_(10) {}
+ explicit ScriptCache(Isolate* isolate);
virtual ~ScriptCache() { Clear(); }
// Add script to the cache.
@@ -194,16 +168,12 @@ class ScriptCache : private HashMap {
return ComputeIntegerHash(key, v8::internal::kZeroHashSeed);
}
- // Scripts match if their keys (script id) match.
- static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
-
// Clear the cache releasing all the weak handles.
void Clear();
// Weak handle callback for scripts in the cache.
- static void HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data);
+ static void HandleWeakScript(
+ const v8::WeakCallbackData<v8::Value, void>& data);
Isolate* isolate_;
// List used during GC to temporarily store id's of collected scripts.
@@ -230,415 +200,6 @@ class DebugInfoListNode {
DebugInfoListNode* next_;
};
-// This class contains the debugger support. The main purpose is to handle
-// setting break points in the code.
-//
-// This class controls the debug info for all functions which currently have
-// active breakpoints in them. This debug info is held in the heap root object
-// debug_info which is a FixedArray. Each entry in this list is of class
-// DebugInfo.
-class Debug {
- public:
- void SetUp(bool create_heap_objects);
- bool Load();
- void Unload();
- bool IsLoaded() { return !debug_context_.is_null(); }
- bool InDebugger() { return thread_local_.debugger_entry_ != NULL; }
- void PreemptionWhileInDebugger();
- void Iterate(ObjectVisitor* v);
-
- Object* Break(Arguments args);
- void SetBreakPoint(Handle<JSFunction> function,
- Handle<Object> break_point_object,
- int* source_position);
- bool SetBreakPointForScript(Handle<Script> script,
- Handle<Object> break_point_object,
- int* source_position,
- BreakPositionAlignment alignment);
- void ClearBreakPoint(Handle<Object> break_point_object);
- void ClearAllBreakPoints();
- void FloodWithOneShot(Handle<JSFunction> function);
- void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
- void FloodHandlerWithOneShot();
- void ChangeBreakOnException(ExceptionBreakType type, bool enable);
- bool IsBreakOnException(ExceptionBreakType type);
- void PrepareStep(StepAction step_action,
- int step_count,
- StackFrame::Id frame_id);
- void ClearStepping();
- void ClearStepOut();
- bool IsStepping() { return thread_local_.step_count_ > 0; }
- bool StepNextContinue(BreakLocationIterator* break_location_iterator,
- JavaScriptFrame* frame);
- static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
- static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
-
- void PrepareForBreakPoints();
-
- // This function is used in FunctionNameUsing* tests.
- Object* FindSharedFunctionInfoInScript(Handle<Script> script, int position);
-
- // Returns whether the operation succeeded. Compilation can only be triggered
- // if a valid closure is passed as the second argument, otherwise the shared
- // function needs to be compiled already.
- bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> function);
-
- // Returns true if the current stub call is patched to call the debugger.
- static bool IsDebugBreak(Address addr);
- // Returns true if the current return statement has been patched to be
- // a debugger breakpoint.
- static bool IsDebugBreakAtReturn(RelocInfo* rinfo);
-
- // Check whether a code stub with the specified major key is a possible break
- // point location.
- static bool IsSourceBreakStub(Code* code);
- static bool IsBreakStub(Code* code);
-
- // Find the builtin to use for invoking the debug break
- static Handle<Code> FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode);
-
- static Handle<Object> GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared,
- BreakPositionAlignment position_aligment);
-
- // Getter for the debug_context.
- inline Handle<Context> debug_context() { return debug_context_; }
-
- // Check whether a global object is the debug global object.
- bool IsDebugGlobal(GlobalObject* global);
-
- // Check whether this frame is just about to return.
- bool IsBreakAtReturn(JavaScriptFrame* frame);
-
- // Fast check to see if any break points are active.
- inline bool has_break_points() { return has_break_points_; }
-
- void NewBreak(StackFrame::Id break_frame_id);
- void SetBreak(StackFrame::Id break_frame_id, int break_id);
- StackFrame::Id break_frame_id() {
- return thread_local_.break_frame_id_;
- }
- int break_id() { return thread_local_.break_id_; }
-
- bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
- void HandleStepIn(Handle<JSFunction> function,
- Handle<Object> holder,
- Address fp,
- bool is_constructor);
- Address step_in_fp() { return thread_local_.step_into_fp_; }
- Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
-
- bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
- Address step_out_fp() { return thread_local_.step_out_fp_; }
-
- EnterDebugger* debugger_entry() {
- return thread_local_.debugger_entry_;
- }
- void set_debugger_entry(EnterDebugger* entry) {
- thread_local_.debugger_entry_ = entry;
- }
-
- // Check whether any of the specified interrupts are pending.
- bool is_interrupt_pending(InterruptFlag what) {
- return (thread_local_.pending_interrupts_ & what) != 0;
- }
-
- // Set specified interrupts as pending.
- void set_interrupts_pending(InterruptFlag what) {
- thread_local_.pending_interrupts_ |= what;
- }
-
- // Clear specified interrupts from pending.
- void clear_interrupt_pending(InterruptFlag what) {
- thread_local_.pending_interrupts_ &= ~static_cast<int>(what);
- }
-
- // Getter and setter for the disable break state.
- bool disable_break() { return disable_break_; }
- void set_disable_break(bool disable_break) {
- disable_break_ = disable_break;
- }
-
- // Getters for the current exception break state.
- bool break_on_exception() { return break_on_exception_; }
- bool break_on_uncaught_exception() {
- return break_on_uncaught_exception_;
- }
-
- enum AddressId {
- k_after_break_target_address,
- k_debug_break_return_address,
- k_debug_break_slot_address,
- k_restarter_frame_function_pointer
- };
-
- // Support for setting the address to jump to when returning from break point.
- Address* after_break_target_address() {
- return reinterpret_cast<Address*>(&thread_local_.after_break_target_);
- }
- Address* restarter_frame_function_pointer_address() {
- Object*** address = &thread_local_.restarter_frame_function_pointer_;
- return reinterpret_cast<Address*>(address);
- }
-
- // Support for saving/restoring registers when handling debug break calls.
- Object** register_address(int r) {
- return &registers_[r];
- }
-
- // Access to the debug break on return code.
- Code* debug_break_return() { return debug_break_return_; }
- Code** debug_break_return_address() {
- return &debug_break_return_;
- }
-
- // Access to the debug break in debug break slot code.
- Code* debug_break_slot() { return debug_break_slot_; }
- Code** debug_break_slot_address() {
- return &debug_break_slot_;
- }
-
- static const int kEstimatedNofDebugInfoEntries = 16;
- static const int kEstimatedNofBreakPointsInFunction = 16;
-
- // Passed to MakeWeak.
- static void HandleWeakDebugInfo(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data);
-
- friend class Debugger;
- friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
- friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc
-
- // Threading support.
- char* ArchiveDebug(char* to);
- char* RestoreDebug(char* from);
- static int ArchiveSpacePerThread();
- void FreeThreadResources() { }
-
- // Mirror cache handling.
- void ClearMirrorCache();
-
- // Script cache handling.
- void CreateScriptCache();
- void DestroyScriptCache();
- void AddScriptToScriptCache(Handle<Script> script);
- Handle<FixedArray> GetLoadedScripts();
-
- // Garbage collection notifications.
- void AfterGarbageCollection();
-
- // Code generator routines.
- static void GenerateSlot(MacroAssembler* masm);
- static void GenerateLoadICDebugBreak(MacroAssembler* masm);
- static void GenerateStoreICDebugBreak(MacroAssembler* masm);
- static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
- static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
- static void GenerateCompareNilICDebugBreak(MacroAssembler* masm);
- static void GenerateReturnDebugBreak(MacroAssembler* masm);
- static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
- static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm);
- static void GenerateCallConstructStubDebugBreak(MacroAssembler* masm);
- static void GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm);
- static void GenerateSlotDebugBreak(MacroAssembler* masm);
- static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
-
- // FrameDropper is a code replacement for a JavaScript frame with possibly
- // several frames above.
- // There are no calling conventions here, because it never actually gets
- // called; it only gets returned to.
- static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
-
- // Called from stub-cache.cc.
- static void GenerateCallICDebugBreak(MacroAssembler* masm);
-
- // Describes how exactly a frame has been dropped from stack.
- enum FrameDropMode {
- // No frame has been dropped.
- FRAMES_UNTOUCHED,
- // The top JS frame had been calling IC stub. IC stub mustn't be called now.
- FRAME_DROPPED_IN_IC_CALL,
- // The top JS frame had been calling debug break slot stub. Patch the
- // address this stub jumps to in the end.
- FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
- // The top JS frame had been calling some C++ function. The return address
- // gets patched automatically.
- FRAME_DROPPED_IN_DIRECT_CALL,
- FRAME_DROPPED_IN_RETURN_CALL,
- CURRENTLY_SET_MODE
- };
-
- void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- FrameDropMode mode,
- Object** restarter_frame_function_pointer);
-
- // Initializes an artificial stack frame. The data it contains is used for:
- // a. successful work of frame dropper code which eventually gets control,
- // b. being compatible with regular stack structure for various stack
- // iterators.
- // Returns address of stack allocated pointer to restarted function,
- // the value that is called 'restarter_frame_function_pointer'. The value
- // at this address (possibly updated by GC) may be used later when preparing
- // 'step in' operation.
- static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code);
-
- static const int kFrameDropperFrameSize;
-
- // Architecture-specific constant.
- static const bool kFrameDropperSupported;
-
- /**
- * Defines layout of a stack frame that supports padding. This is a regular
- * internal frame that has a flexible stack structure. LiveEdit can shift
- * its lower part up the stack, taking up the 'padding' space when additional
- * stack memory is required.
- * Such frame is expected immediately above the topmost JavaScript frame.
- *
- * Stack Layout:
- * --- Top
- * LiveEdit routine frames
- * ---
- * C frames of debug handler
- * ---
- * ...
- * ---
- * An internal frame that has n padding words:
- * - any number of words as needed by code -- upper part of frame
- * - padding size: a Smi storing n -- current size of padding
- * - padding: n words filled with kPaddingValue in form of Smi
- * - 3 context/type words of a regular InternalFrame
- * - fp
- * ---
- * Topmost JavaScript frame
- * ---
- * ...
- * --- Bottom
- */
- class FramePaddingLayout : public AllStatic {
- public:
- // Architecture-specific constant.
- static const bool kIsSupported;
-
- // A size of frame base including fp. Padding words starts right above
- // the base.
- static const int kFrameBaseSize = 4;
-
- // A number of words that should be reserved on stack for the LiveEdit use.
- // Normally equals 1. Stored on stack in form of Smi.
- static const int kInitialSize;
- // A value that padding words are filled with (in form of Smi). Going
- // bottom-top, the first word not having this value is a counter word.
- static const int kPaddingValue;
- };
-
- private:
- explicit Debug(Isolate* isolate);
- ~Debug();
-
- static bool CompileDebuggerScript(Isolate* isolate, int index);
- void ClearOneShot();
- void ActivateStepIn(StackFrame* frame);
- void ClearStepIn();
- void ActivateStepOut(StackFrame* frame);
- void ClearStepNext();
- // Returns whether the compile succeeded.
- void RemoveDebugInfo(Handle<DebugInfo> debug_info);
- void SetAfterBreakTarget(JavaScriptFrame* frame);
- Handle<Object> CheckBreakPoints(Handle<Object> break_point);
- bool CheckBreakPoint(Handle<Object> break_point_object);
-
- // Global handle to debug context where all the debugger JavaScript code is
- // loaded.
- Handle<Context> debug_context_;
-
- // Boolean state indicating whether any break points are set.
- bool has_break_points_;
-
- // Cache of all scripts in the heap.
- ScriptCache* script_cache_;
-
- // List of active debug info objects.
- DebugInfoListNode* debug_info_list_;
-
- bool disable_break_;
- bool break_on_exception_;
- bool break_on_uncaught_exception_;
-
- // Per-thread data.
- class ThreadLocal {
- public:
- // Counter for generating next break id.
- int break_count_;
-
- // Current break id.
- int break_id_;
-
- // Frame id for the frame of the current break.
- StackFrame::Id break_frame_id_;
-
- // Step action for last step performed.
- StepAction last_step_action_;
-
- // Source statement position from last step next action.
- int last_statement_position_;
-
- // Number of steps left to perform before debug event.
- int step_count_;
-
- // Frame pointer from last step next action.
- Address last_fp_;
-
- // Number of queued steps left to perform before debug event.
- int queued_step_count_;
-
- // Frame pointer for frame from which step in was performed.
- Address step_into_fp_;
-
- // Frame pointer for the frame where debugger should be called when current
- // step out action is completed.
- Address step_out_fp_;
-
- // Storage location for jump when exiting debug break calls.
- Address after_break_target_;
-
- // Stores the way how LiveEdit has patched the stack. It is used when
- // debugger returns control back to user script.
- FrameDropMode frame_drop_mode_;
-
- // Top debugger entry.
- EnterDebugger* debugger_entry_;
-
- // Pending interrupts scheduled while debugging.
- int pending_interrupts_;
-
- // When restarter frame is on stack, stores the address
- // of the pointer to function being restarted. Otherwise (most of the time)
- // stores NULL. This pointer is used with 'step in' implementation.
- Object** restarter_frame_function_pointer_;
- };
-
- // Storage location for registers when handling debug break calls
- JSCallerSavedBuffer registers_;
- ThreadLocal thread_local_;
- void ThreadInit();
-
- // Code to call for handling debug break on return.
- Code* debug_break_return_;
-
- // Code to call for handling debug break in debug break slots.
- Code* debug_break_slot_;
-
- Isolate* isolate_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(Debug);
-};
-
-
-DECLARE_RUNTIME_FUNCTION(Object*, Debug_Break);
// Message delivered to the message handler callback. This is either a debugger
@@ -723,7 +284,6 @@ class CommandMessage {
static CommandMessage New(const Vector<uint16_t>& command,
v8::Debug::ClientData* data);
CommandMessage();
- ~CommandMessage();
// Deletes user data and disposes of the text.
void Dispose();
@@ -737,6 +297,7 @@ class CommandMessage {
v8::Debug::ClientData* client_data_;
};
+
// A Queue of CommandMessage objects. A thread-safe version is
// LockingCommandMessageQueue, based on this class.
class CommandMessageQueue BASE_EMBEDDED {
@@ -758,9 +319,6 @@ class CommandMessageQueue BASE_EMBEDDED {
};
-class MessageDispatchHelperThread;
-
-
// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
// messages. The message data is not managed by LockingCommandMessageQueue.
// Pointers to the data are passed in and out. Implemented by adding a
@@ -780,296 +338,437 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
};
-class Debugger {
+class PromiseOnStack {
public:
- ~Debugger();
-
- void DebugRequest(const uint16_t* json_request, int length);
-
- Handle<Object> MakeJSObject(Vector<const char> constructor_name,
- int argc,
- Handle<Object> argv[],
- bool* caught_exception);
- Handle<Object> MakeExecutionState(bool* caught_exception);
- Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
- Handle<Object> break_points_hit,
- bool* caught_exception);
- Handle<Object> MakeExceptionEvent(Handle<Object> exec_state,
- Handle<Object> exception,
- bool uncaught,
- bool* caught_exception);
- Handle<Object> MakeNewFunctionEvent(Handle<Object> func,
- bool* caught_exception);
- Handle<Object> MakeCompileEvent(Handle<Script> script,
- bool before,
- bool* caught_exception);
- Handle<Object> MakeScriptCollectedEvent(int id,
- bool* caught_exception);
- void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
- void OnException(Handle<Object> exception, bool uncaught);
- void OnBeforeCompile(Handle<Script> script);
+ PromiseOnStack(Isolate* isolate,
+ PromiseOnStack* prev,
+ Handle<JSFunction> getter);
+ ~PromiseOnStack();
+ StackHandler* handler() { return handler_; }
+ Handle<JSFunction> getter() { return getter_; }
+ PromiseOnStack* prev() { return prev_; }
+ private:
+ Isolate* isolate_;
+ StackHandler* handler_;
+ Handle<JSFunction> getter_;
+ PromiseOnStack* prev_;
+};
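// Pairing sketch (an assumption based on the Promise handling declarations in
// Debug below, not part of the patch):
//
//   debug->PromiseHandlePrologue(getter);  // push entry, remember handler
//   // ... resolve the promise; a throw here can be matched to it ...
//   debug->PromiseHandleEpilogue();        // pop the PromiseOnStack entry
// (end sketch)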
+
+// This class contains the debugger support. The main purpose is to handle
+// setting break points in the code.
+//
+// This class controls the debug info for all functions which currently have
+// active breakpoints in them. This debug info is held in the heap root object
+// debug_info which is a FixedArray. Each entry in this list is of class
+// DebugInfo.
+class Debug {
+ public:
enum AfterCompileFlags {
NO_AFTER_COMPILE_FLAGS,
SEND_WHEN_DEBUGGING
};
+
+ // Debug event triggers.
+ void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
+ void OnException(Handle<Object> exception, bool uncaught);
+ void OnBeforeCompile(Handle<Script> script);
void OnAfterCompile(Handle<Script> script,
AfterCompileFlags after_compile_flags);
void OnScriptCollected(int id);
- void ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- bool auto_continue);
- void NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue);
+
+ // API facing.
void SetEventListener(Handle<Object> callback, Handle<Object> data);
- void SetMessageHandler(v8::Debug::MessageHandler2 handler);
- void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- TimeDelta period);
- void SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler,
- bool provide_locker);
-
- // Invoke the message handler function.
- void InvokeMessageHandler(MessageImpl message);
+ void SetMessageHandler(v8::Debug::MessageHandler handler);
+ void EnqueueCommandMessage(Vector<const uint16_t> command,
+ v8::Debug::ClientData* client_data = NULL);
+ // Enqueue a debugger command to the command queue for event listeners.
+ void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
+ MUST_USE_RESULT MaybeHandle<Object> Call(Handle<JSFunction> fun,
+ Handle<Object> data);
+ Handle<Context> GetDebugContext();
+ void HandleDebugBreak();
+ void ProcessDebugMessages(bool debug_command_only);
- // Add a debugger command to the command queue.
- void ProcessCommand(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data = NULL);
+ // Internal logic
+ bool Load();
+ void Break(Arguments args, JavaScriptFrame*);
+ void SetAfterBreakTarget(JavaScriptFrame* frame);
- // Check whether there are commands in the command queue.
- bool HasCommands();
+ // Scripts handling.
+ Handle<FixedArray> GetLoadedScripts();
- // Enqueue a debugger command to the command queue for event listeners.
- void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
+ // Break point handling.
+ bool SetBreakPoint(Handle<JSFunction> function,
+ Handle<Object> break_point_object,
+ int* source_position);
+ bool SetBreakPointForScript(Handle<Script> script,
+ Handle<Object> break_point_object,
+ int* source_position,
+ BreakPositionAlignment alignment);
+ void ClearBreakPoint(Handle<Object> break_point_object);
+ void ClearAllBreakPoints();
+ void FloodWithOneShot(Handle<JSFunction> function);
+ void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
+ void FloodHandlerWithOneShot();
+ void ChangeBreakOnException(ExceptionBreakType type, bool enable);
+ bool IsBreakOnException(ExceptionBreakType type);
+
+ // Stepping handling.
+ void PrepareStep(StepAction step_action,
+ int step_count,
+ StackFrame::Id frame_id);
+ void ClearStepping();
+ void ClearStepOut();
+ bool IsStepping() { return thread_local_.step_count_ > 0; }
+ bool StepNextContinue(BreakLocationIterator* break_location_iterator,
+ JavaScriptFrame* frame);
+ bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
+ void HandleStepIn(Handle<JSFunction> function,
+ Handle<Object> holder,
+ Address fp,
+ bool is_constructor);
+ bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
- Handle<Object> Call(Handle<JSFunction> fun,
- Handle<Object> data,
- bool* pending_exception);
+ // Purge all code objects that have no debug break slots.
+ void PrepareForBreakPoints();
- // Start the debugger agent listening on the provided port.
- bool StartAgent(const char* name, int port,
- bool wait_for_connection = false);
+ // Returns whether the operation succeeded. Compilation can only be triggered
+ // if a valid closure is passed as the second argument, otherwise the shared
+ // function needs to be compiled already.
+ bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
+ Handle<JSFunction> function);
+ static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
+ static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
- // Stop the debugger agent.
- void StopAgent();
+ // This function is used in FunctionNameUsing* tests.
+ Object* FindSharedFunctionInfoInScript(Handle<Script> script, int position);
- // Blocks until the agent has started listening for connections
- void WaitForAgent();
+ // Returns true if the current stub call is patched to call the debugger.
+ static bool IsDebugBreak(Address addr);
+ // Returns true if the current return statement has been patched to be
+ // a debugger breakpoint.
+ static bool IsDebugBreakAtReturn(RelocInfo* rinfo);
- void CallMessageDispatchHandler();
+ static Handle<Object> GetSourceBreakLocations(
+ Handle<SharedFunctionInfo> shared,
+ BreakPositionAlignment position_aligment);
- Handle<Context> GetDebugContext();
+ // Check whether a global object is the debug global object.
+ bool IsDebugGlobal(GlobalObject* global);
- // Unload the debugger if possible. Only called when no debugger is currently
- // active.
- void UnloadDebugger();
- friend void ForceUnloadDebugger(); // In test-debug.cc
+ // Check whether this frame is just about to return.
+ bool IsBreakAtReturn(JavaScriptFrame* frame);
- inline bool EventActive(v8::DebugEvent event) {
- LockGuard<RecursiveMutex> lock_guard(debugger_access_);
+ // Promise handling.
+ void PromiseHandlePrologue(Handle<JSFunction> promise_getter);
+ void PromiseHandleEpilogue();
- // Check whether the message handler was been cleared.
- if (debugger_unload_pending_) {
- if (isolate_->debug()->debugger_entry() == NULL) {
- UnloadDebugger();
- }
- }
+ // Support for LiveEdit
+ void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
+ LiveEdit::FrameDropMode mode,
+ Object** restarter_frame_function_pointer);
- if (((event == v8::BeforeCompile) || (event == v8::AfterCompile)) &&
- !FLAG_debug_compile_events) {
- return false;
+ // Passed to MakeWeak.
+ static void HandleWeakDebugInfo(
+ const v8::WeakCallbackData<v8::Value, void>& data);
- } else if ((event == v8::ScriptCollected) &&
- !FLAG_debug_script_collected_events) {
- return false;
- }
+ // Threading support.
+ char* ArchiveDebug(char* to);
+ char* RestoreDebug(char* from);
+ static int ArchiveSpacePerThread();
+ void FreeThreadResources() { }
- // Currently argument event is not used.
- return !compiling_natives_ && Debugger::IsDebuggerActive();
- }
+ // Record function from which eval was called.
+ static void RecordEvalCaller(Handle<Script> script);
- void set_compiling_natives(bool compiling_natives) {
- compiling_natives_ = compiling_natives;
- }
- bool compiling_natives() const { return compiling_natives_; }
- void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
- bool is_loading_debugger() const { return is_loading_debugger_; }
+ // Garbage collection notifications.
+ void AfterGarbageCollection();
+
+ // Flags and states.
+ DebugScope* debugger_entry() { return thread_local_.current_debug_scope_; }
+ inline Handle<Context> debug_context() { return debug_context_; }
void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
bool live_edit_enabled() const {
return FLAG_enable_liveedit && live_edit_enabled_ ;
}
- void set_force_debugger_active(bool force_debugger_active) {
- force_debugger_active_ = force_debugger_active;
+
+ inline bool is_active() const { return is_active_; }
+ inline bool is_loaded() const { return !debug_context_.is_null(); }
+ inline bool has_break_points() const { return has_break_points_; }
+ inline bool in_debug_scope() const {
+ return thread_local_.current_debug_scope_ != NULL;
+ }
+ void set_disable_break(bool v) { break_disabled_ = v; }
+
+ StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
+ int break_id() { return thread_local_.break_id_; }
+
+ // Support for embedding into generated code.
+ Address after_break_target_address() {
+ return reinterpret_cast<Address>(&after_break_target_);
+ }
+
+ Address restarter_frame_function_pointer_address() {
+ Object*** address = &thread_local_.restarter_frame_function_pointer_;
+ return reinterpret_cast<Address>(address);
}
- bool force_debugger_active() const { return force_debugger_active_; }
- bool IsDebuggerActive();
+ Address step_in_fp_addr() {
+ return reinterpret_cast<Address>(&thread_local_.step_into_fp_);
+ }
private:
- explicit Debugger(Isolate* isolate);
+ explicit Debug(Isolate* isolate);
+
+ void UpdateState();
+ void Unload();
+ void SetNextBreakId() {
+ thread_local_.break_id_ = ++thread_local_.break_count_;
+ }
+
+ // Check whether there are commands in the command queue.
+ inline bool has_commands() const { return !command_queue_.IsEmpty(); }
+ inline bool ignore_events() const { return is_suppressed_ || !is_active_; }
+
+ // Constructors for debug event objects.
+ MUST_USE_RESULT MaybeHandle<Object> MakeJSObject(
+ const char* constructor_name,
+ int argc,
+ Handle<Object> argv[]);
+ MUST_USE_RESULT MaybeHandle<Object> MakeExecutionState();
+ MUST_USE_RESULT MaybeHandle<Object> MakeBreakEvent(
+ Handle<Object> break_points_hit);
+ MUST_USE_RESULT MaybeHandle<Object> MakeExceptionEvent(
+ Handle<Object> exception,
+ bool uncaught,
+ Handle<Object> promise);
+ MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
+ Handle<Script> script, bool before);
+ MUST_USE_RESULT MaybeHandle<Object> MakeScriptCollectedEvent(int id);
+
+ // Mirror cache handling.
+ void ClearMirrorCache();
+
+ // Returns a promise if it does not have a reject handler.
+ Handle<Object> GetPromiseForUncaughtException();
void CallEventCallback(v8::DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
v8::Debug::ClientData* client_data);
- void CallCEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
- void CallJSEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data);
- void ListenersChanged();
-
- RecursiveMutex* debugger_access_; // Mutex guarding debugger variables.
- Handle<Object> event_listener_; // Global handle to listener.
+ void ProcessDebugEvent(v8::DebugEvent event,
+ Handle<JSObject> event_data,
+ bool auto_continue);
+ void NotifyMessageHandler(v8::DebugEvent event,
+ Handle<JSObject> exec_state,
+ Handle<JSObject> event_data,
+ bool auto_continue);
+ void InvokeMessageHandler(MessageImpl message);
+
+ static bool CompileDebuggerScript(Isolate* isolate, int index);
+ void ClearOneShot();
+ void ActivateStepIn(StackFrame* frame);
+ void ClearStepIn();
+ void ActivateStepOut(StackFrame* frame);
+ void ClearStepNext();
+  // Removes the debug info from the list of active debug info objects.
+ void RemoveDebugInfo(Handle<DebugInfo> debug_info);
+ Handle<Object> CheckBreakPoints(Handle<Object> break_point);
+ bool CheckBreakPoint(Handle<Object> break_point_object);
+
+ inline void AssertDebugContext() {
+ ASSERT(isolate_->context() == *debug_context());
+ ASSERT(in_debug_scope());
+ }
+
+ void ThreadInit();
+
+ // Global handles.
+ Handle<Context> debug_context_;
+ Handle<Object> event_listener_;
Handle<Object> event_listener_data_;
- bool compiling_natives_; // Are we compiling natives?
- bool is_loading_debugger_; // Are we loading the debugger?
- bool live_edit_enabled_; // Enable LiveEdit.
- bool never_unload_debugger_; // Can we unload the debugger?
- bool force_debugger_active_; // Activate debugger without event listeners.
- v8::Debug::MessageHandler2 message_handler_;
- bool debugger_unload_pending_; // Was message handler cleared?
- v8::Debug::HostDispatchHandler host_dispatch_handler_;
- Mutex dispatch_handler_access_; // Mutex guarding dispatch handler.
- v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
- MessageDispatchHelperThread* message_dispatch_helper_thread_;
- TimeDelta host_dispatch_period_;
-
- DebuggerAgent* agent_;
+
+ v8::Debug::MessageHandler message_handler_;
static const int kQueueInitialSize = 4;
- LockingCommandMessageQueue command_queue_;
Semaphore command_received_; // Signaled for each command received.
+ LockingCommandMessageQueue command_queue_;
LockingCommandMessageQueue event_command_queue_;
+ bool is_active_;
+ bool is_suppressed_;
+ bool live_edit_enabled_;
+ bool has_break_points_;
+ bool break_disabled_;
+ bool break_on_exception_;
+ bool break_on_uncaught_exception_;
+
+ ScriptCache* script_cache_; // Cache of all scripts in the heap.
+ DebugInfoListNode* debug_info_list_; // List of active debug info objects.
+
+ // Storage location for jump when exiting debug break calls.
+ // Note that this address is not GC safe. It should be computed immediately
+ // before returning to the DebugBreakCallHelper.
+ Address after_break_target_;
+
+ // Per-thread data.
+ class ThreadLocal {
+ public:
+ // Top debugger entry.
+ DebugScope* current_debug_scope_;
+
+ // Counter for generating next break id.
+ int break_count_;
+
+ // Current break id.
+ int break_id_;
+
+ // Frame id for the frame of the current break.
+ StackFrame::Id break_frame_id_;
+
+ // Step action for last step performed.
+ StepAction last_step_action_;
+
+ // Source statement position from last step next action.
+ int last_statement_position_;
+
+ // Number of steps left to perform before debug event.
+ int step_count_;
+
+ // Frame pointer from last step next action.
+ Address last_fp_;
+
+ // Number of queued steps left to perform before debug event.
+ int queued_step_count_;
+
+ // Frame pointer for frame from which step in was performed.
+ Address step_into_fp_;
+
+ // Frame pointer for the frame where debugger should be called when current
+ // step out action is completed.
+ Address step_out_fp_;
+
+ // Stores how LiveEdit has patched the stack. It is used when the
+ // debugger returns control back to the user script.
+ LiveEdit::FrameDropMode frame_drop_mode_;
+
+ // When a restarter frame is on the stack, stores the address
+ // of the pointer to the function being restarted. Otherwise (most of the
+ // time) stores NULL. This pointer is used by the 'step in' implementation.
+ Object** restarter_frame_function_pointer_;
+
+ // When a promise is being resolved, we may want to trigger a debug event
+ // if we catch a throw. For this purpose we remember the try-catch
+ // handler address that would catch the exception. We also hold onto a
+ // closure that returns a promise if the exception is considered uncaught.
+ // Due to the possibility of reentry we use a linked list.
+ PromiseOnStack* promise_on_stack_;
+ };
+
+ // Per-thread debug state.
+ ThreadLocal thread_local_;
+
Isolate* isolate_;
- friend class EnterDebugger;
friend class Isolate;
+ friend class DebugScope;
+ friend class DisableBreak;
+ friend class LiveEdit;
+ friend class SuppressDebug;
+
+ friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
+ friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc
- DISALLOW_COPY_AND_ASSIGN(Debugger);
+ DISALLOW_COPY_AND_ASSIGN(Debug);
};
-// This class is used for entering the debugger. Create an instance in the stack
-// to enter the debugger. This will set the current break state, make sure the
-// debugger is loaded and switch to the debugger context. If the debugger for
-// some reason could not be entered FailedToEnter will return true.
-class EnterDebugger BASE_EMBEDDED {
- public:
- explicit EnterDebugger(Isolate* isolate);
- ~EnterDebugger();
+DECLARE_RUNTIME_FUNCTION(Debug_Break);
- // Check whether the debugger could be entered.
- inline bool FailedToEnter() { return load_failed_; }
- // Check whether there are any JavaScript frames on the stack.
- inline bool HasJavaScriptFrames() { return has_js_frames_; }
+// This scope is used to load and enter the debug context and create a new
+// break state. Leaving the scope will restore the previous state.
+// On failure to load, failed() returns true.
+class DebugScope BASE_EMBEDDED {
+ public:
+ explicit DebugScope(Debug* debug);
+ ~DebugScope();
+
+ // Check whether loading was successful.
+ inline bool failed() { return failed_; }
// Get the active context from before entering the debugger.
inline Handle<Context> GetContext() { return save_.context(); }
private:
- Isolate* isolate_;
- EnterDebugger* prev_; // Previous debugger entry if entered recursively.
- JavaScriptFrameIterator it_;
- const bool has_js_frames_; // Were there any JavaScript frames?
+ Isolate* isolate() { return debug_->isolate_; }
+
+ Debug* debug_;
+ DebugScope* prev_; // Previous scope if entered recursively.
StackFrame::Id break_frame_id_; // Previous break frame id.
- int break_id_; // Previous break id.
- bool load_failed_; // Did the debugger fail to load?
- SaveContext save_; // Saves previous context.
+ int break_id_; // Previous break id.
+ bool failed_; // Did the debug context fail to load?
+ SaveContext save_; // Saves previous context.
};
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
- explicit DisableBreak(Isolate* isolate, bool disable_break)
- : isolate_(isolate) {
- prev_disable_break_ = isolate_->debug()->disable_break();
- isolate_->debug()->set_disable_break(disable_break);
- }
- ~DisableBreak() {
- isolate_->debug()->set_disable_break(prev_disable_break_);
+ explicit DisableBreak(Debug* debug, bool disable_break)
+ : debug_(debug), old_state_(debug->break_disabled_) {
+ debug_->break_disabled_ = disable_break;
}
+ ~DisableBreak() { debug_->break_disabled_ = old_state_; }
private:
- Isolate* isolate_;
- // The previous state of the disable break used to restore the value when this
- // object is destructed.
- bool prev_disable_break_;
+ Debug* debug_;
+ bool old_state_;
+ DISALLOW_COPY_AND_ASSIGN(DisableBreak);
};
-// Debug_Address encapsulates the Address pointers used in generating debug
-// code.
-class Debug_Address {
+class SuppressDebug BASE_EMBEDDED {
public:
- explicit Debug_Address(Debug::AddressId id) : id_(id) { }
-
- static Debug_Address AfterBreakTarget() {
- return Debug_Address(Debug::k_after_break_target_address);
- }
-
- static Debug_Address DebugBreakReturn() {
- return Debug_Address(Debug::k_debug_break_return_address);
- }
-
- static Debug_Address RestarterFrameFunctionPointer() {
- return Debug_Address(Debug::k_restarter_frame_function_pointer);
- }
-
- Address address(Isolate* isolate) const {
- Debug* debug = isolate->debug();
- switch (id_) {
- case Debug::k_after_break_target_address:
- return reinterpret_cast<Address>(debug->after_break_target_address());
- case Debug::k_debug_break_return_address:
- return reinterpret_cast<Address>(debug->debug_break_return_address());
- case Debug::k_debug_break_slot_address:
- return reinterpret_cast<Address>(debug->debug_break_slot_address());
- case Debug::k_restarter_frame_function_pointer:
- return reinterpret_cast<Address>(
- debug->restarter_frame_function_pointer_address());
- default:
- UNREACHABLE();
- return NULL;
- }
+ explicit SuppressDebug(Debug* debug)
+ : debug_(debug), old_state_(debug->is_suppressed_) {
+ debug_->is_suppressed_ = true;
}
+ ~SuppressDebug() { debug_->is_suppressed_ = old_state_; }
private:
- Debug::AddressId id_;
+ Debug* debug_;
+ bool old_state_;
+ DISALLOW_COPY_AND_ASSIGN(SuppressDebug);
};
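// Usage sketch (not part of the patch): both guards follow the same RAII
// pattern as DebugScope above.
//
//   {
//     SuppressDebug while_processing(isolate->debug());
//     // ... debug events raised here are ignored ...
//   }  // previous suppression state restored
// (end sketch)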
-// The optional thread that the Debug Agent may use to temporarily call V8 to
-// process pending debug requests if the debuggee is not running V8 at the
-// moment. Technically it does not call V8 itself; rather, it asks the
-// embedding program to do this via v8::Debug::HostDispatchHandler.
-class MessageDispatchHelperThread: public Thread {
- public:
- explicit MessageDispatchHelperThread(Isolate* isolate);
- ~MessageDispatchHelperThread() {}
-
- void Schedule();
- private:
- void Run();
-
- Isolate* isolate_;
- Semaphore sem_;
- Mutex mutex_;
- bool already_signalled_;
+// Code generator routines.
+class DebugCodegen : public AllStatic {
+ public:
+ static void GenerateSlot(MacroAssembler* masm);
+ static void GenerateCallICStubDebugBreak(MacroAssembler* masm);
+ static void GenerateLoadICDebugBreak(MacroAssembler* masm);
+ static void GenerateStoreICDebugBreak(MacroAssembler* masm);
+ static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
+ static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
+ static void GenerateCompareNilICDebugBreak(MacroAssembler* masm);
+ static void GenerateReturnDebugBreak(MacroAssembler* masm);
+ static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
+ static void GenerateCallConstructStubDebugBreak(MacroAssembler* masm);
+ static void GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm);
+ static void GenerateSlotDebugBreak(MacroAssembler* masm);
+ static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
- DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
+ // FrameDropper is a code replacement for a JavaScript frame with possibly
+ // several frames above.
+ // There are no calling conventions here, because it never actually gets
+ // called; it only gets returned to.
+ static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
};
} } // namespace v8::internal
-#endif // ENABLE_DEBUGGER_SUPPORT
-
#endif // V8_DEBUG_H_
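The DisableBreak and SuppressDebug helpers introduced above share one RAII save-and-restore idiom: the constructor records the current flag and overrides it, and the destructor unconditionally restores the recorded value. A minimal standalone sketch of that idiom, with an illustrative Debug struct and scope name rather than the real V8 declarations:

struct Debug { bool break_disabled = false; };

class DisableBreakScope {
 public:
  explicit DisableBreakScope(Debug* debug)
      : debug_(debug), old_state_(debug->break_disabled) {
    debug_->break_disabled = true;  // Override for the lifetime of the scope.
  }
  ~DisableBreakScope() { debug_->break_disabled = old_state_; }  // Restore.
 private:
  Debug* debug_;
  bool old_state_;
};

void RunWithoutBreaks(Debug* debug) {
  DisableBreakScope scope(debug);
  // ... code in here cannot trigger breakpoints ...
}  // The destructor restores the previous flag, even on early return.

Because the old value is captured rather than assumed to be false, these scopes nest correctly.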
diff --git a/chromium/v8/src/default-platform.cc b/chromium/v8/src/default-platform.cc
deleted file mode 100644
index ef3c4ebd450..00000000000
--- a/chromium/v8/src/default-platform.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "default-platform.h"
-
-namespace v8 {
-namespace internal {
-
-
-DefaultPlatform::DefaultPlatform() {}
-
-
-DefaultPlatform::~DefaultPlatform() {}
-
-void DefaultPlatform::CallOnBackgroundThread(Task *task,
- ExpectedRuntime expected_runtime) {
- // TODO(jochen): implement.
- task->Run();
- delete task;
-}
-
-
-void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
- // TODO(jochen): implement.
- task->Run();
- delete task;
-}
-
-
-} } // namespace v8::internal
diff --git a/chromium/v8/src/default-platform.h b/chromium/v8/src/default-platform.h
deleted file mode 100644
index fe1bf8e2d64..00000000000
--- a/chromium/v8/src/default-platform.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DEFAULT_PLATFORM_H_
-#define V8_DEFAULT_PLATFORM_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-class DefaultPlatform : public Platform {
- public:
- DefaultPlatform();
- virtual ~DefaultPlatform();
-
- // v8::Platform implementation.
- virtual void CallOnBackgroundThread(
- Task *task, ExpectedRuntime expected_runtime) V8_OVERRIDE;
- virtual void CallOnForegroundThread(v8::Isolate *isolate,
- Task *task) V8_OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
-};
-
-
-} } // namespace v8::internal
-
-
-#endif // V8_DEFAULT_PLATFORM_H_
diff --git a/chromium/v8/src/deoptimizer.cc b/chromium/v8/src/deoptimizer.cc
index 6c3100a6381..2b39ff6965e 100644
--- a/chromium/v8/src/deoptimizer.cc
+++ b/chromium/v8/src/deoptimizer.cc
@@ -1,40 +1,17 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "full-codegen.h"
-#include "global-handles.h"
-#include "macro-assembler.h"
-#include "prettyprinter.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/disasm.h"
+#include "src/full-codegen.h"
+#include "src/global-handles.h"
+#include "src/macro-assembler.h"
+#include "src/prettyprinter.h"
namespace v8 {
@@ -56,9 +33,7 @@ static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
: allocator_(allocator),
-#ifdef ENABLE_DEBUGGER_SUPPORT
deoptimized_frame_info_(NULL),
-#endif
current_(NULL) {
for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
deopt_entry_code_entries_[i] = -1;
@@ -75,13 +50,11 @@ DeoptimizerData::~DeoptimizerData() {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void DeoptimizerData::Iterate(ObjectVisitor* v) {
if (deoptimized_frame_info_ != NULL) {
deoptimized_frame_info_->Iterate(v);
}
}
-#endif
Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
@@ -91,7 +64,7 @@ Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
Object* element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined()) {
Code* code = Code::cast(element);
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ CHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
if (code->contains(addr)) return code;
element = code->next_code_link();
}
@@ -115,7 +88,7 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
from,
fp_to_sp_delta,
NULL);
- ASSERT(isolate->deoptimizer_data()->current_ == NULL);
+ CHECK(isolate->deoptimizer_data()->current_ == NULL);
isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
}
@@ -137,7 +110,7 @@ size_t Deoptimizer::GetMaxDeoptTableSize() {
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
Deoptimizer* result = isolate->deoptimizer_data()->current_;
- ASSERT(result != NULL);
+ CHECK_NE(result, NULL);
result->DeleteFrameDescriptions();
isolate->deoptimizer_data()->current_ = NULL;
return result;
@@ -160,13 +133,12 @@ int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
JavaScriptFrame* frame,
int jsframe_index,
Isolate* isolate) {
- ASSERT(frame->is_optimized());
- ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
+ CHECK(frame->is_optimized());
+ CHECK(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
// Get the function and code from the frame.
JSFunction* function = frame->function();
@@ -176,7 +148,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// return address must be at a place in the code with deoptimization support.
SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
int deoptimization_index = safepoint_entry.deoptimization_index();
- ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
+ CHECK_NE(deoptimization_index, Safepoint::kNoDeoptimizationIndex);
  // Always use the actual stack slots when calculating the fp-to-sp
  // delta, adding two for the function and context.
@@ -199,7 +171,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// Create the GC safe output frame information and register it for GC
// handling.
- ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
+ CHECK_LT(jsframe_index, deoptimizer->jsframe_count());
// Convert JS frame index into frame index.
int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
@@ -251,11 +223,11 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate) {
- ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
+ CHECK_EQ(isolate->deoptimizer_data()->deoptimized_frame_info_, info);
delete info;
isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
}
-#endif
+
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
int count,
@@ -269,7 +241,7 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
DisallowHeapAllocation no_allocation;
- ASSERT(context->IsNativeContext());
+ CHECK(context->IsNativeContext());
visitor->EnterContext(context);
@@ -292,13 +264,13 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
context->SetOptimizedFunctionsListHead(next);
}
// The visitor should not alter the link directly.
- ASSERT(function->next_function_link() == next);
+ CHECK_EQ(function->next_function_link(), next);
// Set the next function link to undefined to indicate it is no longer
// in the optimized functions list.
function->set_next_function_link(context->GetHeap()->undefined_value());
} else {
// The visitor should not alter the link directly.
- ASSERT(function->next_function_link() == next);
+ CHECK_EQ(function->next_function_link(), next);
// Preserve this element.
prev = function;
}
@@ -342,7 +314,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// Unlink this function and evict from optimized code map.
SharedFunctionInfo* shared = function->shared();
function->set_code(shared->code());
- shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
@@ -358,9 +329,41 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
SelectedCodeUnlinker unlinker;
VisitAllOptimizedFunctionsForContext(context, &unlinker);
+ Isolate* isolate = context->GetHeap()->isolate();
+#ifdef DEBUG
+ Code* topmost_optimized_code = NULL;
+ bool safe_to_deopt_topmost_optimized_code = false;
+ // Make sure all activations of optimized code can deopt at their current PC.
+ // The topmost optimized code has special handling because it cannot be
+ // deoptimized due to weak object dependency.
+ for (StackFrameIterator it(isolate, isolate->thread_local_top());
+ !it.done(); it.Advance()) {
+ StackFrame::Type type = it.frame()->type();
+ if (type == StackFrame::OPTIMIZED) {
+ Code* code = it.frame()->LookupCode();
+ if (FLAG_trace_deopt) {
+ JSFunction* function =
+ static_cast<OptimizedFrame*>(it.frame())->function();
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimizer found activation of function: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(),
+ " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
+ SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
+ int deopt_index = safepoint.deoptimization_index();
+ bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex;
+ CHECK(topmost_optimized_code == NULL || safe_to_deopt);
+ if (topmost_optimized_code == NULL) {
+ topmost_optimized_code = code;
+ safe_to_deopt_topmost_optimized_code = safe_to_deopt;
+ }
+ }
+ }
+#endif
+
// Move marked code from the optimized code list to the deoptimized
// code list, collecting them into a ZoneList.
- Isolate* isolate = context->GetHeap()->isolate();
Zone zone(isolate);
ZoneList<Code*> codes(10, &zone);
@@ -369,7 +372,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
Object* element = context->OptimizedCodeListHead();
while (!element->IsUndefined()) {
Code* code = Code::cast(element);
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
Object* next = code->next_code_link();
if (code->marked_for_deoptimization()) {
// Put the code into the list for later patching.
@@ -393,35 +396,17 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
element = next;
}
-#ifdef DEBUG
- // Make sure all activations of optimized code can deopt at their current PC.
- for (StackFrameIterator it(isolate, isolate->thread_local_top());
- !it.done(); it.Advance()) {
- StackFrame::Type type = it.frame()->type();
- if (type == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
- if (FLAG_trace_deopt) {
- JSFunction* function =
- static_cast<OptimizedFrame*>(it.frame())->function();
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[deoptimizer patches for lazy deopt: ");
- function->PrintName(scope.file());
- PrintF(scope.file(),
- " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
- }
- SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
- int deopt_index = safepoint.deoptimization_index();
- CHECK(deopt_index != Safepoint::kNoDeoptimizationIndex);
- }
- }
-#endif
-
// TODO(titzer): we need a handle scope only because of the macro assembler,
// which is only used in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
// Now patch all the codes for deoptimization.
for (int i = 0; i < codes.length(); i++) {
+#ifdef DEBUG
+ if (codes[i] == topmost_optimized_code) {
+ ASSERT(safe_to_deopt_topmost_optimized_code);
+ }
+#endif
// It is finally time to die, code object.
// Do platform-specific patching to force any activations to lazy deopt.
PatchCodeForDeoptimization(isolate, codes[i]);
@@ -475,7 +460,7 @@ void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
}
if (object->IsJSGlobalProxy()) {
Object* proto = object->GetPrototype();
- ASSERT(proto->IsJSGlobalObject());
+ CHECK(proto->IsJSGlobalObject());
Context* native_context = GlobalObject::cast(proto)->native_context();
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
@@ -491,7 +476,7 @@ void Deoptimizer::MarkAllCodeForContext(Context* context) {
Object* element = context->OptimizedCodeListHead();
while (!element->IsUndefined()) {
Code* code = Code::cast(element);
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
code->set_marked_for_deoptimization(true);
element = code->next_code_link();
}
@@ -526,7 +511,7 @@ bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
? FLAG_trace_stub_failures
: FLAG_trace_deopt;
}
- UNREACHABLE();
+ FATAL("Unsupported deopt type");
return false;
}
@@ -538,7 +523,7 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
case LAZY: return "lazy";
case DEBUGGER: return "debugger";
}
- UNREACHABLE();
+ FATAL("Unsupported deopt type");
return NULL;
}
@@ -628,7 +613,7 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
ASSERT(optimized_code->contains(from_));
return optimized_code;
}
- UNREACHABLE();
+ FATAL("Could not find code for optimized function");
return NULL;
}
@@ -671,15 +656,15 @@ Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
int id,
BailoutType type,
GetEntryMode mode) {
- ASSERT(id >= 0);
+ CHECK_GE(id, 0);
if (id >= kMaxNumberOfEntries) return NULL;
if (mode == ENSURE_ENTRY_CODE) {
EnsureCodeForDeoptimizationEntry(isolate, type, id);
} else {
- ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
+ CHECK_EQ(mode, CALCULATE_ENTRY_ADDRESS);
}
DeoptimizerData* data = isolate->deoptimizer_data();
- ASSERT(type < kBailoutTypesWithCodeEntry);
+ CHECK_LT(type, kBailoutTypesWithCodeEntry);
MemoryChunk* base = data->deopt_entry_code_[type];
return base->area_start() + (id * table_entry_size_);
}
@@ -715,12 +700,12 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
}
}
PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt());
- PrintF(stderr, "[method: %s]\n", *shared->DebugName()->ToCString());
+ PrintF(stderr, "[method: %s]\n", shared->DebugName()->ToCString().get());
// Print the source code if available.
HeapStringAllocator string_allocator;
StringStream stream(&string_allocator);
shared->SourceCodePrint(&stream, -1);
- PrintF(stderr, "[source:\n%s\n]", *stream.ToCString());
+ PrintF(stderr, "[source:\n%s\n]", stream.ToCString().get());
FATAL("unable to find pc offset during deoptimization");
return -1;
@@ -755,6 +740,12 @@ void Deoptimizer::DoComputeOutputFrames() {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
ElapsedTimer timer;
+
+ // Determine basic deoptimization information. The optimized frame is
+ // described by the input data.
+ DeoptimizationInputData* input_data =
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
+
if (trace_scope_ != NULL) {
timer.Start();
PrintF(trace_scope_->file(),
@@ -763,7 +754,8 @@ void Deoptimizer::DoComputeOutputFrames() {
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
PrintF(trace_scope_->file(),
- " @%d, FP to SP delta: %d]\n",
+ " (opt #%d) @%d, FP to SP delta: %d]\n",
+ input_data->OptimizationId()->value(),
bailout_id_,
fp_to_sp_delta_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
@@ -771,10 +763,6 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
- // Determine basic deoptimization information. The optimized frame is
- // described by the input data.
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@@ -797,6 +785,11 @@ void Deoptimizer::DoComputeOutputFrames() {
}
output_count_ = count;
+ Register fp_reg = JavaScriptFrame::fp_register();
+ stack_fp_ = reinterpret_cast<Address>(
+ input_->GetRegister(fp_reg.code()) +
+ has_alignment_padding_ * kPointerSize);
+
// Translate each output frame.
for (int i = 0; i < count; ++i) {
// Read the ast node id, function, and frame height for this output frame.
@@ -834,7 +827,7 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::LITERAL:
case Translation::ARGUMENTS_OBJECT:
default:
- UNREACHABLE();
+ FATAL("Unsupported translation");
break;
}
}
@@ -873,7 +866,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
} else {
int closure_id = iterator->Next();
USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ CHECK_EQ(Translation::kSelfLiteralId, closure_id);
function = function_;
}
unsigned height = iterator->Next();
@@ -898,8 +891,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
+ CHECK(frame_index >= 0 && frame_index < output_count_);
+ CHECK_EQ(output_[frame_index], NULL);
output_[frame_index] = output_frame;
// The top address for the bottommost output frame can be computed from
@@ -983,6 +976,25 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
ASSERT(!is_bottommost || !has_alignment_padding_ ||
(fp_value & kPointerSize) != 0);
+ if (FLAG_enable_ool_constant_pool) {
+ // For the bottommost output frame the constant pool pointer can be read
+ // from the input frame. For subsequent output frames, it can be read from
+ // the previous frame.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetConstantPool();
+ }
+ output_frame->SetCallerConstantPool(output_offset, value);
+ if (trace_scope_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; caller's constant_pool\n",
+ top_address + output_offset, output_offset, value);
+ }
+ }
+
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
@@ -1024,7 +1036,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
- ASSERT(0 == output_offset);
+ CHECK_EQ(0, output_offset);
// Compute this frame's PC, state, and continuation.
Code* non_optimized_code = function->shared()->code();
@@ -1036,6 +1048,18 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
output_frame->SetPc(pc_value);
+ // Update constant pool.
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ if (is_topmost) {
+ Register constant_pool_reg =
+ JavaScriptFrame::constant_pool_pointer_register();
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
+ }
+
FullCodeGenerator::State state =
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
@@ -1049,7 +1073,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
} else if (bailout_type_ == SOFT) {
continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
} else {
- ASSERT(bailout_type_ == EAGER);
+ CHECK_EQ(bailout_type_, EAGER);
}
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
@@ -1076,8 +1100,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
// Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
+ CHECK(frame_index > 0 && frame_index < output_count_ - 1);
+ CHECK(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
@@ -1118,6 +1142,18 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
+ if (FLAG_enable_ool_constant_pool) {
+ // Read the caller's constant pool from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetConstantPool();
+ output_frame->SetCallerConstantPool(output_offset, value);
+ if (trace_scope_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; caller's constant_pool\n",
+ top_address + output_offset, output_offset, value);
+ }
+ }
+
// A marker value is used in place of the context.
output_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
@@ -1161,6 +1197,11 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc_value);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(adaptor_trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1206,7 +1247,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
// receiver parameter through the translation. It might be encoding
    // a captured object; if so, patch the slot address for the captured object.
if (i == 0 && deferred_objects_.length() > deferred_object_index) {
- ASSERT(!deferred_objects_[deferred_object_index].is_arguments());
+ CHECK(!deferred_objects_[deferred_object_index].is_arguments());
deferred_objects_[deferred_object_index].patch_slot_address(top_address);
}
}
@@ -1235,6 +1276,18 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
+ if (FLAG_enable_ool_constant_pool) {
+ // Read the caller's constant pool from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetConstantPool();
+ output_frame->SetCallerConstantPool(output_offset, value);
+ if (trace_scope_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's constant pool\n",
+ top_address + output_offset, output_offset, value);
+ }
+ }
+
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
@@ -1305,12 +1358,17 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
top_address + output_offset, output_offset, value);
}
- ASSERT(0 == output_offset);
+ CHECK_EQ(0, output_offset);
intptr_t pc = reinterpret_cast<intptr_t>(
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
output_frame->SetPc(pc);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(construct_stub->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1330,10 +1388,10 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
}
// We need 1 stack entry for the return address and enough entries for the
- // StackFrame::INTERNAL (FP, context, frame type and code object - see
- // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
- // entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
+ // StackFrame::INTERNAL (FP, context, frame type, code object and constant
+ // pool (if FLAG_enable_ool_constant_pool) - see MacroAssembler::EnterFrame).
+ // For a setter stub frame we need one additional entry for the implicit
+ // return value, see StoreStubCompiler::CompileStoreViaSetter.
unsigned fixed_frame_entries =
(StandardFrameConstants::kFixedFrameSize / kPointerSize) + 1 +
(is_setter_stub_frame ? 1 : 0);
@@ -1346,8 +1404,8 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_frame->SetFrameType(StackFrame::INTERNAL);
// A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
+ CHECK(frame_index > 0 && frame_index < output_count_ - 1);
+ CHECK_EQ(output_[frame_index], NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
@@ -1381,6 +1439,18 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
+ if (FLAG_enable_ool_constant_pool) {
+ // Read the caller's constant pool from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetConstantPool();
+ output_frame->SetCallerConstantPool(output_offset, value);
+ if (trace_scope_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's constant pool\n",
+ top_address + output_offset, output_offset, value);
+ }
+ }
+
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
@@ -1419,9 +1489,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
}
// Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+ DoTranslateObjectAndSkip(iterator);
if (is_setter_stub_frame) {
// The implicit return value was part of the artificial setter stub
@@ -1430,7 +1498,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
DoTranslateCommand(iterator, frame_index, output_offset);
}
- ASSERT(0 == output_offset);
+ CHECK_EQ(output_offset, 0);
Smi* offset = is_setter_stub_frame ?
isolate_->heap()->setter_stub_deopt_pc_offset() :
@@ -1438,6 +1506,11 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
intptr_t pc = reinterpret_cast<intptr_t>(
accessor_stub->instruction_start() + offset->value());
output_frame->SetPc(pc);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1451,6 +1524,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// +-------------------------+ +-------------------------+
// | | saved frame (FP) | | saved frame (FP) |
// | +=========================+<-fpreg +=========================+<-fpreg
+ // | |constant pool (if ool_cp)| |constant pool (if ool_cp)|
+ // | +-------------------------+ +-------------------------|
// | | JSFunction context | | JSFunction context |
// v +-------------------------+ +-------------------------|
// | COMPILED_STUB marker | | STUB_FAILURE marker |
@@ -1473,8 +1548,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// reg = JSFunction context
//
- ASSERT(compiled_code_->is_crankshafted() &&
- compiled_code_->kind() != Code::OPTIMIZED_FUNCTION);
+ CHECK(compiled_code_->is_crankshafted() &&
+ compiled_code_->kind() != Code::OPTIMIZED_FUNCTION);
int major_key = compiled_code_->major_key();
CodeStubInterfaceDescriptor* descriptor =
isolate_->code_stub_interface_descriptor(major_key);
@@ -1483,7 +1558,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// and the standard stack frame slots. Include space for an argument
// object to the callee and optionally the space to pass the argument
// object to the stub failure handler.
- ASSERT(descriptor->register_param_count_ >= 0);
+ CHECK_GE(descriptor->register_param_count_, 0);
int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
sizeof(Arguments) + kPointerSize;
int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
@@ -1491,9 +1566,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
int output_frame_size = height_in_bytes + fixed_frame_size;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
- " translating %s => StubFailure%sTrampolineStub, height=%d\n",
+ " translating %s => StubFailureTrampolineStub, height=%d\n",
CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
- descriptor->HasTailCallContinuation() ? "TailCall" : "",
height_in_bytes);
}
@@ -1501,7 +1575,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, NULL);
output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ASSERT(frame_index == 0);
+ CHECK_EQ(frame_index, 0);
output_[frame_index] = output_frame;
// The top address for the output frame can be computed from the input
@@ -1539,6 +1613,19 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
top_address + output_frame_offset, output_frame_offset, value);
}
+ if (FLAG_enable_ool_constant_pool) {
+ // Read the caller's constant pool from the input frame.
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetCallerConstantPool(output_frame_offset, value);
+ if (trace_scope_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's constant_pool\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+ }
+
// The context can be gotten from the input frame.
Register context_reg = StubFailureTrampolineFrame::context_register();
input_frame_offset -= kPointerSize;
@@ -1546,7 +1633,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
output_frame->SetRegister(context_reg.code(), value);
output_frame_offset -= kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
- ASSERT(reinterpret_cast<Object*>(value)->IsContext());
+ CHECK(reinterpret_cast<Object*>(value)->IsContext());
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
@@ -1566,8 +1653,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
top_address + output_frame_offset, output_frame_offset, value);
}
- intptr_t caller_arg_count = descriptor->HasTailCallContinuation()
- ? compiled_code_->arguments_count() + 1 : 0;
+ intptr_t caller_arg_count = 0;
bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
// Build the Arguments object for the caller's parameters and a pointer to it.
@@ -1625,10 +1711,10 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
}
- ASSERT(0 == output_frame_offset);
+ CHECK_EQ(output_frame_offset, 0);
if (!arg_count_known) {
- ASSERT(arguments_length_offset >= 0);
+ CHECK_GE(arguments_length_offset, 0);
// We know it's a smi because 1) the code stub guarantees the stack
// parameter count is in smi range, and 2) the DoTranslateCommand in the
// parameter loop above translated that to a tagged value.
@@ -1663,18 +1749,23 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// Compute this frame's PC, state, and continuation.
Code* trampoline = NULL;
- if (descriptor->HasTailCallContinuation()) {
- StubFailureTailCallTrampolineStub().FindCodeInCache(&trampoline, isolate_);
- } else {
- StubFunctionMode function_mode = descriptor->function_mode_;
- StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
- isolate_);
- }
+ StubFunctionMode function_mode = descriptor->function_mode_;
+ StubFailureTrampolineStub(isolate_,
+ function_mode).FindCodeInCache(&trampoline);
ASSERT(trampoline != NULL);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
+ if (FLAG_enable_ool_constant_pool) {
+ Register constant_pool_reg =
+ StubFailureTrampolineFrame::constant_pool_pointer_register();
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure = NotifyStubFailureBuiltin();
+ Code* notify_failure =
+ isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(notify_failure->entry()));
}
@@ -1696,7 +1787,11 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
Handle<JSObject> arguments = Handle<JSObject>::cast(
Accessors::FunctionGetArguments(function));
materialized_objects_->Add(arguments);
- materialization_value_index_ += length;
+ // To keep consistent object counters, we still materialize the
+ // nested values (but we throw them away).
+ for (int i = 0; i < length; ++i) {
+ MaterializeNextValue();
+ }
} else if (desc.is_arguments()) {
// Construct an arguments object and copy the parameters to a newly
// allocated arguments object backing store.
@@ -1704,7 +1799,7 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
Handle<JSObject> arguments =
isolate_->factory()->NewArgumentsObject(function, length);
Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
- ASSERT(array->length() == length);
+ ASSERT_EQ(array->length(), length);
arguments->set_elements(*array);
materialized_objects_->Add(arguments);
for (int i = 0; i < length; ++i) {
@@ -1716,34 +1811,50 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
// We also need to make sure that the representation of all fields
// in the given object are general enough to hold a tagged value.
Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
- Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged());
+ Handle<Map>::cast(MaterializeNextValue()));
switch (map->instance_type()) {
case HEAP_NUMBER_TYPE: {
// Reuse the HeapNumber value directly as it is already properly
// tagged and skip materializing the HeapNumber explicitly.
Handle<Object> object = MaterializeNextValue();
- materialized_objects_->Add(object);
+ if (object_index < prev_materialized_count_) {
+ materialized_objects_->Add(Handle<Object>(
+ previously_materialized_objects_->get(object_index), isolate_));
+ } else {
+ materialized_objects_->Add(object);
+ }
materialization_value_index_ += kDoubleSize / kPointerSize - 1;
break;
}
case JS_OBJECT_TYPE: {
Handle<JSObject> object =
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
- materialized_objects_->Add(object);
+ if (object_index < prev_materialized_count_) {
+ materialized_objects_->Add(Handle<Object>(
+ previously_materialized_objects_->get(object_index), isolate_));
+ } else {
+ materialized_objects_->Add(object);
+ }
Handle<Object> properties = MaterializeNextValue();
Handle<Object> elements = MaterializeNextValue();
object->set_properties(FixedArray::cast(*properties));
object->set_elements(FixedArrayBase::cast(*elements));
for (int i = 0; i < length - 3; ++i) {
Handle<Object> value = MaterializeNextValue();
- object->FastPropertyAtPut(i, *value);
+ FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
+ object->FastPropertyAtPut(index, *value);
}
break;
}
case JS_ARRAY_TYPE: {
Handle<JSArray> object =
isolate_->factory()->NewJSArray(0, map->elements_kind());
- materialized_objects_->Add(object);
+ if (object_index < prev_materialized_count_) {
+ materialized_objects_->Add(Handle<Object>(
+ previously_materialized_objects_->get(object_index), isolate_));
+ } else {
+ materialized_objects_->Add(object);
+ }
Handle<Object> properties = MaterializeNextValue();
Handle<Object> elements = MaterializeNextValue();
Handle<Object> length = MaterializeNextValue();
@@ -1755,7 +1866,7 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
default:
PrintF(stderr,
"[couldn't handle instance type %d]\n", map->instance_type());
- UNREACHABLE();
+ FATAL("Unsupported instance type");
}
}
@@ -1776,6 +1887,12 @@ Handle<Object> Deoptimizer::MaterializeNextValue() {
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
ASSERT_NE(DEBUGGER, bailout_type_);
+ MaterializedObjectStore* materialized_store =
+ isolate_->materialized_object_store();
+ previously_materialized_objects_ = materialized_store->Get(stack_fp_);
+ prev_materialized_count_ = previously_materialized_objects_.is_null() ?
+ 0 : previously_materialized_objects_->length();
+
// Walk all JavaScript output frames with the given frame iterator.
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
@@ -1843,7 +1960,9 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
// materialize a new instance of the object if necessary. Store
// the materialized object into the frame slot.
Handle<Object> object = MaterializeNextHeapObject();
- Memory::Object_at(descriptor.slot_address()) = *object;
+ if (descriptor.slot_address() != NULL) {
+ Memory::Object_at(descriptor.slot_address()) = *object;
+ }
if (trace_scope_ != NULL) {
if (descriptor.is_arguments()) {
PrintF(trace_scope_->file(),
@@ -1862,20 +1981,23 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
}
}
- ASSERT(materialization_object_index_ == materialized_objects_->length());
- ASSERT(materialization_value_index_ == materialized_values_->length());
+ CHECK_EQ(materialization_object_index_, materialized_objects_->length());
+ CHECK_EQ(materialization_value_index_, materialized_values_->length());
+ }
+
+ if (prev_materialized_count_ > 0) {
+ materialized_store->Remove(stack_fp_);
}
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
Address parameters_top,
uint32_t parameters_size,
Address expressions_top,
uint32_t expressions_size,
DeoptimizedFrameInfo* info) {
- ASSERT_EQ(DEBUGGER, bailout_type_);
+ CHECK_EQ(DEBUGGER, bailout_type_);
Address parameters_bottom = parameters_top + parameters_size;
Address expressions_bottom = expressions_top + expressions_size;
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
@@ -1921,7 +2043,6 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
}
}
}
-#endif
static const char* TraceValueType(bool is_smi) {
@@ -1933,6 +2054,73 @@ static const char* TraceValueType(bool is_smi) {
}
+void Deoptimizer::DoTranslateObjectAndSkip(TranslationIterator* iterator) {
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+
+ switch (opcode) {
+ case Translation::BEGIN:
+ case Translation::JS_FRAME:
+ case Translation::ARGUMENTS_ADAPTOR_FRAME:
+ case Translation::CONSTRUCT_STUB_FRAME:
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME: {
+ FATAL("Unexpected frame start translation opcode");
+ return;
+ }
+
+ case Translation::REGISTER:
+ case Translation::INT32_REGISTER:
+ case Translation::UINT32_REGISTER:
+ case Translation::DOUBLE_REGISTER:
+ case Translation::STACK_SLOT:
+ case Translation::INT32_STACK_SLOT:
+ case Translation::UINT32_STACK_SLOT:
+ case Translation::DOUBLE_STACK_SLOT:
+ case Translation::LITERAL: {
+ // The value is not part of any materialized object, so we can ignore it.
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+ return;
+ }
+
+ case Translation::DUPLICATED_OBJECT: {
+ int object_index = iterator->Next();
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(), " skipping object ");
+ PrintF(trace_scope_->file(),
+ " ; duplicate of object #%d\n", object_index);
+ }
+ AddObjectDuplication(0, object_index);
+ return;
+ }
+
+ case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT: {
+ int length = iterator->Next();
+ bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(), " skipping object ");
+ PrintF(trace_scope_->file(),
+ " ; object (length = %d, is_args = %d)\n", length, is_args);
+ }
+
+ AddObjectStart(0, length, is_args);
+
+ // We save the object values on the side and materialize the actual
+ // object after the deoptimized frame is built.
+ int object_index = deferred_objects_.length() - 1;
+ for (int i = 0; i < length; i++) {
+ DoTranslateObject(iterator, object_index, i);
+ }
+ return;
+ }
+ }
+
+ FATAL("Unexpected translation opcode");
+}
+
+
void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int object_index,
int field_index) {
@@ -1950,7 +2138,7 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::COMPILED_STUB_FRAME:
- UNREACHABLE();
+ FATAL("Unexpected frame start translation opcode");
return;
case Translation::REGISTER: {
@@ -2190,6 +2378,8 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
return;
}
}
+
+ FATAL("Unexpected translation opcode");
}
@@ -2211,7 +2401,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::COMPILED_STUB_FRAME:
- UNREACHABLE();
+ FATAL("Unexpected translation opcode");
return;
case Translation::REGISTER: {
@@ -2479,17 +2669,15 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned fixed_size = ComputeFixedSize(function_);
- // The fp-to-sp delta already takes the context and the function
- // into account so we have to avoid double counting them.
+ // The fp-to-sp delta already takes the context, constant pool pointer and
+ // the function into account, so we have to avoid double-counting them.
unsigned result = fixed_size + fp_to_sp_delta_ -
StandardFrameConstants::kFixedFrameSizeFromFp;
-#ifdef DEBUG
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size = ComputeOutgoingArgumentSize();
- ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
+ CHECK(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
}
-#endif
return result;
}
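The consistency check above can be verified with concrete (illustrative, not real-build) numbers: take kPointerSize = 4, kFixedFrameSizeFromFp = 8, stack_slots = 10 and outgoing_size = 8. The optimizing compiler then leaves an fp-to-sp delta of 8 + 10 * 4 + 8 = 56, so result = fixed_size + 56 - 8 = fixed_size + 48, which matches fixed_size + stack_slots * kPointerSize + outgoing_size exactly.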
@@ -2506,7 +2694,7 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
// The incoming arguments is the values for formal parameters and
// the receiver. Every slot contains a pointer.
if (function->IsSmi()) {
- ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
+ CHECK_EQ(Smi::cast(function), Smi::FromInt(StackFrame::STUB));
return 0;
}
unsigned arguments = function->shared()->formal_parameter_count() + 1;
@@ -2571,13 +2759,13 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
- ASSERT(type == EAGER || type == SOFT || type == LAZY);
+ CHECK(type == EAGER || type == SOFT || type == LAZY);
DeoptimizerData* data = isolate->deoptimizer_data();
int entry_count = data->deopt_entry_code_entries_[type];
if (max_entry_id < entry_count) return;
entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
while (max_entry_id >= entry_count) entry_count *= 2;
- ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
+ CHECK(entry_count <= Deoptimizer::kMaxNumberOfEntries);
MacroAssembler masm(isolate, NULL, 16 * KB);
masm.set_emit_debug_code(false);
@@ -2587,8 +2775,8 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
ASSERT(!RelocInfo::RequiresRelocation(desc));
MemoryChunk* chunk = data->deopt_entry_code_[type];
- ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
- desc.instr_size);
+ CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
+ desc.instr_size);
chunk->CommitArea(desc.instr_size);
CopyBytes(chunk->area_start(), desc.buffer,
static_cast<size_t>(desc.instr_size));
@@ -2605,9 +2793,13 @@ FrameDescription::FrameDescription(uint32_t frame_size,
top_(kZapUint32),
pc_(kZapUint32),
fp_(kZapUint32),
- context_(kZapUint32) {
+ context_(kZapUint32),
+ constant_pool_(kZapUint32) {
// Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) {
+ // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
+ // isn't used before the next safepoint, the GC will try to scan it as a
+ // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
SetRegister(r, kZapUint32);
}
@@ -2651,15 +2843,15 @@ int FrameDescription::ComputeParametersCount() {
case StackFrame::STUB:
return -1; // Minus receiver.
default:
- UNREACHABLE();
+ FATAL("Unexpected stack frame type");
return 0;
}
}
Object* FrameDescription::GetParameter(int index) {
- ASSERT(index >= 0);
- ASSERT(index < ComputeParametersCount());
+ CHECK_GE(index, 0);
+ CHECK_LT(index, ComputeParametersCount());
// The slot indexes for incoming arguments are negative.
unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
@@ -2667,7 +2859,7 @@ Object* FrameDescription::GetParameter(int index) {
unsigned FrameDescription::GetExpressionCount() {
- ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+ CHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
unsigned size = GetFrameSize() - ComputeFixedSize();
return size / kPointerSize;
}
@@ -2715,8 +2907,7 @@ int32_t TranslationIterator::Next() {
Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
int length = contents_.length();
Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
- OS::MemCopy(
- result->GetDataStartAddress(), contents_.ToVector().start(), length);
+ MemCopy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
return result;
}
@@ -2869,7 +3060,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case JS_FRAME:
return 3;
}
- UNREACHABLE();
+ FATAL("Unexpected translation type");
return -1;
}
@@ -2892,12 +3083,11 @@ const char* Translation::StringFor(Opcode opcode) {
// We can't intermix stack decoding and allocations because
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
-SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame) {
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
-
+SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
+ Translation::Opcode opcode,
+ TranslationIterator* iterator,
+ DeoptimizationInputData* data,
+ JavaScriptFrame* frame) {
switch (opcode) {
case Translation::BEGIN:
case Translation::JS_FRAME:
@@ -2908,11 +3098,16 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
// Peeled off before getting here.
break;
- case Translation::DUPLICATED_OBJECT:
+ case Translation::DUPLICATED_OBJECT: {
+ return SlotRef::NewDuplicateObject(iterator->Next());
+ }
+
case Translation::ARGUMENTS_OBJECT:
- case Translation::CAPTURED_OBJECT:
- // This can be only emitted for local slots not for argument slots.
- break;
+ return SlotRef::NewArgumentsObject(iterator->Next());
+
+ case Translation::CAPTURED_OBJECT: {
+ return SlotRef::NewDeferredObject(iterator->Next());
+ }
case Translation::REGISTER:
case Translation::INT32_REGISTER:
@@ -2958,87 +3153,397 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
break;
}
- UNREACHABLE();
+ FATAL("We should never get here - unexpected deopt info.");
return SlotRef();
}
-void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
- TranslationIterator* it,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame) {
- // Process the translation commands for the arguments.
-
- // Skip the translation command for the receiver.
- it->Skip(Translation::NumberOfOperandsFor(
- static_cast<Translation::Opcode>(it->Next())));
-
- // Compute slots for arguments.
- for (int i = 0; i < args_slots->length(); ++i) {
- (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
- }
-}
-
-
-Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
- JavaScriptFrame* frame,
- int inlined_jsframe_index,
- int formal_parameter_count) {
+SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
+ int inlined_jsframe_index,
+ int formal_parameter_count)
+ : current_slot_(0), args_length_(-1), first_slot_index_(-1) {
DisallowHeapAllocation no_gc;
+
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::BEGIN);
+ CHECK_EQ(opcode, Translation::BEGIN);
it.Next(); // Drop frame count.
+
+ stack_frame_id_ = frame->fp();
+
int jsframe_count = it.Next();
- USE(jsframe_count);
- ASSERT(jsframe_count > inlined_jsframe_index);
+ CHECK_GT(jsframe_count, inlined_jsframe_index);
int jsframes_to_skip = inlined_jsframe_index;
- while (true) {
+ int number_of_slots = -1; // Number of slots inside our frame (yet unknown)
+ bool should_deopt = false;
+ while (number_of_slots != 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
+ bool processed = false;
if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
if (jsframes_to_skip == 0) {
- ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
+ CHECK_EQ(Translation::NumberOfOperandsFor(opcode), 2);
it.Skip(1); // literal id
int height = it.Next();
+ // Skip the translation command for the receiver.
+ it.Skip(Translation::NumberOfOperandsFor(
+ static_cast<Translation::Opcode>(it.Next())));
+
// We reached the arguments adaptor frame corresponding to the
// inlined function in question. Number of arguments is height - 1.
- Vector<SlotRef> args_slots =
- Vector<SlotRef>::New(height - 1); // Minus receiver.
- ComputeSlotsForArguments(&args_slots, &it, data, frame);
- return args_slots;
+ first_slot_index_ = slot_refs_.length();
+ args_length_ = height - 1;
+ number_of_slots = height - 1;
+ processed = true;
}
} else if (opcode == Translation::JS_FRAME) {
if (jsframes_to_skip == 0) {
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
+ // Skip the translation command for the receiver.
+ it.Skip(Translation::NumberOfOperandsFor(
+ static_cast<Translation::Opcode>(it.Next())));
+
// We reached the frame corresponding to the inlined function
// in question. Process the translation commands for the
      // arguments. The number of arguments is equal to the formal
      // parameter count.
- Vector<SlotRef> args_slots =
- Vector<SlotRef>::New(formal_parameter_count);
- ComputeSlotsForArguments(&args_slots, &it, data, frame);
- return args_slots;
+ first_slot_index_ = slot_refs_.length();
+ args_length_ = formal_parameter_count;
+ number_of_slots = formal_parameter_count;
+ processed = true;
}
jsframes_to_skip--;
+ } else if (opcode != Translation::BEGIN &&
+ opcode != Translation::CONSTRUCT_STUB_FRAME &&
+ opcode != Translation::GETTER_STUB_FRAME &&
+ opcode != Translation::SETTER_STUB_FRAME &&
+ opcode != Translation::COMPILED_STUB_FRAME) {
+ slot_refs_.Add(ComputeSlotForNextArgument(opcode, &it, data, frame));
+
+ if (first_slot_index_ >= 0) {
+ // We have found the beginning of our frame, so make sure we count
+ // the nested slots of captured objects.
+ number_of_slots--;
+ SlotRef& slot = slot_refs_.last();
+ CHECK_NE(slot.Representation(), SlotRef::ARGUMENTS_OBJECT);
+ number_of_slots += slot.GetChildrenCount();
+ if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
+ slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
+ should_deopt = true;
+ }
+ }
+
+ processed = true;
+ }
+ if (!processed) {
+ // Skip over operands to advance to the next opcode.
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ }
+ }
+ if (should_deopt) {
+ List<JSFunction*> functions(2);
+ frame->GetFunctions(&functions);
+ Deoptimizer::DeoptimizeFunction(functions[0]);
+ }
+}
+
+
+Handle<Object> SlotRef::GetValue(Isolate* isolate) {
+ switch (representation_) {
+ case TAGGED:
+ return Handle<Object>(Memory::Object_at(addr_), isolate);
+
+ case INT32: {
+ int value = Memory::int32_at(addr_);
+ if (Smi::IsValid(value)) {
+ return Handle<Object>(Smi::FromInt(value), isolate);
+ } else {
+ return isolate->factory()->NewNumberFromInt(value);
+ }
+ }
+
+ case UINT32: {
+ uint32_t value = Memory::uint32_at(addr_);
+ if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
+ return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
+ } else {
+ return isolate->factory()->NewNumber(static_cast<double>(value));
+ }
+ }
+
+ case DOUBLE: {
+ double value = read_double_value(addr_);
+ return isolate->factory()->NewNumber(value);
}
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
+ case LITERAL:
+ return literal_;
+
+ default:
+ FATAL("We should never get here - unexpected deopt info.");
+ return Handle<Object>::null();
}
+}
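// A standalone sketch (hypothetical, not part of this patch) of the boxing
// rule GetValue() applies above: a 32-bit value stays a Smi when it fits
// and is promoted to a heap number otherwise. The 31-bit Smi payload
// assumed here matches 32-bit targets.
#include <stdint.h>

enum BoxKind { BOX_SMI, BOX_HEAP_NUMBER };

static inline BoxKind ClassifyInt32(int32_t value) {
  const int32_t kSmiMax = (1 << 30) - 1;  // Assumed 31-bit Smi payload.
  const int32_t kSmiMin = -(1 << 30);
  return (value >= kSmiMin && value <= kSmiMax) ? BOX_SMI : BOX_HEAP_NUMBER;
}

static inline BoxKind ClassifyUint32(uint32_t value) {
  // Unsigned values fit only within the positive half of the Smi range.
  return value <= static_cast<uint32_t>((1 << 30) - 1)
      ? BOX_SMI : BOX_HEAP_NUMBER;
}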
- UNREACHABLE();
- return Vector<SlotRef>();
+
+void SlotRefValueBuilder::Prepare(Isolate* isolate) {
+ MaterializedObjectStore* materialized_store =
+ isolate->materialized_object_store();
+ previously_materialized_objects_ = materialized_store->Get(stack_frame_id_);
+ prev_materialized_count_ = previously_materialized_objects_.is_null()
+ ? 0 : previously_materialized_objects_->length();
+
+ // Skip any materialized objects of the inlined "parent" frames.
+ // (Note that we still need to materialize them because they might
+ // be referenced later as duplicate objects.)
+ while (current_slot_ < first_slot_index_) {
+ GetNext(isolate, 0);
+ }
+ CHECK_EQ(current_slot_, first_slot_index_);
+}
+
+
+Handle<Object> SlotRefValueBuilder::GetPreviouslyMaterialized(
+ Isolate* isolate, int length) {
+ int object_index = materialized_objects_.length();
+ Handle<Object> return_value = Handle<Object>(
+ previously_materialized_objects_->get(object_index), isolate);
+ materialized_objects_.Add(return_value);
+
+ // Now we need to skip all the nested objects (and possibly read them
+ // from the materialization store, too).
+ for (int i = 0; i < length; i++) {
+ SlotRef& slot = slot_refs_[current_slot_];
+ current_slot_++;
+
+ // We need to read all the nested objects, so add them to the
+ // number of objects we need to process.
+ length += slot.GetChildrenCount();
+
+ // Put the nested deferred/duplicate objects into our materialization
+ // array.
+ if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
+ slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
+ int nested_object_index = materialized_objects_.length();
+ Handle<Object> nested_object = Handle<Object>(
+ previously_materialized_objects_->get(nested_object_index),
+ isolate);
+ materialized_objects_.Add(nested_object);
+ }
+ }
+
+ return return_value;
+}
+
+
+Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
+ SlotRef& slot = slot_refs_[current_slot_];
+ current_slot_++;
+ switch (slot.Representation()) {
+ case SlotRef::TAGGED:
+ case SlotRef::INT32:
+ case SlotRef::UINT32:
+ case SlotRef::DOUBLE:
+ case SlotRef::LITERAL: {
+ return slot.GetValue(isolate);
+ }
+ case SlotRef::ARGUMENTS_OBJECT: {
+ // We should never need to materialize an arguments object,
+ // but we still need to put something into the array
+ // so that the indexing is consistent.
+ materialized_objects_.Add(isolate->factory()->undefined_value());
+ int length = slot.GetChildrenCount();
+ for (int i = 0; i < length; ++i) {
+ // We don't need the argument; just ignore it.
+ GetNext(isolate, lvl + 1);
+ }
+ return isolate->factory()->undefined_value();
+ }
+ case SlotRef::DEFERRED_OBJECT: {
+ int length = slot.GetChildrenCount();
+ CHECK(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL ||
+ slot_refs_[current_slot_].Representation() == SlotRef::TAGGED);
+
+ int object_index = materialized_objects_.length();
+ if (object_index < prev_materialized_count_) {
+ return GetPreviouslyMaterialized(isolate, length);
+ }
+
+ Handle<Object> map_object = slot_refs_[current_slot_].GetValue(isolate);
+ Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
+ Handle<Map>::cast(map_object));
+ current_slot_++;
+ // TODO(jarin) this should be unified with the code in
+ // Deoptimizer::MaterializeNextHeapObject()
+ switch (map->instance_type()) {
+ case HEAP_NUMBER_TYPE: {
+ // Reuse the HeapNumber value directly as it is already properly
+ // tagged and skip materializing the HeapNumber explicitly.
+ Handle<Object> object = GetNext(isolate, lvl + 1);
+ materialized_objects_.Add(object);
+ // On 32-bit architectures, there is an extra slot there because
+ // the escape analysis calculates the number of slots as
+ // object-size/pointer-size. To account for this, we read out
+ // any extra slots.
+ for (int i = 0; i < length - 2; i++) {
+ GetNext(isolate, lvl + 1);
+ }
+ return object;
+ }
+ case JS_OBJECT_TYPE: {
+ Handle<JSObject> object =
+ isolate->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
+ materialized_objects_.Add(object);
+ Handle<Object> properties = GetNext(isolate, lvl + 1);
+ Handle<Object> elements = GetNext(isolate, lvl + 1);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ for (int i = 0; i < length - 3; ++i) {
+ Handle<Object> value = GetNext(isolate, lvl + 1);
+ FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
+ object->FastPropertyAtPut(index, *value);
+ }
+ return object;
+ }
+ case JS_ARRAY_TYPE: {
+ Handle<JSArray> object =
+ isolate->factory()->NewJSArray(0, map->elements_kind());
+ materialized_objects_.Add(object);
+ Handle<Object> properties = GetNext(isolate, lvl + 1);
+ Handle<Object> elements = GetNext(isolate, lvl + 1);
+ Handle<Object> length = GetNext(isolate, lvl + 1);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_length(*length);
+ return object;
+ }
+ default:
+ PrintF(stderr,
+ "[couldn't handle instance type %d]\n", map->instance_type());
+ UNREACHABLE();
+ break;
+ }
+ UNREACHABLE();
+ break;
+ }
+
+ case SlotRef::DUPLICATE_OBJECT: {
+ int object_index = slot.DuplicateObjectId();
+ Handle<Object> object = materialized_objects_[object_index];
+ materialized_objects_.Add(object);
+ return object;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ FATAL("We should never get here - unexpected deopt slot kind.");
+ return Handle<Object>::null();
+}
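// A usage sketch (hypothetical helper; the loop shape is illustrative) of
// the three-phase protocol above: Prepare() skips the parent frames' slots
// and loads previously materialized objects, GetNext() yields one argument
// value per call, and Finish() writes newly materialized objects back to
// the store so a later inspection reuses the same object identities.
static void MaterializeInlinedArguments(SlotRefValueBuilder* slots,
                                        Isolate* isolate,
                                        List<Handle<Object> >* out) {
  slots->Prepare(isolate);
  for (int i = 0; i < slots->args_length(); i++) {
    out->Add(slots->GetNext(isolate, 0));
  }
  slots->Finish(isolate);
}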
+
+
+void SlotRefValueBuilder::Finish(Isolate* isolate) {
+ // We should have processed all the slots.
+ CHECK_EQ(slot_refs_.length(), current_slot_);
+
+ if (materialized_objects_.length() > prev_materialized_count_) {
+ // We have materialized some new objects, so we have to store them
+ // to prevent duplicate materialization.
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(
+ materialized_objects_.length());
+ for (int i = 0; i < materialized_objects_.length(); i++) {
+ array->set(i, *(materialized_objects_.at(i)));
+ }
+ isolate->materialized_object_store()->Set(stack_frame_id_, array);
+ }
+}
+
+
+Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
+ int index = StackIdToIndex(fp);
+ if (index == -1) {
+ return Handle<FixedArray>::null();
+ }
+ Handle<FixedArray> array = GetStackEntries();
+ CHECK_GT(array->length(), index);
+ return Handle<FixedArray>::cast(Handle<Object>(array->get(index),
+ isolate()));
+}
+
+
+void MaterializedObjectStore::Set(Address fp,
+ Handle<FixedArray> materialized_objects) {
+ int index = StackIdToIndex(fp);
+ if (index == -1) {
+ index = frame_fps_.length();
+ frame_fps_.Add(fp);
+ }
+
+ Handle<FixedArray> array = EnsureStackEntries(index + 1);
+ array->set(index, *materialized_objects);
+}
+
+
+void MaterializedObjectStore::Remove(Address fp) {
+ int index = StackIdToIndex(fp);
+ CHECK_GE(index, 0);
+
+ frame_fps_.Remove(index);
+ Handle<FixedArray> array = GetStackEntries();
+ CHECK_LT(index, array->length());
+ for (int i = index; i < frame_fps_.length(); i++) {
+ array->set(i, array->get(i + 1));
+ }
+ array->set(frame_fps_.length(), isolate()->heap()->undefined_value());
+}
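// The same compaction in plain C++ (illustrative only): Remove() deletes
// one frame-pointer key and shifts the trailing entries left so that
// frame_fps_ and the materialized-objects array stay index-aligned, with
// the vacated tail slot cleared.
#include <vector>

static void RemoveAligned(std::vector<void*>* keys,
                          std::vector<void*>* values, void* fp) {
  for (size_t i = 0; i < keys->size(); i++) {
    if ((*keys)[i] == fp) {
      keys->erase(keys->begin() + i);
      values->erase(values->begin() + i);  // Keep both arrays aligned.
      return;
    }
  }
}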
+
+
+int MaterializedObjectStore::StackIdToIndex(Address fp) {
+ for (int i = 0; i < frame_fps_.length(); i++) {
+ if (frame_fps_[i] == fp) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+
+Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
+ return Handle<FixedArray>(isolate()->heap()->materialized_objects());
+}
+
+
+Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
+ Handle<FixedArray> array = GetStackEntries();
+ if (array->length() >= length) {
+ return array;
+ }
+
+ int new_length = length > 10 ? length : 10;
+ if (new_length < 2 * array->length()) {
+ new_length = 2 * array->length();
+ }
+
+ Handle<FixedArray> new_array =
+ isolate()->factory()->NewFixedArray(new_length, TENURED);
+ for (int i = 0; i < array->length(); i++) {
+ new_array->set(i, array->get(i));
+ }
+ for (int i = array->length(); i < length; i++) {
+ new_array->set(i, isolate()->heap()->undefined_value());
+ }
+ isolate()->heap()->public_set_materialized_objects(*new_array);
+ return new_array;
}
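// The growth policy above as a pure function (illustrative): grow to at
// least 10 entries and never by less than doubling, which keeps repeated
// EnsureStackEntries() calls amortized linear.
static int NewStackEntriesLength(int current_length, int required_length) {
  if (current_length >= required_length) return current_length;
  int new_length = required_length > 10 ? required_length : 10;
  if (new_length < 2 * current_length) new_length = 2 * current_length;
  return new_length;
}
// E.g. growing from 10 entries when 11 are required yields 20, not 11.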
-#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
int frame_index,
@@ -3060,7 +3565,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
if (has_arguments_adaptor) {
output_frame = deoptimizer->output_[frame_index - 1];
- ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
+ CHECK_EQ(output_frame->GetFrameType(), StackFrame::ARGUMENTS_ADAPTOR);
}
parameters_count_ = output_frame->ComputeParametersCount();
@@ -3083,6 +3588,4 @@ void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/deoptimizer.h b/chromium/v8/src/deoptimizer.h
index f518546018b..1a6f668d8f3 100644
--- a/chromium/v8/src/deoptimizer.h
+++ b/chromium/v8/src/deoptimizer.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DEOPTIMIZER_H_
#define V8_DEOPTIMIZER_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "allocation.h"
-#include "macro-assembler.h"
-#include "zone-inl.h"
+#include "src/allocation.h"
+#include "src/macro-assembler.h"
+#include "src/zone-inl.h"
namespace v8 {
@@ -134,7 +111,7 @@ class Deoptimizer : public Malloced {
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
- struct JumpTableEntry {
+ struct JumpTableEntry : public ZoneObject {
inline JumpTableEntry(Address entry,
Deoptimizer::BailoutType type,
bool frame)
@@ -169,7 +146,6 @@ class Deoptimizer : public Malloced {
Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// The returned object with information on the optimized frame needs to be
// freed before another one can be generated.
static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
@@ -177,7 +153,6 @@ class Deoptimizer : public Malloced {
Isolate* isolate);
static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate);
-#endif
// Makes sure that there is enough room in the relocation
// information of a code object to perform lazy deoptimization
@@ -212,14 +187,13 @@ class Deoptimizer : public Malloced {
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
-#ifdef ENABLE_DEBUGGER_SUPPORT
+
void MaterializeHeapNumbersForDebuggerInspectableFrame(
Address parameters_top,
uint32_t parameters_size,
Address expressions_top,
uint32_t expressions_size,
DeoptimizedFrameInfo* info);
-#endif
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -329,14 +303,21 @@ class Deoptimizer : public Malloced {
void DoComputeCompiledStubFrame(TranslationIterator* iterator,
int frame_index);
+ // Translate object, store the result into an auxiliary array
+ // (deferred_objects_tagged_values_).
void DoTranslateObject(TranslationIterator* iterator,
int object_index,
int field_index);
+ // Translate value, store the result into the given frame slot.
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
+ // Translate object, do not store the result anywhere (but do update
+ // the deferred materialization array).
+ void DoTranslateObjectAndSkip(TranslationIterator* iterator);
+
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
@@ -406,10 +387,6 @@ class Deoptimizer : public Malloced {
// at the dynamic alignment state slot inside the frame.
bool HasAlignmentPadding(JSFunction* function);
- // Select the version of NotifyStubFailure builtin that either saves or
- // doesn't save the double registers depending on CPU features.
- Code* NotifyStubFailureBuiltin();
-
Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
@@ -435,6 +412,11 @@ class Deoptimizer : public Malloced {
List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
+ // Key for lookup of previously materialized objects.
+ Address stack_fp_;
+ Handle<FixedArray> previously_materialized_objects_;
+ int prev_materialized_count_;
+
// Output frame information. Only used during heap object materialization.
List<Handle<JSFunction> > jsframe_functions_;
List<bool> jsframe_has_adapted_arguments_;
@@ -503,6 +485,8 @@ class FrameDescription {
void SetCallerFp(unsigned offset, intptr_t value);
+ void SetCallerConstantPool(unsigned offset, intptr_t value);
+
intptr_t GetRegister(unsigned n) const {
#if DEBUG
// This convoluted ASSERT is needed to work around a gcc problem that
@@ -543,6 +527,11 @@ class FrameDescription {
intptr_t GetContext() const { return context_; }
void SetContext(intptr_t context) { context_ = context; }
+ intptr_t GetConstantPool() const { return constant_pool_; }
+ void SetConstantPool(intptr_t constant_pool) {
+ constant_pool_ = constant_pool;
+ }
+
Smi* GetState() const { return state_; }
void SetState(Smi* state) { state_ = state; }
@@ -605,6 +594,7 @@ class FrameDescription {
intptr_t pc_;
intptr_t fp_;
intptr_t context_;
+ intptr_t constant_pool_;
StackFrame::Type type_;
Smi* state_;
@@ -631,18 +621,14 @@ class DeoptimizerData {
explicit DeoptimizerData(MemoryAllocator* allocator);
~DeoptimizerData();
-#ifdef ENABLE_DEBUGGER_SUPPORT
void Iterate(ObjectVisitor* v);
-#endif
private:
MemoryAllocator* allocator_;
int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
-#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* deoptimized_frame_info_;
-#endif
Deoptimizer* current_;
@@ -777,7 +763,15 @@ class SlotRef BASE_EMBEDDED {
INT32,
UINT32,
DOUBLE,
- LITERAL
+ LITERAL,
+ DEFERRED_OBJECT, // Object captured by the escape analysis.
+ // The number of nested objects can be obtained
+ // with the GetChildrenCount() method
+ // (the SlotRefs of the nested objects follow
+ // this SlotRef in depth-first order).
+ DUPLICATE_OBJECT, // Duplicated object of a deferred object.
+ ARGUMENTS_OBJECT // Arguments object; only used to keep indexing
+ // in sync, it should not be materialized.
};
SlotRef()
@@ -789,52 +783,80 @@ class SlotRef BASE_EMBEDDED {
SlotRef(Isolate* isolate, Object* literal)
: literal_(literal, isolate), representation_(LITERAL) { }
- Handle<Object> GetValue(Isolate* isolate) {
- switch (representation_) {
- case TAGGED:
- return Handle<Object>(Memory::Object_at(addr_), isolate);
-
- case INT32: {
- int value = Memory::int32_at(addr_);
- if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value), isolate);
- } else {
- return isolate->factory()->NewNumberFromInt(value);
- }
- }
-
- case UINT32: {
- uint32_t value = Memory::uint32_at(addr_);
- if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
- } else {
- return isolate->factory()->NewNumber(static_cast<double>(value));
- }
- }
-
- case DOUBLE: {
- double value = read_double_value(addr_);
- return isolate->factory()->NewNumber(value);
- }
-
- case LITERAL:
- return literal_;
-
- default:
- UNREACHABLE();
- return Handle<Object>::null();
+ static SlotRef NewArgumentsObject(int length) {
+ SlotRef slot;
+ slot.representation_ = ARGUMENTS_OBJECT;
+ slot.deferred_object_length_ = length;
+ return slot;
+ }
+
+ static SlotRef NewDeferredObject(int length) {
+ SlotRef slot;
+ slot.representation_ = DEFERRED_OBJECT;
+ slot.deferred_object_length_ = length;
+ return slot;
+ }
+
+ SlotRepresentation Representation() { return representation_; }
+
+ static SlotRef NewDuplicateObject(int id) {
+ SlotRef slot;
+ slot.representation_ = DUPLICATE_OBJECT;
+ slot.duplicate_object_id_ = id;
+ return slot;
+ }
+
+ int GetChildrenCount() {
+ if (representation_ == DEFERRED_OBJECT ||
+ representation_ == ARGUMENTS_OBJECT) {
+ return deferred_object_length_;
+ } else {
+ return 0;
}
}
- static Vector<SlotRef> ComputeSlotMappingForArguments(
- JavaScriptFrame* frame,
- int inlined_frame_index,
- int formal_parameter_count);
+ int DuplicateObjectId() { return duplicate_object_id_; }
+
+ Handle<Object> GetValue(Isolate* isolate);
private:
Address addr_;
Handle<Object> literal_;
SlotRepresentation representation_;
+ int deferred_object_length_;
+ int duplicate_object_id_;
+};
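// A small sketch (hypothetical values) of how the factory methods above
// encode the three synthetic slot kinds: only deferred and arguments
// slots carry a child count, and only duplicates carry an object id.
static void SlotFactorySketch() {
  SlotRef deferred = SlotRef::NewDeferredObject(3);  // Map + fields follow.
  SlotRef args = SlotRef::NewArgumentsObject(2);     // Two argument slots.
  SlotRef dup = SlotRef::NewDuplicateObject(0);      // Refers to object #0.
  CHECK_EQ(deferred.GetChildrenCount(), 3);
  CHECK_EQ(args.GetChildrenCount(), 2);
  CHECK_EQ(dup.GetChildrenCount(), 0);
  CHECK_EQ(dup.DuplicateObjectId(), 0);
}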
+
+class SlotRefValueBuilder BASE_EMBEDDED {
+ public:
+ SlotRefValueBuilder(
+ JavaScriptFrame* frame,
+ int inlined_frame_index,
+ int formal_parameter_count);
+
+ void Prepare(Isolate* isolate);
+ Handle<Object> GetNext(Isolate* isolate, int level);
+ void Finish(Isolate* isolate);
+
+ int args_length() { return args_length_; }
+
+ private:
+ List<Handle<Object> > materialized_objects_;
+ Handle<FixedArray> previously_materialized_objects_;
+ int prev_materialized_count_;
+ Address stack_frame_id_;
+ List<SlotRef> slot_refs_;
+ int current_slot_;
+ int args_length_;
+ int first_slot_index_;
+
+ static SlotRef ComputeSlotForNextArgument(
+ Translation::Opcode opcode,
+ TranslationIterator* iterator,
+ DeoptimizationInputData* data,
+ JavaScriptFrame* frame);
+
+ Handle<Object> GetPreviouslyMaterialized(Isolate* isolate, int length);
static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
if (slot_index >= 0) {
@@ -846,19 +868,30 @@ class SlotRef BASE_EMBEDDED {
}
}
- static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
+ Handle<Object> GetDeferredObject(Isolate* isolate);
+};
- static void ComputeSlotsForArguments(
- Vector<SlotRef>* args_slots,
- TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
+class MaterializedObjectStore {
+ public:
+ explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {
+ }
+
+ Handle<FixedArray> Get(Address fp);
+ void Set(Address fp, Handle<FixedArray> materialized_objects);
+ void Remove(Address fp);
+
+ private:
+ Isolate* isolate() { return isolate_; }
+ Handle<FixedArray> GetStackEntries();
+ Handle<FixedArray> EnsureStackEntries(int size);
+
+ int StackIdToIndex(Address fp);
+
+ Isolate* isolate_;
+ List<Address> frame_fps_;
};
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Class used to represent an unoptimized frame when the debugger
// needs to inspect a frame that is part of an optimized frame. The
// internally used FrameDescription objects are not GC safe so for use
@@ -932,7 +965,6 @@ class DeoptimizedFrameInfo : public Malloced {
friend class Deoptimizer;
};
-#endif
} } // namespace v8::internal
diff --git a/chromium/v8/src/disasm.h b/chromium/v8/src/disasm.h
index f7f2d412028..89b7fc26156 100644
--- a/chromium/v8/src/disasm.h
+++ b/chromium/v8/src/disasm.h
@@ -1,29 +1,6 @@
// Copyright 2007-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DISASM_H_
#define V8_DISASM_H_
diff --git a/chromium/v8/src/disassembler.cc b/chromium/v8/src/disassembler.cc
index 69737ed89f8..f1c28e8199f 100644
--- a/chromium/v8/src/disassembler.cc
+++ b/chromium/v8/src/disassembler.cc
@@ -1,41 +1,18 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "disassembler.h"
-#include "macro-assembler.h"
-#include "serialize.h"
-#include "string-stream.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
+#include "src/string-stream.h"
namespace v8 {
namespace internal {
@@ -73,7 +50,7 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
const char* name = code_->GetIsolate()->builtins()->Lookup(pc);
if (name != NULL) {
- OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
+ SNPrintF(v8_buffer_, "%s (%p)", name, pc);
return v8_buffer_.start();
}
@@ -81,7 +58,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
- OS::SNPrintF(v8_buffer_, "%d (%p)", offs, pc);
+ SNPrintF(v8_buffer_, "%d (%p)", offs, pc);
return v8_buffer_.start();
}
}
@@ -137,27 +114,27 @@ static int DecodeIt(Isolate* isolate,
// First decode instruction so that we know its length.
byte* prev_pc = pc;
if (constants > 0) {
- OS::SNPrintF(decode_buffer,
- "%08x constant",
- *reinterpret_cast<int32_t*>(pc));
+ SNPrintF(decode_buffer,
+ "%08x constant",
+ *reinterpret_cast<int32_t*>(pc));
constants--;
pc += 4;
} else {
int num_const = d.ConstantPoolSizeAt(pc);
if (num_const >= 0) {
- OS::SNPrintF(decode_buffer,
- "%08x constant pool begin",
- *reinterpret_cast<int32_t*>(pc));
+ SNPrintF(decode_buffer,
+ "%08x constant pool begin",
+ *reinterpret_cast<int32_t*>(pc));
constants = num_const;
pc += 4;
} else if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
// raw pointer embedded in code stream, e.g., jump table
byte* ptr = *reinterpret_cast<byte**>(pc);
- OS::SNPrintF(decode_buffer,
- "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
- ptr,
- ptr - begin);
+ SNPrintF(decode_buffer,
+ "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
+ reinterpret_cast<intptr_t>(ptr),
+ ptr - begin);
pc += 4;
} else {
decode_buffer[0] = '\0';
@@ -200,7 +177,7 @@ static int DecodeIt(Isolate* isolate,
// Print all the reloc info for this instruction which are not comments.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
// Indent the printing of the reloc info.
if (i == 0) {
@@ -224,7 +201,7 @@ static int DecodeIt(Isolate* isolate,
StringStream accumulator(&allocator);
relocinfo.target_object()->ShortPrint(&accumulator);
SmartArrayPointer<const char> obj_name = accumulator.ToCString();
- out.AddFormatted(" ;; object: %s", *obj_name);
+ out.AddFormatted(" ;; object: %s", obj_name.get());
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
const char* reference_name =
ref_encoder.NameOfAddress(relocinfo.target_reference());
@@ -237,7 +214,8 @@ static int DecodeIt(Isolate* isolate,
Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
Code::Kind kind = code->kind();
if (code->is_inline_cache_stub()) {
- if (rmode == RelocInfo::CODE_TARGET_CONTEXT) {
+ if (kind == Code::LOAD_IC &&
+ LoadIC::GetContextualMode(code->extra_ic_state()) == CONTEXTUAL) {
out.AddFormatted(" contextual,");
}
InlineCacheState ic_state = code->ic_state();
@@ -247,9 +225,6 @@ static int DecodeIt(Isolate* isolate,
Code::StubType type = code->type();
out.AddFormatted(", %s", Code::StubType2String(type));
}
- if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
- out.AddFormatted(", argc = %d", code->arguments_count());
- }
} else if (kind == Code::STUB || kind == Code::HANDLER) {
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
diff --git a/chromium/v8/src/disassembler.h b/chromium/v8/src/disassembler.h
index 87891500367..f65f5385791 100644
--- a/chromium/v8/src/disassembler.h
+++ b/chromium/v8/src/disassembler.h
@@ -1,34 +1,11 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DISASSEMBLER_H_
#define V8_DISASSEMBLER_H_
-#include "allocation.h"
+#include "src/allocation.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/diy-fp.cc b/chromium/v8/src/diy-fp.cc
index 49138777088..3abf14d3f2c 100644
--- a/chromium/v8/src/diy-fp.cc
+++ b/chromium/v8/src/diy-fp.cc
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "diy-fp.h"
+#include "include/v8stdint.h"
+#include "src/globals.h"
+#include "src/checks.h"
+#include "src/diy-fp.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/diy-fp.h b/chromium/v8/src/diy-fp.h
index 26ff1a20bfc..f8f2673c410 100644
--- a/chromium/v8/src/diy-fp.h
+++ b/chromium/v8/src/diy-fp.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DIY_FP_H_
#define V8_DIY_FP_H_
diff --git a/chromium/v8/src/double.h b/chromium/v8/src/double.h
index fcf6906af73..947d25943c6 100644
--- a/chromium/v8/src/double.h
+++ b/chromium/v8/src/double.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DOUBLE_H_
#define V8_DOUBLE_H_
-#include "diy-fp.h"
+#include "src/diy-fp.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/dtoa.cc b/chromium/v8/src/dtoa.cc
index bda67205c76..949e76bf527 100644
--- a/chromium/v8/src/dtoa.cc
+++ b/chromium/v8/src/dtoa.cc
@@ -1,42 +1,19 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <cmath>
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "utils.h"
+#include "include/v8stdint.h"
+#include "src/checks.h"
+#include "src/utils.h"
-#include "dtoa.h"
+#include "src/dtoa.h"
-#include "bignum-dtoa.h"
-#include "double.h"
-#include "fast-dtoa.h"
-#include "fixed-dtoa.h"
+#include "src/bignum-dtoa.h"
+#include "src/double.h"
+#include "src/fast-dtoa.h"
+#include "src/fixed-dtoa.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/dtoa.h b/chromium/v8/src/dtoa.h
index 948a0791913..dd88688d082 100644
--- a/chromium/v8/src/dtoa.h
+++ b/chromium/v8/src/dtoa.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DTOA_H_
#define V8_DTOA_H_
diff --git a/chromium/v8/src/effects.h b/chromium/v8/src/effects.h
index afb8f9e54b3..8cf5a88ee42 100644
--- a/chromium/v8/src/effects.h
+++ b/chromium/v8/src/effects.h
@@ -1,36 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_EFFECTS_H_
#define V8_EFFECTS_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "types.h"
+#include "src/types.h"
namespace v8 {
namespace internal {
@@ -59,24 +36,24 @@ struct Effect {
Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
// The unknown effect.
- static Effect Unknown(Isolate* isolate) {
- return Effect(Bounds::Unbounded(isolate), POSSIBLE);
+ static Effect Unknown(Zone* zone) {
+ return Effect(Bounds::Unbounded(zone), POSSIBLE);
}
- static Effect Forget(Isolate* isolate) {
- return Effect(Bounds::Unbounded(isolate), DEFINITE);
+ static Effect Forget(Zone* zone) {
+ return Effect(Bounds::Unbounded(zone), DEFINITE);
}
// Sequential composition, as in 'e1; e2'.
- static Effect Seq(Effect e1, Effect e2, Isolate* isolate) {
+ static Effect Seq(Effect e1, Effect e2, Zone* zone) {
if (e2.modality == DEFINITE) return e2;
- return Effect(Bounds::Either(e1.bounds, e2.bounds, isolate), e1.modality);
+ return Effect(Bounds::Either(e1.bounds, e2.bounds, zone), e1.modality);
}
// Alternative composition, as in 'cond ? e1 : e2'.
- static Effect Alt(Effect e1, Effect e2, Isolate* isolate) {
+ static Effect Alt(Effect e1, Effect e2, Zone* zone) {
return Effect(
- Bounds::Either(e1.bounds, e2.bounds, isolate),
+ Bounds::Either(e1.bounds, e2.bounds, zone),
e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
}
};
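// The modality algebra above in isolation (illustrative; enum names are
// renamed to keep the sketch self-contained): a DEFINITE second effect
// overwrites in sequential composition, while any POSSIBLE branch makes
// the alternative composition POSSIBLE.
enum SketchModality { SKETCH_POSSIBLE, SKETCH_DEFINITE };

static inline SketchModality SeqModality(SketchModality m1,
                                         SketchModality m2) {
  return m2 == SKETCH_DEFINITE ? SKETCH_DEFINITE : m1;
}

static inline SketchModality AltModality(SketchModality m1,
                                         SketchModality m2) {
  return m1 == SKETCH_POSSIBLE ? SKETCH_POSSIBLE : m2;
}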
@@ -106,20 +83,20 @@ class EffectsMixin: public Base {
Effect Lookup(Var var) {
Locator locator;
return this->Find(var, &locator)
- ? locator.value() : Effect::Unknown(Base::isolate());
+ ? locator.value() : Effect::Unknown(Base::zone());
}
Bounds LookupBounds(Var var) {
Effect effect = Lookup(var);
return effect.modality == Effect::DEFINITE
- ? effect.bounds : Bounds::Unbounded(Base::isolate());
+ ? effect.bounds : Bounds::Unbounded(Base::zone());
}
// Sequential composition.
void Seq(Var var, Effect effect) {
Locator locator;
if (!this->Insert(var, &locator)) {
- effect = Effect::Seq(locator.value(), effect, Base::isolate());
+ effect = Effect::Seq(locator.value(), effect, Base::zone());
}
locator.set_value(effect);
}
@@ -133,7 +110,7 @@ class EffectsMixin: public Base {
void Alt(Var var, Effect effect) {
Locator locator;
if (!this->Insert(var, &locator)) {
- effect = Effect::Alt(locator.value(), effect, Base::isolate());
+ effect = Effect::Alt(locator.value(), effect, Base::zone());
}
locator.set_value(effect);
}
@@ -148,7 +125,7 @@ class EffectsMixin: public Base {
// Invalidation.
void Forget() {
Overrider override = {
- Effect::Forget(Base::isolate()), Effects(Base::zone()) };
+ Effect::Forget(Base::zone()), Effects(Base::zone()) };
this->ForEach(&override);
Seq(override.effects);
}
@@ -206,7 +183,6 @@ class EffectsBase {
EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >;
Zone* zone() { return map_->allocator().zone(); }
- Isolate* isolate() { return zone()->isolate(); }
struct SplayTreeConfig {
typedef Var Key;
@@ -277,7 +253,6 @@ class NestedEffectsBase {
typedef typename EffectsBase<Var, kNoVar>::Locator Locator;
Zone* zone() { return node_->zone; }
- Isolate* isolate() { return zone()->isolate(); }
void push() { node_ = new(node_->zone) Node(node_->zone, node_); }
void pop() { node_ = node_->previous; }
diff --git a/chromium/v8/src/elements-kind.cc b/chromium/v8/src/elements-kind.cc
index 689c2205696..cd946e8028d 100644
--- a/chromium/v8/src/elements-kind.cc
+++ b/chromium/v8/src/elements-kind.cc
@@ -1,35 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "elements-kind.h"
-
-#include "api.h"
-#include "elements.h"
-#include "objects.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/elements-kind.h"
+
+#include "src/api.h"
+#include "src/base/lazy-instance.h"
+#include "src/elements.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
@@ -37,27 +15,36 @@ namespace internal {
int ElementsKindToShiftSize(ElementsKind elements_kind) {
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
return 0;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
return 1;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case FLOAT32_ELEMENTS:
return 2;
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FLOAT64_ELEMENTS:
return 3;
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
return kPointerSizeLog2;
}
UNREACHABLE();
@@ -65,6 +52,13 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
}
+int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) {
+ STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+ return IsExternalArrayElementsKind(elements_kind)
+ ? 0 : (FixedArray::kHeaderSize - kHeapObjectTag);
+}
+
+
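// Worked example (illustrative) combining the two helpers above: the byte
// offset of element i is header + (i << shift), so for FAST_DOUBLE_ELEMENTS
// (shift 3) the third element of an on-heap backing store starts at
// header + 16, while external arrays use a header size of 0.
static inline int ElementByteOffset(ElementsKind kind, int index) {
  return GetDefaultHeaderSizeForElementsKind(kind) +
         (index << ElementsKindToShiftSize(kind));
}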
const char* ElementsKindToString(ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
return accessor->name();
@@ -110,8 +104,8 @@ struct InitializeFastElementsKindSequence {
};
-static LazyInstance<ElementsKind*,
- InitializeFastElementsKindSequence>::type
+static base::LazyInstance<ElementsKind*,
+ InitializeFastElementsKindSequence>::type
fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
@@ -133,14 +127,27 @@ int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
}
+ElementsKind GetNextTransitionElementsKind(ElementsKind kind) {
+ switch (kind) {
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: return EXTERNAL_##TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
+#undef FIXED_TYPED_ARRAY_CASE
+ default: {
+ int index = GetSequenceIndexFromFastElementsKind(kind);
+ return GetFastElementsKindFromSequenceIndex(index + 1);
+ }
+ }
+}
+
+
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed) {
ASSERT(IsFastElementsKind(elements_kind));
ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
while (true) {
- int index =
- GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
- elements_kind = GetFastElementsKindFromSequenceIndex(index);
+ elements_kind = GetNextTransitionElementsKind(elements_kind);
if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
return elements_kind;
}
@@ -150,28 +157,55 @@ ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
}
+static bool IsTypedArrayElementsKind(ElementsKind elements_kind) {
+ return IsFixedTypedArrayElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind);
+}
+
+
+static inline bool IsFastTransitionTarget(ElementsKind elements_kind) {
+ return IsFastElementsKind(elements_kind) ||
+ elements_kind == DICTIONARY_ELEMENTS;
+}
+
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
- switch (from_kind) {
- case FAST_SMI_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS;
- case FAST_HOLEY_SMI_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS &&
- to_kind != FAST_HOLEY_SMI_ELEMENTS;
- case FAST_DOUBLE_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS &&
- to_kind != FAST_HOLEY_SMI_ELEMENTS &&
- to_kind != FAST_DOUBLE_ELEMENTS;
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return to_kind == FAST_ELEMENTS ||
- to_kind == FAST_HOLEY_ELEMENTS;
- case FAST_ELEMENTS:
- return to_kind == FAST_HOLEY_ELEMENTS;
- case FAST_HOLEY_ELEMENTS:
- return false;
- default:
- return false;
+ if (IsTypedArrayElementsKind(from_kind) ||
+ IsTypedArrayElementsKind(to_kind)) {
+ switch (from_kind) {
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ return to_kind == EXTERNAL_##TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE);
+#undef FIXED_TYPED_ARRAY_CASE
+ default:
+ return false;
+ }
+ }
+ if (IsFastElementsKind(from_kind) && IsFastTransitionTarget(to_kind)) {
+ switch (from_kind) {
+ case FAST_SMI_ELEMENTS:
+ return to_kind != FAST_SMI_ELEMENTS;
+ case FAST_HOLEY_SMI_ELEMENTS:
+ return to_kind != FAST_SMI_ELEMENTS &&
+ to_kind != FAST_HOLEY_SMI_ELEMENTS;
+ case FAST_DOUBLE_ELEMENTS:
+ return to_kind != FAST_SMI_ELEMENTS &&
+ to_kind != FAST_HOLEY_SMI_ELEMENTS &&
+ to_kind != FAST_DOUBLE_ELEMENTS;
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return to_kind == FAST_ELEMENTS ||
+ to_kind == FAST_HOLEY_ELEMENTS;
+ case FAST_ELEMENTS:
+ return to_kind == FAST_HOLEY_ELEMENTS;
+ case FAST_HOLEY_ELEMENTS:
+ return false;
+ default:
+ return false;
+ }
}
+ return false;
}
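// Spot checks (illustrative) of the transition lattice encoded above:
// packed kinds may widen toward holey object elements, nothing narrows
// back, and fixed typed-array kinds transition only to their external
// counterparts.
static void TransitionLatticeSketch() {
  CHECK(IsMoreGeneralElementsKindTransition(FAST_SMI_ELEMENTS,
                                            FAST_DOUBLE_ELEMENTS));
  CHECK(IsMoreGeneralElementsKindTransition(FAST_ELEMENTS,
                                            FAST_HOLEY_ELEMENTS));
  CHECK(!IsMoreGeneralElementsKindTransition(FAST_HOLEY_ELEMENTS,
                                             FAST_ELEMENTS));
  CHECK(IsMoreGeneralElementsKindTransition(UINT8_ELEMENTS,
                                            EXTERNAL_UINT8_ELEMENTS));
}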
diff --git a/chromium/v8/src/elements-kind.h b/chromium/v8/src/elements-kind.h
index 51a690272f7..cdd928b42d8 100644
--- a/chromium/v8/src/elements-kind.h
+++ b/chromium/v8/src/elements-kind.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ELEMENTS_KIND_H_
#define V8_ELEMENTS_KIND_H_
-#include "v8checks.h"
+#include "src/v8checks.h"
namespace v8 {
namespace internal {
@@ -51,25 +28,38 @@ enum ElementsKind {
// The "slow" kind.
DICTIONARY_ELEMENTS,
- NON_STRICT_ARGUMENTS_ELEMENTS,
+ SLOPPY_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
- EXTERNAL_BYTE_ELEMENTS,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
- EXTERNAL_SHORT_ELEMENTS,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
- EXTERNAL_INT_ELEMENTS,
- EXTERNAL_UNSIGNED_INT_ELEMENTS,
- EXTERNAL_FLOAT_ELEMENTS,
- EXTERNAL_DOUBLE_ELEMENTS,
- EXTERNAL_PIXEL_ELEMENTS,
+ EXTERNAL_INT8_ELEMENTS,
+ EXTERNAL_UINT8_ELEMENTS,
+ EXTERNAL_INT16_ELEMENTS,
+ EXTERNAL_UINT16_ELEMENTS,
+ EXTERNAL_INT32_ELEMENTS,
+ EXTERNAL_UINT32_ELEMENTS,
+ EXTERNAL_FLOAT32_ELEMENTS,
+ EXTERNAL_FLOAT64_ELEMENTS,
+ EXTERNAL_UINT8_CLAMPED_ELEMENTS,
+
+ // Fixed typed arrays
+ UINT8_ELEMENTS,
+ INT8_ELEMENTS,
+ UINT16_ELEMENTS,
+ INT16_ELEMENTS,
+ UINT32_ELEMENTS,
+ INT32_ELEMENTS,
+ FLOAT32_ELEMENTS,
+ FLOAT64_ELEMENTS,
+ UINT8_CLAMPED_ELEMENTS,
// Derived constants from ElementsKind
FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
- LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+ LAST_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
- FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
- LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+ FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_INT8_ELEMENTS,
+ LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_UINT8_CLAMPED_ELEMENTS,
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
};
@@ -82,15 +72,16 @@ const int kFastElementsKindPackedToHoley =
FAST_HOLEY_SMI_ELEMENTS - FAST_SMI_ELEMENTS;
int ElementsKindToShiftSize(ElementsKind elements_kind);
+int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind);
const char* ElementsKindToString(ElementsKind kind);
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
-ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
-
+ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number);
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
+ElementsKind GetNextTransitionElementsKind(ElementsKind elements_kind);
inline bool IsDictionaryElementsKind(ElementsKind kind) {
return kind == DICTIONARY_ELEMENTS;
@@ -103,12 +94,29 @@ inline bool IsExternalArrayElementsKind(ElementsKind kind) {
}
+inline bool IsTerminalElementsKind(ElementsKind kind) {
+ return kind == TERMINAL_FAST_ELEMENTS_KIND ||
+ IsExternalArrayElementsKind(kind);
+}
+
+
+inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
+ return kind >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
+ kind <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
+}
+
+
inline bool IsFastElementsKind(ElementsKind kind) {
ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
}
+inline bool IsTransitionElementsKind(ElementsKind kind) {
+ return IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind);
+}
+
+
inline bool IsFastDoubleElementsKind(ElementsKind kind) {
return kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS;
@@ -116,14 +124,20 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
inline bool IsExternalFloatOrDoubleElementsKind(ElementsKind kind) {
- return kind == EXTERNAL_DOUBLE_ELEMENTS ||
- kind == EXTERNAL_FLOAT_ELEMENTS;
+ return kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ kind == EXTERNAL_FLOAT32_ELEMENTS;
+}
+
+
+inline bool IsFixedFloatElementsKind(ElementsKind kind) {
+ return kind == FLOAT32_ELEMENTS || kind == FLOAT64_ELEMENTS;
}
inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
return IsFastDoubleElementsKind(kind) ||
- IsExternalFloatOrDoubleElementsKind(kind);
+ IsExternalFloatOrDoubleElementsKind(kind) ||
+ IsFixedFloatElementsKind(kind);
}
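
With the additions above, IsDoubleOrFloatElementsKind is the union of three predicates: fast doubles, external floats/doubles, and the new fixed float kinds. A hedged sketch of the expected truth table, assuming this header and V8's ASSERT macro are available; the demo function itself is hypothetical:

void DoubleOrFloatKindDemo() {  // illustrative only
  using namespace v8::internal;
  ASSERT(IsDoubleOrFloatElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS));
  ASSERT(IsDoubleOrFloatElementsKind(EXTERNAL_FLOAT32_ELEMENTS));
  ASSERT(IsDoubleOrFloatElementsKind(FLOAT64_ELEMENTS));  // new in this patch
  ASSERT(!IsDoubleOrFloatElementsKind(FAST_SMI_ELEMENTS));
}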
diff --git a/chromium/v8/src/elements.cc b/chromium/v8/src/elements.cc
index 0b745c4505f..a64268835d7 100644
--- a/chromium/v8/src/elements.cc
+++ b/chromium/v8/src/elements.cc
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arguments.h"
-#include "objects.h"
-#include "elements.h"
-#include "utils.h"
-#include "v8conversions.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/conversions.h"
+#include "src/elements.h"
+#include "src/objects.h"
+#include "src/utils.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind;
// several abstract ElementsAccessor classes are used to allow sharing
@@ -48,18 +25,27 @@
// - FastDoubleElementsAccessor
// - FastPackedDoubleElementsAccessor
// - FastHoleyDoubleElementsAccessor
-// - ExternalElementsAccessor (abstract)
-// - ExternalByteElementsAccessor
-// - ExternalUnsignedByteElementsAccessor
-// - ExternalShortElementsAccessor
-// - ExternalUnsignedShortElementsAccessor
-// - ExternalIntElementsAccessor
-// - ExternalUnsignedIntElementsAccessor
-// - ExternalFloatElementsAccessor
-// - ExternalDoubleElementsAccessor
-// - PixelElementsAccessor
+// - TypedElementsAccessor: template, with instantiations:
+// - ExternalInt8ElementsAccessor
+// - ExternalUint8ElementsAccessor
+// - ExternalInt16ElementsAccessor
+// - ExternalUint16ElementsAccessor
+// - ExternalInt32ElementsAccessor
+// - ExternalUint32ElementsAccessor
+// - ExternalFloat32ElementsAccessor
+// - ExternalFloat64ElementsAccessor
+// - ExternalUint8ClampedElementsAccessor
+// - FixedUint8ElementsAccessor
+// - FixedInt8ElementsAccessor
+// - FixedUint16ElementsAccessor
+// - FixedInt16ElementsAccessor
+// - FixedUint32ElementsAccessor
+// - FixedInt32ElementsAccessor
+// - FixedFloat32ElementsAccessor
+// - FixedFloat64ElementsAccessor
+// - FixedUint8ClampedElementsAccessor
// - DictionaryElementsAccessor
-// - NonStrictArgumentsElementsAccessor
+// - SloppyArgumentsElementsAccessor
namespace v8 {
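
The hierarchy sketched in the comment above is implemented with the curiously recurring template pattern: ElementsAccessorBase<Subclass, Traits> exposes virtual entry points that forward to static *Impl functions on the concrete subclass, so shared logic binds statically. A reduced, self-contained sketch of that idiom (all names hypothetical):

template <typename Subclass>
class AccessorBaseSketch {
 public:
  virtual ~AccessorBaseSketch() {}
  // Virtual boundary for callers; static dispatch internally.
  virtual int GetCapacity(int store) {
    return Subclass::GetCapacityImpl(store);
  }
 protected:
  static int GetCapacityImpl(int store) { return store; }  // shared default
};

class DictionaryAccessorSketch
    : public AccessorBaseSketch<DictionaryAccessorSketch> {
 protected:
  friend class AccessorBaseSketch<DictionaryAccessorSketch>;
  static int GetCapacityImpl(int store) { return store / 2; }  // specialized
};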
@@ -86,25 +72,37 @@ static const int kPackedSizeNotKnown = -1;
FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \
- V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
+ V(SloppyArgumentsElementsAccessor, SLOPPY_ARGUMENTS_ELEMENTS, \
FixedArray) \
- V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS, \
- ExternalByteArray) \
- V(ExternalUnsignedByteElementsAccessor, \
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS, ExternalUnsignedByteArray) \
- V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS, \
- ExternalShortArray) \
- V(ExternalUnsignedShortElementsAccessor, \
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS, ExternalUnsignedShortArray) \
- V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS, \
- ExternalIntArray) \
- V(ExternalUnsignedIntElementsAccessor, \
- EXTERNAL_UNSIGNED_INT_ELEMENTS, ExternalUnsignedIntArray) \
- V(ExternalFloatElementsAccessor, \
- EXTERNAL_FLOAT_ELEMENTS, ExternalFloatArray) \
- V(ExternalDoubleElementsAccessor, \
- EXTERNAL_DOUBLE_ELEMENTS, ExternalDoubleArray) \
- V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS, ExternalPixelArray)
+ V(ExternalInt8ElementsAccessor, EXTERNAL_INT8_ELEMENTS, \
+ ExternalInt8Array) \
+ V(ExternalUint8ElementsAccessor, \
+ EXTERNAL_UINT8_ELEMENTS, ExternalUint8Array) \
+ V(ExternalInt16ElementsAccessor, EXTERNAL_INT16_ELEMENTS, \
+ ExternalInt16Array) \
+ V(ExternalUint16ElementsAccessor, \
+ EXTERNAL_UINT16_ELEMENTS, ExternalUint16Array) \
+ V(ExternalInt32ElementsAccessor, EXTERNAL_INT32_ELEMENTS, \
+ ExternalInt32Array) \
+ V(ExternalUint32ElementsAccessor, \
+ EXTERNAL_UINT32_ELEMENTS, ExternalUint32Array) \
+ V(ExternalFloat32ElementsAccessor, \
+ EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array) \
+ V(ExternalFloat64ElementsAccessor, \
+ EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array) \
+ V(ExternalUint8ClampedElementsAccessor, \
+ EXTERNAL_UINT8_CLAMPED_ELEMENTS, \
+ ExternalUint8ClampedArray) \
+ V(FixedUint8ElementsAccessor, UINT8_ELEMENTS, FixedUint8Array) \
+ V(FixedInt8ElementsAccessor, INT8_ELEMENTS, FixedInt8Array) \
+ V(FixedUint16ElementsAccessor, UINT16_ELEMENTS, FixedUint16Array) \
+ V(FixedInt16ElementsAccessor, INT16_ELEMENTS, FixedInt16Array) \
+ V(FixedUint32ElementsAccessor, UINT32_ELEMENTS, FixedUint32Array) \
+ V(FixedInt32ElementsAccessor, INT32_ELEMENTS, FixedInt32Array) \
+ V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
+ V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
+ V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
+ FixedUint8ClampedArray)
template<ElementsKind Kind> class ElementsKindTraits {
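
ELEMENTS_LIST above is an X-macro: each V(Class, Kind, Store) row is re-expanded by whatever macro the caller passes in, as ELEMENTS_TRAITS does in the next hunk to stamp out one ElementsKindTraits specialization per kind. A standalone miniature of the idiom, with made-up names:

#define MINI_ELEMENTS_LIST(V) \
  V(FastSmi, 0)               \
  V(Dictionary, 1)

#define MINI_DEFINE_KIND(Name, Value) \
  static const int k##Name##Kind = Value;
MINI_ELEMENTS_LIST(MINI_DEFINE_KIND)  // defines kFastSmiKind, kDictionaryKind
#undef MINI_DEFINE_KIND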
@@ -114,7 +112,7 @@ template<ElementsKind Kind> class ElementsKindTraits {
#define ELEMENTS_TRAITS(Class, KindParam, Store) \
template<> class ElementsKindTraits<KindParam> { \
- public: \
+ public: /* NOLINT */ \
static const ElementsKind Kind = KindParam; \
typedef Store BackingStore; \
};
@@ -125,7 +123,9 @@ ELEMENTS_LIST(ELEMENTS_TRAITS)
ElementsAccessor** ElementsAccessor::elements_accessors_;
-static bool HasKey(FixedArray* array, Object* key) {
+static bool HasKey(Handle<FixedArray> array, Handle<Object> key_handle) {
+ DisallowHeapAllocation no_gc;
+ Object* key = *key_handle;
int len0 = array->length();
for (int i = 0; i < len0; i++) {
Object* element = array->get(i);
@@ -139,18 +139,18 @@ static bool HasKey(FixedArray* array, Object* key) {
}
-static Failure* ThrowArrayLengthRangeError(Heap* heap) {
- HandleScope scope(heap->isolate());
- return heap->isolate()->Throw(
- *heap->isolate()->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+MUST_USE_RESULT
+static MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
+ return isolate->Throw<Object>(
+ isolate->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
}
-static void CopyObjectToObjectElements(FixedArrayBase* from_base,
+static void CopyObjectToObjectElements(Handle<FixedArrayBase> from_base,
ElementsKind from_kind,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
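
The rewrite of ThrowArrayLengthRangeError above is the template for the whole file: MaybeObject*/Failure* gives way to MaybeHandle<T>, an empty handle that means "an exception is pending on the isolate". Callers unwrap it with ASSIGN_RETURN_ON_EXCEPTION (visible in AddElementsToFixedArray later in this diff). A hedged sketch of the convention; the wrapper function is hypothetical, while the Throw/NewRangeError call shapes mirror the patch:

MUST_USE_RESULT
static MaybeHandle<Object> CheckedLengthSketch(Isolate* isolate,
                                               Handle<Object> length,
                                               bool in_range) {
  if (!in_range) {
    // Returns an empty MaybeHandle after scheduling the exception.
    return isolate->Throw<Object>(
        isolate->factory()->NewRangeError("invalid_array_length",
                                          HandleVector<Object>(NULL, 0)));
  }
  return length;  // implicit Handle<T> -> MaybeHandle<T> conversion
}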
@@ -168,7 +168,7 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
@@ -176,8 +176,8 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
@@ -188,23 +188,24 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
- if (!heap->InNewSpace(to)) {
+ if (!heap->InNewSpace(*to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
copy_size);
}
- heap->incremental_marking()->RecordWrites(to);
+ heap->incremental_marking()->RecordWrites(*to);
}
}
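
Worth calling out in the hunk above: after the raw word copy, the helper replays the GC bookkeeping by hand, RecordWrites for the store buffer when the target may live in old space, plus incremental_marking()->RecordWrites so the concurrent marker re-visits the mutated range; the explicit *to dereferences appear because those heap APIs still take raw object pointers, not handles. A condensed sketch of the pattern (the wrapper and the CopyWords stand-in are illustrative; the RecordWrites call shapes mirror the diff):

static void CopyTaggedAndNotifyGcSketch(Heap* heap, Handle<FixedArray> to,
                                        Object** to_addr, Object** from_addr,
                                        uint32_t to_start, int copy_size) {
  CopyWords(to_addr, from_addr, copy_size);        // raw pointer copy
  if (!heap->InNewSpace(*to)) {
    // Teach the store buffer about possible old-to-new pointers.
    heap->RecordWrites(to->address(), to->OffsetOfElementAt(to_start),
                       copy_size);
  }
  heap->incremental_marking()->RecordWrites(*to);  // keep the marker in sync
}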
-static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
+static void CopyDictionaryToObjectElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ Handle<SeededNumberDictionary> from =
+ Handle<SeededNumberDictionary>::cast(from_base);
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
@@ -217,15 +218,15 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
}
- ASSERT(to_base != from_base);
+ ASSERT(*to_base != *from_base);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
- FixedArray* to = FixedArray::cast(to_base);
+ Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -241,23 +242,22 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
}
}
if (IsFastObjectElementsKind(to_kind)) {
- if (!heap->InNewSpace(to)) {
+ if (!heap->InNewSpace(*to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
copy_size);
}
- heap->incremental_marking()->RecordWrites(to);
+ heap->incremental_marking()->RecordWrites(*to);
}
}
-MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
- FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
+static void CopyDoubleToObjectElements(Handle<FixedArrayBase> from_base,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to_base,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int raw_copy_size) {
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@@ -273,49 +273,36 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return from_base;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
+ if (copy_size == 0) return;
+ Isolate* isolate = from_base->GetIsolate();
+ Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base);
+ Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
for (int i = 0; i < copy_size; ++i) {
+ HandleScope scope(isolate);
if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
- return Failure::Exception();
} else {
- MaybeObject* maybe_value = from->get(i + from_start);
- Object* value;
ASSERT(IsFastObjectElementsKind(to_kind));
- // Because Double -> Object elements transitions allocate HeapObjects
- // iteratively, the allocate must succeed within a single GC cycle,
- // otherwise the retry after the GC will also fail. In order to ensure
- // that no GC is triggered, allocate HeapNumbers from old space if they
- // can't be taken from new space.
- if (!maybe_value->ToObject(&value)) {
- ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
- Heap* heap = from->GetHeap();
- MaybeObject* maybe_value_object =
- heap->AllocateHeapNumber(from->get_scalar(i + from_start),
- TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
- }
- to->set(i + to_start, value, UPDATE_WRITE_BARRIER);
+ Handle<Object> value = FixedDoubleArray::get(from, i + from_start);
+ to->set(i + to_start, *value, UPDATE_WRITE_BARRIER);
}
}
- return to;
}
-static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
+static void CopyDoubleToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -324,15 +311,15 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
to_base->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
@@ -344,11 +331,12 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
}
-static void CopySmiToDoubleElements(FixedArrayBase* from_base,
+static void CopySmiToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -356,20 +344,20 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
+ Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
Object* hole_or_smi = from->get(from_start);
- if (hole_or_smi == the_hole) {
+ if (hole_or_smi == *the_hole) {
to->set_the_hole(to_start);
} else {
to->set(to_start, Smi::cast(hole_or_smi)->value());
@@ -378,12 +366,13 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
+static void CopyPackedSmiToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int packed_size,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
uint32_t to_end;
if (raw_copy_size < 0) {
@@ -393,7 +382,7 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
to_end = to_base->length();
for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
@@ -406,8 +395,8 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
@@ -417,11 +406,12 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
+static void CopyObjectToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -429,20 +419,20 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
+ Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
Object* hole_or_object = from->get(from_start);
- if (hole_or_object == the_hole) {
+ if (hole_or_object == *the_hole) {
to->set_the_hole(to_start);
} else {
to->set(to_start, hole_or_object->Number());
@@ -451,12 +441,14 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
+static void CopyDictionaryToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ Handle<SeededNumberDictionary> from =
+ Handle<SeededNumberDictionary>::cast(from_base);
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (copy_size < 0) {
ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
@@ -464,12 +456,12 @@ static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
copy_size = from->max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
if (copy_size == 0) return;
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -505,12 +497,13 @@ static void TraceTopFrame(Isolate* isolate) {
}
-void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
+void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t key,
bool allow_appending) {
+ DisallowHeapAllocation no_allocation;
Object* raw_length = NULL;
const char* elements_type = "array";
if (obj->IsJSArray()) {
- JSArray* array = JSArray::cast(obj);
+ JSArray* array = JSArray::cast(*obj);
raw_length = array->length();
} else {
raw_length = Smi::FromInt(obj->elements()->length());
@@ -571,21 +564,21 @@ class ElementsAccessorBase : public ElementsAccessor {
typedef ElementsTraitsParam ElementsTraits;
typedef typename ElementsTraitsParam::BackingStore BackingStore;
- virtual ElementsKind kind() const { return ElementsTraits::Kind; }
+ virtual ElementsKind kind() const V8_FINAL V8_OVERRIDE {
+ return ElementsTraits::Kind;
+ }
- static void ValidateContents(JSObject* holder, int length) {
+ static void ValidateContents(Handle<JSObject> holder, int length) {
}
- static void ValidateImpl(JSObject* holder) {
- FixedArrayBase* fixed_array_base = holder->elements();
- // When objects are first allocated, its elements are Failures.
- if (fixed_array_base->IsFailure()) return;
+ static void ValidateImpl(Handle<JSObject> holder) {
+ Handle<FixedArrayBase> fixed_array_base(holder->elements());
if (!fixed_array_base->IsHeapObject()) return;
// Arrays that have been shifted in place can't be verified.
if (fixed_array_base->IsFiller()) return;
int length = 0;
if (holder->IsJSArray()) {
- Object* length_obj = JSArray::cast(holder)->length();
+ Object* length_obj = Handle<JSArray>::cast(holder)->length();
if (length_obj->IsSmi()) {
length = Smi::cast(length_obj)->value();
}
@@ -595,37 +588,33 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateContents(holder, length);
}
- virtual void Validate(JSObject* holder) {
+ virtual void Validate(Handle<JSObject> holder) V8_FINAL V8_OVERRIDE {
+ DisallowHeapAllocation no_gc;
ElementsAccessorSubclass::ValidateImpl(holder);
}
- static bool HasElementImpl(Object* receiver,
- JSObject* holder,
+ static bool HasElementImpl(Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ Handle<FixedArrayBase> backing_store) {
return ElementsAccessorSubclass::GetAttributesImpl(
receiver, holder, key, backing_store) != ABSENT;
}
- virtual bool HasElement(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
+ virtual bool HasElement(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, backing_store);
}
- MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
-
+ MUST_USE_RESULT virtual MaybeHandle<Object> Get(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
FLAG_trace_js_array_abuse) {
CheckArrayAbuse(holder, "elements read", key);
@@ -640,157 +629,161 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, backing_store);
}
- MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
- ? BackingStore::cast(backing_store)->get(key)
- : backing_store->GetHeap()->the_hole_value();
+ MUST_USE_RESULT static MaybeHandle<Object> GetImpl(
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store) {
+ if (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
+ return BackingStore::get(Handle<BackingStore>::cast(backing_store), key);
+ } else {
+ return backing_store->GetIsolate()->factory()->the_hole_value();
+ }
}
MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Object* receiver,
- JSObject* holder,
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
+ Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::GetAttributesImpl(
receiver, holder, key, backing_store);
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* backing_store) {
+ Handle<FixedArrayBase> backing_store) {
if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
return ABSENT;
}
- return BackingStore::cast(backing_store)->is_the_hole(key) ? ABSENT : NONE;
+ return
+ Handle<BackingStore>::cast(backing_store)->is_the_hole(key)
+ ? ABSENT : NONE;
}
MUST_USE_RESULT virtual PropertyType GetType(
- Object* receiver,
- JSObject* holder,
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
+ Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::GetTypeImpl(
receiver, holder, key, backing_store);
}
MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store) {
if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
return NONEXISTENT;
}
- return BackingStore::cast(backing_store)->is_the_hole(key)
- ? NONEXISTENT : FIELD;
+ return
+ Handle<BackingStore>::cast(backing_store)->is_the_hole(key)
+ ? NONEXISTENT : FIELD;
}
- MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
- Object* receiver,
- JSObject* holder,
+ MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
+ Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::GetAccessorPairImpl(
receiver, holder, key, backing_store);
}
- MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return NULL;
+ MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store) {
+ return MaybeHandle<AccessorPair>();
}
- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
- Object* length) {
+ MUST_USE_RESULT virtual MaybeHandle<Object> SetLength(
+ Handle<JSArray> array,
+ Handle<Object> length) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::SetLengthImpl(
- array, length, array->elements());
+ array, length, handle(array->elements()));
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store);
+ MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl(
+ Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> backing_store);
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
- JSArray* array,
+ virtual void SetCapacityAndLength(
+ Handle<JSArray> array,
int capacity,
- int length) {
- return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
- array,
- capacity,
- length);
+ int length) V8_FINAL V8_OVERRIDE {
+ ElementsAccessorSubclass::
+ SetFastElementsCapacityAndLength(array, capacity, length);
}
- MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength(
- JSObject* obj,
+ static void SetFastElementsCapacityAndLength(
+ Handle<JSObject> obj,
int capacity,
int length) {
UNIMPLEMENTED();
- return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
-
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_OVERRIDE = 0;
+
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
UNREACHABLE();
- return NULL;
}
- MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
- uint32_t from_start,
- ElementsKind from_kind,
- FixedArrayBase* to,
- uint32_t to_start,
- int copy_size,
- FixedArrayBase* from) {
+ virtual void CopyElements(
+ Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ ElementsKind from_kind,
+ Handle<FixedArrayBase> to,
+ uint32_t to_start,
+ int copy_size) V8_FINAL V8_OVERRIDE {
+ ASSERT(!from.is_null());
+ ElementsAccessorSubclass::CopyElementsImpl(
+ from, from_start, to, from_kind, to_start, kPackedSizeNotKnown,
+ copy_size);
+ }
+
+ virtual void CopyElements(
+ JSObject* from_holder,
+ uint32_t from_start,
+ ElementsKind from_kind,
+ Handle<FixedArrayBase> to,
+ uint32_t to_start,
+ int copy_size) V8_FINAL V8_OVERRIDE {
int packed_size = kPackedSizeNotKnown;
- if (from == NULL) {
- from = from_holder->elements();
- }
-
- if (from_holder) {
- bool is_packed = IsFastPackedElementsKind(from_kind) &&
- from_holder->IsJSArray();
- if (is_packed) {
- packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
- if (copy_size >= 0 && packed_size > copy_size) {
- packed_size = copy_size;
- }
+ bool is_packed = IsFastPackedElementsKind(from_kind) &&
+ from_holder->IsJSArray();
+ if (is_packed) {
+ packed_size =
+ Smi::cast(JSArray::cast(from_holder)->length())->value();
+ if (copy_size >= 0 && packed_size > copy_size) {
+ packed_size = copy_size;
}
}
- return ElementsAccessorSubclass::CopyElementsImpl(
+ Handle<FixedArrayBase> from(from_holder->elements());
+ ElementsAccessorSubclass::CopyElementsImpl(
from, from_start, to, from_kind, to_start, packed_size, copy_size);
}
- MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
- Object* receiver,
- JSObject* holder,
- FixedArray* to,
- FixedArrayBase* from) {
+ virtual MaybeHandle<FixedArray> AddElementsToFixedArray(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ Handle<FixedArray> to,
+ Handle<FixedArrayBase> from) V8_FINAL V8_OVERRIDE {
int len0 = to->length();
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
@@ -799,25 +792,26 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
#endif
- if (from == NULL) {
- from = holder->elements();
- }
// Optimize if 'other' is empty.
// We cannot optimize if 'this' is empty, as other may have holes.
uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(from);
if (len1 == 0) return to;
+ Isolate* isolate = from->GetIsolate();
+
// Compute how many elements are not in other.
uint32_t extra = 0;
for (uint32_t y = 0; y < len1; y++) {
uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
if (ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, from)) {
- MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
- Object* value;
- if (!maybe_value->To(&value)) return maybe_value;
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value,
+ ElementsAccessorSubclass::GetImpl(receiver, holder, key, from),
+ FixedArray);
+
ASSERT(!value->IsTheHole());
if (!HasKey(to, value)) {
extra++;
@@ -828,9 +822,7 @@ class ElementsAccessorBase : public ElementsAccessor {
if (extra == 0) return to;
// Allocate the result
- FixedArray* result;
- MaybeObject* maybe_obj = from->GetHeap()->AllocateFixedArray(len0 + extra);
- if (!maybe_obj->To(&result)) return maybe_obj;
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(len0 + extra);
// Fill in the content
{
@@ -849,12 +841,13 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
if (ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, from)) {
- MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
- Object* value;
- if (!maybe_value->To(&value)) return maybe_value;
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value,
+ ElementsAccessorSubclass::GetImpl(receiver, holder, key, from),
+ FixedArray);
if (!value->IsTheHole() && !HasKey(to, value)) {
- result->set(len0 + index, value);
+ result->set(len0 + index, *value);
index++;
}
}
@@ -864,21 +857,22 @@ class ElementsAccessorBase : public ElementsAccessor {
}
protected:
- static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
+ static uint32_t GetCapacityImpl(Handle<FixedArrayBase> backing_store) {
return backing_store->length();
}
- virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
+ virtual uint32_t GetCapacity(Handle<FixedArrayBase> backing_store)
+ V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
}
- static uint32_t GetKeyForIndexImpl(FixedArrayBase* backing_store,
+ static uint32_t GetKeyForIndexImpl(Handle<FixedArrayBase> backing_store,
uint32_t index) {
return index;
}
- virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
- uint32_t index) {
+ virtual uint32_t GetKeyForIndex(Handle<FixedArrayBase> backing_store,
+ uint32_t index) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
}
@@ -899,51 +893,55 @@ class FastElementsAccessor
KindTraits>(name) {}
protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
- friend class NonStrictArgumentsElementsAccessor;
+ friend class SloppyArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore;
  // Adjusts the length of the fast backing store and returns the new length,
  // or returns undefined when the array should be converted to a slow
  // backing store.
- static MaybeObject* SetLengthWithoutNormalize(FixedArrayBase* backing_store,
- JSArray* array,
- Object* length_object,
- uint32_t length) {
+ static Handle<Object> SetLengthWithoutNormalize(
+ Handle<FixedArrayBase> backing_store,
+ Handle<JSArray> array,
+ Handle<Object> length_object,
+ uint32_t length) {
+ Isolate* isolate = array->GetIsolate();
uint32_t old_capacity = backing_store->length();
- Object* old_length = array->length();
+ Handle<Object> old_length(array->length(), isolate);
bool same_or_smaller_size = old_length->IsSmi() &&
- static_cast<uint32_t>(Smi::cast(old_length)->value()) >= length;
+ static_cast<uint32_t>(Handle<Smi>::cast(old_length)->value()) >= length;
ElementsKind kind = array->GetElementsKind();
if (!same_or_smaller_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
- MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ JSObject::TransitionElementsKind(array, kind);
}
// Check whether the backing store should be shrunk.
if (length <= old_capacity) {
if (array->HasFastSmiOrObjectElements()) {
- MaybeObject* maybe_obj = array->EnsureWritableFastElements();
- if (!maybe_obj->To(&backing_store)) return maybe_obj;
+ backing_store = JSObject::EnsureWritableFastElements(array);
}
if (2 * length <= old_capacity) {
// If more than half the elements won't be used, trim the array.
if (length == 0) {
array->initialize_elements();
} else {
- backing_store->set_length(length);
+ int filler_size = (old_capacity - length) * ElementSize;
Address filler_start = backing_store->address() +
BackingStore::OffsetOfElementAt(length);
- int filler_size = (old_capacity - length) * ElementSize;
array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+
+ // We are storing the new length using release store after creating a
+ // filler for the left-over space to avoid races with the sweeper
+ // thread.
+ backing_store->synchronized_set_length(length);
}
} else {
// Otherwise, fill the unused tail with holes.
int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
- BackingStore::cast(backing_store)->set_the_hole(i);
+ Handle<BackingStore>::cast(backing_store)->set_the_hole(i);
}
}
return length_object;
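
The trimming path above is ordered deliberately: the filler object is written over the freed tail first, and only then is the shorter length published with synchronized_set_length, a release store, so a concurrently sweeping thread can never observe a length that still covers unformatted memory. In std::atomic terms the ordering looks roughly like this (a sketch, not V8 code):

#include <atomic>

struct TrimSketch {
  std::atomic<int> length;
  void Trim(int new_length, int old_capacity) {
    WriteFillerOverTail(new_length, old_capacity);        // step 1: format tail
    length.store(new_length, std::memory_order_release);  // step 2: publish
  }
  void WriteFillerOverTail(int, int) {}  // stand-in for CreateFillerObjectAt
};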
@@ -953,53 +951,49 @@ class FastElementsAccessor
uint32_t min = JSObject::NewElementsCapacity(old_capacity);
uint32_t new_capacity = length > min ? length : min;
if (!array->ShouldConvertToSlowElements(new_capacity)) {
- MaybeObject* result = FastElementsAccessorSubclass::
+ FastElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, new_capacity, length);
- if (result->IsFailure()) return result;
- array->ValidateElements();
+ JSObject::ValidateElements(array);
return length_object;
}
// Request conversion to slow elements.
- return array->GetHeap()->undefined_value();
+ return isolate->factory()->undefined_value();
}
- static MaybeObject* DeleteCommon(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ static Handle<Object> DeleteCommon(Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
ASSERT(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
+ Isolate* isolate = obj->GetIsolate();
Heap* heap = obj->GetHeap();
- Object* elements = obj->elements();
- if (elements == heap->empty_fixed_array()) {
- return heap->true_value();
- }
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
- bool is_non_strict_arguments_elements_map =
- backing_store->map() == heap->non_strict_arguments_elements_map();
- if (is_non_strict_arguments_elements_map) {
- backing_store = KindTraits::BackingStore::cast(
- FixedArray::cast(backing_store)->get(1));
+ Handle<FixedArrayBase> elements(obj->elements());
+ if (*elements == heap->empty_fixed_array()) {
+ return isolate->factory()->true_value();
+ }
+ Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
+ bool is_sloppy_arguments_elements_map =
+ backing_store->map() == heap->sloppy_arguments_elements_map();
+ if (is_sloppy_arguments_elements_map) {
+ backing_store = handle(
+ BackingStore::cast(Handle<FixedArray>::cast(backing_store)->get(1)),
+ isolate);
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
- ? Smi::cast(JSArray::cast(obj)->length())->value()
+ ? Smi::cast(Handle<JSArray>::cast(obj)->length())->value()
: backing_store->length());
if (key < length) {
- if (!is_non_strict_arguments_elements_map) {
+ if (!is_sloppy_arguments_elements_map) {
ElementsKind kind = KindTraits::Kind;
if (IsFastPackedElementsKind(kind)) {
- MaybeObject* transitioned =
- obj->TransitionElementsKind(GetHoleyElementsKind(kind));
- if (transitioned->IsFailure()) return transitioned;
+ JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind));
}
if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
- Object* writable;
- MaybeObject* maybe = obj->EnsureWritableFastElements();
- if (!maybe->ToObject(&writable)) return maybe;
- backing_store = KindTraits::BackingStore::cast(writable);
+ Handle<Object> writable = JSObject::EnsureWritableFastElements(obj);
+ backing_store = Handle<BackingStore>::cast(writable);
}
}
backing_store->set_the_hole(key);
@@ -1009,7 +1003,7 @@ class FastElementsAccessor
// one adjacent hole to the value being deleted.
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck &&
- !heap->InNewSpace(backing_store) &&
+ !heap->InNewSpace(*backing_store) &&
((key > 0 && backing_store->is_the_hole(key - 1)) ||
(key + 1 < length && backing_store->is_the_hole(key + 1)))) {
int num_used = 0;
@@ -1019,47 +1013,49 @@ class FastElementsAccessor
if (4 * num_used > backing_store->length()) break;
}
if (4 * num_used <= backing_store->length()) {
- MaybeObject* result = obj->NormalizeElements();
- if (result->IsFailure()) return result;
+ JSObject::NormalizeElements(obj);
}
}
}
- return heap->true_value();
+ return isolate->factory()->true_value();
}
- virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ virtual MaybeHandle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
return DeleteCommon(obj, key, mode);
}
static bool HasElementImpl(
- Object* receiver,
- JSObject* holder,
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ Handle<FixedArrayBase> backing_store) {
if (key >= static_cast<uint32_t>(backing_store->length())) {
return false;
}
- return !BackingStore::cast(backing_store)->is_the_hole(key);
+ return !Handle<BackingStore>::cast(backing_store)->is_the_hole(key);
}
- static void ValidateContents(JSObject* holder, int length) {
+ static void ValidateContents(Handle<JSObject> holder, int length) {
#if DEBUG
- FixedArrayBase* elements = holder->elements();
- Heap* heap = elements->GetHeap();
+ Isolate* isolate = holder->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<FixedArrayBase> elements(holder->elements(), isolate);
Map* map = elements->map();
ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
- (map == heap->fixed_array_map() ||
- map == heap->fixed_cow_array_map())) ||
+ (map == isolate->heap()->fixed_array_map() ||
+ map == isolate->heap()->fixed_cow_array_map())) ||
(IsFastDoubleElementsKind(KindTraits::Kind) ==
- ((map == heap->fixed_array_map() && length == 0) ||
- map == heap->fixed_double_array_map())));
+ ((map == isolate->heap()->fixed_array_map() && length == 0) ||
+ map == isolate->heap()->fixed_double_array_map())));
+ DisallowHeapAllocation no_gc;
for (int i = 0; i < length; i++) {
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
+ HandleScope scope(isolate);
+ Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
- static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
+ BackingStore::get(backing_store, i)->IsSmi()) ||
(IsFastHoleyElementsKind(KindTraits::Kind) ==
backing_store->is_the_hole(i)));
}
@@ -1068,7 +1064,7 @@ class FastElementsAccessor
};
-static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) {
+static inline ElementsKind ElementsKindForArray(Handle<FixedArrayBase> array) {
switch (array->map()->instance_type()) {
case FIXED_ARRAY_TYPE:
if (array->IsDictionary()) {
@@ -1078,24 +1074,16 @@ static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) {
}
case FIXED_DOUBLE_ARRAY_TYPE:
return FAST_HOLEY_DOUBLE_ELEMENTS;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return EXTERNAL_BYTE_ELEMENTS;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return EXTERNAL_SHORT_ELEMENTS;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
- case EXTERNAL_INT_ARRAY_TYPE:
- return EXTERNAL_INT_ELEMENTS;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_INT_ELEMENTS;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return EXTERNAL_FLOAT_ELEMENTS;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return EXTERNAL_DOUBLE_ELEMENTS;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return EXTERNAL_PIXEL_ELEMENTS;
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ return EXTERNAL_##TYPE##_ELEMENTS; \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ return TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
default:
UNREACHABLE();
}
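
The TYPED_ARRAYS(TYPED_ARRAY_CASE) expansion above replaces nine hand-written case pairs: each list row supplies (Type, type, TYPE, ctype, size), and the case macro emits both the external and the fixed instance-type cases for that row. For the Int8 entry the preprocessor output reads roughly as follows (shown for illustration; this fragment belongs inside the switch above):

    case EXTERNAL_INT8_ARRAY_TYPE:
      return EXTERNAL_INT8_ELEMENTS;
    case FIXED_INT8_ARRAY_TYPE:
      return INT8_ELEMENTS;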
@@ -1115,13 +1103,13 @@ class FastSmiOrObjectElementsAccessor
KindTraits,
kPointerSize>(name) {}
- static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
ElementsKind to_kind = KindTraits::Kind;
switch (from_kind) {
case FAST_SMI_ELEMENTS:
@@ -1130,50 +1118,48 @@ class FastSmiOrObjectElementsAccessor
case FAST_HOLEY_ELEMENTS:
CopyObjectToObjectElements(
from, from_kind, from_start, to, to_kind, to_start, copy_size);
- return to->GetHeap()->undefined_value();
+ break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- return CopyDoubleToObjectElements(
+ CopyDoubleToObjectElements(
from, from_start, to, to_kind, to_start, copy_size);
+ break;
case DICTIONARY_ELEMENTS:
CopyDictionaryToObjectElements(
from, from_start, to, to_kind, to_start, copy_size);
- return to->GetHeap()->undefined_value();
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ break;
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
// TODO(verwaest): This is a temporary hack to support extending
- // NON_STRICT_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
+ // SLOPPY_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
// This case should be UNREACHABLE().
- FixedArray* parameter_map = FixedArray::cast(from);
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(from);
+ Handle<FixedArrayBase> arguments(
+ FixedArrayBase::cast(parameter_map->get(1)));
ElementsKind from_kind = ElementsKindForArray(arguments);
- return CopyElementsImpl(arguments, from_start, to, from_kind,
- to_start, packed_size, copy_size);
+ CopyElementsImpl(arguments, from_start, to, from_kind,
+ to_start, packed_size, copy_size);
+ break;
}
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
UNREACHABLE();
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
}
- return NULL;
}
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
+ static void SetFastElementsCapacityAndLength(
+ Handle<JSObject> obj,
+ uint32_t capacity,
+ uint32_t length) {
JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
obj->HasFastSmiElements()
? JSObject::kAllowSmiElements
: JSObject::kDontAllowSmiElements;
- return obj->SetFastElementsCapacityAndLength(capacity,
- length,
- set_capacity_mode);
+ JSObject::SetFastElementsCapacityAndLength(
+ obj, capacity, length, set_capacity_mode);
}
};
@@ -1238,21 +1224,20 @@ class FastDoubleElementsAccessor
KindTraits,
kDoubleSize>(name) {}
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
- return obj->SetFastDoubleElementsCapacityAndLength(capacity,
- length);
+ static void SetFastElementsCapacityAndLength(Handle<JSObject> obj,
+ uint32_t capacity,
+ uint32_t length) {
+ JSObject::SetFastDoubleElementsCapacityAndLength(obj, capacity, length);
}
protected:
- static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
switch (from_kind) {
case FAST_SMI_ELEMENTS:
CopyPackedSmiToDoubleElements(
@@ -1273,19 +1258,16 @@ class FastDoubleElementsAccessor
CopyDictionaryToDoubleElements(
from, from_start, to, to_start, copy_size);
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
UNREACHABLE();
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
}
- return to->GetHeap()->undefined_value();
}
};
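
A brief rationale for the hunk above: FixedDoubleArray stores raw IEEE-754 doubles rather than tagged pointers, which is why CopyDoubleToDoubleElements can compute byte addresses with kDoubleSize strides, needs no write barrier, and can run under DisallowHeapAllocation. The address arithmetic, reduced to a standalone sketch with plain C types (assuming kDoubleSize == sizeof(double) and a header_size-byte object header):

#include <cstring>

static void CopyDoublesSketch(char* to_base, char* from_base, int header_size,
                              unsigned to_start, unsigned from_start,
                              int copy_size) {
  char* to = to_base + header_size + sizeof(double) * to_start;
  char* from = from_base + header_size + sizeof(double) * from_start;
  // Raw doubles carry no heap pointers, so a plain memmove is GC-safe.
  memmove(to, from, sizeof(double) * copy_size);
}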
@@ -1320,168 +1302,98 @@ class FastHoleyDoubleElementsAccessor
// Super class for all external element arrays.
-template<typename ExternalElementsAccessorSubclass,
- ElementsKind Kind>
-class ExternalElementsAccessor
- : public ElementsAccessorBase<ExternalElementsAccessorSubclass,
+template<ElementsKind Kind>
+class TypedElementsAccessor
+ : public ElementsAccessorBase<TypedElementsAccessor<Kind>,
ElementsKindTraits<Kind> > {
public:
- explicit ExternalElementsAccessor(const char* name)
- : ElementsAccessorBase<ExternalElementsAccessorSubclass,
+ explicit TypedElementsAccessor(const char* name)
+ : ElementsAccessorBase<AccessorClass,
ElementsKindTraits<Kind> >(name) {}
protected:
typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
+ typedef TypedElementsAccessor<Kind> AccessorClass;
- friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
+ friend class ElementsAccessorBase<AccessorClass,
ElementsKindTraits<Kind> >;
- MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? BackingStore::cast(backing_store)->get(key)
- : backing_store->GetHeap()->undefined_value();
+ MUST_USE_RESULT static MaybeHandle<Object> GetImpl(
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store) {
+ if (key < AccessorClass::GetCapacityImpl(backing_store)) {
+ return BackingStore::get(Handle<BackingStore>::cast(backing_store), key);
+ } else {
+ return backing_store->GetIsolate()->factory()->undefined_value();
+ }
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* backing_store) {
+ Handle<FixedArrayBase> backing_store) {
return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ key < AccessorClass::GetCapacityImpl(backing_store)
? NONE : ABSENT;
}
MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* backing_store) {
+ Handle<FixedArrayBase> backing_store) {
return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ key < AccessorClass::GetCapacityImpl(backing_store)
? FIELD : NONEXISTENT;
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store) {
+ MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl(
+ Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
// External arrays always ignore deletes.
- return obj->GetHeap()->true_value();
+ return obj->GetIsolate()->factory()->true_value();
}
- static bool HasElementImpl(Object* receiver,
- JSObject* holder,
+ static bool HasElementImpl(Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ Handle<FixedArrayBase> backing_store) {
uint32_t capacity =
- ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
+ AccessorClass::GetCapacityImpl(backing_store);
return key < capacity;
}
};
-class ExternalByteElementsAccessor
- : public ExternalElementsAccessor<ExternalByteElementsAccessor,
- EXTERNAL_BYTE_ELEMENTS> {
- public:
- explicit ExternalByteElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalByteElementsAccessor,
- EXTERNAL_BYTE_ELEMENTS>(name) {}
-};
+#define EXTERNAL_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
+ typedef TypedElementsAccessor<EXTERNAL_##TYPE##_ELEMENTS> \
+ External##Type##ElementsAccessor;
-class ExternalUnsignedByteElementsAccessor
- : public ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS> {
- public:
- explicit ExternalUnsignedByteElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS>(name) {}
-};
+TYPED_ARRAYS(EXTERNAL_ELEMENTS_ACCESSOR)
+#undef EXTERNAL_ELEMENTS_ACCESSOR
+#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
+ typedef TypedElementsAccessor<TYPE##_ELEMENTS > \
+ Fixed##Type##ElementsAccessor;
-class ExternalShortElementsAccessor
- : public ExternalElementsAccessor<ExternalShortElementsAccessor,
- EXTERNAL_SHORT_ELEMENTS> {
- public:
- explicit ExternalShortElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalShortElementsAccessor,
- EXTERNAL_SHORT_ELEMENTS>(name) {}
-};
-
-
-class ExternalUnsignedShortElementsAccessor
- : public ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS> {
- public:
- explicit ExternalUnsignedShortElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS>(name) {}
-};
+TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
+#undef FIXED_ELEMENTS_ACCESSOR
-class ExternalIntElementsAccessor
- : public ExternalElementsAccessor<ExternalIntElementsAccessor,
- EXTERNAL_INT_ELEMENTS> {
- public:
- explicit ExternalIntElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalIntElementsAccessor,
- EXTERNAL_INT_ELEMENTS>(name) {}
-};
-
-
-class ExternalUnsignedIntElementsAccessor
- : public ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
- EXTERNAL_UNSIGNED_INT_ELEMENTS> {
- public:
- explicit ExternalUnsignedIntElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
- EXTERNAL_UNSIGNED_INT_ELEMENTS>(name) {}
-};
-
-
-class ExternalFloatElementsAccessor
- : public ExternalElementsAccessor<ExternalFloatElementsAccessor,
- EXTERNAL_FLOAT_ELEMENTS> {
- public:
- explicit ExternalFloatElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalFloatElementsAccessor,
- EXTERNAL_FLOAT_ELEMENTS>(name) {}
-};
-
-
-class ExternalDoubleElementsAccessor
- : public ExternalElementsAccessor<ExternalDoubleElementsAccessor,
- EXTERNAL_DOUBLE_ELEMENTS> {
- public:
- explicit ExternalDoubleElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalDoubleElementsAccessor,
- EXTERNAL_DOUBLE_ELEMENTS>(name) {}
-};
-
-
-class PixelElementsAccessor
- : public ExternalElementsAccessor<PixelElementsAccessor,
- EXTERNAL_PIXEL_ELEMENTS> {
- public:
- explicit PixelElementsAccessor(const char* name)
- : ExternalElementsAccessor<PixelElementsAccessor,
- EXTERNAL_PIXEL_ELEMENTS>(name) {}
-};
-
class DictionaryElementsAccessor
: public ElementsAccessorBase<DictionaryElementsAccessor,
@@ -1493,13 +1405,14 @@ class DictionaryElementsAccessor
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
- MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
- FixedArrayBase* store,
- JSArray* array,
- Object* length_object,
+ static Handle<Object> SetLengthWithoutNormalize(
+ Handle<FixedArrayBase> store,
+ Handle<JSArray> array,
+ Handle<Object> length_object,
uint32_t length) {
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
- Heap* heap = array->GetHeap();
+ Handle<SeededNumberDictionary> dict =
+ Handle<SeededNumberDictionary>::cast(store);
+ Isolate* isolate = array->GetIsolate();
int capacity = dict->Capacity();
uint32_t new_length = length;
uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
@@ -1507,6 +1420,7 @@ class DictionaryElementsAccessor
// Find last non-deletable element in range of elements to be
// deleted and adjust range accordingly.
for (int i = 0; i < capacity; i++) {
+ DisallowHeapAllocation no_gc;
Object* key = dict->KeyAt(i);
if (key->IsNumber()) {
uint32_t number = static_cast<uint32_t>(key->Number());
@@ -1517,22 +1431,18 @@ class DictionaryElementsAccessor
}
}
if (new_length != length) {
- MaybeObject* maybe_object = heap->NumberFromUint32(new_length);
- if (!maybe_object->To(&length_object)) return maybe_object;
+ length_object = isolate->factory()->NewNumberFromUint(new_length);
}
}
if (new_length == 0) {
- // If the length of a slow array is reset to zero, we clear
- // the array and flush backing storage. This has the added
- // benefit that the array returns to fast mode.
- Object* obj;
- MaybeObject* maybe_obj = array->ResetElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ // Flush the backing store.
+ JSObject::ResetElements(array);
} else {
+ DisallowHeapAllocation no_gc;
// Remove elements that should be deleted.
int removed_entries = 0;
- Object* the_hole_value = heap->the_hole_value();
+ Handle<Object> the_hole_value = isolate->factory()->the_hole_value();
for (int i = 0; i < capacity; i++) {
Object* key = dict->KeyAt(i);
if (key->IsNumber()) {
@@ -1550,60 +1460,56 @@ class DictionaryElementsAccessor
return length_object;
}
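The loop above implements the ES5 15.4.5.2 clamping rule: a shrink stops just past the highest non-configurable element in the removed range. A minimal standalone model of that rule (std::map stands in for SeededNumberDictionary and a bool for the "non-deletable" property detail; not V8 code):

#include <cstdint>
#include <cstdio>
#include <map>

// Returns the clamped length: the requested length, raised to index + 1
// of any non-deletable element that would otherwise be deleted.
uint32_t ClampNewLength(const std::map<uint32_t, bool>& dict,
                        uint32_t old_length, uint32_t requested) {
  uint32_t new_length = requested;
  for (const auto& entry : dict) {
    uint32_t index = entry.first;
    bool non_deletable = entry.second;
    if (non_deletable && index >= new_length && index < old_length) {
      new_length = index + 1;  // keep the sealed element alive
    }
  }
  return new_length;
}

int main() {
  // Element 3 is deletable, element 7 is not.
  std::map<uint32_t, bool> dict = {{3u, false}, {7u, true}};
  printf("%u\n", ClampNewLength(dict, 10, 0));  // prints 8, not 0
}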
- MUST_USE_RESULT static MaybeObject* DeleteCommon(
- JSObject* obj,
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteCommon(
+ Handle<JSObject> obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
Isolate* isolate = obj->GetIsolate();
- Heap* heap = isolate->heap();
- FixedArray* backing_store = FixedArray::cast(obj->elements());
+ Handle<FixedArray> backing_store(FixedArray::cast(obj->elements()),
+ isolate);
bool is_arguments =
- (obj->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS);
+ (obj->GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS);
if (is_arguments) {
- backing_store = FixedArray::cast(backing_store->get(1));
+ backing_store = handle(FixedArray::cast(backing_store->get(1)), isolate);
}
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(backing_store);
+ Handle<SeededNumberDictionary> dictionary =
+ Handle<SeededNumberDictionary>::cast(backing_store);
int entry = dictionary->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
- Object* result = dictionary->DeleteProperty(entry, mode);
- if (result == heap->false_value()) {
+ Handle<Object> result =
+ SeededNumberDictionary::DeleteProperty(dictionary, entry, mode);
+ if (*result == *isolate->factory()->false_value()) {
if (mode == JSObject::STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> holder(obj, isolate);
Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
- Handle<Object> args[2] = { name, holder };
+ Handle<Object> args[2] = { name, obj };
Handle<Object> error =
isolate->factory()->NewTypeError("strict_delete_property",
HandleVector(args, 2));
- return isolate->Throw(*error);
+ return isolate->Throw<Object>(error);
}
- return heap->false_value();
- }
- MaybeObject* maybe_elements = dictionary->Shrink(key);
- FixedArray* new_elements = NULL;
- if (!maybe_elements->To(&new_elements)) {
- return maybe_elements;
+ return isolate->factory()->false_value();
}
+ Handle<FixedArray> new_elements =
+ SeededNumberDictionary::Shrink(dictionary, key);
+
if (is_arguments) {
- FixedArray::cast(obj->elements())->set(1, new_elements);
+ FixedArray::cast(obj->elements())->set(1, *new_elements);
} else {
- obj->set_elements(new_elements);
+ obj->set_elements(*new_elements);
}
}
- return heap->true_value();
+ return isolate->factory()->true_value();
}
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
UNREACHABLE();
- return NULL;
}
@@ -1611,41 +1517,42 @@ class DictionaryElementsAccessor
friend class ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >;
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
return DeleteCommon(obj, key, mode);
}
- MUST_USE_RESULT static MaybeObject* GetImpl(
- Object* receiver,
- JSObject* obj,
+ MUST_USE_RESULT static MaybeHandle<Object> GetImpl(
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* store) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ Handle<FixedArrayBase> store) {
+ Handle<SeededNumberDictionary> backing_store =
+ Handle<SeededNumberDictionary>::cast(store);
+ Isolate* isolate = backing_store->GetIsolate();
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
- Object* element = backing_store->ValueAt(entry);
+ Handle<Object> element(backing_store->ValueAt(entry), isolate);
PropertyDetails details = backing_store->DetailsAt(entry);
if (details.type() == CALLBACKS) {
- return obj->GetElementWithCallback(receiver,
- element,
- key,
- obj);
+ return JSObject::GetElementWithCallback(
+ obj, receiver, element, key, obj);
} else {
return element;
}
}
- return obj->GetHeap()->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* backing_store) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(backing_store);
+ Handle<FixedArrayBase> backing_store) {
+ Handle<SeededNumberDictionary> dictionary =
+ Handle<SeededNumberDictionary>::cast(backing_store);
int entry = dictionary->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
return dictionary->DetailsAt(entry).attributes();
@@ -1654,11 +1561,12 @@ class DictionaryElementsAccessor
}
MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* store) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ Handle<FixedArrayBase> store) {
+ Handle<SeededNumberDictionary> backing_store =
+ Handle<SeededNumberDictionary>::cast(store);
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
return backing_store->DetailsAt(entry).type();
@@ -1666,76 +1574,87 @@ class DictionaryElementsAccessor
return NONEXISTENT;
}
- MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
- Object* receiver,
- JSObject* obj,
+ MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* store) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ Handle<FixedArrayBase> store) {
+ Handle<SeededNumberDictionary> backing_store =
+ Handle<SeededNumberDictionary>::cast(store);
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound &&
backing_store->DetailsAt(entry).type() == CALLBACKS &&
backing_store->ValueAt(entry)->IsAccessorPair()) {
- return AccessorPair::cast(backing_store->ValueAt(entry));
+ return handle(AccessorPair::cast(backing_store->ValueAt(entry)));
}
- return NULL;
+ return MaybeHandle<AccessorPair>();
}
- static bool HasElementImpl(Object* receiver,
- JSObject* holder,
+ static bool HasElementImpl(Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store) {
- return SeededNumberDictionary::cast(backing_store)->FindEntry(key) !=
- SeededNumberDictionary::kNotFound;
+ Handle<FixedArrayBase> store) {
+ Handle<SeededNumberDictionary> backing_store =
+ Handle<SeededNumberDictionary>::cast(store);
+ return backing_store->FindEntry(key) != SeededNumberDictionary::kNotFound;
}
- static uint32_t GetKeyForIndexImpl(FixedArrayBase* store,
+ static uint32_t GetKeyForIndexImpl(Handle<FixedArrayBase> store,
uint32_t index) {
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
+ DisallowHeapAllocation no_gc;
+ Handle<SeededNumberDictionary> dict =
+ Handle<SeededNumberDictionary>::cast(store);
Object* key = dict->KeyAt(index);
return Smi::cast(key)->value();
}
};
-class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> > {
+class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
+ SloppyArgumentsElementsAccessor,
+ ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> > {
public:
- explicit NonStrictArgumentsElementsAccessor(const char* name)
+ explicit SloppyArgumentsElementsAccessor(const char* name)
: ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >(name) {}
+ SloppyArgumentsElementsAccessor,
+ ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
protected:
friend class ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
-
- MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ SloppyArgumentsElementsAccessor,
+ ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> >;
+
+ MUST_USE_RESULT static MaybeHandle<Object> GetImpl(
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
+ uint32_t key,
+ Handle<FixedArrayBase> parameters) {
+ Isolate* isolate = obj->GetIsolate();
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
+ Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
+ DisallowHeapAllocation no_gc;
Context* context = Context::cast(parameter_map->get(0));
- int context_index = Smi::cast(probe)->value();
+ int context_index = Handle<Smi>::cast(probe)->value();
ASSERT(!context->get(context_index)->IsTheHole());
- return context->get(context_index);
+ return handle(context->get(context_index), isolate);
} else {
// Object is not mapped, defer to the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- MaybeObject* maybe_result = ElementsAccessor::ForArray(arguments)->Get(
- receiver, obj, key, arguments);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)),
+ isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ ElementsAccessor::ForArray(arguments)->Get(
+ receiver, obj, key, arguments),
+ Object);
// Elements of the arguments object in slow mode might be slow aliases.
if (result->IsAliasedArgumentsEntry()) {
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(result);
+ DisallowHeapAllocation no_gc;
+ AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*result);
Context* context = Context::cast(parameter_map->get(0));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
- return context->get(context_index);
+ return handle(context->get(context_index), isolate);
} else {
return result;
}
@@ -1743,78 +1662,80 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
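GetImpl above is the first use in this file of the new error-propagation macros. Their real definitions live elsewhere in the tree; inferred from the call sites in these hunks, the shape is roughly the following (a sketch that omits the internal assertions): assign on success, otherwise return early with the given failure value.

#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                              \
    if (!(call).ToHandle(&dst)) return (value);                     \
  } while (false)

// The non-VALUE form fails with an empty MaybeHandle<T>:
#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())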
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* backing_store) {
- FixedArray* parameter_map = FixedArray::cast(backing_store);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ Handle<FixedArrayBase> backing_store) {
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(backing_store);
+ Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
return NONE;
} else {
// If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
return ElementsAccessor::ForArray(arguments)->GetAttributes(
receiver, obj, key, arguments);
}
}
MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ Handle<FixedArrayBase> parameters) {
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
+ Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
return FIELD;
} else {
// If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
return ElementsAccessor::ForArray(arguments)->GetType(
receiver, obj, key, arguments);
}
}
- MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
- Object* receiver,
- JSObject* obj,
+ MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
+ Handle<Object> receiver,
+ Handle<JSObject> obj,
uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ Handle<FixedArrayBase> parameters) {
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
+ Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
- return NULL;
+ return MaybeHandle<AccessorPair>();
} else {
// If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
return ElementsAccessor::ForArray(arguments)->GetAccessorPair(
receiver, obj, key, arguments);
}
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* parameter_map) {
+ MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl(
+ Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
// correctly implement [[DefineOwnProperty]] on arrays.
UNIMPLEMENTED();
return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- FixedArray* parameter_map = FixedArray::cast(obj->elements());
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+ Isolate* isolate = obj->GetIsolate();
+ Handle<FixedArray> parameter_map(FixedArray::cast(obj->elements()));
+ Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
// TODO(kmillikin): We could check if this was the last aliased
// parameter, and revert to normal elements in that case. That
// would enable GC of the context.
parameter_map->set_the_hole(key + 2);
} else {
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
@@ -1824,63 +1745,70 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
}
}
- return obj->GetHeap()->true_value();
+ return isolate->factory()->true_value();
}
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
UNREACHABLE();
- return NULL;
}
- static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
- FixedArray* parameter_map = FixedArray::cast(backing_store);
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ static uint32_t GetCapacityImpl(Handle<FixedArrayBase> backing_store) {
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(backing_store);
+ Handle<FixedArrayBase> arguments(
+ FixedArrayBase::cast(parameter_map->get(1)));
return Max(static_cast<uint32_t>(parameter_map->length() - 2),
ForArray(arguments)->GetCapacity(arguments));
}
- static uint32_t GetKeyForIndexImpl(FixedArrayBase* dict,
+ static uint32_t GetKeyForIndexImpl(Handle<FixedArrayBase> dict,
uint32_t index) {
return index;
}
- static bool HasElementImpl(Object* receiver,
- JSObject* holder,
+ static bool HasElementImpl(Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(holder, parameter_map, key);
+ Handle<FixedArrayBase> parameters) {
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
+ Handle<Object> probe = GetParameterMapArg(holder, parameter_map, key);
if (!probe->IsTheHole()) {
return true;
} else {
- FixedArrayBase* arguments =
- FixedArrayBase::cast(FixedArray::cast(parameter_map)->get(1));
+ Isolate* isolate = holder->GetIsolate();
+ Handle<FixedArrayBase> arguments(FixedArrayBase::cast(
+ Handle<FixedArray>::cast(parameter_map)->get(1)), isolate);
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
- return !accessor->Get(receiver, holder, key, arguments)->IsTheHole();
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ accessor->Get(receiver, holder, key, arguments),
+ false);
+ return !value->IsTheHole();
}
}
private:
- static Object* GetParameterMapArg(JSObject* holder,
- FixedArray* parameter_map,
- uint32_t key) {
+ static Handle<Object> GetParameterMapArg(Handle<JSObject> holder,
+ Handle<FixedArray> parameter_map,
+ uint32_t key) {
+ Isolate* isolate = holder->GetIsolate();
uint32_t length = holder->IsJSArray()
- ? Smi::cast(JSArray::cast(holder)->length())->value()
+ ? Smi::cast(Handle<JSArray>::cast(holder)->length())->value()
: parameter_map->length();
return key < (length - 2)
- ? parameter_map->get(key + 2)
- : parameter_map->GetHeap()->the_hole_value();
+ ? handle(parameter_map->get(key + 2), isolate)
+ : Handle<Object>::cast(isolate->factory()->the_hole_value());
}
};
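Both GetImpl and GetParameterMapArg above rely on the sloppy-arguments layout: slot 0 of the parameter map holds the context, slot 1 the plain arguments store, and slot key + 2 either a context slot index or the hole. A minimal runnable model of that lookup (plain ints stand in for tagged values, and slots 0 and 1 are placeholders because the model passes the context and arguments separately; not V8 code):

#include <cstdio>
#include <vector>

const int kHole = -1;  // stand-in for the_hole_value

// Mirrors GetImpl: a non-hole probe is a context slot index for a
// mapped (aliased) parameter; otherwise fall back to the plain store.
int GetSloppyArg(const std::vector<int>& parameter_map,
                 const std::vector<int>& context,
                 const std::vector<int>& arguments, int key) {
  int probe = parameter_map[key + 2];
  if (probe != kHole) return context[probe];  // mapped: read through context
  return arguments[key];                      // unmapped: plain element
}

int main() {
  std::vector<int> context = {111, 222};
  std::vector<int> arguments = {10, 20, 30};
  // Argument 0 aliases context slot 1; arguments 1 and 2 are unmapped.
  std::vector<int> parameter_map = {0, 0, 1, kHole, kHole};
  printf("%d %d\n",
         GetSloppyArg(parameter_map, context, arguments, 0),   // 222
         GetSloppyArg(parameter_map, context, arguments, 1));  // 20
}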
-ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
+ElementsAccessor* ElementsAccessor::ForArray(Handle<FixedArrayBase> array) {
return elements_accessors_[ElementsKindForArray(array)];
}
@@ -1908,30 +1836,41 @@ void ElementsAccessor::TearDown() {
template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
- ElementsKindTraits>::
- SetLengthImpl(JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store) {
- JSArray* array = JSArray::cast(obj);
+MUST_USE_RESULT
+MaybeHandle<Object> ElementsAccessorBase<ElementsAccessorSubclass,
+ ElementsKindTraits>::
+ SetLengthImpl(Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> backing_store) {
+ Isolate* isolate = obj->GetIsolate();
+ Handle<JSArray> array = Handle<JSArray>::cast(obj);
// Fast case: The new length fits into a Smi.
- MaybeObject* maybe_smi_length = length->ToSmi();
- Object* smi_length = Smi::FromInt(0);
- if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
- const int value = Smi::cast(smi_length)->value();
+ Handle<Object> smi_length;
+
+ if (Object::ToSmi(isolate, length).ToHandle(&smi_length) &&
+ smi_length->IsSmi()) {
+ const int value = Handle<Smi>::cast(smi_length)->value();
if (value >= 0) {
- Object* new_length;
- MaybeObject* result = ElementsAccessorSubclass::
+ Handle<Object> new_length = ElementsAccessorSubclass::
SetLengthWithoutNormalize(backing_store, array, smi_length, value);
- if (!result->ToObject(&new_length)) return result;
- ASSERT(new_length->IsSmi() || new_length->IsUndefined());
+ ASSERT(!new_length.is_null());
+
+ // Even though the proposed length was a Smi, new_length can
+ // still be a heap number, because SetLengthWithoutNormalize does
+ // not allow the array length to drop below the index of the
+ // highest non-deletable element.
+ ASSERT(new_length->IsSmi() || new_length->IsHeapNumber() ||
+ new_length->IsUndefined());
if (new_length->IsSmi()) {
- array->set_length(Smi::cast(new_length));
+ array->set_length(*Handle<Smi>::cast(new_length));
+ return array;
+ } else if (new_length->IsHeapNumber()) {
+ array->set_length(*new_length);
return array;
}
} else {
- return ThrowArrayLengthRangeError(array->GetHeap());
+ return ThrowArrayLengthRangeError(isolate);
}
}
@@ -1940,97 +1879,89 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
if (length->IsNumber()) {
uint32_t value;
if (length->ToArrayIndex(&value)) {
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_object = array->NormalizeElements();
- if (!maybe_object->To(&dictionary)) return maybe_object;
- Object* new_length;
- MaybeObject* result = DictionaryElementsAccessor::
+ Handle<SeededNumberDictionary> dictionary =
+ JSObject::NormalizeElements(array);
+ ASSERT(!dictionary.is_null());
+
+ Handle<Object> new_length = DictionaryElementsAccessor::
SetLengthWithoutNormalize(dictionary, array, length, value);
- if (!result->ToObject(&new_length)) return result;
+ ASSERT(!new_length.is_null());
+
ASSERT(new_length->IsNumber());
- array->set_length(new_length);
+ array->set_length(*new_length);
return array;
} else {
- return ThrowArrayLengthRangeError(array->GetHeap());
+ return ThrowArrayLengthRangeError(isolate);
}
}
// Fall-back case: The new length is not a number so make the array
// size one and set its only element to the length value.
- FixedArray* new_backing_store;
- MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
- if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
- new_backing_store->set(0, length);
- { MaybeObject* result = array->SetContent(new_backing_store);
- if (result->IsFailure()) return result;
- }
+ Handle<FixedArray> new_backing_store = isolate->factory()->NewFixedArray(1);
+ new_backing_store->set(0, *length);
+ JSArray::SetContent(array, new_backing_store);
return array;
}
-MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
- JSArray* array, Arguments* args) {
- Heap* heap = array->GetIsolate()->heap();
-
+MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
+ Arguments* args) {
// Optimize the case where there is one argument and the argument is a
// small Smi.
if (args->length() == 1) {
- Object* obj = (*args)[0];
+ Handle<Object> obj = args->at<Object>(0);
if (obj->IsSmi()) {
- int len = Smi::cast(obj)->value();
+ int len = Handle<Smi>::cast(obj)->value();
if (len > 0 && len < JSObject::kInitialMaxFastElementArray) {
ElementsKind elements_kind = array->GetElementsKind();
- MaybeObject* maybe_array = array->Initialize(len, len);
- if (maybe_array->IsFailure()) return maybe_array;
+ JSArray::Initialize(array, len, len);
if (!IsFastHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(elements_kind);
- maybe_array = array->TransitionElementsKind(elements_kind);
- if (maybe_array->IsFailure()) return maybe_array;
+ JSObject::TransitionElementsKind(array, elements_kind);
}
-
return array;
} else if (len == 0) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ return array;
}
}
// Take the argument as the length.
- MaybeObject* maybe_obj = array->Initialize(0);
- if (!maybe_obj->To(&obj)) return maybe_obj;
+ JSArray::Initialize(array, 0);
- return array->SetElementsLength((*args)[0]);
+ return JSArray::SetElementsLength(array, obj);
}
// Optimize the case where there are no parameters passed.
if (args->length() == 0) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ return array;
}
+ Factory* factory = array->GetIsolate()->factory();
+
// Set length and elements on the array.
int number_of_elements = args->length();
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(args, 0, number_of_elements,
- ALLOW_CONVERTED_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
+ JSObject::EnsureCanContainElements(
+ array, args, 0, number_of_elements, ALLOW_CONVERTED_DOUBLE_ELEMENTS);
// Allocate an appropriately typed elements array.
- MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind();
+ Handle<FixedArrayBase> elms;
if (IsFastDoubleElementsKind(elements_kind)) {
- maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
- number_of_elements);
+ elms = Handle<FixedArrayBase>::cast(
+ factory->NewFixedDoubleArray(number_of_elements));
} else {
- maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+ elms = Handle<FixedArrayBase>::cast(
+ factory->NewFixedArrayWithHoles(number_of_elements));
}
- FixedArrayBase* elms;
- if (!maybe_elms->To(&elms)) return maybe_elms;
// Fill in the content
switch (array->GetElementsKind()) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
- FixedArray* smi_elms = FixedArray::cast(elms);
+ Handle<FixedArray> smi_elms = Handle<FixedArray>::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER);
}
@@ -2040,7 +1971,7 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
case FAST_ELEMENTS: {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- FixedArray* object_elms = FixedArray::cast(elms);
+ Handle<FixedArray> object_elms = Handle<FixedArray>::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
object_elms->set(index, (*args)[index], mode);
}
@@ -2048,7 +1979,8 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+ Handle<FixedDoubleArray> double_elms =
+ Handle<FixedDoubleArray>::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
double_elms->set(index, (*args)[index]->Number());
}
@@ -2059,7 +1991,7 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
break;
}
- array->set_elements(elms);
+ array->set_elements(*elms);
array->set_length(Smi::FromInt(number_of_elements));
return array;
}
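ArrayConstructInitializeElements dispatches on the argument count and shape; the following control-flow mirror sketches the `new Array(...)` cases it implements (the constant's value is a stand-in; only the branch structure is taken from the code above):

#include <cstdio>

const int kInitialMaxFastElementArray = 100000;  // stand-in value

const char* Dispatch(int argc, bool first_is_smi, int len) {
  if (argc == 1) {
    if (first_is_smi && len > 0 && len < kInitialMaxFastElementArray)
      return "preallocate len elements, holey kind";   // new Array(5)
    if (first_is_smi && len == 0)
      return "default preallocation";                  // new Array(0)
    // Non-Smi (or out-of-range) single argument: initialize empty and
    // defer to SetElementsLength, which can throw a RangeError or, for
    // a non-number, leave a one-element array holding the argument.
    return "initialize empty, then SetElementsLength";  // new Array("x")
  }
  if (argc == 0) return "default preallocation";        // new Array()
  return "allocate argc slots and fill from args";      // new Array(1, 2)
}

int main() {
  printf("%s\n", Dispatch(1, true, 5));
  printf("%s\n", Dispatch(1, false, 0));
  printf("%s\n", Dispatch(2, true, 0));
}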
diff --git a/chromium/v8/src/elements.h b/chromium/v8/src/elements.h
index 6353aaecf5c..aa0159ed05b 100644
--- a/chromium/v8/src/elements.h
+++ b/chromium/v8/src/elements.h
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ELEMENTS_H_
#define V8_ELEMENTS_H_
-#include "elements-kind.h"
-#include "objects.h"
-#include "heap.h"
-#include "isolate.h"
+#include "src/elements-kind.h"
+#include "src/objects.h"
+#include "src/heap.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
@@ -48,28 +25,43 @@ class ElementsAccessor {
// Checks the elements of an object for consistency, asserting when a problem
// is found.
- virtual void Validate(JSObject* obj) = 0;
+ virtual void Validate(Handle<JSObject> obj) = 0;
// Returns true if a holder contains an element with the specified key
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
// the ElementsKind of the ElementsAccessor. If backing_store is NULL, the
// holder->elements() is used as the backing store.
- virtual bool HasElement(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
+ virtual bool HasElement(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store) = 0;
+
+ inline bool HasElement(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key) {
+ return HasElement(receiver, holder, key, handle(holder->elements()));
+ }
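The header change swaps each "FixedArrayBase* backing_store = NULL" default argument for an explicit-parameter virtual plus an inline convenience overload. A default argument could not express the new default, because it would have to reference another parameter (the holder) to build the handle. The pattern in miniature (toy types, not V8's):

#include <cstdio>

struct Elements {};
struct Holder { Elements* elements; };

struct Accessor {
  // The virtual always takes an explicit backing store...
  virtual bool HasElement(Holder* holder, int key, Elements* store) = 0;
  // ...and a non-virtual overload derives the default from the holder.
  bool HasElement(Holder* holder, int key) {
    return HasElement(holder, key, holder->elements);
  }
  virtual ~Accessor() {}
};

struct Impl : Accessor {
  bool HasElement(Holder* holder, int key, Elements* store) override {
    return store != nullptr && key >= 0;  // stand-in membership check
  }
};

int main() {
  Elements e;
  Holder h{&e};
  Impl impl;
  Accessor* a = &impl;
  printf("%d\n", a->HasElement(&h, 3));  // overload supplies h.elements
}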
// Returns the element with the specified key or undefined if there is no such
// element. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual MaybeObject* Get(
- Object* receiver,
- JSObject* holder,
+ MUST_USE_RESULT virtual MaybeHandle<Object> Get(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
+ Handle<FixedArrayBase> backing_store) = 0;
+
+ MUST_USE_RESULT inline MaybeHandle<Object> Get(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key) {
+ return Get(receiver, holder, key, handle(holder->elements()));
+ }
// Returns an element's attributes, or ABSENT if there is no such
// element. This method doesn't iterate up the prototype chain. The caller
@@ -77,10 +69,17 @@ class ElementsAccessor {
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Object* receiver,
- JSObject* holder,
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
+ Handle<FixedArrayBase> backing_store) = 0;
+
+ MUST_USE_RESULT inline PropertyAttributes GetAttributes(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key) {
+ return GetAttributes(receiver, holder, key, handle(holder->elements()));
+ }
// Returns an element's type, or NONEXISTENT if there is no such
// element. This method doesn't iterate up the prototype chain. The caller
@@ -88,29 +87,44 @@ class ElementsAccessor {
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual PropertyType GetType(
- Object* receiver,
- JSObject* holder,
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
+ Handle<FixedArrayBase> backing_store) = 0;
+
+ MUST_USE_RESULT inline PropertyType GetType(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key) {
+ return GetType(receiver, holder, key, handle(holder->elements()));
+ }
// Returns an element's accessors, or NULL if the element does not exist or
// is plain. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
- Object* receiver,
- JSObject* holder,
+ MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
+ Handle<FixedArrayBase> backing_store) = 0;
+
+ MUST_USE_RESULT inline MaybeHandle<AccessorPair> GetAccessorPair(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key) {
+ return GetAccessorPair(receiver, holder, key, handle(holder->elements()));
+ }
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
// changing array sizes as defined in ECMAScript 5.1 15.4.5.2, i.e. arrays
// that have non-deletable elements can only be shrunk to just past the
// highest non-deletable element.

- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
- Object* new_length) = 0;
+ MUST_USE_RESULT virtual MaybeHandle<Object> SetLength(
+ Handle<JSArray> holder,
+ Handle<Object> new_length) = 0;
// Modifies both the length and capacity of a JSArray, resizing the underlying
// backing store as necessary. This method does NOT honor the semantics of
@@ -118,14 +132,16 @@ class ElementsAccessor {
// elements. This method should only be called for array expansion OR by
// runtime JavaScript code that uses InternalArrays and doesn't care about
// ECMAScript 5.1 semantics.
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
- int capacity,
- int length) = 0;
+ virtual void SetCapacityAndLength(
+ Handle<JSArray> array,
+ int capacity,
+ int length) = 0;
// Deletes an element in an object, returning a new elements backing store.
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
+ MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
+ Handle<JSObject> holder,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) = 0;
// If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
// of elements from source after source_start to the destination array.
@@ -140,28 +156,46 @@ class ElementsAccessor {
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
// ignored.
- MUST_USE_RESULT virtual MaybeObject* CopyElements(
+ virtual void CopyElements(
+ Handle<FixedArrayBase> source,
+ uint32_t source_start,
+ ElementsKind source_kind,
+ Handle<FixedArrayBase> destination,
+ uint32_t destination_start,
+ int copy_size) = 0;
+
+ // TODO(ishell): Keeping the |source_holder| parameter in a non-handlified
+ // form helps avoid a performance degradation in the ArrayConcat() builtin.
+ // Revisit this later.
+ virtual void CopyElements(
JSObject* source_holder,
uint32_t source_start,
ElementsKind source_kind,
- FixedArrayBase* destination,
+ Handle<FixedArrayBase> destination,
uint32_t destination_start,
- int copy_size,
- FixedArrayBase* source = NULL) = 0;
-
- MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
- FixedArrayBase* to,
- ElementsKind from_kind,
- FixedArrayBase* from = NULL) {
- return CopyElements(from_holder, 0, from_kind, to, 0,
- kCopyToEndAndInitializeToHole, from);
+ int copy_size) = 0;
+
+ inline void CopyElements(
+ Handle<JSObject> from_holder,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind) {
+ CopyElements(
+ *from_holder, 0, from_kind, to, 0, kCopyToEndAndInitializeToHole);
}
- MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
- Object* receiver,
- JSObject* holder,
- FixedArray* to,
- FixedArrayBase* from = NULL) = 0;
+ MUST_USE_RESULT virtual MaybeHandle<FixedArray> AddElementsToFixedArray(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ Handle<FixedArray> to,
+ Handle<FixedArrayBase> from) = 0;
+
+ MUST_USE_RESULT inline MaybeHandle<FixedArray> AddElementsToFixedArray(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ Handle<FixedArray> to) {
+ return AddElementsToFixedArray(
+ receiver, holder, to, handle(holder->elements()));
+ }
// Returns a shared ElementsAccessor for the specified ElementsKind.
static ElementsAccessor* ForKind(ElementsKind elements_kind) {
@@ -169,15 +203,15 @@ class ElementsAccessor {
return elements_accessors_[elements_kind];
}
- static ElementsAccessor* ForArray(FixedArrayBase* array);
+ static ElementsAccessor* ForArray(Handle<FixedArrayBase> array);
static void InitializeOncePerProcess();
static void TearDown();
protected:
- friend class NonStrictArgumentsElementsAccessor;
+ friend class SloppyArgumentsElementsAccessor;
- virtual uint32_t GetCapacity(FixedArrayBase* backing_store) = 0;
+ virtual uint32_t GetCapacity(Handle<FixedArrayBase> backing_store) = 0;
// Element handlers distinguish between indexes and keys when they manipulate
// elements. Indexes refer to elements in terms of their location in the
@@ -187,7 +221,7 @@ class ElementsAccessor {
// keys are equivalent to indexes, and GetKeyForIndex returns the same value
// it is passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps
// the index to a key using the KeyAt method on the NumberDictionary.
- virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
+ virtual uint32_t GetKeyForIndex(Handle<FixedArrayBase> backing_store,
uint32_t index) = 0;
private:
@@ -197,11 +231,12 @@ class ElementsAccessor {
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
-void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
+void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t key,
bool allow_appending = false);
-MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
- JSArray* array, Arguments* args);
+MUST_USE_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
+ Handle<JSArray> array,
+ Arguments* args);
} } // namespace v8::internal
diff --git a/chromium/v8/src/execution.cc b/chromium/v8/src/execution.cc
index c0e9a64fbb0..2766e76b8ca 100644
--- a/chromium/v8/src/execution.cc
+++ b/chromium/v8/src/execution.cc
@@ -1,49 +1,18 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "isolate-inl.h"
-#include "runtime-profiler.h"
-#include "simulator.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution.h"
+
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/isolate-inl.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
-
StackGuard::StackGuard()
: isolate_(NULL) {
}
@@ -67,19 +36,25 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) {
}
-static Handle<Object> Invoke(bool is_construct,
- Handle<JSFunction> function,
- Handle<Object> receiver,
- int argc,
- Handle<Object> args[],
- bool* has_pending_exception) {
+MUST_USE_RESULT static MaybeHandle<Object> Invoke(
+ bool is_construct,
+ Handle<JSFunction> function,
+ Handle<Object> receiver,
+ int argc,
+ Handle<Object> args[]) {
Isolate* isolate = function->GetIsolate();
// Entering JavaScript.
VMState<JS> state(isolate);
+ CHECK(AllowJavascriptExecution::IsAllowed(isolate));
+ if (!ThrowOnJavascriptExecution::IsAllowed(isolate)) {
+ isolate->ThrowIllegalOperation();
+ isolate->ReportPendingMessages();
+ return MaybeHandle<Object>();
+ }
// Placeholder for return value.
- MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
+ Object* value = NULL;
typedef Object* (*JSEntryFunction)(byte* entry,
Object* function,
@@ -120,52 +95,43 @@ static Handle<Object> Invoke(bool is_construct,
}
#ifdef VERIFY_HEAP
- value->Verify();
+ value->ObjectVerify();
#endif
// Update the pending exception flag and return the value.
- *has_pending_exception = value->IsException();
- ASSERT(*has_pending_exception == isolate->has_pending_exception());
- if (*has_pending_exception) {
+ bool has_exception = value->IsException();
+ ASSERT(has_exception == isolate->has_pending_exception());
+ if (has_exception) {
isolate->ReportPendingMessages();
- if (isolate->pending_exception()->IsOutOfMemory()) {
- if (!isolate->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("JS", true);
- }
- }
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Reset stepping state when script exits with uncaught exception.
- if (isolate->debugger()->IsDebuggerActive()) {
+ if (isolate->debug()->is_active()) {
isolate->debug()->ClearStepping();
}
-#endif // ENABLE_DEBUGGER_SUPPORT
- return Handle<Object>();
+ return MaybeHandle<Object>();
} else {
isolate->clear_pending_message();
}
- return Handle<Object>(value->ToObjectUnchecked(), isolate);
+ return Handle<Object>(value, isolate);
}
-Handle<Object> Execution::Call(Isolate* isolate,
- Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception,
- bool convert_receiver) {
- *pending_exception = false;
-
+MaybeHandle<Object> Execution::Call(Isolate* isolate,
+ Handle<Object> callable,
+ Handle<Object> receiver,
+ int argc,
+ Handle<Object> argv[],
+ bool convert_receiver) {
if (!callable->IsJSFunction()) {
- callable = TryGetFunctionDelegate(isolate, callable, pending_exception);
- if (*pending_exception) return callable;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, callable, TryGetFunctionDelegate(isolate, callable), Object);
}
Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
- // In non-strict mode, convert receiver.
+ // In sloppy mode, convert receiver.
if (convert_receiver && !receiver->IsJSReceiver() &&
- !func->shared()->native() && func->shared()->is_classic_mode()) {
+ !func->shared()->native() &&
+ func->shared()->strict_mode() == SLOPPY) {
if (receiver->IsUndefined() || receiver->IsNull()) {
Object* global = func->context()->global_object()->global_receiver();
// Under some circumstances, 'global' can be the JSBuiltinsObject
@@ -175,29 +141,27 @@ Handle<Object> Execution::Call(Isolate* isolate,
receiver = Handle<Object>(global, func->GetIsolate());
}
} else {
- receiver = ToObject(isolate, receiver, pending_exception);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, receiver, ToObject(isolate, receiver), Object);
}
- if (*pending_exception) return callable;
}
- return Invoke(false, func, receiver, argc, argv, pending_exception);
+ return Invoke(false, func, receiver, argc, argv);
}
-Handle<Object> Execution::New(Handle<JSFunction> func,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception) {
- return Invoke(true, func, func->GetIsolate()->global_object(), argc, argv,
- pending_exception);
+MaybeHandle<Object> Execution::New(Handle<JSFunction> func,
+ int argc,
+ Handle<Object> argv[]) {
+ return Invoke(true, func, func->GetIsolate()->global_object(), argc, argv);
}
-Handle<Object> Execution::TryCall(Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Handle<Object> args[],
- bool* caught_exception) {
+MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
+ Handle<Object> receiver,
+ int argc,
+ Handle<Object> args[],
+ Handle<Object>* exception_out) {
// Enter a try-block while executing the JavaScript code. To avoid
// duplicate error printing, it must be non-verbose. Also, to avoid
// creating message objects during stack overflow we shouldn't
@@ -205,33 +169,30 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
v8::TryCatch catcher;
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
- *caught_exception = false;
// Get isolate now, because handle might be persistent
// and get destroyed in the next call.
Isolate* isolate = func->GetIsolate();
- Handle<Object> result = Invoke(false, func, receiver, argc, args,
- caught_exception);
+ MaybeHandle<Object> maybe_result = Invoke(false, func, receiver, argc, args);
- if (*caught_exception) {
+ if (maybe_result.is_null()) {
ASSERT(catcher.HasCaught());
ASSERT(isolate->has_pending_exception());
ASSERT(isolate->external_caught_exception());
- if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
- }
- if (isolate->pending_exception() ==
- isolate->heap()->termination_exception()) {
- result = isolate->factory()->termination_exception();
- } else {
- result = v8::Utils::OpenHandle(*catcher.Exception());
+ if (exception_out != NULL) {
+ if (isolate->pending_exception() ==
+ isolate->heap()->termination_exception()) {
+ *exception_out = isolate->factory()->termination_exception();
+ } else {
+ *exception_out = v8::Utils::OpenHandle(*catcher.Exception());
+ }
}
isolate->OptionalRescheduleException(true);
}
ASSERT(!isolate->has_pending_exception());
ASSERT(!isolate->external_caught_exception());
- return result;
+ return maybe_result;
}
@@ -262,9 +223,8 @@ Handle<Object> Execution::GetFunctionDelegate(Isolate* isolate,
}
-Handle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
- Handle<Object> object,
- bool* has_pending_exception) {
+MaybeHandle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object) {
ASSERT(!object->IsJSFunction());
// If object is a function proxy, get its handler. Iterate if necessary.
@@ -286,10 +246,8 @@ Handle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
// throw a non-callable exception.
i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
"called_non_callable", i::HandleVector<i::Object>(&object, 1));
- isolate->Throw(*error_obj);
- *has_pending_exception = true;
- return isolate->factory()->undefined_value();
+ return isolate->Throw<Object>(error_obj);
}
@@ -319,10 +277,8 @@ Handle<Object> Execution::GetConstructorDelegate(Isolate* isolate,
}
-Handle<Object> Execution::TryGetConstructorDelegate(
- Isolate* isolate,
- Handle<Object> object,
- bool* has_pending_exception) {
+MaybeHandle<Object> Execution::TryGetConstructorDelegate(
+ Isolate* isolate, Handle<Object> object) {
ASSERT(!object->IsJSFunction());
// If you return a function from here, it will be called when an
@@ -347,31 +303,7 @@ Handle<Object> Execution::TryGetConstructorDelegate(
// throw a non-callable exception.
i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
"called_non_callable", i::HandleVector<i::Object>(&object, 1));
- isolate->Throw(*error_obj);
- *has_pending_exception = true;
-
- return isolate->factory()->undefined_value();
-}
-
-
-void Execution::RunMicrotasks(Isolate* isolate) {
- ASSERT(isolate->microtask_pending());
- bool threw = false;
- Execution::Call(
- isolate,
- isolate->run_microtasks(),
- isolate->factory()->undefined_value(),
- 0,
- NULL,
- &threw);
- ASSERT(!threw);
-}
-
-
-bool StackGuard::IsStackOverflow() {
- ExecutionAccess access(isolate_);
- return (thread_local_.jslimit_ != kInterruptLimit &&
- thread_local_.climit_ != kInterruptLimit);
+ return isolate->Throw<Object>(error_obj);
}
@@ -405,144 +337,43 @@ void StackGuard::DisableInterrupts() {
}
-bool StackGuard::ShouldPostponeInterrupts() {
+bool StackGuard::CheckInterrupt(int flagbit) {
ExecutionAccess access(isolate_);
- return should_postpone_interrupts(access);
+ return thread_local_.interrupt_flags_ & flagbit;
}
-bool StackGuard::IsInterrupted() {
+void StackGuard::RequestInterrupt(int flagbit) {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
-}
-
-
-void StackGuard::Interrupt() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= INTERRUPT;
+ thread_local_.interrupt_flags_ |= flagbit;
set_interrupt_limits(access);
}
-bool StackGuard::IsPreempted() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & PREEMPT;
-}
-
-
-void StackGuard::Preempt() {
+void StackGuard::ClearInterrupt(int flagbit) {
ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= PREEMPT;
- set_interrupt_limits(access);
-}
-
-
-bool StackGuard::IsTerminateExecution() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
-}
-
-
-void StackGuard::CancelTerminateExecution() {
- ExecutionAccess access(isolate_);
- Continue(TERMINATE);
- isolate_->CancelTerminateExecution();
-}
-
-
-void StackGuard::TerminateExecution() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= TERMINATE;
- set_interrupt_limits(access);
-}
-
-
-bool StackGuard::IsGCRequest() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
-}
-
-
-void StackGuard::RequestGC() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= GC_REQUEST;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
-}
-
-
-bool StackGuard::IsInstallCodeRequest() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & INSTALL_CODE) != 0;
-}
-
-
-void StackGuard::RequestInstallCode() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= INSTALL_CODE;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
+ thread_local_.interrupt_flags_ &= ~flagbit;
+ if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
+ reset_limits(access);
}
}
-bool StackGuard::IsFullDeopt() {
+bool StackGuard::CheckAndClearInterrupt(InterruptFlag flag) {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & FULL_DEOPT) != 0;
-}
-
-
-void StackGuard::FullDeopt() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= FULL_DEOPT;
- set_interrupt_limits(access);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool StackGuard::IsDebugBreak() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & DEBUGBREAK;
-}
-
-
-void StackGuard::DebugBreak() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= DEBUGBREAK;
- set_interrupt_limits(access);
-}
-
-
-bool StackGuard::IsDebugCommand() {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & DEBUGCOMMAND;
-}
-
-
-void StackGuard::DebugCommand() {
- if (FLAG_debugger_auto_break) {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= DEBUGCOMMAND;
- set_interrupt_limits(access);
- }
-}
-#endif
-
-void StackGuard::Continue(InterruptFlag after_what) {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what);
+ int flagbit = 1 << flag;
+ bool result = (thread_local_.interrupt_flags_ & flagbit);
+ thread_local_.interrupt_flags_ &= ~flagbit;
if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
reset_limits(access);
}
+ return result;
}
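The deletions above replace a dozen per-flag method pairs (IsInterrupted/Interrupt, IsPreempted/Preempt, and so on) with three operations parameterized on a flag bit. A toy model of the unified scheme (flag names and positions illustrative; the real ones carry locking and stack-limit updates as shown above):

#include <cstdio>

enum InterruptFlag { GC_REQUEST = 0, INSTALL_CODE = 1, API_INTERRUPT = 2 };

struct StackGuard {
  int interrupt_flags_ = 0;

  void RequestInterrupt(int flagbit) { interrupt_flags_ |= flagbit; }
  void ClearInterrupt(int flagbit) { interrupt_flags_ &= ~flagbit; }

  // Mirrors CheckAndClearInterrupt: test the bit, then clear it, so a
  // pending interrupt is consumed exactly once.
  bool CheckAndClearInterrupt(InterruptFlag flag) {
    int flagbit = 1 << flag;
    bool result = (interrupt_flags_ & flagbit) != 0;
    interrupt_flags_ &= ~flagbit;
    return result;
  }
};

int main() {
  StackGuard guard;
  guard.RequestInterrupt(1 << GC_REQUEST);
  printf("%d %d\n",
         guard.CheckAndClearInterrupt(GC_REQUEST),    // 1: was pending
         guard.CheckAndClearInterrupt(GC_REQUEST));   // 0: consumed
}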
char* StackGuard::ArchiveStackGuard(char* to) {
ExecutionAccess access(isolate_);
- OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
ThreadLocal blank;
// Set the stack limits using the old thread_local_.
@@ -559,8 +390,7 @@ char* StackGuard::ArchiveStackGuard(char* to) {
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access(isolate_);
- OS::MemCopy(
- reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
isolate_->heap()->SetStackLimits();
return from + sizeof(ThreadLocal);
}
@@ -625,78 +455,78 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
-#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
+#define RETURN_NATIVE_CALL(name, args) \
do { \
Handle<Object> argv[] = args; \
- ASSERT(has_pending_exception != NULL); \
return Call(isolate, \
isolate->name##_fun(), \
isolate->js_builtins_object(), \
- ARRAY_SIZE(argv), argv, \
- has_pending_exception); \
+ ARRAY_SIZE(argv), argv); \
} while (false)
-Handle<Object> Execution::ToNumber(
- Isolate* isolate, Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_number, { obj }, exc);
+MaybeHandle<Object> Execution::ToNumber(
+ Isolate* isolate, Handle<Object> obj) {
+ RETURN_NATIVE_CALL(to_number, { obj });
}
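Callers of these conversions no longer thread a bool* through; they test the returned MaybeHandle instead. A hedged call-site sketch (isolate and obj are assumed to be in scope):

    Handle<Object> number;
    if (!Execution::ToNumber(isolate, obj).ToHandle(&number)) {
      // The conversion threw: an exception is pending on the isolate,
      // so propagate the failure instead of touching the empty handle.
      return MaybeHandle<Object>();
    }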
-Handle<Object> Execution::ToString(
- Isolate* isolate, Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_string, { obj }, exc);
+MaybeHandle<Object> Execution::ToString(
+ Isolate* isolate, Handle<Object> obj) {
+ RETURN_NATIVE_CALL(to_string, { obj });
}
-Handle<Object> Execution::ToDetailString(
- Isolate* isolate, Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
+MaybeHandle<Object> Execution::ToDetailString(
+ Isolate* isolate, Handle<Object> obj) {
+ RETURN_NATIVE_CALL(to_detail_string, { obj });
}
-Handle<Object> Execution::ToObject(
- Isolate* isolate, Handle<Object> obj, bool* exc) {
+MaybeHandle<Object> Execution::ToObject(
+ Isolate* isolate, Handle<Object> obj) {
if (obj->IsSpecObject()) return obj;
- RETURN_NATIVE_CALL(to_object, { obj }, exc);
+ RETURN_NATIVE_CALL(to_object, { obj });
}
-Handle<Object> Execution::ToInteger(
- Isolate* isolate, Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_integer, { obj }, exc);
+MaybeHandle<Object> Execution::ToInteger(
+ Isolate* isolate, Handle<Object> obj) {
+ RETURN_NATIVE_CALL(to_integer, { obj });
}
-Handle<Object> Execution::ToUint32(
- Isolate* isolate, Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
+MaybeHandle<Object> Execution::ToUint32(
+ Isolate* isolate, Handle<Object> obj) {
+ RETURN_NATIVE_CALL(to_uint32, { obj });
}
-Handle<Object> Execution::ToInt32(
- Isolate* isolate, Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_int32, { obj }, exc);
+MaybeHandle<Object> Execution::ToInt32(
+ Isolate* isolate, Handle<Object> obj) {
+ RETURN_NATIVE_CALL(to_int32, { obj });
}
-Handle<Object> Execution::NewDate(Isolate* isolate, double time, bool* exc) {
+MaybeHandle<Object> Execution::NewDate(Isolate* isolate, double time) {
Handle<Object> time_obj = isolate->factory()->NewNumber(time);
- RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
+ RETURN_NATIVE_CALL(create_date, { time_obj });
}
#undef RETURN_NATIVE_CALL
-Handle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
- Handle<String> flags,
- bool* exc) {
+MaybeHandle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
+ Handle<String> flags) {
+ Isolate* isolate = pattern->GetIsolate();
Handle<JSFunction> function = Handle<JSFunction>(
- pattern->GetIsolate()->native_context()->regexp_function());
- Handle<Object> re_obj = RegExpImpl::CreateRegExpLiteral(
- function, pattern, flags, exc);
- if (*exc) return Handle<JSRegExp>();
+ isolate->native_context()->regexp_function());
+ Handle<Object> re_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, re_obj,
+ RegExpImpl::CreateRegExpLiteral(function, pattern, flags),
+ JSRegExp);
return Handle<JSRegExp>::cast(re_obj);
}
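ASSIGN_RETURN_ON_EXCEPTION replaces the manual "if (*exc) return Handle<JSRegExp>();" dance. Roughly, the use above expands to the following (a simplified sketch, not the literal macro body):

    if (!RegExpImpl::CreateRegExpLiteral(function, pattern, flags)
             .ToHandle(&re_obj)) {
      return MaybeHandle<JSRegExp>();  // the exception stays pending
    }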
@@ -710,97 +540,90 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
return factory->undefined_value();
}
- Handle<Object> char_at = GetProperty(
- isolate, isolate->js_builtins_object(), factory->char_at_string());
+ Handle<Object> char_at = Object::GetProperty(
+ isolate->js_builtins_object(),
+ factory->char_at_string()).ToHandleChecked();
if (!char_at->IsJSFunction()) {
return factory->undefined_value();
}
- bool caught_exception;
Handle<Object> index_object = factory->NewNumberFromInt(int_index);
Handle<Object> index_arg[] = { index_object };
- Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
- string,
- ARRAY_SIZE(index_arg),
- index_arg,
- &caught_exception);
- if (caught_exception) {
+ Handle<Object> result;
+ if (!TryCall(Handle<JSFunction>::cast(char_at),
+ string,
+ ARRAY_SIZE(index_arg),
+ index_arg).ToHandle(&result)) {
return factory->undefined_value();
}
return result;
}
-Handle<JSFunction> Execution::InstantiateFunction(
- Handle<FunctionTemplateInfo> data,
- bool* exc) {
+MaybeHandle<JSFunction> Execution::InstantiateFunction(
+ Handle<FunctionTemplateInfo> data) {
Isolate* isolate = data->GetIsolate();
if (!data->do_not_cache()) {
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm =
- isolate->native_context()->function_cache()->
- GetElementNoExceptionThrown(isolate, serial_number);
- if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+ Handle<JSObject> cache(isolate->native_context()->function_cache());
+ Handle<Object> elm =
+ Object::GetElement(isolate, cache, serial_number).ToHandleChecked();
+ if (elm->IsJSFunction()) return Handle<JSFunction>::cast(elm);
}
// The function has not yet been instantiated in this context; do it.
Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate,
- isolate->instantiate_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- exc);
- if (*exc) return Handle<JSFunction>::null();
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Call(isolate,
+ isolate->instantiate_fun(),
+ isolate->js_builtins_object(),
+ ARRAY_SIZE(args),
+ args),
+ JSFunction);
return Handle<JSFunction>::cast(result);
}
-Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
- bool* exc) {
+MaybeHandle<JSObject> Execution::InstantiateObject(
+ Handle<ObjectTemplateInfo> data) {
Isolate* isolate = data->GetIsolate();
+ Handle<Object> result;
if (data->property_list()->IsUndefined() &&
!data->constructor()->IsUndefined()) {
- // Initialization to make gcc happy.
- Object* result = NULL;
- {
- HandleScope scope(isolate);
- Handle<FunctionTemplateInfo> cons_template =
- Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(data->constructor()));
- Handle<JSFunction> cons = InstantiateFunction(cons_template, exc);
- if (*exc) return Handle<JSObject>::null();
- Handle<Object> value = New(cons, 0, NULL, exc);
- if (*exc) return Handle<JSObject>::null();
- result = *value;
- }
- ASSERT(!*exc);
- return Handle<JSObject>(JSObject::cast(result));
+ Handle<FunctionTemplateInfo> cons_template =
+ Handle<FunctionTemplateInfo>(
+ FunctionTemplateInfo::cast(data->constructor()));
+ Handle<JSFunction> cons;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, cons, InstantiateFunction(cons_template), JSObject);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, New(cons, 0, NULL), JSObject);
} else {
Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate,
- isolate->instantiate_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- exc);
- if (*exc) return Handle<JSObject>::null();
- return Handle<JSObject>::cast(result);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Call(isolate,
+ isolate->instantiate_fun(),
+ isolate->js_builtins_object(),
+ ARRAY_SIZE(args),
+ args),
+ JSObject);
}
+ return Handle<JSObject>::cast(result);
}
-void Execution::ConfigureInstance(Isolate* isolate,
- Handle<Object> instance,
- Handle<Object> instance_template,
- bool* exc) {
+MaybeHandle<Object> Execution::ConfigureInstance(
+ Isolate* isolate,
+ Handle<Object> instance,
+ Handle<Object> instance_template) {
Handle<Object> args[] = { instance, instance_template };
- Execution::Call(isolate,
- isolate->configure_instance_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- exc);
+ return Execution::Call(isolate,
+ isolate->configure_instance_fun(),
+ isolate->js_builtins_object(),
+ ARRAY_SIZE(args),
+ args);
}
@@ -810,166 +633,59 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<Object> is_global) {
Isolate* isolate = fun->GetIsolate();
Handle<Object> args[] = { recv, fun, pos, is_global };
- bool caught_exception;
- Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- &caught_exception);
- if (caught_exception || !result->IsString()) {
- return isolate->factory()->empty_string();
+ MaybeHandle<Object> maybe_result =
+ TryCall(isolate->get_stack_trace_line_fun(),
+ isolate->js_builtins_object(),
+ ARRAY_SIZE(args),
+ args);
+ Handle<Object> result;
+ if (!maybe_result.ToHandle(&result) || !result->IsString()) {
+ return isolate->factory()->empty_string();
}
return Handle<String>::cast(result);
}
-static Object* RuntimePreempt(Isolate* isolate) {
- // Clear the preempt request flag.
- isolate->stack_guard()->Continue(PREEMPT);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate->debug()->InDebugger()) {
- // If currently in the debugger don't do any actual preemption but record
- // that preemption occurred while in the debugger.
- isolate->debug()->PreemptionWhileInDebugger();
- } else {
- // Perform preemption.
- v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
- Thread::YieldCPU();
- }
-#else
- { // NOLINT
- // Perform preemption.
- v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
- Thread::YieldCPU();
- }
-#endif
-
- return isolate->heap()->undefined_value();
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Object* Execution::DebugBreakHelper(Isolate* isolate) {
- // Just continue if breaks are disabled.
- if (isolate->debug()->disable_break()) {
- return isolate->heap()->undefined_value();
+Object* StackGuard::HandleInterrupts() {
+ {
+ ExecutionAccess access(isolate_);
+ if (should_postpone_interrupts(access)) {
+ return isolate_->heap()->undefined_value();
+ }
}
- // Ignore debug break during bootstrapping.
- if (isolate->bootstrapper()->IsActive()) {
- return isolate->heap()->undefined_value();
+ if (CheckAndClearInterrupt(GC_REQUEST)) {
+ isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
}
- // Ignore debug break if debugger is not active.
- if (!isolate->debugger()->IsDebuggerActive()) {
- return isolate->heap()->undefined_value();
+ if (CheckDebugBreak() || CheckDebugCommand()) {
+ isolate_->debug()->HandleDebugBreak();
}
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) {
- return isolate->heap()->undefined_value();
+ if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) {
+ return isolate_->TerminateExecution();
}
- {
- JavaScriptFrameIterator it(isolate);
- ASSERT(!it.done());
- Object* fun = it.frame()->function();
- if (fun && fun->IsJSFunction()) {
- // Don't stop in builtin functions.
- if (JSFunction::cast(fun)->IsBuiltin()) {
- return isolate->heap()->undefined_value();
- }
- GlobalObject* global = JSFunction::cast(fun)->context()->global_object();
- // Don't stop in debugger functions.
- if (isolate->debug()->IsDebugGlobal(global)) {
- return isolate->heap()->undefined_value();
- }
- }
+ if (CheckAndClearInterrupt(DEOPT_MARKED_ALLOCATION_SITES)) {
+ isolate_->heap()->DeoptMarkedAllocationSites();
}
- // Collect the break state before clearing the flags.
- bool debug_command_only =
- isolate->stack_guard()->IsDebugCommand() &&
- !isolate->stack_guard()->IsDebugBreak();
-
- // Clear the debug break request flag.
- isolate->stack_guard()->Continue(DEBUGBREAK);
-
- ProcessDebugMessages(isolate, debug_command_only);
-
- // Return to continue execution.
- return isolate->heap()->undefined_value();
-}
-
-
-void Execution::ProcessDebugMessages(Isolate* isolate,
- bool debug_command_only) {
- // Clear the debug command request flag.
- isolate->stack_guard()->Continue(DEBUGCOMMAND);
-
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) {
- return;
- }
-
- HandleScope scope(isolate);
- // Enter the debugger. Just continue if we fail to enter the debugger.
- EnterDebugger debugger(isolate);
- if (debugger.FailedToEnter()) {
- return;
+ if (CheckAndClearInterrupt(INSTALL_CODE)) {
+ ASSERT(isolate_->concurrent_recompilation_enabled());
+ isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
- // Notify the debug event listeners. Indicate auto continue if the break was
- // a debug command break.
- isolate->debugger()->OnDebugBreak(isolate->factory()->undefined_value(),
- debug_command_only);
-}
-
-
-#endif
-
-MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
- StackGuard* stack_guard = isolate->stack_guard();
- if (stack_guard->ShouldPostponeInterrupts()) {
- return isolate->heap()->undefined_value();
+ if (CheckAndClearInterrupt(API_INTERRUPT)) {
+ // Callback must be invoked outside of ExecutionAccess lock.
+ isolate_->InvokeApiInterruptCallback();
}
- if (stack_guard->IsGCRequest()) {
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
- "StackGuard GC request");
- stack_guard->Continue(GC_REQUEST);
- }
+ isolate_->counters()->stack_interrupts()->Increment();
+ isolate_->counters()->runtime_profiler_ticks()->Increment();
+ isolate_->runtime_profiler()->OptimizeNow();
- isolate->counters()->stack_interrupts()->Increment();
- isolate->counters()->runtime_profiler_ticks()->Increment();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
- DebugBreakHelper(isolate);
- }
-#endif
- if (stack_guard->IsPreempted()) RuntimePreempt(isolate);
- if (stack_guard->IsTerminateExecution()) {
- stack_guard->Continue(TERMINATE);
- return isolate->TerminateExecution();
- }
- if (stack_guard->IsInterrupted()) {
- stack_guard->Continue(INTERRUPT);
- return isolate->StackOverflow();
- }
- if (stack_guard->IsFullDeopt()) {
- stack_guard->Continue(FULL_DEOPT);
- Deoptimizer::DeoptimizeAll(isolate);
- }
- if (stack_guard->IsInstallCodeRequest()) {
- ASSERT(isolate->concurrent_recompilation_enabled());
- stack_guard->Continue(INSTALL_CODE);
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- }
- isolate->runtime_profiler()->OptimizeNow();
- return isolate->heap()->undefined_value();
+ return isolate_->heap()->undefined_value();
}
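StackGuard::HandleInterrupts is now the single dispatcher for the work that Execution::HandleStackGuardInterrupt used to fan out across per-flag checks. A hedged sketch of how a runtime entry point might drive it; the wrapper name is illustrative, and StackLimitCheck is what separates a real stack overflow from an interrupt-forced limit:

    // Illustrative wrapper, not the actual V8 runtime function.
    Object* MaybeHandlePendingInterrupts(Isolate* isolate) {
      StackLimitCheck check(isolate);
      if (check.HasOverflowed()) return isolate->StackOverflow();
      // Limits were forced to kInterruptLimit: service the flags.
      return isolate->stack_guard()->HandleInterrupts();
    }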
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/execution.h b/chromium/v8/src/execution.h
index eda416c0da5..74d0feb75c7 100644
--- a/chromium/v8/src/execution.h
+++ b/chromium/v8/src/execution.h
@@ -1,56 +1,16 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_EXECUTION_H_
#define V8_EXECUTION_H_
-#include "allocation.h"
+#include "src/handles.h"
namespace v8 {
namespace internal {
-
-// Flag used to set the interrupt causes.
-enum InterruptFlag {
- INTERRUPT = 1 << 0,
- DEBUGBREAK = 1 << 1,
- DEBUGCOMMAND = 1 << 2,
- PREEMPT = 1 << 3,
- TERMINATE = 1 << 4,
- GC_REQUEST = 1 << 5,
- FULL_DEOPT = 1 << 6,
- INSTALL_CODE = 1 << 7
-};
-
-
-class Isolate;
-
-
-class Execution : public AllStatic {
+class Execution V8_FINAL : public AllStatic {
public:
// Call a function; the caller supplies a receiver and an array
// of arguments. Arguments are Object* type. After function returns,
@@ -63,13 +23,13 @@ class Execution : public AllStatic {
// and the function called is not in strict mode, receiver is converted to
// an object.
//
- static Handle<Object> Call(Isolate* isolate,
- Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception,
- bool convert_receiver = false);
+ MUST_USE_RESULT static MaybeHandle<Object> Call(
+ Isolate* isolate,
+ Handle<Object> callable,
+ Handle<Object> receiver,
+ int argc,
+ Handle<Object> argv[],
+ bool convert_receiver = false);
// Construct an object from a function; the caller supplies an array of
// arguments. Arguments are Object* type. After function returns,
@@ -78,101 +38,86 @@ class Execution : public AllStatic {
// The returned MaybeHandle is empty if the invocation resulted in
// a pending exception.
//
- static Handle<Object> New(Handle<JSFunction> func,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception);
+ MUST_USE_RESULT static MaybeHandle<Object> New(Handle<JSFunction> func,
+ int argc,
+ Handle<Object> argv[]);
// Call a function, just like Call(), but silently catch any thrown
// exceptions. On success the result of the call is returned; if an
// exception was caught, the returned MaybeHandle is empty and the
// exception is stored in *exception_out when that slot is provided.
- static Handle<Object> TryCall(Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool* caught_exception);
+ static MaybeHandle<Object> TryCall(
+ Handle<JSFunction> func,
+ Handle<Object> receiver,
+ int argc,
+ Handle<Object> argv[],
+ Handle<Object>* exception_out = NULL);
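TryCall's bool* output becomes an optional exception_out slot. A hedged sketch of catching the thrown value instead of letting it propagate (ReportFailure is a hypothetical caller-side handler):

    Handle<Object> result;
    Handle<Object> exception;
    if (!Execution::TryCall(func, receiver, argc, argv,
                            &exception).ToHandle(&result)) {
      // The call threw: 'exception' holds the caught value and nothing
      // is left pending on the isolate.
      ReportFailure(exception);  // hypothetical
    }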
// ECMA-262 9.3
- static Handle<Object> ToNumber(
- Isolate* isolate, Handle<Object> obj, bool* exc);
+ MUST_USE_RESULT static MaybeHandle<Object> ToNumber(
+ Isolate* isolate, Handle<Object> obj);
// ECMA-262 9.4
- static Handle<Object> ToInteger(
- Isolate* isolate, Handle<Object> obj, bool* exc);
+ MUST_USE_RESULT static MaybeHandle<Object> ToInteger(
+ Isolate* isolate, Handle<Object> obj);
// ECMA-262 9.5
- static Handle<Object> ToInt32(
- Isolate* isolate, Handle<Object> obj, bool* exc);
+ MUST_USE_RESULT static MaybeHandle<Object> ToInt32(
+ Isolate* isolate, Handle<Object> obj);
// ECMA-262 9.6
- static Handle<Object> ToUint32(
- Isolate* isolate, Handle<Object> obj, bool* exc);
+ MUST_USE_RESULT static MaybeHandle<Object> ToUint32(
+ Isolate* isolate, Handle<Object> obj);
// ECMA-262 9.8
- static Handle<Object> ToString(
- Isolate* isolate, Handle<Object> obj, bool* exc);
+ MUST_USE_RESULT static MaybeHandle<Object> ToString(
+ Isolate* isolate, Handle<Object> obj);
// ECMA-262 9.8
- static Handle<Object> ToDetailString(
- Isolate* isolate, Handle<Object> obj, bool* exc);
+ MUST_USE_RESULT static MaybeHandle<Object> ToDetailString(
+ Isolate* isolate, Handle<Object> obj);
// ECMA-262 9.9
- static Handle<Object> ToObject(
- Isolate* isolate, Handle<Object> obj, bool* exc);
+ MUST_USE_RESULT static MaybeHandle<Object> ToObject(
+ Isolate* isolate, Handle<Object> obj);
// Create a new date object from 'time'.
- static Handle<Object> NewDate(
- Isolate* isolate, double time, bool* exc);
+ MUST_USE_RESULT static MaybeHandle<Object> NewDate(
+ Isolate* isolate, double time);
// Create a new regular expression object from 'pattern' and 'flags'.
- static Handle<JSRegExp> NewJSRegExp(Handle<String> pattern,
- Handle<String> flags,
- bool* exc);
+ MUST_USE_RESULT static MaybeHandle<JSRegExp> NewJSRegExp(
+ Handle<String> pattern, Handle<String> flags);
// Used to implement [] notation on strings (calls JS code)
static Handle<Object> CharAt(Handle<String> str, uint32_t index);
static Handle<Object> GetFunctionFor();
- static Handle<JSFunction> InstantiateFunction(
- Handle<FunctionTemplateInfo> data, bool* exc);
- static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data,
- bool* exc);
- static void ConfigureInstance(Isolate* isolate,
- Handle<Object> instance,
- Handle<Object> data,
- bool* exc);
+ MUST_USE_RESULT static MaybeHandle<JSFunction> InstantiateFunction(
+ Handle<FunctionTemplateInfo> data);
+ MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
+ Handle<ObjectTemplateInfo> data);
+ MUST_USE_RESULT static MaybeHandle<Object> ConfigureInstance(
+ Isolate* isolate, Handle<Object> instance, Handle<Object> data);
static Handle<String> GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
Handle<Object> pos,
Handle<Object> is_global);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static Object* DebugBreakHelper(Isolate* isolate);
- static void ProcessDebugMessages(Isolate* isolate, bool debug_command_only);
-#endif
-
- // If the stack guard is triggered, but it is not an actual
- // stack overflow, then handle the interruption accordingly.
- MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt(
- Isolate* isolate);
// Get a function delegate (or undefined) for the given non-function
// object. Used to support calling objects as functions.
static Handle<Object> GetFunctionDelegate(Isolate* isolate,
Handle<Object> object);
- static Handle<Object> TryGetFunctionDelegate(Isolate* isolate,
- Handle<Object> object,
- bool* has_pending_exception);
+ MUST_USE_RESULT static MaybeHandle<Object> TryGetFunctionDelegate(
+ Isolate* isolate,
+ Handle<Object> object);
// Get a function delegate (or undefined) for the given non-function
// object. Used to support calling objects as constructors.
static Handle<Object> GetConstructorDelegate(Isolate* isolate,
Handle<Object> object);
- static Handle<Object> TryGetConstructorDelegate(Isolate* isolate,
- Handle<Object> object,
- bool* has_pending_exception);
-
- static void RunMicrotasks(Isolate* isolate);
+ static MaybeHandle<Object> TryGetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object);
};
@@ -182,7 +127,7 @@ class ExecutionAccess;
// StackGuard contains the handling of the limits that are used to limit the
// number of nested invocations of JavaScript and the stack size used in each
// invocation.
-class StackGuard {
+class StackGuard V8_FINAL {
public:
// Pass the address beyond which the stack should not grow. The stack
// is assumed to grow downwards.
@@ -200,27 +145,21 @@ class StackGuard {
// it has been set up.
void ClearThread(const ExecutionAccess& lock);
- bool IsStackOverflow();
- bool IsPreempted();
- void Preempt();
- bool IsInterrupted();
- void Interrupt();
- bool IsTerminateExecution();
- void TerminateExecution();
- void CancelTerminateExecution();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- bool IsDebugBreak();
- void DebugBreak();
- bool IsDebugCommand();
- void DebugCommand();
-#endif
- bool IsGCRequest();
- void RequestGC();
- bool IsInstallCodeRequest();
- void RequestInstallCode();
- bool IsFullDeopt();
- void FullDeopt();
- void Continue(InterruptFlag after_what);
+#define INTERRUPT_LIST(V) \
+ V(DEBUGBREAK, DebugBreak) \
+ V(DEBUGCOMMAND, DebugCommand) \
+ V(TERMINATE_EXECUTION, TerminateExecution) \
+ V(GC_REQUEST, GC) \
+ V(INSTALL_CODE, InstallCode) \
+ V(API_INTERRUPT, ApiInterrupt) \
+ V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites)
+
+#define V(NAME, Name) \
+ inline bool Check##Name() { return CheckInterrupt(1 << NAME); } \
+ inline void Request##Name() { RequestInterrupt(1 << NAME); } \
+ inline void Clear##Name() { ClearInterrupt(1 << NAME); }
+ INTERRUPT_LIST(V)
+#undef V
// This provides an asynchronous read of the stack limits for the current
// thread. There are no locks protecting this, but it is assumed that you
@@ -243,11 +182,27 @@ class StackGuard {
Address address_of_real_jslimit() {
return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
}
- bool ShouldPostponeInterrupts();
+
+ // If the stack guard is triggered, but it is not an actual
+ // stack overflow, then handle the interruption accordingly.
+ Object* HandleInterrupts();
private:
StackGuard();
+// Flag used to set the interrupt causes.
+enum InterruptFlag {
+#define V(NAME, Name) NAME,
+ INTERRUPT_LIST(V)
+#undef V
+ NUMBER_OF_INTERRUPTS
+};
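INTERRUPT_LIST is instantiated twice: once publicly to stamp out a Check/Request/Clear triple per interrupt, and once here to build the enum. For the single entry V(GC_REQUEST, GC), the two expansions produce, in sketch form:

    // Enum instantiation:  GC_REQUEST,   (value 3, given the list order)
    // Method instantiation:
    inline bool CheckGC() { return CheckInterrupt(1 << GC_REQUEST); }
    inline void RequestGC() { RequestInterrupt(1 << GC_REQUEST); }
    inline void ClearGC() { ClearInterrupt(1 << GC_REQUEST); }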
+
+ bool CheckInterrupt(int flagbit);
+ void RequestInterrupt(int flagbit);
+ void ClearInterrupt(int flagbit);
+ bool CheckAndClearInterrupt(InterruptFlag flag);
+
// You should hold the ExecutionAccess lock when calling this method.
bool has_pending_interrupts(const ExecutionAccess& lock) {
// Sanity check: We shouldn't be asking about pending interrupts
@@ -272,7 +227,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
@@ -280,7 +235,7 @@ class StackGuard {
static const uintptr_t kIllegalLimit = 0xfffffff8;
#endif
- class ThreadLocal {
+ class ThreadLocal V8_FINAL {
public:
ThreadLocal() { Clear(); }
// You should hold the ExecutionAccess lock when you call Initialize or
@@ -309,6 +264,11 @@ class StackGuard {
int interrupt_flags_;
};
+ class StackPointer {
+ public:
+ inline uintptr_t address() { return reinterpret_cast<uintptr_t>(this); }
+ };
+
// TODO(isolates): Technically this could be calculated directly from a
// pointer to StackGuard.
Isolate* isolate_;
@@ -321,7 +281,6 @@ class StackGuard {
DISALLOW_COPY_AND_ASSIGN(StackGuard);
};
-
} } // namespace v8::internal
#endif // V8_EXECUTION_H_
diff --git a/chromium/v8/src/extensions/externalize-string-extension.cc b/chromium/v8/src/extensions/externalize-string-extension.cc
index edc7dd8052c..c3b1ec756d9 100644
--- a/chromium/v8/src/extensions/externalize-string-extension.cc
+++ b/chromium/v8/src/extensions/externalize-string-extension.cc
@@ -1,31 +1,8 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "externalize-string-extension.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/extensions/externalize-string-extension.h"
namespace v8 {
namespace internal {
@@ -64,10 +41,12 @@ v8::Handle<v8::FunctionTemplate>
ExternalizeStringExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Handle<v8::String> str) {
if (strcmp(*v8::String::Utf8Value(str), "externalizeString") == 0) {
- return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
+ return v8::FunctionTemplate::New(isolate,
+ ExternalizeStringExtension::Externalize);
} else {
ASSERT(strcmp(*v8::String::Utf8Value(str), "isAsciiString") == 0);
- return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
+ return v8::FunctionTemplate::New(isolate,
+ ExternalizeStringExtension::IsAscii);
}
}
@@ -105,7 +84,7 @@ void ExternalizeStringExtension::Externalize(
SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsInternalizedString()) {
+ if (result) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
isolate->heap()->external_string_table()->AddString(*string);
}
@@ -116,7 +95,7 @@ void ExternalizeStringExtension::Externalize(
SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
data, string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsInternalizedString()) {
+ if (result) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
isolate->heap()->external_string_table()->AddString(*string);
}
@@ -143,10 +122,4 @@ void ExternalizeStringExtension::IsAscii(
args.GetReturnValue().Set(is_one_byte);
}
-
-void ExternalizeStringExtension::Register() {
- static ExternalizeStringExtension externalize_extension;
- static v8::DeclareExtension declaration(&externalize_extension);
-}
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/extensions/externalize-string-extension.h b/chromium/v8/src/extensions/externalize-string-extension.h
index 3d1e438f7f2..74b5665ef0e 100644
--- a/chromium/v8/src/extensions/externalize-string-extension.h
+++ b/chromium/v8/src/extensions/externalize-string-extension.h
@@ -1,34 +1,11 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
#define V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
-#include "v8.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -41,7 +18,7 @@ class ExternalizeStringExtension : public v8::Extension {
v8::Handle<v8::String> name);
static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
static void IsAscii(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Register();
+
private:
static const char* const kSource;
};
diff --git a/chromium/v8/src/extensions/free-buffer-extension.cc b/chromium/v8/src/extensions/free-buffer-extension.cc
index 5cf2b68146c..ffba6558a2a 100644
--- a/chromium/v8/src/extensions/free-buffer-extension.cc
+++ b/chromium/v8/src/extensions/free-buffer-extension.cc
@@ -1,33 +1,10 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "free-buffer-extension.h"
-#include "platform.h"
-#include "v8.h"
+#include "src/extensions/free-buffer-extension.h"
+#include "src/platform.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -36,7 +13,7 @@ namespace internal {
v8::Handle<v8::FunctionTemplate> FreeBufferExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate,
v8::Handle<v8::String> str) {
- return v8::FunctionTemplate::New(FreeBufferExtension::FreeBuffer);
+ return v8::FunctionTemplate::New(isolate, FreeBufferExtension::FreeBuffer);
}
@@ -47,14 +24,4 @@ void FreeBufferExtension::FreeBuffer(
V8::ArrayBufferAllocator()->Free(contents.Data(), contents.ByteLength());
}
-
-void FreeBufferExtension::Register() {
- static char buffer[100];
- Vector<char> temp_vector(buffer, sizeof(buffer));
- OS::SNPrintF(temp_vector, "native function freeBuffer();");
-
- static FreeBufferExtension buffer_free_extension(buffer);
- static v8::DeclareExtension declaration(&buffer_free_extension);
-}
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/extensions/free-buffer-extension.h b/chromium/v8/src/extensions/free-buffer-extension.h
index 22d466f61e0..bccf760cc21 100644
--- a/chromium/v8/src/extensions/free-buffer-extension.h
+++ b/chromium/v8/src/extensions/free-buffer-extension.h
@@ -1,47 +1,23 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
#define V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
-#include "v8.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
class FreeBufferExtension : public v8::Extension {
public:
- explicit FreeBufferExtension(const char* source)
- : v8::Extension("v8/free-buffer", source) {}
+ FreeBufferExtension()
+ : v8::Extension("v8/free-buffer", "native function freeBuffer();") {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void FreeBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Register();
};
} } // namespace v8::internal
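With the static Register() helpers gone, embedders construct and register these extensions explicitly through the public API. A hedged sketch using the standard v8.h entry points (an initialized v8::Isolate* isolate is assumed):

    v8::RegisterExtension(new v8::internal::FreeBufferExtension());
    const char* names[] = { "v8/free-buffer" };
    v8::ExtensionConfiguration extensions(1, names);
    v8::Handle<v8::Context> context =
        v8::Context::New(isolate, &extensions);  // freeBuffer() now visible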
diff --git a/chromium/v8/src/extensions/gc-extension.cc b/chromium/v8/src/extensions/gc-extension.cc
index b8442c1bf85..e3c2b1d3709 100644
--- a/chromium/v8/src/extensions/gc-extension.cc
+++ b/chromium/v8/src/extensions/gc-extension.cc
@@ -1,32 +1,9 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "gc-extension.h"
-#include "platform.h"
+#include "src/extensions/gc-extension.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
@@ -35,31 +12,14 @@ namespace internal {
v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate,
v8::Handle<v8::String> str) {
- return v8::FunctionTemplate::New(GCExtension::GC);
+ return v8::FunctionTemplate::New(isolate, GCExtension::GC);
}
void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- if (args[0]->BooleanValue()) {
- isolate->heap()->CollectGarbage(NEW_SPACE, "gc extension");
- } else {
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
- }
-}
-
-
-void GCExtension::Register() {
- static char buffer[50];
- Vector<char> temp_vector(buffer, sizeof(buffer));
- if (FLAG_expose_gc_as != NULL && strlen(FLAG_expose_gc_as) != 0) {
- OS::SNPrintF(temp_vector, "native function %s();", FLAG_expose_gc_as);
- } else {
- OS::SNPrintF(temp_vector, "native function gc();");
- }
-
- static GCExtension gc_extension(buffer);
- static v8::DeclareExtension declaration(&gc_extension);
+ args.GetIsolate()->RequestGarbageCollectionForTesting(
+ args[0]->BooleanValue() ? v8::Isolate::kMinorGarbageCollection
+ : v8::Isolate::kFullGarbageCollection);
}
} } // namespace v8::internal
diff --git a/chromium/v8/src/extensions/gc-extension.h b/chromium/v8/src/extensions/gc-extension.h
index 8c25e7d84d5..789354597e5 100644
--- a/chromium/v8/src/extensions/gc-extension.h
+++ b/chromium/v8/src/extensions/gc-extension.h
@@ -1,46 +1,33 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_EXTENSIONS_GC_EXTENSION_H_
#define V8_EXTENSIONS_GC_EXTENSION_H_
-#include "v8.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
class GCExtension : public v8::Extension {
public:
- explicit GCExtension(const char* source) : v8::Extension("v8/gc", source) {}
+ explicit GCExtension(const char* fun_name)
+ : v8::Extension("v8/gc",
+ BuildSource(buffer_, sizeof(buffer_), fun_name)) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void GC(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Register();
+
+ private:
+ static const char* BuildSource(char* buf, size_t size, const char* fun_name) {
+ SNPrintF(Vector<char>(buf, static_cast<int>(size)),
+ "native function %s();", fun_name);
+ return buf;
+ }
+
+ char buffer_[50];
};
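BuildSource writes the native-function declaration into the instance's own buffer_ before handing the pointer to the v8::Extension base constructor, so each GCExtension can expose the function under a caller-chosen name (the removed Register() derived that name from --expose_gc_as). A brief sketch of the effect:

    GCExtension gc_extension("gc");        // source: "native function gc();"
    GCExtension custom_extension("myGC");  // source: "native function myGC();"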
} } // namespace v8::internal
diff --git a/chromium/v8/src/extensions/statistics-extension.cc b/chromium/v8/src/extensions/statistics-extension.cc
index 92d152d0de7..fe34c9a3868 100644
--- a/chromium/v8/src/extensions/statistics-extension.cc
+++ b/chromium/v8/src/extensions/statistics-extension.cc
@@ -1,31 +1,8 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "statistics-extension.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/extensions/statistics-extension.h"
namespace v8 {
namespace internal {
@@ -38,7 +15,7 @@ v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate,
v8::Handle<v8::String> str) {
ASSERT(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0);
- return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
+ return v8::FunctionTemplate::New(isolate, StatisticsExtension::GetCounters);
}
@@ -48,7 +25,7 @@ static void AddCounter(v8::Isolate* isolate,
const char* name) {
if (counter->Enabled()) {
object->Set(v8::String::NewFromUtf8(isolate, name),
- v8::Number::New(*counter->GetInternalPointer()));
+ v8::Number::New(isolate, *counter->GetInternalPointer()));
}
}
@@ -57,7 +34,7 @@ static void AddNumber(v8::Isolate* isolate,
intptr_t value,
const char* name) {
object->Set(v8::String::NewFromUtf8(isolate, name),
- v8::Number::New(static_cast<double>(value)));
+ v8::Number::New(isolate, static_cast<double>(value)));
}
@@ -66,7 +43,7 @@ static void AddNumber64(v8::Isolate* isolate,
int64_t value,
const char* name) {
object->Set(v8::String::NewFromUtf8(isolate, name),
- v8::Number::New(static_cast<double>(value)));
+ v8::Number::New(isolate, static_cast<double>(value)));
}
@@ -82,7 +59,7 @@ void StatisticsExtension::GetCounters(
}
Counters* counters = isolate->counters();
- v8::Local<v8::Object> result = v8::Object::New();
+ v8::Local<v8::Object> result = v8::Object::New(args.GetIsolate());
#define ADD_COUNTER(name, caption) \
AddCounter(args.GetIsolate(), result, counters->name(), #name);
@@ -170,10 +147,4 @@ void StatisticsExtension::GetCounters(
args.GetReturnValue().Set(result);
}
-
-void StatisticsExtension::Register() {
- static StatisticsExtension statistics_extension;
- static v8::DeclareExtension declaration(&statistics_extension);
-}
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/extensions/statistics-extension.h b/chromium/v8/src/extensions/statistics-extension.h
index f05e7689e73..0915e61de05 100644
--- a/chromium/v8/src/extensions/statistics-extension.h
+++ b/chromium/v8/src/extensions/statistics-extension.h
@@ -1,34 +1,11 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
-#include "v8.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -40,7 +17,7 @@ class StatisticsExtension : public v8::Extension {
v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void GetCounters(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Register();
+
private:
static const char* const kSource;
};
diff --git a/chromium/v8/src/extensions/trigger-failure-extension.cc b/chromium/v8/src/extensions/trigger-failure-extension.cc
new file mode 100644
index 00000000000..30cd9c2ba24
--- /dev/null
+++ b/chromium/v8/src/extensions/trigger-failure-extension.cc
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/extensions/trigger-failure-extension.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+const char* const TriggerFailureExtension::kSource =
+ "native function triggerCheckFalse();"
+ "native function triggerAssertFalse();"
+ "native function triggerSlowAssertFalse();";
+
+
+v8::Handle<v8::FunctionTemplate>
+TriggerFailureExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> str) {
+ if (strcmp(*v8::String::Utf8Value(str), "triggerCheckFalse") == 0) {
+ return v8::FunctionTemplate::New(
+ isolate,
+ TriggerFailureExtension::TriggerCheckFalse);
+ } else if (strcmp(*v8::String::Utf8Value(str), "triggerAssertFalse") == 0) {
+ return v8::FunctionTemplate::New(
+ isolate,
+ TriggerFailureExtension::TriggerAssertFalse);
+ } else {
+ CHECK_EQ(0, strcmp(*v8::String::Utf8Value(str), "triggerSlowAssertFalse"));
+ return v8::FunctionTemplate::New(
+ isolate,
+ TriggerFailureExtension::TriggerSlowAssertFalse);
+ }
+}
+
+
+void TriggerFailureExtension::TriggerCheckFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(false);
+}
+
+
+void TriggerFailureExtension::TriggerAssertFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ ASSERT(false);
+}
+
+
+void TriggerFailureExtension::TriggerSlowAssertFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ SLOW_ASSERT(false);
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/extensions/trigger-failure-extension.h b/chromium/v8/src/extensions/trigger-failure-extension.h
new file mode 100644
index 00000000000..6974da5e311
--- /dev/null
+++ b/chromium/v8/src/extensions/trigger-failure-extension.h
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
+#define V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class TriggerFailureExtension : public v8::Extension {
+ public:
+ TriggerFailureExtension() : v8::Extension("v8/trigger-failure", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> name);
+ static void TriggerCheckFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void TriggerAssertFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void TriggerSlowAssertFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ private:
+ static const char* const kSource;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
diff --git a/chromium/v8/src/factory.cc b/chromium/v8/src/factory.cc
index 483e6a632a3..3d373fbb5bd 100644
--- a/chromium/v8/src/factory.cc
+++ b/chromium/v8/src/factory.cc
@@ -1,52 +1,61 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "debug.h"
-#include "execution.h"
-#include "factory.h"
-#include "isolate-inl.h"
-#include "macro-assembler.h"
-#include "objects.h"
-#include "objects-visiting.h"
-#include "platform.h"
-#include "scopeinfo.h"
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/factory.h"
+
+#include "src/conversions.h"
+#include "src/isolate-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-Handle<Box> Factory::NewBox(Handle<Object> value, PretenureFlag pretenure) {
+template<typename T>
+Handle<T> Factory::New(Handle<Map> map, AllocationSpace space) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->Allocate(*map, space),
+ T);
+}
+
+
+template<typename T>
+Handle<T> Factory::New(Handle<Map> map,
+ AllocationSpace space,
+ Handle<AllocationSite> allocation_site) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->Allocate(*map, space, *allocation_site),
+ T);
+}
+
+
+Handle<HeapObject> Factory::NewFillerObject(int size,
+ bool double_align,
+ AllocationSpace space) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateBox(*value, pretenure),
- Box);
+ isolate()->heap()->AllocateFillerObject(size, double_align, space),
+ HeapObject);
+}
+
+
+Handle<Box> Factory::NewBox(Handle<Object> value) {
+ Handle<Box> result = Handle<Box>::cast(NewStruct(BOX_TYPE));
+ result->set_value(*value);
+ return result;
+}
+
+
+Handle<Oddball> Factory::NewOddball(Handle<Map> map,
+ const char* to_string,
+ Handle<Object> to_number,
+ byte kind) {
+ Handle<Oddball> oddball = New<Oddball>(map, OLD_POINTER_SPACE);
+ Oddball::Initialize(isolate(), oddball, to_string, to_number, kind);
+ return oddball;
}
@@ -64,213 +73,295 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
ASSERT(0 <= size);
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateFixedArrayWithHoles(size, pretenure),
+ isolate()->heap()->AllocateFixedArrayWithFiller(size,
+ pretenure,
+ *the_hole_value()),
FixedArray);
}
-Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
- PretenureFlag pretenure) {
+Handle<FixedArray> Factory::NewUninitializedFixedArray(int size) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateUninitializedFixedArray(size),
+ FixedArray);
+}
+
+
+Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int size,
+ PretenureFlag pretenure) {
ASSERT(0 <= size);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
- FixedDoubleArray);
+ FixedArrayBase);
+}
+
+
+Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
+ int size,
+ PretenureFlag pretenure) {
+ ASSERT(0 <= size);
+ Handle<FixedArrayBase> array = NewFixedDoubleArray(size, pretenure);
+ if (size > 0) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(array);
+ for (int i = 0; i < size; ++i) {
+ double_array->set_the_hole(i);
+ }
+ }
+ return array;
}
Handle<ConstantPoolArray> Factory::NewConstantPoolArray(
- int number_of_int64_entries,
- int number_of_ptr_entries,
- int number_of_int32_entries) {
- ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
- number_of_int32_entries > 0);
+ const ConstantPoolArray::NumberOfEntries& small) {
+ ASSERT(small.total_count() > 0);
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateConstantPoolArray(number_of_int64_entries,
- number_of_ptr_entries,
- number_of_int32_entries),
+ isolate()->heap()->AllocateConstantPoolArray(small),
ConstantPoolArray);
}
-Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- NameDictionary::Allocate(isolate()->heap(),
- at_least_space_for),
- NameDictionary);
+Handle<ConstantPoolArray> Factory::NewExtendedConstantPoolArray(
+ const ConstantPoolArray::NumberOfEntries& small,
+ const ConstantPoolArray::NumberOfEntries& extended) {
+ ASSERT(small.total_count() > 0);
+ ASSERT(extended.total_count() > 0);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateExtendedConstantPoolArray(small, extended),
+ ConstantPoolArray);
}
-Handle<SeededNumberDictionary> Factory::NewSeededNumberDictionary(
- int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- SeededNumberDictionary::Allocate(isolate()->heap(),
- at_least_space_for),
- SeededNumberDictionary);
+Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
+ return OrderedHashSet::Allocate(isolate(), 4);
}
-Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary(
- int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- UnseededNumberDictionary::Allocate(isolate()->heap(),
- at_least_space_for),
- UnseededNumberDictionary);
+Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
+ return OrderedHashMap::Allocate(isolate(), 4);
}
-Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- ObjectHashSet::Allocate(isolate()->heap(),
- at_least_space_for),
- ObjectHashSet);
+Handle<AccessorPair> Factory::NewAccessorPair() {
+ Handle<AccessorPair> accessors =
+ Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE));
+ accessors->set_getter(*the_hole_value(), SKIP_WRITE_BARRIER);
+ accessors->set_setter(*the_hole_value(), SKIP_WRITE_BARRIER);
+ accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ return accessors;
}
-Handle<ObjectHashTable> Factory::NewObjectHashTable(
- int at_least_space_for,
- MinimumCapacity capacity_option) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(isolate(),
- ObjectHashTable::Allocate(isolate()->heap(),
- at_least_space_for,
- capacity_option),
- ObjectHashTable);
+Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
+ Handle<TypeFeedbackInfo> info =
+ Handle<TypeFeedbackInfo>::cast(NewStruct(TYPE_FEEDBACK_INFO_TYPE));
+ info->initialize_storage();
+ return info;
}
-Handle<WeakHashTable> Factory::NewWeakHashTable(int at_least_space_for) {
- ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(
- isolate(),
- WeakHashTable::Allocate(isolate()->heap(),
- at_least_space_for,
- USE_DEFAULT_MINIMUM_CAPACITY,
- TENURED),
- WeakHashTable);
+// Internalized strings are created in the old generation (data space).
+Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
+ Utf8StringKey key(string, isolate()->heap()->HashSeed());
+ return InternalizeStringWithKey(&key);
}
-Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
- int slack) {
- ASSERT(0 <= number_of_descriptors);
- CALL_HEAP_FUNCTION(isolate(),
- DescriptorArray::Allocate(
- isolate(), number_of_descriptors, slack),
- DescriptorArray);
+// Internalized strings are created in the old generation (data space).
+Handle<String> Factory::InternalizeString(Handle<String> string) {
+ if (string->IsInternalizedString()) return string;
+ return StringTable::LookupString(isolate(), string);
}
-Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
- int deopt_entry_count,
- PretenureFlag pretenure) {
- ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationInputData::Allocate(isolate(),
- deopt_entry_count,
- pretenure),
- DeoptimizationInputData);
+Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
+ OneByteStringKey key(string, isolate()->heap()->HashSeed());
+ return InternalizeStringWithKey(&key);
}
-Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
- int deopt_entry_count,
- PretenureFlag pretenure) {
- ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationOutputData::Allocate(isolate(),
- deopt_entry_count,
- pretenure),
- DeoptimizationOutputData);
+Handle<String> Factory::InternalizeOneByteString(
+ Handle<SeqOneByteString> string, int from, int length) {
+ SubStringKey<uint8_t> key(string, from, length);
+ return InternalizeStringWithKey(&key);
}
-Handle<AccessorPair> Factory::NewAccessorPair() {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateAccessorPair(),
- AccessorPair);
+Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
+ TwoByteStringKey key(string, isolate()->heap()->HashSeed());
+ return InternalizeStringWithKey(&key);
}
-Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateTypeFeedbackInfo(),
- TypeFeedbackInfo);
+template<class StringTableKey>
+Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
+ return StringTable::LookupKey(isolate(), key);
}
-// Internalized strings are created in the old generation (data space).
-Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeUtf8String(string),
- String);
-}
+template Handle<String> Factory::InternalizeStringWithKey<
+ SubStringKey<uint8_t> > (SubStringKey<uint8_t>* key);
+template Handle<String> Factory::InternalizeStringWithKey<
+ SubStringKey<uint16_t> > (SubStringKey<uint16_t>* key);
-// Internalized strings are created in the old generation (data space).
-Handle<String> Factory::InternalizeString(Handle<String> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeString(*string),
- String);
+MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
+ PretenureFlag pretenure) {
+ int length = string.length();
+ if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ NewRawOneByteString(string.length(), pretenure),
+ String);
+
+ DisallowHeapAllocation no_gc;
+ // Copy the characters into the new object.
+ CopyChars(SeqOneByteString::cast(*result)->GetChars(),
+ string.start(),
+ length);
+ return result;
}
+MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
+ PretenureFlag pretenure) {
+ // Check for ASCII first since this is the common case.
+ const char* start = string.start();
+ int length = string.length();
+ int non_ascii_start = String::NonAsciiStart(start, length);
+ if (non_ascii_start >= length) {
+ // If the string is ASCII, we do not need to convert the characters
+ // since UTF-8 is backwards compatible with ASCII.
+ return NewStringFromOneByte(Vector<const uint8_t>::cast(string), pretenure);
+ }
-Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeOneByteString(string),
- String);
+ // Non-ASCII and we need to decode.
+ Access<UnicodeCache::Utf8Decoder>
+ decoder(isolate()->unicode_cache()->utf8_decoder());
+ decoder->Reset(string.start() + non_ascii_start,
+ length - non_ascii_start);
+ int utf16_length = decoder->Utf16Length();
+ ASSERT(utf16_length > 0);
+ // Allocate string.
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ NewRawTwoByteString(non_ascii_start + utf16_length, pretenure),
+ String);
+ // Copy the ASCII portion.
+ uint16_t* data = result->GetChars();
+ const char* ascii_data = string.start();
+ for (int i = 0; i < non_ascii_start; i++) {
+ *data++ = *ascii_data++;
+ }
+ // Now write the remainder.
+ decoder->WriteUtf16(data, utf16_length);
+ return result;
}
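
The hunk above leans on UTF-8 being a strict superset of ASCII: bytes below 0x80 are copied verbatim and only the non-ASCII tail is decoded. A minimal standalone sketch of the prefix scan, plain C++ with illustrative names rather than V8's String::NonAsciiStart:

#include <cassert>
#include <cstddef>

// Illustrative stand-in for String::NonAsciiStart: index of the first byte
// with the high bit set, or length if the buffer is pure ASCII.
static size_t FirstNonAscii(const char* data, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (static_cast<unsigned char>(data[i]) >= 0x80) return i;
  }
  return length;
}

int main() {
  assert(FirstNonAscii("plain", 5) == 5);      // all ASCII: no decoding needed
  assert(FirstNonAscii("a\xC3\xA9", 3) == 1);  // UTF-8 "é" starts at index 1
  return 0;
}
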
-Handle<String> Factory::InternalizeOneByteString(
- Handle<SeqOneByteString> string, int from, int length) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeOneByteString(
- string, from, length),
- String);
+MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
+ PretenureFlag pretenure) {
+ int length = string.length();
+ const uc16* start = string.start();
+ if (String::IsOneByte(start, length)) {
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ NewRawOneByteString(length, pretenure),
+ String);
+ CopyChars(result->GetChars(), start, length);
+ return result;
+ } else {
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ NewRawTwoByteString(length, pretenure),
+ String);
+ CopyChars(result->GetChars(), start, length);
+ return result;
+ }
}
-Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeTwoByteString(string),
- String);
+Handle<String> Factory::NewInternalizedStringFromUtf8(Vector<const char> str,
+ int chars,
+ uint32_t hash_field) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateInternalizedStringFromUtf8(
+ str, chars, hash_field),
+ String);
}
-Handle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
- PretenureFlag pretenure) {
+MUST_USE_RESULT Handle<String> Factory::NewOneByteInternalizedString(
+ Vector<const uint8_t> str,
+ uint32_t hash_field) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateStringFromOneByte(string, pretenure),
+ isolate()->heap()->AllocateOneByteInternalizedString(str, hash_field),
String);
}
-Handle<String> Factory::NewStringFromUtf8(Vector<const char> string,
- PretenureFlag pretenure) {
+
+MUST_USE_RESULT Handle<String> Factory::NewTwoByteInternalizedString(
+ Vector<const uc16> str,
+ uint32_t hash_field) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateStringFromUtf8(string, pretenure),
+ isolate()->heap()->AllocateTwoByteInternalizedString(str, hash_field),
String);
}
-Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
- PretenureFlag pretenure) {
+Handle<String> Factory::NewInternalizedStringImpl(
+ Handle<String> string, int chars, uint32_t hash_field) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateStringFromTwoByte(string, pretenure),
+ isolate()->heap()->AllocateInternalizedStringImpl(
+ *string, chars, hash_field),
String);
}
-Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
- PretenureFlag pretenure) {
+MaybeHandle<Map> Factory::InternalizedStringMapForString(
+ Handle<String> string) {
+ // If the string is in new space it cannot be used as internalized.
+ if (isolate()->heap()->InNewSpace(*string)) return MaybeHandle<Map>();
+
+ // Find the corresponding internalized string map for strings.
+ switch (string->map()->instance_type()) {
+ case STRING_TYPE: return internalized_string_map();
+ case ASCII_STRING_TYPE: return ascii_internalized_string_map();
+ case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
+ case EXTERNAL_ASCII_STRING_TYPE:
+ return external_ascii_internalized_string_map();
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return external_internalized_string_with_one_byte_data_map();
+ case SHORT_EXTERNAL_STRING_TYPE:
+ return short_external_internalized_string_map();
+ case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+ return short_external_ascii_internalized_string_map();
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return short_external_internalized_string_with_one_byte_data_map();
+ default: return MaybeHandle<Map>(); // No match found.
+ }
+}
+
+
+MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
+ int length, PretenureFlag pretenure) {
+ if (length > String::kMaxLength || length < 0) {
+ return isolate()->Throw<SeqOneByteString>(NewInvalidStringLengthError());
+ }
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawOneByteString(length, pretenure),
@@ -278,8 +369,11 @@ Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
}
-Handle<SeqTwoByteString> Factory::NewRawTwoByteString(int length,
- PretenureFlag pretenure) {
+MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
+ int length, PretenureFlag pretenure) {
+ if (length > String::kMaxLength || length < 0) {
+ return isolate()->Throw<SeqTwoByteString>(NewInvalidStringLengthError());
+ }
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
@@ -287,11 +381,69 @@ Handle<SeqTwoByteString> Factory::NewRawTwoByteString(int length,
}
-Handle<String> Factory::NewConsString(Handle<String> first,
- Handle<String> second) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateConsString(*first, *second),
- String);
+Handle<String> Factory::LookupSingleCharacterStringFromCode(uint32_t code) {
+ if (code <= String::kMaxOneByteCharCodeU) {
+ {
+ DisallowHeapAllocation no_allocation;
+ Object* value = single_character_string_cache()->get(code);
+ if (value != *undefined_value()) {
+ return handle(String::cast(value), isolate());
+ }
+ }
+ uint8_t buffer[1];
+ buffer[0] = static_cast<uint8_t>(code);
+ Handle<String> result =
+ InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
+ single_character_string_cache()->set(code, *result);
+ return result;
+ }
+ ASSERT(code <= String::kMaxUtf16CodeUnitU);
+
+ Handle<SeqTwoByteString> result = NewRawTwoByteString(1).ToHandleChecked();
+ result->SeqTwoByteStringSet(0, static_cast<uint16_t>(code));
+ return result;
+}
+
+
+// Returns true for a character in a range. Both limits are inclusive.
+static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
+ // This makes use of the unsigned wraparound.
+ return character - from <= to - from;
+}
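
The single unsigned comparison above replaces the usual pair of bounds checks: when character < from, the subtraction wraps around to a huge unsigned value that cannot be <= to - from. A self-contained sketch of the same trick (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

// Same trick as Between() above: one unsigned compare instead of two.
static bool InRangeInclusive(uint32_t c, uint32_t from, uint32_t to) {
  return c - from <= to - from;  // wraps to a huge value when c < from
}

int main() {
  assert(InRangeInclusive('5', '0', '9'));
  assert(!InRangeInclusive('/', '0', '9'));  // '/' is '0' - 1: wraparound case
  assert(!InRangeInclusive('a', '0', '9'));
  return 0;
}
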
+
+
+static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
+ uint16_t c1,
+ uint16_t c2) {
+ // Numeric strings have a different hash algorithm not known by
+ // LookupTwoCharsStringIfExists, so we skip this step for such strings.
+ if (!Between(c1, '0', '9') || !Between(c2, '0', '9')) {
+ Handle<String> result;
+ if (StringTable::LookupTwoCharsStringIfExists(isolate, c1, c2).
+ ToHandle(&result)) {
+ return result;
+ }
+ }
+
+ // Now that we know the length is 2, we might as well use that fact
+ // when building the new string.
+ if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
+ // Both characters fit in one byte, so build a one-byte string. The single
+ // OR-compare is exact only because kMaxOneByteCharCodeU is all one bits,
+ // which this assert guards.
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));
+ Handle<SeqOneByteString> str =
+ isolate->factory()->NewRawOneByteString(2).ToHandleChecked();
+ uint8_t* dest = str->GetChars();
+ dest[0] = static_cast<uint8_t>(c1);
+ dest[1] = static_cast<uint8_t>(c2);
+ return str;
+ } else {
+ Handle<SeqTwoByteString> str =
+ isolate->factory()->NewRawTwoByteString(2).ToHandleChecked();
+ uc16* dest = str->GetChars();
+ dest[0] = c1;
+ dest[1] = c2;
+ return str;
+ }
}
@@ -307,53 +459,205 @@ Handle<String> ConcatStringContent(Handle<StringType> result,
}
+MaybeHandle<String> Factory::NewConsString(Handle<String> left,
+ Handle<String> right) {
+ int left_length = left->length();
+ if (left_length == 0) return right;
+ int right_length = right->length();
+ if (right_length == 0) return left;
+
+ int length = left_length + right_length;
+
+ if (length == 2) {
+ uint16_t c1 = left->Get(0);
+ uint16_t c2 = right->Get(0);
+ return MakeOrFindTwoCharacterString(isolate(), c1, c2);
+ }
+
+ // Make sure that an invalid-string-length exception is thrown if the
+ // length of the new cons string exceeds String::kMaxLength.
+ if (length > String::kMaxLength || length < 0) {
+ return isolate()->Throw<String>(NewInvalidStringLengthError());
+ }
+
+ bool left_is_one_byte = left->IsOneByteRepresentation();
+ bool right_is_one_byte = right->IsOneByteRepresentation();
+ bool is_one_byte = left_is_one_byte && right_is_one_byte;
+ bool is_one_byte_data_in_two_byte_string = false;
+ if (!is_one_byte) {
+ // At least one of the strings uses two-byte representation so we
+ // can't use the fast case code for short ASCII strings below, but
+ // we can try to save memory if all chars actually fit in ASCII.
+ is_one_byte_data_in_two_byte_string =
+ left->HasOnlyOneByteChars() && right->HasOnlyOneByteChars();
+ if (is_one_byte_data_in_two_byte_string) {
+ isolate()->counters()->string_add_runtime_ext_to_ascii()->Increment();
+ }
+ }
+
+ // If the resulting string is small make a flat string.
+ if (length < ConsString::kMinLength) {
+ // Note that neither of the two inputs can be a slice because:
+ STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
+ ASSERT(left->IsFlat());
+ ASSERT(right->IsFlat());
+
+ STATIC_ASSERT(ConsString::kMinLength <= String::kMaxLength);
+ if (is_one_byte) {
+ Handle<SeqOneByteString> result =
+ NewRawOneByteString(length).ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ uint8_t* dest = result->GetChars();
+ // Copy left part.
+ const uint8_t* src = left->IsExternalString()
+ ? Handle<ExternalAsciiString>::cast(left)->GetChars()
+ : Handle<SeqOneByteString>::cast(left)->GetChars();
+ for (int i = 0; i < left_length; i++) *dest++ = src[i];
+ // Copy right part.
+ src = right->IsExternalString()
+ ? Handle<ExternalAsciiString>::cast(right)->GetChars()
+ : Handle<SeqOneByteString>::cast(right)->GetChars();
+ for (int i = 0; i < right_length; i++) *dest++ = src[i];
+ return result;
+ }
+
+ return (is_one_byte_data_in_two_byte_string)
+ ? ConcatStringContent<uint8_t>(
+ NewRawOneByteString(length).ToHandleChecked(), left, right)
+ : ConcatStringContent<uc16>(
+ NewRawTwoByteString(length).ToHandleChecked(), left, right);
+ }
+
+ Handle<Map> map = (is_one_byte || is_one_byte_data_in_two_byte_string)
+ ? cons_ascii_string_map() : cons_string_map();
+ Handle<ConsString> result = New<ConsString>(map, NEW_SPACE);
+
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+
+ result->set_hash_field(String::kEmptyHashField);
+ result->set_length(length);
+ result->set_first(*left, mode);
+ result->set_second(*right, mode);
+ return result;
+}
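
For context on the hunk above: a cons string makes concatenation O(1) by recording the two halves and deferring the copy, which is why results shorter than ConsString::kMinLength are flattened eagerly instead. A toy model under that assumption, not V8's actual classes:

#include <cassert>
#include <memory>
#include <string>

// Toy counterpart of ConsString: O(1) concatenation by keeping both halves;
// flattening (the one-time copy into a single buffer) happens lazily.
struct Cons {
  std::shared_ptr<const std::string> first;
  std::shared_ptr<const std::string> second;
  size_t length;
};

static Cons Concat(std::shared_ptr<const std::string> a,
                   std::shared_ptr<const std::string> b) {
  return Cons{a, b, a->size() + b->size()};
}

static std::string Flatten(const Cons& c) {
  return *c.first + *c.second;  // the deferred copy
}

int main() {
  auto left = std::make_shared<const std::string>("Hello, ");
  auto right = std::make_shared<const std::string>("world");
  Cons joined = Concat(left, right);  // no characters copied yet
  assert(joined.length == 12);
  assert(Flatten(joined) == "Hello, world");
  return 0;
}
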
+
+
Handle<String> Factory::NewFlatConcatString(Handle<String> first,
Handle<String> second) {
int total_length = first->length() + second->length();
if (first->IsOneByteRepresentation() && second->IsOneByteRepresentation()) {
return ConcatStringContent<uint8_t>(
- NewRawOneByteString(total_length), first, second);
+ NewRawOneByteString(total_length).ToHandleChecked(), first, second);
} else {
return ConcatStringContent<uc16>(
- NewRawTwoByteString(total_length), first, second);
+ NewRawTwoByteString(total_length).ToHandleChecked(), first, second);
}
}
-Handle<String> Factory::NewSubString(Handle<String> str,
- int begin,
- int end) {
- CALL_HEAP_FUNCTION(isolate(),
- str->SubString(begin, end),
- String);
-}
-
-
Handle<String> Factory::NewProperSubString(Handle<String> str,
int begin,
int end) {
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) str->StringVerify();
+#endif
ASSERT(begin > 0 || end < str->length());
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateSubString(*str, begin, end),
- String);
+
+ str = String::Flatten(str);
+
+ int length = end - begin;
+ if (length <= 0) return empty_string();
+ if (length == 1) {
+ return LookupSingleCharacterStringFromCode(str->Get(begin));
+ }
+ if (length == 2) {
+ // Optimization for two-character strings often used as keys in a decompression
+ // dictionary. Check whether we already have the string in the string
+ // table to prevent creation of many unnecessary strings.
+ uint16_t c1 = str->Get(begin);
+ uint16_t c2 = str->Get(begin + 1);
+ return MakeOrFindTwoCharacterString(isolate(), c1, c2);
+ }
+
+ if (!FLAG_string_slices || length < SlicedString::kMinLength) {
+ if (str->IsOneByteRepresentation()) {
+ Handle<SeqOneByteString> result =
+ NewRawOneByteString(length).ToHandleChecked();
+ uint8_t* dest = result->GetChars();
+ DisallowHeapAllocation no_gc;
+ String::WriteToFlat(*str, dest, begin, end);
+ return result;
+ } else {
+ Handle<SeqTwoByteString> result =
+ NewRawTwoByteString(length).ToHandleChecked();
+ uc16* dest = result->GetChars();
+ DisallowHeapAllocation no_gc;
+ String::WriteToFlat(*str, dest, begin, end);
+ return result;
+ }
+ }
+
+ int offset = begin;
+
+ if (str->IsSlicedString()) {
+ Handle<SlicedString> slice = Handle<SlicedString>::cast(str);
+ str = Handle<String>(slice->parent(), isolate());
+ offset += slice->offset();
+ }
+
+ ASSERT(str->IsSeqString() || str->IsExternalString());
+ Handle<Map> map = str->IsOneByteRepresentation() ? sliced_ascii_string_map()
+ : sliced_string_map();
+ Handle<SlicedString> slice = New<SlicedString>(map, NEW_SPACE);
+
+ slice->set_hash_field(String::kEmptyHashField);
+ slice->set_length(length);
+ slice->set_parent(*str);
+ slice->set_offset(offset);
+ return slice;
}
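
The sliced-string path above stores (parent, offset, length) instead of copying characters, and re-anchors a slice-of-a-slice on the root parent so chains never build up. A hypothetical toy model of that bookkeeping, not V8's SlicedString:

#include <cassert>
#include <memory>
#include <string>

// Toy counterpart of SlicedString: a (parent, offset, length) view that
// shares the parent's characters instead of copying them.
struct Slice {
  std::shared_ptr<const std::string> parent;
  int offset;
  int length;
};

// Slicing a slice re-anchors on the root parent, mirroring the
// IsSlicedString() branch above, so chains of slices never form.
static Slice SubSlice(const Slice& s, int begin, int end) {
  return Slice{s.parent, s.offset + begin, end - begin};
}

int main() {
  auto parent = std::make_shared<const std::string>("decompression");
  Slice outer{parent, 2, 11};            // views "compression"
  Slice inner = SubSlice(outer, 3, 10);  // views "pressio"
  assert(inner.parent == parent);        // still points at the root string
  assert(inner.offset == 5 && inner.length == 7);
  return 0;
}
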
-Handle<String> Factory::NewExternalStringFromAscii(
+MaybeHandle<String> Factory::NewExternalStringFromAscii(
const ExternalAsciiString::Resource* resource) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExternalStringFromAscii(resource),
- String);
+ size_t length = resource->length();
+ if (length > static_cast<size_t>(String::kMaxLength)) {
+ return isolate()->Throw<String>(NewInvalidStringLengthError());
+ }
+
+ Handle<Map> map = external_ascii_string_map();
+ Handle<ExternalAsciiString> external_string =
+ New<ExternalAsciiString>(map, NEW_SPACE);
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
+ external_string->set_resource(resource);
+
+ return external_string;
}
-Handle<String> Factory::NewExternalStringFromTwoByte(
+MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
- String);
+ size_t length = resource->length();
+ if (length > static_cast<size_t>(String::kMaxLength)) {
+ return isolate()->Throw<String>(NewInvalidStringLengthError());
+ }
+
+ // For small strings we check whether the resource contains only
+ // one-byte characters. If so, we use a different string map.
+ static const size_t kOneByteCheckLengthLimit = 32;
+ bool is_one_byte = length <= kOneByteCheckLengthLimit &&
+ String::IsOneByte(resource->data(), static_cast<int>(length));
+ Handle<Map> map = is_one_byte ?
+ external_string_with_one_byte_data_map() : external_string_map();
+ Handle<ExternalTwoByteString> external_string =
+ New<ExternalTwoByteString>(map, NEW_SPACE);
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
+ external_string->set_resource(resource);
+
+ return external_string;
}
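
The one-byte probe above only runs for resources up to 32 code units; scanning arbitrarily long strings would cost more than the denser map saves. An illustrative stand-in for the String::IsOneByte check, assuming plain UTF-16 data:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative check on UTF-16 data: true when every code unit fits in a
// single byte, so a one-byte string map can be chosen for the resource.
static bool FitsInOneByte(const uint16_t* data, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (data[i] > 0xFF) return false;
  }
  return true;
}

int main() {
  const uint16_t ascii[] = {'a', 'b', 'c'};
  const uint16_t wide[] = {'a', 0x3042, 'c'};  // contains a two-byte code unit
  assert(FitsInOneByte(ascii, 3));
  assert(!FitsInOneByte(wide, 3));
  return 0;
}
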
@@ -366,44 +670,59 @@ Handle<Symbol> Factory::NewSymbol() {
Handle<Symbol> Factory::NewPrivateSymbol() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocatePrivateSymbol(),
- Symbol);
+ Handle<Symbol> symbol = NewSymbol();
+ symbol->set_is_private(true);
+ return symbol;
}
Handle<Context> Factory::NewNativeContext() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateNativeContext(),
- Context);
+ Handle<FixedArray> array = NewFixedArray(Context::NATIVE_CONTEXT_SLOTS);
+ array->set_map_no_write_barrier(*native_context_map());
+ Handle<Context> context = Handle<Context>::cast(array);
+ context->set_js_array_maps(*undefined_value());
+ ASSERT(context->IsNativeContext());
+ return context;
}
Handle<Context> Factory::NewGlobalContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateGlobalContext(*function, *scope_info),
- Context);
+ Handle<FixedArray> array =
+ NewFixedArray(scope_info->ContextLength(), TENURED);
+ array->set_map_no_write_barrier(*global_context_map());
+ Handle<Context> context = Handle<Context>::cast(array);
+ context->set_closure(*function);
+ context->set_previous(function->context());
+ context->set_extension(*scope_info);
+ context->set_global_object(function->context()->global_object());
+ ASSERT(context->IsGlobalContext());
+ return context;
}
Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateModuleContext(*scope_info),
- Context);
+ Handle<FixedArray> array =
+ NewFixedArray(scope_info->ContextLength(), TENURED);
+ array->set_map_no_write_barrier(*module_context_map());
+ // Instance link will be set later.
+ Handle<Context> context = Handle<Context>::cast(array);
+ context->set_extension(Smi::FromInt(0));
+ return context;
}
Handle<Context> Factory::NewFunctionContext(int length,
Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunctionContext(length, *function),
- Context);
+ ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
+ Handle<FixedArray> array = NewFixedArray(length);
+ array->set_map_no_write_barrier(*function_context_map());
+ Handle<Context> context = Handle<Context>::cast(array);
+ context->set_closure(*function);
+ context->set_previous(function->context());
+ context->set_extension(Smi::FromInt(0));
+ context->set_global_object(function->context()->global_object());
+ return context;
}
@@ -411,35 +730,45 @@ Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<String> name,
Handle<Object> thrown_object) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateCatchContext(*function,
- *previous,
- *name,
- *thrown_object),
- Context);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
+ Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
+ array->set_map_no_write_barrier(*catch_context_map());
+ Handle<Context> context = Handle<Context>::cast(array);
+ context->set_closure(*function);
+ context->set_previous(*previous);
+ context->set_extension(*name);
+ context->set_global_object(previous->global_object());
+ context->set(Context::THROWN_OBJECT_INDEX, *thrown_object);
+ return context;
}
Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
Handle<Context> previous,
- Handle<JSObject> extension) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateWithContext(*function, *previous, *extension),
- Context);
+ Handle<JSReceiver> extension) {
+ Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS);
+ array->set_map_no_write_barrier(*with_context_map());
+ Handle<Context> context = Handle<Context>::cast(array);
+ context->set_closure(*function);
+ context->set_previous(*previous);
+ context->set_extension(*extension);
+ context->set_global_object(previous->global_object());
+ return context;
}
Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateBlockContext(*function,
- *previous,
- *scope_info),
- Context);
+ Handle<FixedArray> array =
+ NewFixedArrayWithHoles(scope_info->ContextLength());
+ array->set_map_no_write_barrier(*block_context_map());
+ Handle<Context> context = Handle<Context>::cast(array);
+ context->set_closure(*function);
+ context->set_previous(*previous);
+ context->set_extension(*scope_info);
+ context->set_global_object(previous->global_object());
+ return context;
}
@@ -451,6 +780,15 @@ Handle<Struct> Factory::NewStruct(InstanceType type) {
}
+Handle<CodeCache> Factory::NewCodeCache() {
+ Handle<CodeCache> code_cache =
+ Handle<CodeCache>::cast(NewStruct(CODE_CACHE_TYPE));
+ code_cache->set_default_cache(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code_cache->set_normal_type_cache(*undefined_value(), SKIP_WRITE_BARRIER);
+ return code_cache;
+}
+
+
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
@@ -499,7 +837,6 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_id(Smi::FromInt(id));
script->set_line_offset(Smi::FromInt(0));
script->set_column_offset(Smi::FromInt(0));
- script->set_data(heap->undefined_value());
script->set_context_data(heap->undefined_value());
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
script->set_wrapper(*wrapper);
@@ -537,7 +874,7 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
PretenureFlag pretenure) {
- ASSERT(0 <= length);
+ ASSERT(0 <= length && length <= Smi::kMaxValue);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalArray(length,
@@ -548,6 +885,20 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
}
+Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
+ int length,
+ ExternalArrayType array_type,
+ PretenureFlag pretenure) {
+ ASSERT(0 <= length && length <= Smi::kMaxValue);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFixedTypedArray(length,
+ array_type,
+ pretenure),
+ FixedTypedArrayBase);
+}
+
+
Handle<Cell> Factory::NewCell(Handle<Object> value) {
AllowDeferredHandleDereference convert_to_cell;
CALL_HEAP_FUNCTION(
@@ -574,10 +925,14 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
Handle<AllocationSite> Factory::NewAllocationSite() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateAllocationSite(),
- AllocationSite);
+ Handle<Map> map = allocation_site_map();
+ Handle<AllocationSite> site = New<AllocationSite>(map, OLD_POINTER_SPACE);
+ site->Initialize();
+
+ // Link the site into the heap's weak list of allocation sites.
+ site->set_weak_next(isolate()->heap()->allocation_sites_list());
+ isolate()->heap()->set_allocation_sites_list(*site);
+ return site;
}
@@ -591,201 +946,96 @@ Handle<Map> Factory::NewMap(InstanceType type,
}
-Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
- // Make sure to use globals from the function's context, since the function
- // can be from a different context.
- Handle<Context> native_context(function->context()->native_context());
- Handle<Map> new_map;
- if (function->shared()->is_generator()) {
- // Generator prototypes can share maps since they don't have "constructor"
- // properties.
- new_map = handle(native_context->generator_object_prototype_map());
- } else {
- // Each function prototype gets a fresh map to avoid unwanted sharing of
- // maps between prototypes of different constructors.
- Handle<JSFunction> object_function(native_context->object_function());
- ASSERT(object_function->has_initial_map());
- new_map = Map::Copy(handle(object_function->initial_map()));
- }
-
- Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
-
- if (!function->shared()->is_generator()) {
- JSObject::SetLocalPropertyIgnoreAttributes(prototype,
- constructor_string(),
- function,
- DONT_ENUM);
- }
-
- return prototype;
-}
-
-
-Handle<Map> Factory::CopyWithPreallocatedFieldDescriptors(Handle<Map> src) {
- CALL_HEAP_FUNCTION(
- isolate(), src->CopyWithPreallocatedFieldDescriptors(), Map);
-}
-
-
-Handle<Map> Factory::CopyMap(Handle<Map> src,
- int extra_inobject_properties) {
- Handle<Map> copy = CopyWithPreallocatedFieldDescriptors(src);
- // Check that we do not overflow the instance size when adding the
- // extra inobject properties.
- int instance_size_delta = extra_inobject_properties * kPointerSize;
- int max_instance_size_delta =
- JSObject::kMaxInstanceSize - copy->instance_size();
- int max_extra_properties = max_instance_size_delta >> kPointerSizeLog2;
- if (extra_inobject_properties > max_extra_properties) {
- // If the instance size overflows, we allocate as many properties
- // as we can as inobject properties.
- instance_size_delta = max_instance_size_delta;
- extra_inobject_properties = max_extra_properties;
- }
- // Adjust the map with the extra inobject properties.
- int inobject_properties =
- copy->inobject_properties() + extra_inobject_properties;
- copy->set_inobject_properties(inobject_properties);
- copy->set_unused_property_fields(inobject_properties);
- copy->set_instance_size(copy->instance_size() + instance_size_delta);
- copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
- return copy;
+Handle<JSObject> Factory::CopyJSObject(Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyJSObject(*object, NULL),
+ JSObject);
}
-Handle<Map> Factory::CopyMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->Copy(), Map);
+Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
+ Handle<JSObject> object,
+ Handle<AllocationSite> site) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyJSObject(
+ *object,
+ site.is_null() ? NULL : *site),
+ JSObject);
}
-Handle<Map> Factory::GetElementsTransitionMap(
- Handle<JSObject> src,
- ElementsKind elements_kind) {
- Isolate* i = isolate();
- CALL_HEAP_FUNCTION(i,
- src->GetElementsTransitionMap(i, elements_kind),
- Map);
+Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
+ Handle<Map> map) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyFixedArrayWithMap(*array, *map),
+ FixedArray);
}
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
- CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyFixedArray(*array),
+ FixedArray);
}
-Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
- int new_length,
- PretenureFlag pretenure) {
+Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray(
+ Handle<FixedArray> array) {
+ ASSERT(isolate()->heap()->InNewSpace(*array));
CALL_HEAP_FUNCTION(isolate(),
- array->CopySize(new_length, pretenure),
+ isolate()->heap()->CopyAndTenureFixedCOWArray(*array),
FixedArray);
}
Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
- CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyFixedDoubleArray(*array),
+ FixedDoubleArray);
}
Handle<ConstantPoolArray> Factory::CopyConstantPoolArray(
Handle<ConstantPoolArray> array) {
- CALL_HEAP_FUNCTION(isolate(), array->Copy(), ConstantPoolArray);
-}
-
-
-Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Map> function_map,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunction(*function_map,
- *function_info,
- isolate()->heap()->the_hole_value(),
- pretenure),
- JSFunction);
-}
-
-
-static Handle<Map> MapForNewFunction(Isolate *isolate,
- Handle<SharedFunctionInfo> function_info) {
- Context *context = isolate->context()->native_context();
- int map_index = Context::FunctionMapIndex(function_info->language_mode(),
- function_info->is_generator());
- return Handle<Map>(Map::cast(context->get(map_index)));
-}
-
-
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Context> context,
- PretenureFlag pretenure) {
- Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
- function_info,
- MapForNewFunction(isolate(), function_info),
- pretenure);
-
- if (function_info->ic_age() != isolate()->heap()->global_ic_age()) {
- function_info->ResetForNewContext(isolate()->heap()->global_ic_age());
- }
-
- result->set_context(*context);
-
- int index = function_info->SearchOptimizedCodeMap(context->native_context());
- if (!function_info->bound() && index < 0) {
- int number_of_literals = function_info->num_literals();
- Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
- if (number_of_literals > 0) {
- // Store the native context in the literals array prefix. This
- // context will be used when creating object, regexp and array
- // literals in this function.
- literals->set(JSFunction::kLiteralNativeContextIndex,
- context->native_context());
- }
- result->set_literals(*literals);
- }
-
- if (index > 0) {
- // Caching of optimized code enabled and optimized code found.
- function_info->InstallFromOptimizedCodeMap(*result, index);
- return result;
- }
-
- if (isolate()->use_crankshaft() &&
- FLAG_always_opt &&
- result->is_compiled() &&
- !function_info->is_toplevel() &&
- function_info->allows_lazy_compilation() &&
- !function_info->optimization_disabled() &&
- !isolate()->DebuggerHasBreakPoints()) {
- result->MarkForLazyRecompilation();
- }
- return result;
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyConstantPoolArray(*array),
+ ConstantPoolArray);
}
Handle<Object> Factory::NewNumber(double value,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->NumberFromDouble(value, pretenure), Object);
+ // We need to distinguish the minus zero value and this cannot be
+ // done after conversion to int. Doing this by comparing bit
+ // patterns is faster than using fpclassify() et al.
+ if (IsMinusZero(value)) return NewHeapNumber(-0.0, pretenure);
+
+ int int_value = FastD2I(value);
+ if (value == int_value && Smi::IsValid(int_value)) {
+ return handle(Smi::FromInt(int_value), isolate());
+ }
+
+ // Materialize the value in the heap.
+ return NewHeapNumber(value, pretenure);
}
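
The minus-zero special case above matters because -0.0 == 0.0 under floating-point comparison, so the Smi round-trip through FastD2I would silently drop the sign. A standalone demonstration (using std::signbit, where V8 compares bit patterns directly):

#include <cassert>
#include <cmath>

int main() {
  double minus_zero = -0.0;
  assert(minus_zero == 0.0);         // == cannot tell the two zeros apart
  assert(std::signbit(minus_zero));  // the sign bit can
  assert(!std::signbit(0.0));
  return 0;
}
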
Handle<Object> Factory::NewNumberFromInt(int32_t value,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->NumberFromInt32(value, pretenure), Object);
+ if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
+ // Bypass NumberFromDouble to avoid various redundant checks.
+ return NewHeapNumber(FastI2D(value), pretenure);
}
Handle<Object> Factory::NewNumberFromUint(uint32_t value,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->NumberFromUint32(value, pretenure), Object);
+ PretenureFlag pretenure) {
+ int32_t int32v = static_cast<int32_t>(value);
+ if (int32v >= 0 && Smi::IsValid(int32v)) {
+ return handle(Smi::FromInt(int32v), isolate());
+ }
+ return NewHeapNumber(FastUI2D(value), pretenure);
}
@@ -797,15 +1047,6 @@ Handle<HeapNumber> Factory::NewHeapNumber(double value,
}
-Handle<JSObject> Factory::NewNeanderObject() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(
- isolate()->heap()->neander_map()),
- JSObject);
-}
-
-
Handle<Object> Factory::NewTypeError(const char* message,
Vector< Handle<Object> > args) {
return NewError("MakeTypeError", message, args);
@@ -845,6 +1086,12 @@ Handle<Object> Factory::NewReferenceError(const char* message,
}
+Handle<Object> Factory::NewReferenceError(const char* message,
+ Handle<JSArray> args) {
+ return NewError("MakeReferenceError", message, args);
+}
+
+
Handle<Object> Factory::NewReferenceError(Handle<String> message) {
return NewError("$ReferenceError", message);
}
@@ -885,7 +1132,7 @@ Handle<String> Factory::EmergencyNewError(const char* message,
char* p = &buffer[0];
Vector<char> v(buffer, kBufferSize);
- OS::StrNCpy(v, message, space);
+ StrNCpy(v, message, space);
space -= Min(space, strlen(message));
p = &buffer[kBufferSize] - space;
@@ -894,12 +1141,12 @@ Handle<String> Factory::EmergencyNewError(const char* message,
*p++ = ' ';
space--;
if (space > 0) {
- MaybeObject* maybe_arg = args->GetElement(isolate(), i);
- Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
- const char* arg = *arg_str->ToCString();
+ Handle<String> arg_str = Handle<String>::cast(
+ Object::GetElement(isolate(), args, i).ToHandleChecked());
+ SmartArrayPointer<char> arg = arg_str->ToCString();
Vector<char> v2(p, static_cast<int>(space));
- OS::StrNCpy(v2, arg, space);
- space -= Min(space, strlen(arg));
+ StrNCpy(v2, arg.get(), space);
+ space -= Min(space, strlen(arg.get()));
p = &buffer[kBufferSize] - space;
}
}
@@ -909,8 +1156,7 @@ Handle<String> Factory::EmergencyNewError(const char* message,
} else {
buffer[kBufferSize - 1] = '\0';
}
- Handle<String> error_string = NewStringFromUtf8(CStrVector(buffer), TENURED);
- return error_string;
+ return NewStringFromUtf8(CStrVector(buffer), TENURED).ToHandleChecked();
}
@@ -918,9 +1164,8 @@ Handle<Object> Factory::NewError(const char* maker,
const char* message,
Handle<JSArray> args) {
Handle<String> make_str = InternalizeUtf8String(maker);
- Handle<Object> fun_obj(
- isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str),
- isolate());
+ Handle<Object> fun_obj = Object::GetProperty(
+ isolate()->js_builtins_object(), make_str).ToHandleChecked();
// If the builtins haven't been properly configured yet this error
// constructor may not have been defined. Bail out.
if (!fun_obj->IsJSFunction()) {
@@ -932,12 +1177,15 @@ Handle<Object> Factory::NewError(const char* maker,
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
+ Handle<Object> result;
+ Handle<Object> exception;
+ if (!Execution::TryCall(fun,
+ isolate()->js_builtins_object(),
+ ARRAY_SIZE(argv),
+ argv,
+ &exception).ToHandle(&result)) {
+ return exception;
+ }
return result;
}
@@ -950,106 +1198,235 @@ Handle<Object> Factory::NewError(Handle<String> message) {
Handle<Object> Factory::NewError(const char* constructor,
Handle<String> message) {
Handle<String> constr = InternalizeUtf8String(constructor);
- Handle<JSFunction> fun = Handle<JSFunction>(
- JSFunction::cast(isolate()->js_builtins_object()->
- GetPropertyNoExceptionThrown(*constr)));
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty(
+ isolate()->js_builtins_object(), constr).ToHandleChecked());
Handle<Object> argv[] = { message };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
+ Handle<Object> result;
+ Handle<Object> exception;
+ if (!Execution::TryCall(fun,
+ isolate()->js_builtins_object(),
+ ARRAY_SIZE(argv),
+ argv,
+ &exception).ToHandle(&result)) {
+ return exception;
+ }
+ return result;
+}
+
+
+void Factory::InitializeFunction(Handle<JSFunction> function,
+ Handle<SharedFunctionInfo> info,
+ Handle<Context> context) {
+ function->initialize_properties();
+ function->initialize_elements();
+ function->set_shared(*info);
+ function->set_code(info->code());
+ function->set_context(*context);
+ function->set_prototype_or_initial_map(*the_hole_value());
+ function->set_literals_or_bindings(*empty_fixed_array());
+ function->set_next_function_link(*undefined_value());
+}
+
+
+Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Context> context,
+ PretenureFlag pretenure) {
+ AllocationSpace space = pretenure == TENURED ? OLD_POINTER_SPACE : NEW_SPACE;
+ Handle<JSFunction> result = New<JSFunction>(map, space);
+ InitializeFunction(result, info, context);
+ return result;
+}
+
+
+Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
+ Handle<String> name,
+ MaybeHandle<Code> code) {
+ Handle<Context> context(isolate()->context()->native_context());
+ Handle<SharedFunctionInfo> info = NewSharedFunctionInfo(name, code);
+ ASSERT((info->strict_mode() == SLOPPY) &&
+ (map.is_identical_to(isolate()->sloppy_function_map()) ||
+ map.is_identical_to(
+ isolate()->sloppy_function_without_prototype_map()) ||
+ map.is_identical_to(
+ isolate()->sloppy_function_with_readonly_prototype_map())));
+ return NewFunction(map, info, context);
+}
+
+
+Handle<JSFunction> Factory::NewFunction(Handle<String> name) {
+ return NewFunction(
+ isolate()->sloppy_function_map(), name, MaybeHandle<Code>());
+}
+
+
+Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
+ Handle<Code> code) {
+ return NewFunction(
+ isolate()->sloppy_function_without_prototype_map(), name, code);
+}
+
+
+Handle<JSFunction> Factory::NewFunction(Handle<String> name,
+ Handle<Code> code,
+ Handle<Object> prototype,
+ bool read_only_prototype) {
+ Handle<Map> map = read_only_prototype
+ ? isolate()->sloppy_function_with_readonly_prototype_map()
+ : isolate()->sloppy_function_map();
+ Handle<JSFunction> result = NewFunction(map, name, code);
+ result->set_prototype_or_initial_map(*prototype);
return result;
}
Handle<JSFunction> Factory::NewFunction(Handle<String> name,
+ Handle<Code> code,
+ Handle<Object> prototype,
InstanceType type,
int instance_size,
- Handle<Code> code,
- bool force_initial_map) {
+ bool read_only_prototype) {
// Allocate the function
- Handle<JSFunction> function = NewFunction(name, the_hole_value());
-
- // Set up the code pointer in both the shared function info and in
- // the function itself.
- function->shared()->set_code(*code);
- function->set_code(*code);
-
- if (force_initial_map ||
- type != JS_OBJECT_TYPE ||
- instance_size != JSObject::kHeaderSize) {
- Handle<Map> initial_map = NewMap(type, instance_size);
- Handle<JSObject> prototype = NewFunctionPrototype(function);
- initial_map->set_prototype(*prototype);
- function->set_initial_map(*initial_map);
- initial_map->set_constructor(*function);
- } else {
- ASSERT(!function->has_initial_map());
- ASSERT(!function->has_prototype());
+ Handle<JSFunction> function = NewFunction(
+ name, code, prototype, read_only_prototype);
+
+ Handle<Map> initial_map = NewMap(
+ type, instance_size, GetInitialFastElementsKind());
+ if (prototype->IsTheHole() && !function->shared()->is_generator()) {
+ prototype = NewFunctionPrototype(function);
}
+ initial_map->set_prototype(*prototype);
+ function->set_initial_map(*initial_map);
+ initial_map->set_constructor(*function);
return function;
}
-Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Handle<Code> code,
- bool force_initial_map) {
- // Allocate the function.
- Handle<JSFunction> function = NewFunction(name, prototype);
+Handle<JSFunction> Factory::NewFunction(Handle<String> name,
+ Handle<Code> code,
+ InstanceType type,
+ int instance_size) {
+ return NewFunction(name, code, the_hole_value(), type, instance_size);
+}
- // Set up the code pointer in both the shared function info and in
- // the function itself.
- function->shared()->set_code(*code);
- function->set_code(*code);
- if (force_initial_map ||
- type != JS_OBJECT_TYPE ||
- instance_size != JSObject::kHeaderSize) {
- Handle<Map> initial_map = NewMap(type,
- instance_size,
- GetInitialFastElementsKind());
- function->set_initial_map(*initial_map);
- initial_map->set_constructor(*function);
+Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
+ // Make sure to use globals from the function's context, since the function
+ // can be from a different context.
+ Handle<Context> native_context(function->context()->native_context());
+ Handle<Map> new_map;
+ if (function->shared()->is_generator()) {
+ // Generator prototypes can share maps since they don't have "constructor"
+ // properties.
+ new_map = handle(native_context->generator_object_prototype_map());
+ } else {
+ // Each function prototype gets a fresh map to avoid unwanted sharing of
+ // maps between prototypes of different constructors.
+ Handle<JSFunction> object_function(native_context->object_function());
+ ASSERT(object_function->has_initial_map());
+ new_map = Map::Copy(handle(object_function->initial_map()));
}
- JSFunction::SetPrototype(function, prototype);
- return function;
+ Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
+
+ if (!function->shared()->is_generator()) {
+ JSObject::SetOwnPropertyIgnoreAttributes(prototype,
+ constructor_string(),
+ function,
+ DONT_ENUM).Assert();
+ }
+
+ return prototype;
}
-Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code) {
- Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
- CLASSIC_MODE);
- function->shared()->set_code(*code);
- function->set_code(*code);
- ASSERT(!function->has_initial_map());
- ASSERT(!function->has_prototype());
- return function;
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info,
+ Handle<Context> context,
+ PretenureFlag pretenure) {
+ int map_index = Context::FunctionMapIndex(info->strict_mode(),
+ info->is_generator());
+ Handle<Map> map(Map::cast(context->native_context()->get(map_index)));
+ Handle<JSFunction> result = NewFunction(map, info, context, pretenure);
+
+ if (info->ic_age() != isolate()->heap()->global_ic_age()) {
+ info->ResetForNewContext(isolate()->heap()->global_ic_age());
+ }
+
+ int index = info->SearchOptimizedCodeMap(context->native_context(),
+ BailoutId::None());
+ if (!info->bound() && index < 0) {
+ int number_of_literals = info->num_literals();
+ Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
+ if (number_of_literals > 0) {
+ // Store the native context in the literals array prefix. This
+ // context will be used when creating object, regexp and array
+ // literals in this function.
+ literals->set(JSFunction::kLiteralNativeContextIndex,
+ context->native_context());
+ }
+ result->set_literals(*literals);
+ }
+
+ if (index > 0) {
+ // Caching of optimized code enabled and optimized code found.
+ FixedArray* literals = info->GetLiteralsFromOptimizedCodeMap(index);
+ if (literals != NULL) result->set_literals(literals);
+ Code* code = info->GetCodeFromOptimizedCodeMap(index);
+ ASSERT(!code->marked_for_deoptimization());
+ result->ReplaceCode(code);
+ return result;
+ }
+
+ if (isolate()->use_crankshaft() &&
+ FLAG_always_opt &&
+ result->is_compiled() &&
+ !info->is_toplevel() &&
+ info->allows_lazy_compilation() &&
+ !info->optimization_disabled() &&
+ !isolate()->DebuggerHasBreakPoints()) {
+ result->MarkForOptimization();
+ }
+ return result;
+}
+
+
+Handle<JSObject> Factory::NewIteratorResultObject(Handle<Object> value,
+ bool done) {
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
+ Handle<JSObject> result = NewJSObjectFromMap(map, NOT_TENURED, false);
+ result->InObjectPropertyAtPut(
+ JSGeneratorObject::kResultValuePropertyIndex, *value);
+ result->InObjectPropertyAtPut(
+ JSGeneratorObject::kResultDonePropertyIndex, *ToBoolean(done));
+ return result;
}
Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateScopeInfo(length),
- ScopeInfo);
+ Handle<FixedArray> array = NewFixedArray(length, TENURED);
+ array->set_map_no_write_barrier(*scope_info_map());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(array);
+ return scope_info;
}
Handle<JSObject> Factory::NewExternal(void* value) {
+ Handle<Foreign> foreign = NewForeign(static_cast<Address>(value));
+ Handle<JSObject> external = NewJSObjectFromMap(external_map());
+ external->SetInternalField(0, *foreign);
+ return external;
+}
+
+
+Handle<Code> Factory::NewCodeRaw(int object_size, bool immovable) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateExternal(value),
- JSObject);
+ isolate()->heap()->AllocateCode(object_size, immovable),
+ Code);
}
@@ -1058,12 +1435,64 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
Handle<Object> self_ref,
bool immovable,
bool crankshafted,
- int prologue_offset) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CreateCode(
- desc, flags, self_ref, immovable, crankshafted,
- prologue_offset),
- Code);
+ int prologue_offset,
+ bool is_debug) {
+ Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
+ Handle<ConstantPoolArray> constant_pool =
+ desc.origin->NewConstantPool(isolate());
+
+ // Compute size.
+ int body_size = RoundUp(desc.instr_size, kObjectAlignment);
+ int obj_size = Code::SizeFor(body_size);
+
+ Handle<Code> code = NewCodeRaw(obj_size, immovable);
+ ASSERT(isolate()->code_range() == NULL ||
+ !isolate()->code_range()->valid() ||
+ isolate()->code_range()->contains(code->address()));
+
+ // The code object has not been fully initialized yet. We rely on the
+ // fact that no allocation will happen from this point on.
+ DisallowHeapAllocation no_gc;
+ code->set_gc_metadata(Smi::FromInt(0));
+ code->set_ic_age(isolate()->heap()->global_ic_age());
+ code->set_instruction_size(desc.instr_size);
+ code->set_relocation_info(*reloc_info);
+ code->set_flags(flags);
+ code->set_raw_kind_specific_flags1(0);
+ code->set_raw_kind_specific_flags2(0);
+ code->set_is_crankshafted(crankshafted);
+ code->set_deoptimization_data(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code->set_raw_type_feedback_info(*undefined_value());
+ code->set_next_code_link(*undefined_value());
+ code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code->set_prologue_offset(prologue_offset);
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ code->set_marked_for_deoptimization(false);
+ }
+
+ if (is_debug) {
+ ASSERT(code->kind() == Code::FUNCTION);
+ code->set_has_debug_break_slots(true);
+ }
+
+ desc.origin->PopulateConstantPool(*constant_pool);
+ code->set_constant_pool(*constant_pool);
+
+ // Allow self-references to the created code object by patching the handle to
+ // point to the newly allocated Code object.
+ if (!self_ref.is_null()) *(self_ref.location()) = *code;
+
+ // Migrate generated code.
+ // The generated code can contain Object** values (typically from handles)
+ // that are dereferenced during the copy to point directly to the actual heap
+ // objects. These pointers can include references to the code object itself,
+ // through the self_reference parameter.
+ code->CopyFrom(desc);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) code->ObjectVerify();
+#endif
+ return code;
}
@@ -1081,12 +1510,6 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
-Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeString(*value), String);
-}
-
-
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
JSFunction::EnsureHasInitialMap(constructor);
@@ -1096,30 +1519,27 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
-Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
- Handle<ScopeInfo> scope_info) {
+Handle<JSObject> Factory::NewJSObjectWithMemento(
+ Handle<JSFunction> constructor,
+ Handle<AllocationSite> site) {
+ JSFunction::EnsureHasInitialMap(constructor);
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSModule(*context, *scope_info), JSModule);
-}
-
-
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
- Handle<Name> name,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(dict->GetIsolate(),
- dict->Add(*name, *value, details),
- NameDictionary);
+ isolate()->heap()->AllocateJSObject(*constructor, NOT_TENURED, *site),
+ JSObject);
}
-static Handle<GlobalObject> NewGlobalObjectFromMap(Isolate* isolate,
- Handle<Map> map) {
- CALL_HEAP_FUNCTION(isolate,
- isolate->heap()->Allocate(*map, OLD_POINTER_SPACE),
- GlobalObject);
+Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
+ Handle<ScopeInfo> scope_info) {
+ // Allocate a fresh map. Modules do not have a prototype.
+ Handle<Map> map = NewMap(JS_MODULE_TYPE, JSModule::kSize);
+ // Allocate the object based on the map.
+ Handle<JSModule> module =
+ Handle<JSModule>::cast(NewJSObjectFromMap(map, TENURED));
+ module->set_context(*context);
+ module->set_scope_info(*scope_info);
+ return module;
}
@@ -1145,7 +1565,8 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
// Allocate a dictionary object for backing storage.
int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size;
- Handle<NameDictionary> dictionary = NewNameDictionary(at_least_space_for);
+ Handle<NameDictionary> dictionary =
+ NameDictionary::New(isolate(), at_least_space_for);
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
@@ -1157,11 +1578,12 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
Handle<Name> name(descs->GetKey(i));
Handle<Object> value(descs->GetCallbacksObject(i), isolate());
Handle<PropertyCell> cell = NewPropertyCell(value);
- NameDictionaryAdd(dictionary, name, cell, d);
+ // |dictionary| already contains enough space for all properties.
+ USE(NameDictionary::Add(dictionary, name, cell, d));
}
// Allocate the global object and initialize it with the backing store.
- Handle<GlobalObject> global = NewGlobalObjectFromMap(isolate(), map);
+ Handle<GlobalObject> global = New<GlobalObject>(map, OLD_POINTER_SPACE);
isolate()->heap()->InitializeJSObjectFromMap(*global, *dictionary, *map);
// Create a new map for the global object.
@@ -1178,61 +1600,91 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
}
-Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure,
- bool alloc_props) {
+Handle<JSObject> Factory::NewJSObjectFromMap(
+ Handle<Map> map,
+ PretenureFlag pretenure,
+ bool alloc_props,
+ Handle<AllocationSite> allocation_site) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure, alloc_props),
+ isolate()->heap()->AllocateJSObjectFromMap(
+ *map,
+ pretenure,
+ alloc_props,
+ allocation_site.is_null() ? NULL : *allocation_site),
JSObject);
}
-Handle<JSArray> Factory::NewJSArray(int capacity,
- ElementsKind elements_kind,
+Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure) {
- if (capacity != 0) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- }
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateJSArrayAndStorage(
- elements_kind,
- 0,
- capacity,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
- pretenure),
- JSArray);
+ Context* native_context = isolate()->context()->native_context();
+ JSFunction* array_function = native_context->array_function();
+ Map* map = array_function->initial_map();
+ Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
+ if (transition_map != NULL) map = transition_map;
+ return Handle<JSArray>::cast(NewJSObjectFromMap(handle(map), pretenure));
+}
+
+
+Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode,
+ PretenureFlag pretenure) {
+ Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
+ NewJSArrayStorage(array, length, capacity, mode);
+ return array;
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSArrayWithElements(*elements,
- elements_kind,
- elements->length(),
- pretenure),
- JSArray);
+ ASSERT(length <= elements->length());
+ Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
+
+ array->set_elements(*elements);
+ array->set_length(Smi::FromInt(length));
+ JSObject::ValidateElements(array);
+ return array;
}
-void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
- int capacity,
- int length) {
- ElementsAccessor* accessor = array->GetElementsAccessor();
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- accessor->SetCapacityAndLength(*array, capacity, length));
-}
+void Factory::NewJSArrayStorage(Handle<JSArray> array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode) {
+ ASSERT(capacity >= length);
+
+ if (capacity == 0) {
+ array->set_length(Smi::FromInt(0));
+ array->set_elements(*empty_fixed_array());
+ return;
+ }
+ Handle<FixedArrayBase> elms;
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ elms = NewFixedDoubleArray(capacity);
+ } else {
+ ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ elms = NewFixedDoubleArrayWithHoles(capacity);
+ }
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ elms = NewUninitializedFixedArray(capacity);
+ } else {
+ ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ elms = NewFixedArrayWithHoles(capacity);
+ }
+ }
-void Factory::SetContent(Handle<JSArray> array,
- Handle<FixedArrayBase> elements) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->SetContent(*elements));
+ array->set_elements(*elms);
+ array->set_length(Smi::FromInt(length));
}
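NewJSArrayStorage picks the backing-store type from the array's elements kind and writes holes only when asked to. A minimal usage sketch, assuming an internal Factory* factory:

// Sketch: length 2, capacity 16; holey initialization keeps reads of
// the 14 unwritten slots well-defined (they observe the hole).
Handle<JSArray> array = factory->NewJSArray(
    FAST_HOLEY_ELEMENTS, 2, 16, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
ASSERT(Smi::cast(array->length())->value() == 2);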
@@ -1273,32 +1725,12 @@ static JSFunction* GetTypedArrayFun(ExternalArrayType type,
Isolate* isolate) {
Context* native_context = isolate->context()->native_context();
switch (type) {
- case kExternalUnsignedByteArray:
- return native_context->uint8_array_fun();
-
- case kExternalByteArray:
- return native_context->int8_array_fun();
-
- case kExternalUnsignedShortArray:
- return native_context->uint16_array_fun();
-
- case kExternalShortArray:
- return native_context->int16_array_fun();
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return native_context->type##_array_fun();
- case kExternalUnsignedIntArray:
- return native_context->uint32_array_fun();
-
- case kExternalIntArray:
- return native_context->int32_array_fun();
-
- case kExternalFloatArray:
- return native_context->float_array_fun();
-
- case kExternalDoubleArray:
- return native_context->double_array_fun();
-
- case kExternalPixelArray:
- return native_context->uint8c_array_fun();
+ TYPED_ARRAYS(TYPED_ARRAY_FUN)
+#undef TYPED_ARRAY_FUN
default:
UNREACHABLE();
@@ -1319,26 +1751,152 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type) {
Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
Handle<Object> prototype) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSProxy(*handler, *prototype),
- JSProxy);
+ // Allocate map.
+ // TODO(rossberg): Once we optimize proxies, think about a scheme to share
+ // maps. Will probably depend on the identity of the handler object, too.
+ Handle<Map> map = NewMap(JS_PROXY_TYPE, JSProxy::kSize);
+ map->set_prototype(*prototype);
+
+ // Allocate the proxy object.
+ Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE);
+ result->InitializeBody(map->instance_size(), Smi::FromInt(0));
+ result->set_handler(*handler);
+ result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER);
+ return result;
+}
+
+
+Handle<JSProxy> Factory::NewJSFunctionProxy(Handle<Object> handler,
+ Handle<Object> call_trap,
+ Handle<Object> construct_trap,
+ Handle<Object> prototype) {
+ // Allocate map.
+ // TODO(rossberg): Once we optimize proxies, think about a scheme to share
+ // maps. Will probably depend on the identity of the handler object, too.
+ Handle<Map> map = NewMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
+ map->set_prototype(*prototype);
+
+ // Allocate the proxy object.
+ Handle<JSFunctionProxy> result = New<JSFunctionProxy>(map, NEW_SPACE);
+ result->InitializeBody(map->instance_size(), Smi::FromInt(0));
+ result->set_handler(*handler);
+ result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER);
+ result->set_call_trap(*call_trap);
+ result->set_construct_trap(*construct_trap);
+ return result;
+}
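Both proxy constructors above allocate a one-off map, since proxy maps are not shared yet (see the TODOs). A sketch of constructing a function proxy, where handler, call_trap, construct_trap and prototype are caller-supplied handles:

// Sketch: the traps are plain JS callables supplied by the caller;
// the fresh map carries only the prototype.
Handle<JSProxy> proxy = factory->NewJSFunctionProxy(
    handler, call_trap, construct_trap, prototype);
ASSERT(proxy->map()->prototype() == *prototype);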
+
+
+void Factory::ReinitializeJSReceiver(Handle<JSReceiver> object,
+ InstanceType type,
+ int size) {
+ ASSERT(type >= FIRST_JS_OBJECT_TYPE);
+
+ // Allocate fresh map.
+ // TODO(rossberg): Once we optimize proxies, cache these maps.
+ Handle<Map> map = NewMap(type, size);
+
+ // Check that the receiver has at least the size of the fresh object.
+ int size_difference = object->map()->instance_size() - map->instance_size();
+ ASSERT(size_difference >= 0);
+
+ map->set_prototype(object->map()->prototype());
+
+ // Allocate the backing storage for the properties.
+ int prop_size = map->InitialPropertiesLength();
+ Handle<FixedArray> properties = NewFixedArray(prop_size, TENURED);
+
+ Heap* heap = isolate()->heap();
+ MaybeHandle<SharedFunctionInfo> shared;
+ if (type == JS_FUNCTION_TYPE) {
+ OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"),
+ heap->HashSeed());
+ Handle<String> name = InternalizeStringWithKey(&key);
+ shared = NewSharedFunctionInfo(name, MaybeHandle<Code>());
+ }
+
+ // In order to keep the heap in a consistent state, there must be no
+ // allocations before object re-initialization is finished and the filler
+ // object is installed.
+ DisallowHeapAllocation no_allocation;
+
+ // Reset the map for the object.
+ object->set_map(*map);
+ Handle<JSObject> jsobj = Handle<JSObject>::cast(object);
+
+ // Reinitialize the object from the constructor map.
+ heap->InitializeJSObjectFromMap(*jsobj, *properties, *map);
+
+ // Functions require some minimal initialization.
+ if (type == JS_FUNCTION_TYPE) {
+ map->set_function_with_prototype(true);
+ Handle<JSFunction> js_function = Handle<JSFunction>::cast(object);
+ Handle<Context> context(isolate()->context()->native_context());
+ InitializeFunction(js_function, shared.ToHandleChecked(), context);
+ }
+
+ // Put in filler if the new object is smaller than the old.
+ if (size_difference > 0) {
+ heap->CreateFillerObjectAt(
+ object->address() + map->instance_size(), size_difference);
+ }
+}
+
+
+void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
+ Handle<JSFunction> constructor) {
+ ASSERT(constructor->has_initial_map());
+ Handle<Map> map(constructor->initial_map(), isolate());
+
+ // The proxy's hash should be retained across reinitialization.
+ Handle<Object> hash(object->hash(), isolate());
+
+ // Check that the already allocated object has the same size and type as
+ // objects allocated using the constructor.
+ ASSERT(map->instance_size() == object->map()->instance_size());
+ ASSERT(map->instance_type() == object->map()->instance_type());
+
+ // Allocate the backing storage for the properties.
+ int prop_size = map->InitialPropertiesLength();
+ Handle<FixedArray> properties = NewFixedArray(prop_size, TENURED);
+
+ // In order to keep the heap in a consistent state, there must be no
+ // allocations before object re-initialization is finished.
+ DisallowHeapAllocation no_allocation;
+
+ // Reset the map for the object.
+ object->set_map(constructor->initial_map());
+
+ Heap* heap = isolate()->heap();
+ // Reinitialize the object from the constructor map.
+ heap->InitializeJSObjectFromMap(*object, *properties, *map);
+
+ // Restore the saved hash.
+ object->set_hash(*hash);
}
void Factory::BecomeJSObject(Handle<JSReceiver> object) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- isolate()->heap()->ReinitializeJSReceiver(
- *object, JS_OBJECT_TYPE, JSObject::kHeaderSize));
+ ReinitializeJSReceiver(object, JS_OBJECT_TYPE, JSObject::kHeaderSize);
}
void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
- CALL_HEAP_FUNCTION_VOID(
+ ReinitializeJSReceiver(object, JS_FUNCTION_TYPE, JSFunction::kSize);
+}
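Freezing a proxy funnels through ReinitializeJSReceiver: the object is rewritten in place, and in the function case it receives the synthetic "<freezing call trap>" shared info created above. A sketch of the size invariant, with illustrative byte values:

// Sketch: if the proxy occupied 64 bytes and JSFunction::kSize is 40,
// ReinitializeJSReceiver writes a 24-byte filler after the new object
// so the heap stays iterable. (Sizes here are illustrative only.)
factory->BecomeJSFunction(receiver);  // receiver: Handle<JSReceiver>
ASSERT(Handle<JSFunction>::cast(receiver)->shared() != NULL);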
+
+
+Handle<FixedArray> Factory::NewTypeFeedbackVector(int slot_count) {
+ // Ensure we can skip the write barrier.
+ ASSERT_EQ(isolate()->heap()->uninitialized_symbol(),
+ *TypeFeedbackInfo::UninitializedSentinel(isolate()));
+
+ CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->ReinitializeJSReceiver(
- *object, JS_FUNCTION_TYPE, JSFunction::kSize));
+ isolate()->heap()->AllocateFixedArrayWithFiller(
+ slot_count,
+ TENURED,
+ *TypeFeedbackInfo::UninitializedSentinel(isolate())),
+ FixedArray);
}
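The ASSERT_EQ above pins down that the uninitialized sentinel is the heap's uninitialized_symbol, which is why the array can be filled with it safely. A usage sketch, assuming factory and isolate are in scope:

// Sketch: every slot starts as the uninitialized sentinel, letting IC
// code tell "never executed" apart from a real feedback value.
Handle<FixedArray> vector = factory->NewTypeFeedbackVector(3);
ASSERT(vector->get(0) ==
       *TypeFeedbackInfo::UninitializedSentinel(isolate));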
@@ -1347,10 +1905,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
int number_of_literals,
bool is_generator,
Handle<Code> code,
- Handle<ScopeInfo> scope_info) {
- Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
- shared->set_code(*code);
+ Handle<ScopeInfo> scope_info,
+ Handle<FixedArray> feedback_vector) {
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name, code);
shared->set_scope_info(*scope_info);
+ shared->set_feedback_vector(*feedback_vector);
int literals_array_size = number_of_literals;
// If the function contains object, regexp or array literals,
// allocate extra space for a literals array prefix containing the
@@ -1373,117 +1932,142 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int start_position,
int end_position,
Handle<Object> script,
- Handle<Object> stack_trace,
Handle<Object> stack_frames) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateJSMessageObject(*type,
- *arguments,
- start_position,
- end_position,
- *script,
- *stack_trace,
- *stack_frames),
- JSMessageObject);
-}
-
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateSharedFunctionInfo(*name),
- SharedFunctionInfo);
-}
-
-
-Handle<String> Factory::NumberToString(Handle<Object> number) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->NumberToString(*number), String);
-}
-
-
-Handle<String> Factory::Uint32ToString(uint32_t value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->Uint32ToString(value), String);
-}
-
-
-Handle<SeededNumberDictionary> Factory::DictionaryAtNumberPut(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(isolate(),
- dictionary->AtNumberPut(key, *value),
- SeededNumberDictionary);
-}
-
-
-Handle<UnseededNumberDictionary> Factory::DictionaryAtNumberPut(
- Handle<UnseededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(isolate(),
- dictionary->AtNumberPut(key, *value),
- UnseededNumberDictionary);
-}
-
-
-Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
- Handle<Object> prototype) {
- Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunction(*isolate()->function_map(),
- *function_share,
- *prototype),
- JSFunction);
-}
-
-
-Handle<JSFunction> Factory::NewFunction(Handle<String> name,
- Handle<Object> prototype) {
- Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
- fun->set_context(isolate()->context()->native_context());
- return fun;
+ Handle<Map> map = message_object_map();
+ Handle<JSMessageObject> message = New<JSMessageObject>(map, NEW_SPACE);
+ message->set_properties(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ message->initialize_elements();
+ message->set_elements(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ message->set_type(*type);
+ message->set_arguments(*arguments);
+ message->set_start_position(start_position);
+ message->set_end_position(end_position);
+ message->set_script(*script);
+ message->set_stack_frames(*stack_frames);
+ return message;
}
-Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
- LanguageMode language_mode) {
- Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- Handle<Map> map = (language_mode == CLASSIC_MODE)
- ? isolate()->function_without_prototype_map()
- : isolate()->strict_mode_function_without_prototype_map();
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFunction(
- *map,
- *function_share,
- *the_hole_value()),
- JSFunction);
+ MaybeHandle<Code> maybe_code) {
+ Handle<Map> map = shared_function_info_map();
+ Handle<SharedFunctionInfo> share = New<SharedFunctionInfo>(map,
+ OLD_POINTER_SPACE);
+
+ // Set pointer fields.
+ share->set_name(*name);
+ Handle<Code> code;
+ if (!maybe_code.ToHandle(&code)) {
+ code = handle(isolate()->builtins()->builtin(Builtins::kIllegal));
+ }
+ share->set_code(*code);
+ share->set_optimized_code_map(Smi::FromInt(0));
+ share->set_scope_info(ScopeInfo::Empty(isolate()));
+ Code* construct_stub =
+ isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
+ share->set_construct_stub(construct_stub);
+ share->set_instance_class_name(*Object_string());
+ share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
+ share->set_feedback_vector(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ share->set_profiler_ticks(0);
+ share->set_ast_node_count(0);
+ share->set_counters(0);
+
+ // Set integer fields (smi or int, depending on the architecture).
+ share->set_length(0);
+ share->set_formal_parameter_count(0);
+ share->set_expected_nof_properties(0);
+ share->set_num_literals(0);
+ share->set_start_position_and_type(0);
+ share->set_end_position(0);
+ share->set_function_token_position(0);
+ // All compiler hints default to false or 0.
+ share->set_compiler_hints(0);
+ share->set_opt_count_and_bailout_reason(0);
+
+ return share;
+}
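When no code is supplied, the overload above installs the kIllegal builtin rather than leaving the code field uninitialized. A sketch of both call shapes, assuming name and code handles are in scope:

// Sketch: an empty MaybeHandle<Code> means "trap if ever called".
Handle<SharedFunctionInfo> with_code =
    factory->NewSharedFunctionInfo(name, code);
Handle<SharedFunctionInfo> stub_only =
    factory->NewSharedFunctionInfo(name, MaybeHandle<Code>());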
+
+
+static inline int NumberCacheHash(Handle<FixedArray> cache,
+ Handle<Object> number) {
+ int mask = (cache->length() >> 1) - 1;
+ if (number->IsSmi()) {
+ return Handle<Smi>::cast(number)->value() & mask;
+ } else {
+ DoubleRepresentation rep(number->Number());
+ return
+ (static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) & mask;
+ }
}
-Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
- Handle<String> name,
- LanguageMode language_mode) {
- Handle<JSFunction> fun =
- NewFunctionWithoutPrototypeHelper(name, language_mode);
- fun->set_context(isolate()->context()->native_context());
- return fun;
+Handle<Object> Factory::GetNumberStringCache(Handle<Object> number) {
+ DisallowHeapAllocation no_gc;
+ int hash = NumberCacheHash(number_string_cache(), number);
+ Object* key = number_string_cache()->get(hash * 2);
+ if (key == *number || (key->IsHeapNumber() && number->IsHeapNumber() &&
+ key->Number() == number->Number())) {
+ return Handle<String>(
+ String::cast(number_string_cache()->get(hash * 2 + 1)), isolate());
+ }
+ return undefined_value();
+}
+
+
+void Factory::SetNumberStringCache(Handle<Object> number,
+ Handle<String> string) {
+ int hash = NumberCacheHash(number_string_cache(), number);
+ if (number_string_cache()->get(hash * 2) != *undefined_value()) {
+ int full_size = isolate()->heap()->FullSizeNumberStringCacheLength();
+ if (number_string_cache()->length() != full_size) {
+ // The first time we have a hash collision, we move to the full sized
+ // number string cache. The idea is to have a small number string
+ // cache in the snapshot to keep boot-time memory usage down.
+ // If we already have to expand the number string cache while creating
+ // the snapshot, the small snapshot cache did not pay off.
+ ASSERT(!isolate()->serializer_enabled() || FLAG_extra_code != NULL);
+ Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
+ isolate()->heap()->set_number_string_cache(*new_cache);
+ return;
+ }
+ }
+ number_string_cache()->set(hash * 2, *number);
+ number_string_cache()->set(hash * 2 + 1, *string);
}
-Handle<Object> Factory::ToObject(Handle<Object> object) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(isolate()), Object);
-}
+Handle<String> Factory::NumberToString(Handle<Object> number,
+ bool check_number_string_cache) {
+ isolate()->counters()->number_to_string_runtime()->Increment();
+ if (check_number_string_cache) {
+ Handle<Object> cached = GetNumberStringCache(number);
+ if (!cached->IsUndefined()) return Handle<String>::cast(cached);
+ }
+ char arr[100];
+ Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ const char* str;
+ if (number->IsSmi()) {
+ int num = Handle<Smi>::cast(number)->value();
+ str = IntToCString(num, buffer);
+ } else {
+ double num = Handle<HeapNumber>::cast(number)->value();
+ str = DoubleToCString(num, buffer);
+ }
-Handle<Object> Factory::ToObject(Handle<Object> object,
- Handle<Context> native_context) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(*native_context), Object);
+ // We tenure the allocated string since it is referenced from the
+ // number-string cache which lives in the old space.
+ Handle<String> js_string = NewStringFromAsciiChecked(str, TENURED);
+ SetNumberStringCache(number, js_string);
+ return js_string;
}
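GetNumberStringCache, SetNumberStringCache and NumberToString together form a fixed-size, hash-indexed memo table. A round-trip sketch, assuming an internal Factory* factory:

// Sketch: the first call formats and tenures the string; the second,
// cache-checking call returns the very same String object.
Handle<Object> n = factory->NewNumber(42.0);
Handle<String> a = factory->NumberToString(n);
Handle<String> b = factory->NumberToString(n);
ASSERT(*a == *b);  // served from the number-string cache
Handle<String> c = factory->NumberToString(n, false);  // bypass cache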
-#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
// Get the original code of the function.
Handle<Code> code(shared->code());
@@ -1496,7 +2080,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
// debug info object to avoid allocation while setting up the debug info
// object.
Handle<FixedArray> break_points(
- NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction));
+ NewFixedArray(DebugInfo::kEstimatedNofBreakPointsInFunction));
// Create and set up the debug info object. Debug info contains function, a
// copy of the original code, the executing code and initial fixed array for
@@ -1513,7 +2097,6 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
return debug_info;
}
-#endif
Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
@@ -1525,60 +2108,79 @@ Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
Handle<JSFunction> Factory::CreateApiFunction(
- Handle<FunctionTemplateInfo> obj, ApiInstanceType instance_type) {
+ Handle<FunctionTemplateInfo> obj,
+ Handle<Object> prototype,
+ ApiInstanceType instance_type) {
Handle<Code> code = isolate()->builtins()->HandleApiCall();
Handle<Code> construct_stub = isolate()->builtins()->JSConstructStubApi();
- int internal_field_count = 0;
- if (!obj->instance_template()->IsUndefined()) {
- Handle<ObjectTemplateInfo> instance_template =
- Handle<ObjectTemplateInfo>(
- ObjectTemplateInfo::cast(obj->instance_template()));
- internal_field_count =
- Smi::cast(instance_template->internal_field_count())->value();
- }
-
- // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
- // JSObject::GetHeaderSize.
- int instance_size = kPointerSize * internal_field_count;
- InstanceType type;
- switch (instance_type) {
- case JavaScriptObject:
- type = JS_OBJECT_TYPE;
- instance_size += JSObject::kHeaderSize;
- break;
- case InnerGlobalObject:
- type = JS_GLOBAL_OBJECT_TYPE;
- instance_size += JSGlobalObject::kSize;
- break;
- case OuterGlobalObject:
- type = JS_GLOBAL_PROXY_TYPE;
- instance_size += JSGlobalProxy::kSize;
- break;
- default:
- UNREACHABLE();
- type = JS_OBJECT_TYPE; // Keep the compiler happy.
- break;
- }
+ Handle<JSFunction> result;
+ if (obj->remove_prototype()) {
+ result = NewFunctionWithoutPrototype(empty_string(), code);
+ } else {
+ int internal_field_count = 0;
+ if (!obj->instance_template()->IsUndefined()) {
+ Handle<ObjectTemplateInfo> instance_template =
+ Handle<ObjectTemplateInfo>(
+ ObjectTemplateInfo::cast(obj->instance_template()));
+ internal_field_count =
+ Smi::cast(instance_template->internal_field_count())->value();
+ }
+
+ // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
+ // JSObject::GetHeaderSize.
+ int instance_size = kPointerSize * internal_field_count;
+ InstanceType type;
+ switch (instance_type) {
+ case JavaScriptObject:
+ type = JS_OBJECT_TYPE;
+ instance_size += JSObject::kHeaderSize;
+ break;
+ case InnerGlobalObject:
+ type = JS_GLOBAL_OBJECT_TYPE;
+ instance_size += JSGlobalObject::kSize;
+ break;
+ case OuterGlobalObject:
+ type = JS_GLOBAL_PROXY_TYPE;
+ instance_size += JSGlobalProxy::kSize;
+ break;
+ default:
+ UNREACHABLE();
+ type = JS_OBJECT_TYPE; // Keep the compiler happy.
+ break;
+ }
- Handle<JSFunction> result =
- NewFunction(Factory::empty_string(),
- type,
- instance_size,
- code,
- true);
+ result = NewFunction(empty_string(), code, prototype, type,
+ instance_size, obj->read_only_prototype());
+ }
- // Set length.
result->shared()->set_length(obj->length());
-
- // Set class name.
- Handle<Object> class_name = Handle<Object>(obj->class_name(), isolate());
+ Handle<Object> class_name(obj->class_name(), isolate());
if (class_name->IsString()) {
result->shared()->set_instance_class_name(*class_name);
result->shared()->set_name(*class_name);
}
+ result->shared()->set_function_data(*obj);
+ result->shared()->set_construct_stub(*construct_stub);
+ result->shared()->DontAdaptArguments();
+
+ if (obj->remove_prototype()) {
+ ASSERT(result->shared()->IsApiFunction());
+ ASSERT(!result->has_initial_map());
+ ASSERT(!result->has_prototype());
+ return result;
+ }
- Handle<Map> map = Handle<Map>(result->initial_map());
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ handle(JSObject::cast(result->prototype())),
+ constructor_string(),
+ result,
+ DONT_ENUM).Assert();
+
+ // From here on, the code applies only to API functions that can be used
+ // as constructors (i.e. those that do not set the "remove prototype" flag).
+
+ Handle<Map> map(result->initial_map());
// Mark as undetectable if needed.
if (obj->undetectable()) {
@@ -1608,10 +2210,6 @@ Handle<JSFunction> Factory::CreateApiFunction(
map->set_has_instance_call_handler();
}
- result->shared()->set_function_data(*obj);
- result->shared()->set_construct_stub(*construct_stub);
- result->shared()->DontAdaptArguments();
-
// Recursively copy parent instance templates' accessors,
// 'data' may be modified.
int max_number_of_additional_properties = 0;
@@ -1678,7 +2276,7 @@ Handle<JSFunction> Factory::CreateApiFunction(
// Install accumulated static accessors
for (int i = 0; i < valid_descriptors; i++) {
Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
- JSObject::SetAccessor(result, accessor);
+ JSObject::SetAccessor(result, accessor).Assert();
}
ASSERT(result->shared()->IsApiFunction());
@@ -1686,32 +2284,13 @@ Handle<JSFunction> Factory::CreateApiFunction(
}
-Handle<MapCache> Factory::NewMapCache(int at_least_space_for) {
- CALL_HEAP_FUNCTION(isolate(),
- MapCache::Allocate(isolate()->heap(),
- at_least_space_for),
- MapCache);
-}
-
-
-MUST_USE_RESULT static MaybeObject* UpdateMapCacheWith(Context* context,
- FixedArray* keys,
- Map* map) {
- Object* result;
- { MaybeObject* maybe_result =
- MapCache::cast(context->map_cache())->Put(keys, map);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- context->set_map_cache(MapCache::cast(result));
- return result;
-}
-
-
Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
Handle<FixedArray> keys,
Handle<Map> map) {
- CALL_HEAP_FUNCTION(isolate(),
- UpdateMapCacheWith(*context, *keys, *map), MapCache);
+ Handle<MapCache> map_cache = handle(MapCache::cast(context->map_cache()));
+ Handle<MapCache> result = MapCache::Put(map_cache, keys, map);
+ context->set_map_cache(*result);
+ return result;
}
@@ -1719,7 +2298,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
Handle<FixedArray> keys) {
if (context->map_cache()->IsUndefined()) {
// Allocate the new map cache for the native context.
- Handle<MapCache> new_cache = NewMapCache(24);
+ Handle<MapCache> new_cache = MapCache::New(isolate(), 24);
context->set_map_cache(*new_cache);
}
// Check to see whether there is a matching element in the cache.
@@ -1728,11 +2307,10 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
if (result->IsMap()) return Handle<Map>::cast(result);
// Create a new map and add it to the cache.
- Handle<Map> map =
- CopyMap(Handle<Map>(context->object_function()->initial_map()),
- keys->length());
+ Handle<Map> map = Map::Create(
+ handle(context->object_function()), keys->length());
AddToMapCache(context, keys, map);
- return Handle<Map>(map);
+ return map;
}
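Because the cache is keyed by the ordered set of property names, two object literals with the same shape end up sharing a map. A sketch of the expected idempotence, assuming keys is a FixedArray of internalized names:

// Sketch: the second call must hit the cache and return the map the
// first call created and registered via AddToMapCache.
Handle<Map> m1 = factory->ObjectLiteralMapFromCache(context, keys);
Handle<Map> m2 = factory->ObjectLiteralMapFromCache(context, keys);
ASSERT(*m1 == *m2);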
@@ -1772,28 +2350,25 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
-void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
- Handle<JSObject> instance,
- bool* pending_exception) {
+MaybeHandle<FunctionTemplateInfo> Factory::ConfigureInstance(
+ Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance) {
// Configure the instance by adding the properties specified by the
// instance template.
Handle<Object> instance_template(desc->instance_template(), isolate());
if (!instance_template->IsUndefined()) {
- Execution::ConfigureInstance(isolate(),
- instance,
- instance_template,
- pending_exception);
- } else {
- *pending_exception = false;
+ RETURN_ON_EXCEPTION(
+ isolate(),
+ Execution::ConfigureInstance(isolate(), instance, instance_template),
+ FunctionTemplateInfo);
}
+ return desc;
}
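ConfigureInstance now reports failure through an empty MaybeHandle instead of a bool* out-parameter. A sketch of a call site under the new convention, with the surrounding return type assumed:

// Sketch: callers test the MaybeHandle; an empty result means an
// exception is already pending on the isolate.
MaybeHandle<FunctionTemplateInfo> configured =
    factory->ConfigureInstance(desc, instance);
if (configured.is_null()) return MaybeHandle<JSObject>();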
Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
- Heap* h = isolate()->heap();
- if (name->Equals(h->undefined_string())) return undefined_value();
- if (name->Equals(h->nan_string())) return nan_value();
- if (name->Equals(h->infinity_string())) return infinity_value();
+ if (String::Equals(name, undefined_string())) return undefined_value();
+ if (String::Equals(name, nan_string())) return nan_value();
+ if (String::Equals(name, infinity_string())) return infinity_value();
return Handle<Object>::null();
}
diff --git a/chromium/v8/src/factory.h b/chromium/v8/src/factory.h
index 92086d4b304..e22ea8d323d 100644
--- a/chromium/v8/src/factory.h
+++ b/chromium/v8/src/factory.h
@@ -1,50 +1,25 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_FACTORY_H_
#define V8_FACTORY_H_
-#include "globals.h"
-#include "handles.h"
-#include "heap.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
// Interface for handle based allocation.
-class Factory {
+class Factory V8_FINAL {
public:
- // Allocate a new boxed value.
- Handle<Box> NewBox(
- Handle<Object> value,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<Oddball> NewOddball(Handle<Map> map,
+ const char* to_string,
+ Handle<Object> to_number,
+ byte kind);
- // Allocate a new uninitialized fixed array.
+ // Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(
int size,
PretenureFlag pretenure = NOT_TENURED);
@@ -54,56 +29,56 @@ class Factory {
int size,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocates an uninitialized fixed array. It must be filled by the caller.
+ Handle<FixedArray> NewUninitializedFixedArray(int size);
+
// Allocate a new uninitialized fixed double array.
- Handle<FixedDoubleArray> NewFixedDoubleArray(
+ // The function returns a pre-allocated empty fixed array for capacity = 0,
+ // so the return type must be the general fixed array class.
+ Handle<FixedArrayBase> NewFixedDoubleArray(
int size,
PretenureFlag pretenure = NOT_TENURED);
- Handle<ConstantPoolArray> NewConstantPoolArray(
- int number_of_int64_entries,
- int number_of_ptr_entries,
- int number_of_int32_entries);
-
- Handle<SeededNumberDictionary> NewSeededNumberDictionary(
- int at_least_space_for);
-
- Handle<UnseededNumberDictionary> NewUnseededNumberDictionary(
- int at_least_space_for);
+ // Allocate a new fixed double array with hole values.
+ Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(
+ int size,
+ PretenureFlag pretenure = NOT_TENURED);
- Handle<NameDictionary> NewNameDictionary(int at_least_space_for);
+ Handle<ConstantPoolArray> NewConstantPoolArray(
+ const ConstantPoolArray::NumberOfEntries& small);
- Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
+ Handle<ConstantPoolArray> NewExtendedConstantPoolArray(
+ const ConstantPoolArray::NumberOfEntries& small,
+ const ConstantPoolArray::NumberOfEntries& extended);
- Handle<ObjectHashTable> NewObjectHashTable(
- int at_least_space_for,
- MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
+ Handle<OrderedHashSet> NewOrderedHashSet();
+ Handle<OrderedHashMap> NewOrderedHashMap();
- Handle<WeakHashTable> NewWeakHashTable(int at_least_space_for);
+ // Create a new boxed value.
+ Handle<Box> NewBox(Handle<Object> value);
- Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors,
- int slack = 0);
- Handle<DeoptimizationInputData> NewDeoptimizationInputData(
- int deopt_entry_count,
- PretenureFlag pretenure);
- Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
- int deopt_entry_count,
- PretenureFlag pretenure);
- // Allocates a pre-tenured empty AccessorPair.
+ // Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
+ // Create an empty TypeFeedbackInfo.
Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
+ // Finds the internalized copy for string in the string table.
+ // If not found, a new string is added to the table and returned.
Handle<String> InternalizeUtf8String(Vector<const char> str);
Handle<String> InternalizeUtf8String(const char* str) {
return InternalizeUtf8String(CStrVector(str));
}
Handle<String> InternalizeString(Handle<String> str);
Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
- Handle<String> InternalizeOneByteString(Handle<SeqOneByteString>,
- int from,
- int length);
+ Handle<String> InternalizeOneByteString(
+ Handle<SeqOneByteString>, int from, int length);
+
Handle<String> InternalizeTwoByteString(Vector<const uc16> str);
+ template<class StringTableKey>
+ Handle<String> InternalizeStringWithKey(StringTableKey* key);
+
// String creation functions. Most of the string creation functions take
// a Heap::PretenureFlag argument to optionally request that they be
@@ -127,11 +102,45 @@ class Factory {
// two byte.
//
// ASCII strings are pretenured when used as keys in the SourceCodeCache.
- Handle<String> NewStringFromOneByte(
+ MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
Vector<const uint8_t> str,
PretenureFlag pretenure = NOT_TENURED);
+
+ template<size_t N>
+ inline Handle<String> NewStringFromStaticAscii(
+ const char (&str)[N],
+ PretenureFlag pretenure = NOT_TENURED) {
+ ASSERT(N == StrLength(str) + 1);
+ return NewStringFromOneByte(
+ STATIC_ASCII_VECTOR(str), pretenure).ToHandleChecked();
+ }
+
+ inline Handle<String> NewStringFromAsciiChecked(
+ const char* str,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return NewStringFromOneByte(
+ OneByteVector(str), pretenure).ToHandleChecked();
+ }
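The MUST_USE_RESULT MaybeHandle returns force call sites to handle allocation failure explicitly, while the Checked helpers deliberately crash on failure. A sketch of both styles, assuming an internal Factory* factory:

// Sketch: explicit handling versus the check-and-crash convenience.
Handle<String> s;
if (!factory->NewStringFromUtf8(CStrVector("hi")).ToHandle(&s)) {
  // Allocation failed; an exception is pending on the isolate.
}
Handle<String> t = factory->NewStringFromAsciiChecked("hi");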
+
+
+ // Allocates and fully initializes a String. There are two String
+ // encodings: ASCII and two byte. One should choose between the three string
+ // allocation functions based on the encoding of the string buffer used to
+ // initialize the string.
+ // - ...FromAscii initializes the string from a buffer that is ASCII
+ // encoded (it does not check that the buffer is ASCII encoded) and the
+ // result will be ASCII encoded.
+ // - ...FromUTF8 initializes the string from a buffer that is UTF-8
+ // encoded. If the characters are all single-byte characters, the
+ // result will be ASCII encoded, otherwise it will be converted to two
+ // byte.
+ // - ...FromTwoByte initializes the string from a buffer that is two-byte
+ // encoded. If the characters are all single-byte characters, the
+ // result will be converted to ASCII, otherwise it will be left as
+ // two-byte.
+
// TODO(dcarney): remove this function.
- inline Handle<String> NewStringFromAscii(
+ MUST_USE_RESULT inline MaybeHandle<String> NewStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED) {
return NewStringFromOneByte(Vector<const uint8_t>::cast(str), pretenure);
@@ -139,49 +148,78 @@ class Factory {
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
- Handle<String> NewStringFromUtf8(
+ MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- Handle<String> NewStringFromTwoByte(
+ MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocates an internalized string in old space based on the character
+ // stream.
+ MUST_USE_RESULT Handle<String> NewInternalizedStringFromUtf8(
+ Vector<const char> str,
+ int chars,
+ uint32_t hash_field);
+
+ MUST_USE_RESULT Handle<String> NewOneByteInternalizedString(
+ Vector<const uint8_t> str,
+ uint32_t hash_field);
+
+ MUST_USE_RESULT Handle<String> NewTwoByteInternalizedString(
+ Vector<const uc16> str,
+ uint32_t hash_field);
+
+ MUST_USE_RESULT Handle<String> NewInternalizedStringImpl(
+ Handle<String> string, int chars, uint32_t hash_field);
+
+ // Compute the matching internalized string map for a string if possible.
+ // An empty handle is returned if the string is in new space or not
+ // flattened.
+ MUST_USE_RESULT MaybeHandle<Map> InternalizedStringMapForString(
+ Handle<String> string);
+
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- Handle<SeqOneByteString> NewRawOneByteString(
+ MUST_USE_RESULT MaybeHandle<SeqOneByteString> NewRawOneByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
- Handle<SeqTwoByteString> NewRawTwoByteString(
+ MUST_USE_RESULT MaybeHandle<SeqTwoByteString> NewRawTwoByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
+ // Creates a single character string where the character has the given code.
+ // A cache is used for ASCII codes.
+ Handle<String> LookupSingleCharacterStringFromCode(uint32_t code);
+
// Create a new cons string object which consists of a pair of strings.
- Handle<String> NewConsString(Handle<String> first,
- Handle<String> second);
+ MUST_USE_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
+ Handle<String> right);
// Create a new sequential string containing the concatenation of the inputs.
Handle<String> NewFlatConcatString(Handle<String> first,
Handle<String> second);
- // Create a new string object which holds a substring of a string.
- Handle<String> NewSubString(Handle<String> str,
- int begin,
- int end);
-
// Create a new string object which holds a proper substring of a string.
Handle<String> NewProperSubString(Handle<String> str,
int begin,
int end);
+ // Create a new string object which holds a substring of a string.
+ Handle<String> NewSubString(Handle<String> str, int begin, int end) {
+ if (begin == 0 && end == str->length()) return str;
+ return NewProperSubString(str, begin, end);
+ }
+
// Creates a new external String object. There are two String encodings
// in the system: ASCII and two byte. Unlike other String types, it does
// not make sense to have a UTF-8 factory function for external strings,
- // because we cannot change the underlying buffer.
- Handle<String> NewExternalStringFromAscii(
+ // because we cannot change the underlying buffer. Note that these strings
+ // are backed by a string resource that resides outside the V8 heap.
+ MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromAscii(
const ExternalAsciiString::Resource* resource);
- Handle<String> NewExternalStringFromTwoByte(
+ MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource);
// Create a symbol.
@@ -210,20 +248,19 @@ class Factory {
// Create a 'with' context.
Handle<Context> NewWithContext(Handle<JSFunction> function,
Handle<Context> previous,
- Handle<JSObject> extension);
+ Handle<JSReceiver> extension);
// Create a block context.
Handle<Context> NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
- // Return the internalized version of the passed in string.
- Handle<String> InternalizedStringFromString(Handle<String> value);
-
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
+ Handle<CodeCache> NewCodeCache();
+
Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
int aliased_context_slot);
@@ -252,12 +289,18 @@ class Factory {
void* external_pointer,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<FixedTypedArrayBase> NewFixedTypedArray(
+ int length,
+ ExternalArrayType array_type,
+ PretenureFlag pretenure = NOT_TENURED);
+
Handle<Cell> NewCell(Handle<Object> value);
Handle<PropertyCell> NewPropertyCellWithHole();
Handle<PropertyCell> NewPropertyCell(Handle<Object> value);
+ // Allocate a tenured AllocationSite. Its payload is null.
Handle<AllocationSite> NewAllocationSite();
Handle<Map> NewMap(
@@ -265,23 +308,25 @@ class Factory {
int instance_size,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+ Handle<HeapObject> NewFillerObject(int size,
+ bool double_align,
+ AllocationSpace space);
+
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
- Handle<Map> CopyWithPreallocatedFieldDescriptors(Handle<Map> map);
+ Handle<JSObject> CopyJSObject(Handle<JSObject> object);
- // Copy the map adding more inobject properties if possible without
- // overflowing the instance size.
- Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
- Handle<Map> CopyMap(Handle<Map> map);
+ Handle<JSObject> CopyJSObjectWithAllocationSite(Handle<JSObject> object,
+ Handle<AllocationSite> site);
- Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind elements_kind);
+ Handle<FixedArray> CopyFixedArrayWithMap(Handle<FixedArray> array,
+ Handle<Map> map);
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
- Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
- int new_length,
- PretenureFlag pretenure = NOT_TENURED);
+ // This method expects a COW array in new space, and creates a copy
+ // of it in old space.
+ Handle<FixedArray> CopyAndTenureFixedCOWArray(Handle<FixedArray> array);
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
@@ -290,6 +335,7 @@ class Factory {
Handle<ConstantPoolArray> array);
// Numbers (e.g. literals) are pretenured by the parser.
+ // The return value may be a smi or a heap number.
Handle<Object> NewNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
@@ -297,15 +343,23 @@ class Factory {
PretenureFlag pretenure = NOT_TENURED);
Handle<Object> NewNumberFromUint(uint32_t value,
PretenureFlag pretenure = NOT_TENURED);
- inline Handle<Object> NewNumberFromSize(size_t value,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<Object> NewNumberFromSize(size_t value,
+ PretenureFlag pretenure = NOT_TENURED) {
+ if (Smi::IsValid(static_cast<intptr_t>(value))) {
+ return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
+ isolate());
+ }
+ return NewNumber(static_cast<double>(value), pretenure);
+ }
Handle<HeapNumber> NewHeapNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
// These objects are used by the api to create env-independent data
// structures in the heap.
- Handle<JSObject> NewNeanderObject();
+ inline Handle<JSObject> NewNeanderObject() {
+ return NewJSObjectFromMap(neander_map());
+ }
Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
@@ -313,39 +367,72 @@ class Factory {
// runtime.
Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
+ // JSObject that should have a memento pointing to the allocation site.
+ Handle<JSObject> NewJSObjectWithMemento(Handle<JSFunction> constructor,
+ Handle<AllocationSite> site);
// Global objects are pretenured and initialized based on a constructor.
Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- Handle<JSObject> NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure = NOT_TENURED,
- bool allocate_properties = true);
-
- Handle<JSObject> NewJSObjectFromMapForDeoptimizer(
- Handle<Map> map, PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSObject> NewJSObjectFromMap(
+ Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED,
+ bool allocate_properties = true,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
// JS modules are pretenured.
Handle<JSModule> NewJSModule(Handle<Context> context,
Handle<ScopeInfo> scope_info);
// JS arrays are pretenured when allocated by the parser.
+
+ // Create a JSArray with no elements.
+ Handle<JSArray> NewJSArray(
+ ElementsKind elements_kind,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Create a JSArray with a specified length and elements initialized
+ // according to the specified mode.
Handle<JSArray> NewJSArray(
+ ElementsKind elements_kind,
+ int length,
int capacity,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ ArrayStorageAllocationMode mode = INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArray(
+ int capacity,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ PretenureFlag pretenure = NOT_TENURED) {
+ if (capacity != 0) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
+ return NewJSArray(elements_kind, 0, capacity,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, pretenure);
+ }
+
+ // Create a JSArray with the given elements.
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure = NOT_TENURED);
- void SetElementsCapacityAndLength(Handle<JSArray> array,
- int capacity,
- int length);
+ Handle<JSArray> NewJSArrayWithElements(
+ Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return NewJSArrayWithElements(
+ elements, elements_kind, elements->length(), pretenure);
+ }
- void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
+ void NewJSArrayStorage(
+ Handle<JSArray> array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
@@ -355,50 +442,78 @@ class Factory {
Handle<JSDataView> NewJSDataView();
+ // Allocates a Harmony proxy.
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
+ // Allocates a Harmony function proxy.
+ Handle<JSProxy> NewJSFunctionProxy(Handle<Object> handler,
+ Handle<Object> call_trap,
+ Handle<Object> construct_trap,
+ Handle<Object> prototype);
+
+ // Reinitialize a JSReceiver into an (empty) JS object of the respective type and
+ // size, but keeping the original prototype. The receiver must have at least
+ // the size of the new object. The object is reinitialized and behaves as an
+ // object that has been freshly allocated.
+ void ReinitializeJSReceiver(
+ Handle<JSReceiver> object, InstanceType type, int size);
+
+ // Reinitialize a JSGlobalProxy based on a constructor. The object
+ // must have the same size as objects allocated using the
+ // constructor. The object is reinitialized and behaves as an
+ // object that has been freshly allocated using the constructor.
+ void ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> global,
+ Handle<JSFunction> constructor);
+
// Change the type of the argument into a JS object/function and reinitialize.
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
Handle<JSFunction> NewFunction(Handle<String> name,
- Handle<Object> prototype);
-
- Handle<JSFunction> NewFunctionWithoutPrototype(
- Handle<String> name,
- LanguageMode language_mode);
-
- Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
-
- Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Map> function_map,
- PretenureFlag pretenure);
+ Handle<Code> code,
+ Handle<Object> prototype,
+ bool read_only_prototype = false);
+ Handle<JSFunction> NewFunction(Handle<String> name);
+ Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
+ Handle<Code> code);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Context> context,
PretenureFlag pretenure = TENURED);
+ Handle<JSFunction> NewFunction(Handle<String> name,
+ Handle<Code> code,
+ Handle<Object> prototype,
+ InstanceType type,
+ int instance_size,
+ bool read_only_prototype = false);
+ Handle<JSFunction> NewFunction(Handle<String> name,
+ Handle<Code> code,
+ InstanceType type,
+ int instance_size);
+
+ // Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
+ // Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
+ // The reference to the Code object is stored in self_reference.
+ // This allows generated code to reference its own Code object
+ // by containing this handle.
Handle<Code> NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference,
bool immovable = false,
bool crankshafted = false,
- int prologue_offset = Code::kPrologueOffsetNotSet);
+ int prologue_offset = Code::kPrologueOffsetNotSet,
+ bool is_debug = false);
Handle<Code> CopyCode(Handle<Code> code);
Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
- Handle<Object> ToObject(Handle<Object> object);
- Handle<Object> ToObject(Handle<Object> object,
- Handle<Context> native_context);
-
// Interface for creating error objects.
Handle<Object> NewError(const char* maker, const char* message,
@@ -420,39 +535,30 @@ class Factory {
Vector< Handle<Object> > args);
Handle<Object> NewRangeError(Handle<String> message);
+ Handle<Object> NewInvalidStringLengthError() {
+ return NewRangeError("invalid_string_length",
+ HandleVector<Object>(NULL, 0));
+ }
+
Handle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
Handle<Object> NewSyntaxError(Handle<String> message);
Handle<Object> NewReferenceError(const char* message,
Vector< Handle<Object> > args);
+ Handle<Object> NewReferenceError(const char* message, Handle<JSArray> args);
Handle<Object> NewReferenceError(Handle<String> message);
Handle<Object> NewEvalError(const char* message,
Vector< Handle<Object> > args);
+ Handle<JSObject> NewIteratorResultObject(Handle<Object> value, bool done);
- Handle<JSFunction> NewFunction(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<Code> code,
- bool force_initial_map);
-
- Handle<JSFunction> NewFunction(Handle<Map> function_map,
- Handle<SharedFunctionInfo> shared, Handle<Object> prototype);
-
-
- Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Handle<Code> code,
- bool force_initial_map);
-
- Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code);
+ Handle<String> NumberToString(Handle<Object> number,
+ bool check_number_string_cache = true);
- Handle<String> NumberToString(Handle<Object> number);
- Handle<String> Uint32ToString(uint32_t value);
+ Handle<String> Uint32ToString(uint32_t value) {
+ return NumberToString(NewNumberFromUint(value));
+ }
enum ApiInstanceType {
JavaScriptObject,
@@ -462,6 +568,7 @@ class Factory {
Handle<JSFunction> CreateApiFunction(
Handle<FunctionTemplateInfo> data,
+ Handle<Object> prototype,
ApiInstanceType type = JavaScriptObject);
Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
@@ -469,9 +576,8 @@ class Factory {
// Installs interceptors on the instance. 'desc' is a function template,
// and instance is an object instance created by the function of this
// function template.
- void ConfigureInstance(Handle<FunctionTemplateInfo> desc,
- Handle<JSObject> instance,
- bool* pending_exception);
+ MUST_USE_RESULT MaybeHandle<FunctionTemplateInfo> ConfigureInstance(
+ Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance);
#define ROOT_ACCESSOR(type, name, camel_name) \
inline Handle<type> name() { \
@@ -497,40 +603,38 @@ class Factory {
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR
+ inline void set_string_table(Handle<StringTable> table) {
+ isolate()->heap()->set_string_table(*table);
+ }
+
Handle<String> hidden_string() {
return Handle<String>(&isolate()->heap()->hidden_string_);
}
+ // Allocates a new SharedFunctionInfo object.
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
bool is_generator,
Handle<Code> code,
- Handle<ScopeInfo> scope_info);
- Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
+ Handle<ScopeInfo> scope_info,
+ Handle<FixedArray> feedback_vector);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
+ MaybeHandle<Code> code);
+
+ // Allocate a new type feedback vector.
+ Handle<FixedArray> NewTypeFeedbackVector(int slot_count);
+ // Allocates a new JSMessageObject object.
Handle<JSMessageObject> NewJSMessageObject(
Handle<String> type,
Handle<JSArray> arguments,
int start_position,
int end_position,
Handle<Object> script,
- Handle<Object> stack_trace,
Handle<Object> stack_frames);
- Handle<SeededNumberDictionary> DictionaryAtNumberPut(
- Handle<SeededNumberDictionary>,
- uint32_t key,
- Handle<Object> value);
-
- Handle<UnseededNumberDictionary> DictionaryAtNumberPut(
- Handle<UnseededNumberDictionary>,
- uint32_t key,
- Handle<Object> value);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
-#endif
// Return a map using the map cache in the native context.
// The key is an ordered set of property names.
@@ -564,12 +668,19 @@ class Factory {
private:
Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
- Handle<JSFunction> NewFunctionHelper(Handle<String> name,
- Handle<Object> prototype);
+ // Creates a heap object based on the map. The fields of the heap object are
+ // not initialized by New<>() functions. It's the responsibility of the caller
+ // to do that.
+ template<typename T>
+ Handle<T> New(Handle<Map> map, AllocationSpace space);
- Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
- Handle<String> name,
- LanguageMode language_mode);
+ template<typename T>
+ Handle<T> New(Handle<Map> map,
+ AllocationSpace space,
+ Handle<AllocationSite> allocation_site);
+
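
The comment above describes a two-phase contract: New<>() stamps the map and nothing else. A standalone caricature of that contract, with all types and names invented for illustration:

#include <cstdio>
#include <new>

struct Map { const char* instance_type; };
struct FakeHeapObject { const Map* map; int field0; };

// Phase 1: allocate storage and set the map, deliberately leaving the
// remaining fields uninitialized, as the New<>() contract above states.
FakeHeapObject* NewWithMap(const Map* map) {
  FakeHeapObject* object =
      static_cast<FakeHeapObject*>(operator new(sizeof(FakeHeapObject)));
  object->map = map;
  return object;
}

int main() {
  static const Map kJSObjectMap = {"JS_OBJECT_TYPE"};
  FakeHeapObject* object = NewWithMap(&kJSObjectMap);
  object->field0 = 0;  // Phase 2: the caller initializes every field.
  std::printf("%s field0=%d\n", object->map->instance_type, object->field0);
  operator delete(object);
  return 0;
}
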
+ // Creates a code object that is not yet fully initialized.
+ inline Handle<Code> NewCodeRaw(int object_size, bool immovable);
// Create a new map cache.
Handle<MapCache> NewMapCache(int at_least_space_for);
@@ -578,19 +689,32 @@ class Factory {
Handle<MapCache> AddToMapCache(Handle<Context> context,
Handle<FixedArray> keys,
Handle<Map> map);
-};
-
-
-Handle<Object> Factory::NewNumberFromSize(size_t value,
- PretenureFlag pretenure) {
- if (Smi::IsValid(static_cast<intptr_t>(value))) {
- return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
- isolate());
- } else {
- return NewNumber(static_cast<double>(value), pretenure);
- }
-}
+ // Attempts to find the number in a small cache. If found, returns the
+ // string representation of the number; otherwise returns undefined.
+ Handle<Object> GetNumberStringCache(Handle<Object> number);
+
+ // Update the cache with a new number-string pair.
+ void SetNumberStringCache(Handle<Object> number, Handle<String> string);
+
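
These two helpers split the number-to-string cache protocol into a lookup and an update step. A self-contained analog of that protocol (not V8's implementation; the cache size, hashing, and the empty-string miss sentinel are invented here):

#include <cmath>
#include <cstdio>
#include <string>

// Toy number-to-string cache with the same lookup/update split as the
// helpers above. V8 keys a FixedArray by a hash of the Smi/double value.
class NumberStringCache {
 public:
  // Returns the cached string, or "" on a miss (standing in for the
  // undefined sentinel mentioned above).
  std::string Get(double number) const {
    const Entry& entry = entries_[Hash(number)];
    return (entry.valid && entry.number == number) ? entry.string
                                                   : std::string();
  }
  void Set(double number, const std::string& string) {
    entries_[Hash(number)] = Entry{true, number, string};
  }

 private:
  struct Entry {
    bool valid;
    double number;
    std::string string;
  };
  static const size_t kSize = 64;
  static size_t Hash(double number) {
    return static_cast<size_t>(std::fabs(number)) % kSize;
  }
  Entry entries_[kSize] = {};
};

int main() {
  NumberStringCache cache;
  if (cache.Get(42).empty()) cache.Set(42, "42");  // miss, then fill
  std::printf("%s\n", cache.Get(42).c_str());      // hit: prints 42
  return 0;
}
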
+ // Initializes a function with a shared part and prototype.
+ // Note: this code was factored out of NewFunction such that other parts of
+ // the VM could use it. Specifically, a function that creates instances of
+ // type JS_FUNCTION_TYPE benefits from the use of this function.
+ inline void InitializeFunction(Handle<JSFunction> function,
+ Handle<SharedFunctionInfo> info,
+ Handle<Context> context);
+
+ // Creates a function initialized with a shared part.
+ Handle<JSFunction> NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Context> context,
+ PretenureFlag pretenure = TENURED);
+
+ Handle<JSFunction> NewFunction(Handle<Map> map,
+ Handle<String> name,
+ MaybeHandle<Code> maybe_code);
+};
} } // namespace v8::internal
diff --git a/chromium/v8/src/fast-dtoa.cc b/chromium/v8/src/fast-dtoa.cc
index e62bd01fbb5..919023cd0af 100644
--- a/chromium/v8/src/fast-dtoa.cc
+++ b/chromium/v8/src/fast-dtoa.cc
@@ -1,39 +1,16 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "utils.h"
-
-#include "fast-dtoa.h"
-
-#include "cached-powers.h"
-#include "diy-fp.h"
-#include "double.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8stdint.h"
+#include "src/checks.h"
+#include "src/utils.h"
+
+#include "src/fast-dtoa.h"
+
+#include "src/cached-powers.h"
+#include "src/diy-fp.h"
+#include "src/double.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/fast-dtoa.h b/chromium/v8/src/fast-dtoa.h
index ef285579341..d96c296f157 100644
--- a/chromium/v8/src/fast-dtoa.h
+++ b/chromium/v8/src/fast-dtoa.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_FAST_DTOA_H_
#define V8_FAST_DTOA_H_
diff --git a/chromium/v8/src/feedback-slots.h b/chromium/v8/src/feedback-slots.h
new file mode 100644
index 00000000000..9951fc8fdcc
--- /dev/null
+++ b/chromium/v8/src/feedback-slots.h
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FEEDBACK_SLOTS_H_
+#define V8_FEEDBACK_SLOTS_H_
+
+#include "src/v8.h"
+
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+class FeedbackSlotInterface {
+ public:
+ static const int kInvalidFeedbackSlot = -1;
+
+ virtual ~FeedbackSlotInterface() {}
+
+ virtual int ComputeFeedbackSlotCount() = 0;
+ virtual void SetFirstFeedbackSlot(int slot) = 0;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_FEEDBACK_SLOTS_H_
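
To make the intended use concrete, here is a hypothetical AST-node implementation of the interface together with the slot-numbering step that drives it; the interface is re-declared so the sketch stands alone, and the call-node details are invented:

#include <cassert>

class FeedbackSlotInterface {
 public:
  static const int kInvalidFeedbackSlot = -1;
  virtual ~FeedbackSlotInterface() {}
  virtual int ComputeFeedbackSlotCount() = 0;
  virtual void SetFirstFeedbackSlot(int slot) = 0;
};

class HypotheticalCallNode : public FeedbackSlotInterface {
 public:
  virtual int ComputeFeedbackSlotCount() { return 1; }  // one IC slot per call
  virtual void SetFirstFeedbackSlot(int slot) { slot_ = slot; }
  int slot() const { return slot_; }

 private:
  int slot_ = kInvalidFeedbackSlot;
};

int main() {
  HypotheticalCallNode call;
  int next_slot = 0;                            // a numbering pass walks the
  int count = call.ComputeFeedbackSlotCount();  // AST, reserving slots and
  call.SetFirstFeedbackSlot(next_slot);         // assigning the first index
  next_slot += count;
  assert(call.slot() == 0 && next_slot == 1);
  return 0;
}
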
diff --git a/chromium/v8/src/field-index-inl.h b/chromium/v8/src/field-index-inl.h
new file mode 100644
index 00000000000..d3bf94aec8e
--- /dev/null
+++ b/chromium/v8/src/field-index-inl.h
@@ -0,0 +1,98 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FIELD_INDEX_INL_H_
+#define V8_FIELD_INDEX_INL_H_
+
+#include "src/field-index.h"
+
+namespace v8 {
+namespace internal {
+
+
+inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Map* map) {
+ ASSERT((offset % kPointerSize) == 0);
+ int index = offset / kPointerSize;
+ if (map == NULL) {
+ return FieldIndex(true, index, false, index + 1, 0, true);
+ }
+ int first_inobject_offset = map->GetInObjectPropertyOffset(0);
+ if (offset < first_inobject_offset) {
+ return FieldIndex(true, index, false, 0, 0, true);
+ } else {
+ return FieldIndex::ForPropertyIndex(map, offset / kPointerSize);
+ }
+}
+
+
+inline FieldIndex FieldIndex::ForPropertyIndex(Map* map,
+ int property_index,
+ bool is_double) {
+ ASSERT(map->instance_type() >= FIRST_NONSTRING_TYPE);
+ int inobject_properties = map->inobject_properties();
+ bool is_inobject = property_index < inobject_properties;
+ int first_inobject_offset;
+ if (is_inobject) {
+ first_inobject_offset = map->GetInObjectPropertyOffset(0);
+ } else {
+ first_inobject_offset = FixedArray::kHeaderSize;
+ property_index -= inobject_properties;
+ }
+ return FieldIndex(is_inobject,
+ property_index + first_inobject_offset / kPointerSize,
+ is_double, inobject_properties, first_inobject_offset);
+}
+
+
+inline FieldIndex FieldIndex::ForLoadByFieldIndex(Map* map, int orig_index) {
+ int field_index = orig_index;
+ bool is_inobject = true;
+ bool is_double = field_index & 1;
+ int first_inobject_offset = 0;
+ field_index >>= 1;
+ if (field_index < 0) {
+ field_index = -(field_index + 1);
+ is_inobject = false;
+ first_inobject_offset = FixedArray::kHeaderSize;
+ field_index += FixedArray::kHeaderSize / kPointerSize;
+ } else {
+ first_inobject_offset = map->GetInObjectPropertyOffset(0);
+ field_index += JSObject::kHeaderSize / kPointerSize;
+ }
+ return FieldIndex(is_inobject, field_index, is_double,
+ map->inobject_properties(), first_inobject_offset);
+}
+
+
+inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) {
+ PropertyDetails details =
+ map->instance_descriptors()->GetDetails(descriptor_index);
+ int field_index =
+ map->instance_descriptors()->GetFieldIndex(descriptor_index);
+ return ForPropertyIndex(map, field_index,
+ details.representation().IsDouble());
+}
+
+
+inline FieldIndex FieldIndex::ForKeyedLookupCacheIndex(Map* map, int index) {
+ if (FLAG_compiled_keyed_generic_loads) {
+ return ForLoadByFieldIndex(map, index);
+ } else {
+ return ForPropertyIndex(map, index);
+ }
+}
+
+
+inline int FieldIndex::GetKeyedLookupCacheIndex() const {
+ if (FLAG_compiled_keyed_generic_loads) {
+ return GetLoadByFieldIndex();
+ } else {
+ return property_index();
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif  // V8_FIELD_INDEX_INL_H_
diff --git a/chromium/v8/src/field-index.cc b/chromium/v8/src/field-index.cc
new file mode 100644
index 00000000000..5392afc9f2c
--- /dev/null
+++ b/chromium/v8/src/field-index.cc
@@ -0,0 +1,23 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/field-index.h"
+#include "src/objects.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+FieldIndex FieldIndex::ForLookupResult(const LookupResult* lookup_result) {
+ Map* map = lookup_result->holder()->map();
+ return ForPropertyIndex(map,
+ lookup_result->GetFieldIndexFromMap(map),
+ lookup_result->representation().IsDouble());
+}
+
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/field-index.h b/chromium/v8/src/field-index.h
new file mode 100644
index 00000000000..0f77c8c57e4
--- /dev/null
+++ b/chromium/v8/src/field-index.h
@@ -0,0 +1,119 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FIELD_INDEX_H_
+#define V8_FIELD_INDEX_H_
+
+#include "src/utils.h"
+#include "src/property-details.h"
+
+namespace v8 {
+namespace internal {
+
+class Map;
+
+// Wrapper class to hold a field index, usually but not necessarily generated
+// from a property index. When available, the wrapper class captures additional
+// information to allow the field index to be translated back into the property
+// index it was originally generated from.
+class FieldIndex V8_FINAL {
+ public:
+ static FieldIndex ForPropertyIndex(Map* map,
+ int index,
+ bool is_double = false);
+ static FieldIndex ForInObjectOffset(int offset, Map* map = NULL);
+ static FieldIndex ForLookupResult(const LookupResult* result);
+ static FieldIndex ForDescriptor(Map* map, int descriptor_index);
+ static FieldIndex ForLoadByFieldIndex(Map* map, int index);
+ static FieldIndex ForKeyedLookupCacheIndex(Map* map, int index);
+
+ bool is_inobject() const {
+ return IsInObjectBits::decode(bit_field_);
+ }
+
+ bool is_double() const {
+ return IsDoubleBits::decode(bit_field_);
+ }
+
+ int offset() const {
+ return index() * kPointerSize;
+ }
+
+ int index() const {
+ return IndexBits::decode(bit_field_);
+ }
+
+ int outobject_array_index() const {
+ ASSERT(!is_inobject());
+ return index() - first_inobject_property_offset() / kPointerSize;
+ }
+
+ int property_index() const {
+ ASSERT(!IsHiddenField::decode(bit_field_));
+ int result = index() - first_inobject_property_offset() / kPointerSize;
+ if (!is_inobject()) {
+ result += InObjectPropertyBits::decode(bit_field_);
+ }
+ return result;
+ }
+
+ int GetLoadByFieldIndex() const {
+ // For efficiency, the LoadByFieldIndex instruction takes an index that is
+ // optimized for quick access. If the property is inline, the index is
+ // positive. If it's out-of-line, the encoded index is -raw_index - 1 to
+ // disambiguate the zero out-of-line index from the zero inobject case.
+ // The index itself is shifted up by one bit, the lower-most bit
+ // signifying if the field is a mutable double box (1) or not (0).
+ int result = index() - first_inobject_property_offset() / kPointerSize;
+ if (!is_inobject()) {
+ result = -result - 1;
+ }
+ result <<= 1;
+ return is_double() ? (result | 1) : result;
+ }
+
+ int GetKeyedLookupCacheIndex() const;
+
+ int GetLoadFieldStubKey() const {
+ return bit_field_ &
+ (IsInObjectBits::kMask | IsDoubleBits::kMask | IndexBits::kMask);
+ }
+
+ private:
+ FieldIndex(bool is_inobject, int local_index, bool is_double,
+ int inobject_properties, int first_inobject_property_offset,
+ bool is_hidden = false) {
+ ASSERT((first_inobject_property_offset & (kPointerSize - 1)) == 0);
+ bit_field_ = IsInObjectBits::encode(is_inobject) |
+ IsDoubleBits::encode(is_double) |
+ FirstInobjectPropertyOffsetBits::encode(first_inobject_property_offset) |
+ IsHiddenField::encode(is_hidden) |
+ IndexBits::encode(local_index) |
+ InObjectPropertyBits::encode(inobject_properties);
+ }
+
+ int first_inobject_property_offset() const {
+ ASSERT(!IsHiddenField::decode(bit_field_));
+ return FirstInobjectPropertyOffsetBits::decode(bit_field_);
+ }
+
+ static const int kIndexBitsSize = kDescriptorIndexBitCount + 1;
+
+ class IndexBits: public BitField<int, 0, kIndexBitsSize> {};
+ class IsInObjectBits: public BitField<bool, IndexBits::kNext, 1> {};
+ class IsDoubleBits: public BitField<bool, IsInObjectBits::kNext, 1> {};
+ class InObjectPropertyBits: public BitField<int, IsDoubleBits::kNext,
+ kDescriptorIndexBitCount> {};
+ class FirstInobjectPropertyOffsetBits:
+ public BitField<int, InObjectPropertyBits::kNext, 7> {};
+ class IsHiddenField:
+ public BitField<bool, FirstInobjectPropertyOffsetBits::kNext, 1> {};
+ STATIC_ASSERT(IsHiddenField::kNext <= 32);
+
+ int bit_field_;
+};
+
+} } // namespace v8::internal
+
+#endif  // V8_FIELD_INDEX_H_
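
The LoadByFieldIndex encoding documented in GetLoadByFieldIndex and decoded in ForLoadByFieldIndex is easy to misread, so here is a self-contained roundtrip of the same scheme (a sketch mirroring the comments, not V8 code; it assumes the usual arithmetic right shift on negative ints):

#include <cassert>
#include <cstdio>

// Encode as described above: out-of-line indices become -raw_index - 1,
// everything is shifted up one bit, and the low bit marks a double box.
int Encode(int raw_index, bool is_inobject, bool is_double) {
  int result = is_inobject ? raw_index : -raw_index - 1;
  result *= 2;  // same effect as the << 1 in GetLoadByFieldIndex
  return is_double ? (result | 1) : result;
}

void Decode(int encoded, int* raw_index, bool* is_inobject, bool* is_double) {
  *is_double = (encoded & 1) != 0;
  encoded >>= 1;  // arithmetic shift keeps the sign, as ForLoadByFieldIndex
  *is_inobject = encoded >= 0;
  *raw_index = *is_inobject ? encoded : -(encoded + 1);
}

int main() {
  int raw;
  bool inobject, is_double;
  Decode(Encode(3, false, true), &raw, &inobject, &is_double);
  assert(raw == 3 && !inobject && is_double);
  Decode(Encode(0, true, false), &raw, &inobject, &is_double);
  assert(raw == 0 && inobject && !is_double);
  std::printf("roundtrip ok\n");
  return 0;
}
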
diff --git a/chromium/v8/src/fixed-dtoa.cc b/chromium/v8/src/fixed-dtoa.cc
index fd90eca901c..4541e856f8c 100644
--- a/chromium/v8/src/fixed-dtoa.cc
+++ b/chromium/v8/src/fixed-dtoa.cc
@@ -1,38 +1,15 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <cmath>
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "utils.h"
+#include "include/v8stdint.h"
+#include "src/checks.h"
+#include "src/utils.h"
-#include "double.h"
-#include "fixed-dtoa.h"
+#include "src/double.h"
+#include "src/fixed-dtoa.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/fixed-dtoa.h b/chromium/v8/src/fixed-dtoa.h
index 93f826fe841..b6495c11ef1 100644
--- a/chromium/v8/src/fixed-dtoa.h
+++ b/chromium/v8/src/fixed-dtoa.h
@@ -1,29 +1,6 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_FIXED_DTOA_H_
#define V8_FIXED_DTOA_H_
diff --git a/chromium/v8/src/flag-definitions.h b/chromium/v8/src/flag-definitions.h
index 405a351562e..1d83481f9e4 100644
--- a/chromium/v8/src/flag-definitions.h
+++ b/chromium/v8/src/flag-definitions.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This file defines all of the flags. It is separated into different sections,
// for Debug, Release, Logging and Profiling, etc. To add a new flag, find the
@@ -69,6 +46,9 @@
#define DEFINE_implication(whenflag, thenflag) \
if (FLAG_##whenflag) FLAG_##thenflag = true;
+#define DEFINE_neg_implication(whenflag, thenflag) \
+ if (FLAG_##whenflag) FLAG_##thenflag = false;
+
#else
#error No mode supplied when including flag-definitions.h
#endif
@@ -90,6 +70,10 @@
#define DEFINE_implication(whenflag, thenflag)
#endif
+#ifndef DEFINE_neg_implication
+#define DEFINE_neg_implication(whenflag, thenflag)
+#endif
+
#define COMMA ,
#ifdef FLAG_MODE_DECLARE
@@ -136,6 +120,11 @@ struct MaybeBoolFlag {
#else
# define ENABLE_32DREGS_DEFAULT false
#endif
+#if (defined CAN_USE_NEON) || !(defined ARM_TEST)
+# define ENABLE_NEON_DEFAULT true
+#else
+# define ENABLE_NEON_DEFAULT false
+#endif
#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
#define DEFINE_maybe_bool(nam, cmt) FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, \
@@ -160,23 +149,16 @@ struct MaybeBoolFlag {
// Flags for language modes and experimental language features.
DEFINE_bool(use_strict, false, "enforce strict mode")
-DEFINE_bool(es5_readonly, true,
- "activate correct semantics for inheriting readonliness")
-DEFINE_bool(es52_globals, true,
- "activate new semantics for global var declarations")
+DEFINE_bool(es_staging, false, "enable upcoming ES6+ features")
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
DEFINE_bool(harmony_modules, false,
"enable harmony modules (implies block scoping)")
-DEFINE_bool(harmony_symbols, false,
- "enable harmony symbols (a.k.a. private names)")
-DEFINE_bool(harmony_promises, false, "enable harmony promises")
+DEFINE_bool(harmony_symbols, false, "enable harmony symbols")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
- "enable harmony collections (sets, maps, and weak maps)")
-DEFINE_bool(harmony_observation, false,
- "enable harmony object observation (implies harmony collections")
+ "enable harmony collections (sets, maps)")
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony_numeric_literals, false,
@@ -185,38 +167,44 @@ DEFINE_bool(harmony_strings, false, "enable harmony string")
DEFINE_bool(harmony_arrays, false, "enable harmony arrays")
DEFINE_bool(harmony_maths, false, "enable harmony math functions")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
+
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
-DEFINE_implication(harmony, harmony_symbols)
-DEFINE_implication(harmony, harmony_promises)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
-DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony, harmony_numeric_literals)
DEFINE_implication(harmony, harmony_strings)
DEFINE_implication(harmony, harmony_arrays)
-DEFINE_implication(harmony, harmony_maths)
-DEFINE_implication(harmony_promises, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
-DEFINE_implication(harmony_observation, harmony_collections)
+DEFINE_implication(harmony_collections, harmony_symbols)
+DEFINE_implication(harmony_generators, harmony_symbols)
+DEFINE_implication(harmony_iteration, harmony_symbols)
+
+DEFINE_implication(harmony, es_staging)
+DEFINE_implication(es_staging, harmony_maths)
+DEFINE_implication(es_staging, harmony_symbols)
+DEFINE_implication(es_staging, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(compiled_keyed_dictionary_loads, true,
"use optimizing compiler to generate keyed dictionary load stubs")
+DEFINE_bool(compiled_keyed_generic_loads, false,
+ "use optimizing compiler to generate keyed generic load stubs")
DEFINE_bool(clever_optimizations, true,
"Optimize object size, Array shift, DOM strings and string +")
-DEFINE_bool(pretenuring, true, "allocate objects in old space")
// TODO(hpayer): We will remove this flag as soon as we have pretenuring
// support for specific allocation sites.
DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
-DEFINE_bool(allocation_site_pretenuring, false,
+DEFINE_bool(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_bool(trace_pretenuring, false,
"trace pretenuring decisions of HAllocate instructions")
+DEFINE_bool(trace_pretenuring_statistics, false,
+ "trace allocation site pretenuring statistics")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
@@ -224,6 +212,9 @@ DEFINE_bool(track_computed_fields, true, "track computed boilerplate fields")
DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)
DEFINE_implication(track_computed_fields, track_fields)
+DEFINE_bool(track_field_types, true, "track field types")
+DEFINE_implication(track_field_types, track_fields)
+DEFINE_implication(track_field_types, track_heap_object_fields)
DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
// Flags for optimization types.
@@ -238,12 +229,15 @@ DEFINE_bool(string_slices, true, "use string slices")
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "*", "optimization filter")
-DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
+DEFINE_int(gvn_iterations, 3, "maximum number of GVN fix-point iterations")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_bool(use_escape_analysis, false, "use hydrogen escape analysis")
+DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
DEFINE_bool(use_allocation_folding, true, "use allocation folding")
+DEFINE_bool(use_local_allocation_folding, false, "only fold in basic blocks")
+DEFINE_bool(use_write_barrier_elimination, true,
+ "eliminate write barriers targeting allocations in optimized code")
DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
@@ -264,11 +258,13 @@ DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_load_elimination, false, "trace load elimination")
+DEFINE_bool(trace_store_elimination, false, "trace store elimination")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
+DEFINE_bool(trace_removable_simulates, false, "trace removable simulates")
DEFINE_bool(trace_escape_analysis, false, "trace hydrogen escape analysis")
DEFINE_bool(trace_allocation_folding, false, "trace allocation folding")
DEFINE_bool(trace_track_allocation_sites, false,
@@ -290,6 +286,7 @@ DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
+DEFINE_bool(trace_bce, false, "trace array bounds check elimination")
DEFINE_bool(array_bounds_checks_hoisting, false,
"perform array bounds checks hoisting")
DEFINE_bool(array_index_dehoisting, true,
@@ -297,13 +294,12 @@ DEFINE_bool(array_index_dehoisting, true,
DEFINE_bool(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
DEFINE_bool(load_elimination, true, "use load elimination")
-DEFINE_bool(check_elimination, false, "use check elimination")
+DEFINE_bool(check_elimination, true, "use check elimination")
+DEFINE_bool(store_elimination, false, "use store elimination")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
DEFINE_bool(unreachable_code_elimination, true, "eliminate unreachable code")
-DEFINE_bool(track_allocation_sites, true,
- "Use allocation site info to reduce transitions")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
@@ -317,7 +313,6 @@ DEFINE_bool(flush_optimized_code_cache, true,
DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
-DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_int(escape_analysis_iterations, 2,
"maximum number of escape analysis fix-point iterations")
@@ -337,7 +332,7 @@ DEFINE_int(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
DEFINE_bool(block_concurrent_recompilation, false,
"block queued jobs until released")
-DEFINE_bool(concurrent_osr, false,
+DEFINE_bool(concurrent_osr, true,
"concurrent on-stack replacement")
DEFINE_implication(concurrent_osr, concurrent_recompilation)
@@ -345,35 +340,18 @@ DEFINE_bool(omit_map_checks_for_leaf_maps, true,
"do not emit check maps for constant values that have a leaf map, "
"deoptimize the optimized code if the layout of the maps changes.")
-DEFINE_bool(new_string_add, false, "enable new string addition")
+DEFINE_int(typed_array_max_size_in_heap, 64,
+ "threshold for in-heap typed array")
-// Experimental profiler changes.
-DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
-DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
+// Profiler flags.
DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
-DEFINE_bool(self_optimization, false,
- "primitive functions trigger their own optimization")
-DEFINE_bool(direct_self_opt, false,
- "call recompile stub directly when self-optimizing")
-DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
-DEFINE_bool(interrupt_at_exit, false,
- "insert an interrupt check at function exit")
-DEFINE_bool(weighted_back_edges, false,
- "weight back edges by jump distance for interrupt triggering")
- // 0x1700 fits in the immediate field of an ARM instruction.
-DEFINE_int(interrupt_budget, 0x1700,
+ // 0x1800 fits in the immediate field of an ARM instruction.
+DEFINE_int(interrupt_budget, 0x1800,
"execution budget before interrupt is triggered")
DEFINE_int(type_info_threshold, 25,
"percentage of ICs that must have type info to allow optimization")
DEFINE_int(self_opt_count, 130, "call count before self-optimization")
-DEFINE_implication(experimental_profiler, watch_ic_patching)
-DEFINE_implication(experimental_profiler, self_optimization)
-// Not implying direct_self_opt here because it seems to be a bad idea.
-DEFINE_implication(experimental_profiler, retry_self_opt)
-DEFINE_implication(experimental_profiler, interrupt_at_exit)
-DEFINE_implication(experimental_profiler, weighted_back_edges)
-
DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing")
DEFINE_implication(trace_opt_verbose, trace_opt)
@@ -381,24 +359,22 @@ DEFINE_implication(trace_opt_verbose, trace_opt)
DEFINE_bool(debug_code, false,
"generate extra code (assertions) for debugging")
DEFINE_bool(code_comments, false, "emit comments in code disassembly")
-DEFINE_bool(enable_sse2, true,
- "enable use of SSE2 instructions if available")
DEFINE_bool(enable_sse3, true,
"enable use of SSE3 instructions if available")
DEFINE_bool(enable_sse4_1, true,
"enable use of SSE4.1 instructions if available")
-DEFINE_bool(enable_cmov, true,
- "enable use of CMOV instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
"enable use of VFP3 instructions if available")
DEFINE_bool(enable_armv7, ENABLE_ARMV7_DEFAULT,
"enable use of ARMv7 instructions if available (ARM only)")
-DEFINE_bool(enable_neon, true,
+DEFINE_bool(enable_neon, ENABLE_NEON_DEFAULT,
"enable use of NEON instructions if available (ARM only)")
DEFINE_bool(enable_sudiv, true,
"enable use of SDIV and UDIV instructions if available (ARM only)")
+DEFINE_bool(enable_mls, true,
+ "enable use of MLS instructions if available (ARM only)")
DEFINE_bool(enable_movw_movt, false,
"enable loading 32-bit constant by means of movw/movt "
"instruction pairs (ARM only)")
@@ -408,19 +384,25 @@ DEFINE_bool(enable_32dregs, ENABLE_32DREGS_DEFAULT,
"enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
+DEFINE_bool(force_long_branches, false,
+ "force all emitted branches to be in long mode (MIPS only)")
+
+// cpu-arm64.cc
+DEFINE_bool(enable_always_align_csp, true,
+ "enable alignment of csp to 16 bytes on platforms which prefer "
+ "the register to always be aligned (ARM64 only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
-#ifdef ADDRESS_SANITIZER
DEFINE_bool(expose_free_buffer, false, "expose freeBuffer extension")
-#endif
DEFINE_bool(expose_gc, false, "expose gc extension")
DEFINE_string(expose_gc_as, NULL,
"expose gc extension under the specified name")
DEFINE_implication(expose_gc_as, expose_gc)
DEFINE_bool(expose_externalize_string, false,
"expose externalize string extension")
+DEFINE_bool(expose_trigger_failure, false, "expose trigger-failure extension")
DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_bool(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
@@ -429,10 +411,6 @@ DEFINE_bool(disable_native_files, false, "disable builtin natives files")
// builtins-ia32.cc
DEFINE_bool(inline_new, true, "use fast inline allocation")
-// checks.cc
-DEFINE_bool(stack_trace_on_abort, true,
- "print a stack trace if an assertion failure occurs")
-
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
@@ -479,11 +457,8 @@ DEFINE_bool(trace_array_abuse, false,
"trace out-of-bounds accesses to all arrays")
DEFINE_implication(trace_array_abuse, trace_js_array_abuse)
DEFINE_implication(trace_array_abuse, trace_external_array_abuse)
-DEFINE_bool(debugger_auto_break, true,
- "automatically set the debug break flag when debugger commands are "
- "in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
-DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
+DEFINE_bool(hard_abort, true, "abort by crashing")
// execution.cc
// Slightly less than 1MB on 64-bit, since Windows' default stack size for
@@ -500,8 +475,13 @@ DEFINE_bool(always_inline_smi_code, false,
"always inline smi code in non-opt code")
// heap.cc
-DEFINE_int(max_new_space_size, 0, "max size of the new generation (in kBytes)")
-DEFINE_int(max_old_space_size, 0, "max size of the old generation (in Mbytes)")
+DEFINE_int(min_semi_space_size, 0,
+ "min size of a semi-space (in MBytes), the new space consists of two"
+ "semi-spaces")
+DEFINE_int(max_semi_space_size, 0,
+ "max size of a semi-space (in MBytes), the new space consists of two"
+ "semi-spaces")
+DEFINE_int(max_old_space_size, 0, "max size of the old space (in Mbytes)")
DEFINE_int(max_executable_size, 0, "max size of executable memory (in Mbytes)")
DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
@@ -521,11 +501,10 @@ DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(trace_fragmentation, false,
"report fragmentation for old pointer and data pages")
-DEFINE_bool(trace_external_memory, false,
- "print amount of external allocated memory after each time "
- "it is adjusted.")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
+DEFINE_bool(weak_embedded_maps_in_ic, true,
+ "make maps embedded in inline cache stubs")
DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
"make maps embedded in optimized code weak")
DEFINE_bool(weak_embedded_objects_in_optimized_code, true,
@@ -544,10 +523,11 @@ DEFINE_bool(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_bool(track_gc_object_stats, false,
"track object counts and memory usage")
-DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
-DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
+DEFINE_bool(parallel_sweeping, false, "enable parallel sweeping")
+DEFINE_bool(concurrent_sweeping, true, "enable concurrent sweeping")
DEFINE_int(sweeper_threads, 0,
"number of parallel and concurrent sweeping threads")
+DEFINE_bool(job_based_sweeping, false, "enable job based sweeping")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
@@ -570,8 +550,6 @@ DEFINE_bool(native_code_counters, false,
// mark-compact.cc
DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(lazy_sweeping, true,
- "Use lazy sweeping for old pointer and data spaces")
DEFINE_bool(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_bool(compact_code_space, true,
@@ -584,6 +562,8 @@ DEFINE_bool(cleanup_code_caches_at_gc, true,
DEFINE_bool(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
+DEFINE_bool(zap_code_space, true,
+ "Zap free memory in code space with 0xCC while sweeping.")
DEFINE_int(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
@@ -595,19 +575,36 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
-// simulator-arm.cc and simulator-mips.cc
+// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
+DEFINE_bool(debug_sim, false, "Enable debugging the simulator")
DEFINE_bool(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
+#ifdef V8_TARGET_ARCH_ARM64
+DEFINE_int(sim_stack_alignment, 16,
+ "Stack alignment in bytes in simulator. This must be a power of two "
+ "and it must be at least 16. 16 is default.")
+#else
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
+#endif
+DEFINE_int(sim_stack_size, 2 * MB / KB,
+ "Stack size of the ARM64 simulator in kBytes (default is 2 MB)")
+DEFINE_bool(log_regs_modified, true,
+ "When logging register values, only print modified registers.")
+DEFINE_bool(log_colour, true,
+ "When logging, try to use coloured output.")
+DEFINE_bool(ignore_asm_unimplemented_break, false,
+ "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
+DEFINE_bool(trace_sim_messages, false,
+ "Trace simulator debug messages. Implied by --trace-sim.")
// isolate.cc
+DEFINE_bool(stack_trace_on_illegal, false,
+ "print stack trace when an illegal exception is thrown")
DEFINE_bool(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
-DEFINE_bool(trace_exception, false,
- "print stack trace when throwing exceptions")
DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
@@ -639,12 +636,25 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
// mksnapshot.cc
DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
- " the snapshot (mksnapshot only)")
+ " the snapshot (mksnapshot only)")
+DEFINE_string(raw_file, NULL, "A file to write the raw snapshot bytes to. "
+ "(mksnapshot only)")
+DEFINE_string(raw_context_file, NULL, "A file to write the raw context "
+ "snapshot bytes to. (mksnapshot only)")
+DEFINE_bool(omit, false, "Omit raw snapshot bytes in generated code. "
+ "(mksnapshot only)")
// code-stubs-hydrogen.cc
DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
"Print the time it takes to lazily compile hydrogen code stubs.")
+DEFINE_bool(predictable, false, "enable predictable mode")
+DEFINE_neg_implication(predictable, concurrent_recompilation)
+DEFINE_neg_implication(predictable, concurrent_osr)
+DEFINE_neg_implication(predictable, concurrent_sweeping)
+DEFINE_neg_implication(predictable, parallel_sweeping)
+
+
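
The DEFINE_neg_implication macro added near the top of this file is what powers the --predictable block above: under FLAG_MODE_DEFINE_IMPLICATIONS each line expands to a plain if statement inside FlagList::EnforceFlagImplications. A stripped-down model of that expansion (flag names copied from above, the rest illustrative):

#include <cstdio>

// Stand-ins for the flag variables this file generates.
bool FLAG_predictable = true;     // as if --predictable was passed
bool FLAG_concurrent_osr = true;  // its default from this file

#define DEFINE_neg_implication(whenflag, thenflag) \
  if (FLAG_##whenflag) FLAG_##thenflag = false;

// flags.cc re-includes the definitions under FLAG_MODE_DEFINE_IMPLICATIONS
// inside a function body, so each line becomes a statement like this:
void EnforceFlagImplications() {
  DEFINE_neg_implication(predictable, concurrent_osr)
}

int main() {
  EnforceFlagImplications();
  std::printf("concurrent_osr: %d\n", FLAG_concurrent_osr);  // prints 0
  return 0;
}
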
//
// Dev shell flags
//
@@ -652,29 +662,12 @@ DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
DEFINE_bool(help, false, "Print usage message, including flags, on console")
DEFINE_bool(dump_counters, false, "Dump counters on exit")
-#ifdef ENABLE_DEBUGGER_SUPPORT
DEFINE_bool(debugger, false, "Enable JavaScript debugger")
-DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
- "debugger agent in another process")
-DEFINE_bool(debugger_agent, false, "Enable debugger agent")
-DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-#endif // ENABLE_DEBUGGER_SUPPORT
DEFINE_string(map_counters, "", "Map counters to a file")
DEFINE_args(js_arguments,
"Pass all remaining arguments to the script. Alias for \"--\".")
-#if defined(WEBOS__)
-DEFINE_bool(debug_compile_events, false, "Enable debugger compile events")
-DEFINE_bool(debug_script_collected_events, false,
- "Enable debugger script collected events")
-#else
-DEFINE_bool(debug_compile_events, true, "Enable debugger compile events")
-DEFINE_bool(debug_script_collected_events, true,
- "Enable debugger script collected events")
-#endif
-
-
//
// GDB JIT integration flags.
//
@@ -727,7 +720,6 @@ DEFINE_bool(print_scopes, false, "print scopes")
DEFINE_bool(trace_contexts, false, "trace contexts operations")
// heap.cc
-DEFINE_bool(gc_greedy, false, "perform GC prior to some allocations")
DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
DEFINE_bool(code_stats, false, "report code statistics after GC")
@@ -775,7 +767,6 @@ DEFINE_bool(trace_regexp_assembler, false,
DEFINE_bool(log, false,
"Minimal logging (no API, code, GC, suspect, or handles samples).")
DEFINE_bool(log_all, false, "Log all events to the log file.")
-DEFINE_bool(log_runtime, false, "Activate runtime system %Log call.")
DEFINE_bool(log_api, false, "Log API events to the log file.")
DEFINE_bool(log_code, false,
"Log code events to the log file without profiling.")
@@ -804,6 +795,11 @@ DEFINE_bool(log_timer_events, false,
"Time events including external callbacks.")
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
+DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
+DEFINE_string(log_instruction_file, "arm64_inst.csv",
+ "AArch64 instruction statistics log file.")
+DEFINE_int(log_instruction_period, 1 << 22,
+ "AArch64 instruction statistics logging period.")
DEFINE_bool(redirect_code_traces, false,
"output deopt information and disassembly into file "
@@ -811,6 +807,9 @@ DEFINE_bool(redirect_code_traces, false,
DEFINE_string(redirect_code_traces_to, NULL,
"output deopt information and disassembly into the given file")
+DEFINE_bool(hydrogen_track_positions, false,
+ "track source code positions when building IR")
+
//
// Disassembler only flags
//
@@ -843,8 +842,6 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
"printing optimized code based on it")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
-DEFINE_bool(emit_opt_code_positions, false,
- "annotate optimize code with source code positions")
#ifdef ENABLE_DISASSEMBLER
DEFINE_bool(sodium, false, "print generated code output suitable for use with "
@@ -853,7 +850,7 @@ DEFINE_bool(sodium, false, "print generated code output suitable for use with "
DEFINE_implication(sodium, print_code_stubs)
DEFINE_implication(sodium, print_code)
DEFINE_implication(sodium, print_opt_code)
-DEFINE_implication(sodium, emit_opt_code_positions)
+DEFINE_implication(sodium, hydrogen_track_positions)
DEFINE_implication(sodium, code_comments)
DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
@@ -876,7 +873,7 @@ DEFINE_implication(print_all_code, trace_codegen)
#define FLAG FLAG_READONLY
// assembler-arm.h
-DEFINE_bool(enable_ool_constant_pool, false,
+DEFINE_bool(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
"enable use of out-of-line constant pools (ARM only)")
// Cleanup...
@@ -892,6 +889,7 @@ DEFINE_bool(enable_ool_constant_pool, false,
#undef DEFINE_float
#undef DEFINE_args
#undef DEFINE_implication
+#undef DEFINE_neg_implication
#undef DEFINE_ALIAS_bool
#undef DEFINE_ALIAS_int
#undef DEFINE_ALIAS_string
diff --git a/chromium/v8/src/flags.cc b/chromium/v8/src/flags.cc
index 0c36aed3320..265eb8fd506 100644
--- a/chromium/v8/src/flags.cc
+++ b/chromium/v8/src/flags.cc
@@ -1,53 +1,27 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <ctype.h>
#include <stdlib.h>
-#include "v8.h"
+#include "src/v8.h"
-#include "platform.h"
-#include "smart-pointers.h"
-#include "string-stream.h"
-
-#if V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm-inl.h"
-#endif
+#include "src/assembler.h"
+#include "src/platform.h"
+#include "src/smart-pointers.h"
+#include "src/string-stream.h"
namespace v8 {
namespace internal {
// Define all of our flags.
#define FLAG_MODE_DEFINE
-#include "flag-definitions.h"
+#include "src/flag-definitions.h"
// Define all of our flags default values.
#define FLAG_MODE_DEFINE_DEFAULTS
-#include "flag-definitions.h"
+#include "src/flag-definitions.h"
namespace {
@@ -186,7 +160,7 @@ struct Flag {
Flag flags[] = {
#define FLAG_MODE_META
-#include "flag-definitions.h"
+#include "src/flag-definitions.h"
};
const size_t num_flags = sizeof(flags) / sizeof(*flags);
@@ -331,7 +305,7 @@ static void SplitArgument(const char* arg,
// make a copy so we can NUL-terminate flag name
size_t n = arg - *name;
CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
- OS::MemCopy(buffer, *name, n);
+ MemCopy(buffer, *name, n);
buffer[n] = '\0';
*name = buffer;
// get the value
@@ -501,7 +475,7 @@ static char* SkipBlackSpace(char* p) {
int FlagList::SetFlagsFromString(const char* str, int len) {
// make a 0-terminated copy of str
ScopedVector<char> copy0(len + 1);
- OS::MemCopy(copy0.start(), str, len);
+ MemCopy(copy0.start(), str, len);
copy0[len] = '\0';
// strip leading white space
@@ -543,11 +517,8 @@ void FlagList::ResetAllFlags() {
// static
void FlagList::PrintHelp() {
-#if V8_TARGET_ARCH_ARM
CpuFeatures::PrintTarget();
- CpuFeatures::Probe();
CpuFeatures::PrintFeatures();
-#endif // V8_TARGET_ARCH_ARM
printf("Usage:\n");
printf(" shell [options] -e string\n");
@@ -566,7 +537,7 @@ void FlagList::PrintHelp() {
Flag* f = &flags[i];
SmartArrayPointer<const char> value = ToString(f);
printf(" --%s (%s)\n type: %s default: %s\n",
- f->name(), f->comment(), Type2String(f->type()), *value);
+ f->name(), f->comment(), Type2String(f->type()), value.get());
}
}
@@ -574,7 +545,7 @@ void FlagList::PrintHelp() {
// static
void FlagList::EnforceFlagImplications() {
#define FLAG_MODE_DEFINE_IMPLICATIONS
-#include "flag-definitions.h"
+#include "src/flag-definitions.h"
#undef FLAG_MODE_DEFINE_IMPLICATIONS
}
diff --git a/chromium/v8/src/flags.h b/chromium/v8/src/flags.h
index fe182e5221c..092de21f468 100644
--- a/chromium/v8/src/flags.h
+++ b/chromium/v8/src/flags.h
@@ -1,41 +1,18 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_FLAGS_H_
#define V8_FLAGS_H_
-#include "atomicops.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
// Declare all of our flags.
#define FLAG_MODE_DECLARE
-#include "flag-definitions.h"
+#include "src/flag-definitions.h"
// The global list of all flags.
class FlagList {
@@ -63,7 +40,9 @@ class FlagList {
// --flag=value (non-bool flags only, no spaces around '=')
// --flag value (non-bool flags only)
// -- (equivalent to --js_arguments, captures all remaining args)
- static int SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags);
+ static int SetFlagsFromCommandLine(int* argc,
+ char** argv,
+ bool remove_flags);
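
For context, embedders reach this entry point through the public v8::V8::SetFlagsFromCommandLine, which has the same three-argument shape; a minimal usage sketch (embedder scaffolding assumed, not taken from this patch):

#include <v8.h>

int main(int argc, char* argv[]) {
  // Consumes V8 flags such as --use_strict or --max_old_space_size=512;
  // with remove_flags == true they are stripped from argv so only the
  // embedder's own arguments remain.
  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
  return 0;
}
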
// Set the flag values by parsing the string str. Splits string into argc
// substrings argv[], each of which consists of non-white-space chars,
diff --git a/chromium/v8/src/frames-inl.h b/chromium/v8/src/frames-inl.h
index 2b15bfffab7..02e7fb44da6 100644
--- a/chromium/v8/src/frames-inl.h
+++ b/chromium/v8/src/frames-inl.h
@@ -1,45 +1,26 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_FRAMES_INL_H_
#define V8_FRAMES_INL_H_
-#include "frames.h"
-#include "isolate.h"
-#include "v8memory.h"
+#include "src/frames.h"
+#include "src/isolate.h"
+#include "src/v8memory.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/frames-ia32.h"
+#include "src/ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/frames-x64.h"
+#include "src/x64/frames-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/frames-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/frames-arm.h"
+#include "src/arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/frames-mips.h"
+#include "src/mips/frames-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/frames-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -199,6 +180,11 @@ inline Address StandardFrame::ComputePCAddress(Address fp) {
}
+inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
+ return fp + StandardFrameConstants::kConstantPoolOffset;
+}
+
+
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
Object* marker =
Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
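
Every inline helper in this file follows the same pattern: a frame field lives at a fixed byte offset from the frame pointer, so its address is a single addition and its value a single load. A self-contained sketch of that pattern, with illustrative types rather than V8's:

#include <cstdint>

typedef uint8_t* Address;

// A frame slot is addressed as the frame pointer plus a constant offset.
inline Address SlotAddress(Address fp, int offset) { return fp + offset; }

// Reading the word out of such a slot is one indirection.
inline intptr_t LoadSlot(Address fp, int offset) {
  return *reinterpret_cast<intptr_t*>(SlotAddress(fp, offset));
}

The new ComputeConstantPoolAddress is exactly SlotAddress with kConstantPoolOffset, and IsArgumentsAdaptorFrame is LoadSlot at kContextOffset compared against a marker.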
diff --git a/chromium/v8/src/frames.cc b/chromium/v8/src/frames.cc
index 9549c2db653..7e0079bcf62 100644
--- a/chromium/v8/src/frames.cc
+++ b/chromium/v8/src/frames.cc
@@ -1,42 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "deoptimizer.h"
-#include "frames-inl.h"
-#include "full-codegen.h"
-#include "lazy-instance.h"
-#include "mark-compact.h"
-#include "safepoint-table.h"
-#include "scopeinfo.h"
-#include "string-stream.h"
-#include "vm-state-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/deoptimizer.h"
+#include "src/frames-inl.h"
+#include "src/full-codegen.h"
+#include "src/mark-compact.h"
+#include "src/safepoint-table.h"
+#include "src/scopeinfo.h"
+#include "src/string-stream.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -473,7 +449,7 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {
Address StackFrame::UnpaddedFP() const {
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
if (!is_optimized()) return fp();
int32_t alignment_state = Memory::int32_at(
fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
@@ -531,6 +507,10 @@ void ExitFrame::ComputeCallerState(State* state) const {
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
+ if (FLAG_enable_ool_constant_pool) {
+ state->constant_pool_address = reinterpret_cast<Address*>(
+ fp() + ExitFrameConstants::kConstantPoolOffset);
+ }
}
@@ -544,6 +524,9 @@ void ExitFrame::Iterate(ObjectVisitor* v) const {
// the calling frame.
IteratePc(v, pc_address(), LookupCode());
v->VisitPointer(&code_slot());
+ if (FLAG_enable_ool_constant_pool) {
+ v->VisitPointer(&constant_pool_slot());
+ }
}
@@ -571,6 +554,8 @@ void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->fp = fp;
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(sp - 1 * kPCOnStackSize));
+ state->constant_pool_address =
+ reinterpret_cast<Address*>(fp + ExitFrameConstants::kConstantPoolOffset);
}
@@ -607,6 +592,8 @@ void StandardFrame::ComputeCallerState(State* state) const {
state->fp = caller_fp();
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(ComputePCAddress(fp())));
+ state->constant_pool_address =
+ reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
}
@@ -652,7 +639,7 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
// Number of doubles not known at snapshot time.
- ASSERT(!Serializer::enabled());
+ ASSERT(!isolate()->serializer_enabled());
parameters_base += DoubleRegister::NumAllocatableRegisters() *
kDoubleSize / kPointerSize;
}
@@ -795,7 +782,6 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
bool print_args,
bool print_line_number) {
// constructor calls
- HandleScope scope(isolate);
DisallowHeapAllocation no_allocation;
JavaScriptFrameIterator it(isolate);
while (!it.done()) {
@@ -816,15 +802,15 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
int source_pos = code->SourcePosition(pc);
Object* maybe_script = shared->script();
if (maybe_script->IsScript()) {
- Handle<Script> script(Script::cast(maybe_script));
- int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+ Script* script = Script::cast(maybe_script);
+ int line = script->GetLineNumber(source_pos) + 1;
Object* script_name_raw = script->name();
if (script_name_raw->IsString()) {
String* script_name = String::cast(script->name());
SmartArrayPointer<char> c_script_name =
script_name->ToCString(DISALLOW_NULLS,
ROBUST_STRING_TRAVERSAL);
- PrintF(file, " at %s:%d", *c_script_name, line);
+ PrintF(file, " at %s:%d", c_script_name.get(), line);
} else {
PrintF(file, " at <unknown>:%d", line);
}
@@ -980,13 +966,10 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
it.Next(); // Skip height.
// The translation commands are ordered and the receiver is always
- // at the first position. Since we are always at a call when we need
- // to construct a stack trace, the receiver is always in a stack slot.
+ // at the first position.
+ // If we are at a call, the receiver is always in a stack slot.
+ // Otherwise we are not guaranteed to get the receiver value.
opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::STACK_SLOT ||
- opcode == Translation::LITERAL ||
- opcode == Translation::CAPTURED_OBJECT ||
- opcode == Translation::DUPLICATED_OBJECT);
int index = it.Next();
// Get the correct receiver in the optimized frame.
@@ -1010,6 +993,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
: this->GetParameter(parameter_index);
}
} else {
+ // The receiver is not in a stack slot nor in a literal. We give up.
// TODO(3029): Materializing a captured object (or duplicated
// object) is hard, we return undefined for now. This breaks the
// produced stack trace, as constructor frames aren't marked as
@@ -1160,7 +1144,7 @@ void StackFrame::PrintIndex(StringStream* accumulator,
void JavaScriptFrame::Print(StringStream* accumulator,
PrintMode mode,
int index) const {
- HandleScope scope(isolate());
+ DisallowHeapAllocation no_gc;
Object* receiver = this->receiver();
JSFunction* function = this->function();
@@ -1174,13 +1158,11 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// doesn't contain scope info, scope_info will return 0 for the number of
// parameters, stack local variables, context local variables, stack slots,
// or context slots.
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
-
- Handle<SharedFunctionInfo> shared(function->shared());
- scope_info = Handle<ScopeInfo>(shared->scope_info());
+ SharedFunctionInfo* shared = function->shared();
+ ScopeInfo* scope_info = shared->scope_info();
Object* script_obj = shared->script();
if (script_obj->IsScript()) {
- Handle<Script> script(Script::cast(script_obj));
+ Script* script = Script::cast(script_obj);
accumulator->Add(" [");
accumulator->PrintName(script->name());
@@ -1188,11 +1170,11 @@ void JavaScriptFrame::Print(StringStream* accumulator,
if (code != NULL && code->kind() == Code::FUNCTION &&
pc >= code->instruction_start() && pc < code->instruction_end()) {
int source_pos = code->SourcePosition(pc);
- int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+ int line = script->GetLineNumber(source_pos) + 1;
accumulator->Add(":%d", line);
} else {
int function_start_pos = shared->start_position();
- int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
+ int line = script->GetLineNumber(function_start_pos) + 1;
accumulator->Add(":~%d", line);
}
@@ -1252,6 +1234,10 @@ void JavaScriptFrame::Print(StringStream* accumulator,
if (this->context() != NULL && this->context()->IsContext()) {
context = Context::cast(this->context());
}
+ while (context->IsWithContext()) {
+ context = context->previous();
+ ASSERT(context != NULL);
+ }
// Print heap-allocated local variables.
if (heap_locals_count > 0) {
@@ -1262,8 +1248,9 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->PrintName(scope_info->ContextLocalName(i));
accumulator->Add(" = ");
if (context != NULL) {
- if (i < context->length()) {
- accumulator->Add("%o", context->get(Context::MIN_CONTEXT_SLOTS + i));
+ int index = Context::MIN_CONTEXT_SLOTS + i;
+ if (index < context->length()) {
+ accumulator->Add("%o", context->get(index));
} else {
accumulator->Add(
"// warning: missing context slot - inconsistent frame?");
@@ -1343,7 +1330,7 @@ void EntryFrame::Iterate(ObjectVisitor* v) const {
void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
- const int offset = StandardFrameConstants::kContextOffset;
+ const int offset = StandardFrameConstants::kLastObjectOffset;
Object** base = &Memory::Object_at(sp());
Object** limit = &Memory::Object_at(fp() + offset) + 1;
for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
@@ -1381,7 +1368,7 @@ void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
kFirstRegisterParameterFrameOffset);
v->VisitPointers(base, limit);
base = &Memory::Object_at(fp() + StandardFrameConstants::kMarkerOffset);
- const int offset = StandardFrameConstants::kContextOffset;
+ const int offset = StandardFrameConstants::kLastObjectOffset;
limit = &Memory::Object_at(fp() + offset) + 1;
v->VisitPointers(base, limit);
IteratePc(v, pc_address(), LookupCode());
@@ -1395,19 +1382,14 @@ Address StubFailureTrampolineFrame::GetCallerStackPointer() const {
Code* StubFailureTrampolineFrame::unchecked_code() const {
Code* trampoline;
- StubFailureTrampolineStub(NOT_JS_FUNCTION_STUB_MODE).
- FindCodeInCache(&trampoline, isolate());
- if (trampoline->contains(pc())) {
- return trampoline;
- }
-
- StubFailureTrampolineStub(JS_FUNCTION_STUB_MODE).
- FindCodeInCache(&trampoline, isolate());
+ StubFailureTrampolineStub(isolate(), NOT_JS_FUNCTION_STUB_MODE).
+ FindCodeInCache(&trampoline);
if (trampoline->contains(pc())) {
return trampoline;
}
- StubFailureTailCallTrampolineStub().FindCodeInCache(&trampoline, isolate());
+ StubFailureTrampolineStub(isolate(), JS_FUNCTION_STUB_MODE).
+ FindCodeInCache(&trampoline);
if (trampoline->contains(pc())) {
return trampoline;
}
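
The unchecked_code() rewrite keeps the same lookup shape as before: probe each stub variant's cached code object and return the one whose instruction range contains the current pc. A toy version of that shape (CodeRange is a stand-in, not V8 API):

#include <cstddef>
#include <cstdint>

struct CodeRange {
  uintptr_t start;
  uintptr_t end;
  bool contains(uintptr_t pc) const { return pc >= start && pc < end; }
};

// Return the first candidate whose instruction range covers pc, else NULL.
const CodeRange* FindByPc(uintptr_t pc, const CodeRange* candidates,
                          size_t count) {
  for (size_t i = 0; i < count; ++i) {
    if (candidates[i].contains(pc)) return &candidates[i];
  }
  return NULL;
}

What actually changed is the construction: the stubs now take the isolate in their constructor, FindCodeInCache no longer does, and the tail-call trampoline probe is dropped from this lookup.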
diff --git a/chromium/v8/src/frames.h b/chromium/v8/src/frames.h
index 230144d6800..e80e3392ddd 100644
--- a/chromium/v8/src/frames.h
+++ b/chromium/v8/src/frames.h
@@ -1,41 +1,22 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_FRAMES_H_
#define V8_FRAMES_H_
-#include "allocation.h"
-#include "handles.h"
-#include "safepoint-table.h"
+#include "src/allocation.h"
+#include "src/handles.h"
+#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
+#if V8_TARGET_ARCH_ARM64
+typedef uint64_t RegList;
+#else
typedef uint32_t RegList;
+#endif
// Get the number of registers in a given register list.
int NumRegs(RegList list);
@@ -143,6 +124,7 @@ class StackHandler BASE_EMBEDDED {
inline Kind kind() const;
inline unsigned index() const;
+ inline Object** constant_pool_address() const;
inline Object** context_address() const;
inline Object** code_address() const;
inline void SetFp(Address slot, Address fp);
@@ -167,18 +149,25 @@ class StackHandler BASE_EMBEDDED {
class StandardFrameConstants : public AllStatic {
public:
// Fixed part of the frame consists of return address, caller fp,
- // context and function.
- // StandardFrame::IterateExpressions assumes that kContextOffset is the last
- // object pointer.
- static const int kFixedFrameSizeFromFp = 2 * kPointerSize;
+ // constant pool (if FLAG_enable_ool_constant_pool), context, and function.
+ // StandardFrame::IterateExpressions assumes that kLastObjectOffset is the
+ // last object pointer.
+ static const int kCPSlotSize =
+ FLAG_enable_ool_constant_pool ? kPointerSize : 0;
+ static const int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
kFixedFrameSizeFromFp;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
+ static const int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
+ static const int kMarkerOffset = -2 * kPointerSize - kCPSlotSize;
+ static const int kContextOffset = -1 * kPointerSize - kCPSlotSize;
+ static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
+ -1 * kPointerSize : 0;
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kFPOnStackSize;
static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+
+ static const int kLastObjectOffset = FLAG_enable_ool_constant_pool ?
+ kConstantPoolOffset : kContextOffset;
};
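
To make the new constants concrete, a worked example assuming kPointerSize == kPCOnStackSize == kFPOnStackSize == 8 and FLAG_enable_ool_constant_pool set, so kCPSlotSize == 8:

  kConstantPoolOffset   = -1 * 8     = -8   (slot just below the saved fp)
  kContextOffset        = -1 * 8 - 8 = -16
  kMarkerOffset         = -2 * 8 - 8 = -24
  kExpressionsOffset    = -3 * 8 - 8 = -32
  kLastObjectOffset     = kConstantPoolOffset = -8
  kFixedFrameSizeFromFp = 2 * 8 + 8  = 24
  kFixedFrameSize       = 8 + 8 + 24 = 40

With the flag off, kCPSlotSize is 0 and the layout collapses to the previous -8/-16/-24 arrangement, with kLastObjectOffset equal to kContextOffset.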
@@ -213,10 +202,12 @@ class StackFrame BASE_EMBEDDED {
};
struct State {
- State() : sp(NULL), fp(NULL), pc_address(NULL) { }
+ State() : sp(NULL), fp(NULL), pc_address(NULL),
+ constant_pool_address(NULL) { }
Address sp;
Address fp;
Address* pc_address;
+ Address* constant_pool_address;
};
// Copy constructor; it breaks the connection to host iterator
@@ -258,6 +249,11 @@ class StackFrame BASE_EMBEDDED {
Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; }
+ Address constant_pool() const { return *constant_pool_address(); }
+ void set_constant_pool(ConstantPoolArray* constant_pool) {
+ *constant_pool_address() = reinterpret_cast<Address>(constant_pool);
+ }
+
virtual void SetCallerFp(Address caller_fp) = 0;
// Manually changes value of fp in this object.
@@ -265,6 +261,10 @@ class StackFrame BASE_EMBEDDED {
Address* pc_address() const { return state_.pc_address; }
+ Address* constant_pool_address() const {
+ return state_.constant_pool_address;
+ }
+
// Get the id of this stack frame.
Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
@@ -419,6 +419,7 @@ class ExitFrame: public StackFrame {
virtual Code* unchecked_code() const;
Object*& code_slot() const;
+ Object*& constant_pool_slot() const;
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@@ -483,6 +484,10 @@ class StandardFrame: public StackFrame {
// by the provided frame pointer.
static inline Address ComputePCAddress(Address fp);
+ // Computes the address of the constant pool field in the standard
+ // frame given by the provided frame pointer.
+ static inline Address ComputeConstantPoolAddress(Address fp);
+
// Iterate over expression stack including stack handlers, locals,
// and parts of the fixed part including context and code fields.
void IterateExpressions(ObjectVisitor* v) const;
@@ -602,6 +607,7 @@ class JavaScriptFrame: public StandardFrame {
// Architecture-specific register description.
static Register fp_register();
static Register context_register();
+ static Register constant_pool_pointer_register();
static JavaScriptFrame* cast(StackFrame* frame) {
ASSERT(frame->is_java_script());
@@ -758,6 +764,7 @@ class StubFailureTrampolineFrame: public StandardFrame {
// Architecture-specific register description.
static Register fp_register();
static Register context_register();
+ static Register constant_pool_pointer_register();
protected:
inline explicit StubFailureTrampolineFrame(
@@ -923,13 +930,6 @@ class StackFrameLocator BASE_EMBEDDED {
};
-// Used specify the type of prologue to generate.
-enum PrologueFrameMode {
- BUILD_FUNCTION_FRAME,
- BUILD_STUB_FRAME
-};
-
-
// Reads all frames on the current stack and copies them into the current
// zone memory.
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
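
A short sketch of what NumRegs computes over this representation: each set bit in a RegList names one register, so counting registers is a population count. The Kernighan loop below is illustrative, not necessarily V8's implementation; the wider uint64_t on ARM64 presumably reflects a register file too large for a 32-bit mask.

#include <stdint.h>

typedef uint64_t RegList;  // the ARM64 case; uint32_t elsewhere

int NumRegs(RegList list) {
  int count = 0;
  while (list != 0) {
    list &= list - 1;  // clear the lowest set bit: one register per pass
    ++count;
  }
  return count;
}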
diff --git a/chromium/v8/src/full-codegen.cc b/chromium/v8/src/full-codegen.cc
index 483d1e378d4..0c82eb3d82b 100644
--- a/chromium/v8/src/full-codegen.cc
+++ b/chromium/v8/src/full-codegen.cc
@@ -1,43 +1,20 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "liveedit.h"
-#include "macro-assembler.h"
-#include "prettyprinter.h"
-#include "scopes.h"
-#include "scopeinfo.h"
-#include "snapshot.h"
-#include "stub-cache.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/liveedit.h"
+#include "src/macro-assembler.h"
+#include "src/prettyprinter.h"
+#include "src/scopes.h"
+#include "src/scopeinfo.h"
+#include "src/snapshot.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -312,6 +289,10 @@ void BreakableStatementChecker::VisitThisFunction(ThisFunction* expr) {
bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Isolate* isolate = info->isolate();
+
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_compile_full_code);
+
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
@@ -341,12 +322,9 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
- cgen.PopulateTypeFeedbackCells(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_handler_table(*cgen.handler_table());
-#ifdef ENABLE_DEBUGGER_SUPPORT
code->set_compiled_optimizable(info->IsOptimizable());
-#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
code->set_back_edge_table_offset(table_offset);
@@ -383,13 +361,23 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
}
+void FullCodeGenerator::EnsureSlotContainsAllocationSite(int slot) {
+ Handle<FixedArray> vector = FeedbackVector();
+ if (!vector->get(slot)->IsAllocationSite()) {
+ Handle<AllocationSite> allocation_site =
+ isolate()->factory()->NewAllocationSite();
+ vector->set(slot, *allocation_site);
+ }
+}
+
+
void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
// Fill in the deoptimization information.
ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
if (!info_->HasDeoptimizationSupport()) return;
int length = bailout_entries_.length();
- Handle<DeoptimizationOutputData> data = isolate()->factory()->
- NewDeoptimizationOutputData(length, TENURED);
+ Handle<DeoptimizationOutputData> data =
+ DeoptimizationOutputData::New(isolate(), length, TENURED);
for (int i = 0; i < length; i++) {
data->SetAstId(i, bailout_entries_[i].id);
data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
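
EnsureSlotContainsAllocationSite above is lazy initialization over the new feedback vector: a slot is seeded with a fresh allocation site only the first time it is needed, and later passes reuse it. A toy model of that policy, with hypothetical stand-ins for the heap machinery:

#include <memory>
#include <vector>

struct AllocationSite {};  // stand-in for the heap object

// One lazily filled entry per AST feedback slot.
typedef std::vector<std::shared_ptr<AllocationSite> > FeedbackVector;

void EnsureSlotContainsAllocationSite(FeedbackVector& vector, int slot) {
  if (!vector[slot]) {
    vector[slot] = std::make_shared<AllocationSite>();  // seed exactly once
  }
}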
@@ -407,38 +395,36 @@ void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
void FullCodeGenerator::Initialize() {
+ InitializeAstVisitor(info_->zone());
// The generation of debug code must match between the snapshot code and the
// code that is generated later. This is assumed by the debugger when it is
// calculating PC offsets after generating a debug version of code. Therefore
// we disable the production of debug code in the full compiler if we are
// either generating a snapshot or we booted from a snapshot.
generate_debug_code_ = FLAG_debug_code &&
- !Serializer::enabled() &&
+ !masm_->serializer_enabled() &&
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
- InitializeAstVisitor(info_->isolate());
}
-void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
- if (type_feedback_cells_.is_empty()) return;
- int length = type_feedback_cells_.length();
- int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
- Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
- isolate()->factory()->NewFixedArray(array_size, TENURED));
- for (int i = 0; i < length; i++) {
- cache->SetAstId(i, type_feedback_cells_[i].ast_id);
- cache->SetCell(i, *type_feedback_cells_[i].cell);
- }
- TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
- *cache);
+void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
+ PrepareForBailoutForId(node->id(), state);
}
+void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
+ TypeFeedbackId id) {
+ ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode);
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), extra_state);
+ CallIC(ic, id);
+}
+
-void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
- PrepareForBailoutForId(node->id(), state);
+void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), strict_mode());
+ CallIC(ic, id);
}
@@ -466,20 +452,16 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
unsigned pc_and_state =
StateField::encode(state) | PcField::encode(masm_->pc_offset());
ASSERT(Smi::IsValid(pc_and_state));
+#ifdef DEBUG
+ for (int i = 0; i < bailout_entries_.length(); ++i) {
+ ASSERT(bailout_entries_[i].id != id);
+ }
+#endif
BailoutEntry entry = { id, pc_and_state };
- ASSERT(!prepared_bailout_ids_.Contains(id.ToInt()));
- prepared_bailout_ids_.Add(id.ToInt(), zone());
bailout_entries_.Add(entry, zone());
}
-void FullCodeGenerator::RecordTypeFeedbackCell(
- TypeFeedbackId id, Handle<Cell> cell) {
- TypeFeedbackCellEntry entry = { id, cell };
- type_feedback_cells_.Add(entry, zone());
-}
-
-
void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
@@ -617,7 +599,7 @@ void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
ASSERT(scope->interface()->Index() >= 0);
__ Push(Smi::FromInt(scope->interface()->Index()));
__ Push(scope->GetScopeInfo());
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
@@ -757,7 +739,7 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
ASSERT(interface->Index() >= 0);
__ Push(Smi::FromInt(interface->Index()));
__ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
@@ -808,10 +790,10 @@ void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
int FullCodeGenerator::DeclareGlobalsFlags() {
- ASSERT(DeclareGlobalsLanguageMode::is_valid(language_mode()));
+ ASSERT(DeclareGlobalsStrictMode::is_valid(strict_mode()));
return DeclareGlobalsEvalFlag::encode(is_eval()) |
DeclareGlobalsNativeFlag::encode(is_native()) |
- DeclareGlobalsLanguageMode::encode(language_mode());
+ DeclareGlobalsStrictMode::encode(strict_mode());
}
@@ -826,13 +808,12 @@ void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate()->debugger()->IsDebuggerActive()) {
+ if (!info_->is_debug()) {
CodeGenerator::RecordPositions(masm_, stmt->position());
} else {
// Check if the statement will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker(isolate());
+ BreakableStatementChecker checker(zone());
checker.Check(stmt);
// Record the statement position right here if the statement is not
// breakable. For breakable statements the actual recording of the
@@ -842,23 +823,19 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
// If the position recording did record a new position generate a debug
// break slot to make the statement breakable.
if (position_recorded) {
- Debug::GenerateSlot(masm_);
+ DebugCodegen::GenerateSlot(masm_);
}
}
-#else
- CodeGenerator::RecordPositions(masm_, stmt->position());
-#endif
}
void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate()->debugger()->IsDebuggerActive()) {
+ if (!info_->is_debug()) {
CodeGenerator::RecordPositions(masm_, expr->position());
} else {
// Check if the expression will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker(isolate());
+ BreakableStatementChecker checker(zone());
checker.Check(expr);
// Record a statement position right here if the expression is not
// breakable. For breakable expressions the actual recording of the
@@ -872,12 +849,9 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
// If the position recording did record a new position generate a debug
// break slot to make the statement breakable.
if (position_recorded) {
- Debug::GenerateSlot(masm_);
+ DebugCodegen::GenerateSlot(masm_);
}
}
-#else
- CodeGenerator::RecordPositions(masm_, pos);
-#endif
}
@@ -901,7 +875,6 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
const FullCodeGenerator::InlineFunctionGenerator
FullCodeGenerator::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
@@ -1079,35 +1052,30 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
Scope* saved_scope = scope();
// Push a block context when entering a block with block scoped variables.
- if (stmt->scope() != NULL) {
+ if (stmt->scope() == NULL) {
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ } else {
scope_ = stmt->scope();
ASSERT(!scope_->is_module_scope());
{ Comment cmnt(masm_, "[ Extend block context");
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
- int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
- __ Push(scope_info);
+ __ Push(scope_->GetScopeInfo());
PushFunctionArgumentForContextAllocation();
- if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
- FastNewBlockContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kPushBlockContext, 2);
- }
+ __ CallRuntime(Runtime::kHiddenPushBlockContext, 2);
// Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
}
{ Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope_->declarations());
+ PrepareForBailoutForId(stmt->DeclsId(), NO_REGISTERS);
}
}
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
VisitStatements(stmt->statements());
scope_ = saved_scope;
__ bind(nested_block.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
// Pop block context if necessary.
if (stmt->scope() != NULL) {
@@ -1116,6 +1084,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
}
@@ -1124,7 +1093,7 @@ void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
__ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
__ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
@@ -1263,7 +1232,7 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
VisitForStackValue(stmt->expression());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushWithContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushWithContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
Scope* saved_scope = scope();
@@ -1416,7 +1385,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
__ Push(stmt->variable()->name());
__ Push(result_register());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushCatchContext, 3);
+ __ CallRuntime(Runtime::kHiddenPushCatchContext, 3);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@@ -1480,7 +1449,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// rethrow the exception if it returns.
__ Call(&finally_entry);
__ Push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
+ __ CallRuntime(Runtime::kHiddenReThrow, 1);
// Finally block implementation.
__ bind(&finally_entry);
@@ -1507,13 +1476,11 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
Comment cmnt(masm_, "[ DebuggerStatement");
SetStatementPosition(stmt);
__ DebugBreak();
// Ignore the return value.
-#endif
}
@@ -1590,8 +1557,10 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
bool is_generator = false;
Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfo(name, literals, is_generator,
- code, Handle<ScopeInfo>(fun->shared()->scope_info()));
+ isolate()->factory()->NewSharedFunctionInfo(
+ name, literals, is_generator,
+ code, Handle<ScopeInfo>(fun->shared()->scope_info()),
+ Handle<FixedArray>(fun->shared()->feedback_vector()));
shared->set_construct_stub(*construct_stub);
// Copy the function data to the shared function info.
@@ -1606,7 +1575,7 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
// Never returns here.
}
@@ -1644,8 +1613,7 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
}
-void BackEdgeTable::Patch(Isolate* isolate,
- Code* unoptimized) {
+void BackEdgeTable::Patch(Isolate* isolate, Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
@@ -1668,8 +1636,7 @@ void BackEdgeTable::Patch(Isolate* isolate,
}
-void BackEdgeTable::Revert(Isolate* isolate,
- Code* unoptimized) {
+void BackEdgeTable::Revert(Isolate* isolate, Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
@@ -1694,25 +1661,23 @@ void BackEdgeTable::Revert(Isolate* isolate,
}
-void BackEdgeTable::AddStackCheck(CompilationInfo* info) {
+void BackEdgeTable::AddStackCheck(Handle<Code> code, uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
- Isolate* isolate = info->isolate();
- Code* code = info->shared_info()->code();
- Address pc = code->instruction_start() + info->osr_pc_offset();
- ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate, code, pc));
+ Isolate* isolate = code->GetIsolate();
+ Address pc = code->instruction_start() + pc_offset;
Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
- PatchAt(code, pc, OSR_AFTER_STACK_CHECK, patch);
+ PatchAt(*code, pc, OSR_AFTER_STACK_CHECK, patch);
}
-void BackEdgeTable::RemoveStackCheck(CompilationInfo* info) {
+void BackEdgeTable::RemoveStackCheck(Handle<Code> code, uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
- Isolate* isolate = info->isolate();
- Code* code = info->shared_info()->code();
- Address pc = code->instruction_start() + info->osr_pc_offset();
- if (GetBackEdgeState(isolate, code, pc) == OSR_AFTER_STACK_CHECK) {
+ Isolate* isolate = code->GetIsolate();
+ Address pc = code->instruction_start() + pc_offset;
+
+ if (OSR_AFTER_STACK_CHECK == GetBackEdgeState(isolate, *code, pc)) {
Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- PatchAt(code, pc, ON_STACK_REPLACEMENT, patch);
+ PatchAt(*code, pc, ON_STACK_REPLACEMENT, patch);
}
}
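
A note on the reworked OSR helpers above: both AddStackCheck and RemoveStackCheck now locate the patch site purely from a code object and a raw pc offset, rather than re-deriving it from a live CompilationInfo. The address arithmetic they share reduces to this (Code here is a toy stand-in):

#include <stdint.h>

struct Code {
  uint8_t* instruction_start;
};

// The patch point is identified by (code, pc_offset) alone.
uint8_t* PatchPoint(const Code& code, uint32_t pc_offset) {
  return code.instruction_start + pc_offset;
}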
diff --git a/chromium/v8/src/full-codegen.h b/chromium/v8/src/full-codegen.h
index 11d5341ecab..bd6aa13c830 100644
--- a/chromium/v8/src/full-codegen.h
+++ b/chromium/v8/src/full-codegen.h
@@ -1,44 +1,21 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_FULL_CODEGEN_H_
#define V8_FULL_CODEGEN_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "allocation.h"
-#include "assert-scope.h"
-#include "ast.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "data-flow.h"
-#include "globals.h"
-#include "objects.h"
+#include "src/allocation.h"
+#include "src/assert-scope.h"
+#include "src/ast.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/data-flow.h"
+#include "src/globals.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
@@ -52,8 +29,8 @@ class JumpPatchSite;
// debugger to piggyback on.
class BreakableStatementChecker: public AstVisitor {
public:
- explicit BreakableStatementChecker(Isolate* isolate) : is_breakable_(false) {
- InitializeAstVisitor(isolate);
+ explicit BreakableStatementChecker(Zone* zone) : is_breakable_(false) {
+ InitializeAstVisitor(zone);
}
void Check(Statement* stmt);
@@ -96,11 +73,8 @@ class FullCodeGenerator: public AstVisitor {
? info->function()->ast_node_count() : 0,
info->zone()),
back_edges_(2, info->zone()),
- type_feedback_cells_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0,
- info->zone()),
- ic_total_count_(0),
- zone_(info->zone()) {
+ ic_total_count_(0) {
+ ASSERT(!info->IsStub());
Initialize();
}
@@ -122,19 +96,25 @@ class FullCodeGenerator: public AstVisitor {
return NULL;
}
- Zone* zone() const { return zone_; }
-
static const int kMaxBackEdgeWeight = 127;
// Platform-specific code size multiplier.
-#if V8_TARGET_ARCH_IA32
- static const int kCodeSizeMultiplier = 100;
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+ static const int kCodeSizeMultiplier = 105;
+ static const int kBootCodeSizeMultiplier = 100;
#elif V8_TARGET_ARCH_X64
- static const int kCodeSizeMultiplier = 162;
+ static const int kCodeSizeMultiplier = 170;
+ static const int kBootCodeSizeMultiplier = 140;
#elif V8_TARGET_ARCH_ARM
- static const int kCodeSizeMultiplier = 142;
+ static const int kCodeSizeMultiplier = 149;
+ static const int kBootCodeSizeMultiplier = 110;
+#elif V8_TARGET_ARCH_ARM64
+// TODO(all): Copied ARM value. Check this is sensible for ARM64.
+ static const int kCodeSizeMultiplier = 149;
+ static const int kBootCodeSizeMultiplier = 110;
#elif V8_TARGET_ARCH_MIPS
- static const int kCodeSizeMultiplier = 142;
+ static const int kCodeSizeMultiplier = 149;
+ static const int kBootCodeSizeMultiplier = 120;
#else
#error Unsupported target architecture.
#endif
@@ -237,7 +217,7 @@ class FullCodeGenerator: public AstVisitor {
++(*context_length);
}
return previous_;
- };
+ }
};
// The try block of a try/catch statement.
@@ -437,9 +417,12 @@ class FullCodeGenerator: public AstVisitor {
void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(BailoutId id, State state);
- // Cache cell support. This associates AST ids with global property cells
- // that will be cleared during GC and collected by the type-feedback oracle.
- void RecordTypeFeedbackCell(TypeFeedbackId id, Handle<Cell> cell);
+ // Feedback slot support. The feedback vector will be cleared during gc and
+ // collected by the type-feedback oracle.
+ Handle<FixedArray> FeedbackVector() {
+ return info_->feedback_vector();
+ }
+ void EnsureSlotContainsAllocationSite(int slot);
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -482,9 +465,9 @@ class FullCodeGenerator: public AstVisitor {
void EmitReturnSequence();
// Platform-specific code sequences for calls
- void EmitCallWithStub(Call* expr, CallFunctionFlags flags);
- void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
- void EmitKeyedCallWithIC(Call* expr, Expression* key);
+ void EmitCall(Call* expr, CallIC::CallType = CallIC::FUNCTION);
+ void EmitCallWithLoadIC(Call* expr);
+ void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
// Platform-specific code for inline runtime calls.
InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
@@ -494,7 +477,6 @@ class FullCodeGenerator: public AstVisitor {
#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
void Emit##name(CallRuntime* expr);
INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
- INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
// Platform-specific code for resuming generators.
@@ -555,6 +537,11 @@ class FullCodeGenerator: public AstVisitor {
void EmitVariableAssignment(Variable* var,
Token::Value op);
+ // Helper functions to EmitVariableAssignment
+ void EmitStoreToStackLocalOrContextSlot(Variable* var,
+ MemOperand location);
+ void EmitCallStoreContextSlot(Handle<String> name, StrictMode strict_mode);
+
// Complete a named property assignment. The receiver is expected on top
// of the stack and the right-hand-side value in the accumulator.
void EmitNamedPropertyAssignment(Assignment* expr);
@@ -565,9 +552,12 @@ class FullCodeGenerator: public AstVisitor {
void EmitKeyedPropertyAssignment(Assignment* expr);
void CallIC(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId id = TypeFeedbackId::None());
+ void CallLoadIC(ContextualMode mode,
+ TypeFeedbackId id = TypeFeedbackId::None());
+ void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
+
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
@@ -596,8 +586,7 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
- bool is_classic_mode() { return language_mode() == CLASSIC_MODE; }
- LanguageMode language_mode() { return function()->language_mode(); }
+ StrictMode strict_mode() { return function()->strict_mode(); }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return scope_; }
@@ -630,7 +619,6 @@ class FullCodeGenerator: public AstVisitor {
void Generate();
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
- void PopulateTypeFeedbackCells(Handle<Code> code);
Handle<FixedArray> handler_table() { return handler_table_; }
@@ -645,12 +633,6 @@ class FullCodeGenerator: public AstVisitor {
uint32_t loop_depth;
};
- struct TypeFeedbackCellEntry {
- TypeFeedbackId ast_id;
- Handle<Cell> cell;
- };
-
-
class ExpressionContext BASE_EMBEDDED {
public:
explicit ExpressionContext(FullCodeGenerator* codegen)
@@ -838,14 +820,11 @@ class FullCodeGenerator: public AstVisitor {
int module_index_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
- GrowableBitVector prepared_bailout_ids_;
ZoneList<BackEdgeEntry> back_edges_;
- ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;
Handle<Cell> profiling_counter_;
bool generate_debug_code_;
- Zone* zone_;
friend class NestedStatement;
@@ -928,10 +907,10 @@ class BackEdgeTable {
// Change a back edge patched for on-stack replacement to perform a
// stack check first.
- static void AddStackCheck(CompilationInfo* info);
+ static void AddStackCheck(Handle<Code> code, uint32_t pc_offset);
- // Remove the stack check, if available, and replace by on-stack replacement.
- static void RemoveStackCheck(CompilationInfo* info);
+ // Revert the patch by AddStackCheck.
+ static void RemoveStackCheck(Handle<Code> code, uint32_t pc_offset);
// Return the current patch state of the back edge.
static BackEdgeState GetBackEdgeState(Isolate* isolate,
diff --git a/chromium/v8/src/func-name-inferrer.cc b/chromium/v8/src/func-name-inferrer.cc
index 5409a4e1800..a3c2f08ae7a 100644
--- a/chromium/v8/src/func-name-inferrer.cc
+++ b/chromium/v8/src/func-name-inferrer.cc
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "func-name-inferrer.h"
-#include "list-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/func-name-inferrer.h"
+#include "src/list-inl.h"
namespace v8 {
namespace internal {
@@ -55,14 +32,16 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->prototype_string()->Equals(*name)) {
+ if (IsOpen() &&
+ !String::Equals(isolate()->factory()->prototype_string(), name)) {
names_stack_.Add(Name(name, kLiteralName), zone());
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->dot_result_string()->Equals(*name)) {
+ if (IsOpen() &&
+ !String::Equals(isolate()->factory()->dot_result_string(), name)) {
names_stack_.Add(Name(name, kVariableName), zone());
}
}
@@ -83,11 +62,13 @@ Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
return MakeNameFromStackHelper(pos + 1, prev);
} else {
if (prev->length() > 0) {
+ Handle<String> name = names_stack_.at(pos).name;
+ if (prev->length() + name->length() + 1 > String::kMaxLength) return prev;
Factory* factory = isolate()->factory();
- Handle<String> curr = factory->NewConsString(
- factory->dot_string(), names_stack_.at(pos).name);
- return MakeNameFromStackHelper(pos + 1,
- factory->NewConsString(prev, curr));
+ Handle<String> curr =
+ factory->NewConsString(factory->dot_string(), name).ToHandleChecked();
+ curr = factory->NewConsString(prev, curr).ToHandleChecked();
+ return MakeNameFromStackHelper(pos + 1, curr);
} else {
return MakeNameFromStackHelper(pos + 1, names_stack_.at(pos).name);
}
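
The String::kMaxLength guard added above keeps the inferrer from building a cons string the heap cannot represent: once the budget would be exceeded, it simply keeps the prefix it has. A plain-C++ analogue of that policy, where max_len plays the role of String::kMaxLength and everything else is illustrative:

#include <cstddef>
#include <string>
#include <vector>

std::string JoinDottedNames(const std::vector<std::string>& parts,
                            size_t max_len) {
  std::string result;
  for (size_t i = 0; i < parts.size(); ++i) {
    if (result.empty()) {
      result = parts[i];
      continue;
    }
    // Refuse any step that would blow the budget; keep the prefix instead.
    if (result.size() + parts[i].size() + 1 > max_len) return result;
    result += '.';
    result += parts[i];
  }
  return result;
}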
diff --git a/chromium/v8/src/func-name-inferrer.h b/chromium/v8/src/func-name-inferrer.h
index f57e7786045..0c5399c7bf1 100644
--- a/chromium/v8/src/func-name-inferrer.h
+++ b/chromium/v8/src/func-name-inferrer.h
@@ -1,36 +1,17 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_FUNC_NAME_INFERRER_H_
#define V8_FUNC_NAME_INFERRER_H_
+#include "src/handles.h"
+#include "src/zone.h"
+
namespace v8 {
namespace internal {
+class FunctionLiteral;
class Isolate;
// FuncNameInferrer is a stateful class that is used to perform name
diff --git a/chromium/v8/src/gdb-jit.cc b/chromium/v8/src/gdb-jit.cc
index 21cfd223349..789a0fd5a39 100644
--- a/chromium/v8/src/gdb-jit.cc
+++ b/chromium/v8/src/gdb-jit.cc
@@ -1,43 +1,20 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifdef ENABLE_GDB_JIT_INTERFACE
-#include "v8.h"
-#include "gdb-jit.h"
-
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "frames.h"
-#include "frames-inl.h"
-#include "global-handles.h"
-#include "messages.h"
-#include "natives.h"
-#include "platform.h"
-#include "scopes.h"
+#include "src/v8.h"
+#include "src/gdb-jit.h"
+
+#include "src/bootstrapper.h"
+#include "src/compiler.h"
+#include "src/frames.h"
+#include "src/frames-inl.h"
+#include "src/global-handles.h"
+#include "src/messages.h"
+#include "src/natives.h"
+#include "src/platform.h"
+#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -217,7 +194,7 @@ class DebugSectionBase : public ZoneObject {
struct MachOSectionHeader {
char sectname[16];
char segname[16];
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
uint32_t addr;
uint32_t size;
#else
@@ -252,8 +229,8 @@ class MachOSection : public DebugSectionBase<MachOSectionHeader> {
segment_(segment),
align_(align),
flags_(flags) {
- ASSERT(IsPowerOf2(align));
if (align_ != 0) {
+ ASSERT(IsPowerOf2(align));
align_ = WhichPowerOf2(align_);
}
}
@@ -534,7 +511,7 @@ class MachO BASE_EMBEDDED {
uint32_t cmd;
uint32_t cmdsize;
char segname[16];
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
uint32_t vmaddr;
uint32_t vmsize;
uint32_t fileoff;
@@ -560,7 +537,7 @@ class MachO BASE_EMBEDDED {
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
header->magic = 0xFEEDFACEu;
header->cputype = 7; // i386
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
@@ -585,7 +562,7 @@ class MachO BASE_EMBEDDED {
uintptr_t code_size) {
Writer::Slot<MachOSegmentCommand> cmd =
w->CreateSlotHere<MachOSegmentCommand>();
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
cmd->cmd = LC_SEGMENT_32;
#else
cmd->cmd = LC_SEGMENT_64;
@@ -672,7 +649,7 @@ class ELF BASE_EMBEDDED {
void WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_X64
@@ -681,9 +658,9 @@ class ELF BASE_EMBEDDED {
#else
#error Unsupported target architecture.
#endif
- OS::MemCopy(header->ident, ident, 16);
+ memcpy(header->ident, ident, 16);
header->type = 1;
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
header->machine = 3;
#elif V8_TARGET_ARCH_X64
// Processor identification value for x64 is 62 as defined in
@@ -785,7 +762,7 @@ class ELFSymbol BASE_EMBEDDED {
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@@ -1002,7 +979,7 @@ class CodeDescription BASE_EMBEDDED {
}
int GetScriptLineNumber(int pos) {
- return GetScriptLineNumberSafe(script_, pos) + 1;
+ return script_->GetLineNumber(pos) + 1;
}
@@ -1089,7 +1066,7 @@ class DebugInfoSection : public DebugSection {
w->Write<uint8_t>(sizeof(intptr_t));
w->WriteULEB128(1); // Abbreviation code.
- w->WriteString(*desc_->GetFilename());
+ w->WriteString(desc_->GetFilename().get());
w->Write<intptr_t>(desc_->CodeStart());
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
w->Write<uint32_t>(0);
@@ -1107,7 +1084,7 @@ class DebugInfoSection : public DebugSection {
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
uintptr_t fb_block_start = w->position();
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
#elif V8_TARGET_ARCH_X64
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
@@ -1131,7 +1108,7 @@ class DebugInfoSection : public DebugSection {
for (int param = 0; param < params; ++param) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
- *scope->parameter(param)->name()->ToCString(DISALLOW_NULLS));
+ scope->parameter(param)->name()->ToCString(DISALLOW_NULLS).get());
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1182,7 +1159,7 @@ class DebugInfoSection : public DebugSection {
for (int local = 0; local < locals; ++local) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
- *stack_locals[local]->name()->ToCString(DISALLOW_NULLS));
+ stack_locals[local]->name()->ToCString(DISALLOW_NULLS).get());
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1455,7 +1432,7 @@ class DebugLineSection : public DebugSection {
w->Write<uint8_t>(1); // DW_LNS_SET_COLUMN operands count.
w->Write<uint8_t>(0); // DW_LNS_NEGATE_STMT operands count.
w->Write<uint8_t>(0); // Empty include_directories sequence.
- w->WriteString(*desc_->GetFilename()); // File name.
+ w->WriteString(desc_->GetFilename().get()); // File name.
w->WriteULEB128(0); // Current directory.
w->WriteULEB128(0); // Unknown modification time.
w->WriteULEB128(0); // Unknown file size.
@@ -1841,7 +1818,7 @@ extern "C" {
JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
#ifdef OBJECT_PRINT
- void __gdb_print_v8_object(MaybeObject* object) {
+ void __gdb_print_v8_object(Object* object) {
object->Print();
PrintF(stdout, "\n");
}
@@ -1856,7 +1833,7 @@ static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1);
entry->symfile_size_ = symfile_size;
- OS::MemCopy(entry->symfile_addr_, symfile_addr, symfile_size);
+ MemCopy(entry->symfile_addr_, symfile_addr, symfile_size);
entry->prev_ = entry->next_ = NULL;
@@ -1880,12 +1857,12 @@ static void RegisterCodeEntry(JITCodeEntry* entry,
static const char* kObjFileExt = ".o";
char file_name[64];
- OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize),
- "%s%s%d%s",
- kElfFilePrefix,
- (name_hint != NULL) ? name_hint : "",
- file_num++,
- kObjFileExt);
+ SNPrintF(Vector<char>(file_name, kMaxFileNameSize),
+ "%s%s%d%s",
+ kElfFilePrefix,
+ (name_hint != NULL) ? name_hint : "",
+ file_num++,
+ kObjFileExt);
WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
}
#endif
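
With plain SNPrintF the dumped object files are named by simple concatenation. A sketch of the resulting names, assuming kElfFilePrefix is a path prefix defined earlier in the file (its value is not shown in this hunk; the prefix and hint below are hypothetical):

  char file_name[64];
  static int file_num = 0;
  SNPrintF(Vector<char>(file_name, sizeof(file_name)), "%s%s%d%s",
           "/tmp/elfdump", "foo", file_num++, ".o");
  // => "/tmp/elfdumpfoo0.o", then "/tmp/elfdumpfoo1.o", ...
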
@@ -2003,13 +1980,13 @@ void GDBJITInterface::AddCode(Handle<Name> name,
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
- // Force initialization of line_ends array.
- GetScriptLineNumber(script, 0);
+ Script::InitLineEnds(script);
if (!name.is_null() && name->IsString()) {
SmartArrayPointer<char> name_cstring =
Handle<String>::cast(name)->ToCString(DISALLOW_NULLS);
- AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
+ AddCode(name_cstring.get(), *code, GDBJITInterface::FUNCTION, *script,
+ info);
} else {
AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
}
@@ -2132,7 +2109,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
Code* code) {
if (!FLAG_gdbjit) return;
if (name != NULL && name->IsString()) {
- AddCode(tag, *String::cast(name)->ToCString(DISALLOW_NULLS), code);
+ AddCode(tag, String::cast(name)->ToCString(DISALLOW_NULLS).get(), code);
} else {
AddCode(tag, "", code);
}
diff --git a/chromium/v8/src/gdb-jit.h b/chromium/v8/src/gdb-jit.h
index a34d3d3012e..d8828566c8e 100644
--- a/chromium/v8/src/gdb-jit.h
+++ b/chromium/v8/src/gdb-jit.h
@@ -1,34 +1,11 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_GDB_JIT_H_
#define V8_GDB_JIT_H_
-#include "allocation.h"
+#include "src/allocation.h"
//
// Basic implementation of GDB JIT Interface client.
@@ -37,8 +14,8 @@
//
#ifdef ENABLE_GDB_JIT_INTERFACE
-#include "v8.h"
-#include "factory.h"
+#include "src/v8.h"
+#include "src/factory.h"
namespace v8 {
namespace internal {
@@ -50,12 +27,6 @@ class CompilationInfo;
V(KEYED_LOAD_IC) \
V(STORE_IC) \
V(KEYED_STORE_IC) \
- V(CALL_IC) \
- V(CALL_INITIALIZE) \
- V(CALL_PRE_MONOMORPHIC) \
- V(CALL_NORMAL) \
- V(CALL_MEGAMORPHIC) \
- V(CALL_MISS) \
V(STUB) \
V(BUILTIN) \
V(SCRIPT) \
diff --git a/chromium/v8/src/generator.js b/chromium/v8/src/generator.js
index 3c8ea6f3194..a0c2aff67c4 100644
--- a/chromium/v8/src/generator.js
+++ b/chromium/v8/src/generator.js
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
"use strict";
@@ -55,6 +32,10 @@ function GeneratorObjectThrow(exn) {
return %_GeneratorThrow(this, exn);
}
+function GeneratorObjectIterator() {
+ return this;
+}
+
function GeneratorFunctionPrototypeConstructor(x) {
if (%_IsConstructCall()) {
throw MakeTypeError('not_constructor', ['GeneratorFunctionPrototype']);
@@ -66,7 +47,9 @@ function GeneratorFunctionConstructor(arg1) { // length == 1
var global_receiver = %GlobalReceiver(global);
// Compile the string in the constructor and not a helper so that errors
// appear to come from here.
- var f = %_CallFunction(global_receiver, %CompileString(source, true));
+ var f = %CompileString(source, true);
+ if (!IS_FUNCTION(f)) return f;
+ f = %_CallFunction(global_receiver, f);
%FunctionMarkNameShouldPrintAsAnonymous(f);
return f;
}
@@ -79,6 +62,9 @@ function SetUpGenerators() {
DONT_ENUM | DONT_DELETE | READ_ONLY,
["next", GeneratorObjectNext,
"throw", GeneratorObjectThrow]);
+ %FunctionSetName(GeneratorObjectIterator, '[Symbol.iterator]');
+ %SetProperty(GeneratorObjectPrototype, symbolIterator, GeneratorObjectIterator,
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetProperty(GeneratorObjectPrototype, "constructor",
GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetPrototype(GeneratorFunctionPrototype, $Function.prototype);
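
The user-visible effect, sketched in JavaScript (assuming Symbol.iterator is exposed, which at this point was still behind harmony flags):

  function* g() { yield 1; yield 2; }
  var it = g();
  // The new method simply returns its receiver, so a generator object is
  // its own iterator:
  it[Symbol.iterator]() === it;   // true
  for (var v of g()) { /* v is 1, then 2, driven by .next() */ }
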
diff --git a/chromium/v8/src/global-handles.cc b/chromium/v8/src/global-handles.cc
index 2ebe1c0088f..a5ae2d5626f 100644
--- a/chromium/v8/src/global-handles.cc
+++ b/chromium/v8/src/global-handles.cc
@@ -1,36 +1,13 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "global-handles.h"
-
-#include "vm-state-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/api.h"
+#include "src/global-handles.h"
+
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -169,13 +146,6 @@ class GlobalHandles::Node {
flags_ = IsInNewSpaceList::update(flags_, v);
}
- bool is_revivable_callback() {
- return IsRevivableCallback::decode(flags_);
- }
- void set_revivable_callback(bool v) {
- flags_ = IsRevivableCallback::update(flags_, v);
- }
-
bool IsNearDeath() const {
// Check for PENDING to ensure correct answer when processing callbacks.
return state() == PENDING || state() == NEAR_DEATH;
@@ -234,27 +204,21 @@ class GlobalHandles::Node {
parameter_or_next_free_.next_free = value;
}
- void MakeWeak(void* parameter,
- WeakCallback weak_callback,
- RevivableCallback revivable_callback) {
- ASSERT((weak_callback == NULL) != (revivable_callback == NULL));
+ void MakeWeak(void* parameter, WeakCallback weak_callback) {
+ ASSERT(weak_callback != NULL);
ASSERT(state() != FREE);
+ CHECK(object_ != NULL);
set_state(WEAK);
set_parameter(parameter);
- if (weak_callback != NULL) {
- weak_callback_ = weak_callback;
- set_revivable_callback(false);
- } else {
- weak_callback_ =
- reinterpret_cast<WeakCallback>(revivable_callback);
- set_revivable_callback(true);
- }
+ weak_callback_ = weak_callback;
}
- void ClearWeakness() {
+ void* ClearWeakness() {
ASSERT(state() != FREE);
+ void* p = parameter();
set_state(NORMAL);
set_parameter(NULL);
+ return p;
}
bool PostGarbageCollectionProcessing(Isolate* isolate) {
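
This collapses the two callback flavours into the single WeakCallbackData-based shape. A hedged sketch of the matching embedder-facing calls from include/v8.h of this era (signatures paraphrased; check the header for the authoritative forms):

  #include <v8.h>

  struct Wrapper { int id; };   // hypothetical embedder state

  void Cleanup(const v8::WeakCallbackData<v8::Object, Wrapper>& data) {
    delete data.GetParameter();          // the void* parameter round-trips
  }

  void Track(v8::Isolate* isolate, v8::Persistent<v8::Object>& handle,
             Wrapper* wrapper) {
    handle.SetWeak(wrapper, Cleanup);    // one callback shape, no revivable form
    // Persistent::ClearWeak<Wrapper>() later returns the same pointer,
    // mirroring the new void* GlobalHandles::ClearWeakness(Object**).
  }
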
@@ -278,24 +242,16 @@ class GlobalHandles::Node {
// Leaving V8.
VMState<EXTERNAL> state(isolate);
HandleScope handle_scope(isolate);
- if (is_revivable_callback()) {
- RevivableCallback revivable =
- reinterpret_cast<RevivableCallback>(weak_callback_);
- revivable(reinterpret_cast<v8::Isolate*>(isolate),
- reinterpret_cast<Persistent<Value>*>(&object),
- par);
- } else {
- Handle<Object> handle(*object, isolate);
- v8::WeakCallbackData<v8::Value, void> data(
- reinterpret_cast<v8::Isolate*>(isolate),
- v8::Utils::ToLocal(handle),
- par);
- weak_callback_(data);
- }
+ Handle<Object> handle(*object, isolate);
+ v8::WeakCallbackData<v8::Value, void> data(
+ reinterpret_cast<v8::Isolate*>(isolate),
+ v8::Utils::ToLocal(handle),
+ par);
+ weak_callback_(data);
}
  // Absence of explicit cleanup or revival of a weak handle
  // would in most cases lead to a memory leak.
- ASSERT(state() != NEAR_DEATH);
+ CHECK(state() != NEAR_DEATH);
return true;
}
@@ -325,7 +281,6 @@ class GlobalHandles::Node {
class IsIndependent: public BitField<bool, 4, 1> {};
class IsPartiallyDependent: public BitField<bool, 5, 1> {};
class IsInNewSpaceList: public BitField<bool, 6, 1> {};
- class IsRevivableCallback: public BitField<bool, 7, 1> {};
uint8_t flags_;
@@ -522,15 +477,13 @@ void GlobalHandles::Destroy(Object** location) {
void GlobalHandles::MakeWeak(Object** location,
void* parameter,
- WeakCallback weak_callback,
- RevivableCallback revivable_callback) {
- Node::FromLocation(location)->MakeWeak(
- parameter, weak_callback, revivable_callback);
+ WeakCallback weak_callback) {
+ Node::FromLocation(location)->MakeWeak(parameter, weak_callback);
}
-void GlobalHandles::ClearWeakness(Object** location) {
- Node::FromLocation(location)->ClearWeakness();
+void* GlobalHandles::ClearWeakness(Object** location) {
+ return Node::FromLocation(location)->ClearWeakness();
}
@@ -658,21 +611,21 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
}
-bool GlobalHandles::PostGarbageCollectionProcessing(
+int GlobalHandles::PostGarbageCollectionProcessing(
GarbageCollector collector, GCTracer* tracer) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count_;
- bool next_gc_likely_to_collect_more = false;
+ int freed_nodes = 0;
if (collector == SCAVENGER) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
if (!node->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
- // the next_gc_likely_to_collect_more.
+ // the freed_nodes.
continue;
}
// Skip dependent handles. Their weak callbacks might expect to be
@@ -688,29 +641,29 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
// PostGarbageCollection processing. The current node might
// have been deleted in that round, so we need to bail out (or
// restart the processing).
- return next_gc_likely_to_collect_more;
+ return freed_nodes;
}
}
if (!node->IsRetainer()) {
- next_gc_likely_to_collect_more = true;
+ freed_nodes++;
}
}
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (!it.node()->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
- // the next_gc_likely_to_collect_more.
+ // the freed_nodes.
continue;
}
it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
- return next_gc_likely_to_collect_more;
+ return freed_nodes;
}
}
if (!it.node()->IsRetainer()) {
- next_gc_likely_to_collect_more = true;
+ freed_nodes++;
}
}
}
@@ -733,7 +686,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
}
new_space_nodes_.Rewind(last);
- return next_gc_likely_to_collect_more;
+ return freed_nodes;
}
diff --git a/chromium/v8/src/global-handles.h b/chromium/v8/src/global-handles.h
index 4b46aac05d0..2f5afc9345d 100644
--- a/chromium/v8/src/global-handles.h
+++ b/chromium/v8/src/global-handles.h
@@ -1,39 +1,16 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_GLOBAL_HANDLES_H_
#define V8_GLOBAL_HANDLES_H_
-#include "../include/v8.h"
-#include "../include/v8-profiler.h"
+#include "include/v8.h"
+#include "include/v8-profiler.h"
-#include "handles.h"
-#include "list.h"
-#include "v8utils.h"
+#include "src/handles.h"
+#include "src/list.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -135,7 +112,6 @@ class GlobalHandles {
static void Destroy(Object** location);
typedef WeakCallbackData<v8::Value, void>::Callback WeakCallback;
- typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
@@ -145,14 +121,7 @@ class GlobalHandles {
  // reason is that Smi::FromInt(0) does not change during garbage collection.
static void MakeWeak(Object** location,
void* parameter,
- WeakCallback weak_callback,
- RevivableCallback revivable_callback);
-
- static inline void MakeWeak(Object** location,
- void* parameter,
- RevivableCallback revivable_callback) {
- MakeWeak(location, parameter, NULL, revivable_callback);
- }
+ WeakCallback weak_callback);
void RecordStats(HeapStats* stats);
@@ -169,7 +138,7 @@ class GlobalHandles {
}
// Clear the weakness of a global handle.
- static void ClearWeakness(Object** location);
+ static void* ClearWeakness(Object** location);
// Clear the weakness of a global handle.
static void MarkIndependent(Object** location);
@@ -186,9 +155,9 @@ class GlobalHandles {
static bool IsWeak(Object** location);
// Process pending weak handles.
- // Returns true if next major GC is likely to collect more garbage.
- bool PostGarbageCollectionProcessing(GarbageCollector collector,
- GCTracer* tracer);
+ // Returns the number of freed nodes.
+ int PostGarbageCollectionProcessing(GarbageCollector collector,
+ GCTracer* tracer);
// Iterates over all strong handles.
void IterateStrongRoots(ObjectVisitor* v);
@@ -348,6 +317,7 @@ class EternalHandles {
enum SingletonHandle {
I18N_TEMPLATE_ONE,
I18N_TEMPLATE_TWO,
+ DATE_CACHE_VERSION,
NUMBER_OF_SINGLETON_HANDLES
};
diff --git a/chromium/v8/src/globals.h b/chromium/v8/src/globals.h
index 2f526a83bbe..595ecc37cfe 100644
--- a/chromium/v8/src/globals.h
+++ b/chromium/v8/src/globals.h
@@ -1,34 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_
-#include "../include/v8stdint.h"
+#include "include/v8stdint.h"
+
+#include "src/base/build_config.h"
+#include "src/base/macros.h"
+#include "src/checks.h"
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
@@ -38,7 +19,7 @@
#if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
# include <limits> // NOLINT
# define V8_INFINITY std::numeric_limits<double>::infinity()
-#elif V8_CC_MSVC
+#elif V8_LIBC_MSVCRT
# define V8_INFINITY HUGE_VAL
#else
# define V8_INFINITY INFINITY
@@ -47,86 +28,13 @@
namespace v8 {
namespace internal {
-// Processor architecture detection. For more info on what's defined, see:
-// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
-// http://www.agner.org/optimize/calling_conventions.pdf
-// or with gcc, run: "echo | gcc -E -dM -"
-#if defined(_M_X64) || defined(__x86_64__)
-#if defined(__native_client__)
-// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8
-// generates ARM machine code, together with a portable ARM simulator
-// compiled for the host architecture in question.
-//
-// Since Native Client is ILP-32 on all architectures we use
-// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
-#define V8_HOST_ARCH_IA32 1
-#define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#else
-#define V8_HOST_ARCH_X64 1
-#define V8_HOST_ARCH_64_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#endif // __native_client__
-#elif defined(_M_IX86) || defined(__i386__)
-#define V8_HOST_ARCH_IA32 1
-#define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#elif defined(__ARMEL__)
-#define V8_HOST_ARCH_ARM 1
-#define V8_HOST_ARCH_32_BIT 1
-#elif defined(__MIPSEL__)
-#define V8_HOST_ARCH_MIPS 1
-#define V8_HOST_ARCH_32_BIT 1
-#else
-#error Host architecture was not detected as supported by v8
-#endif
-
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
-# ifndef CAN_USE_VFP3_INSTRUCTIONS
-# define CAN_USE_VFP3_INSTRUCTIONS
-# endif
-#endif
-
-
-// Target architecture detection. This may be set externally. If not, detect
-// in the same way as the host architecture, that is, target the native
-// environment as presented by the compiler.
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
-#if defined(_M_X64) || defined(__x86_64__)
-#define V8_TARGET_ARCH_X64 1
-#elif defined(_M_IX86) || defined(__i386__)
-#define V8_TARGET_ARCH_IA32 1
-#elif defined(__ARMEL__)
-#define V8_TARGET_ARCH_ARM 1
-#elif defined(__MIPSEL__)
-#define V8_TARGET_ARCH_MIPS 1
-#else
-#error Target architecture was not detected as supported by v8
-#endif
-#endif
-
-// Check for supported combinations of host and target architectures.
-#if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32
-#error Target architecture ia32 is only supported on ia32 host
-#endif
-#if V8_TARGET_ARCH_X64 && !V8_HOST_ARCH_X64
-#error Target architecture x64 is only supported on x64 host
-#endif
-#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
-#error Target architecture arm is only supported on arm and ia32 host
-#endif
-#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
-#error Target architecture mips is only supported on mips and ia32 host
-#endif
-
// Determine whether we are running in a simulated environment.
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
+#if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
+#define USE_SIMULATOR 1
+#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
@@ -135,18 +43,8 @@ namespace internal {
#endif
#endif
-// Determine architecture endiannes (we only support little-endian).
-#if V8_TARGET_ARCH_IA32
-#define V8_TARGET_LITTLE_ENDIAN 1
-#elif V8_TARGET_ARCH_X64
-#define V8_TARGET_LITTLE_ENDIAN 1
-#elif V8_TARGET_ARCH_ARM
-#define V8_TARGET_LITTLE_ENDIAN 1
-#elif V8_TARGET_ARCH_MIPS
-#define V8_TARGET_LITTLE_ENDIAN 1
-#else
-#error Unknown target architecture endiannes
-#endif
+// Determine whether the architecture uses an out-of-line constant pool.
+#define V8_OOL_CONSTANT_POOL 0
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
@@ -203,11 +101,6 @@ typedef byte* Address;
# define V8_PTR_PREFIX ""
#endif
-// The following macro works on both 32 and 64-bit platforms.
-// Usage: instead of writing 0x1234567890123456
-// write V8_2PART_UINT64_C(0x12345678,90123456);
-#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
-
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"
#define V8PRIuPTR V8_PTR_PREFIX "u"
@@ -218,10 +111,6 @@ typedef byte* Address;
#define V8PRIxPTR "lx"
#endif
-#if V8_OS_MACOSX || defined(__FreeBSD__) || defined(__OpenBSD__)
-#define USING_BSD_ABI
-#endif
-
// -----------------------------------------------------------------------------
// Constants
@@ -259,12 +148,14 @@ const int kDoubleSizeLog2 = 3;
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
-const bool kIs64BitArch = true;
+const bool kRequiresCodeRange = true;
+const size_t kMaximalCodeRangeSize = 512 * MB;
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
-const bool kIs64BitArch = false;
+const bool kRequiresCodeRange = false;
+const size_t kMaximalCodeRangeSize = 0 * MB;
#endif
const int kBitsPerByte = 8;
@@ -299,31 +190,6 @@ const int kUC16Size = sizeof(uc16); // NOLINT
#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
-// The expression OFFSET_OF(type, field) computes the byte-offset
-// of the specified field relative to the containing type. This
-// corresponds to 'offsetof' (in stddef.h), except that it doesn't
-// use 0 or NULL, which causes a problem with the compiler warnings
-// we have enabled (which is also why 'offsetof' doesn't seem to work).
-// Here we simply use the non-zero value 4, which seems to work.
-#define OFFSET_OF(type, field) \
- (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
-
-
-// The expression ARRAY_SIZE(a) is a compile-time constant of type
-// size_t which represents the number of elements of the given
-// array. You should only use ARRAY_SIZE on statically allocated
-// arrays.
-#define ARRAY_SIZE(a) \
- ((sizeof(a) / sizeof(*(a))) / \
- static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
-
-
-// The USE(x) template is used to silence C++ compiler warnings
-// issued for (yet) unused variables (typically parameters).
-template <typename T>
-inline void USE(T) { }
-
-
// FUNCTION_ADDR(f) gets the address of a C function f.
#define FUNCTION_ADDR(f) \
(reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
@@ -337,86 +203,562 @@ F FUNCTION_CAST(Address addr) {
}
-// A macro to disallow the evil copy constructor and operator= functions
-// This should be used in the private: declarations for a class
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&) V8_DELETE; \
- void operator=(const TypeName&) V8_DELETE
-
-
-// A macro to disallow all the implicit constructors, namely the
-// default constructor, copy constructor and operator= functions.
-//
-// This should be used in the private: declarations for a class
-// that wants to prevent anyone from instantiating it. This is
-// especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName() V8_DELETE; \
- DISALLOW_COPY_AND_ASSIGN(TypeName)
+// -----------------------------------------------------------------------------
+// Forward declarations for frequently used classes
+// (sorted alphabetically)
+class FreeStoreAllocationPolicy;
+template <typename T, class P = FreeStoreAllocationPolicy> class List;
-// Newly written code should use V8_INLINE and V8_NOINLINE directly.
-#define INLINE(declarator) V8_INLINE declarator
-#define NO_INLINE(declarator) V8_NOINLINE declarator
+// -----------------------------------------------------------------------------
+// Declarations for use in both the preparser and the rest of V8.
+// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-// Newly written code should use V8_WARN_UNUSED_RESULT.
-#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT
+enum StrictMode { SLOPPY, STRICT };
+
+
+// Mask for the sign bit in a smi.
+const intptr_t kSmiSignMask = kIntptrSignBit;
+
+const int kObjectAlignmentBits = kPointerSizeLog2;
+const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
+const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
+
+// Desired alignment for pointers.
+const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+// Desired alignment for double values.
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+// Desired alignment for generated code is 32 bytes (to improve cache line
+// utilization).
+const int kCodeAlignmentBits = 5;
+const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
+const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
+
+// Tag information for Failure.
+// TODO(yangguo): remove this from space owner calculation.
+const int kFailureTag = 3;
+const int kFailureTagSize = 2;
+const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
+
+
+// Zap-value: The value used for zapping dead objects.
+// Should be a recognizable hex value tagged as a failure.
+#ifdef V8_HOST_ARCH_64_BIT
+const Address kZapValue =
+ reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
+const Address kHandleZapValue =
+ reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
+const Address kGlobalHandleZapValue =
+ reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
+const Address kFromSpaceZapValue =
+ reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
+const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
+const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
+const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
+#else
+const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
+const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
+const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
+const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
+const uint32_t kSlotsZapValue = 0xbeefdeef;
+const uint32_t kDebugZapValue = 0xbadbaddb;
+const uint32_t kFreeListZapValue = 0xfeed1eaf;
+#endif
+const int kCodeZapValue = 0xbadc0de;
-// Define DISABLE_ASAN macros.
-#if defined(__has_feature)
-#if __has_feature(address_sanitizer)
-#define DISABLE_ASAN __attribute__((no_sanitize_address))
-#endif
-#endif
+// Number of bits to represent the page size for paged spaces. The value of 20
+// gives 1MB per page.
+const int kPageSizeBits = 20;
+// On Intel architecture, cache line size is 64 bytes.
+// On ARM it may be less (32 bytes), but as far as this constant is
+// used for aligning data, it doesn't hurt to align on a greater value.
+#define PROCESSOR_CACHE_LINE_SIZE 64
-#ifndef DISABLE_ASAN
-#define DISABLE_ASAN
-#endif
+// Constants relevant to double precision floating point numbers.
+// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
+const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
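
Written out (worked arithmetic):

  // 51 - 32 == 19, and 0xfff << 19 == 0x7FF80000: in the high word this
  // covers the exponent bits 20..30 plus the quiet bit 19, so
  //   (hi32 & kQuietNaNHighBitsMask) == kQuietNaNHighBitsMask
  // identifies a quiet NaN without looking at the low word.
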
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
-// (sorted alphabetically)
-class FreeStoreAllocationPolicy;
-template <typename T, class P = FreeStoreAllocationPolicy> class List;
+class AccessorInfo;
+class Allocation;
+class Arguments;
+class Assembler;
+class Code;
+class CodeGenerator;
+class CodeStub;
+class Context;
+class Debug;
+class Debugger;
+class DebugInfo;
+class Descriptor;
+class DescriptorArray;
+class TransitionArray;
+class ExternalReference;
+class FixedArray;
+class FunctionTemplateInfo;
+class MemoryChunk;
+class SeededNumberDictionary;
+class UnseededNumberDictionary;
+class NameDictionary;
+template <typename T> class MaybeHandle;
+template <typename T> class Handle;
+class Heap;
+class HeapObject;
+class IC;
+class InterceptorInfo;
+class Isolate;
+class JSReceiver;
+class JSArray;
+class JSFunction;
+class JSObject;
+class LargeObjectSpace;
+class LookupResult;
+class MacroAssembler;
+class Map;
+class MapSpace;
+class MarkCompactCollector;
+class NewSpace;
+class Object;
+class OldSpace;
+class Foreign;
+class Scope;
+class ScopeInfo;
+class Script;
+class Smi;
+template <typename Config, class Allocator = FreeStoreAllocationPolicy>
+ class SplayTree;
+class String;
+class Name;
+class Struct;
+class Variable;
+class RelocInfo;
+class Deserializer;
+class MessageLocation;
+class VirtualMemory;
+class Mutex;
+class RecursiveMutex;
+
+typedef bool (*WeakSlotCallback)(Object** pointer);
+
+typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
// -----------------------------------------------------------------------------
-// Declarations for use in both the preparser and the rest of V8.
+// Miscellaneous
+
+// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
+// consecutive.
+enum AllocationSpace {
+ NEW_SPACE, // Semispaces collected with copying collector.
+ OLD_POINTER_SPACE, // May contain pointers to new space.
+ OLD_DATA_SPACE, // Must not have pointers to new space.
+ CODE_SPACE, // No pointers to new space, marked executable.
+ MAP_SPACE, // Only and all map objects.
+ CELL_SPACE, // Only and all cell objects.
+ PROPERTY_CELL_SPACE, // Only and all global property cell objects.
+ LO_SPACE, // Promoted large objects.
+ INVALID_SPACE, // Only used in AllocationResult to signal success.
+
+ FIRST_SPACE = NEW_SPACE,
+ LAST_SPACE = LO_SPACE,
+ FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
+ LAST_PAGED_SPACE = PROPERTY_CELL_SPACE
+};
+const int kSpaceTagSize = 3;
+const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
-// The different language modes that V8 implements. ES5 defines two language
-// modes: an unrestricted mode respectively a strict mode which are indicated by
-// CLASSIC_MODE respectively STRICT_MODE in the enum. The harmony spec drafts
-// for the next ES standard specify a new third mode which is called 'extended
-// mode'. The extended mode is only available if the harmony flag is set. It is
-// based on the 'strict mode' and adds new functionality to it. This means that
-// most of the semantics of these two modes coincide.
+
+// A flag that indicates whether objects should be pretenured when
+// allocated (allocated directly into the old generation) or not
+// (allocated in the young generation if the object size and type
+// allow).
+enum PretenureFlag { NOT_TENURED, TENURED };
+
+enum MinimumCapacity {
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ USE_CUSTOM_MINIMUM_CAPACITY
+};
+
+enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
+
+enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+
+enum VisitMode {
+ VISIT_ALL,
+ VISIT_ALL_IN_SCAVENGE,
+ VISIT_ALL_IN_SWEEP_NEWSPACE,
+ VISIT_ONLY_STRONG
+};
+
+// Flag indicating whether code is built into the VM (one of the natives files).
+enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
+
+
+// A CodeDesc describes a buffer holding instructions and relocation
+// information. The instructions start at the beginning of the buffer
+// and grow forward, the relocation information starts at the end of
+// the buffer and grows backward.
//
-// In the current draft the term 'base code' is used to refer to code that is
-// neither in strict nor extended mode. However, the more distinguishing term
-// 'classic mode' is used in V8 instead to avoid mix-ups.
-
-enum LanguageMode {
- CLASSIC_MODE,
- STRICT_MODE,
- EXTENDED_MODE
+// |<--------------- buffer_size ---------------->|
+// |<-- instr_size -->| |<-- reloc_size -->|
+// +==================+========+==================+
+// | instructions | free | reloc info |
+// +==================+========+==================+
+// ^
+// |
+// buffer
+
+struct CodeDesc {
+ byte* buffer;
+ int buffer_size;
+ int instr_size;
+ int reloc_size;
+ Assembler* origin;
};
-// The Strict Mode (ECMA-262 5th edition, 4.2.2).
+// Callback function used for iterating objects in heap spaces,
+// for example, scanning heap objects.
+typedef int (*HeapObjectCallback)(HeapObject* obj);
+
+
+// Callback function used for checking constraints when copying/relocating
+// objects. Returns true if an object can be copied/relocated from its
+// old_addr to a new_addr.
+typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
+
+
+// Callback function on inline caches, used for iterating over inline caches
+// in compiled code.
+typedef void (*InlineCacheCallback)(Code* code, Address ic);
+
+
+// State for inline cache call sites. Aliased as IC::State.
+enum InlineCacheState {
+ // Has never been executed.
+ UNINITIALIZED,
+  // Has been executed but monomorphic state has been delayed.
+ PREMONOMORPHIC,
+ // Has been executed and only one receiver type has been seen.
+ MONOMORPHIC,
+ // Like MONOMORPHIC but check failed due to prototype.
+ MONOMORPHIC_PROTOTYPE_FAILURE,
+ // Multiple receiver types have been seen.
+ POLYMORPHIC,
+ // Many receiver types have been seen.
+ MEGAMORPHIC,
+ // A generic handler is installed and no extra typefeedback is recorded.
+ GENERIC,
+ // Special state for debug break or step in prepare stubs.
+ DEBUG_STUB
+};
+
+
+enum CallFunctionFlags {
+ NO_CALL_FUNCTION_FLAGS,
+ CALL_AS_METHOD,
+  // Always wrap the receiver and call the JSFunction. Only use this flag when
+  // both the receiver type and the target method are statically known.
+ WRAP_AND_CALL
+};
+
+
+enum CallConstructorFlags {
+ NO_CALL_CONSTRUCTOR_FLAGS,
+ // The call target is cached in the instruction stream.
+ RECORD_CONSTRUCTOR_TARGET
+};
+
+
+enum InlineCacheHolderFlag {
+ OWN_MAP, // For fast properties objects.
+ PROTOTYPE_MAP // For slow properties objects (except GlobalObjects).
+};
+
+
+// The Store Buffer (GC).
+typedef enum {
+ kStoreBufferFullEvent,
+ kStoreBufferStartScanningPagesEvent,
+ kStoreBufferScanningPageEvent
+} StoreBufferEvent;
+
+
+typedef void (*StoreBufferCallback)(Heap* heap,
+ MemoryChunk* page,
+ StoreBufferEvent event);
+
+
+// Union used for fast testing of specific double values.
+union DoubleRepresentation {
+ double value;
+ int64_t bits;
+ DoubleRepresentation(double x) { value = x; }
+ bool operator==(const DoubleRepresentation& other) const {
+ return bits == other.bits;
+ }
+};
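
A short usage sketch showing why bitwise equality is the interesting operation here (assumes IEEE-754 doubles; illustrative only):

  #include <limits>

  void Demo() {
    DoubleRepresentation pz(0.0), mz(-0.0);
    bool value_equal = (0.0 == -0.0);     // true under IEEE comparison
    bool bits_equal  = (pz == mz);        // false: the sign bit differs
    double nan = std::numeric_limits<double>::quiet_NaN();
    DoubleRepresentation a(nan), b(nan);
    bool nan_bits_equal = (a == b);       // true: identical NaN payloads
    (void)value_equal; (void)bits_equal; (void)nan_bits_equal;
  }
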
+
+
+// Union used for customized checking of the IEEE double types
+// inlined within v8 runtime, rather than going to the underlying
+// platform headers and libraries
+union IeeeDoubleLittleEndianArchType {
+ double d;
+ struct {
+ unsigned int man_low :32;
+ unsigned int man_high :20;
+ unsigned int exp :11;
+ unsigned int sign :1;
+ } bits;
+};
+
+
+union IeeeDoubleBigEndianArchType {
+ double d;
+ struct {
+ unsigned int sign :1;
+ unsigned int exp :11;
+ unsigned int man_high :20;
+ unsigned int man_low :32;
+ } bits;
+};
+
+
+// AccessorCallback
+struct AccessorDescriptor {
+ Object* (*getter)(Isolate* isolate, Object* object, void* data);
+ Object* (*setter)(
+ Isolate* isolate, JSObject* object, Object* value, void* data);
+ void* data;
+};
+
+
+// Logging and profiling. A StateTag represents a possible state of
+// the VM. The logger maintains a stack of these. Creating a VMState
+// object enters a state by pushing on the stack, and destroying a
+// VMState object leaves a state by popping the current state from the
+// stack.
+
+enum StateTag {
+ JS,
+ GC,
+ COMPILER,
+ OTHER,
+ EXTERNAL,
+ IDLE
+};
+
+
+// -----------------------------------------------------------------------------
+// Macros
+
+// Testers for tagged values.
+
+#define HAS_SMI_TAG(value) \
+ ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
+
+#define HAS_FAILURE_TAG(value) \
+ ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
+
+// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
+#define OBJECT_POINTER_ALIGN(value) \
+ (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
+
+// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
+#define POINTER_SIZE_ALIGN(value) \
+ (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
+
+// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
+#define CODE_POINTER_ALIGN(value) \
+ (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
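
Worked through on a 64-bit target (kPointerSizeLog2 == 3, so kObjectAlignmentMask == 7; kCodeAlignmentMask is 31 on every target):

  // OBJECT_POINTER_ALIGN(13) == (13 + 7)  & ~7   == 16
  // CODE_POINTER_ALIGN(100)  == (100 + 31) & ~31 == 128
  // i.e. both macros round up to the next multiple of the alignment.
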
+
+// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
+// inside a C++ class and new and delete will be overloaded so logging is
+// performed.
+// This file (globals.h) is included before log.h, so we use direct calls to
+// the Logger rather than the LOG macro.
+#ifdef DEBUG
+#define TRACK_MEMORY(name) \
+ void* operator new(size_t size) { \
+ void* result = ::operator new(size); \
+ Logger::NewEventStatic(name, result, size); \
+ return result; \
+ } \
+ void operator delete(void* object) { \
+ Logger::DeleteEventStatic(name, object); \
+ ::operator delete(object); \
+ }
+#else
+#define TRACK_MEMORY(name)
+#endif
+
+
+// CPU feature flags.
+enum CpuFeature {
+ // x86
+ SSE4_1,
+ SSE3,
+ SAHF,
+ // ARM
+ VFP3,
+ ARMv7,
+ SUDIV,
+ MLS,
+ UNALIGNED_ACCESSES,
+ MOVW_MOVT_IMMEDIATE_LOADS,
+ VFP32DREGS,
+ NEON,
+ // MIPS
+ FPU,
+ // ARM64
+ ALWAYS_ALIGN_CSP,
+ NUMBER_OF_CPU_FEATURES
+};
+
+
+// Used to specify if a macro instruction must perform a smi check on tagged
+// values.
+enum SmiCheckType {
+ DONT_DO_SMI_CHECK,
+ DO_SMI_CHECK
+};
+
+
+enum ScopeType {
+ EVAL_SCOPE, // The top-level scope for an eval source.
+ FUNCTION_SCOPE, // The top-level scope for a function.
+  MODULE_SCOPE,    // The scope introduced by a module literal.
+ GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval.
+ CATCH_SCOPE, // The scope introduced by catch.
+ BLOCK_SCOPE, // The scope introduced by a new block.
+ WITH_SCOPE // The scope introduced by with.
+};
+
+
+const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
+const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
+const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
+
+const uint64_t kHoleNanInt64 =
+ (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
+const uint64_t kLastNonNaNInt64 =
+ (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
+
+
+// The order of this enum has to be kept in sync with the predicates below.
+enum VariableMode {
+ // User declared variables:
+ VAR, // declared via 'var', and 'function' declarations
+
+ CONST_LEGACY, // declared via legacy 'const' declarations
+
+ LET, // declared via 'let' declarations (first lexical)
+
+ CONST, // declared via 'const' declarations
+
+ MODULE, // declared via 'module' declaration (last lexical)
+
+ // Variables introduced by the compiler:
+ INTERNAL, // like VAR, but not user-visible (may or may not
+ // be in a context)
+
+ TEMPORARY, // temporary variables (not user-visible), stack-allocated
+ // unless the scope as a whole has forced context allocation
+
+ DYNAMIC, // always require dynamic lookup (we don't know
+ // the declaration)
+
+ DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
+ // variable is global unless it has been shadowed
+ // by an eval-introduced variable
+
+ DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
+ // variable is local and where it is unless it
+ // has been shadowed by an eval-introduced
+ // variable
+};
+
+
+inline bool IsDynamicVariableMode(VariableMode mode) {
+ return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
+}
+
+
+inline bool IsDeclaredVariableMode(VariableMode mode) {
+ return mode >= VAR && mode <= MODULE;
+}
+
+
+inline bool IsLexicalVariableMode(VariableMode mode) {
+ return mode >= LET && mode <= MODULE;
+}
+
+
+inline bool IsImmutableVariableMode(VariableMode mode) {
+ return (mode >= CONST && mode <= MODULE) || mode == CONST_LEGACY;
+}
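
Since all four predicates lean on the declaration order of the enum, a sanity check along these lines would catch reorderings at compile time (a sketch using V8's STATIC_ASSERT; plain C++11 static_assert works too):

  STATIC_ASSERT(VAR < CONST_LEGACY);
  STATIC_ASSERT(CONST_LEGACY < LET);       // LET is the first lexical mode
  STATIC_ASSERT(CONST < MODULE);           // MODULE is the last lexical mode
  STATIC_ASSERT(DYNAMIC < DYNAMIC_GLOBAL);
  STATIC_ASSERT(DYNAMIC_GLOBAL < DYNAMIC_LOCAL);
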
+
+
+// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
+// and immutable bindings that can be in two states: initialized and
+// uninitialized. In ES5 only immutable bindings have these two states. When
+// accessing a binding, it needs to be checked for initialization. However in
+// the following cases the binding is initialized immediately after creation
+// so the initialization check can always be skipped:
+// 1. Var declared local variables.
+// var foo;
+// 2. A local variable introduced by a function declaration.
+// function foo() {}
+// 3. Parameters
+// function x(foo) {}
+// 4. Catch bound variables.
+// try {} catch (foo) {}
+// 5. Function variables of named function expressions.
+//    var x = function foo() {}
+// 6. Implicit binding of 'this'.
+// 7. Implicit binding of 'arguments' in functions.
+//
+// ES5 specified object environment records which are introduced by ES elements
+// such as Program and WithStatement that associate identifier bindings with the
+// properties of some object. In the specification only mutable bindings exist
+// (which may be non-writable) and have no distinct initialization step. However
+// V8 allows const declarations in global code with distinct creation and
+// initialization steps which are represented by non-writable properties in the
+// global object. As a result, these bindings also need to be checked for
+// initialization.
//
-// This flag is used in the backend to represent the language mode. So far
-// there is no semantic difference between the strict and the extended mode in
-// the backend, so both modes are represented by the kStrictMode value.
-enum StrictModeFlag {
- kNonStrictMode,
- kStrictMode
+// The following enum specifies a flag that indicates if the binding needs a
+// distinct initialization step (kNeedsInitialization) or if the binding is
+// immediately initialized upon creation (kCreatedInitialized).
+enum InitializationFlag {
+ kNeedsInitialization,
+ kCreatedInitialized
};
+enum ClearExceptionFlag {
+ KEEP_EXCEPTION,
+ CLEAR_EXCEPTION
+};
+
+
+enum MinusZeroMode {
+ TREAT_MINUS_ZERO_AS_ZERO,
+ FAIL_ON_MINUS_ZERO
+};
+
} } // namespace v8::internal
+namespace i = v8::internal;
+
#endif // V8_GLOBALS_H_
diff --git a/chromium/v8/src/handles-inl.h b/chromium/v8/src/handles-inl.h
index ec69c3fdbe6..833f9dd3a5a 100644
--- a/chromium/v8/src/handles-inl.h
+++ b/chromium/v8/src/handles-inl.h
@@ -1,79 +1,51 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
//
#ifndef V8_HANDLES_INL_H_
#define V8_HANDLES_INL_H_
-#include "api.h"
-#include "apiutils.h"
-#include "handles.h"
-#include "heap.h"
-#include "isolate.h"
+#include "src/api.h"
+#include "src/handles.h"
+#include "src/heap.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
template<typename T>
Handle<T>::Handle(T* obj) {
- ASSERT(!obj->IsFailure());
location_ = HandleScope::CreateHandle(obj->GetIsolate(), obj);
}
template<typename T>
Handle<T>::Handle(T* obj, Isolate* isolate) {
- ASSERT(!obj->IsFailure());
location_ = HandleScope::CreateHandle(isolate, obj);
}
template <typename T>
-inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
- ASSERT(location_ == NULL || !(*location_)->IsFailure());
- if (location_ == other.location_) return true;
- if (location_ == NULL || other.location_ == NULL) return false;
+inline bool Handle<T>::is_identical_to(const Handle<T> o) const {
// Dereferencing deferred handles to check object equality is safe.
- SLOW_ASSERT(IsDereferenceAllowed(NO_DEFERRED_CHECK) &&
- other.IsDereferenceAllowed(NO_DEFERRED_CHECK));
- return *location_ == *other.location_;
+ SLOW_ASSERT(
+ (location_ == NULL || IsDereferenceAllowed(NO_DEFERRED_CHECK)) &&
+ (o.location_ == NULL || o.IsDereferenceAllowed(NO_DEFERRED_CHECK)));
+ if (location_ == o.location_) return true;
+ if (location_ == NULL || o.location_ == NULL) return false;
+ return *location_ == *o.location_;
}
template <typename T>
inline T* Handle<T>::operator*() const {
- ASSERT(location_ != NULL && !(*location_)->IsFailure());
SLOW_ASSERT(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
return *BitCast<T**>(location_);
}
template <typename T>
inline T** Handle<T>::location() const {
- ASSERT(location_ == NULL || !(*location_)->IsFailure());
SLOW_ASSERT(location_ == NULL ||
IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
return location_;
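
A sketch of the distinction the rewritten is_identical_to draws (internal API, illustrative only; obj stands for some heap object):

  // Two handles created from the same object occupy different slots:
  Handle<Object> a(obj, isolate);
  Handle<Object> b(obj, isolate);
  // a.location() != b.location(), yet a.is_identical_to(b) is true: equal
  // slots short-circuit, NULL slots compare false, and otherwise both slots
  // are dereferenced (guarded by the SLOW_ASSERT above) and compared.
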
@@ -98,7 +70,8 @@ bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
if (!AllowHandleDereference::IsAllowed()) return false;
if (mode == INCLUDE_DEFERRED_CHECK &&
!AllowDeferredHandleDereference::IsAllowed()) {
- // Accessing maps and internalized strings is safe.
+ // Accessing cells, maps and internalized strings is safe.
+ if (heap_object->IsCell()) return true;
if (heap_object->IsMap()) return true;
if (heap_object->IsInternalizedString()) return true;
return !heap->isolate()->IsDeferredHandle(handle);
@@ -110,8 +83,7 @@ bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
HandleScope::HandleScope(Isolate* isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
isolate_ = isolate;
prev_next_ = current->next;
prev_limit_ = current->limit;
@@ -127,8 +99,7 @@ HandleScope::~HandleScope() {
void HandleScope::CloseScope(Isolate* isolate,
Object** prev_next,
Object** prev_limit) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
std::swap(current->next, prev_next);
current->level--;
@@ -146,8 +117,7 @@ void HandleScope::CloseScope(Isolate* isolate,
template <typename T>
Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
T* value = *handle_value;
// Throw away all handles in the current scope.
@@ -167,8 +137,7 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
template <typename T>
T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
ASSERT(AllowHandleAllocation::IsAllowed());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
internal::Object** cur = current->next;
if (cur == current->limit) cur = Extend(isolate);
@@ -187,8 +156,7 @@ T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
// Make sure the current thread is allowed to create handles to begin with.
CHECK(AllowHandleAllocation::IsAllowed());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
limit_ = current->limit;
@@ -201,8 +169,7 @@ inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
inline SealHandleScope::~SealHandleScope() {
// Restore state in current handle scope to re-enable handle
// allocations.
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
ASSERT_EQ(0, current->level);
current->level = level_;
ASSERT_EQ(current->next, current->limit);
diff --git a/chromium/v8/src/handles.cc b/chromium/v8/src/handles.cc
index 2d414022e09..f701d26f05f 100644
--- a/chromium/v8/src/handles.cc
+++ b/chromium/v8/src/handles.cc
@@ -1,45 +1,10 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "accessors.h"
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "natives.h"
-#include "runtime.h"
-#include "string-search.h"
-#include "stub-cache.h"
-#include "vm-state-inl.h"
+#include "src/handles.h"
namespace v8 {
namespace internal {
@@ -55,17 +20,16 @@ int HandleScope::NumberOfHandles(Isolate* isolate) {
Object** HandleScope::Extend(Isolate* isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
Object** result = current->next;
ASSERT(result == current->limit);
// Make sure there's at least one scope on the stack and that the
// top of the scope stack isn't a barrier.
- if (current->level == 0) {
- Utils::ReportApiFailure("v8::HandleScope::CreateHandle()",
- "Cannot create a handle without a HandleScope");
+ if (!Utils::ApiCheck(current->level != 0,
+ "v8::HandleScope::CreateHandle()",
+ "Cannot create a handle without a HandleScope")) {
return NULL;
}
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
@@ -95,8 +59,7 @@ Object** HandleScope::Extend(Isolate* isolate) {
void HandleScope::DeleteExtensions(Isolate* isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}
@@ -126,634 +89,10 @@ Address HandleScope::current_limit_address(Isolate* isolate) {
}
-Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
- Handle<JSArray> array) {
- CALL_HEAP_FUNCTION(content->GetIsolate(),
- content->AddKeysFromJSArray(*array), FixedArray);
-}
-
-
-Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
- Handle<FixedArray> second) {
- CALL_HEAP_FUNCTION(first->GetIsolate(),
- first->UnionOfKeys(*second), FixedArray);
-}
-
-
-Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
- Handle<JSFunction> constructor,
- Handle<JSGlobalProxy> global) {
- CALL_HEAP_FUNCTION(
- constructor->GetIsolate(),
- constructor->GetHeap()->ReinitializeJSGlobalProxy(*constructor, *global),
- JSGlobalProxy);
-}
-
-
-void FlattenString(Handle<String> string) {
- CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
-}
-
-
-Handle<String> FlattenGetString(Handle<String> string) {
- CALL_HEAP_FUNCTION(string->GetIsolate(), string->TryFlatten(), String);
-}
-
-
-Handle<Object> ForceSetProperty(Handle<JSObject> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- return Runtime::ForceSetObjectProperty(object->GetIsolate(), object, key,
- value, attributes);
-}
-
-
-Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- Runtime::DeleteObjectProperty(
- isolate, object, key, JSReceiver::NORMAL_DELETION),
- Object);
-}
-
-
-Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
- Handle<Object> key) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- Runtime::DeleteObjectProperty(
- isolate, object, key, JSReceiver::FORCE_DELETION),
- Object);
-}
-
-
-Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- Runtime::HasObjectProperty(isolate, obj, key), Object);
-}
-
-
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
- const char* name) {
- Isolate* isolate = obj->GetIsolate();
- Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
- CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object);
-}
-
-
-Handle<Object> GetProperty(Isolate* isolate,
- Handle<Object> obj,
- Handle<Object> key) {
- CALL_HEAP_FUNCTION(isolate,
- Runtime::GetObjectProperty(isolate, obj, key), Object);
-}
-
-
-Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
- uint32_t index) {
- CALL_HEAP_FUNCTION(
- isolate,
- isolate->heap()->LookupSingleCharacterStringFromCode(index), Object);
-}
-
-
-// Wrappers for scripts are kept alive and cached in weak global
-// handles referred from foreign objects held by the scripts as long as
-// they are used. When they are not used anymore, the garbage
-// collector will call the weak callback on the global handle
-// associated with the wrapper and get rid of both the wrapper and the
-// handle.
-static void ClearWrapperCache(v8::Isolate* v8_isolate,
- Persistent<v8::Value>* handle,
- void*) {
- Handle<Object> cache = Utils::OpenPersistent(handle);
- JSValue* wrapper = JSValue::cast(*cache);
- Foreign* foreign = Script::cast(wrapper->value())->wrapper();
- ASSERT(foreign->foreign_address() ==
- reinterpret_cast<Address>(cache.location()));
- foreign->set_foreign_address(0);
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- isolate->global_handles()->Destroy(cache.location());
- isolate->counters()->script_wrappers()->Decrement();
-}
-
-
-Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
- if (script->wrapper()->foreign_address() != NULL) {
- // Return a handle for the existing script wrapper from the cache.
- return Handle<JSValue>(
- *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
- }
- Isolate* isolate = script->GetIsolate();
- // Construct a new script wrapper.
- isolate->counters()->script_wrappers()->Increment();
- Handle<JSFunction> constructor = isolate->script_function();
- Handle<JSValue> result =
- Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
-
- // The allocation might have triggered a GC, which could have called this
- // function recursively, and a wrapper has already been created and cached.
- // In that case, simply return a handle for the cached wrapper.
- if (script->wrapper()->foreign_address() != NULL) {
- return Handle<JSValue>(
- *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
- }
-
- result->set_value(*script);
-
- // Create a new weak global handle and use it to cache the wrapper
- // for future use. The cache will automatically be cleared by the
- // garbage collector when it is not used anymore.
- Handle<Object> handle = isolate->global_handles()->Create(*result);
- isolate->global_handles()->MakeWeak(handle.location(),
- NULL,
- &ClearWrapperCache);
- script->wrapper()->set_foreign_address(
- reinterpret_cast<Address>(handle.location()));
- return result;
-}
-
-
-// Init line_ends array with code positions of line ends inside script
-// source.
-void InitScriptLineEnds(Handle<Script> script) {
- if (!script->line_ends()->IsUndefined()) return;
-
- Isolate* isolate = script->GetIsolate();
-
- if (!script->source()->IsString()) {
- ASSERT(script->source()->IsUndefined());
- Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
- script->set_line_ends(*empty);
- ASSERT(script->line_ends()->IsFixedArray());
- return;
- }
-
- Handle<String> src(String::cast(script->source()), isolate);
-
- Handle<FixedArray> array = CalculateLineEnds(src, true);
-
- if (*array != isolate->heap()->empty_fixed_array()) {
- array->set_map(isolate->heap()->fixed_cow_array_map());
- }
-
- script->set_line_ends(*array);
- ASSERT(script->line_ends()->IsFixedArray());
-}
-
-
-template <typename SourceChar>
-static void CalculateLineEnds(Isolate* isolate,
- List<int>* line_ends,
- Vector<const SourceChar> src,
- bool with_last_line) {
- const int src_len = src.length();
- StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n"));
-
- // Find and record line ends.
- int position = 0;
- while (position != -1 && position < src_len) {
- position = search.Search(src, position);
- if (position != -1) {
- line_ends->Add(position);
- position++;
- } else if (with_last_line) {
- // Even if the last line misses a line end, it is counted.
- line_ends->Add(src_len);
- return;
- }
- }
-}
-
-
-Handle<FixedArray> CalculateLineEnds(Handle<String> src,
- bool with_last_line) {
- src = FlattenGetString(src);
- // Rough estimate of line count based on a roughly estimated average
- // length of (unpacked) code.
- int line_count_estimate = src->length() >> 4;
- List<int> line_ends(line_count_estimate);
- Isolate* isolate = src->GetIsolate();
- {
- DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
- // Dispatch on type of strings.
- String::FlatContent content = src->GetFlatContent();
- ASSERT(content.IsFlat());
- if (content.IsAscii()) {
- CalculateLineEnds(isolate,
- &line_ends,
- content.ToOneByteVector(),
- with_last_line);
- } else {
- CalculateLineEnds(isolate,
- &line_ends,
- content.ToUC16Vector(),
- with_last_line);
- }
- }
- int line_count = line_ends.length();
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count);
- for (int i = 0; i < line_count; i++) {
- array->set(i, Smi::FromInt(line_ends[i]));
- }
- return array;
-}
-
-
-// Convert code position into line number.
-int GetScriptLineNumber(Handle<Script> script, int code_pos) {
- InitScriptLineEnds(script);
- DisallowHeapAllocation no_allocation;
- FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
- const int line_ends_len = line_ends_array->length();
-
- if (!line_ends_len) return -1;
-
- if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) {
- return script->line_offset()->value();
- }
-
- int left = 0;
- int right = line_ends_len;
- while (int half = (right - left) / 2) {
- if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
- right -= half;
- } else {
- left += half;
- }
- }
- return right + script->line_offset()->value();
-}
-
-
-// Convert code position into column number.
-int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
- int line_number = GetScriptLineNumber(script, code_pos);
- if (line_number == -1) return -1;
-
- DisallowHeapAllocation no_allocation;
- FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
- line_number = line_number - script->line_offset()->value();
- if (line_number == 0) return code_pos + script->column_offset()->value();
- int prev_line_end_pos =
- Smi::cast(line_ends_array->get(line_number - 1))->value();
- return code_pos - (prev_line_end_pos + 1);
-}
-
-
-int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
- DisallowHeapAllocation no_allocation;
- if (!script->line_ends()->IsUndefined()) {
- return GetScriptLineNumber(script, code_pos);
- }
- // Slow mode: we do not have line_ends. We have to iterate through source.
- if (!script->source()->IsString()) {
- return -1;
- }
- String* source = String::cast(script->source());
- int line = 0;
- int len = source->length();
- for (int pos = 0; pos < len; pos++) {
- if (pos == code_pos) {
- break;
- }
- if (source->Get(pos) == '\n') {
- line++;
- }
- }
- return line;
-}
-
-
-// Compute the property keys from the interceptor.
-// TODO(rossberg): support symbols in API, and filter here if needed.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
- Handle<JSObject> object) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- PropertyCallbackArguments
- args(isolate, interceptor->data(), *receiver, *object);
- v8::Handle<v8::Array> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::NamedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::NamedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
- result = args.Call(enum_fun);
- }
-#if ENABLE_EXTRA_CHECKS
- CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
-#endif
- return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
- result);
-}
-
-
-// Compute the element keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
- Handle<JSObject> object) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- PropertyCallbackArguments
- args(isolate, interceptor->data(), *receiver, *object);
- v8::Handle<v8::Array> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::IndexedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
- result = args.Call(enum_fun);
-#if ENABLE_EXTRA_CHECKS
- CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
-#endif
- }
- return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
- result);
-}
-
-
-Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) {
- Isolate* isolate = script->GetIsolate();
- Handle<String> name_or_source_url_key =
- isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("nameOrSourceURL"));
- Handle<JSValue> script_wrapper = GetScriptWrapper(script);
- Handle<Object> property = GetProperty(isolate,
- script_wrapper,
- name_or_source_url_key);
- ASSERT(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
- NULL, &caught_exception);
- if (caught_exception) {
- result = isolate->factory()->undefined_value();
- }
- return result;
-}
-
-
-static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
- int len = array->length();
- for (int i = 0; i < len; i++) {
- Object* e = array->get(i);
- if (!(e->IsString() || e->IsNumber())) return false;
- }
- return true;
-}
-
-
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
- KeyCollectionType type,
- bool* threw) {
- USE(ContainsOnlyValidKeys);
- Isolate* isolate = object->GetIsolate();
- Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
- Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
- isolate->context()->native_context()->arguments_boilerplate(),
- isolate);
- Handle<JSFunction> arguments_function = Handle<JSFunction>(
- JSFunction::cast(arguments_boilerplate->map()->constructor()),
- isolate);
-
- // Only collect keys if access is permitted.
- for (Handle<Object> p = object;
- *p != isolate->heap()->null_value();
- p = Handle<Object>(p->GetPrototype(isolate), isolate)) {
- if (p->IsJSProxy()) {
- Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
- Handle<Object> args[] = { proxy };
- Handle<Object> names = Execution::Call(isolate,
- isolate->proxy_enumerate(),
- object,
- ARRAY_SIZE(args),
- args,
- threw);
- if (*threw) return content;
- content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
- break;
- }
-
- Handle<JSObject> current(JSObject::cast(*p), isolate);
-
- // Check access rights if required.
- if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*current,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
- if (isolate->has_scheduled_exception()) {
- isolate->PromoteScheduledException();
- *threw = true;
- }
- break;
- }
-
- // Compute the element keys.
- Handle<FixedArray> element_keys =
- isolate->factory()->NewFixedArray(current->NumberOfEnumElements());
- current->GetEnumElementKeys(*element_keys);
- content = UnionOfKeys(content, element_keys);
- ASSERT(ContainsOnlyValidKeys(content));
-
- // Add the element keys from the interceptor.
- if (current->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForIndexedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- ASSERT(ContainsOnlyValidKeys(content));
- }
-
- // We can cache the computed property keys if access checks are
- // not needed and no interceptors are involved.
- //
- // We do not use the cache if the object has elements and
- // therefore it does not make sense to cache the property names
- // for arguments objects. Arguments objects will always have
- // elements.
- // Wrapped strings have elements, but don't have an elements
- // array or dictionary. So the fast inline test for whether to
- // use the cache says yes, so we should not create a cache.
- bool cache_enum_keys =
- ((current->map()->constructor() != *arguments_function) &&
- !current->IsJSValue() &&
- !current->IsAccessCheckNeeded() &&
- !current->HasNamedInterceptor() &&
- !current->HasIndexedInterceptor());
- // Compute the property keys and cache them if possible.
- content =
- UnionOfKeys(content, GetEnumPropertyKeys(current, cache_enum_keys));
- ASSERT(ContainsOnlyValidKeys(content));
-
- // Add the property keys from the interceptor.
- if (current->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForNamedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- ASSERT(ContainsOnlyValidKeys(content));
- }
-
- // If we only want local properties we bail out after the first
- // iteration.
- if (type == LOCAL_ONLY)
- break;
- }
- return content;
-}
-
-
-Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
- Isolate* isolate = object->GetIsolate();
- isolate->counters()->for_in()->Increment();
- Handle<FixedArray> elements =
- GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, threw);
- return isolate->factory()->NewJSArrayWithElements(elements);
-}
-
-
-Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length) {
- ASSERT(array->length() >= length);
- if (array->length() == length) return array;
-
- Handle<FixedArray> new_array =
- array->GetIsolate()->factory()->NewFixedArray(length);
- for (int i = 0; i < length; ++i) new_array->set(i, array->get(i));
- return new_array;
-}
-
-
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_result) {
- Isolate* isolate = object->GetIsolate();
- if (object->HasFastProperties()) {
- if (object->map()->instance_descriptors()->HasEnumCache()) {
- int own_property_count = object->map()->EnumLength();
- // If we have an enum cache, but the enum length of the given map is set
- // to kInvalidEnumCache, this means that the map itself has never used the
- // present enum cache. The first step to using the cache is to set the
- // enum length of the map by counting the number of own descriptors that
- // are not DONT_ENUM or SYMBOLIC.
- if (own_property_count == kInvalidEnumCacheSentinel) {
- own_property_count = object->map()->NumberOfDescribedProperties(
- OWN_DESCRIPTORS, DONT_SHOW);
-
- if (cache_result) object->map()->SetEnumLength(own_property_count);
- }
-
- DescriptorArray* desc = object->map()->instance_descriptors();
- Handle<FixedArray> keys(desc->GetEnumCache(), isolate);
-
- // In case the number of properties required in the enum are actually
- // present, we can reuse the enum cache. Otherwise, this means that the
- // enum cache was generated for a previous (smaller) version of the
- // Descriptor Array. In that case we regenerate the enum cache.
- if (own_property_count <= keys->length()) {
- isolate->counters()->enum_cache_hits()->Increment();
- return ReduceFixedArrayTo(keys, own_property_count);
- }
- }
-
- Handle<Map> map(object->map());
-
- if (map->instance_descriptors()->IsEmpty()) {
- isolate->counters()->enum_cache_hits()->Increment();
- if (cache_result) map->SetEnumLength(0);
- return isolate->factory()->empty_fixed_array();
- }
-
- isolate->counters()->enum_cache_misses()->Increment();
- int num_enum = map->NumberOfDescribedProperties(ALL_DESCRIPTORS, DONT_SHOW);
-
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
- Handle<FixedArray> indices = isolate->factory()->NewFixedArray(num_enum);
-
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
-
- int real_size = map->NumberOfOwnDescriptors();
- int enum_size = 0;
- int index = 0;
-
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- Object* key = descs->GetKey(i);
- if (!(details.IsDontEnum() || key->IsSymbol())) {
- if (i < real_size) ++enum_size;
- storage->set(index, key);
- if (!indices.is_null()) {
- if (details.type() != FIELD) {
- indices = Handle<FixedArray>();
- } else {
- int field_index = descs->GetFieldIndex(i);
- if (field_index >= map->inobject_properties()) {
- field_index = -(field_index - map->inobject_properties() + 1);
- }
- indices->set(index, Smi::FromInt(field_index));
- }
- }
- index++;
- }
- }
- ASSERT(index == storage->length());
-
- Handle<FixedArray> bridge_storage =
- isolate->factory()->NewFixedArray(
- DescriptorArray::kEnumCacheBridgeLength);
- DescriptorArray* desc = object->map()->instance_descriptors();
- desc->SetEnumCache(*bridge_storage,
- *storage,
- indices.is_null() ? Object::cast(Smi::FromInt(0))
- : Object::cast(*indices));
- if (cache_result) {
- object->map()->SetEnumLength(enum_size);
- }
-
- return ReduceFixedArrayTo(storage, enum_size);
- } else {
- Handle<NameDictionary> dictionary(object->property_dictionary());
-
- int length = dictionary->NumberOfElements();
- if (length == 0) {
- return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
- }
-
- // The enumeration array is generated by allocating an array big enough to
- hold all properties that have been seen, whether they are deleted or
- // not. Subsequently all visible properties are added to the array. If some
- // properties were not visible, the array is trimmed so it only contains
- // visible properties. This improves over adding elements and sorting by
- // index by having linear complexity rather than n*log(n).
-
- // By comparing the monotonically increasing NextEnumerationIndex to the NumberOfElements,
- // we can predict the number of holes in the final array. If there will be
- // more than 50% holes, regenerate the enumeration indices to reduce the
- // number of holes to a minimum. This avoids allocating a large array if
- // many properties were added but subsequently deleted.
- int next_enumeration = dictionary->NextEnumerationIndex();
- if (!object->IsGlobalObject() && next_enumeration > (length * 3) / 2) {
- NameDictionary::DoGenerateNewEnumerationIndices(dictionary);
- next_enumeration = dictionary->NextEnumerationIndex();
- }
-
- Handle<FixedArray> storage =
- isolate->factory()->NewFixedArray(next_enumeration);
-
- storage = Handle<FixedArray>(dictionary->CopyEnumKeysTo(*storage));
- ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_SHOW));
- return storage;
- }
-}
-
-
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
impl_->BeginDeferredScope();
- v8::ImplementationUtilities::HandleScopeData* data =
- impl_->isolate()->handle_scope_data();
+ HandleScopeData* data = impl_->isolate()->handle_scope_data();
Object** new_next = impl_->GetSpareOrNewBlock();
Object** new_limit = &new_next[kHandleBlockSize];
ASSERT(data->limit == &impl_->blocks()->last()[kHandleBlockSize]);
@@ -779,8 +118,7 @@ DeferredHandleScope::~DeferredHandleScope() {
DeferredHandles* DeferredHandleScope::Detach() {
DeferredHandles* deferred = impl_->Detach(prev_limit_);
- v8::ImplementationUtilities::HandleScopeData* data =
- impl_->isolate()->handle_scope_data();
+ HandleScopeData* data = impl_->isolate()->handle_scope_data();
data->next = prev_next_;
data->limit = prev_limit_;
#ifdef DEBUG
@@ -789,16 +127,4 @@ DeferredHandles* DeferredHandleScope::Detach() {
return deferred;
}
-
-void AddWeakObjectToCodeDependency(Heap* heap,
- Handle<Object> object,
- Handle<Code> code) {
- heap->EnsureWeakObjectToCodeTable();
- Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(*object));
- dep = DependentCode::Insert(dep, DependentCode::kWeaklyEmbeddedGroup, code);
- CALL_HEAP_FUNCTION_VOID(heap->isolate(),
- heap->AddWeakObjectToCodeDependency(*object, *dep));
-}
-
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/handles.h b/chromium/v8/src/handles.h
index 7fef9198646..3bd82e51e3a 100644
--- a/chromium/v8/src/handles.h
+++ b/chromium/v8/src/handles.h
@@ -1,40 +1,78 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HANDLES_H_
#define V8_HANDLES_H_
-#include "allocation.h"
-#include "apiutils.h"
-#include "objects.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
+// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle
+// into a Handle requires checking that it does not point to NULL. This
+// ensures NULL checks before use.
+// Do not use MaybeHandle as argument type.
+
+template<typename T>
+class MaybeHandle {
+ public:
+ INLINE(MaybeHandle()) : location_(NULL) { }
+
+ // Constructor for handling automatic up casting from Handle.
+ // Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
+ template <class S> MaybeHandle(Handle<S> handle) {
+#ifdef DEBUG
+ T* a = NULL;
+ S* b = NULL;
+ a = b; // Fake assignment to enforce type checks.
+ USE(a);
+#endif
+ this->location_ = reinterpret_cast<T**>(handle.location());
+ }
+
+ // Constructor for handling automatic up casting.
+ // Ex. MaybeHandle<JSArray> can be passed when Handle<Object> is expected.
+ template <class S> MaybeHandle(MaybeHandle<S> maybe_handle) {
+#ifdef DEBUG
+ T* a = NULL;
+ S* b = NULL;
+ a = b; // Fake assignment to enforce type checks.
+ USE(a);
+#endif
+ location_ = reinterpret_cast<T**>(maybe_handle.location_);
+ }
+
+ INLINE(void Assert() const) { ASSERT(location_ != NULL); }
+ INLINE(void Check() const) { CHECK(location_ != NULL); }
+
+ INLINE(Handle<T> ToHandleChecked()) const {
+ Check();
+ return Handle<T>(location_);
+ }
+
+ // Convert to a Handle with a type that can be upcasted to.
+ template <class S> INLINE(bool ToHandle(Handle<S>* out)) {
+ if (location_ == NULL) {
+ *out = Handle<T>::null();
+ return false;
+ } else {
+ *out = Handle<T>(location_);
+ return true;
+ }
+ }
+
+ bool is_null() const { return location_ == NULL; }
+
+ protected:
+ T** location_;
+
+ // MaybeHandles of different classes are allowed to access each
+ // other's location_.
+ template<class S> friend class MaybeHandle;
+};
+
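A minimal sketch of the calling convention this enables (illustrative
only; MaybeGetProperty is a hypothetical producer that can fail):

    MaybeHandle<Object> maybe = MaybeGetProperty(isolate, receiver, key);

    // Branching consumer: ToHandle() forces the NULL check at the call
    // site before the handle can be dereferenced.
    Handle<Object> value;
    if (!maybe.ToHandle(&value)) return;  // empty handle: bail out

    // Asserting consumer: ToHandleChecked() CHECKs the handle is set.
    Handle<Object> checked = maybe.ToHandleChecked();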
// ----------------------------------------------------------------------------
// A Handle provides a reference to an object that survives relocation by
// the garbage collector.
@@ -48,7 +86,9 @@ class Handle {
INLINE(explicit Handle(T* obj));
INLINE(Handle(T* obj, Isolate* isolate));
- INLINE(Handle()) : location_(NULL) {}
+ // TODO(yangguo): Values that contain empty handles should be declared as
+ // MaybeHandle to force validation before being used as handles.
+ INLINE(Handle()) : location_(NULL) { }
// Constructor for handling automatic up casting.
// Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
@@ -78,6 +118,8 @@ class Handle {
return Handle<T>(reinterpret_cast<T**>(that.location_));
}
+ // TODO(yangguo): Values that contain empty handles should be declared as
+ // MaybeHandle to force validation before being used as handles.
static Handle<T> null() { return Handle<T>(); }
bool is_null() const { return location_ == NULL; }
@@ -113,6 +155,13 @@ inline Handle<T> handle(T* t) {
}
+// Key comparison function for Map handles.
+inline bool operator<(const Handle<Map>& lhs, const Handle<Map>& rhs) {
+ // This is safe because maps don't move.
+ return *lhs < *rhs;
+}
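This comparator is what lets Handle<Map> key ordered containers; a
hypothetical use (not from this patch), given some Map* map:

    std::map<Handle<Map>, int> counts;  // ordered by raw Map* value
    counts[handle(map)]++;  // valid only because maps never move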
+
+
class DeferredHandles;
class HandleScopeImplementer;
@@ -215,91 +264,6 @@ class DeferredHandleScope {
};
-// ----------------------------------------------------------------------------
-// Handle operations.
-// They might invoke garbage collection. The result is a handle to
-// an object of the expected type, or an error handle if we run out
-// of space or hit an internal error.
-
-// Flattens a string.
-void FlattenString(Handle<String> str);
-
-// Flattens a string and returns the underlying external or sequential
-// string.
-Handle<String> FlattenGetString(Handle<String> str);
-
-Handle<Object> ForceSetProperty(Handle<JSObject> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
-Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key);
-
-Handle<Object> ForceDeleteProperty(Handle<JSObject> object, Handle<Object> key);
-
-Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key);
-
-Handle<Object> GetProperty(Handle<JSReceiver> obj, const char* name);
-
-Handle<Object> GetProperty(Isolate* isolate,
- Handle<Object> obj,
- Handle<Object> key);
-
-Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
- uint32_t index);
-
-Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
- Handle<JSArray> array);
-
-// Get the JS object corresponding to the given script; create it
-// if none exists.
-Handle<JSValue> GetScriptWrapper(Handle<Script> script);
-
-// Script line number computations. Note that the line number is zero-based.
-void InitScriptLineEnds(Handle<Script> script);
-// For string calculates an array of line end positions. If the string
-// does not end with a new line character, this character may optionally be
-// imagined.
-Handle<FixedArray> CalculateLineEnds(Handle<String> string,
- bool with_imaginary_last_new_line);
-int GetScriptLineNumber(Handle<Script> script, int code_position);
-// The safe version does not make heap allocations but may be much slower.
-int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
-int GetScriptColumnNumber(Handle<Script> script, int code_position);
-Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script);
-
-// Computes the enumerable keys from interceptors. Used for debug mirrors and
-// by GetKeysInFixedArrayFor below.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
- Handle<JSObject> object);
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
- Handle<JSObject> object);
-
-enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
-
-// Computes the enumerable keys for a JSObject. Used for implementing
-// "for (n in object) { }".
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
- KeyCollectionType type,
- bool* threw);
-Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw);
-Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length);
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_result);
-
-// Computes the union of keys and return the result.
-// Used for implementing "for (n in object) { }"
-Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
- Handle<FixedArray> second);
-
-Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
- Handle<JSFunction> constructor,
- Handle<JSGlobalProxy> global);
-
-void AddWeakObjectToCodeDependency(Heap* heap,
- Handle<Object> object,
- Handle<Code> code);
-
// Seal off the current HandleScope so that new handles can only be created
// if a new HandleScope is entered.
class SealHandleScope BASE_EMBEDDED {
@@ -317,6 +281,17 @@ class SealHandleScope BASE_EMBEDDED {
#endif
};
+struct HandleScopeData {
+ internal::Object** next;
+ internal::Object** limit;
+ int level;
+
+ void Initialize() {
+ next = limit = NULL;
+ level = 0;
+ }
+};
+
} } // namespace v8::internal
#endif // V8_HANDLES_H_
diff --git a/chromium/v8/src/harmony-array.js b/chromium/v8/src/harmony-array.js
index a9cc3b83841..dbcb292a087 100644
--- a/chromium/v8/src/harmony-array.js
+++ b/chromium/v8/src/harmony-array.js
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
'use strict';
@@ -35,10 +12,7 @@
// ES6 draft 07-15-13, section 15.4.3.23
function ArrayFind(predicate /* thisArg */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.find"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
var array = ToObject(this);
var length = ToInteger(array.length);
@@ -54,7 +28,7 @@ function ArrayFind(predicate /* thisArg */) { // length == 1
if (IS_NULL_OR_UNDEFINED(thisArg)) {
thisArg = %GetDefaultReceiver(predicate) || thisArg;
- } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsSloppyModeFunction(predicate)) {
thisArg = ToObject(thisArg);
}
@@ -73,10 +47,7 @@ function ArrayFind(predicate /* thisArg */) { // length == 1
// ES6 draft 07-15-13, section 15.4.3.24
function ArrayFindIndex(predicate /* thisArg */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.findIndex"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
var array = ToObject(this);
var length = ToInteger(array.length);
@@ -92,7 +63,7 @@ function ArrayFindIndex(predicate /* thisArg */) { // length == 1
if (IS_NULL_OR_UNDEFINED(thisArg)) {
thisArg = %GetDefaultReceiver(predicate) || thisArg;
- } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsSloppyModeFunction(predicate)) {
thisArg = ToObject(thisArg);
}
@@ -109,6 +80,49 @@ function ArrayFindIndex(predicate /* thisArg */) { // length == 1
}
+// ES6, draft 04-05-14, section 22.1.3.6
+function ArrayFill(value /* [, start [, end ] ] */) { // length == 1
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
+
+ var array = ToObject(this);
+ var length = TO_UINT32(array.length);
+
+ var i = 0;
+ var end = length;
+
+ if (%_ArgumentsLength() > 1) {
+ i = %_Arguments(1);
+ i = IS_UNDEFINED(i) ? 0 : TO_INTEGER(i);
+ if (%_ArgumentsLength() > 2) {
+ end = %_Arguments(2);
+ end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
+ }
+ }
+
+ if (i < 0) {
+ i += length;
+ if (i < 0) i = 0;
+ } else {
+ if (i > length) i = length;
+ }
+
+ if (end < 0) {
+ end += length;
+ if (end < 0) end = 0;
+ } else {
+ if (end > length) end = length;
+ }
+
+ if ((end - i) > 0 && ObjectIsFrozen(array)) {
+ throw MakeTypeError("array_functions_on_frozen",
+ ["Array.prototype.fill"]);
+ }
+
+ for (; i < end; i++)
+ array[i] = value;
+ return array;
+}
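A quick worked check of the clamping above (standard ES6 fill semantics,
not new to this patch): on [1, 2, 3], fill(7, -2) clamps start to
3 - 2 = 1 and yields [1, 7, 7]; fill(7, 1, -1) clamps end to 3 - 1 = 2
and yields [1, 7, 3]; plain fill(7) overwrites every element.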
+
// -------------------------------------------------------------------
function HarmonyArrayExtendArrayPrototype() {
@@ -117,7 +131,8 @@ function HarmonyArrayExtendArrayPrototype() {
// Set up the non-enumerable functions on the Array prototype object.
InstallFunctions($Array.prototype, DONT_ENUM, $Array(
"find", ArrayFind,
- "findIndex", ArrayFindIndex
+ "findIndex", ArrayFindIndex,
+ "fill", ArrayFill
));
}
diff --git a/chromium/v8/src/harmony-math.js b/chromium/v8/src/harmony-math.js
index a4d3f2e8a5e..4a8d95bc01a 100644
--- a/chromium/v8/src/harmony-math.js
+++ b/chromium/v8/src/harmony-math.js
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
'use strict';
@@ -47,14 +24,223 @@ function MathTrunc(x) {
}
+// ES6 draft 09-27-13, section 20.2.2.30.
+function MathSinh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ // Idempotent for NaN, +/-0 and +/-Infinity.
+ if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
+ return (MathExp(x) - MathExp(-x)) / 2;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.12.
+function MathCosh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ if (!NUMBER_IS_FINITE(x)) return MathAbs(x);
+ return (MathExp(x) + MathExp(-x)) / 2;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.33.
+function MathTanh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ // Idempotent for +/-0.
+ if (x === 0) return x;
+ // Returns +/-1 for +/-Infinity.
+ if (!NUMBER_IS_FINITE(x)) return MathSign(x);
+ var exp1 = MathExp(x);
+ var exp2 = MathExp(-x);
+ return (exp1 - exp2) / (exp1 + exp2);
+}
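For reference, the identities the three shims above compute:

    \sinh x = \frac{e^x - e^{-x}}{2}, \qquad
    \cosh x = \frac{e^x + e^{-x}}{2}, \qquad
    \tanh x = \frac{e^x - e^{-x}}{e^x + e^{-x}}

MathTanh evaluates the last form directly with one pair of MathExp calls
instead of dividing MathSinh by MathCosh.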
+
+
+// ES6 draft 09-27-13, section 20.2.2.5.
+function MathAsinh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ // Idempotent for NaN, +/-0 and +/-Infinity.
+ if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
+ if (x > 0) return MathLog(x + MathSqrt(x * x + 1));
+ // This is to prevent numerical errors caused by large negative x.
+ return -MathLog(-x + MathSqrt(x * x + 1));
+}
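The sign split above is a numerical-stability measure. The textbook
formula is

    \operatorname{asinh}(x) = \ln\bigl(x + \sqrt{x^2 + 1}\bigr)

but for large negative x the two terms nearly cancel: for x = -10^8 both
are about 10^8 in magnitude and their sum is about 5e-9, so most
significant digits would be lost. Because asinh is odd, evaluating
-\ln(-x + \sqrt{x^2 + 1}) for x < 0 keeps the addition between two
positive quantities.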
+
+
+// ES6 draft 09-27-13, section 20.2.2.3.
+function MathAcosh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ if (x < 1) return NAN;
+ // Idempotent for NaN and +Infinity.
+ if (!NUMBER_IS_FINITE(x)) return x;
+ return MathLog(x + MathSqrt(x + 1) * MathSqrt(x - 1));
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.7.
+function MathAtanh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ // Idempotent for +/-0.
+ if (x === 0) return x;
+ // Returns NaN for NaN and +/- Infinity.
+ if (!NUMBER_IS_FINITE(x)) return NAN;
+ return 0.5 * MathLog((1 + x) / (1 - x));
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.21.
+function MathLog10(x) {
+ return MathLog(x) * 0.434294481903251828; // log10(x) = log(x)/log(10).
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.22.
+function MathLog2(x) {
+ return MathLog(x) * 1.442695040888963407; // log2(x) = log(x)/log(2).
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.17.
+function MathHypot(x, y) { // Function length is 2.
+ // We may want to introduce fast paths for two arguments and when
+ // normalization to avoid overflow is not necessary. For now, we
+ // simply assume the general case.
+ var length = %_ArgumentsLength();
+ var args = new InternalArray(length);
+ var max = 0;
+ for (var i = 0; i < length; i++) {
+ var n = %_Arguments(i);
+ if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
+ if (n === INFINITY || n === -INFINITY) return INFINITY;
+ n = MathAbs(n);
+ if (n > max) max = n;
+ args[i] = n;
+ }
+
+ // Kahan summation to avoid rounding errors.
+ // Normalize the numbers to the largest one to avoid overflow.
+ if (max === 0) max = 1;
+ var sum = 0;
+ var compensation = 0;
+ for (var i = 0; i < length; i++) {
+ var n = args[i] / max;
+ var summand = n * n - compensation;
+ var preliminary = sum + summand;
+ compensation = (preliminary - sum) - summand;
+ sum = preliminary;
+ }
+ return MathSqrt(sum) * max;
+}
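Two ideas are combined here. Normalizing by the largest magnitude
m = max_i |x_i| rewrites the sum as

    \sqrt{\textstyle\sum_i x_i^2} = m \sqrt{\textstyle\sum_i (x_i / m)^2}

so every squared ratio is at most 1 and cannot overflow; for example
hypot(3e200, 4e200) correctly yields 5e200 even though (3e200)^2 alone
overflows a double to Infinity. The compensation variable is classic
Kahan summation: it re-adds the low-order bits lost each time a summand
is folded into sum.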
+
+
+// ES6 draft 09-27-13, section 20.2.2.16.
+function MathFroundJS(x) {
+ return %MathFround(TO_NUMBER_INLINE(x));
+}
+
+
+function MathClz32(x) {
+ x = ToUint32(TO_NUMBER_INLINE(x));
+ if (x == 0) return 32;
+ var result = 0;
+ // Binary search.
+ if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; }
+ if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; }
+ if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; }
+ if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; }
+ if ((x & 0x80000000) === 0) { x <<= 1; result += 1; }
+ return result;
+}
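A worked trace of the binary search, for x = 0x00F00000: the top 16 bits
(0x00F0) are not all zero, so the first test does nothing; bits 31..24
are zero, so x <<= 8 and result becomes 8; the shifted value 0xF0000000
has its top nibble and top bit set, so the last three tests add nothing,
and the function returns 8, the count of leading zero bits.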
+
+
+// ES6 draft 09-27-13, section 20.2.2.9.
+// Cube root approximation, refer to: http://metamerist.com/cbrt/cbrt.htm
+// Using initial approximation adapted from Kahan's cbrt and 4 iterations
+// of Newton's method.
+function MathCbrt(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ if (x == 0 || !NUMBER_IS_FINITE(x)) return x;
+ return x >= 0 ? CubeRoot(x) : -CubeRoot(-x);
+}
+
+macro NEWTON_ITERATION_CBRT(x, approx)
+ (1.0 / 3.0) * (x / (approx * approx) + 2 * approx);
+endmacro
+
+function CubeRoot(x) {
+ var approx_hi = MathFloor(%_DoubleHi(x) / 3) + 0x2A9F7893;
+ var approx = %_ConstructDouble(approx_hi, 0);
+ approx = NEWTON_ITERATION_CBRT(x, approx);
+ approx = NEWTON_ITERATION_CBRT(x, approx);
+ approx = NEWTON_ITERATION_CBRT(x, approx);
+ return NEWTON_ITERATION_CBRT(x, approx);
+}
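The macro is a single Newton step for f(y) = y^3 - x:

    y_{n+1} = y_n - \frac{y_n^3 - x}{3 y_n^2}
            = \frac{1}{3}\left(\frac{x}{y_n^2} + 2 y_n\right)

which is exactly the expression in NEWTON_ITERATION_CBRT. Four steps
from the exponent-based seed (the %_DoubleHi arithmetic roughly divides
the binary exponent by three) are enough to converge to double
precision.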
+
+
+// ES6 draft 09-27-13, section 20.2.2.14.
+// Use Taylor series to approximate.
+// exp(x) - 1 at 0 == -1 + exp(0) + exp'(0)*x/1! + exp''(0)*x^2/2! + ...
+// == x/1! + x^2/2! + x^3/3! + ...
+// The closer x is to 0, the fewer terms are required.
+function MathExpm1(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ var xabs = MathAbs(x);
+ if (xabs < 2E-7) {
+ return x * (1 + x * (1/2));
+ } else if (xabs < 6E-5) {
+ return x * (1 + x * (1/2 + x * (1/6)));
+ } else if (xabs < 2E-2) {
+ return x * (1 + x * (1/2 + x * (1/6 +
+ x * (1/24 + x * (1/120 + x * (1/720))))));
+ } else { // Use regular exp if not close enough to 0.
+ return MathExp(x) - 1;
+ }
+}
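The small-|x| branches exist because the naive MathExp(x) - 1 cancels
catastrophically near zero: for x = 1e-10, exp(x) is rounded to within
ulp(1) ~ 2.2e-16 of the true value, so subtracting 1 leaves only about
six correct digits, while the truncated series x * (1 + x/2) is accurate
to full precision. MathLog1p below applies the same strategy to
log(1 + x).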
+
+
+// ES6 draft 09-27-13, section 20.2.2.20.
+// Use Taylor series to approximate. With y = x + 1;
+// log(y) at 1 == log(1) + log'(1)(y-1)/1! + log''(1)(y-1)^2/2! + ...
+// == 0 + x - x^2/2 + x^3/3 ...
+// The closer x is to 0, the fewer terms are required.
+function MathLog1p(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ var xabs = MathAbs(x);
+ if (xabs < 1E-7) {
+ return x * (1 - x * (1/2));
+ } else if (xabs < 3E-5) {
+ return x * (1 - x * (1/2 - x * (1/3)));
+ } else if (xabs < 7E-3) {
+ return x * (1 - x * (1/2 - x * (1/3 - x * (1/4 -
+ x * (1/5 - x * (1/6 - x * (1/7)))))));
+ } else { // Use regular log if not close enough to 0.
+ return MathLog(1 + x);
+ }
+}
+
+
function ExtendMath() {
%CheckIsBootstrapping();
// Set up the non-enumerable functions on the Math object.
InstallFunctions($Math, DONT_ENUM, $Array(
"sign", MathSign,
- "trunc", MathTrunc
+ "trunc", MathTrunc,
+ "sinh", MathSinh,
+ "cosh", MathCosh,
+ "tanh", MathTanh,
+ "asinh", MathAsinh,
+ "acosh", MathAcosh,
+ "atanh", MathAtanh,
+ "log10", MathLog10,
+ "log2", MathLog2,
+ "hypot", MathHypot,
+ "fround", MathFroundJS,
+ "clz32", MathClz32,
+ "cbrt", MathCbrt,
+ "log1p", MathLog1p,
+ "expm1", MathExpm1
));
}
+
ExtendMath();
diff --git a/chromium/v8/src/harmony-string.js b/chromium/v8/src/harmony-string.js
index 8e4b9a46264..4cd8e6687ed 100644
--- a/chromium/v8/src/harmony-string.js
+++ b/chromium/v8/src/harmony-string.js
@@ -1,29 +1,6 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
'use strict';
@@ -34,12 +11,9 @@
// -------------------------------------------------------------------
-// ES6 draft 07-15-13, section 15.5.3.21
+// ES6 draft 01-20-14, section 21.1.3.13
function StringRepeat(count) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.repeat"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
var s = TO_STRING_INLINE(this);
var n = ToInteger(count);
@@ -56,14 +30,17 @@ function StringRepeat(count) {
}
-// ES6 draft 07-15-13, section 15.5.3.22
+// ES6 draft 04-05-14, section 21.1.3.18
function StringStartsWith(searchString /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith");
+
+ var s = TO_STRING_INLINE(this);
+
+ if (IS_REGEXP(searchString)) {
+ throw MakeTypeError("first_argument_not_regexp",
["String.prototype.startsWith"]);
}
- var s = TO_STRING_INLINE(this);
var ss = TO_STRING_INLINE(searchString);
var pos = 0;
if (%_ArgumentsLength() > 1) {
@@ -82,14 +59,17 @@ function StringStartsWith(searchString /* position */) { // length == 1
}
-// ES6 draft 07-15-13, section 15.5.3.23
+// ES6 draft 04-05-14, section 21.1.3.7
function StringEndsWith(searchString /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith");
+
+ var s = TO_STRING_INLINE(this);
+
+ if (IS_REGEXP(searchString)) {
+ throw MakeTypeError("first_argument_not_regexp",
["String.prototype.endsWith"]);
}
- var s = TO_STRING_INLINE(this);
var ss = TO_STRING_INLINE(searchString);
var s_len = s.length;
var pos = s_len;
@@ -111,14 +91,17 @@ function StringEndsWith(searchString /* position */) { // length == 1
}
-// ES6 draft 07-15-13, section 15.5.3.24
+// ES6 draft 04-05-14, section 21.1.3.6
function StringContains(searchString /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.contains");
+
+ var s = TO_STRING_INLINE(this);
+
+ if (IS_REGEXP(searchString)) {
+ throw MakeTypeError("first_argument_not_regexp",
["String.prototype.contains"]);
}
- var s = TO_STRING_INLINE(this);
var ss = TO_STRING_INLINE(searchString);
var pos = 0;
if (%_ArgumentsLength() > 1) {
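The net effect of the three new IS_REGEXP guards in this file:
"abc".startsWith(/a/), and likewise the endsWith/contains analogues, now
throws a TypeError ("first_argument_not_regexp"), where the old code
silently coerced the RegExp to the string "/a/" and usually returned
false.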
diff --git a/chromium/v8/src/hashmap.h b/chromium/v8/src/hashmap.h
index 11f6ace7d83..d800f2f4389 100644
--- a/chromium/v8/src/hashmap.h
+++ b/chromium/v8/src/hashmap.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HASHMAP_H_
#define V8_HASHMAP_H_
-#include "allocation.h"
-#include "checks.h"
-#include "utils.h"
+#include "src/allocation.h"
+#include "src/checks.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -98,6 +75,11 @@ class TemplateHashMapImpl {
Entry* Start() const;
Entry* Next(Entry* p) const;
+ // Some match functions defined for convenience.
+ static bool PointersMatch(void* key1, void* key2) {
+ return key1 == key2;
+ }
+
private:
MatchFun match_;
Entry* map_;
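A hypothetical use of the new matcher (HashMap, Lookup and
ComputePointerHash are the existing hashmap/utils machinery of this
tree, not part of this patch; key and payload are assumed void* locals):

    // HashMap is the common instantiation of TemplateHashMapImpl; with
    // PointersMatch, keys compare by raw pointer identity.
    HashMap map(HashMap::PointersMatch);
    HashMap::Entry* entry =
        map.Lookup(key, ComputePointerHash(key), true /* insert */);
    entry->value = payload;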
diff --git a/chromium/v8/src/heap-inl.h b/chromium/v8/src/heap-inl.h
index 525c634da62..2e80452b05a 100644
--- a/chromium/v8/src/heap-inl.h
+++ b/chromium/v8/src/heap-inl.h
@@ -1,41 +1,20 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HEAP_INL_H_
#define V8_HEAP_INL_H_
-#include "heap.h"
-#include "isolate.h"
-#include "list-inl.h"
-#include "objects.h"
-#include "platform.h"
-#include "v8-counters.h"
-#include "store-buffer.h"
-#include "store-buffer-inl.h"
+#include <cmath>
+
+#include "src/heap.h"
+#include "src/heap-profiler.h"
+#include "src/isolate.h"
+#include "src/list-inl.h"
+#include "src/objects.h"
+#include "src/platform.h"
+#include "src/store-buffer.h"
+#include "src/store-buffer-inl.h"
namespace v8 {
namespace internal {
@@ -82,22 +61,6 @@ void PromotionQueue::ActivateGuardIfOnTheSamePage() {
}
-MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
- PretenureFlag pretenure) {
- // Check for ASCII first since this is the common case.
- const char* start = str.start();
- int length = str.length();
- int non_ascii_start = String::NonAsciiStart(start, length);
- if (non_ascii_start >= length) {
- // If the string is ASCII, we do not need to convert the characters
- // since UTF8 is backwards compatible with ASCII.
- return AllocateStringFromOneByte(str, pretenure);
- }
- // Non-ASCII and we need to decode.
- return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);
-}
-
-
template<>
bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
// TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
@@ -112,7 +75,7 @@ bool inline Heap::IsOneByte(String* str, int chars) {
}
-MaybeObject* Heap::AllocateInternalizedStringFromUtf8(
+AllocationResult Heap::AllocateInternalizedStringFromUtf8(
Vector<const char> str, int chars, uint32_t hash_field) {
if (IsOneByte(str, chars)) {
return AllocateOneByteInternalizedString(
@@ -123,7 +86,7 @@ MaybeObject* Heap::AllocateInternalizedStringFromUtf8(
template<typename T>
-MaybeObject* Heap::AllocateInternalizedStringImpl(
+AllocationResult Heap::AllocateInternalizedStringImpl(
T t, int chars, uint32_t hash_field) {
if (IsOneByte(t, chars)) {
return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
@@ -132,24 +95,23 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
}
-MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
- uint32_t hash_field) {
- if (str.length() > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x2);
- }
+AllocationResult Heap::AllocateOneByteInternalizedString(
+ Vector<const uint8_t> str,
+ uint32_t hash_field) {
+ CHECK_GE(String::kMaxLength, str.length());
// Compute map and object size.
Map* map = ascii_internalized_string_map();
int size = SeqOneByteString::SizeFor(str.length());
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&result)) return allocation;
}
// String maps are all immortal immovable objects.
- reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
+ result->set_map_no_write_barrier(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(str.length());
@@ -158,30 +120,28 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- OS::MemCopy(answer->address() + SeqOneByteString::kHeaderSize,
- str.start(), str.length());
+ MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(),
+ str.length());
return answer;
}
-MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
- uint32_t hash_field) {
- if (str.length() > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x3);
- }
+AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
+ uint32_t hash_field) {
+ CHECK_GE(String::kMaxLength, str.length());
// Compute map and object size.
Map* map = internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&result)) return allocation;
}
- reinterpret_cast<HeapObject*>(result)->set_map(map);
+ result->set_map(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(str.length());
@@ -190,98 +150,84 @@ MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- OS::MemCopy(answer->address() + SeqTwoByteString::kHeaderSize,
- str.start(), str.length() * kUC16Size);
+ MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
+ str.length() * kUC16Size);
return answer;
}
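
Both internalized-string allocators size the object from the character payload
before choosing a space: SeqOneByteString stores one byte per character,
SeqTwoByteString two (hence the "* kUC16Size" in the copy above). A hedged
sketch of that SizeFor-style computation; the header size and alignment
constants are illustrative stand-ins, not V8's actual values:

#include <cstdio>

const int kHeaderSize = 16;      // assumed: map + length + hash fields
const int kObjectAlignment = 8;  // assumed allocation granularity

static int SizeFor(int chars, int bytes_per_char) {
  int unaligned = kHeaderSize + chars * bytes_per_char;
  // Round up to the allocation granularity; object sizes must be aligned.
  return (unaligned + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
}

int main() {
  std::printf("one-byte, 3 chars: %d bytes\n", SizeFor(3, 1));  // 24
  std::printf("two-byte, 3 chars: %d bytes\n", SizeFor(3, 2));  // 24
  return 0;
}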
-MaybeObject* Heap::CopyFixedArray(FixedArray* src) {
+AllocationResult Heap::CopyFixedArray(FixedArray* src) {
+ if (src->length() == 0) return src;
return CopyFixedArrayWithMap(src, src->map());
}
-MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
+AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
+ if (src->length() == 0) return src;
return CopyFixedDoubleArrayWithMap(src, src->map());
}
-MaybeObject* Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
+AllocationResult Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
+ if (src->length() == 0) return src;
return CopyConstantPoolArrayWithMap(src, src->map());
}
-MaybeObject* Heap::AllocateRaw(int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space) {
+AllocationResult Heap::AllocateRaw(int size_in_bytes,
+ AllocationSpace space,
+ AllocationSpace retry_space) {
ASSERT(AllowHandleAllocation::IsAllowed());
ASSERT(AllowHeapAllocation::IsAllowed());
ASSERT(gc_state_ == NOT_IN_GC);
HeapProfiler* profiler = isolate_->heap_profiler();
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
- !disallow_allocation_failure_ &&
+ AllowAllocationFailure::IsAllowed(isolate_) &&
Heap::allocation_timeout_-- <= 0) {
- return Failure::RetryAfterGC(space);
+ return AllocationResult::Retry(space);
}
isolate_->counters()->objs_since_last_full()->Increment();
isolate_->counters()->objs_since_last_young()->Increment();
#endif
HeapObject* object;
- MaybeObject* result;
+ AllocationResult allocation;
if (NEW_SPACE == space) {
- result = new_space_.AllocateRaw(size_in_bytes);
- if (always_allocate() && result->IsFailure() && retry_space != NEW_SPACE) {
+ allocation = new_space_.AllocateRaw(size_in_bytes);
+ if (always_allocate() &&
+ allocation.IsRetry() &&
+ retry_space != NEW_SPACE) {
space = retry_space;
} else {
- if (profiler->is_tracking_allocations() && result->To(&object)) {
+ if (profiler->is_tracking_allocations() && allocation.To(&object)) {
profiler->AllocationEvent(object->address(), size_in_bytes);
}
- return result;
+ return allocation;
}
}
if (OLD_POINTER_SPACE == space) {
- result = old_pointer_space_->AllocateRaw(size_in_bytes);
+ allocation = old_pointer_space_->AllocateRaw(size_in_bytes);
} else if (OLD_DATA_SPACE == space) {
- result = old_data_space_->AllocateRaw(size_in_bytes);
+ allocation = old_data_space_->AllocateRaw(size_in_bytes);
} else if (CODE_SPACE == space) {
- result = code_space_->AllocateRaw(size_in_bytes);
+ allocation = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
- result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
- result = cell_space_->AllocateRaw(size_in_bytes);
+ allocation = cell_space_->AllocateRaw(size_in_bytes);
} else if (PROPERTY_CELL_SPACE == space) {
- result = property_cell_space_->AllocateRaw(size_in_bytes);
+ allocation = property_cell_space_->AllocateRaw(size_in_bytes);
} else {
ASSERT(MAP_SPACE == space);
- result = map_space_->AllocateRaw(size_in_bytes);
+ allocation = map_space_->AllocateRaw(size_in_bytes);
}
- if (result->IsFailure()) old_gen_exhausted_ = true;
- if (profiler->is_tracking_allocations() && result->To(&object)) {
+ if (allocation.IsRetry()) old_gen_exhausted_ = true;
+ if (profiler->is_tracking_allocations() && allocation.To(&object)) {
profiler->AllocationEvent(object->address(), size_in_bytes);
}
- return result;
-}
-
-
-MaybeObject* Heap::NumberFromInt32(
- int32_t value, PretenureFlag pretenure) {
- if (Smi::IsValid(value)) return Smi::FromInt(value);
- // Bypass NumberFromDouble to avoid various redundant checks.
- return AllocateHeapNumber(FastI2D(value), pretenure);
-}
-
-
-MaybeObject* Heap::NumberFromUint32(
- uint32_t value, PretenureFlag pretenure) {
- if (static_cast<int32_t>(value) >= 0 &&
- Smi::IsValid(static_cast<int32_t>(value))) {
- return Smi::FromInt(static_cast<int32_t>(value));
- }
- // Bypass NumberFromDouble to avoid various redundant checks.
- return AllocateHeapNumber(FastUI2D(value), pretenure);
+ return allocation;
}
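
This hunk is the heart of the MaybeObject-to-AllocationResult migration: an
allocation either yields a HeapObject via To(), or is a retry value tagged with
the space that should be collected. A simplified, self-contained sketch of the
pattern; these types are stand-ins, not V8's definitions:

#include <cstdio>

struct HeapObject { };
enum AllocationSpace { NEW_SPACE, OLD_DATA_SPACE };

// Stand-in for v8::internal::AllocationResult.
class AllocationResult {
 public:
  static AllocationResult Of(HeapObject* obj) {
    return AllocationResult(obj, NEW_SPACE);
  }
  static AllocationResult Retry(AllocationSpace space) {
    return AllocationResult(nullptr, space);
  }
  bool IsRetry() const { return object_ == nullptr; }
  AllocationSpace RetrySpace() const { return space_; }
  bool To(HeapObject** out) const {
    if (IsRetry()) return false;
    *out = object_;
    return true;
  }
 private:
  AllocationResult(HeapObject* obj, AllocationSpace space)
      : object_(obj), space_(space) {}
  HeapObject* object_;
  AllocationSpace space_;
};

int main() {
  HeapObject heap_object;
  HeapObject* result;
  AllocationResult allocation = AllocationResult::Retry(OLD_DATA_SPACE);
  if (!allocation.To(&result)) {
    std::printf("retry: collect space %d, then allocate again\n",
                static_cast<int>(allocation.RetrySpace()));
  }
  allocation = AllocationResult::Of(&heap_object);
  if (allocation.To(&result)) std::printf("allocation succeeded\n");
  return 0;
}

The failure path no longer materializes a Failure object; retry-ness is encoded
in the result value itself, which is why old_gen_exhausted_ is now driven by
allocation.IsRetry() above.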
@@ -415,7 +361,7 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
}
-bool Heap::AllowedToBeMigrated(HeapObject* object, AllocationSpace dst) {
+bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
// Object migration is governed by the following rules:
//
// 1) Objects in new-space can be migrated to one of the old spaces
@@ -425,18 +371,22 @@ bool Heap::AllowedToBeMigrated(HeapObject* object, AllocationSpace dst) {
// fixed arrays in new-space, old-data-space and old-pointer-space.
// 4) Fillers (one word) can never migrate, they are skipped by
// incremental marking explicitly to prevent invalid pattern.
+ // 5) Short external strings can end up in old pointer space when a cons
+ // string in old pointer space is made external (String::MakeExternal).
//
// Since this function is used for debugging only, we do not place
// asserts here, but check everything explicitly.
- if (object->map() == one_pointer_filler_map()) return false;
- InstanceType type = object->map()->instance_type();
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ if (obj->map() == one_pointer_filler_map()) return false;
+ InstanceType type = obj->map()->instance_type();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
AllocationSpace src = chunk->owner()->identity();
switch (src) {
case NEW_SPACE:
return dst == src || dst == TargetSpaceId(type);
case OLD_POINTER_SPACE:
- return dst == src && (dst == TargetSpaceId(type) || object->IsFiller());
+ return dst == src &&
+ (dst == TargetSpaceId(type) || obj->IsFiller() ||
+ (obj->IsExternalString() && ExternalString::cast(obj)->is_short()));
case OLD_DATA_SPACE:
return dst == src && dst == TargetSpaceId(type);
case CODE_SPACE:
@@ -446,6 +396,8 @@ bool Heap::AllowedToBeMigrated(HeapObject* object, AllocationSpace dst) {
case PROPERTY_CELL_SPACE:
case LO_SPACE:
return false;
+ case INVALID_SPACE:
+ break;
}
UNREACHABLE();
return false;
@@ -473,7 +425,7 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
*dst_slot++ = *src_slot++;
}
} else {
- OS::MemMove(dst, src, static_cast<size_t>(byte_size));
+ MemMove(dst, src, static_cast<size_t>(byte_size));
}
}
@@ -483,14 +435,54 @@ void Heap::ScavengePointer(HeapObject** p) {
}
-void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
- if (FLAG_allocation_site_pretenuring && object->IsJSObject()) {
- AllocationMemento* memento = AllocationMemento::FindForJSObject(
- JSObject::cast(object), true);
- if (memento != NULL) {
- ASSERT(memento->IsValid());
- memento->GetAllocationSite()->IncrementMementoFoundCount();
- }
+AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
+ // Check if there is potentially a memento behind the object. If
+  // the last word of the memento is on another page we return
+ // immediately.
+ Address object_address = object->address();
+ Address memento_address = object_address + object->Size();
+ Address last_memento_word_address = memento_address + kPointerSize;
+ if (!NewSpacePage::OnSamePage(object_address,
+ last_memento_word_address)) {
+ return NULL;
+ }
+
+ HeapObject* candidate = HeapObject::FromAddress(memento_address);
+ if (candidate->map() != allocation_memento_map()) return NULL;
+
+ // Either the object is the last object in the new space, or there is another
+  // object of at least word size (the header map word) following it, so it
+  // suffices to compare ptr and top here. Note that technically we do not have
+ // to compare with the current top pointer of the from space page during GC,
+ // since we always install filler objects above the top pointer of a from
+ // space page when performing a garbage collection. However, always performing
+ // the test makes it possible to have a single, unified version of
+ // FindAllocationMemento that is used both by the GC and the mutator.
+ Address top = NewSpaceTop();
+ ASSERT(memento_address == top ||
+ memento_address + HeapObject::kHeaderSize <= top ||
+ !NewSpacePage::OnSamePage(memento_address, top));
+ if (memento_address == top) return NULL;
+
+ AllocationMemento* memento = AllocationMemento::cast(candidate);
+ if (!memento->IsValid()) return NULL;
+ return memento;
+}
+
+
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
+ ScratchpadSlotMode mode) {
+ Heap* heap = object->GetHeap();
+ ASSERT(heap->InFromSpace(object));
+
+ if (!FLAG_allocation_site_pretenuring ||
+ !AllocationSite::CanTrack(object->map()->instance_type())) return;
+
+ AllocationMemento* memento = heap->FindAllocationMemento(object);
+ if (memento == NULL) return;
+
+ if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
+ heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
}
}
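
The page check at the top of FindAllocationMemento is plain address arithmetic:
the candidate memento starts immediately after the object, and its map word must
lie on the same new-space page. A minimal sketch of that computation, assuming a
power-of-two page size (the constants are illustrative, not V8's):

#include <cstdint>
#include <cstdio>

typedef uintptr_t Address;
const uintptr_t kPageSize = 1u << 20;  // assumed page size
const uintptr_t kPointerSize = sizeof(void*);

static bool OnSamePage(Address a, Address b) {
  return (a & ~(kPageSize - 1)) == (b & ~(kPageSize - 1));
}

int main() {
  Address object_address = 0x100000;  // hypothetical object start
  uintptr_t object_size = 64;         // hypothetical object size
  Address memento_address = object_address + object_size;
  Address last_memento_word_address = memento_address + kPointerSize;
  if (!OnSamePage(object_address, last_memento_word_address)) {
    std::printf("no memento: its map word would cross the page\n");
  } else {
    std::printf("candidate memento at %p\n",
                reinterpret_cast<void*>(memento_address));
  }
  return 0;
}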
@@ -513,7 +505,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- UpdateAllocationSiteFeedback(object);
+ UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
@@ -522,71 +514,12 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}
-bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
+bool Heap::CollectGarbage(AllocationSpace space,
+ const char* gc_reason,
+ const v8::GCCallbackFlags callbackFlags) {
const char* collector_reason = NULL;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
- return CollectGarbage(space, collector, gc_reason, collector_reason);
-}
-
-
-MaybeObject* Heap::PrepareForCompare(String* str) {
- // Always flatten small strings and force flattening of long strings
- // after we have accumulated a certain amount we failed to flatten.
- static const int kMaxAlwaysFlattenLength = 32;
- static const int kFlattenLongThreshold = 16*KB;
-
- const int length = str->length();
- MaybeObject* obj = str->TryFlatten();
- if (length <= kMaxAlwaysFlattenLength ||
- unflattened_strings_length_ >= kFlattenLongThreshold) {
- return obj;
- }
- if (obj->IsFailure()) {
- unflattened_strings_length_ += length;
- }
- return str;
-}
-
-
-int64_t Heap::AdjustAmountOfExternalAllocatedMemory(
- int64_t change_in_bytes) {
- ASSERT(HasBeenSetUp());
- int64_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
- if (change_in_bytes > 0) {
- // Avoid overflow.
- if (amount > amount_of_external_allocated_memory_) {
- amount_of_external_allocated_memory_ = amount;
- } else {
- // Give up and reset the counters in case of an overflow.
- amount_of_external_allocated_memory_ = 0;
- amount_of_external_allocated_memory_at_last_global_gc_ = 0;
- }
- int64_t amount_since_last_global_gc = PromotedExternalMemorySize();
- if (amount_since_last_global_gc > external_allocation_limit_) {
- CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
- }
- } else {
- // Avoid underflow.
- if (amount >= 0) {
- amount_of_external_allocated_memory_ = amount;
- } else {
- // Give up and reset the counters in case of an underflow.
- amount_of_external_allocated_memory_ = 0;
- amount_of_external_allocated_memory_at_last_global_gc_ = 0;
- }
- }
- if (FLAG_trace_external_memory) {
- PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
- PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
- "amount=%6" V8_PTR_PREFIX "d KB, since_gc=%6" V8_PTR_PREFIX "d KB, "
- "isolate=0x%08" V8PRIxPTR ".\n",
- static_cast<intptr_t>(change_in_bytes / KB),
- static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB),
- static_cast<intptr_t>(PromotedExternalMemorySize() / KB),
- reinterpret_cast<intptr_t>(isolate()));
- }
- ASSERT(amount_of_external_allocated_memory_ >= 0);
- return amount_of_external_allocated_memory_;
+ return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
}
@@ -596,13 +529,6 @@ Isolate* Heap::isolate() {
}
-#ifdef DEBUG
-#define GC_GREEDY_CHECK(ISOLATE) \
- if (FLAG_gc_greedy) (ISOLATE)->heap()->GarbageCollectionGreedyCheck()
-#else
-#define GC_GREEDY_CHECK(ISOLATE) { }
-#endif
-
// Calls the FUNCTION_CALL function and retries it up to two times
// to guarantee that any allocations performed during the call will
// succeed if there's enough memory.
@@ -610,39 +536,30 @@ Isolate* Heap::isolate() {
// Warning: Do not use the identifiers __object__, __allocation__ or
// __scope__ in a call to this macro.
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY, OOM)\
+#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
+ if (__allocation__.To(&__object__)) { \
+ ASSERT(__object__ != (ISOLATE)->heap()->exception()); \
+ RETURN_VALUE; \
+ }
+
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
do { \
- GC_GREEDY_CHECK(ISOLATE); \
- MaybeObject* __maybe_object__ = FUNCTION_CALL; \
+ AllocationResult __allocation__ = FUNCTION_CALL; \
Object* __object__ = NULL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- OOM; \
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- (ISOLATE)->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
- allocation_space(), \
- "allocation failure"); \
- __maybe_object__ = FUNCTION_CALL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- OOM; \
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
+ RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
+ (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(), \
+ "allocation failure"); \
+ __allocation__ = FUNCTION_CALL; \
+ RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
(ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
(ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
{ \
- AlwaysAllocateScope __scope__; \
- __maybe_object__ = FUNCTION_CALL; \
- } \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- OOM; \
+ AlwaysAllocateScope __scope__(ISOLATE); \
+ __allocation__ = FUNCTION_CALL; \
} \
- if (__maybe_object__->IsRetryAfterGC()) { \
+ RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
/* TODO(1181417): Fix this. */ \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
- } \
+ v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
RETURN_EMPTY; \
} while (false)
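
Spelled out without the macro plumbing, the retry protocol is: attempt, collect
the failing space, attempt again, do a last-resort full GC, attempt once more,
and finally die on OOM. A plain-function sketch of that control flow; the
function-pointer parameters are stand-ins for the real heap API:

void* AllocateWithRetry(void* (*Allocate)(),
                        void (*CollectGarbage)(),
                        void (*CollectAllAvailableGarbage)(),
                        void (*FatalProcessOutOfMemory)(const char*)) {
  void* result = Allocate();                // first attempt
  if (result != nullptr) return result;
  CollectGarbage();                         // GC the space that failed
  result = Allocate();                      // second attempt
  if (result != nullptr) return result;
  CollectAllAvailableGarbage();             // last-resort full GC
  result = Allocate();                      // in V8, under AlwaysAllocateScope
  if (result != nullptr) return result;
  FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
  return nullptr;                           // unreachable
}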
@@ -652,8 +569,7 @@ Isolate* Heap::isolate() {
ISOLATE, \
FUNCTION_CALL, \
RETURN_VALUE, \
- RETURN_EMPTY, \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY", true))
+ RETURN_EMPTY)
#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
CALL_AND_RETRY_OR_DIE(ISOLATE, \
@@ -666,14 +582,6 @@ Isolate* Heap::isolate() {
CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
-#define CALL_HEAP_FUNCTION_PASS_EXCEPTION(ISOLATE, FUNCTION_CALL) \
- CALL_AND_RETRY(ISOLATE, \
- FUNCTION_CALL, \
- return __object__, \
- return __maybe_object__, \
- return __maybe_object__)
-
-
void ExternalStringTable::AddString(String* string) {
ASSERT(string->IsExternalString());
if (heap_->InNewSpace(string)) {
@@ -747,84 +655,20 @@ void Heap::CompletelyClearInstanceofCache() {
}
-MaybeObject* TranscendentalCache::Get(Type type, double input) {
- SubCache* cache = caches_[type];
- if (cache == NULL) {
- caches_[type] = cache = new SubCache(isolate_, type);
- }
- return cache->Get(input);
-}
-
-
-Address TranscendentalCache::cache_array_address() {
- return reinterpret_cast<Address>(caches_);
-}
-
-
-double TranscendentalCache::SubCache::Calculate(double input) {
- switch (type_) {
- case ACOS:
- return acos(input);
- case ASIN:
- return asin(input);
- case ATAN:
- return atan(input);
- case COS:
- return fast_cos(input);
- case EXP:
- return exp(input);
- case LOG:
- return fast_log(input);
- case SIN:
- return fast_sin(input);
- case TAN:
- return fast_tan(input);
- default:
- return 0.0; // Never happens.
- }
-}
-
-
-MaybeObject* TranscendentalCache::SubCache::Get(double input) {
- Converter c;
- c.dbl = input;
- int hash = Hash(c);
- Element e = elements_[hash];
- if (e.in[0] == c.integers[0] &&
- e.in[1] == c.integers[1]) {
- ASSERT(e.output != NULL);
- isolate_->counters()->transcendental_cache_hit()->Increment();
- return e.output;
- }
- double answer = Calculate(input);
- isolate_->counters()->transcendental_cache_miss()->Increment();
- Object* heap_number;
- { MaybeObject* maybe_heap_number =
- isolate_->heap()->AllocateHeapNumber(answer);
- if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
- }
- elements_[hash].in[0] = c.integers[0];
- elements_[hash].in[1] = c.integers[1];
- elements_[hash].output = heap_number;
- return heap_number;
-}
-
-
-AlwaysAllocateScope::AlwaysAllocateScope() {
+AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
+ : heap_(isolate->heap()), daf_(isolate) {
// We shouldn't hit any nested scopes, because that requires
// non-handle code to call handle code. The code still works but
// performance will degrade, so we want to catch this situation
// in debug mode.
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
- isolate->heap()->always_allocate_scope_depth_++;
+ ASSERT(heap_->always_allocate_scope_depth_ == 0);
+ heap_->always_allocate_scope_depth_++;
}
AlwaysAllocateScope::~AlwaysAllocateScope() {
- Isolate* isolate = Isolate::Current();
- isolate->heap()->always_allocate_scope_depth_--;
- ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
+ heap_->always_allocate_scope_depth_--;
+ ASSERT(heap_->always_allocate_scope_depth_ == 0);
}
@@ -842,6 +686,21 @@ NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
#endif
+GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
+ heap_->gc_callbacks_depth_++;
+}
+
+
+GCCallbacksScope::~GCCallbacksScope() {
+ heap_->gc_callbacks_depth_--;
+}
+
+
+bool GCCallbacksScope::CheckReenter() {
+ return heap_->gc_callbacks_depth_ == 1;
+}
+
+
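
GCCallbacksScope is a depth counter in RAII form: CheckReenter() is true only in
the outermost scope, letting the heap skip user callbacks during reentrant GCs.
A self-contained usage sketch under that assumption (not V8's actual Heap class):

struct Heap { int gc_callbacks_depth_ = 0; };

class GCCallbacksScope {
 public:
  explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
    heap_->gc_callbacks_depth_++;
  }
  ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
  // True only at the outermost nesting level.
  bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
 private:
  Heap* heap_;
};

void RunGCPrologue(Heap* heap) {
  GCCallbacksScope scope(heap);
  if (scope.CheckReenter()) {
    // Invoke user GC callbacks only when not already inside a callback.
  }
}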
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
@@ -853,25 +712,15 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
}
-double GCTracer::SizeOfHeapObjects() {
- return (static_cast<double>(heap_->SizeOfObjects())) / MB;
-}
-
-
-DisallowAllocationFailure::DisallowAllocationFailure() {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
- old_state_ = isolate->heap()->disallow_allocation_failure_;
- isolate->heap()->disallow_allocation_failure_ = true;
-#endif
+void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ CHECK((*current)->IsSmi());
+ }
}
-DisallowAllocationFailure::~DisallowAllocationFailure() {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
- isolate->heap()->disallow_allocation_failure_ = old_state_;
-#endif
+double GCTracer::SizeOfHeapObjects() {
+ return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}
diff --git a/chromium/v8/src/heap-profiler.cc b/chromium/v8/src/heap-profiler.cc
index 3d8e3364c90..e576d3b23e9 100644
--- a/chromium/v8/src/heap-profiler.cc
+++ b/chromium/v8/src/heap-profiler.cc
@@ -1,55 +1,45 @@
// Copyright 2009-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "heap-profiler.h"
-#include "heap-snapshot-generator-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/heap-profiler.h"
+
+#include "src/allocation-tracker.h"
+#include "src/heap-snapshot-generator-inl.h"
namespace v8 {
namespace internal {
HeapProfiler::HeapProfiler(Heap* heap)
- : snapshots_(new HeapSnapshotsCollection(heap)),
+ : ids_(new HeapObjectsMap(heap)),
+ names_(new StringsStorage(heap)),
next_snapshot_uid_(1),
- is_tracking_allocations_(false),
is_tracking_object_moves_(false) {
}
+static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
+ delete *snapshot_ptr;
+}
+
+
HeapProfiler::~HeapProfiler() {
- delete snapshots_;
+ snapshots_.Iterate(DeleteHeapSnapshot);
+ snapshots_.Clear();
}
void HeapProfiler::DeleteAllSnapshots() {
- Heap* the_heap = heap();
- delete snapshots_;
- snapshots_ = new HeapSnapshotsCollection(the_heap);
+ snapshots_.Iterate(DeleteHeapSnapshot);
+ snapshots_.Clear();
+ names_.Reset(new StringsStorage(heap()));
+}
+
+
+void HeapProfiler::RemoveSnapshot(HeapSnapshot* snapshot) {
+ snapshots_.RemoveElement(snapshot);
}
@@ -76,15 +66,17 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
const char* name,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
- HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
+ HeapSnapshot* result = new HeapSnapshot(this, name, next_snapshot_uid_++);
{
HeapSnapshotGenerator generator(result, control, resolver, heap());
if (!generator.GenerateSnapshot()) {
delete result;
result = NULL;
+ } else {
+ snapshots_.Add(result);
}
}
- snapshots_->SnapshotGenerationFinished(result);
+ ids_->RemoveDeadEntries();
is_tracking_object_moves_ = true;
return result;
}
@@ -94,69 +86,82 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
String* name,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
- return TakeSnapshot(snapshots_->names()->GetName(name), control, resolver);
+ return TakeSnapshot(names_->GetName(name), control, resolver);
}
void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
- snapshots_->StartHeapObjectsTracking(track_allocations);
+ ids_->UpdateHeapObjectsMap();
is_tracking_object_moves_ = true;
- ASSERT(!is_tracking_allocations_);
+ ASSERT(!is_tracking_allocations());
if (track_allocations) {
+ allocation_tracker_.Reset(new AllocationTracker(ids_.get(), names_.get()));
heap()->DisableInlineAllocation();
- is_tracking_allocations_ = true;
}
}
SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
- return snapshots_->PushHeapObjectsStats(stream);
+ return ids_->PushHeapObjectsStats(stream);
}
void HeapProfiler::StopHeapObjectsTracking() {
- snapshots_->StopHeapObjectsTracking();
- if (is_tracking_allocations_) {
+ ids_->StopHeapObjectsTracking();
+ if (is_tracking_allocations()) {
+ allocation_tracker_.Reset(NULL);
heap()->EnableInlineAllocation();
- is_tracking_allocations_ = false;
}
}
size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
- return snapshots_->GetUsedMemorySize();
+ size_t size = sizeof(*this);
+ size += names_->GetUsedMemorySize();
+ size += ids_->GetUsedMemorySize();
+ size += GetMemoryUsedByList(snapshots_);
+ for (int i = 0; i < snapshots_.length(); ++i) {
+ size += snapshots_[i]->RawSnapshotSize();
+ }
+ return size;
}
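
GetMemoryUsedByList, called above, is the same helper this patch deletes from
heap-snapshot-generator.cc further down; it presumably moves to a shared header
not shown in this diff. Its removed definition was simply:

template<typename T, class P>
static size_t GetMemoryUsedByList(const List<T, P>& list) {
  return list.length() * sizeof(T) + sizeof(list);
}

It counts only the list's backing store plus the list header, not memory owned
by the elements, which is why each snapshot's RawSnapshotSize() is added
separately in the loop above.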
int HeapProfiler::GetSnapshotsCount() {
- return snapshots_->snapshots()->length();
+ return snapshots_.length();
}
HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- return snapshots_->snapshots()->at(index);
+ return snapshots_.at(index);
}
SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
if (!obj->IsHeapObject())
return v8::HeapProfiler::kUnknownObjectId;
- return snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
+ return ids_->FindEntry(HeapObject::cast(*obj)->address());
}
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
- snapshots_->ObjectMoveEvent(from, to, size);
+ bool known_object = ids_->MoveObject(from, to, size);
+ if (!known_object && !allocation_tracker_.is_empty()) {
+ allocation_tracker_->address_to_trace()->MoveObject(from, to, size);
+ }
}
void HeapProfiler::AllocationEvent(Address addr, int size) {
- snapshots_->AllocationEvent(addr, size);
+ DisallowHeapAllocation no_allocation;
+ if (!allocation_tracker_.is_empty()) {
+ allocation_tracker_->AllocationEvent(addr, size);
+ }
}
void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
- snapshots_->UpdateObjectSizeEvent(addr, size);
+ ids_->UpdateObjectSize(addr, size);
}
@@ -167,4 +172,27 @@ void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
}
+Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
+ HeapObject* object = NULL;
+ HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
+  // Make sure that the object with the given id is still reachable.
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ if (ids_->FindEntry(obj->address()) == id) {
+ ASSERT(object == NULL);
+ object = obj;
+ // Can't break -- kFilterUnreachable requires full heap traversal.
+ }
+ }
+ return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
+}
+
+
+void HeapProfiler::ClearHeapObjectMap() {
+ ids_.Reset(new HeapObjectsMap(heap()));
+ if (!is_tracking_allocations()) is_tracking_object_moves_ = false;
+}
+
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/heap-profiler.h b/chromium/v8/src/heap-profiler.h
index 13e605b12d3..4197d4d54c9 100644
--- a/chromium/v8/src/heap-profiler.h
+++ b/chromium/v8/src/heap-profiler.h
@@ -1,41 +1,18 @@
// Copyright 2009-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
-#include "heap-snapshot-generator-inl.h"
-#include "isolate.h"
+#include "src/heap-snapshot-generator-inl.h"
+#include "src/isolate.h"
+#include "src/smart-pointers.h"
namespace v8 {
namespace internal {
class HeapSnapshot;
-class HeapSnapshotsCollection;
class HeapProfiler {
public:
@@ -55,12 +32,18 @@ class HeapProfiler {
void StartHeapObjectsTracking(bool track_allocations);
void StopHeapObjectsTracking();
+ AllocationTracker* allocation_tracker() const {
+ return allocation_tracker_.get();
+ }
+ HeapObjectsMap* heap_object_map() const { return ids_.get(); }
+ StringsStorage* names() const { return names_.get(); }
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
int GetSnapshotsCount();
HeapSnapshot* GetSnapshot(int index);
SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
void DeleteAllSnapshots();
+ void RemoveSnapshot(HeapSnapshot* snapshot);
void ObjectMoveEvent(Address from, Address to, int size);
@@ -76,19 +59,23 @@ class HeapProfiler {
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
- bool is_tracking_allocations() const { return is_tracking_allocations_; }
-
- int FindUntrackedObjects() {
- return snapshots_->FindUntrackedObjects();
+ bool is_tracking_allocations() const {
+ return !allocation_tracker_.is_empty();
}
+ Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
+ void ClearHeapObjectMap();
+
private:
- Heap* heap() const { return snapshots_->heap(); }
+ Heap* heap() const { return ids_->heap(); }
- HeapSnapshotsCollection* snapshots_;
+ // Mapping from HeapObject addresses to objects' uids.
+ SmartPointer<HeapObjectsMap> ids_;
+ List<HeapSnapshot*> snapshots_;
+ SmartPointer<StringsStorage> names_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
- bool is_tracking_allocations_;
+ SmartPointer<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
};
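
With snapshots_ now a plain List and ids_, names_ and allocation_tracker_ held
through SmartPointer, ownership is explicit: Reset() deletes the old object and
adopts the new one, and is_empty() doubles as the "is tracking" flag. A minimal
sketch of the SmartPointer operations this class relies on (simplified, not
V8's full implementation):

#include <cstddef>

// Supports exactly the operations used by HeapProfiler: get(), operator->,
// is_empty(), and Reset(), which deletes the previously held object.
template <typename T>
class SmartPointer {
 public:
  SmartPointer() : p_(NULL) {}
  explicit SmartPointer(T* p) : p_(p) {}
  ~SmartPointer() { delete p_; }
  T* get() const { return p_; }
  T* operator->() const { return p_; }
  bool is_empty() const { return p_ == NULL; }
  void Reset(T* new_value) {
    delete p_;       // release the old object, if any
    p_ = new_value;  // may be NULL, e.g. allocation_tracker_.Reset(NULL)
  }
 private:
  SmartPointer(const SmartPointer&);             // non-copyable
  SmartPointer& operator=(const SmartPointer&);  // non-assignable
  T* p_;
};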
diff --git a/chromium/v8/src/heap-snapshot-generator-inl.h b/chromium/v8/src/heap-snapshot-generator-inl.h
index 43002d2d2b1..b4021ecafb4 100644
--- a/chromium/v8/src/heap-snapshot-generator-inl.h
+++ b/chromium/v8/src/heap-snapshot-generator-inl.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
#define V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
-#include "heap-snapshot-generator.h"
+#include "src/heap-snapshot-generator.h"
namespace v8 {
namespace internal {
@@ -59,7 +36,10 @@ int HeapEntry::set_children_index(int index) {
HeapGraphEdge** HeapEntry::children_arr() {
ASSERT(children_index_ >= 0);
- return &snapshot_->children()[children_index_];
+ SLOW_ASSERT(children_index_ < snapshot_->children().length() ||
+ (children_index_ == snapshot_->children().length() &&
+ children_count_ == 0));
+ return &snapshot_->children().first() + children_index_;
}
diff --git a/chromium/v8/src/heap-snapshot-generator.cc b/chromium/v8/src/heap-snapshot-generator.cc
index 271f95c5ca0..be970eeeeb3 100644
--- a/chromium/v8/src/heap-snapshot-generator.cc
+++ b/chromium/v8/src/heap-snapshot-generator.cc
@@ -1,39 +1,17 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "heap-snapshot-generator-inl.h"
-
-#include "allocation-tracker.h"
-#include "code-stubs.h"
-#include "heap-profiler.h"
-#include "debug.h"
-#include "types.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/heap-snapshot-generator-inl.h"
+
+#include "src/allocation-tracker.h"
+#include "src/code-stubs.h"
+#include "src/conversions.h"
+#include "src/debug.h"
+#include "src/heap-profiler.h"
+#include "src/types.h"
namespace v8 {
namespace internal {
@@ -47,7 +25,8 @@ HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
ASSERT(type == kContextVariable
|| type == kProperty
|| type == kInternal
- || type == kShortcut);
+ || type == kShortcut
+ || type == kWeak);
}
@@ -56,7 +35,7 @@ HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
from_index_(from),
to_index_(to),
index_(index) {
- ASSERT(type == kElement || type == kHidden || type == kWeak);
+ ASSERT(type == kElement || type == kHidden);
}
@@ -71,14 +50,16 @@ HeapEntry::HeapEntry(HeapSnapshot* snapshot,
Type type,
const char* name,
SnapshotObjectId id,
- int self_size)
+ size_t self_size,
+ unsigned trace_node_id)
: type_(type),
children_count_(0),
children_index_(-1),
self_size_(self_size),
- id_(id),
snapshot_(snapshot),
- name_(name) { }
+ name_(name),
+ id_(id),
+ trace_node_id_(trace_node_id) { }
void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
@@ -99,15 +80,10 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
}
-Handle<HeapObject> HeapEntry::GetHeapObject() {
- return snapshot_->collection()->FindHeapObjectById(id());
-}
-
-
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
- STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6d @%6u %*c %s%s: ",
+ STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
+ OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ",
self_size(), id(), indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
@@ -136,7 +112,7 @@ void HeapEntry::Print(
edge_name = edge.name();
break;
case HeapGraphEdge::kElement:
- OS::SNPrintF(index, "%d", edge.index());
+ SNPrintF(index, "%d", edge.index());
break;
case HeapGraphEdge::kInternal:
edge_prefix = "$";
@@ -147,7 +123,7 @@ void HeapEntry::Print(
break;
case HeapGraphEdge::kHidden:
edge_prefix = "$";
- OS::SNPrintF(index, "%d", edge.index());
+ SNPrintF(index, "%d", edge.index());
break;
case HeapGraphEdge::kShortcut:
edge_prefix = "^";
@@ -155,10 +131,10 @@ void HeapEntry::Print(
break;
case HeapGraphEdge::kWeak:
edge_prefix = "w";
- OS::SNPrintF(index, "%d", edge.index());
+ edge_name = edge.name();
break;
default:
- OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
+ SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
}
edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
}
@@ -179,6 +155,7 @@ const char* HeapEntry::TypeAsString() {
case kSynthetic: return "/synthetic/";
case kConsString: return "/concatenated string/";
case kSlicedString: return "/sliced string/";
+ case kSymbol: return "/symbol/";
default: return "???";
}
}
@@ -192,32 +169,37 @@ template <size_t ptr_size> struct SnapshotSizeConstants;
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 24;
+ static const int kExpectedHeapEntrySize = 28;
};
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 32;
+ static const int kExpectedHeapEntrySize = 40;
};
} // namespace
-HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
+
+HeapSnapshot::HeapSnapshot(HeapProfiler* profiler,
const char* title,
unsigned uid)
- : collection_(collection),
+ : profiler_(profiler),
title_(title),
uid_(uid),
root_index_(HeapEntry::kNoEntry),
gc_roots_index_(HeapEntry::kNoEntry),
natives_root_index_(HeapEntry::kNoEntry),
max_snapshot_js_object_id_(0) {
- STATIC_CHECK(
+ STATIC_ASSERT(
sizeof(HeapGraphEdge) ==
SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
- STATIC_CHECK(
+ STATIC_ASSERT(
sizeof(HeapEntry) ==
SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
+ USE(SnapshotSizeConstants<4>::kExpectedHeapGraphEdgeSize);
+ USE(SnapshotSizeConstants<4>::kExpectedHeapEntrySize);
+ USE(SnapshotSizeConstants<8>::kExpectedHeapGraphEdgeSize);
+ USE(SnapshotSizeConstants<8>::kExpectedHeapEntrySize);
for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
}
@@ -225,13 +207,13 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
void HeapSnapshot::Delete() {
- collection_->RemoveSnapshot(this);
+ profiler_->RemoveSnapshot(this);
delete this;
}
void HeapSnapshot::RememberLastJSObjectId() {
- max_snapshot_js_object_id_ = collection_->last_assigned_id();
+ max_snapshot_js_object_id_ = profiler_->heap_object_map()->last_assigned_id();
}
@@ -241,6 +223,7 @@ HeapEntry* HeapSnapshot::AddRootEntry() {
HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"",
HeapObjectsMap::kInternalRootObjectId,
+ 0,
0);
root_index_ = entry->index();
ASSERT(root_index_ == 0);
@@ -253,6 +236,7 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry() {
HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"(GC roots)",
HeapObjectsMap::kGcRootsObjectId,
+ 0,
0);
gc_roots_index_ = entry->index();
return entry;
@@ -266,6 +250,7 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
HeapEntry::kSynthetic,
VisitorSynchronization::kTagNames[tag],
HeapObjectsMap::GetNthGcSubrootId(tag),
+ 0,
0);
gc_subroot_indexes_[tag] = entry->index();
return entry;
@@ -275,8 +260,9 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size) {
- HeapEntry entry(this, type, name, id, size);
+ size_t size,
+ unsigned trace_node_id) {
+ HeapEntry entry(this, type, name, id, size, trace_node_id);
entries_.Add(entry);
return &entries_.last();
}
@@ -346,12 +332,6 @@ void HeapSnapshot::Print(int max_depth) {
}
-template<typename T, class P>
-static size_t GetMemoryUsedByList(const List<T, P>& list) {
- return list.length() * sizeof(T) + sizeof(list);
-}
-
-
size_t HeapSnapshot::RawSnapshotSize() const {
return
sizeof(*this) +
@@ -394,15 +374,10 @@ HeapObjectsMap::HeapObjectsMap(Heap* heap)
}
-void HeapObjectsMap::SnapshotGenerationFinished() {
- RemoveDeadEntries();
-}
-
-
-void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
+bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
ASSERT(to != NULL);
ASSERT(from != NULL);
- if (from == to) return;
+ if (from == to) return false;
void* from_value = entries_map_.Remove(from, ComputePointerHash(from));
if (from_value == NULL) {
// It may occur that some untracked object moves to an address X and there
@@ -443,6 +418,7 @@ void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
entries_.at(from_entry_info_index).size = object_size;
to_entry->value = from_value;
}
+ return from_value != NULL;
}
@@ -502,7 +478,7 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
entries_map_.occupancy());
}
heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::UpdateHeapObjectsMap");
+ "HeapObjectsMap::UpdateHeapObjectsMap");
HeapIterator iterator(heap_);
for (HeapObject* obj = iterator.next();
obj != NULL;
@@ -710,13 +686,12 @@ void HeapObjectsMap::RemoveDeadEntries() {
}
-SnapshotObjectId HeapObjectsMap::GenerateId(Heap* heap,
- v8::RetainedObjectInfo* info) {
+SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
const char* label = info->GetLabel();
id ^= StringHasher::HashSequentialString(label,
static_cast<int>(strlen(label)),
- heap->HashSeed());
+ heap_->HashSeed());
intptr_t element_count = info->GetElementCount();
if (element_count != -1)
id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
@@ -734,106 +709,8 @@ size_t HeapObjectsMap::GetUsedMemorySize() const {
}
-HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
- : names_(heap),
- ids_(heap),
- allocation_tracker_(NULL) {
-}
-
-
-static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
- delete *snapshot_ptr;
-}
-
-
-HeapSnapshotsCollection::~HeapSnapshotsCollection() {
- delete allocation_tracker_;
- snapshots_.Iterate(DeleteHeapSnapshot);
-}
-
-
-void HeapSnapshotsCollection::StartHeapObjectsTracking(bool track_allocations) {
- ids_.UpdateHeapObjectsMap();
- ASSERT(allocation_tracker_ == NULL);
- if (track_allocations) {
- allocation_tracker_ = new AllocationTracker(&ids_, names());
- }
-}
-
-
-void HeapSnapshotsCollection::StopHeapObjectsTracking() {
- ids_.StopHeapObjectsTracking();
- if (allocation_tracker_ != NULL) {
- delete allocation_tracker_;
- allocation_tracker_ = NULL;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
- unsigned uid) {
- return new HeapSnapshot(this, name, uid);
-}
-
-
-void HeapSnapshotsCollection::SnapshotGenerationFinished(
- HeapSnapshot* snapshot) {
- ids_.SnapshotGenerationFinished();
- if (snapshot != NULL) {
- snapshots_.Add(snapshot);
- }
-}
-
-
-void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
- snapshots_.RemoveElement(snapshot);
-}
-
-
-Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
- SnapshotObjectId id) {
- // First perform a full GC in order to avoid dead objects.
- heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::FindHeapObjectById");
- DisallowHeapAllocation no_allocation;
- HeapObject* object = NULL;
- HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
- // Make sure that object with the given id is still reachable.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- if (ids_.FindEntry(obj->address()) == id) {
- ASSERT(object == NULL);
- object = obj;
- // Can't break -- kFilterUnreachable requires full heap traversal.
- }
- }
- return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
-}
-
-
-void HeapSnapshotsCollection::AllocationEvent(Address addr, int size) {
- DisallowHeapAllocation no_allocation;
- if (allocation_tracker_ != NULL) {
- allocation_tracker_->AllocationEvent(addr, size);
- }
-}
-
-
-size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
- size_t size = sizeof(*this);
- size += names_.GetUsedMemorySize();
- size += ids_.GetUsedMemorySize();
- size += GetMemoryUsedByList(snapshots_);
- for (int i = 0; i < snapshots_.length(); ++i) {
- size += snapshots_[i]->RawSnapshotSize();
- }
- return size;
-}
-
-
HeapEntriesMap::HeapEntriesMap()
- : entries_(HeapThingsMatch) {
+ : entries_(HashMap::PointersMatch) {
}
@@ -852,7 +729,7 @@ void HeapEntriesMap::Pair(HeapThing thing, int entry) {
HeapObjectsSet::HeapObjectsSet()
- : entries_(HeapEntriesMap::HeapThingsMatch) {
+ : entries_(HashMap::PointersMatch) {
}
@@ -912,9 +789,10 @@ V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress,
v8::HeapProfiler::ObjectNameResolver* resolver)
- : heap_(snapshot->collection()->heap()),
+ : heap_(snapshot->profiler()->heap_object_map()->heap()),
snapshot_(snapshot),
- collection_(snapshot_->collection()),
+ names_(snapshot_->profiler()->names()),
+ heap_object_map_(snapshot_->profiler()->heap_object_map()),
progress_(progress),
filler_(NULL),
global_object_name_resolver_(resolver) {
@@ -944,20 +822,20 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
const char* name = shared->bound() ? "native_bind" :
- collection_->names()->GetName(String::cast(shared->name()));
+ names_->GetName(String::cast(shared->name()));
return AddEntry(object, HeapEntry::kClosure, name);
} else if (object->IsJSRegExp()) {
JSRegExp* re = JSRegExp::cast(object);
return AddEntry(object,
HeapEntry::kRegExp,
- collection_->names()->GetName(re->Pattern()));
+ names_->GetName(re->Pattern()));
} else if (object->IsJSObject()) {
- const char* name = collection_->names()->GetName(
+ const char* name = names_->GetName(
GetConstructorName(JSObject::cast(object)));
if (object->IsJSGlobalObject()) {
const char* tag = objects_tags_.GetTag(object);
if (tag != NULL) {
- name = collection_->names()->GetFormatted("%s / %s", name, tag);
+ name = names_->GetFormatted("%s / %s", name, tag);
}
}
return AddEntry(object, HeapEntry::kObject, name);
@@ -973,20 +851,22 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
"(sliced string)");
return AddEntry(object,
HeapEntry::kString,
- collection_->names()->GetName(String::cast(object)));
+ names_->GetName(String::cast(object)));
+ } else if (object->IsSymbol()) {
+ return AddEntry(object, HeapEntry::kSymbol, "symbol");
} else if (object->IsCode()) {
return AddEntry(object, HeapEntry::kCode, "");
} else if (object->IsSharedFunctionInfo()) {
String* name = String::cast(SharedFunctionInfo::cast(object)->name());
return AddEntry(object,
HeapEntry::kCode,
- collection_->names()->GetName(name));
+ names_->GetName(name));
} else if (object->IsScript()) {
Object* name = Script::cast(object)->name();
return AddEntry(object,
HeapEntry::kCode,
name->IsString()
- ? collection_->names()->GetName(String::cast(name))
+ ? names_->GetName(String::cast(name))
: "");
} else if (object->IsNativeContext()) {
return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
@@ -1007,17 +887,88 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name) {
- int object_size = object->Size();
- SnapshotObjectId object_id =
- collection_->GetObjectId(object->address(), object_size);
- return snapshot_->AddEntry(type, name, object_id, object_size);
+ return AddEntry(object->address(), type, name, object->Size());
+}
+
+
+HeapEntry* V8HeapExplorer::AddEntry(Address address,
+ HeapEntry::Type type,
+ const char* name,
+ size_t size) {
+ SnapshotObjectId object_id = heap_object_map_->FindOrAddEntry(
+ address, static_cast<unsigned int>(size));
+ unsigned trace_node_id = 0;
+ if (AllocationTracker* allocation_tracker =
+ snapshot_->profiler()->allocation_tracker()) {
+ trace_node_id =
+ allocation_tracker->address_to_trace()->GetTraceNodeId(address);
+ }
+ return snapshot_->AddEntry(type, name, object_id, size, trace_node_id);
}
+class SnapshotFiller {
+ public:
+ explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
+ : snapshot_(snapshot),
+ names_(snapshot->profiler()->names()),
+ entries_(entries) { }
+ HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = allocator->AllocateEntry(ptr);
+ entries_->Pair(ptr, entry->index());
+ return entry;
+ }
+ HeapEntry* FindEntry(HeapThing ptr) {
+ int index = entries_->Map(ptr);
+ return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
+ }
+ HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(ptr);
+ return entry != NULL ? entry : AddEntry(ptr, allocator);
+ }
+ void SetIndexedReference(HeapGraphEdge::Type type,
+ int parent,
+ int index,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetNamedReference(HeapGraphEdge::Type type,
+ int parent,
+ const char* reference_name,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetNamedReference(type, reference_name, child_entry);
+ }
+ void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetNamedReference(
+ type,
+ names_->GetName(index),
+ child_entry);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ StringsStorage* names_;
+ HeapEntriesMap* entries_;
+};
+
+
class GcSubrootsEnumerator : public ObjectVisitor {
public:
GcSubrootsEnumerator(
- SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
+ SnapshotFiller* filler, V8HeapExplorer* explorer)
: filler_(filler),
explorer_(explorer),
previous_object_count_(0),
@@ -1034,14 +985,14 @@ class GcSubrootsEnumerator : public ObjectVisitor {
}
}
private:
- SnapshotFillerInterface* filler_;
+ SnapshotFiller* filler_;
V8HeapExplorer* explorer_;
intptr_t previous_object_count_;
intptr_t object_count_;
};
-void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
+void V8HeapExplorer::AddRootEntries(SnapshotFiller* filler) {
filler->AddEntry(kInternalRootObject, this);
filler->AddEntry(kGcRootsObject, this);
GcSubrootsEnumerator enumerator(filler, this);
@@ -1100,28 +1051,38 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
+ ++next_index_;
if (CheckVisitedAndUnmark(p)) continue;
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
+ generator_->SetHiddenReference(parent_obj_, parent_, next_index_, *p);
}
}
static void MarkVisitedField(HeapObject* obj, int offset) {
if (offset < 0) return;
Address field = obj->address() + offset;
- ASSERT(!Memory::Object_at(field)->IsFailure());
ASSERT(Memory::Object_at(field)->IsHeapObject());
- *field |= kFailureTag;
+ intptr_t p = reinterpret_cast<intptr_t>(Memory::Object_at(field));
+ ASSERT(!IsMarked(p));
+ intptr_t p_tagged = p | kTag;
+ Memory::Object_at(field) = reinterpret_cast<Object*>(p_tagged);
}
private:
bool CheckVisitedAndUnmark(Object** field) {
- if ((*field)->IsFailure()) {
- intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
- *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
+ intptr_t p = reinterpret_cast<intptr_t>(*field);
+ if (IsMarked(p)) {
+ intptr_t p_untagged = (p & ~kTaggingMask) | kHeapObjectTag;
+ *field = reinterpret_cast<Object*>(p_untagged);
ASSERT((*field)->IsHeapObject());
return true;
}
return false;
}
+
+ static const intptr_t kTaggingMask = 3;
+ static const intptr_t kTag = 3;
+
+ static bool IsMarked(intptr_t p) { return (p & kTaggingMask) == kTag; }
+
V8HeapExplorer* generator_;
HeapObject* parent_obj_;
int parent_;
@@ -1129,19 +1090,23 @@ class IndexedReferencesExtractor : public ObjectVisitor {
};
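MarkVisitedField and CheckVisitedAndUnmark above stash a visited flag in the two low bits of an already-tagged field value (kTaggingMask/kTag), relying on heap objects being word-aligned. A standalone sketch of the round trip, assuming V8's usual kHeapObjectTag of 1:

#include <cassert>
#include <stdint.h>

const intptr_t kTaggingMask = 3;    // the two low bits of a word-aligned pointer
const intptr_t kTag = 3;            // both bits set means "field already visited"
const intptr_t kHeapObjectTag = 1;  // assumed: V8 heap pointers carry tag bit 1

bool IsMarked(intptr_t p) { return (p & kTaggingMask) == kTag; }

int main() {
  static int object = 0;  // int is at least 4-byte aligned here, low bits free
  intptr_t field = reinterpret_cast<intptr_t>(&object) | kHeapObjectTag;
  assert(!IsMarked(field));

  field |= kTag;  // MarkVisitedField: set both low bits
  assert(IsMarked(field));

  // CheckVisitedAndUnmark: clear the mark, restore the normal heap tag.
  field = (field & ~kTaggingMask) | kHeapObjectTag;
  assert(!IsMarked(field));
  assert(reinterpret_cast<int*>(field & ~kTaggingMask) == &object);
  return 0;
}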
-void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
- HeapEntry* heap_entry = GetEntry(obj);
- if (heap_entry == NULL) return; // No interest in this object.
- int entry = heap_entry->index();
+bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
+  if (obj->IsFixedArray()) return false;  // FixedArrays are processed in pass 2.
if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
+ } else if (obj->IsJSArrayBuffer()) {
+ ExtractJSArrayBufferReferences(entry, JSArrayBuffer::cast(obj));
+ } else if (obj->IsJSWeakSet()) {
+ ExtractJSWeakCollectionReferences(entry, JSWeakSet::cast(obj));
+ } else if (obj->IsJSWeakMap()) {
+ ExtractJSWeakCollectionReferences(entry, JSWeakMap::cast(obj));
} else if (obj->IsJSObject()) {
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
ExtractStringReferences(entry, String::cast(obj));
- } else if (obj->IsContext()) {
- ExtractContextReferences(entry, Context::cast(obj));
+ } else if (obj->IsSymbol()) {
+ ExtractSymbolReferences(entry, Symbol::cast(obj));
} else if (obj->IsMap()) {
ExtractMapReferences(entry, Map::cast(obj));
} else if (obj->IsSharedFunctionInfo()) {
@@ -1154,6 +1119,8 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
} else if (obj->IsCode()) {
ExtractCodeReferences(entry, Code::cast(obj));
+ } else if (obj->IsBox()) {
+ ExtractBoxReferences(entry, Box::cast(obj));
} else if (obj->IsCell()) {
ExtractCellReferences(entry, Cell::cast(obj));
} else if (obj->IsPropertyCell()) {
@@ -1161,12 +1128,19 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
} else if (obj->IsAllocationSite()) {
ExtractAllocationSiteReferences(entry, AllocationSite::cast(obj));
}
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+ return true;
+}
+
+
+bool V8HeapExplorer::ExtractReferencesPass2(int entry, HeapObject* obj) {
+ if (!obj->IsFixedArray()) return false;
- // Extract unvisited fields as hidden references and restore tags
- // of visited fields.
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
+ if (obj->IsContext()) {
+ ExtractContextReferences(entry, Context::cast(obj));
+ } else {
+ ExtractFixedArrayReferences(entry, FixedArray::cast(obj));
+ }
+ return true;
}
@@ -1223,11 +1197,13 @@ void V8HeapExplorer::ExtractJSObjectReferences(
SetInternalReference(js_fun, entry,
"context", js_fun->context(),
JSFunction::kContextOffset);
- for (int i = JSFunction::kNonWeakFieldsEndOffset;
- i < JSFunction::kSize;
- i += kPointerSize) {
- SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
- }
+ SetWeakReference(js_fun, entry,
+ "next_function_link", js_fun->next_function_link(),
+ JSFunction::kNextFunctionLinkOffset);
+ STATIC_ASSERT(JSFunction::kNextFunctionLinkOffset
+ == JSFunction::kNonWeakFieldsEndOffset);
+ STATIC_ASSERT(JSFunction::kNextFunctionLinkOffset + kPointerSize
+ == JSFunction::kSize);
} else if (obj->IsGlobalObject()) {
GlobalObject* global_obj = GlobalObject::cast(obj);
SetInternalReference(global_obj, entry,
@@ -1237,8 +1213,19 @@ void V8HeapExplorer::ExtractJSObjectReferences(
"native_context", global_obj->native_context(),
GlobalObject::kNativeContextOffset);
SetInternalReference(global_obj, entry,
+ "global_context", global_obj->global_context(),
+ GlobalObject::kGlobalContextOffset);
+ SetInternalReference(global_obj, entry,
"global_receiver", global_obj->global_receiver(),
GlobalObject::kGlobalReceiverOffset);
+ STATIC_ASSERT(GlobalObject::kHeaderSize - JSObject::kHeaderSize ==
+ 4 * kPointerSize);
+ } else if (obj->IsJSArrayBufferView()) {
+ JSArrayBufferView* view = JSArrayBufferView::cast(obj);
+ SetInternalReference(view, entry, "buffer", view->buffer(),
+ JSArrayBufferView::kBufferOffset);
+ SetWeakReference(view, entry, "weak_next", view->weak_next(),
+ JSArrayBufferView::kWeakNextOffset);
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
@@ -1266,6 +1253,22 @@ void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
}
+void V8HeapExplorer::ExtractSymbolReferences(int entry, Symbol* symbol) {
+ SetInternalReference(symbol, entry,
+ "name", symbol->name(),
+ Symbol::kNameOffset);
+}
+
+
+void V8HeapExplorer::ExtractJSWeakCollectionReferences(
+ int entry, JSWeakCollection* collection) {
+ MarkAsWeakContainer(collection->table());
+ SetInternalReference(collection, entry,
+ "table", collection->table(),
+ JSWeakCollection::kTableOffset);
+}
+
+
void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
if (context == context->declaration_context()) {
ScopeInfo* scope_info = context->closure()->shared()->scope_info();
@@ -1289,8 +1292,14 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
}
#define EXTRACT_CONTEXT_FIELD(index, type, name) \
- SetInternalReference(context, entry, #name, context->get(Context::index), \
- FixedArray::OffsetOfElementAt(Context::index));
+ if (Context::index < Context::FIRST_WEAK_SLOT || \
+ Context::index == Context::MAP_CACHE_INDEX) { \
+ SetInternalReference(context, entry, #name, context->get(Context::index), \
+ FixedArray::OffsetOfElementAt(Context::index)); \
+ } else { \
+ SetWeakReference(context, entry, #name, context->get(Context::index), \
+ FixedArray::OffsetOfElementAt(Context::index)); \
+ }
EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
@@ -1302,13 +1311,18 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->embedder_data(), "(context data)");
NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
+ EXTRACT_CONTEXT_FIELD(OPTIMIZED_FUNCTIONS_LIST, unused,
+ optimized_functions_list);
+ EXTRACT_CONTEXT_FIELD(OPTIMIZED_CODE_LIST, unused, optimized_code_list);
+ EXTRACT_CONTEXT_FIELD(DEOPTIMIZED_CODE_LIST, unused, deoptimized_code_list);
+ EXTRACT_CONTEXT_FIELD(NEXT_CONTEXT_LINK, unused, next_context_link);
#undef EXTRACT_CONTEXT_FIELD
- for (int i = Context::FIRST_WEAK_SLOT;
- i < Context::NATIVE_CONTEXT_SLOTS;
- ++i) {
- SetWeakReference(context, entry, i, context->get(i),
- FixedArray::OffsetOfElementAt(i));
- }
+ STATIC_ASSERT(Context::OPTIMIZED_FUNCTIONS_LIST ==
+ Context::FIRST_WEAK_SLOT);
+ STATIC_ASSERT(Context::NEXT_CONTEXT_LINK + 1 ==
+ Context::NATIVE_CONTEXT_SLOTS);
+ STATIC_ASSERT(Context::FIRST_WEAK_SLOT + 5 ==
+ Context::NATIVE_CONTEXT_SLOTS);
}
}
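Each EXTRACT_CONTEXT_FIELD expansion above picks a strong (internal) or weak edge from the slot index alone, and the STATIC_ASSERTs pin the weak-slot range so the macro cannot silently drift from the Context layout. A standalone sketch of the same X-macro-style dispatch, with hypothetical slot names (the real macro also special-cases MAP_CACHE_INDEX):

#include <cstdio>

enum Slot { CLOSURE, PREVIOUS, EXTENSION, FIRST_WEAK_SLOT, NEXT_LINK, SLOT_COUNT };

void SetInternalReference(const char* name) { std::printf("strong: %s\n", name); }
void SetWeakReference(const char* name)     { std::printf("weak:   %s\n", name); }

#define EXTRACT_FIELD(index, name)   \
  if (index < FIRST_WEAK_SLOT) {     \
    SetInternalReference(name);      \
  } else {                           \
    SetWeakReference(name);          \
  }

int main() {
  EXTRACT_FIELD(CLOSURE, "closure");      // below the weak range: strong edge
  EXTRACT_FIELD(NEXT_LINK, "next_link");  // weak slot: weak edge
  return 0;
}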
@@ -1321,6 +1335,22 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
TagObject(back_pointer, "(back pointer)");
SetInternalReference(transitions, transitions_entry,
"back_pointer", back_pointer);
+
+ if (FLAG_collect_maps && map->CanTransition()) {
+ if (!transitions->IsSimpleTransition()) {
+ if (transitions->HasPrototypeTransitions()) {
+ FixedArray* prototype_transitions =
+ transitions->GetPrototypeTransitions();
+ MarkAsWeakContainer(prototype_transitions);
+        TagObject(prototype_transitions, "(prototype transitions)");
+ SetInternalReference(transitions, transitions_entry,
+ "prototype_transitions", prototype_transitions);
+ }
+ // TODO(alph): transitions keys are strong links.
+ MarkAsWeakContainer(transitions);
+ }
+ }
+
TagObject(transitions, "(transition array)");
SetInternalReference(map, entry,
"transitions", transitions,
@@ -1338,6 +1368,7 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
"descriptors", descriptors,
Map::kDescriptorsOffset);
+ MarkAsWeakContainer(map->code_cache());
SetInternalReference(map, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
@@ -1347,6 +1378,7 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
"constructor", map->constructor(),
Map::kConstructorOffset);
TagObject(map->dependent_code(), "(dependent code)");
+ MarkAsWeakContainer(map->dependent_code());
SetInternalReference(map, entry,
"dependent_code", map->dependent_code(),
Map::kDependentCodeOffset);
@@ -1356,14 +1388,13 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
int entry, SharedFunctionInfo* shared) {
HeapObject* obj = shared;
- StringsStorage* names = collection_->names();
String* shared_name = shared->DebugName();
const char* name = NULL;
if (shared_name != *heap_->isolate()->factory()->empty_string()) {
- name = names->GetName(shared_name);
- TagObject(shared->code(), names->GetFormatted("(code for %s)", name));
+ name = names_->GetName(shared_name);
+ TagObject(shared->code(), names_->GetFormatted("(code for %s)", name));
} else {
- TagObject(shared->code(), names->GetFormatted("(%s code)",
+ TagObject(shared->code(), names_->GetFormatted("(%s code)",
Code::Kind2String(shared->code()->kind())));
}
@@ -1384,7 +1415,7 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
const char* construct_stub_name = name ?
- names->GetFormatted("(construct stub code for %s)", name) :
+ names_->GetFormatted("(construct stub code for %s)", name) :
"(construct stub code)";
TagObject(shared->construct_stub(), construct_stub_name);
SetInternalReference(obj, entry,
@@ -1402,9 +1433,9 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"optimized_code_map", shared->optimized_code_map(),
SharedFunctionInfo::kOptimizedCodeMapOffset);
- SetWeakReference(obj, entry,
- 1, shared->initial_map(),
- SharedFunctionInfo::kInitialMapOffset);
+ SetInternalReference(obj, entry,
+ "feedback_vector", shared->feedback_vector(),
+ SharedFunctionInfo::kFeedbackVectorOffset);
}
@@ -1417,9 +1448,6 @@ void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
"name", script->name(),
Script::kNameOffset);
SetInternalReference(obj, entry,
- "data", script->data(),
- Script::kDataOffset);
- SetInternalReference(obj, entry,
"context_data", script->context_data(),
Script::kContextOffset);
TagObject(script->line_ends(), "(script line ends)");
@@ -1451,15 +1479,14 @@ void V8HeapExplorer::ExtractCodeCacheReferences(
}
-void V8HeapExplorer::TagCodeObject(Code* code, const char* external_name) {
- TagObject(code, collection_->names()->GetFormatted("(%s code)",
- external_name));
+void V8HeapExplorer::TagBuiltinCodeObject(Code* code, const char* name) {
+ TagObject(code, names_->GetFormatted("(%s builtin)", name));
}
void V8HeapExplorer::TagCodeObject(Code* code) {
if (code->kind() == Code::STUB) {
- TagObject(code, collection_->names()->GetFormatted(
+ TagObject(code, names_->GetFormatted(
"(%s code)", CodeStub::MajorName(
static_cast<CodeStub::Major>(code->major_key()), true)));
}
@@ -1487,6 +1514,19 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
SetInternalReference(code, entry,
"gc_metadata", code->gc_metadata(),
Code::kGCMetadataOffset);
+ SetInternalReference(code, entry,
+ "constant_pool", code->constant_pool(),
+ Code::kConstantPoolOffset);
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ SetWeakReference(code, entry,
+ "next_code_link", code->next_code_link(),
+ Code::kNextCodeLinkOffset);
+ }
+}
+
+
+void V8HeapExplorer::ExtractBoxReferences(int entry, Box* box) {
+ SetInternalReference(box, entry, "value", box->value(), Box::kValueOffset);
}
@@ -1500,6 +1540,7 @@ void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
ExtractCellReferences(entry, cell);
SetInternalReference(cell, entry, "type", cell->type(),
PropertyCell::kTypeOffset);
+ MarkAsWeakContainer(cell->dependent_code());
SetInternalReference(cell, entry, "dependent_code", cell->dependent_code(),
PropertyCell::kDependentCodeOffset);
}
@@ -1511,17 +1552,63 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
AllocationSite::kTransitionInfoOffset);
SetInternalReference(site, entry, "nested_site", site->nested_site(),
AllocationSite::kNestedSiteOffset);
- SetInternalReference(site, entry, "memento_found_count",
- site->memento_found_count(),
- AllocationSite::kMementoFoundCountOffset);
- SetInternalReference(site, entry, "memento_create_count",
- site->memento_create_count(),
- AllocationSite::kMementoCreateCountOffset);
- SetInternalReference(site, entry, "pretenure_decision",
- site->pretenure_decision(),
- AllocationSite::kPretenureDecisionOffset);
+ MarkAsWeakContainer(site->dependent_code());
SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
AllocationSite::kDependentCodeOffset);
+ // Do not visit weak_next as it is not visited by the StaticVisitor,
+  // and the weak_next field is of little interest here.
+ STATIC_ASSERT(AllocationSite::kWeakNextOffset >=
+ AllocationSite::BodyDescriptor::kEndOffset);
+}
+
+
+class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
+ public:
+ JSArrayBufferDataEntryAllocator(size_t size, V8HeapExplorer* explorer)
+ : size_(size)
+ , explorer_(explorer) {
+ }
+ virtual HeapEntry* AllocateEntry(HeapThing ptr) {
+ return explorer_->AddEntry(
+ static_cast<Address>(ptr),
+ HeapEntry::kNative, "system / JSArrayBufferData", size_);
+ }
+ private:
+ size_t size_;
+ V8HeapExplorer* explorer_;
+};
+
+
+void V8HeapExplorer::ExtractJSArrayBufferReferences(
+ int entry, JSArrayBuffer* buffer) {
+ SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
+ JSArrayBuffer::kWeakNextOffset);
+ SetWeakReference(buffer, entry,
+ "weak_first_view", buffer->weak_first_view(),
+ JSArrayBuffer::kWeakFirstViewOffset);
+  // Set up a reference to a native memory backing_store object.
+ if (!buffer->backing_store())
+ return;
+ size_t data_size = NumberToSize(heap_->isolate(), buffer->byte_length());
+ JSArrayBufferDataEntryAllocator allocator(data_size, this);
+ HeapEntry* data_entry =
+ filler_->FindOrAddEntry(buffer->backing_store(), &allocator);
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ entry, "backing_store", data_entry);
+}
+
+
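ExtractJSArrayBufferReferences gives the off-heap backing store its own synthetic node by handing FindOrAddEntry a one-shot allocator that carries the byte size. A standalone sketch of that callback-allocator shape (hypothetical types):

#include <cstddef>
#include <cstdio>

struct Entry { const char* name; std::size_t size; };

// Mirrors HeapEntriesAllocator: the filler calls back into the allocator,
// so each call site decides how its synthetic entry is built.
struct Allocator {
  virtual ~Allocator() {}
  virtual Entry Allocate(void* ptr) = 0;
};

struct BackingStoreAllocator : Allocator {
  explicit BackingStoreAllocator(std::size_t size) : size_(size) {}
  virtual Entry Allocate(void*) {
    return Entry{"system / JSArrayBufferData", size_};
  }
  std::size_t size_;
};

// Stand-in for SnapshotFiller::FindOrAddEntry; the real one first
// consults its pointer-to-entry map (elided here).
Entry FindOrAddEntry(void* ptr, Allocator* allocator) {
  return allocator->Allocate(ptr);
}

int main() {
  char backing_store[64];
  BackingStoreAllocator allocator(sizeof(backing_store));
  Entry entry = FindOrAddEntry(backing_store, &allocator);
  std::printf("%s: %zu bytes\n", entry.name, entry.size);
  return 0;
}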
+void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
+ bool is_weak = weak_containers_.Contains(array);
+ for (int i = 0, l = array->length(); i < l; ++i) {
+ if (is_weak) {
+ SetWeakReference(array, entry,
+ i, array->get(i), array->OffsetOfElementAt(i));
+ } else {
+ SetInternalReference(array, entry,
+ i, array->get(i), array->OffsetOfElementAt(i));
+ }
+ }
}
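MarkAsWeakContainer and ExtractFixedArrayReferences form a small two-step protocol: owners flag their arrays during pass 1, and pass 2 then emits weak or internal edges for the elements. A standalone sketch, assuming a plain pointer set for the flags:

#include <cstddef>
#include <cstdio>
#include <set>
#include <vector>

std::set<const void*> weak_containers;

// Pass 1: the owning object (a Map's code cache, a PropertyCell's
// dependent code, ...) flags the array it holds weakly.
void MarkAsWeakContainer(const void* array) { weak_containers.insert(array); }

// Pass 2: the edge type for every element depends only on the earlier flag.
void ExtractArrayReferences(const std::vector<int>& array) {
  bool is_weak = weak_containers.count(&array) > 0;
  for (std::size_t i = 0; i < array.size(); ++i) {
    std::printf("%s edge [%zu] -> %d\n", is_weak ? "weak" : "internal",
                i, array[i]);
  }
}

int main() {
  std::vector<int> code_cache = {1, 2};
  std::vector<int> elements = {3};
  MarkAsWeakContainer(&code_cache);    // owner flags it in pass 1
  ExtractArrayReferences(code_cache);  // weak edges
  ExtractArrayReferences(elements);    // internal (strong) edges
  return 0;
}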
@@ -1537,7 +1624,7 @@ void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
bindings->get(JSFunction::kBoundFunctionIndex));
for (int i = JSFunction::kBoundArgumentsStartIndex;
i < bindings->length(); i++) {
- const char* reference_name = collection_->names()->GetFormatted(
+ const char* reference_name = names_->GetFormatted(
"bound_argument_%d",
i - JSFunction::kBoundArgumentsStartIndex);
SetNativeBindReference(js_obj, entry, reference_name,
@@ -1573,7 +1660,9 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
js_obj->GetInObjectPropertyOffset(index));
}
} else {
- Object* value = js_obj->RawFastPropertyAt(index);
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(js_obj->map(), i);
+ Object* value = js_obj->RawFastPropertyAt(field_index);
if (k != heap_->hidden_string()) {
SetPropertyReference(js_obj, entry, k, value);
} else {
@@ -1597,7 +1686,6 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
case HANDLER: // only in lookup results, not in descriptors
case INTERCEPTOR: // only in lookup results, not in descriptors
break;
- case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
break;
@@ -1688,8 +1776,10 @@ String* V8HeapExplorer::GetConstructorName(JSObject* object) {
// return its name. This is for instances of binding objects, which
// have prototype constructor type "Object".
Object* constructor_prop = NULL;
- LookupResult result(heap->isolate());
- object->LocalLookupRealNamedProperty(heap->constructor_string(), &result);
+ Isolate* isolate = heap->isolate();
+ LookupResult result(isolate);
+ object->LookupOwnRealNamedProperty(
+ isolate->factory()->constructor_string(), &result);
if (!result.IsFound()) return object->constructor_name();
constructor_prop = result.GetLazyValue();
@@ -1746,24 +1836,20 @@ class RootsReferencesExtractor : public ObjectVisitor {
}
int strong_index = 0, all_index = 0, tags_index = 0, builtin_index = 0;
while (all_index < all_references_.length()) {
- if (strong_index < strong_references_.length() &&
- strong_references_[strong_index] == all_references_[all_index]) {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- false,
- all_references_[all_index]);
- ++strong_index;
- } else {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- true,
- all_references_[all_index]);
- }
+ bool is_strong = strong_index < strong_references_.length()
+ && strong_references_[strong_index] == all_references_[all_index];
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ !is_strong,
+ all_references_[all_index]);
if (reference_tags_[tags_index].tag ==
VisitorSynchronization::kBuiltins) {
ASSERT(all_references_[all_index]->IsCode());
- explorer->TagCodeObject(Code::cast(all_references_[all_index]),
+ explorer->TagBuiltinCodeObject(
+ Code::cast(all_references_[all_index]),
builtins->name(builtin_index++));
}
++all_index;
+ if (is_strong) ++strong_index;
if (reference_tags_[tags_index].index == all_index) ++tags_index;
}
}
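FillReferences classifies each reference with a single linear merge: strong_references_ is collected first (VISIT_ONLY_STRONG) and appears as an in-order subsequence of all_references_ (VISIT_ALL), so one cursor per list suffices. A standalone sketch of that merge, assuming the same subsequence property:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> all = {10, 20, 30, 40};  // VISIT_ALL order
  std::vector<int> strong = {10, 30};       // in-order subsequence of all
  std::size_t strong_index = 0;
  for (std::size_t all_index = 0; all_index < all.size(); ++all_index) {
    bool is_strong = strong_index < strong.size() &&
                     strong[strong_index] == all[all_index];
    std::printf("%d: %s\n", all[all_index], is_strong ? "strong" : "weak");
    if (is_strong) ++strong_index;
  }
  return 0;
}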
@@ -1787,37 +1873,64 @@ class RootsReferencesExtractor : public ObjectVisitor {
bool V8HeapExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
- HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
-
+ SnapshotFiller* filler) {
filler_ = filler;
- bool interrupted = false;
-
- // Heap iteration with filtering must be finished in any case.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next(), progress_->ProgressStep()) {
- if (!interrupted) {
- ExtractReferences(obj);
- if (!progress_->ProgressReport(false)) interrupted = true;
- }
- }
- if (interrupted) {
- filler_ = NULL;
- return false;
- }
+ // Make sure builtin code objects get their builtin tags
+  // first. Otherwise a particular JSFunction object could end up
+  // tagging a generic builtin with its custom name.
SetRootGcRootsReference();
RootsReferencesExtractor extractor(heap_);
heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
extractor.SetCollectingAllReferences();
heap_->IterateRoots(&extractor, VISIT_ALL);
extractor.FillReferences(this);
+
+ // We have to do two passes as sometimes FixedArrays are used
+ // to weakly hold their items, and it's impossible to distinguish
+ // between these cases without processing the array owner first.
+ bool interrupted =
+ IterateAndExtractSinglePass<&V8HeapExplorer::ExtractReferencesPass1>() ||
+ IterateAndExtractSinglePass<&V8HeapExplorer::ExtractReferencesPass2>();
+
+ if (interrupted) {
+ filler_ = NULL;
+ return false;
+ }
+
filler_ = NULL;
return progress_->ProgressReport(true);
}
+template<V8HeapExplorer::ExtractReferencesMethod extractor>
+bool V8HeapExplorer::IterateAndExtractSinglePass() {
+  // Iterate the whole heap.
+ bool interrupted = false;
+ HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
+ // Heap iteration with filtering must be finished in any case.
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next(), progress_->ProgressStep()) {
+ if (interrupted) continue;
+
+ HeapEntry* heap_entry = GetEntry(obj);
+ int entry = heap_entry->index();
+ if ((this->*extractor)(entry, obj)) {
+ SetInternalReference(obj, entry,
+ "map", obj->map(), HeapObject::kMapOffset);
+ // Extract unvisited fields as hidden references and restore tags
+ // of visited fields.
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
+ obj->Iterate(&refs_extractor);
+ }
+
+ if (!progress_->ProgressReport(false)) interrupted = true;
+ }
+ return interrupted;
+}
+
+
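Both heap passes share one traversal loop by instantiating IterateAndExtractSinglePass on a pointer-to-member-function template parameter, so the per-object dispatch is resolved at compile time rather than through virtual calls. A minimal standalone sketch of the idiom (hypothetical Pass1/Pass2 predicates):

#include <cstdio>

class Explorer {
 public:
  typedef bool (Explorer::*ExtractMethod)(int obj);

  // One loop, specialized per pass at compile time.
  template <ExtractMethod extractor>
  void SinglePass() {
    for (int obj = 0; obj < 4; ++obj) {
      if ((this->*extractor)(obj)) std::printf("handled %d\n", obj);
    }
  }

  bool Pass1(int obj) { return obj % 2 == 0; }  // e.g. everything but arrays
  bool Pass2(int obj) { return obj % 2 != 0; }  // e.g. only the arrays
};

int main() {
  Explorer e;
  e.SinglePass<&Explorer::Pass1>();
  e.SinglePass<&Explorer::Pass2>();
  return 0;
}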
bool V8HeapExplorer::IsEssentialObject(Object* object) {
return object->IsHeapObject()
&& !object->IsOddball()
@@ -1844,7 +1957,7 @@ void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
parent_entry,
- collection_->names()->GetName(reference_name),
+ names_->GetName(reference_name),
child_entry);
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
@@ -1910,7 +2023,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
if (IsEssentialObject(child_obj)) {
filler_->SetNamedReference(HeapGraphEdge::kInternal,
parent_entry,
- collection_->names()->GetName(index),
+ names_->GetName(index),
child_entry);
}
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
@@ -1934,6 +2047,24 @@ void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
int parent_entry,
+ const char* reference_name,
+ Object* child_obj,
+ int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry == NULL) return;
+ if (IsEssentialObject(child_obj)) {
+ filler_->SetNamedReference(HeapGraphEdge::kWeak,
+ parent_entry,
+ reference_name,
+ child_entry);
+ }
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+}
+
+
+void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
+ int parent_entry,
int index,
Object* child_obj,
int field_offset) {
@@ -1941,10 +2072,10 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == NULL) return;
if (IsEssentialObject(child_obj)) {
- filler_->SetIndexedReference(HeapGraphEdge::kWeak,
- parent_entry,
- index,
- child_entry);
+ filler_->SetNamedReference(HeapGraphEdge::kWeak,
+ parent_entry,
+ names_->GetFormatted("%d", index),
+ child_entry);
}
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
@@ -1963,11 +2094,11 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
reference_name->IsSymbol() || String::cast(reference_name)->length() > 0
? HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
const char* name = name_format_string != NULL && reference_name->IsString()
- ? collection_->names()->GetFormatted(
+ ? names_->GetFormatted(
name_format_string,
- *String::cast(reference_name)->ToCString(
- DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)) :
- collection_->names()->GetName(reference_name);
+ String::cast(reference_name)->ToCString(
+ DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).get()) :
+ names_->GetName(reference_name);
filler_->SetNamedReference(type,
parent_entry,
@@ -2016,10 +2147,17 @@ void V8HeapExplorer::SetGcSubrootReference(
name,
child_entry);
} else {
- filler_->SetIndexedAutoIndexReference(
- is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
+ if (is_weak) {
+ filler_->SetNamedAutoIndexReference(
+ HeapGraphEdge::kWeak,
+ snapshot_->gc_subroot(tag)->index(),
+ child_entry);
+ } else {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ snapshot_->gc_subroot(tag)->index(),
+ child_entry);
+ }
}
// Add a shortcut to JS global object reference at snapshot root.
@@ -2028,9 +2166,7 @@ void V8HeapExplorer::SetGcSubrootReference(
GlobalObject* global = context->global_object();
if (global->IsJSGlobalObject()) {
bool is_debug_object = false;
-#ifdef ENABLE_DEBUGGER_SUPPORT
is_debug_object = heap_->isolate()->debug()->IsDebugGlobal(global);
-#endif
if (!is_debug_object && !user_roots_.Contains(global)) {
user_roots_.Insert(global);
SetUserGlobalReference(global);
@@ -2070,6 +2206,13 @@ void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
}
+void V8HeapExplorer::MarkAsWeakContainer(Object* object) {
+ if (IsEssentialObject(object) && object->IsFixedArray()) {
+ weak_containers_.Insert(object);
+ }
+}
+
+
class GlobalObjectsEnumerator : public ObjectVisitor {
public:
virtual void VisitPointers(Object** start, Object** end) {
@@ -2143,13 +2286,15 @@ class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
HeapSnapshot* snapshot,
HeapEntry::Type entries_type)
: snapshot_(snapshot),
- collection_(snapshot_->collection()),
+ names_(snapshot_->profiler()->names()),
+ heap_object_map_(snapshot_->profiler()->heap_object_map()),
entries_type_(entries_type) {
}
virtual HeapEntry* AllocateEntry(HeapThing ptr);
private:
HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
+ StringsStorage* names_;
+ HeapObjectsMap* heap_object_map_;
HeapEntry::Type entries_type_;
};
@@ -2159,23 +2304,24 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
intptr_t elements = info->GetElementCount();
intptr_t size = info->GetSizeInBytes();
const char* name = elements != -1
- ? collection_->names()->GetFormatted(
+ ? names_->GetFormatted(
"%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
- : collection_->names()->GetCopy(info->GetLabel());
+ : names_->GetCopy(info->GetLabel());
return snapshot_->AddEntry(
entries_type_,
name,
- HeapObjectsMap::GenerateId(collection_->heap(), info),
- size != -1 ? static_cast<int>(size) : 0);
+ heap_object_map_->GenerateId(info),
+ size != -1 ? static_cast<int>(size) : 0,
+ 0);
}
NativeObjectsExplorer::NativeObjectsExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress)
- : isolate_(snapshot->collection()->heap()->isolate()),
+ : isolate_(snapshot->profiler()->heap_object_map()->heap()->isolate()),
snapshot_(snapshot),
- collection_(snapshot_->collection()),
+ names_(snapshot_->profiler()->names()),
progress_(progress),
embedder_queried_(false),
objects_by_info_(RetainedInfosMatch),
@@ -2237,7 +2383,7 @@ void NativeObjectsExplorer::FillRetainedObjects() {
group->info = NULL; // Acquire info object ownership.
}
isolate->global_handles()->RemoveObjectGroups();
- isolate->heap()->CallGCEpilogueCallbacks(major_gc_type);
+ isolate->heap()->CallGCEpilogueCallbacks(major_gc_type, kNoGCCallbackFlags);
// Record objects that are not in ObjectGroups, but have class ID.
GlobalHandlesExtractor extractor(this);
isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
@@ -2284,7 +2430,7 @@ List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
bool NativeObjectsExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
+ SnapshotFiller* filler) {
filler_ = filler;
FillRetainedObjects();
FillImplicitReferences();
@@ -2337,7 +2483,7 @@ class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
const char* label) {
- const char* label_copy = collection_->names()->GetCopy(label);
+ const char* label_copy = names_->GetCopy(label);
uint32_t hash = StringHasher::HashSequentialString(
label_copy,
static_cast<int>(strlen(label_copy)),
@@ -2411,64 +2557,6 @@ void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
}
-class SnapshotFiller : public SnapshotFillerInterface {
- public:
- explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
- : snapshot_(snapshot),
- collection_(snapshot->collection()),
- entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = allocator->AllocateEntry(ptr);
- entries_->Pair(ptr, entry->index());
- return entry;
- }
- HeapEntry* FindEntry(HeapThing ptr) {
- int index = entries_->Map(ptr);
- return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
- }
- HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = FindEntry(ptr);
- return entry != NULL ? entry : AddEntry(ptr, allocator);
- }
- void SetIndexedReference(HeapGraphEdge::Type type,
- int parent,
- int index,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetNamedReference(HeapGraphEdge::Type type,
- int parent,
- const char* reference_name,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetNamedReference(type, reference_name, child_entry);
- }
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetNamedReference(
- type,
- collection_->names()->GetName(index),
- child_entry);
- }
-
- private:
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- HeapEntriesMap* entries_;
-};
-
-
HeapSnapshotGenerator::HeapSnapshotGenerator(
HeapSnapshot* snapshot,
v8::ActivityControl* control,
@@ -2507,15 +2595,11 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
CHECK(!debug_heap->map_space()->was_swept_conservatively());
#endif
- // The following code uses heap iterators, so we want the heap to be
- // stable. It should follow TagGlobalObjects as that can allocate.
- DisallowHeapAllocation no_alloc;
-
#ifdef VERIFY_HEAP
debug_heap->Verify();
#endif
- SetProgressTotal(1); // 1 pass.
+ SetProgressTotal(2); // 2 passes.
#ifdef VERIFY_HEAP
debug_heap->Verify();
@@ -2603,10 +2687,10 @@ class OutputStreamWriter {
ASSERT(static_cast<size_t>(n) <= strlen(s));
const char* s_end = s + n;
while (s < s_end) {
- int s_chunk_size = Min(
- chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
+ int s_chunk_size =
+ Min(chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
ASSERT(s_chunk_size > 0);
- OS::MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size);
+ MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size);
s += s_chunk_size;
chunk_pos_ += s_chunk_size;
MaybeWriteChunk();
@@ -2629,14 +2713,14 @@ class OutputStreamWriter {
static const int kMaxNumberSize =
MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1;
if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
- int result = OS::SNPrintF(
+ int result = SNPrintF(
chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
ASSERT(result != -1);
chunk_pos_ += result;
MaybeWriteChunk();
} else {
EmbeddedVector<char, kMaxNumberSize> buffer;
- int result = OS::SNPrintF(buffer, format, n);
+ int result = SNPrintF(buffer, format, n);
USE(result);
ASSERT(result != -1);
AddString(buffer.start());
@@ -2665,12 +2749,12 @@ class OutputStreamWriter {
// type, name|index, to_node.
const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
-// type, name, id, self_size, children_index.
-const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
+// type, name, id, self_size, edge_count, trace_node_id.
+const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 6;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
if (AllocationTracker* allocation_tracker =
- snapshot_->collection()->allocation_tracker()) {
+ snapshot_->profiler()->allocation_tracker()) {
allocation_tracker->PrepareForSerialization();
}
ASSERT(writer_ == NULL);
@@ -2725,9 +2809,26 @@ int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
}
-static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
+namespace {
+
+template<size_t size> struct ToUnsigned;
+
+template<> struct ToUnsigned<4> {
+ typedef uint32_t Type;
+};
+
+template<> struct ToUnsigned<8> {
+ typedef uint64_t Type;
+};
+
+} // namespace
+
+
+template<typename T>
+static int utoa_impl(T value, const Vector<char>& buffer, int buffer_pos) {
+ STATIC_ASSERT(static_cast<T>(-1) > 0); // Check that T is unsigned
int number_of_digits = 0;
- unsigned t = value;
+ T t = value;
do {
++number_of_digits;
} while (t /= 10);
@@ -2735,7 +2836,7 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
buffer_pos += number_of_digits;
int result = buffer_pos;
do {
- int last_digit = value % 10;
+ int last_digit = static_cast<int>(value % 10);
buffer[--buffer_pos] = '0' + last_digit;
value /= 10;
} while (value);
@@ -2743,6 +2844,14 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
}
+template<typename T>
+static int utoa(T value, const Vector<char>& buffer, int buffer_pos) {
+ typename ToUnsigned<sizeof(value)>::Type unsigned_value = value;
+ STATIC_ASSERT(sizeof(value) == sizeof(unsigned_value));
+ return utoa_impl(unsigned_value, buffer, buffer_pos);
+}
+
+
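The utoa/ToUnsigned pair above maps any integral argument to a same-width unsigned type before digit emission, which is what lets the serializer print size_t self-sizes with the same helper. A standalone sketch of the mapping (hypothetical PrintAsUnsigned stands in for utoa):

#include <cstddef>
#include <cstdio>
#include <stdint.h>

template <std::size_t size> struct ToUnsigned;
template <> struct ToUnsigned<4> { typedef uint32_t Type; };
template <> struct ToUnsigned<8> { typedef uint64_t Type; };

// Widens the value to a same-size unsigned type, so the digit loop
// only ever sees unsigned arithmetic.
template <typename T>
void PrintAsUnsigned(T value) {
  typename ToUnsigned<sizeof(value)>::Type unsigned_value = value;
  std::printf("%llu\n", static_cast<unsigned long long>(unsigned_value));
}

int main() {
  PrintAsUnsigned(42u);                             // unsigned -> uint32_t
  PrintAsUnsigned(static_cast<uint64_t>(1) << 40);  // stays 64-bit
  return 0;
}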
void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
bool first_edge) {
// The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
@@ -2751,7 +2860,6 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
EmbeddedVector<char, kBufferSize> buffer;
int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
|| edge->type() == HeapGraphEdge::kHidden
- || edge->type() == HeapGraphEdge::kWeak
? edge->index() : GetStringId(edge->name());
int buffer_pos = 0;
if (!first_edge) {
@@ -2780,10 +2888,11 @@ void HeapSnapshotJSONSerializer::SerializeEdges() {
void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
- // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
+  // The buffer needs space for 5 unsigned ints, 1 size_t, 6 commas, \n and \0
static const int kBufferSize =
5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
- + 5 + 1 + 1;
+ + MaxDecimalDigitsIn<sizeof(size_t)>::kUnsigned // NOLINT
+ + 6 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
if (entry_index(entry) != 0) {
@@ -2798,6 +2907,8 @@ void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry->trace_node_id(), buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
@@ -2831,7 +2942,8 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("name") ","
JSON_S("id") ","
JSON_S("self_size") ","
- JSON_S("edge_count")) ","
+ JSON_S("edge_count") ","
+ JSON_S("trace_node_id")) ","
JSON_S("node_types") ":" JSON_A(
JSON_A(
JSON_S("hidden") ","
@@ -2876,7 +2988,7 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("column")) ","
JSON_S("trace_node_fields") ":" JSON_A(
JSON_S("id") ","
- JSON_S("function_id") ","
+ JSON_S("function_info_index") ","
JSON_S("count") ","
JSON_S("size") ","
JSON_S("children"))));
@@ -2889,9 +3001,9 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
writer_->AddNumber(snapshot_->edges().length());
writer_->AddString(",\"trace_function_count\":");
uint32_t count = 0;
- AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
if (tracker) {
- count = tracker->id_to_function_info()->occupancy();
+ count = tracker->function_info_list().length();
}
writer_->AddNumber(count);
}
@@ -2908,7 +3020,7 @@ static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
void HeapSnapshotJSONSerializer::SerializeTraceTree() {
- AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
if (!tracker) return;
AllocationTraceTree* traces = tracker->trace_tree();
SerializeTraceNode(traces->root());
@@ -2924,7 +3036,7 @@ void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
int buffer_pos = 0;
buffer_pos = utoa(node->id(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(node->function_id(), buffer, buffer_pos);
+ buffer_pos = utoa(node->function_info_index(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(node->allocation_count(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
@@ -2959,29 +3071,25 @@ static int SerializePosition(int position, const Vector<char>& buffer,
void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
- AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
if (!tracker) return;
// The buffer needs space for 6 unsigned ints, 6 commas, \n and \0
const int kBufferSize =
6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ 6 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
- HashMap* id_to_function_info = tracker->id_to_function_info();
+ const List<AllocationTracker::FunctionInfo*>& list =
+ tracker->function_info_list();
bool first_entry = true;
- for (HashMap::Entry* p = id_to_function_info->Start();
- p != NULL;
- p = id_to_function_info->Next(p)) {
- SnapshotObjectId id =
- static_cast<SnapshotObjectId>(reinterpret_cast<intptr_t>(p->key));
- AllocationTracker::FunctionInfo* info =
- reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
+ for (int i = 0; i < list.length(); i++) {
+ AllocationTracker::FunctionInfo* info = list[i];
int buffer_pos = 0;
if (first_entry) {
first_entry = false;
} else {
buffer[buffer_pos++] = ',';
}
- buffer_pos = utoa(id, buffer, buffer_pos);
+ buffer_pos = utoa(info->function_id, buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(GetStringId(info->name), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
diff --git a/chromium/v8/src/heap-snapshot-generator.h b/chromium/v8/src/heap-snapshot-generator.h
index e55513f890d..e18d70a2203 100644
--- a/chromium/v8/src/heap-snapshot-generator.h
+++ b/chromium/v8/src/heap-snapshot-generator.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_
#define V8_HEAP_SNAPSHOT_GENERATOR_H_
-#include "profile-generator-inl.h"
+#include "src/profile-generator-inl.h"
namespace v8 {
namespace internal {
@@ -37,6 +14,7 @@ class AllocationTracker;
class AllocationTraceNode;
class HeapEntry;
class HeapSnapshot;
+class SnapshotFiller;
class HeapGraphEdge BASE_EMBEDDED {
public:
@@ -57,14 +35,15 @@ class HeapGraphEdge BASE_EMBEDDED {
Type type() const { return static_cast<Type>(type_); }
int index() const {
- ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
+ ASSERT(type_ == kElement || type_ == kHidden);
return index_;
}
const char* name() const {
ASSERT(type_ == kContextVariable
|| type_ == kProperty
|| type_ == kInternal
- || type_ == kShortcut);
+ || type_ == kShortcut
+ || type_ == kWeak);
return name_;
}
INLINE(HeapEntry* from() const);
@@ -104,7 +83,8 @@ class HeapEntry BASE_EMBEDDED {
kNative = v8::HeapGraphNode::kNative,
kSynthetic = v8::HeapGraphNode::kSynthetic,
kConsString = v8::HeapGraphNode::kConsString,
- kSlicedString = v8::HeapGraphNode::kSlicedString
+ kSlicedString = v8::HeapGraphNode::kSlicedString,
+ kSymbol = v8::HeapGraphNode::kSymbol
};
static const int kNoEntry;
@@ -113,14 +93,16 @@ class HeapEntry BASE_EMBEDDED {
Type type,
const char* name,
SnapshotObjectId id,
- int self_size);
+ size_t self_size,
+ unsigned trace_node_id);
HeapSnapshot* snapshot() { return snapshot_; }
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }
inline SnapshotObjectId id() { return id_; }
- int self_size() { return self_size_; }
+ size_t self_size() { return self_size_; }
+ unsigned trace_node_id() const { return trace_node_id_; }
INLINE(int index() const);
int children_count() const { return children_count_; }
INLINE(int set_children_index(int index));
@@ -138,8 +120,6 @@ class HeapEntry BASE_EMBEDDED {
void Print(
const char* prefix, const char* edge_name, int max_depth, int indent);
- Handle<HeapObject> GetHeapObject();
-
private:
INLINE(HeapGraphEdge** children_arr());
const char* TypeAsString();
@@ -147,28 +127,28 @@ class HeapEntry BASE_EMBEDDED {
unsigned type_: 4;
int children_count_: 28;
int children_index_;
- int self_size_;
- SnapshotObjectId id_;
+ size_t self_size_;
HeapSnapshot* snapshot_;
const char* name_;
+ SnapshotObjectId id_;
+  // Id of the top node of the allocation stack trace.
+ unsigned trace_node_id_;
};
-class HeapSnapshotsCollection;
-
// HeapSnapshot represents a single heap snapshot. It is stored in
-// HeapSnapshotsCollection, which is also a factory for
+// HeapProfiler, which is also a factory for
// HeapSnapshots. All HeapSnapshots share strings copied from JS heap
// to be able to return them even if they were collected.
// HeapSnapshotGenerator fills in a HeapSnapshot.
class HeapSnapshot {
public:
- HeapSnapshot(HeapSnapshotsCollection* collection,
+ HeapSnapshot(HeapProfiler* profiler,
const char* title,
unsigned uid);
void Delete();
- HeapSnapshotsCollection* collection() { return collection_; }
+ HeapProfiler* profiler() { return profiler_; }
const char* title() { return title_; }
unsigned uid() { return uid_; }
size_t RawSnapshotSize() const;
@@ -189,7 +169,8 @@ class HeapSnapshot {
HeapEntry* AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size);
+ size_t size,
+ unsigned trace_node_id);
HeapEntry* AddRootEntry();
HeapEntry* AddGcRootsEntry();
HeapEntry* AddGcSubrootEntry(int tag);
@@ -202,7 +183,7 @@ class HeapSnapshot {
void PrintEntriesSize();
private:
- HeapSnapshotsCollection* collection_;
+ HeapProfiler* profiler_;
const char* title_;
unsigned uid_;
int root_index_;
@@ -227,12 +208,11 @@ class HeapObjectsMap {
Heap* heap() const { return heap_; }
- void SnapshotGenerationFinished();
SnapshotObjectId FindEntry(Address addr);
SnapshotObjectId FindOrAddEntry(Address addr,
unsigned int size,
bool accessed = true);
- void MoveObject(Address from, Address to, int size);
+ bool MoveObject(Address from, Address to, int size);
void UpdateObjectSize(Address addr, int size);
SnapshotObjectId last_assigned_id() const {
return next_id_ - kObjectIdStep;
@@ -242,7 +222,7 @@ class HeapObjectsMap {
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
size_t GetUsedMemorySize() const;
- static SnapshotObjectId GenerateId(Heap* heap, v8::RetainedObjectInfo* info);
+ SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
static inline SnapshotObjectId GetNthGcSubrootId(int delta);
static const int kObjectIdStep = 2;
@@ -255,6 +235,7 @@ class HeapObjectsMap {
int FindUntrackedObjects();
void UpdateHeapObjectsMap();
+ void RemoveDeadEntries();
private:
struct EntryInfo {
@@ -274,8 +255,6 @@ class HeapObjectsMap {
uint32_t count;
};
- void RemoveDeadEntries();
-
SnapshotObjectId next_id_;
HashMap entries_map_;
List<EntryInfo> entries_;
@@ -286,59 +265,6 @@ class HeapObjectsMap {
};
-class HeapSnapshotsCollection {
- public:
- explicit HeapSnapshotsCollection(Heap* heap);
- ~HeapSnapshotsCollection();
-
- Heap* heap() const { return ids_.heap(); }
-
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
- return ids_.PushHeapObjectsStats(stream);
- }
- void StartHeapObjectsTracking(bool track_allocations);
- void StopHeapObjectsTracking();
-
- HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
- void SnapshotGenerationFinished(HeapSnapshot* snapshot);
- List<HeapSnapshot*>* snapshots() { return &snapshots_; }
- void RemoveSnapshot(HeapSnapshot* snapshot);
-
- StringsStorage* names() { return &names_; }
- AllocationTracker* allocation_tracker() { return allocation_tracker_; }
-
- SnapshotObjectId FindObjectId(Address object_addr) {
- return ids_.FindEntry(object_addr);
- }
- SnapshotObjectId GetObjectId(Address object_addr, int object_size) {
- return ids_.FindOrAddEntry(object_addr, object_size);
- }
- Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
- void ObjectMoveEvent(Address from, Address to, int size) {
- ids_.MoveObject(from, to, size);
- }
- void AllocationEvent(Address addr, int size);
- void UpdateObjectSizeEvent(Address addr, int size) {
- ids_.UpdateObjectSize(addr, size);
- }
- SnapshotObjectId last_assigned_id() const {
- return ids_.last_assigned_id();
- }
- size_t GetUsedMemorySize() const;
-
- int FindUntrackedObjects() { return ids_.FindUntrackedObjects(); }
-
- private:
- List<HeapSnapshot*> snapshots_;
- StringsStorage names_;
- // Mapping from HeapObject addresses to objects' uids.
- HeapObjectsMap ids_;
- AllocationTracker* allocation_tracker_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
-};
-
-
// A typedef for referencing anything that can be snapshotted living
// in any kind of heap memory.
typedef void* HeapThing;
@@ -367,9 +293,6 @@ class HeapEntriesMap {
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
v8::internal::kZeroHashSeed);
}
- static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
- return key1 == key2;
- }
HashMap entries_;
@@ -396,32 +319,6 @@ class HeapObjectsSet {
};
-// An interface used to populate a snapshot with nodes and edges.
-class SnapshotFillerInterface {
- public:
- virtual ~SnapshotFillerInterface() { }
- virtual HeapEntry* AddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
- virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual void SetIndexedReference(HeapGraphEdge::Type type,
- int parent_entry,
- int index,
- HeapEntry* child_entry) = 0;
- virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedReference(HeapGraphEdge::Type type,
- int parent_entry,
- const char* reference_name,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
-};
-
-
class SnapshottingProgressReportingInterface {
public:
virtual ~SnapshottingProgressReportingInterface() { }
@@ -438,28 +335,43 @@ class V8HeapExplorer : public HeapEntriesAllocator {
v8::HeapProfiler::ObjectNameResolver* resolver);
virtual ~V8HeapExplorer();
virtual HeapEntry* AllocateEntry(HeapThing ptr);
- void AddRootEntries(SnapshotFillerInterface* filler);
+ void AddRootEntries(SnapshotFiller* filler);
int EstimateObjectsCount(HeapIterator* iterator);
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ bool IterateAndExtractReferences(SnapshotFiller* filler);
void TagGlobalObjects();
void TagCodeObject(Code* code);
- void TagCodeObject(Code* code, const char* external_name);
+ void TagBuiltinCodeObject(Code* code, const char* name);
+ HeapEntry* AddEntry(Address address,
+ HeapEntry::Type type,
+ const char* name,
+ size_t size);
static String* GetConstructorName(JSObject* object);
static HeapObject* const kInternalRootObject;
private:
+ typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry,
+ HeapObject* object);
+
HeapEntry* AddEntry(HeapObject* object);
HeapEntry* AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name);
+
const char* GetSystemEntryName(HeapObject* object);
- void ExtractReferences(HeapObject* obj);
+ template<V8HeapExplorer::ExtractReferencesMethod extractor>
+ bool IterateAndExtractSinglePass();
+
+ bool ExtractReferencesPass1(int entry, HeapObject* obj);
+ bool ExtractReferencesPass2(int entry, HeapObject* obj);
void ExtractJSGlobalProxyReferences(int entry, JSGlobalProxy* proxy);
void ExtractJSObjectReferences(int entry, JSObject* js_obj);
void ExtractStringReferences(int entry, String* obj);
+ void ExtractSymbolReferences(int entry, Symbol* symbol);
+ void ExtractJSWeakCollectionReferences(int entry,
+ JSWeakCollection* collection);
void ExtractContextReferences(int entry, Context* context);
void ExtractMapReferences(int entry, Map* map);
void ExtractSharedFunctionInfoReferences(int entry,
@@ -468,15 +380,19 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
void ExtractCodeReferences(int entry, Code* code);
+ void ExtractBoxReferences(int entry, Box* box);
void ExtractCellReferences(int entry, Cell* cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
+ void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
+ void ExtractFixedArrayReferences(int entry, FixedArray* array);
void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
Object* key, Object* callback_obj);
void ExtractElementReferences(JSObject* js_obj, int entry);
void ExtractInternalReferences(JSObject* js_obj, int entry);
+
bool IsEssentialObject(Object* object);
void SetContextReference(HeapObject* parent_obj,
int parent,
@@ -507,6 +423,11 @@ class V8HeapExplorer : public HeapEntriesAllocator {
Object* child);
void SetWeakReference(HeapObject* parent_obj,
int parent,
+ const char* reference_name,
+ Object* child_obj,
+ int field_offset);
+ void SetWeakReference(HeapObject* parent_obj,
+ int parent,
int index,
Object* child_obj,
int field_offset);
@@ -523,6 +444,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
const char* GetStrongGcSubrootName(Object* object);
void TagObject(Object* obj, const char* tag);
+ void MarkAsWeakContainer(Object* object);
HeapEntry* GetEntry(Object* obj);
@@ -531,12 +453,14 @@ class V8HeapExplorer : public HeapEntriesAllocator {
Heap* heap_;
HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
+ StringsStorage* names_;
+ HeapObjectsMap* heap_object_map_;
SnapshottingProgressReportingInterface* progress_;
- SnapshotFillerInterface* filler_;
+ SnapshotFiller* filler_;
HeapObjectsSet objects_tags_;
HeapObjectsSet strong_gc_subroot_names_;
HeapObjectsSet user_roots_;
+ HeapObjectsSet weak_containers_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
static HeapObject* const kGcRootsObject;
@@ -560,9 +484,9 @@ class NativeObjectsExplorer {
NativeObjectsExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress);
virtual ~NativeObjectsExplorer();
- void AddRootEntries(SnapshotFillerInterface* filler);
+ void AddRootEntries(SnapshotFiller* filler);
int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ bool IterateAndExtractReferences(SnapshotFiller* filler);
private:
void FillRetainedObjects();
@@ -592,7 +516,7 @@ class NativeObjectsExplorer {
Isolate* isolate_;
HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
+ StringsStorage* names_;
SnapshottingProgressReportingInterface* progress_;
bool embedder_queried_;
HeapObjectsSet in_groups_;
@@ -602,7 +526,7 @@ class NativeObjectsExplorer {
HeapEntriesAllocator* synthetic_entries_allocator_;
HeapEntriesAllocator* native_entries_allocator_;
// Used during references extraction.
- SnapshotFillerInterface* filler_;
+ SnapshotFiller* filler_;
static HeapThing const kNativesRootObject;
diff --git a/chromium/v8/src/heap.cc b/chromium/v8/src/heap.cc
index d9dc8c19b07..dd3946f18c5 100644
--- a/chromium/v8/src/heap.cc
+++ b/chromium/v8/src/heap.cc
@@ -1,64 +1,42 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "global-handles.h"
-#include "heap-profiler.h"
-#include "incremental-marking.h"
-#include "isolate-inl.h"
-#include "mark-compact.h"
-#include "natives.h"
-#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
-#include "once.h"
-#include "runtime-profiler.h"
-#include "scopeinfo.h"
-#include "snapshot.h"
-#include "store-buffer.h"
-#include "utils/random-number-generator.h"
-#include "v8threads.h"
-#include "v8utils.h"
-#include "vm-state-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/api.h"
+#include "src/base/once.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/compilation-cache.h"
+#include "src/conversions.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/global-handles.h"
+#include "src/heap-profiler.h"
+#include "src/incremental-marking.h"
+#include "src/isolate-inl.h"
+#include "src/mark-compact.h"
+#include "src/natives.h"
+#include "src/objects-visiting.h"
+#include "src/objects-visiting-inl.h"
+#include "src/runtime-profiler.h"
+#include "src/scopeinfo.h"
+#include "src/snapshot.h"
+#include "src/store-buffer.h"
+#include "src/utils/random-number-generator.h"
+#include "src/utils.h"
+#include "src/v8threads.h"
+#include "src/vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-#include "regexp-macro-assembler.h"
-#include "arm/regexp-macro-assembler-arm.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
-#include "regexp-macro-assembler.h"
-#include "mips/regexp-macro-assembler-mips.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/mips/regexp-macro-assembler-mips.h"
#endif
namespace v8 {
@@ -66,17 +44,19 @@ namespace internal {
Heap::Heap()
- : isolate_(NULL),
- code_range_size_(kIs64BitArch ? 512 * MB : 0),
+ : amount_of_external_allocated_memory_(0),
+ amount_of_external_allocated_memory_at_last_global_gc_(0),
+ isolate_(NULL),
+ code_range_size_(0),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
- max_semispace_size_(8 * (kPointerSize / 4) * MB),
+ max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
-// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
+// ConfigureHeap.
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
maximum_committed_(0),
@@ -104,14 +84,8 @@ Heap::Heap()
unflattened_strings_length_(0),
#ifdef DEBUG
allocation_timeout_(0),
- disallow_allocation_failure_(false),
#endif // DEBUG
- new_space_high_promotion_mode_active_(false),
old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
- size_of_old_gen_at_last_old_space_gc_(0),
- external_allocation_limit_(0),
- amount_of_external_allocated_memory_(0),
- amount_of_external_allocated_memory_at_last_global_gc_(0),
old_gen_exhausted_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
@@ -119,12 +93,12 @@ Heap::Heap()
gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(NULL),
- young_survivors_after_last_gc_(0),
high_survival_rate_period_length_(0),
- low_survival_rate_period_length_(0),
- survival_rate_(0),
- previous_survival_rate_trend_(Heap::STABLE),
- survival_rate_trend_(Heap::STABLE),
+ promoted_objects_size_(0),
+ promotion_rate_(0),
+ semi_space_copied_object_size_(0),
+ semi_space_copied_rate_(0),
+ maximum_size_scavenges_(0),
max_gc_pause_(0.0),
total_gc_time_ms_(0.0),
max_alive_after_gc_(0),
@@ -133,6 +107,7 @@ Heap::Heap()
last_gc_end_timestamp_(0.0),
marking_time_(0.0),
sweeping_time_(0.0),
+ mark_compact_collector_(this),
store_buffer_(this),
marking_(this),
incremental_marking_(this),
@@ -148,35 +123,27 @@ Heap::Heap()
#ifdef VERIFY_HEAP
no_weak_object_verification_scope_depth_(0),
#endif
+ allocation_sites_scratchpad_length_(0),
promotion_queue_(this),
configured_(false),
+ external_string_table_(this),
chunks_queued_for_free_(NULL),
- relocation_mutex_(NULL) {
+ gc_callbacks_depth_(0) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
- max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
+ max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
// Ensure old_generation_size_ is a multiple of kPageSize.
ASSERT(MB >= Page::kPageSize);
- intptr_t max_virtual = OS::MaxVirtualMemory();
-
- if (max_virtual > 0) {
- if (code_range_size_ > 0) {
- // Reserve no more than 1/8 of the memory for the code range.
- code_range_size_ = Min(code_range_size_, max_virtual >> 3);
- }
- }
-
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
- native_contexts_list_ = NULL;
- array_buffers_list_ = Smi::FromInt(0);
- allocation_sites_list_ = Smi::FromInt(0);
- mark_compact_collector_.heap_ = this;
- external_string_table_.heap_ = this;
+ set_native_contexts_list(NULL);
+ set_array_buffers_list(Smi::FromInt(0));
+ set_allocation_sites_list(Smi::FromInt(0));
+ set_encountered_weak_collections(Smi::FromInt(0));
  // Put a dummy entry in the remembered pages so we can find the list in
  // the minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
@@ -436,7 +403,6 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
{ AllowHeapAllocation for_the_first_part_of_prologue;
- isolate_->transcendental_cache()->Clear();
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
@@ -452,6 +418,10 @@ void Heap::GarbageCollectionPrologue() {
#endif
}
+ // Reset GC statistics.
+ promoted_objects_size_ = 0;
+ semi_space_copied_object_size_ = 0;
+
UpdateMaximumCommitted();
#ifdef DEBUG
@@ -467,6 +437,13 @@ void Heap::GarbageCollectionPrologue() {
if (isolate()->concurrent_osr_enabled()) {
isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
}
+
+ if (new_space_.IsAtMaximumCapacity()) {
+ maximum_size_scavenges_++;
+ } else {
+ maximum_size_scavenges_ = 0;
+ }
+ CheckNewSpaceExpansionCriteria();
}
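[Editor's note — illustrative sketch, not part of the patch] The branch added above counts consecutive scavenges that ran while new space was already at maximum capacity, resetting the streak otherwise. A minimal stand-alone model of that counter, with hypothetical inputs:

  #include <initializer_list>

  int main() {
    int maximum_size_scavenges = 0;
    // Each entry stands for one GC prologue: was new space at maximum capacity?
    for (bool at_max : {true, true, false, true}) {
      maximum_size_scavenges = at_max ? maximum_size_scavenges + 1 : 0;
    }
    return maximum_size_scavenges;  // 1: the `false` entry reset the streak.
  }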
@@ -504,41 +481,105 @@ void Heap::RepairFreeListsAfterBoot() {
}
-void Heap::GarbageCollectionEpilogue() {
+void Heap::ProcessPretenuringFeedback() {
if (FLAG_allocation_site_pretenuring) {
int tenure_decisions = 0;
int dont_tenure_decisions = 0;
int allocation_mementos_found = 0;
-
- Object* cur = allocation_sites_list();
- while (cur->IsAllocationSite()) {
- AllocationSite* casted = AllocationSite::cast(cur);
- allocation_mementos_found += casted->memento_found_count()->value();
- if (casted->DigestPretenuringFeedback()) {
- if (casted->GetPretenureMode() == TENURED) {
+ int allocation_sites = 0;
+ int active_allocation_sites = 0;
+
+ // If the scratchpad overflowed, we have to iterate over the allocation
+ // sites list.
+ // TODO(hpayer): We iterate over the whole list of allocation sites when
+ // we have grown to the maximum semi-space size, in order to deopt
+ // maybe-tenured allocation sites. We could hold the maybe-tenured
+ // allocation sites in a separate data structure if this becomes a
+ // performance problem.
+ bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
+ bool use_scratchpad =
+ allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
+ !deopt_maybe_tenured;
+
+ int i = 0;
+ Object* list_element = allocation_sites_list();
+ bool trigger_deoptimization = false;
+ bool maximum_size_scavenge = MaximumSizeScavenge();
+ while (use_scratchpad ?
+ i < allocation_sites_scratchpad_length_ :
+ list_element->IsAllocationSite()) {
+ AllocationSite* site = use_scratchpad ?
+ AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
+ AllocationSite::cast(list_element);
+ allocation_mementos_found += site->memento_found_count();
+ if (site->memento_found_count() > 0) {
+ active_allocation_sites++;
+ if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
+ trigger_deoptimization = true;
+ }
+ if (site->GetPretenureMode() == TENURED) {
tenure_decisions++;
} else {
dont_tenure_decisions++;
}
+ allocation_sites++;
+ }
+
+ if (deopt_maybe_tenured && site->IsMaybeTenure()) {
+ site->set_deopt_dependent_code(true);
+ trigger_deoptimization = true;
}
- cur = casted->weak_next();
+
+ if (use_scratchpad) {
+ i++;
+ } else {
+ list_element = site->weak_next();
+ }
+ }
+
+ if (trigger_deoptimization) {
+ isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
- // TODO(mvstanton): Pretenure decisions are only made once for an allocation
- // site. Find a sane way to decide about revisiting the decision later.
+ FlushAllocationSitesScratchpad();
- if (FLAG_trace_track_allocation_sites &&
+ if (FLAG_trace_pretenuring_statistics &&
(allocation_mementos_found > 0 ||
tenure_decisions > 0 ||
dont_tenure_decisions > 0)) {
- PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
- "(%d, %d, %d)\n",
+ PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
+ "#mementos, #tenure decisions, #donttenure decisions) "
+ "(%s, %d, %d, %d, %d, %d)\n",
+ use_scratchpad ? "use scratchpad" : "use list",
+ allocation_sites,
+ active_allocation_sites,
allocation_mementos_found,
tenure_decisions,
dont_tenure_decisions);
}
}
+}
+
+
+void Heap::DeoptMarkedAllocationSites() {
+ // TODO(hpayer): If iterating over the allocation sites list becomes a
+ // performance issue, use a cached data structure in the heap instead
+ // (similar to the allocation sites scratchpad).
+ Object* list_element = allocation_sites_list();
+ while (list_element->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(list_element);
+ if (site->deopt_dependent_code()) {
+ site->dependent_code()->MarkCodeForDeoptimization(
+ isolate_,
+ DependentCode::kAllocationSiteTenuringChangedGroup);
+ site->set_deopt_dependent_code(false);
+ }
+ list_element = site->weak_next();
+ }
+ Deoptimizer::DeoptimizeMarkedCode(isolate_);
+}
+
+void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
// In release mode, we only zap the from space under heap verification.
@@ -546,6 +587,9 @@ void Heap::GarbageCollectionEpilogue() {
ZapFromSpace();
}
+ // Process pretenuring feedback and update allocation sites.
+ ProcessPretenuringFeedback();
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -667,21 +711,25 @@ void Heap::GarbageCollectionEpilogue() {
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
-#if defined(DEBUG)
+#ifdef DEBUG
ReportStatisticsAfterGC();
#endif // DEBUG
-#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->AfterGarbageCollection();
-#endif // ENABLE_DEBUGGER_SUPPORT
+
+ // Remember the last top pointer so that we can later find out
+ // whether we allocated in new space since the last GC.
+ new_space_top_after_last_gc_ = new_space()->top();
}
-void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
+void Heap::CollectAllGarbage(int flags,
+ const char* gc_reason,
+ const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
mark_compact_collector_.SetFlags(flags);
- CollectGarbage(OLD_POINTER_SPACE, gc_reason);
+ CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
mark_compact_collector_.SetFlags(kNoGCFlags);
}
@@ -709,7 +757,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
+ if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
@@ -721,10 +769,25 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
}
-bool Heap::CollectGarbage(AllocationSpace space,
- GarbageCollector collector,
+void Heap::EnsureFillerObjectAtTop() {
+ // There may be an allocation memento behind every object in new space.
+ // If we evacuate a new space that is not completely full, or if we are on
+ // the last page of the new space, then there may be uninitialized memory
+ // behind the top pointer of the new space page. We store a filler object
+ // there to identify the unused space.
+ Address from_top = new_space_.top();
+ Address from_limit = new_space_.limit();
+ if (from_top < from_limit) {
+ int remaining_in_page = static_cast<int>(from_limit - from_top);
+ CreateFillerObjectAt(from_top, remaining_in_page);
+ }
+}
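[Editor's note — illustrative sketch, not part of the patch] EnsureFillerObjectAtTop plugs the gap between the new-space top and limit; a worked example with made-up addresses:

  #include <cassert>
  #include <cstdint>

  int main() {
    // Hypothetical new-space page state; the real values come from new_space_.
    uintptr_t from_top = 0x1000;
    uintptr_t from_limit = 0x1800;
    if (from_top < from_limit) {
      int remaining_in_page = static_cast<int>(from_limit - from_top);
      assert(remaining_in_page == 0x800);
      // The real code calls CreateFillerObjectAt(from_top, remaining_in_page)
      // here so heap iterators never walk into uninitialized memory.
    }
    return 0;
  }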
+
+
+bool Heap::CollectGarbage(GarbageCollector collector,
const char* gc_reason,
- const char* collector_reason) {
+ const char* collector_reason,
+ const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate_);
@@ -737,6 +800,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
+ EnsureFillerObjectAtTop();
+
if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Scavenge during marking.\n");
@@ -752,7 +817,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- if (!incremental_marking()->IsComplete()) {
+ if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
}
@@ -779,7 +844,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
(collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
: isolate_->counters()->gc_compactor());
next_gc_likely_to_collect_more =
- PerformGarbageCollection(collector, &tracer);
+ PerformGarbageCollection(collector, &tracer, gc_callback_flags);
}
GarbageCollectionEpilogue();
@@ -809,16 +874,6 @@ int Heap::NotifyContextDisposed() {
}
-void Heap::PerformScavenge() {
- GCTracer tracer(this, NULL, NULL);
- if (incremental_marking()->IsStopped()) {
- PerformGarbageCollection(SCAVENGER, &tracer);
- } else {
- PerformGarbageCollection(MARK_COMPACTOR, &tracer);
- }
-}
-
-
void Heap::MoveElements(FixedArray* array,
int dst_index,
int src_index,
@@ -827,9 +882,7 @@ void Heap::MoveElements(FixedArray* array,
ASSERT(array->map() != fixed_cow_array_map());
Object** dst_objects = array->data_start() + dst_index;
- OS::MemMove(dst_objects,
- array->data_start() + src_index,
- len * kPointerSize);
+ MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
if (!InNewSpace(array)) {
for (int i = 0; i < len; i++) {
// TODO(hpayer): check store buffer for entries
@@ -886,14 +939,14 @@ void Heap::ReserveSpace(int *sizes, Address *locations_out) {
ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
if (sizes[space] != 0) {
- MaybeObject* allocation;
+ AllocationResult allocation;
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRaw(sizes[space]);
} else {
allocation = paged_space(space)->AllocateRaw(sizes[space]);
}
FreeListNode* node;
- if (!allocation->To<FreeListNode>(&node)) {
+ if (!allocation.To(&node)) {
if (space == NEW_SPACE) {
Heap::CollectGarbage(NEW_SPACE,
"failed to reserve space in the new space");
@@ -934,7 +987,7 @@ void Heap::EnsureFromSpaceIsCommitted() {
void Heap::ClearJSFunctionResultCaches() {
if (isolate_->bootstrapper()->IsActive()) return;
- Object* context = native_contexts_list_;
+ Object* context = native_contexts_list();
while (!context->IsUndefined()) {
// Get the caches for this context. GC can happen when the context
// is not fully initialized, so the caches can be undefined.
@@ -960,7 +1013,7 @@ void Heap::ClearNormalizedMapCaches() {
return;
}
- Object* context = native_contexts_list_;
+ Object* context = native_contexts_list();
while (!context->IsUndefined()) {
// GC can happen when the context is not fully initialized,
// so the cache can be undefined.
@@ -974,41 +1027,31 @@ void Heap::ClearNormalizedMapCaches() {
}
-void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
+void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
- double survival_rate =
- (static_cast<double>(young_survivors_after_last_gc_) * 100) /
- start_new_space_size;
+ promotion_rate_ =
+ (static_cast<double>(promoted_objects_size_) /
+ static_cast<double>(start_new_space_size) * 100);
+
+ semi_space_copied_rate_ =
+ (static_cast<double>(semi_space_copied_object_size_) /
+ static_cast<double>(start_new_space_size) * 100);
+
+ double survival_rate = promotion_rate_ + semi_space_copied_rate_;
if (survival_rate > kYoungSurvivalRateHighThreshold) {
high_survival_rate_period_length_++;
} else {
high_survival_rate_period_length_ = 0;
}
-
- if (survival_rate < kYoungSurvivalRateLowThreshold) {
- low_survival_rate_period_length_++;
- } else {
- low_survival_rate_period_length_ = 0;
- }
-
- double survival_rate_diff = survival_rate_ - survival_rate;
-
- if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
- set_survival_rate_trend(DECREASING);
- } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
- set_survival_rate_trend(INCREASING);
- } else {
- set_survival_rate_trend(STABLE);
- }
-
- survival_rate_ = survival_rate;
}
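[Editor's note — illustrative sketch, not part of the patch] The new survival statistics combine two rates; a worked example with hypothetical sizes:

  #include <cstdio>

  int main() {
    double start_new_space_size = 8.0 * 1024 * 1024;   // new space size at GC start
    double promoted_objects_size = 1.0 * 1024 * 1024;  // promoted to old space
    double semi_space_copied_size = 2.0 * 1024 * 1024; // copied within new space
    double promotion_rate = promoted_objects_size / start_new_space_size * 100;
    double semi_space_copied_rate =
        semi_space_copied_size / start_new_space_size * 100;
    double survival_rate = promotion_rate + semi_space_copied_rate;
    // Prints "12.5 25.0 37.5"; the sum is what is compared against
    // kYoungSurvivalRateHighThreshold in the code above.
    std::printf("%.1f %.1f %.1f\n", promotion_rate, semi_space_copied_rate,
                survival_rate);
    return 0;
  }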
-bool Heap::PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer) {
- bool next_gc_likely_to_collect_more = false;
+bool Heap::PerformGarbageCollection(
+ GarbageCollector collector,
+ GCTracer* tracer,
+ const v8::GCCallbackFlags gc_callback_flags) {
+ int freed_global_handles = 0;
if (collector != SCAVENGER) {
PROFILE(isolate_, CodeMovingGCEvent());
@@ -1023,11 +1066,14 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
- {
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+ { GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+ }
}
EnsureFromSpaceIsCommitted();
@@ -1045,65 +1091,19 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
sweep_generation_++;
-
- UpdateSurvivalRateTrend(start_new_space_size);
-
- size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
-
+ // Temporarily set the limit for the case when
+ // PostGarbageCollectionProcessing allocates and triggers GC. The real
+ // limit is set after PostGarbageCollectionProcessing.
old_generation_allocation_limit_ =
- OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-
+ OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
old_gen_exhausted_ = false;
} else {
tracer_ = tracer;
Scavenge();
tracer_ = NULL;
-
- UpdateSurvivalRateTrend(start_new_space_size);
- }
-
- if (!new_space_high_promotion_mode_active_ &&
- new_space_.Capacity() == new_space_.MaximumCapacity() &&
- IsStableOrIncreasingSurvivalTrend() &&
- IsHighSurvivalRate()) {
- // Stable high survival rates even though young generation is at
- // maximum capacity indicates that most objects will be promoted.
- // To decrease scavenger pauses and final mark-sweep pauses, we
- // have to limit maximal capacity of the young generation.
- SetNewSpaceHighPromotionModeActive(true);
- if (FLAG_trace_gc) {
- PrintPID("Limited new space size due to high promotion rate: %d MB\n",
- new_space_.InitialCapacity() / MB);
- }
- // Support for global pre-tenuring uses the high promotion mode as a
- // heuristic indicator of whether to pretenure or not, we trigger
- // deoptimization here to take advantage of pre-tenuring as soon as
- // possible.
- if (FLAG_pretenuring) {
- isolate_->stack_guard()->FullDeopt();
- }
- } else if (new_space_high_promotion_mode_active_ &&
- IsStableOrDecreasingSurvivalTrend() &&
- IsLowSurvivalRate()) {
- // Decreasing low survival rates might indicate that the above high
- // promotion mode is over and we should allow the young generation
- // to grow again.
- SetNewSpaceHighPromotionModeActive(false);
- if (FLAG_trace_gc) {
- PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
- new_space_.MaximumCapacity() / MB);
- }
- // Trigger deoptimization here to turn off pre-tenuring as soon as
- // possible.
- if (FLAG_pretenuring) {
- isolate_->stack_guard()->FullDeopt();
- }
}
- if (new_space_high_promotion_mode_active_ &&
- new_space_.Capacity() > new_space_.InitialCapacity()) {
- new_space_.Shrink();
- }
+ UpdateSurvivalStatistics(start_new_space_size);
isolate_->counters()->objs_since_last_young()->Set(0);
@@ -1114,7 +1114,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
gc_post_processing_depth_++;
{ AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- next_gc_likely_to_collect_more =
+ freed_global_handles =
isolate_->global_handles()->PostGarbageCollectionProcessing(
collector, tracer);
}
@@ -1129,13 +1129,19 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// Register the amount of external allocated memory.
amount_of_external_allocated_memory_at_last_global_gc_ =
amount_of_external_allocated_memory_;
+ old_generation_allocation_limit_ =
+ OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+ freed_global_handles);
}
- {
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type);
+ { GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+ }
}
#ifdef VERIFY_HEAP
@@ -1144,7 +1150,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
}
#endif
- return next_gc_likely_to_collect_more;
+ return freed_global_handles > 0;
}
@@ -1165,18 +1171,19 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
}
-void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
+void Heap::CallGCEpilogueCallbacks(GCType gc_type,
+ GCCallbackFlags gc_callback_flags) {
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
if (!gc_epilogue_callbacks_[i].pass_isolate_) {
v8::GCPrologueCallback callback =
reinterpret_cast<v8::GCPrologueCallback>(
gc_epilogue_callbacks_[i].callback);
- callback(gc_type, kNoGCCallbackFlags);
+ callback(gc_type, gc_callback_flags);
} else {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
gc_epilogue_callbacks_[i].callback(
- isolate, gc_type, kNoGCCallbackFlags);
+ isolate, gc_type, gc_callback_flags);
}
}
}
@@ -1187,6 +1194,8 @@ void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
LOG(isolate_, ResourceEvent("markcompact", "begin"));
+ uint64_t size_of_objects_before_gc = SizeOfObjects();
+
mark_compact_collector_.Prepare(tracer);
ms_count_++;
@@ -1203,6 +1212,10 @@ void Heap::MarkCompact(GCTracer* tracer) {
isolate_->counters()->objs_since_last_full()->Set(0);
flush_monomorphic_ics_ = false;
+
+ if (FLAG_allocation_site_pretenuring) {
+ EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
+ }
}
@@ -1294,8 +1307,7 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
void Heap::CheckNewSpaceExpansionCriteria() {
if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_.Capacity() &&
- !new_space_high_promotion_mode_active_) {
+ survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough data
    // has survived scavenge since the last expansion.
@@ -1444,15 +1456,10 @@ void Heap::Scavenge() {
// Used for updating survived_since_last_expansion_ at function end.
intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
- CheckNewSpaceExpansionCriteria();
-
SelectScavengingVisitorsTable();
incremental_marking()->PrepareForScavenge();
- paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
- paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_.Flip();
@@ -1522,15 +1529,15 @@ void Heap::Scavenge() {
}
}
+ // Copy objects reachable from the encountered weak collections list.
+ scavenge_visitor.VisitPointer(&encountered_weak_collections_);
+
// Copy objects reachable from the code flushing candidates list.
MarkCompactCollector* collector = mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
}
- // Scavenge object reachable from the native contexts list directly.
- scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
-
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
while (isolate()->global_handles()->IterateObjectGroups(
@@ -1551,9 +1558,6 @@ void Heap::Scavenge() {
promotion_queue_.Destroy();
- if (!FLAG_watch_ic_patching) {
- isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
- }
incremental_marking()->UpdateMarkingDequeAfterScavenge();
ScavengeWeakObjectRetainer weak_object_retainer(this);
@@ -1645,255 +1649,25 @@ void Heap::UpdateReferencesInExternalStringTable(
}
-template <class T>
-struct WeakListVisitor;
-
-
-template <class T>
-static Object* VisitWeakList(Heap* heap,
- Object* list,
- WeakObjectRetainer* retainer,
- bool record_slots) {
- Object* undefined = heap->undefined_value();
- Object* head = undefined;
- T* tail = NULL;
- MarkCompactCollector* collector = heap->mark_compact_collector();
- while (list != undefined) {
- // Check whether to keep the candidate in the list.
- T* candidate = reinterpret_cast<T*>(list);
- Object* retained = retainer->RetainAs(list);
- if (retained != NULL) {
- if (head == undefined) {
- // First element in the list.
- head = retained;
- } else {
- // Subsequent elements in the list.
- ASSERT(tail != NULL);
- WeakListVisitor<T>::SetWeakNext(tail, retained);
- if (record_slots) {
- Object** next_slot =
- HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
- collector->RecordSlot(next_slot, next_slot, retained);
- }
- }
- // Retained object is new tail.
- ASSERT(!retained->IsUndefined());
- candidate = reinterpret_cast<T*>(retained);
- tail = candidate;
-
-
- // tail is a live object, visit it.
- WeakListVisitor<T>::VisitLiveObject(
- heap, tail, retainer, record_slots);
- } else {
- WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
- }
-
- // Move to next element in the list.
- list = WeakListVisitor<T>::WeakNext(candidate);
- }
-
- // Terminate the list if there is one or more elements.
- if (tail != NULL) {
- WeakListVisitor<T>::SetWeakNext(tail, undefined);
- }
- return head;
-}
-
-
-template<>
-struct WeakListVisitor<JSFunction> {
- static void SetWeakNext(JSFunction* function, Object* next) {
- function->set_next_function_link(next);
- }
-
- static Object* WeakNext(JSFunction* function) {
- return function->next_function_link();
- }
-
- static int WeakNextOffset() {
- return JSFunction::kNextFunctionLinkOffset;
- }
-
- static void VisitLiveObject(Heap*, JSFunction*,
- WeakObjectRetainer*, bool) {
- }
-
- static void VisitPhantomObject(Heap*, JSFunction*) {
- }
-};
-
-
-template<>
-struct WeakListVisitor<Code> {
- static void SetWeakNext(Code* code, Object* next) {
- code->set_next_code_link(next);
- }
-
- static Object* WeakNext(Code* code) {
- return code->next_code_link();
- }
-
- static int WeakNextOffset() {
- return Code::kNextCodeLinkOffset;
- }
-
- static void VisitLiveObject(Heap*, Code*,
- WeakObjectRetainer*, bool) {
- }
-
- static void VisitPhantomObject(Heap*, Code*) {
- }
-};
-
-
-template<>
-struct WeakListVisitor<Context> {
- static void SetWeakNext(Context* context, Object* next) {
- context->set(Context::NEXT_CONTEXT_LINK,
- next,
- UPDATE_WRITE_BARRIER);
- }
-
- static Object* WeakNext(Context* context) {
- return context->get(Context::NEXT_CONTEXT_LINK);
- }
-
- static void VisitLiveObject(Heap* heap,
- Context* context,
- WeakObjectRetainer* retainer,
- bool record_slots) {
- // Process the three weak lists linked off the context.
- DoWeakList<JSFunction>(heap, context, retainer, record_slots,
- Context::OPTIMIZED_FUNCTIONS_LIST);
- DoWeakList<Code>(heap, context, retainer, record_slots,
- Context::OPTIMIZED_CODE_LIST);
- DoWeakList<Code>(heap, context, retainer, record_slots,
- Context::DEOPTIMIZED_CODE_LIST);
- }
-
- template<class T>
- static void DoWeakList(Heap* heap,
- Context* context,
- WeakObjectRetainer* retainer,
- bool record_slots,
- int index) {
- // Visit the weak list, removing dead intermediate elements.
- Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
- record_slots);
-
- // Update the list head.
- context->set(index, list_head, UPDATE_WRITE_BARRIER);
-
- if (record_slots) {
- // Record the updated slot if necessary.
- Object** head_slot = HeapObject::RawField(
- context, FixedArray::SizeFor(index));
- heap->mark_compact_collector()->RecordSlot(
- head_slot, head_slot, list_head);
- }
- }
-
- static void VisitPhantomObject(Heap*, Context*) {
- }
-
- static int WeakNextOffset() {
- return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
- }
-};
-
-
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- // We don't record weak slots during marking or scavenges.
- // Instead we do it once when we complete mark-compact cycle.
- // Note that write barrier has no effect if we are already in the middle of
- // compacting mark-sweep cycle and we have to record slots manually.
- bool record_slots =
- gc_state() == MARK_COMPACT &&
- mark_compact_collector()->is_compacting();
- ProcessArrayBuffers(retainer, record_slots);
- ProcessNativeContexts(retainer, record_slots);
+ ProcessArrayBuffers(retainer);
+ ProcessNativeContexts(retainer);
// TODO(mvstanton): AllocationSites only need to be processed during
// MARK_COMPACT, as they live in old space. Verify and address.
- ProcessAllocationSites(retainer, record_slots);
+ ProcessAllocationSites(retainer);
}
-void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
- bool record_slots) {
- Object* head =
- VisitWeakList<Context>(
- this, native_contexts_list(), retainer, record_slots);
+
+void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
+ Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
// Update the head of the list of contexts.
- native_contexts_list_ = head;
+ set_native_contexts_list(head);
}
-template<>
-struct WeakListVisitor<JSArrayBufferView> {
- static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
- obj->set_weak_next(next);
- }
-
- static Object* WeakNext(JSArrayBufferView* obj) {
- return obj->weak_next();
- }
-
- static void VisitLiveObject(Heap*,
- JSArrayBufferView* obj,
- WeakObjectRetainer* retainer,
- bool record_slots) {}
-
- static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
-
- static int WeakNextOffset() {
- return JSArrayBufferView::kWeakNextOffset;
- }
-};
-
-
-template<>
-struct WeakListVisitor<JSArrayBuffer> {
- static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
- obj->set_weak_next(next);
- }
-
- static Object* WeakNext(JSArrayBuffer* obj) {
- return obj->weak_next();
- }
-
- static void VisitLiveObject(Heap* heap,
- JSArrayBuffer* array_buffer,
- WeakObjectRetainer* retainer,
- bool record_slots) {
- Object* typed_array_obj =
- VisitWeakList<JSArrayBufferView>(
- heap,
- array_buffer->weak_first_view(),
- retainer, record_slots);
- array_buffer->set_weak_first_view(typed_array_obj);
- if (typed_array_obj != heap->undefined_value() && record_slots) {
- Object** slot = HeapObject::RawField(
- array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
- heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
- }
- }
-
- static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
- Runtime::FreeArrayBuffer(heap->isolate(), phantom);
- }
-
- static int WeakNextOffset() {
- return JSArrayBuffer::kWeakNextOffset;
- }
-};
-
-
-void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
- bool record_slots) {
+void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
Object* array_buffer_obj =
- VisitWeakList<JSArrayBuffer>(this,
- array_buffers_list(),
- retainer, record_slots);
+ VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
set_array_buffers_list(array_buffer_obj);
}
@@ -1905,51 +1679,58 @@ void Heap::TearDownArrayBuffers() {
Runtime::FreeArrayBuffer(isolate(), buffer);
o = buffer->weak_next();
}
- array_buffers_list_ = undefined;
+ set_array_buffers_list(undefined);
}
-template<>
-struct WeakListVisitor<AllocationSite> {
- static void SetWeakNext(AllocationSite* obj, Object* next) {
- obj->set_weak_next(next);
- }
+void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
+ Object* allocation_site_obj =
+ VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
+ set_allocation_sites_list(allocation_site_obj);
+}
- static Object* WeakNext(AllocationSite* obj) {
- return obj->weak_next();
+
+void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
+ DisallowHeapAllocation no_allocation_scope;
+ Object* cur = allocation_sites_list();
+ bool marked = false;
+ while (cur->IsAllocationSite()) {
+ AllocationSite* casted = AllocationSite::cast(cur);
+ if (casted->GetPretenureMode() == flag) {
+ casted->ResetPretenureDecision();
+ casted->set_deopt_dependent_code(true);
+ marked = true;
+ }
+ cur = casted->weak_next();
}
+ if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
+}
- static void VisitLiveObject(Heap* heap,
- AllocationSite* site,
- WeakObjectRetainer* retainer,
- bool record_slots) {}
- static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
+void Heap::EvaluateOldSpaceLocalPretenuring(
+ uint64_t size_of_objects_before_gc) {
+ uint64_t size_of_objects_after_gc = SizeOfObjects();
+ double old_generation_survival_rate =
+ (static_cast<double>(size_of_objects_after_gc) * 100) /
+ static_cast<double>(size_of_objects_before_gc);
- static int WeakNextOffset() {
- return AllocationSite::kWeakNextOffset;
+ if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
+ // Too many objects died in the old generation; pretenuring of the wrong
+ // allocation sites may be the cause. We have to deopt all dependent code
+ // registered in the allocation sites to re-evaluate our pretenuring
+ // decisions.
+ ResetAllAllocationSitesDependentCode(TENURED);
+ if (FLAG_trace_pretenuring) {
+ PrintF("Deopt all allocation sites dependent code due to low survival "
+ "rate in the old generation %f\n", old_generation_survival_rate);
+ }
}
-};
-
-
-void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
- bool record_slots) {
- Object* allocation_site_obj =
- VisitWeakList<AllocationSite>(this,
- allocation_sites_list(),
- retainer, record_slots);
- set_allocation_sites_list(allocation_site_obj);
}
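[Editor's note — illustrative sketch, not part of the patch] The old-generation survival check above in numbers; the threshold value is assumed for the example (the real kOldSurvivalRateLowThreshold is defined elsewhere in the heap sources):

  int main() {
    const double kOldSurvivalRateLowThreshold = 10;  // assumed for the example
    double size_before_gc = 200.0;  // MB of old-space objects before mark-compact
    double size_after_gc = 15.0;    // MB surviving the collection
    double old_generation_survival_rate = size_after_gc * 100 / size_before_gc;
    // 7.5 < 10, so the real code would reset TENURED pretenuring decisions and
    // deopt dependent code via ResetAllAllocationSitesDependentCode(TENURED).
    return old_generation_survival_rate < kOldSurvivalRateLowThreshold;
  }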
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
DisallowHeapAllocation no_allocation;
-
- // Both the external string table and the string table may contain
- // external strings, but neither lists them exhaustively, nor is the
- // intersection set empty. Therefore we iterate over the external string
- // table first, ignoring internalized strings, and then over the
- // internalized string table.
+ // All external strings are listed in the external string table.
class ExternalStringTableVisitorAdapter : public ObjectVisitor {
public:
@@ -1957,13 +1738,9 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
- // Visit non-internalized external strings,
- // since internalized strings are listed in the string table.
- if (!(*p)->IsInternalizedString()) {
- ASSERT((*p)->IsExternalString());
- visitor_->VisitExternalString(Utils::ToLocal(
- Handle<String>(String::cast(*p))));
- }
+ ASSERT((*p)->IsExternalString());
+ visitor_->VisitExternalString(Utils::ToLocal(
+ Handle<String>(String::cast(*p))));
}
}
private:
@@ -1971,25 +1748,6 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
} external_string_table_visitor(visitor);
external_string_table_.Iterate(&external_string_table_visitor);
-
- class StringTableVisitorAdapter : public ObjectVisitor {
- public:
- explicit StringTableVisitorAdapter(
- v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsExternalString()) {
- ASSERT((*p)->IsInternalizedString());
- visitor_->VisitExternalString(Utils::ToLocal(
- Handle<String>(String::cast(*p))));
- }
- }
- }
- private:
- v8::ExternalResourceVisitor* visitor_;
- } string_table_visitor(visitor);
-
- string_table()->IterateElements(&string_table_visitor);
}
@@ -2051,8 +1809,12 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
-STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
-STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
+STATIC_ASSERT((FixedDoubleArray::kHeaderSize &
+ kDoubleAlignmentMask) == 0); // NOLINT
+STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset &
+ kDoubleAlignmentMask) == 0); // NOLINT
+STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
+ kDoubleAlignmentMask) == 0); // NOLINT
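[Editor's note — illustrative sketch, not part of the patch] These STATIC_ASSERTs guarantee that the asserted offsets keep embedded doubles 8-byte aligned; a self-contained model of the mask arithmetic, with the constant value assumed:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uintptr_t kDoubleAlignment = 8;  // assumed: sizeof(double)
    const uintptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
    // An offset with no low bits set under the mask is double-aligned.
    uintptr_t first_entry_offset = 16;  // hypothetical header size
    assert((first_entry_offset & kDoubleAlignmentMask) == 0);
    // EnsureDoubleAligned-style rounding of an arbitrary address:
    uintptr_t addr = 0x1004;
    uintptr_t aligned = (addr + kDoubleAlignmentMask) & ~kDoubleAlignmentMask;
    assert(aligned == 0x1008);
    return 0;
  }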
INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
@@ -2093,6 +1855,8 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitByteArray, &EvacuateByteArray);
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
+ table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
+ table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
table_.Register(kVisitNativeContext,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
@@ -2114,11 +1878,7 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
template VisitSpecialized<SharedFunctionInfo::kSize>);
- table_.Register(kVisitJSWeakMap,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- Visit);
-
- table_.Register(kVisitJSWeakSet,
+ table_.Register(kVisitJSWeakCollection,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
@@ -2225,7 +1985,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object,
int object_size) {
- SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
SLOW_ASSERT(object->Size() == object_size);
int allocation_size = object_size;
@@ -2236,20 +1996,18 @@ class ScavengingVisitor : public StaticVisitorBase {
Heap* heap = map->GetHeap();
if (heap->ShouldBePromoted(object->address(), object_size)) {
- MaybeObject* maybe_result;
+ AllocationResult allocation;
if (object_contents == DATA_OBJECT) {
ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
- maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
+ allocation = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
- maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
+ allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
}
- Object* result = NULL; // Initialization to please compiler.
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
-
+ HeapObject* target = NULL; // Initialization to please compiler.
+ if (allocation.To(&target)) {
if (alignment != kObjectAlignment) {
target = EnsureDoubleAligned(heap, target, allocation_size);
}
@@ -2269,15 +2027,15 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
- heap->tracer()->increment_promoted_objects_size(object_size);
+ heap->IncrementPromotedObjectsSize(object_size);
return;
}
}
ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
- MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
+ AllocationResult allocation =
+ heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
- Object* result = allocation->ToObjectUnchecked();
- HeapObject* target = HeapObject::cast(result);
+ HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());
if (alignment != kObjectAlignment) {
target = EnsureDoubleAligned(heap, target, allocation_size);
@@ -2288,6 +2046,7 @@ class ScavengingVisitor : public StaticVisitorBase {
// buffer.
*slot = target;
MigrateObject(heap, object, target, object_size);
+ heap->IncrementSemiSpaceCopiedObjectSize(object_size);
return;
}
@@ -2333,6 +2092,24 @@ class ScavengingVisitor : public StaticVisitorBase {
}
+ static inline void EvacuateFixedTypedArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(
+ map, slot, object, object_size);
+ }
+
+
+ static inline void EvacuateFixedFloat64Array(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
+ EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
+ map, slot, object, object_size);
+ }
+
+
static inline void EvacuateByteArray(Map* map,
HeapObject** slot,
HeapObject* object) {
@@ -2496,11 +2273,11 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
}
-MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
- int instance_size) {
+AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
+ int instance_size) {
Object* result;
- MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+ if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
@@ -2520,15 +2297,15 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
}
-MaybeObject* Heap::AllocateMap(InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind) {
- Object* result;
- MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
- if (!maybe_result->To(&result)) return maybe_result;
+AllocationResult Heap::AllocateMap(InstanceType instance_type,
+ int instance_size,
+ ElementsKind elements_kind) {
+ HeapObject* result;
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+ if (!allocation.To(&result)) return allocation;
- Map* map = reinterpret_cast<Map*>(result);
- map->set_map_no_write_barrier(meta_map());
+ result->set_map_no_write_barrier(meta_map());
+ Map* map = Map::cast(result);
map->set_instance_type(instance_type);
map->set_visitor_id(
StaticVisitorBase::GetVisitorId(instance_type, instance_size));
@@ -2554,53 +2331,19 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
}
-MaybeObject* Heap::AllocateCodeCache() {
- CodeCache* code_cache;
- { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
- if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
+AllocationResult Heap::AllocateFillerObject(int size,
+ bool double_align,
+ AllocationSpace space) {
+ HeapObject* obj;
+ { AllocationResult allocation = AllocateRaw(size, space, space);
+ if (!allocation.To(&obj)) return allocation;
}
- code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
- return code_cache;
-}
-
-
-MaybeObject* Heap::AllocatePolymorphicCodeCache() {
- return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
-}
-
-
-MaybeObject* Heap::AllocateAccessorPair() {
- AccessorPair* accessors;
- { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
- }
- accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
- accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
- accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
- return accessors;
-}
-
-
-MaybeObject* Heap::AllocateTypeFeedbackInfo() {
- TypeFeedbackInfo* info;
- { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
- if (!maybe_info->To(&info)) return maybe_info;
- }
- info->initialize_storage();
- info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
- return info;
-}
-
-
-MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
- AliasedArgumentsEntry* entry;
- { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
- if (!maybe_entry->To(&entry)) return maybe_entry;
- }
- entry->set_aliased_context_slot(aliased_context_slot);
- return entry;
+#ifdef DEBUG
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ ASSERT(chunk->owner()->identity() == space);
+#endif
+ CreateFillerObjectAt(obj->address(), size);
+ return obj;
}
@@ -2629,51 +2372,65 @@ const Heap::StructTable Heap::struct_table[] = {
bool Heap::CreateInitialMaps() {
- Object* obj;
- { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
+ HeapObject* obj;
+ { AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
+ if (!allocation.To(&obj)) return false;
}
// Map::cast cannot be used due to uninitialized map field.
Map* new_meta_map = reinterpret_cast<Map*>(obj);
set_meta_map(new_meta_map);
new_meta_map->set_map(new_meta_map);
- { MaybeObject* maybe_obj =
- AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_array_map(Map::cast(obj));
+ { // Partial map allocation
+#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
+ { Map* map; \
+ if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
+ set_##field_name##_map(map); \
+ }
+
+ ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
+ ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
+ ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
+ ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
+ constant_pool_array);
- { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
+#undef ALLOCATE_PARTIAL_MAP
}
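[Editor's note — illustrative expansion, not part of the patch] The ALLOCATE_PARTIAL_MAP macro above is purely mechanical; for instance, the `undefined` entry expands to roughly:

  { Map* map;
    if (!AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize).To(&map)) return false;
    set_undefined_map(map);
  }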
- set_oddball_map(Map::cast(obj));
// Allocate the empty array.
- { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
- if (!maybe_obj->ToObject(&obj)) return false;
+ { AllocationResult allocation = AllocateEmptyFixedArray();
+ if (!allocation.To(&obj)) return false;
}
set_empty_fixed_array(FixedArray::cast(obj));
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
- if (!maybe_obj->ToObject(&obj)) return false;
+ { AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
+ if (!allocation.To(&obj)) return false;
}
set_null_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kNull);
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
- if (!maybe_obj->ToObject(&obj)) return false;
+ { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
+ if (!allocation.To(&obj)) return false;
}
set_undefined_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kUndefined);
ASSERT(!InNewSpace(undefined_value()));
+ // Set preliminary exception sentinel value before actually initializing it.
+ set_exception(null_value());
+
// Allocate the empty descriptor array.
- { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
- if (!maybe_obj->ToObject(&obj)) return false;
+ { AllocationResult allocation = AllocateEmptyFixedArray();
+ if (!allocation.To(&obj)) return false;
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
+ // Allocate the constant pool array.
+ { AllocationResult allocation = AllocateEmptyConstantPoolArray();
+ if (!allocation.To(&obj)) return false;
+ }
+ set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
+
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
@@ -2686,10 +2443,21 @@ bool Heap::CreateInitialMaps() {
fixed_array_map()->init_back_pointer(undefined_value());
fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
- oddball_map()->set_code_cache(empty_fixed_array());
- oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
- oddball_map()->init_back_pointer(undefined_value());
- oddball_map()->set_instance_descriptors(empty_descriptor_array());
+ undefined_map()->set_code_cache(empty_fixed_array());
+ undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
+ undefined_map()->init_back_pointer(undefined_value());
+ undefined_map()->set_instance_descriptors(empty_descriptor_array());
+
+ null_map()->set_code_cache(empty_fixed_array());
+ null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
+ null_map()->init_back_pointer(undefined_value());
+ null_map()->set_instance_descriptors(empty_descriptor_array());
+
+ constant_pool_array_map()->set_code_cache(empty_fixed_array());
+ constant_pool_array_map()->set_dependent_code(
+ DependentCode::cast(empty_fixed_array()));
+ constant_pool_array_map()->init_back_pointer(undefined_value());
+ constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@@ -2698,429 +2466,236 @@ bool Heap::CreateInitialMaps() {
fixed_array_map()->set_prototype(null_value());
fixed_array_map()->set_constructor(null_value());
- oddball_map()->set_prototype(null_value());
- oddball_map()->set_constructor(null_value());
+ undefined_map()->set_prototype(null_value());
+ undefined_map()->set_constructor(null_value());
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_cow_array_map(Map::cast(obj));
- ASSERT(fixed_array_map() != fixed_cow_array_map());
+ null_map()->set_prototype(null_value());
+ null_map()->set_constructor(null_value());
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_scope_info_map(Map::cast(obj));
+ constant_pool_array_map()->set_prototype(null_value());
+ constant_pool_array_map()->set_constructor(null_value());
- { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_heap_number_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_symbol_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_foreign_map(Map::cast(obj));
-
- for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
- const StringTypeTable& entry = string_type_table[i];
- { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
- if (!maybe_obj->ToObject(&obj)) return false;
+ { // Map allocation
+#define ALLOCATE_MAP(instance_type, size, field_name) \
+ { Map* map; \
+ if (!AllocateMap((instance_type), size).To(&map)) return false; \
+ set_##field_name##_map(map); \
}
- roots_[entry.index] = Map::cast(obj);
- }
-
- { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undetectable_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- { MaybeObject* maybe_obj =
- AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undetectable_ascii_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_double_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_constant_pool_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_free_space_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_byte_array(ByteArray::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_pixel_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_short_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_short_array_map(Map::cast(obj));
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_int_array_map(Map::cast(obj));
+#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
+ ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
+ ASSERT(fixed_array_map() != fixed_cow_array_map());
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+ ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
+ ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
+ ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
+
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
+ const StringTypeTable& entry = string_type_table[i];
+ { AllocationResult allocation = AllocateMap(entry.type, entry.size);
+ if (!allocation.To(&obj)) return false;
+ }
+ // Mark cons string maps as unstable, because their objects can change
+ // maps during GC.
+ Map* map = Map::cast(obj);
+ if (StringShape(entry.type).IsCons()) map->mark_unstable();
+ roots_[entry.index] = map;
+ }
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_int_array_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
+ undetectable_string_map()->set_is_undetectable();
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_float_array_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
+ undetectable_ascii_string_map()->set_is_undetectable();
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_non_strict_arguments_elements_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
+ ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
+ ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_double_array_map(Map::cast(obj));
+#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+ ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
+ external_##type##_array)
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_byte_array(ExternalArray::cast(obj));
+ TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
+#undef ALLOCATE_EXTERNAL_ARRAY_MAP
- { MaybeObject* maybe_obj =
- AllocateEmptyExternalArray(kExternalUnsignedByteArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+ ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, \
+ fixed_##type##_array)
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_short_array(ExternalArray::cast(obj));
+ TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
+#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
- kExternalUnsignedShortArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_int_array(ExternalArray::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateEmptyExternalArray(kExternalUnsignedIntArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_float_array(ExternalArray::cast(obj));
+ ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_double_array(ExternalArray::cast(obj));
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
+ ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
+ ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
+ ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_pixel_array(ExternalArray::cast(obj));
- { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_code_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_cell_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
- PropertyCell::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_global_property_cell_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_one_pointer_filler_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_two_pointer_filler_map(Map::cast(obj));
-
- for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
- const StructTable& entry = struct_table[i];
- { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
- if (!maybe_obj->ToObject(&obj)) return false;
+ for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
+ const StructTable& entry = struct_table[i];
+ Map* map;
+ if (!AllocateMap(entry.type, entry.size).To(&map))
+ return false;
+ roots_[entry.index] = map;
}
- roots_[entry.index] = Map::cast(obj);
- }
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_hash_table_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_function_context_map(Map::cast(obj));
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_catch_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_with_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_block_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_module_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_global_context_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
+ native_context_map()->set_dictionary_map(true);
+ native_context_map()->set_visitor_id(
+ StaticVisitorBase::kVisitNativeContext);
+
+ ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
+ shared_function_info)
+
+ ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
+ message_object)
+ ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
+ external)
+ external_map()->set_is_extensible(false);
+#undef ALLOCATE_VARSIZE_MAP
+#undef ALLOCATE_MAP
+ }
+
+ { // Empty arrays
+ { ByteArray* byte_array;
+ if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
+ set_empty_byte_array(byte_array);
+ }
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- Map* native_context_map = Map::cast(obj);
- native_context_map->set_dictionary_map(true);
- native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
- set_native_context_map(native_context_map);
+#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
+ { ExternalArray* obj; \
+ if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
+ return false; \
+ set_empty_external_##type##_array(obj); \
+ }
- { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
- SharedFunctionInfo::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_shared_function_info_map(Map::cast(obj));
+ TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
+#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
- { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
- JSMessageObject::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_message_object_map(Map::cast(obj));
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { FixedTypedArrayBase* obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
+ return false; \
+ set_empty_fixed_##type##_array(obj); \
+ }
- Map* external_map;
- { MaybeObject* maybe_obj =
- AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
- if (!maybe_obj->To(&external_map)) return false;
+ TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
+#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
}
- external_map->set_is_extensible(false);
- set_external_map(external_map);
-
ASSERT(!InNewSpace(empty_fixed_array()));
return true;
}
-MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
+AllocationResult Heap::AllocateHeapNumber(double value,
+ PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
int size = HeapNumber::kSize;
- STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
+ STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
+
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&result)) return allocation;
}
- HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
+ result->set_map_no_write_barrier(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
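
The unwrap-and-propagate shape above recurs throughout this patch: an
AllocationResult either yields a HeapObject through To() or signals that the
caller should retry after a GC. A minimal caller sketch (the surrounding
context, heap pointer, and retry comment are illustrative assumptions, not
lines from this diff):

    HeapObject* raw;
    AllocationResult allocation = heap->AllocateHeapNumber(0.5, TENURED);
    if (!allocation.To(&raw)) {
      // Failed allocation: propagate so the caller can collect garbage
      // and retry (the retry policy is the caller's, not shown here).
      return allocation;
    }
    HeapNumber* number = HeapNumber::cast(raw);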
-MaybeObject* Heap::AllocateCell(Object* value) {
+AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
- STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize);
+ STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
+ if (!allocation.To(&result)) return allocation;
}
- HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
+ result->set_map_no_write_barrier(cell_map());
Cell::cast(result)->set_value(value);
return result;
}
-MaybeObject* Heap::AllocatePropertyCell() {
+AllocationResult Heap::AllocatePropertyCell() {
int size = PropertyCell::kSize;
- STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize);
+ STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
- Object* result;
- MaybeObject* maybe_result =
+ HeapObject* result;
+ AllocationResult allocation =
AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (!allocation.To(&result)) return allocation;
- HeapObject::cast(result)->set_map_no_write_barrier(
- global_property_cell_map());
+ result->set_map_no_write_barrier(global_property_cell_map());
PropertyCell* cell = PropertyCell::cast(result);
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
cell->set_value(the_hole_value());
- cell->set_type(Type::None());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
- Box* result;
- MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
- if (!maybe_result->To(&result)) return maybe_result;
- result->set_value(value);
+ cell->set_type(HeapType::None());
return result;
}
-MaybeObject* Heap::AllocateAllocationSite() {
- AllocationSite* site;
- MaybeObject* maybe_result = Allocate(allocation_site_map(),
- OLD_POINTER_SPACE);
- if (!maybe_result->To(&site)) return maybe_result;
- site->Initialize();
-
- // Link the site
- site->set_weak_next(allocation_sites_list());
- set_allocation_sites_list(site);
- return site;
-}
-
-
-MaybeObject* Heap::CreateOddball(const char* to_string,
- Object* to_number,
- byte kind) {
- Object* result;
- { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
-}
-
-
-bool Heap::CreateApiObjects() {
- Object* obj;
+void Heap::CreateApiObjects() {
+ HandleScope scope(isolate());
+ Factory* factory = isolate()->factory();
+ Handle<Map> new_neander_map =
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
// Don't use Smi-only elements optimizations for objects with the neander
// map. There are too many cases where element values are set directly,
// with no bottleneck at which to trap the Smi-only -> fast elements
// transition, and there appears to be no benefit in optimizing this case.
- Map* new_neander_map = Map::cast(obj);
new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
- set_neander_map(new_neander_map);
+ set_neander_map(*new_neander_map);
- { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- Object* elements;
- { MaybeObject* maybe_elements = AllocateFixedArray(2);
- if (!maybe_elements->ToObject(&elements)) return false;
- }
- FixedArray::cast(elements)->set(0, Smi::FromInt(0));
- JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
- set_message_listeners(JSObject::cast(obj));
-
- return true;
+ Handle<JSObject> listeners = factory->NewNeanderObject();
+ Handle<FixedArray> elements = factory->NewFixedArray(2);
+ elements->set(0, Smi::FromInt(0));
+ listeners->set_elements(*elements);
+ set_message_listeners(*listeners);
}
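
CreateApiObjects now allocates through the Factory under a HandleScope, so
intermediate objects stay rooted across later allocations. A reduced sketch of
why that matters (the raw-pointer half is shown only as a comment, since it is
exactly the hazard the handle version avoids):

    HandleScope scope(isolate());
    Factory* factory = isolate()->factory();
    // With raw pointers, a second allocation can trigger GC and relocate
    // the first object, leaving the raw pointer dangling:
    //   FixedArray* raw = ...;       // unrooted raw pointer
    //   factory->NewFixedArray(2);   // GC here may move *raw
    // Handles indirect through the scope, so they follow relocation:
    Handle<FixedArray> safe = factory->NewFixedArray(2);
    factory->NewFixedArray(2);       // GC-safe; 'safe' remains valid.
    safe->set(0, Smi::FromInt(0));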
void Heap::CreateJSEntryStub() {
- JSEntryStub stub;
- set_js_entry_code(*stub.GetCode(isolate()));
+ JSEntryStub stub(isolate());
+ set_js_entry_code(*stub.GetCode());
}
void Heap::CreateJSConstructEntryStub() {
- JSConstructEntryStub stub;
- set_js_construct_entry_code(*stub.GetCode(isolate()));
+ JSConstructEntryStub stub(isolate());
+ set_js_construct_entry_code(*stub.GetCode());
}
@@ -3130,6 +2705,17 @@ void Heap::CreateFixedStubs() {
// This eliminates the need for doing dictionary lookups in the
// stub cache for these stubs.
HandleScope scope(isolate());
+
+ // Create stubs that should be there, so we don't unexpectedly have to
+ // create them if we need them during the creation of another stub.
+ // Stub creation mixes raw pointers and handles in an unsafe manner so
+ // we cannot create stubs while we are creating stubs.
+ CodeStub::GenerateStubsAheadOfTime(isolate());
+
+ // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
+ // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
+ // is created.
+
// gcc-4.4 has a problem generating correct code for the following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
@@ -3140,123 +2726,89 @@ void Heap::CreateFixedStubs() {
// To work around the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime(isolate());
}
-void Heap::CreateStubsRequiringBuiltins() {
+void Heap::CreateInitialObjects() {
HandleScope scope(isolate());
- CodeStub::GenerateStubsRequiringBuiltinsAheadOfTime(isolate());
-}
-
-
-bool Heap::CreateInitialObjects() {
- Object* obj;
+ Factory* factory = isolate()->factory();
// The -0 value must be set before NumberFromDouble works.
- { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_minus_zero_value(HeapNumber::cast(obj));
+ set_minus_zero_value(*factory->NewHeapNumber(-0.0, TENURED));
ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
- { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_nan_value(HeapNumber::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_infinity_value(HeapNumber::cast(obj));
+ set_nan_value(*factory->NewHeapNumber(OS::nan_value(), TENURED));
+ set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, TENURED));
// The hole has not been created yet, but we want to put something
// predictable in the gaps in the string table, so let's make that Smi zero.
set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
// Allocate initial string table.
- { MaybeObject* maybe_obj =
- StringTable::Allocate(this, kInitialStringTableSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- // Don't use set_string_table() due to asserts.
- roots_[kStringTableRootIndex] = obj;
+ set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
// Finish initializing oddballs after creating the string table.
- { MaybeObject* maybe_obj =
- undefined_value()->Initialize(this,
- "undefined",
- nan_value(),
- Oddball::kUndefined);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
+ Oddball::Initialize(isolate(),
+ factory->undefined_value(),
+ "undefined",
+ factory->nan_value(),
+ Oddball::kUndefined);
// Initialize the null_value.
- { MaybeObject* maybe_obj = null_value()->Initialize(
- this, "null", Smi::FromInt(0), Oddball::kNull);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
-
- { MaybeObject* maybe_obj = CreateOddball("true",
- Smi::FromInt(1),
- Oddball::kTrue);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_true_value(Oddball::cast(obj));
-
- { MaybeObject* maybe_obj = CreateOddball("false",
- Smi::FromInt(0),
- Oddball::kFalse);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_false_value(Oddball::cast(obj));
-
- { MaybeObject* maybe_obj = CreateOddball("hole",
- Smi::FromInt(-1),
- Oddball::kTheHole);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_the_hole_value(Oddball::cast(obj));
-
- { MaybeObject* maybe_obj = CreateOddball("uninitialized",
- Smi::FromInt(-1),
- Oddball::kUninitialized);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_uninitialized_value(Oddball::cast(obj));
-
- { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-4),
- Oddball::kArgumentMarker);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_arguments_marker(Oddball::cast(obj));
-
- { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
- Smi::FromInt(-2),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_no_interceptor_result_sentinel(obj);
-
- { MaybeObject* maybe_obj = CreateOddball("termination_exception",
- Smi::FromInt(-3),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_termination_exception(obj);
+ Oddball::Initialize(isolate(),
+ factory->null_value(),
+ "null",
+ handle(Smi::FromInt(0), isolate()),
+ Oddball::kNull);
+
+ set_true_value(*factory->NewOddball(factory->boolean_map(),
+ "true",
+ handle(Smi::FromInt(1), isolate()),
+ Oddball::kTrue));
+
+ set_false_value(*factory->NewOddball(factory->boolean_map(),
+ "false",
+ handle(Smi::FromInt(0), isolate()),
+ Oddball::kFalse));
+
+ set_the_hole_value(*factory->NewOddball(factory->the_hole_map(),
+ "hole",
+ handle(Smi::FromInt(-1), isolate()),
+ Oddball::kTheHole));
+
+ set_uninitialized_value(
+ *factory->NewOddball(factory->uninitialized_map(),
+ "uninitialized",
+ handle(Smi::FromInt(-1), isolate()),
+ Oddball::kUninitialized));
+
+ set_arguments_marker(*factory->NewOddball(factory->arguments_marker_map(),
+ "arguments_marker",
+ handle(Smi::FromInt(-4), isolate()),
+ Oddball::kArgumentMarker));
+
+ set_no_interceptor_result_sentinel(
+ *factory->NewOddball(factory->no_interceptor_result_sentinel_map(),
+ "no_interceptor_result_sentinel",
+ handle(Smi::FromInt(-2), isolate()),
+ Oddball::kOther));
+
+ set_termination_exception(
+ *factory->NewOddball(factory->termination_exception_map(),
+ "termination_exception",
+ handle(Smi::FromInt(-3), isolate()),
+ Oddball::kOther));
+
+ set_exception(
+ *factory->NewOddball(factory->exception_map(),
+ "exception",
+ handle(Smi::FromInt(-5), isolate()),
+ Oddball::kException));
for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
- { MaybeObject* maybe_obj =
- InternalizeUtf8String(constant_string_table[i].contents);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- roots_[constant_string_table[i].index] = String::cast(obj);
+ Handle<String> str =
+ factory->InternalizeUtf8String(constant_string_table[i].contents);
+ roots_[constant_string_table[i].index] = *str;
}
// Allocate the hidden string which is used to identify the hidden properties
@@ -3265,31 +2817,19 @@ bool Heap::CreateInitialObjects() {
// loop above because it needs to be allocated manually with the special
// hash code in place. The hash code for the hidden_string is zero to ensure
// that it will always be at the first entry in property descriptors.
- { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
+ hidden_string_ = *factory->NewOneByteInternalizedString(
OneByteVector("", 0), String::kEmptyStringHash);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- hidden_string_ = String::cast(obj);
- // Allocate the code_stubs dictionary. The initial size is set to avoid
+ // Create the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_code_stubs(UnseededNumberDictionary::cast(obj));
-
+ set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
- // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
+ // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
// is set to avoid expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
+ set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
- { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
+ set_polymorphic_code_cache(PolymorphicCodeCache::cast(
+ *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
set_instanceof_cache_function(Smi::FromInt(0));
set_instanceof_cache_map(Smi::FromInt(0));
@@ -3298,82 +2838,61 @@ bool Heap::CreateInitialObjects() {
CreateFixedStubs();
// Allocate the dictionary of intrinsic function names.
- { MaybeObject* maybe_obj =
- NameDictionary::Allocate(this, Runtime::kNumFunctions);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
- obj);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_intrinsic_function_names(NameDictionary::cast(obj));
+ Handle<NameDictionary> intrinsic_names =
+ NameDictionary::New(isolate(), Runtime::kNumFunctions);
+ Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
+ set_intrinsic_function_names(*intrinsic_names);
- { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_number_string_cache(FixedArray::cast(obj));
+ set_number_string_cache(*factory->NewFixedArray(
+ kInitialNumberStringCacheSize * 2, TENURED));
// Allocate cache for single character one byte strings.
- { MaybeObject* maybe_obj =
- AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_single_character_string_cache(FixedArray::cast(obj));
+ set_single_character_string_cache(*factory->NewFixedArray(
+ String::kMaxOneByteCharCode + 1, TENURED));
- // Allocate cache for string split.
- { MaybeObject* maybe_obj = AllocateFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_string_split_cache(FixedArray::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_regexp_multiple_cache(FixedArray::cast(obj));
+ // Allocate cache for string split and regexp-multiple.
+ set_string_split_cache(*factory->NewFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+ set_regexp_multiple_cache(*factory->NewFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
// Allocate cache for external strings pointing to native source code.
- { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_natives_source_cache(FixedArray::cast(obj));
+ set_natives_source_cache(*factory->NewFixedArray(
+ Natives::GetBuiltinsCount()));
+
+ set_undefined_cell(*factory->NewCell(factory->undefined_value()));
+
+ // The symbol registry is initialized lazily.
+ set_symbol_registry(undefined_value());
// Allocate object to hold object observation state.
- { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_observation_state(JSObject::cast(obj));
+ set_observation_state(*factory->NewJSObjectFromMap(
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
- { MaybeObject* maybe_obj = AllocateSymbol();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- Symbol::cast(obj)->set_is_private(true);
- set_frozen_symbol(Symbol::cast(obj));
+ // Microtask queue uses the empty fixed array as a sentinel for "empty".
+ // Number of queued microtasks stored in Isolate::pending_microtask_count().
+ set_microtask_queue(empty_fixed_array());
- { MaybeObject* maybe_obj = AllocateSymbol();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- Symbol::cast(obj)->set_is_private(true);
- set_elements_transition_symbol(Symbol::cast(obj));
+ set_frozen_symbol(*factory->NewPrivateSymbol());
+ set_nonexistent_symbol(*factory->NewPrivateSymbol());
+ set_elements_transition_symbol(*factory->NewPrivateSymbol());
+ set_uninitialized_symbol(*factory->NewPrivateSymbol());
+ set_megamorphic_symbol(*factory->NewPrivateSymbol());
+ set_observed_symbol(*factory->NewPrivateSymbol());
- { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
- set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
+ Handle<SeededNumberDictionary> slow_element_dictionary =
+ SeededNumberDictionary::New(isolate(), 0, TENURED);
+ slow_element_dictionary->set_requires_slow_elements();
+ set_empty_slow_element_dictionary(*slow_element_dictionary);
- { MaybeObject* maybe_obj = AllocateSymbol();
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- Symbol::cast(obj)->set_is_private(true);
- set_observed_symbol(Symbol::cast(obj));
+ set_materialized_objects(*factory->NewFixedArray(0, TENURED));
// Handling of script id generation is in Factory::NewScript.
- set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
+ set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+
+ set_allocation_sites_scratchpad(*factory->NewFixedArray(
+ kAllocationSiteScratchpadSize, TENURED));
+ InitializeAllocationSitesScratchpad();
// Initialize keyed lookup cache.
isolate_->keyed_lookup_cache()->Clear();
@@ -3386,8 +2905,6 @@ bool Heap::CreateInitialObjects() {
// Initialize compilation cache.
isolate_->compilation_cache()->Clear();
-
- return true;
}
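
A recurring detail in CreateInitialObjects: immediate Smis are wrapped as
handle(Smi::FromInt(n), isolate()) purely so they can flow through
Handle<Object> parameters. Since a Smi is a tagged immediate rather than a
heap pointer, there is nothing for the GC to relocate; the handle exists for
API uniformity. A one-line sketch (variable name illustrative):

    Handle<Object> minus_one = handle(Smi::FromInt(-1), isolate());
    // The handle slot holds the tagged immediate itself; GC never rewrites it.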
@@ -3459,60 +2976,58 @@ Object* RegExpResultsCache::Lookup(Heap* heap,
}
-void RegExpResultsCache::Enter(Heap* heap,
- String* key_string,
- Object* key_pattern,
- FixedArray* value_array,
+void RegExpResultsCache::Enter(Isolate* isolate,
+ Handle<String> key_string,
+ Handle<Object> key_pattern,
+ Handle<FixedArray> value_array,
ResultsCacheType type) {
- FixedArray* cache;
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> cache;
if (!key_string->IsInternalizedString()) return;
if (type == STRING_SPLIT_SUBSTRINGS) {
ASSERT(key_pattern->IsString());
if (!key_pattern->IsInternalizedString()) return;
- cache = heap->string_split_cache();
+ cache = factory->string_split_cache();
} else {
ASSERT(type == REGEXP_MULTIPLE_INDICES);
ASSERT(key_pattern->IsFixedArray());
- cache = heap->regexp_multiple_cache();
+ cache = factory->regexp_multiple_cache();
}
uint32_t hash = key_string->Hash();
uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
~(kArrayEntriesPerCacheEntry - 1));
if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index + kStringOffset, key_string);
- cache->set(index + kPatternOffset, key_pattern);
- cache->set(index + kArrayOffset, value_array);
+ cache->set(index + kStringOffset, *key_string);
+ cache->set(index + kPatternOffset, *key_pattern);
+ cache->set(index + kArrayOffset, *value_array);
} else {
uint32_t index2 =
((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index2 + kStringOffset, key_string);
- cache->set(index2 + kPatternOffset, key_pattern);
- cache->set(index2 + kArrayOffset, value_array);
+ cache->set(index2 + kStringOffset, *key_string);
+ cache->set(index2 + kPatternOffset, *key_pattern);
+ cache->set(index2 + kArrayOffset, *value_array);
} else {
cache->set(index2 + kStringOffset, Smi::FromInt(0));
cache->set(index2 + kPatternOffset, Smi::FromInt(0));
cache->set(index2 + kArrayOffset, Smi::FromInt(0));
- cache->set(index + kStringOffset, key_string);
- cache->set(index + kPatternOffset, key_pattern);
- cache->set(index + kArrayOffset, value_array);
+ cache->set(index + kStringOffset, *key_string);
+ cache->set(index + kPatternOffset, *key_pattern);
+ cache->set(index + kArrayOffset, *value_array);
}
}
// If the array is a reasonably short list of substrings, convert it into a
// list of internalized strings.
if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
for (int i = 0; i < value_array->length(); i++) {
- String* str = String::cast(value_array->get(i));
- Object* internalized_str;
- MaybeObject* maybe_string = heap->InternalizeString(str);
- if (maybe_string->ToObject(&internalized_str)) {
- value_array->set(i, internalized_str);
- }
+ Handle<String> str(String::cast(value_array->get(i)), isolate);
+ Handle<String> internalized_str = factory->InternalizeString(str);
+ value_array->set(i, *internalized_str);
}
}
// Convert backing store to a copy-on-write array.
- value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
+ value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
}
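
RegExpResultsCache::Enter probes two entry-aligned slots per hash. A worked
example of the index arithmetic, assuming kRegExpResultsCacheSize == 0x100 and
kArrayEntriesPerCacheEntry == 4 (both assumed here; the constants live in the
header, not in this hunk):

    uint32_t hash = 0x12AB;
    uint32_t index = (hash & (0x100 - 1)) & ~(4u - 1);  // 0xAB -> 0xA8
    uint32_t index2 = (index + 4) & (0x100 - 1);        // second probe: 0xAC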
@@ -3523,18 +3038,11 @@ void RegExpResultsCache::Clear(FixedArray* cache) {
}
-MaybeObject* Heap::AllocateInitialNumberStringCache() {
- MaybeObject* maybe_obj =
- AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
- return maybe_obj;
-}
-
-
int Heap::FullSizeNumberStringCacheLength() {
// Compute the size of the number string cache based on the max newspace size.
// The number string cache has a minimum size based on twice the initial cache
// size to ensure that it is bigger after being made 'full size'.
- int number_string_cache_size = max_semispace_size_ / 512;
+ int number_string_cache_size = max_semi_space_size_ / 512;
number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
Min(0x4000, number_string_cache_size));
// There is a string and a number per entry so the length is twice the number
@@ -3543,24 +3051,6 @@ int Heap::FullSizeNumberStringCacheLength() {
}
-void Heap::AllocateFullSizeNumberStringCache() {
- // The idea is to have a small number string cache in the snapshot to keep
- // boot-time memory usage down. If the number string cache has already been
- // expanded while creating the snapshot, that goal was not met.
- ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
- MaybeObject* maybe_obj =
- AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
- Object* new_cache;
- if (maybe_obj->ToObject(&new_cache)) {
- // We don't bother to repopulate the cache with entries from the old cache.
- // It will be repopulated soon enough with new strings.
- set_number_string_cache(FixedArray::cast(new_cache));
- }
- // If allocation fails then we just return without doing anything. It is only
- // a cache, so best effort is OK here.
-}
-
-
void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
int len = number_string_cache()->length();
@@ -3570,151 +3060,117 @@ void Heap::FlushNumberStringCache() {
}
-static inline int double_get_hash(double d) {
- DoubleRepresentation rep(d);
- return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
+void Heap::FlushAllocationSitesScratchpad() {
+ for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
+ allocation_sites_scratchpad()->set_undefined(i);
+ }
+ allocation_sites_scratchpad_length_ = 0;
}
-static inline int smi_get_hash(Smi* smi) {
- return smi->value();
+void Heap::InitializeAllocationSitesScratchpad() {
+ ASSERT(allocation_sites_scratchpad()->length() ==
+ kAllocationSiteScratchpadSize);
+ for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
+ allocation_sites_scratchpad()->set_undefined(i);
+ }
}
-Object* Heap::GetNumberStringCache(Object* number) {
- int hash;
- int mask = (number_string_cache()->length() >> 1) - 1;
- if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number)) & mask;
- } else {
- hash = double_get_hash(number->Number()) & mask;
- }
- Object* key = number_string_cache()->get(hash * 2);
- if (key == number) {
- return String::cast(number_string_cache()->get(hash * 2 + 1));
- } else if (key->IsHeapNumber() &&
- number->IsHeapNumber() &&
- key->Number() == number->Number()) {
- return String::cast(number_string_cache()->get(hash * 2 + 1));
+void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode) {
+ if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
+ // We cannot use the normal write-barrier because slots need to be
+ // recorded with non-incremental marking as well. We have to explicitly
+ // record the slot to take evacuation candidates into account.
+ allocation_sites_scratchpad()->set(
+ allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
+ Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
+ allocation_sites_scratchpad_length_);
+
+ if (mode == RECORD_SCRATCHPAD_SLOT) {
+ // We need to allow slots buffer overflow here since the evacuation
+ // candidates are not part of the global list of old space pages and
+ // releasing an evacuation candidate due to a slots buffer overflow
+ // results in lost pages.
+ mark_compact_collector()->RecordSlot(
+ slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
+ }
+ allocation_sites_scratchpad_length_++;
}
- return undefined_value();
}
-void Heap::SetNumberStringCache(Object* number, String* string) {
- int hash;
- int mask = (number_string_cache()->length() >> 1) - 1;
- if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number)) & mask;
- } else {
- hash = double_get_hash(number->Number()) & mask;
- }
- if (number_string_cache()->get(hash * 2) != undefined_value() &&
- number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
- // The first time we have a hash collision, we move to the full sized
- // number string cache.
- AllocateFullSizeNumberStringCache();
- return;
- }
- number_string_cache()->set(hash * 2, number);
- number_string_cache()->set(hash * 2 + 1, string);
+Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
+ return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}
-MaybeObject* Heap::NumberToString(Object* number,
- bool check_number_string_cache,
- PretenureFlag pretenure) {
- isolate_->counters()->number_to_string_runtime()->Increment();
- if (check_number_string_cache) {
- Object* cached = GetNumberStringCache(number);
- if (cached != undefined_value()) {
- return cached;
- }
- }
+Heap::RootListIndex Heap::RootIndexForExternalArrayType(
+ ExternalArrayType array_type) {
+ switch (array_type) {
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return kExternal##Type##ArrayMapRootIndex;
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str;
- if (number->IsSmi()) {
- int num = Smi::cast(number)->value();
- str = IntToCString(num, buffer);
- } else {
- double num = HeapNumber::cast(number)->value();
- str = DoubleToCString(num, buffer);
- }
+ TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
+#undef ARRAY_TYPE_TO_ROOT_INDEX
- Object* js_string;
- MaybeObject* maybe_js_string =
- AllocateStringFromOneByte(CStrVector(str), pretenure);
- if (maybe_js_string->ToObject(&js_string)) {
- SetNumberStringCache(number, String::cast(js_string));
+ default:
+ UNREACHABLE();
+ return kUndefinedValueRootIndex;
}
- return maybe_js_string;
-}
-
-
-MaybeObject* Heap::Uint32ToString(uint32_t value,
- bool check_number_string_cache) {
- Object* number;
- MaybeObject* maybe = NumberFromUint32(value);
- if (!maybe->To<Object>(&number)) return maybe;
- return NumberToString(number, check_number_string_cache);
}
-Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
- return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
+Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
+ return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
}
-Heap::RootListIndex Heap::RootIndexForExternalArrayType(
+Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
ExternalArrayType array_type) {
switch (array_type) {
- case kExternalByteArray:
- return kExternalByteArrayMapRootIndex;
- case kExternalUnsignedByteArray:
- return kExternalUnsignedByteArrayMapRootIndex;
- case kExternalShortArray:
- return kExternalShortArrayMapRootIndex;
- case kExternalUnsignedShortArray:
- return kExternalUnsignedShortArrayMapRootIndex;
- case kExternalIntArray:
- return kExternalIntArrayMapRootIndex;
- case kExternalUnsignedIntArray:
- return kExternalUnsignedIntArrayMapRootIndex;
- case kExternalFloatArray:
- return kExternalFloatArrayMapRootIndex;
- case kExternalDoubleArray:
- return kExternalDoubleArrayMapRootIndex;
- case kExternalPixelArray:
- return kExternalPixelArrayMapRootIndex;
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return kFixed##Type##ArrayMapRootIndex;
+
+ TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
+#undef ARRAY_TYPE_TO_ROOT_INDEX
+
default:
UNREACHABLE();
return kUndefinedValueRootIndex;
}
}
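
The TYPED_ARRAYS X-macro drives both switches above: each call site supplies a
per-entry macro, and the list expands it once per typed-array type, replacing
the hand-written case ladders in the removed code. A self-contained sketch of
the technique with an invented two-entry list:

    #define DEMO_TYPES(V) \
      V(Uint8, uint8)     \
      V(Int32, int32)

    enum DemoKind {
    #define ENUM_ENTRY(Type, type) kDemo##Type,
      DEMO_TYPES(ENUM_ENTRY)
    #undef ENUM_ENTRY
    };

    static const char* DemoName(DemoKind kind) {
      switch (kind) {
    #define NAME_CASE(Type, type) case kDemo##Type: return #type;
        DEMO_TYPES(NAME_CASE)
    #undef NAME_CASE
      }
      return "unknown";
    }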
+
Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
ElementsKind elementsKind) {
switch (elementsKind) {
- case EXTERNAL_BYTE_ELEMENTS:
- return kEmptyExternalByteArrayRootIndex;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return kEmptyExternalUnsignedByteArrayRootIndex;
- case EXTERNAL_SHORT_ELEMENTS:
- return kEmptyExternalShortArrayRootIndex;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return kEmptyExternalUnsignedShortArrayRootIndex;
- case EXTERNAL_INT_ELEMENTS:
- return kEmptyExternalIntArrayRootIndex;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- return kEmptyExternalUnsignedIntArrayRootIndex;
- case EXTERNAL_FLOAT_ELEMENTS:
- return kEmptyExternalFloatArrayRootIndex;
- case EXTERNAL_DOUBLE_ELEMENTS:
- return kEmptyExternalDoubleArrayRootIndex;
- case EXTERNAL_PIXEL_ELEMENTS:
- return kEmptyExternalPixelArrayRootIndex;
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ return kEmptyExternal##Type##ArrayRootIndex;
+
+ TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
+
+ default:
+ UNREACHABLE();
+ return kUndefinedValueRootIndex;
+ }
+}
+
+
+Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
+ ElementsKind elementsKind) {
+ switch (elementsKind) {
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ return kEmptyFixed##Type##ArrayRootIndex;
+
+ TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
default:
UNREACHABLE();
return kUndefinedValueRootIndex;
@@ -3728,619 +3184,256 @@ ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
}
-
-
-MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
- // We need to distinguish the minus zero value and this cannot be
- // done after conversion to int. Doing this by comparing bit
- // patterns is faster than using fpclassify() et al.
- static const DoubleRepresentation minus_zero(-0.0);
-
- DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
- return AllocateHeapNumber(-0.0, pretenure);
- }
-
- int int_value = FastD2I(value);
- if (value == int_value && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
- }
-
- // Materialize the value in the heap.
- return AllocateHeapNumber(value, pretenure);
+FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
+ return FixedTypedArrayBase::cast(
+ roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
}
-MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
+AllocationResult Heap::AllocateForeign(Address address,
+ PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
- STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Foreign* result;
- MaybeObject* maybe_result = Allocate(foreign_map(), space);
- if (!maybe_result->To(&result)) return maybe_result;
+ AllocationResult allocation = Allocate(foreign_map(), space);
+ if (!allocation.To(&result)) return allocation;
result->set_foreign_address(address);
return result;
}
-MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
- SharedFunctionInfo* share;
- MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
- if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
-
- // Set pointer fields.
- share->set_name(name);
- Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
- share->set_code(illegal);
- share->set_optimized_code_map(Smi::FromInt(0));
- share->set_scope_info(ScopeInfo::Empty(isolate_));
- Code* construct_stub =
- isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
- share->set_construct_stub(construct_stub);
- share->set_instance_class_name(Object_string());
- share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
- share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_ast_node_count(0);
- share->set_counters(0);
-
- // Set integer fields (smi or int, depending on the architecture).
- share->set_length(0);
- share->set_formal_parameter_count(0);
- share->set_expected_nof_properties(0);
- share->set_num_literals(0);
- share->set_start_position_and_type(0);
- share->set_end_position(0);
- share->set_function_token_position(0);
- // All compiler hints default to false or 0.
- share->set_compiler_hints(0);
- share->set_opt_count_and_bailout_reason(0);
-
- return share;
-}
-
-
-MaybeObject* Heap::AllocateJSMessageObject(String* type,
- JSArray* arguments,
- int start_position,
- int end_position,
- Object* script,
- Object* stack_trace,
- Object* stack_frames) {
- Object* result;
- { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSMessageObject* message = JSMessageObject::cast(result);
- message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
- message->initialize_elements();
- message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
- message->set_type(type);
- message->set_arguments(arguments);
- message->set_start_position(start_position);
- message->set_end_position(end_position);
- message->set_script(script);
- message->set_stack_trace(stack_trace);
- message->set_stack_frames(stack_frames);
- return result;
-}
-
-
+AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
+ if (length < 0 || length > ByteArray::kMaxLength) {
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+ }
+ int size = ByteArray::SizeFor(length);
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ }
-// Returns true for a character in a range. Both limits are inclusive.
-static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
- // This makes use of the unsigned wraparound.
- return character - from <= to - from;
+ result->set_map_no_write_barrier(byte_array_map());
+ ByteArray::cast(result)->set_length(length);
+ return result;
}
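
The removed Between helper relies on a classic unsigned-wraparound trick: one
comparison checks both bounds, because a value below 'from' wraps around to a
huge unsigned number. A worked example (values chosen for illustration):

    // from = '0' (48), to = '9' (57), so to - from = 9:
    //   c = '5' (53): 53 - 48 = 5          <= 9 -> in range
    //   c = '/' (47): 47 - 48 = 0xFFFFFFFF >  9 -> out of range
    static inline bool Between(uint32_t c, uint32_t from, uint32_t to) {
      return c - from <= to - from;
    }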
-MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
- Heap* heap,
- uint16_t c1,
- uint16_t c2) {
- String* result;
- // Numeric strings have a different hash algorithm not known by
- // LookupTwoCharsStringIfExists, so we skip this step for such strings.
- if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
- heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
- return result;
- // Now we know the length is 2, we might as well make use of that fact
- // when building the new string.
- } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
- // We can do this.
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- dest[0] = static_cast<uint8_t>(c1);
- dest[1] = static_cast<uint8_t>(c2);
- return result;
+void Heap::CreateFillerObjectAt(Address addr, int size) {
+ if (size == 0) return;
+ HeapObject* filler = HeapObject::FromAddress(addr);
+ if (size == kPointerSize) {
+ filler->set_map_no_write_barrier(one_pointer_filler_map());
+ } else if (size == 2 * kPointerSize) {
+ filler->set_map_no_write_barrier(two_pointer_filler_map());
} else {
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- dest[0] = c1;
- dest[1] = c2;
- return result;
+ filler->set_map_no_write_barrier(free_space_map());
+ FreeSpace::cast(filler)->set_size(size);
}
}
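
CreateFillerObjectAt picks the cheapest filler that exactly covers a gap; only
the FreeSpace variant carries an explicit length word. For example, on a
32-bit build where kPointerSize == 4 (an assumption for the sake of the
numbers), three independent gaps would be filled as:

    heap->CreateFillerObjectAt(gap1, 4);   // one_pointer_filler_map, no size
    heap->CreateFillerObjectAt(gap2, 8);   // two_pointer_filler_map, no size
    heap->CreateFillerObjectAt(gap3, 24);  // free_space_map + set_size(24)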
-MaybeObject* Heap::AllocateConsString(String* first, String* second) {
- int first_length = first->length();
- if (first_length == 0) {
- return second;
- }
-
- int second_length = second->length();
- if (second_length == 0) {
- return first;
- }
+bool Heap::CanMoveObjectStart(HeapObject* object) {
+ Address address = object->address();
+ bool is_in_old_pointer_space = InOldPointerSpace(address);
+ bool is_in_old_data_space = InOldDataSpace(address);
- int length = first_length + second_length;
+ if (lo_space()->Contains(object)) return false;
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the string
- // table to prevent creation of many unneccesary strings.
- if (length == 2) {
- uint16_t c1 = first->Get(0);
- uint16_t c2 = second->Get(0);
- return MakeOrFindTwoCharacterString(this, c1, c2);
- }
-
- bool first_is_one_byte = first->IsOneByteRepresentation();
- bool second_is_one_byte = second->IsOneByteRepresentation();
- bool is_one_byte = first_is_one_byte && second_is_one_byte;
- // Make sure that an out of memory exception is thrown if the length
- // of the new cons string is too large.
- if (length > String::kMaxLength || length < 0) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x4);
- }
-
- bool is_one_byte_data_in_two_byte_string = false;
- if (!is_one_byte) {
- // At least one of the strings uses two-byte representation so we
- // can't use the fast case code for short ASCII strings below, but
- // we can try to save memory if all chars actually fit in ASCII.
- is_one_byte_data_in_two_byte_string =
- first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
- if (is_one_byte_data_in_two_byte_string) {
- isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
- }
- }
-
- // If the resulting string is small make a flat string.
- if (length < ConsString::kMinLength) {
- // Note that neither of the two inputs can be a slice because:
- STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
- ASSERT(first->IsFlat());
- ASSERT(second->IsFlat());
- if (is_one_byte) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawOneByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- // Copy first part.
- const uint8_t* src;
- if (first->IsExternalString()) {
- src = ExternalAsciiString::cast(first)->GetChars();
- } else {
- src = SeqOneByteString::cast(first)->GetChars();
- }
- for (int i = 0; i < first_length; i++) *dest++ = src[i];
- // Copy second part.
- if (second->IsExternalString()) {
- src = ExternalAsciiString::cast(second)->GetChars();
- } else {
- src = SeqOneByteString::cast(second)->GetChars();
- }
- for (int i = 0; i < second_length; i++) *dest++ = src[i];
- return result;
- } else {
- if (is_one_byte_data_in_two_byte_string) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawOneByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
- isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
- return result;
- }
-
- Object* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
- return result;
- }
- }
-
- Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
- cons_ascii_string_map() : cons_string_map();
-
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- DisallowHeapAllocation no_gc;
- ConsString* cons_string = ConsString::cast(result);
- WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
- cons_string->set_length(length);
- cons_string->set_hash_field(String::kEmptyHashField);
- cons_string->set_first(first, mode);
- cons_string->set_second(second, mode);
- return result;
+ Page* page = Page::FromAddress(address);
+ // We can move the object start if:
+ // (1) the object is not in old pointer or old data space,
+ // (2) the page of the object was already swept,
+ // (3) the page was already concurrently swept. This case is an optimization
+ // for concurrent sweeping. The WasSwept predicate for concurrently swept
+ // pages is set after sweeping all pages.
+ return (!is_in_old_pointer_space && !is_in_old_data_space) ||
+ page->WasSwept() ||
+ (mark_compact_collector()->AreSweeperThreadsActivated() &&
+ page->parallel_sweeping() <=
+ MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
}
-MaybeObject* Heap::AllocateSubString(String* buffer,
- int start,
- int end,
- PretenureFlag pretenure) {
- int length = end - start;
- if (length <= 0) {
- return empty_string();
- }
-
- // Make an attempt to flatten the buffer to reduce access time.
- buffer = buffer->TryFlattenGetString();
-
- if (length == 1) {
- return LookupSingleCharacterStringFromCode(buffer->Get(start));
- } else if (length == 2) {
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the string
- // table to prevent creation of many unnecessary strings.
- uint16_t c1 = buffer->Get(start);
- uint16_t c2 = buffer->Get(start + 1);
- return MakeOrFindTwoCharacterString(this, c1, c2);
- }
-
- if (!FLAG_string_slices ||
- !buffer->IsFlat() ||
- length < SlicedString::kMinLength ||
- pretenure == TENURED) {
- Object* result;
- // WriteToFlat takes care of the case when an indirect string has a
- // different encoding from its underlying string. These encodings may
- // differ because of externalization.
- bool is_one_byte = buffer->IsOneByteRepresentation();
- { MaybeObject* maybe_result = is_one_byte
- ? AllocateRawOneByteString(length, pretenure)
- : AllocateRawTwoByteString(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- String* string_result = String::cast(result);
- // Copy the characters into the new object.
- if (is_one_byte) {
- ASSERT(string_result->IsOneByteRepresentation());
- uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
- String::WriteToFlat(buffer, dest, start, end);
+void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
+ if (incremental_marking()->IsMarking() &&
+ Marking::IsBlack(Marking::MarkBitFrom(address))) {
+ if (mode == FROM_GC) {
+ MemoryChunk::IncrementLiveBytesFromGC(address, by);
} else {
- ASSERT(string_result->IsTwoByteRepresentation());
- uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
- String::WriteToFlat(buffer, dest, start, end);
+ MemoryChunk::IncrementLiveBytesFromMutator(address, by);
}
- return result;
- }
-
- ASSERT(buffer->IsFlat());
-#if VERIFY_HEAP
- if (FLAG_verify_heap) {
- buffer->StringVerify();
- }
-#endif
-
- Object* result;
- // When slicing an indirect string we use its encoding for a newly created
- // slice and don't check the encoding of the underlying string. This is safe
- // even if the encodings are different because of externalization. If an
- // indirect ASCII string is pointing to a two-byte string, the two-byte char
- // codes of the underlying string must still fit into ASCII (because
- // externalization must not change char codes).
- { Map* map = buffer->IsOneByteRepresentation()
- ? sliced_ascii_string_map()
- : sliced_string_map();
- MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- DisallowHeapAllocation no_gc;
- SlicedString* sliced_string = SlicedString::cast(result);
- sliced_string->set_length(length);
- sliced_string->set_hash_field(String::kEmptyHashField);
- if (buffer->IsConsString()) {
- ConsString* cons = ConsString::cast(buffer);
- ASSERT(cons->second()->length() == 0);
- sliced_string->set_parent(cons->first());
- sliced_string->set_offset(start);
- } else if (buffer->IsSlicedString()) {
- // Prevent nesting sliced strings.
- SlicedString* parent_slice = SlicedString::cast(buffer);
- sliced_string->set_parent(parent_slice->parent());
- sliced_string->set_offset(start + parent_slice->offset());
- } else {
- sliced_string->set_parent(buffer);
- sliced_string->set_offset(start);
}
- ASSERT(sliced_string->parent()->IsSeqString() ||
- sliced_string->parent()->IsExternalString());
- return result;
}
-MaybeObject* Heap::AllocateExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource) {
- size_t length = resource->length();
- if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x5);
- }
-
- Map* map = external_ascii_string_map();
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
- external_string->set_length(static_cast<int>(length));
- external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
-
- return result;
-}
-
-
-MaybeObject* Heap::AllocateExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource) {
- size_t length = resource->length();
- if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x6);
- }
-
- // For small strings we check whether the resource contains only
- // one byte characters. If yes, we use a different string map.
- static const size_t kOneByteCheckLengthLimit = 32;
- bool is_one_byte = length <= kOneByteCheckLengthLimit &&
- String::IsOneByte(resource->data(), static_cast<int>(length));
- Map* map = is_one_byte ?
- external_string_with_one_byte_data_map() : external_string_map();
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+AllocationResult Heap::AllocateExternalArray(int length,
+ ExternalArrayType array_type,
+ void* external_pointer,
+ PretenureFlag pretenure) {
+ int size = ExternalArray::kAlignedSize;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&result)) return allocation;
}
- ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
- external_string->set_length(static_cast<int>(length));
- external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
-
+ result->set_map_no_write_barrier(
+ MapForExternalArrayType(array_type));
+ ExternalArray::cast(result)->set_length(length);
+ ExternalArray::cast(result)->set_external_pointer(external_pointer);
return result;
}
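// ---------------------------------------------------------------------------
// Aside: a minimal standalone sketch of the layout AllocateExternalArray
// produces. The names below are invented for illustration and are not V8
// API; the point is that the on-heap part is a constant-size header
// (ExternalArray::kAlignedSize above) while the elements live off-heap.
#include <cstdint>

struct ExternalArrayModel {
  const void* map;         // type tag, set via set_map_no_write_barrier
  int32_t length;          // element count
  void* external_pointer;  // off-heap backing store, not managed by the GC
};
// ---------------------------------------------------------------------------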
+static void ForFixedTypedArray(ExternalArrayType array_type,
+ int* element_size,
+ ElementsKind* element_kind) {
+ switch (array_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ *element_size = size; \
+ *element_kind = TYPE##_ELEMENTS; \
+ return;
-MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
- if (code <= String::kMaxOneByteCharCode) {
- Object* value = single_character_string_cache()->get(code);
- if (value != undefined_value()) return value;
-
- uint8_t buffer[1];
- buffer[0] = static_cast<uint8_t>(code);
- Object* result;
- MaybeObject* maybe_result =
- InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
-
- if (!maybe_result->ToObject(&result)) return maybe_result;
- single_character_string_cache()->set(code, result);
- return result;
- }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
- SeqTwoByteString* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
- if (!maybe_result->To<SeqTwoByteString>(&result)) return maybe_result;
+ default:
+ *element_size = 0; // Bogus
+ *element_kind = UINT8_ELEMENTS; // Bogus
+ UNREACHABLE();
}
- result->SeqTwoByteStringSet(0, code);
- return result;
}
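// ---------------------------------------------------------------------------
// Aside: ForFixedTypedArray relies on V8's TYPED_ARRAYS X-macro, which
// expands one (Type, type, TYPE, ctype, size) tuple per element type. A
// cut-down, self-contained sketch of the same technique (the macro entries
// here are illustrative, not the full V8 list):
#include <cstdint>
#include <cstdio>

#define MY_TYPED_ARRAYS(V)            \
  V(Uint8, uint8, UINT8, uint8_t, 1)  \
  V(Int32, int32, INT32, int32_t, 4)  \
  V(Float64, float64, FLOAT64, double, 8)

enum ArrayTypeModel {
#define DECLARE_ENUM(Type, type, TYPE, ctype, size) kExternal##Type##Array,
  MY_TYPED_ARRAYS(DECLARE_ENUM)
#undef DECLARE_ENUM
};

static int ElementSizeFor(ArrayTypeModel t) {
  switch (t) {
#define SIZE_CASE(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                   \
    return size;
    MY_TYPED_ARRAYS(SIZE_CASE)
#undef SIZE_CASE
  }
  return 0;  // not reached
}

int main() { std::printf("%d\n", ElementSizeFor(kExternalInt32Array)); }
// ---------------------------------------------------------------------------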
-MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException(0x7);
+AllocationResult Heap::AllocateFixedTypedArray(int length,
+ ExternalArrayType array_type,
+ PretenureFlag pretenure) {
+ int element_size;
+ ElementsKind elements_kind;
+ ForFixedTypedArray(array_type, &element_size, &elements_kind);
+ int size = OBJECT_POINTER_ALIGN(
+ length * element_size + FixedTypedArrayBase::kDataOffset);
+#ifndef V8_HOST_ARCH_64_BIT
+ if (array_type == kExternalFloat64Array) {
+ size += kPointerSize;
}
- int size = ByteArray::SizeFor(length);
+#endif
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
- byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-void Heap::CreateFillerObjectAt(Address addr, int size) {
- if (size == 0) return;
- HeapObject* filler = HeapObject::FromAddress(addr);
- if (size == kPointerSize) {
- filler->set_map_no_write_barrier(one_pointer_filler_map());
- } else if (size == 2 * kPointerSize) {
- filler->set_map_no_write_barrier(two_pointer_filler_map());
- } else {
- filler->set_map_no_write_barrier(free_space_map());
- FreeSpace::cast(filler)->set_size(size);
- }
-}
+ HeapObject* object;
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&object)) return allocation;
-MaybeObject* Heap::AllocateExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure) {
- int size = ExternalArray::kAlignedSize;
- AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (array_type == kExternalFloat64Array) {
+ object = EnsureDoubleAligned(this, object, size);
}
- reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
- MapForExternalArrayType(array_type));
- reinterpret_cast<ExternalArray*>(result)->set_length(length);
- reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
- external_pointer);
-
- return result;
+ object->set_map(MapForFixedTypedArray(array_type));
+ FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
+ elements->set_length(length);
+ memset(elements->DataPtr(), 0, elements->DataSize());
+ return elements;
}
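// ---------------------------------------------------------------------------
// Aside: on 32-bit hosts AllocateRaw only guarantees pointer alignment, so
// the code above over-allocates kExternalFloat64Array requests by
// kPointerSize and lets EnsureDoubleAligned shift the object. A standalone
// sketch of that address math, assuming a 32-bit host (constants invented
// here):
#include <cassert>
#include <cstdint>

const uintptr_t kPointerSizeModel = 4;
const uintptr_t kDoubleAlignment = 8;
const uintptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

// Given a pointer-aligned block over-allocated by one word, return a
// double-aligned start inside it; the real heap fills the skipped word
// with a one-word filler object so the heap stays iterable.
uintptr_t EnsureDoubleAlignedSketch(uintptr_t addr) {
  if ((addr & kDoubleAlignmentMask) != 0) addr += kPointerSizeModel;
  assert((addr & kDoubleAlignmentMask) == 0);
  return addr;
}
// ---------------------------------------------------------------------------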
-MaybeObject* Heap::CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable,
- bool crankshafted,
- int prologue_offset) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
- ByteArray* reloc_info;
- MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
- if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
-
- // Compute size.
- int body_size = RoundUp(desc.instr_size, kObjectAlignment);
- int obj_size = Code::SizeFor(body_size);
- ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
- MaybeObject* maybe_result;
+AllocationResult Heap::AllocateCode(int object_size,
+ bool immovable) {
+ ASSERT(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
+ AllocationResult allocation;
// Large code objects and code objects which should stay at a fixed address
// are allocated in large object space.
HeapObject* result;
- bool force_lo_space = obj_size > code_space()->AreaSize();
+ bool force_lo_space = object_size > code_space()->AreaSize();
if (force_lo_space) {
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
} else {
- maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
+ allocation = AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
}
- if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
+ if (!allocation.To(&result)) return allocation;
if (immovable && !force_lo_space &&
- // Objects on the first page of each space are never moved.
- !code_space_->FirstPage()->Contains(result->address())) {
+ // Objects on the first page of each space are never moved.
+ !code_space_->FirstPage()->Contains(result->address())) {
// Discard the first code allocation, which was on a page where it could be
// moved.
- CreateFillerObjectAt(result->address(), obj_size);
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
- if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
+ CreateFillerObjectAt(result->address(), object_size);
+ allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
+ if (!allocation.To(&result)) return allocation;
}
- // Initialize the object
result->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
- ASSERT(!isolate_->code_range()->exists() ||
- isolate_->code_range()->contains(code->address()));
- code->set_instruction_size(desc.instr_size);
- code->set_relocation_info(reloc_info);
- code->set_flags(flags);
- code->set_raw_kind_specific_flags1(0);
- code->set_raw_kind_specific_flags2(0);
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
- code->set_check_type(RECEIVER_MAP_CHECK);
- }
- code->set_is_crankshafted(crankshafted);
- code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_raw_type_feedback_info(undefined_value());
- code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ ASSERT(isolate_->code_range() == NULL ||
+ !isolate_->code_range()->valid() ||
+ isolate_->code_range()->contains(code->address()));
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
- code->set_prologue_offset(prologue_offset);
- if (code->kind() == Code::OPTIMIZED_FUNCTION) {
- code->set_marked_for_deoptimization(false);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (code->kind() == Code::FUNCTION) {
- code->set_has_debug_break_slots(
- isolate_->debugger()->IsDebuggerActive());
- }
-#endif
-
- // Allow self references to created code object by patching the handle to
- // point to the newly allocated Code object.
- if (!self_reference.is_null()) {
- *(self_reference.location()) = code;
- }
- // Migrate generated code.
- // The generated code can contain Object** values (typically from handles)
- // that are dereferenced during the copy to point directly to the actual heap
- // objects. These pointers can include references to the code object itself,
- // through the self_reference parameter.
- code->CopyFrom(desc);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- code->Verify();
- }
-#endif
return code;
}
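// ---------------------------------------------------------------------------
// Aside: the pattern "if (!allocation.To(&result)) return allocation;" is
// the core of this patch's MaybeObject* -> AllocationResult migration. A
// minimal, self-contained model of the protocol (names invented here; this
// is not the real class):
#include <cstdio>

struct HeapObjectModel { int payload; };

class AllocationResultModel {
 public:
  static AllocationResultModel Of(HeapObjectModel* object) {
    return AllocationResultModel(object, false);
  }
  static AllocationResultModel Retry() {
    return AllocationResultModel(nullptr, true);
  }
  bool IsRetry() const { return is_retry_; }
  // Either hands out the object, or reports failure so the caller can
  // propagate the retry (and ultimately trigger a GC) up the stack.
  bool To(HeapObjectModel** out) const {
    if (is_retry_) return false;
    *out = object_;
    return true;
  }

 private:
  AllocationResultModel(HeapObjectModel* object, bool is_retry)
      : object_(object), is_retry_(is_retry) {}
  HeapObjectModel* object_;
  bool is_retry_;
};

int main() {
  HeapObjectModel obj = {42};
  HeapObjectModel* result = nullptr;
  AllocationResultModel ok = AllocationResultModel::Of(&obj);
  if (ok.To(&result)) std::printf("allocated %d\n", result->payload);
  if (AllocationResultModel::Retry().IsRetry()) {
    std::printf("caller would GC and retry\n");
  }
}
// ---------------------------------------------------------------------------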
-MaybeObject* Heap::CopyCode(Code* code) {
+AllocationResult Heap::CopyCode(Code* code) {
+ AllocationResult allocation;
+ HeapObject* new_constant_pool;
+ if (FLAG_enable_ool_constant_pool &&
+ code->constant_pool() != empty_constant_pool_array()) {
+ // Copy the constant pool, since edits to the copied code may modify
+ // the constant pool.
+ allocation = CopyConstantPoolArray(code->constant_pool());
+ if (!allocation.To(&new_constant_pool)) return allocation;
+ } else {
+ new_constant_pool = empty_constant_pool_array();
+ }
+
// Allocate an object the same size as the code object.
int obj_size = code->Size();
- MaybeObject* maybe_result;
if (obj_size > code_space()->AreaSize()) {
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
- maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
+ allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
}
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ if (!allocation.To(&result)) return allocation;
// Copy code object.
Address old_addr = code->address();
- Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
+ Address new_addr = result->address();
CopyBlock(new_addr, old_addr, obj_size);
- // Relocate the copy.
Code* new_code = Code::cast(result);
- ASSERT(!isolate_->code_range()->exists() ||
- isolate_->code_range()->contains(code->address()));
+
+ // Update the constant pool.
+ new_code->set_constant_pool(new_constant_pool);
+
+ // Relocate the copy.
+ ASSERT(isolate_->code_range() == NULL ||
+ !isolate_->code_range()->valid() ||
+ isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
return new_code;
}
-MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
- Object* reloc_info_array;
- { MaybeObject* maybe_reloc_info_array =
+AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
+ // Allocate ByteArray and ConstantPoolArray before the Code object, so that
+ // we do not risk leaving an uninitialized Code object (and breaking the
+ // heap).
+ ByteArray* reloc_info_array;
+ { AllocationResult allocation =
AllocateByteArray(reloc_info.length(), TENURED);
- if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
- return maybe_reloc_info_array;
- }
+ if (!allocation.To(&reloc_info_array)) return allocation;
+ }
+ HeapObject* new_constant_pool;
+ if (FLAG_enable_ool_constant_pool &&
+ code->constant_pool() != empty_constant_pool_array()) {
+ // Copy the constant pool, since edits to the copied code may modify
+ // the constant pool.
+ AllocationResult allocation =
+ CopyConstantPoolArray(code->constant_pool());
+ if (!allocation.To(&new_constant_pool)) return allocation;
+ } else {
+ new_constant_pool = empty_constant_pool_array();
}
int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
@@ -4352,24 +3445,27 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
size_t relocation_offset =
static_cast<size_t>(code->instruction_end() - old_addr);
- MaybeObject* maybe_result;
+ AllocationResult allocation;
if (new_obj_size > code_space()->AreaSize()) {
- maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
} else {
- maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
+ allocation = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
}
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ if (!allocation.To(&result)) return allocation;
// Copy code object.
- Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
+ Address new_addr = result->address();
// Copy header and instructions.
CopyBytes(new_addr, old_addr, relocation_offset);
Code* new_code = Code::cast(result);
- new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
+ new_code->set_relocation_info(reloc_info_array);
+
+ // Update constant pool.
+ new_code->set_constant_pool(new_constant_pool);
// Copy patched rinfo.
CopyBytes(new_code->relocation_start(),
@@ -4377,14 +3473,13 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
static_cast<size_t>(reloc_info.length()));
// Relocate the copy.
- ASSERT(!isolate_->code_range()->exists() ||
- isolate_->code_range()->contains(code->address()));
+ ASSERT(isolate_->code_range() == NULL ||
+ !isolate_->code_range()->valid() ||
+ isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- code->Verify();
- }
+ if (FLAG_verify_heap) code->ObjectVerify();
#endif
return new_code;
}
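// ---------------------------------------------------------------------------
// Aside: after the byte-for-byte copy, new_code->Relocate(new_addr -
// old_addr) shifts every position-dependent pointer recorded in the
// relocation info. A toy, self-contained illustration of fixing up an
// internal pointer by the move delta (types invented here):
#include <cassert>
#include <cstdint>
#include <cstring>

struct ToyCode {
  char body[16];
  char* internal_ptr;  // absolute pointer into this object's own body
};

int main() {
  ToyCode a;
  a.internal_ptr = &a.body[4];
  ToyCode b;
  std::memcpy(&b, &a, sizeof a);  // raw copy: pointer still targets 'a'
  std::intptr_t delta =
      reinterpret_cast<char*>(&b) - reinterpret_cast<char*>(&a);
  b.internal_ptr += delta;  // "relocate" the copy by the move delta
  assert(b.internal_ptr == &b.body[4]);
}
// ---------------------------------------------------------------------------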
@@ -4401,28 +3496,8 @@ void Heap::InitializeAllocationMemento(AllocationMemento* memento,
}
-MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
- Handle<AllocationSite> allocation_site) {
- ASSERT(gc_state_ == NOT_IN_GC);
- ASSERT(map->instance_type() != MAP_TYPE);
- // If allocation failures are disallowed, we may allocate in a different
- // space when new space is full and the object is not a large object.
- AllocationSpace retry_space =
- (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
- int size = map->instance_size() + AllocationMemento::kSize;
- Object* result;
- MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // No need for write barrier since object is white and map is in old space.
- HeapObject::cast(result)->set_map_no_write_barrier(map);
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(result) + map->instance_size());
- InitializeAllocationMemento(alloc_memento, *allocation_site);
- return result;
-}
-
-
-MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
+AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
+ AllocationSite* allocation_site) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
// If allocation failures are disallowed, we may allocate in a different
@@ -4430,62 +3505,39 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
AllocationSpace retry_space =
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
int size = map->instance_size();
- Object* result;
- MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (allocation_site != NULL) {
+ size += AllocationMemento::kSize;
+ }
+ HeapObject* result;
+ AllocationResult allocation = AllocateRaw(size, space, retry_space);
+ if (!allocation.To(&result)) return allocation;
// No need for write barrier since object is white and map is in old space.
- HeapObject::cast(result)->set_map_no_write_barrier(map);
- return result;
-}
-
-
-void Heap::InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype) {
- ASSERT(!prototype->IsMap());
- function->initialize_properties();
- function->initialize_elements();
- function->set_shared(shared);
- function->set_code(shared->code());
- function->set_prototype_or_initial_map(prototype);
- function->set_context(undefined_value());
- function->set_literals_or_bindings(empty_fixed_array());
- function->set_next_function_link(undefined_value());
-}
-
-
-MaybeObject* Heap::AllocateFunction(Map* function_map,
- SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure) {
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = Allocate(function_map, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ result->set_map_no_write_barrier(map);
+ if (allocation_site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(result) + map->instance_size());
+ InitializeAllocationMemento(alloc_memento, allocation_site);
}
- InitializeFunction(JSFunction::cast(result), shared, prototype);
return result;
}
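// ---------------------------------------------------------------------------
// Aside: when an AllocationSite is supplied, Allocate grows the request by
// AllocationMemento::kSize and writes the memento into the same contiguous
// block, directly behind the instance. A sketch of the arithmetic (the
// constant is assumed, not V8's real value):
#include <cstdint>

const int kMementoSizeModel = 2 * sizeof(void*);

int RequestedSize(int instance_size, bool has_allocation_site) {
  return instance_size + (has_allocation_site ? kMementoSizeModel : 0);
}

// The memento is not a separate allocation; its address is simply the end
// of the instance.
uintptr_t MementoAddress(uintptr_t object_start, int instance_size) {
  return object_start + instance_size;
}
// ---------------------------------------------------------------------------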
-MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
+AllocationResult Heap::AllocateArgumentsObject(Object* callee, int length) {
// To get fast allocation and map sharing for arguments objects we
// allocate them based on an arguments boilerplate.
JSObject* boilerplate;
int arguments_object_size;
bool strict_mode_callee = callee->IsJSFunction() &&
- !JSFunction::cast(callee)->shared()->is_classic_mode();
+ JSFunction::cast(callee)->shared()->strict_mode() == STRICT;
if (strict_mode_callee) {
boilerplate =
- isolate()->context()->native_context()->
- strict_mode_arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSizeStrict;
+ isolate()->context()->native_context()->strict_arguments_boilerplate();
+ arguments_object_size = kStrictArgumentsObjectSize;
} else {
boilerplate =
- isolate()->context()->native_context()->arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSize;
+ isolate()->context()->native_context()->sloppy_arguments_boilerplate();
+ arguments_object_size = kSloppyArgumentsObjectSize;
}
// Check that the size of the boilerplate matches our
@@ -4494,34 +3546,31 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
ASSERT(arguments_object_size == boilerplate->map()->instance_size());
// Do the allocation.
- Object* result;
- { MaybeObject* maybe_result =
+ HeapObject* result;
+ { AllocationResult allocation =
AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (!allocation.To(&result)) return allocation;
}
// Copy the content. The arguments boilerplate doesn't have any
// fields that point to new space so it's safe to skip the write
// barrier here.
- CopyBlock(HeapObject::cast(result)->address(),
- boilerplate->address(),
- JSObject::kHeaderSize);
+ CopyBlock(result->address(), boilerplate->address(), JSObject::kHeaderSize);
// Set the length property.
- JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
- Smi::FromInt(length),
- SKIP_WRITE_BARRIER);
- // Set the callee property for non-strict mode arguments object only.
+ JSObject* js_obj = JSObject::cast(result);
+ js_obj->InObjectPropertyAtPut(
+ kArgumentsLengthIndex, Smi::FromInt(length), SKIP_WRITE_BARRIER);
+ // Set the callee property for sloppy mode arguments object only.
if (!strict_mode_callee) {
- JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
- callee);
+ js_obj->InObjectPropertyAtPut(kArgumentsCalleeIndex, callee);
}
// Check the state of the object
- ASSERT(JSObject::cast(result)->HasFastProperties());
- ASSERT(JSObject::cast(result)->HasFastObjectElements());
+ ASSERT(js_obj->HasFastProperties());
+ ASSERT(js_obj->HasFastObjectElements());
- return result;
+ return js_obj;
}
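// ---------------------------------------------------------------------------
// Aside: a hedged sketch of the boilerplate-cloning strategy above; the
// layout and helper below are invented for illustration. Strict-mode
// arguments objects omit the callee slot, which is why they use a smaller
// boilerplate and size.
#include <cstring>

struct ArgsObjectModel {
  long length_slot;
  long callee_slot;  // absent from the strict-mode layout
};

void* CloneFromBoilerplate(const void* boilerplate, void* storage,
                           int object_size, long length) {
  // Copy the template header verbatim, then patch the length slot. The
  // real code skips the write barrier here because the boilerplate holds
  // no new-space pointers.
  std::memcpy(storage, boilerplate, object_size);
  static_cast<ArgsObjectModel*>(storage)->length_slot = length;
  return storage;
}
// ---------------------------------------------------------------------------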
@@ -4545,7 +3594,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
// so that object accesses before the constructor completes (e.g. in the
// debugger) will not cause a crash.
if (map->constructor()->IsJSFunction() &&
- JSFunction::cast(map->constructor())->shared()->
+ JSFunction::cast(map->constructor())->
IsInobjectSlackTrackingInProgress()) {
// We might want to shrink the object later.
ASSERT(obj->GetInternalFieldCount() == 0);
@@ -4557,8 +3606,11 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
}
-MaybeObject* Heap::AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure, bool allocate_properties) {
+AllocationResult Heap::AllocateJSObjectFromMap(
+ Map* map,
+ PretenureFlag pretenure,
+ bool allocate_properties,
+ AllocationSite* allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4573,8 +3625,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(
if (allocate_properties) {
int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
- if (!maybe_properties->To(&properties)) return maybe_properties;
+ { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
+ if (!allocation.To(&properties)) return allocation;
}
} else {
properties = empty_fixed_array();
@@ -4583,264 +3635,37 @@ MaybeObject* Heap::AllocateJSObjectFromMap(
// Allocate the JSObject.
int size = map->instance_size();
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
- Object* obj;
- MaybeObject* maybe_obj = Allocate(map, space);
- if (!maybe_obj->To(&obj)) return maybe_obj;
+ JSObject* js_obj;
+ AllocationResult allocation = Allocate(map, space, allocation_site);
+ if (!allocation.To(&js_obj)) return allocation;
// Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
- ASSERT(JSObject::cast(obj)->HasFastElements() ||
- JSObject::cast(obj)->HasExternalArrayElements());
- return obj;
+ InitializeJSObjectFromMap(js_obj, properties, map);
+ ASSERT(js_obj->HasFastElements() ||
+ js_obj->HasExternalArrayElements() ||
+ js_obj->HasFixedTypedArrayElements());
+ return js_obj;
}
-MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
- Map* map, Handle<AllocationSite> allocation_site) {
- // JSFunctions should be allocated using AllocateFunction to be
- // properly initialized.
- ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
-
- // Both types of global objects should be allocated using
- // AllocateGlobalObject to be properly initialized.
- ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
-
- // Allocate the backing storage for the properties.
- int prop_size = map->InitialPropertiesLength();
- ASSERT(prop_size >= 0);
- FixedArray* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
- if (!maybe_properties->To(&properties)) return maybe_properties;
- }
-
- // Allocate the JSObject.
- int size = map->instance_size();
- AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
- Object* obj;
- MaybeObject* maybe_obj =
- AllocateWithAllocationSite(map, space, allocation_site);
- if (!maybe_obj->To(&obj)) return maybe_obj;
-
- // Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
- ASSERT(JSObject::cast(obj)->HasFastElements());
- return obj;
-}
-
-
-MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure) {
+AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
+ PretenureFlag pretenure,
+ AllocationSite* allocation_site) {
ASSERT(constructor->has_initial_map());
- // Allocate the object based on the constructors initial map.
- MaybeObject* result = AllocateJSObjectFromMap(
- constructor->initial_map(), pretenure);
-#ifdef DEBUG
- // Make sure result is NOT a global object if valid.
- Object* non_failure;
- ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
-#endif
- return result;
-}
-
-MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
- Handle<AllocationSite> allocation_site) {
- ASSERT(constructor->has_initial_map());
- // Allocate the object based on the constructors initial map, or the payload
- // advice
- Map* initial_map = constructor->initial_map();
-
- ElementsKind to_kind = allocation_site->GetElementsKind();
- AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
- if (to_kind != initial_map->elements_kind()) {
- MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
- if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
- // Possibly alter the mode, since we found an updated elements kind
- // in the type info cell.
- mode = AllocationSite::GetMode(to_kind);
- }
-
- MaybeObject* result;
- if (mode == TRACK_ALLOCATION_SITE) {
- result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
- allocation_site);
- } else {
- result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
- }
+ // Allocate the object based on the constructor's initial map.
+ AllocationResult allocation = AllocateJSObjectFromMap(
+ constructor->initial_map(), pretenure, true, allocation_site);
#ifdef DEBUG
// Make sure result is NOT a global object if valid.
- Object* non_failure;
- ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
+ HeapObject* obj;
+ ASSERT(!allocation.To(&obj) || !obj->IsGlobalObject());
#endif
- return result;
-}
-
-
-MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
- // Allocate a fresh map. Modules do not have a prototype.
- Map* map;
- MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
- if (!maybe_map->To(&map)) return maybe_map;
- // Allocate the object based on the map.
- JSModule* module;
- MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
- if (!maybe_module->To(&module)) return maybe_module;
- module->set_context(context);
- module->set_scope_info(scope_info);
- return module;
-}
-
-
-MaybeObject* Heap::AllocateJSArrayAndStorage(
- ElementsKind elements_kind,
- int length,
- int capacity,
- ArrayStorageAllocationMode mode,
- PretenureFlag pretenure) {
- MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
- JSArray* array;
- if (!maybe_array->To(&array)) return maybe_array;
-
- // TODO(mvstanton): this body of code is duplicate with AllocateJSArrayStorage
- // for performance reasons.
- ASSERT(capacity >= length);
-
- if (capacity == 0) {
- array->set_length(Smi::FromInt(0));
- array->set_elements(empty_fixed_array());
- return array;
- }
-
- FixedArrayBase* elms;
- MaybeObject* maybe_elms = NULL;
- if (IsFastDoubleElementsKind(elements_kind)) {
- if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
- } else {
- ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
- }
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
- if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- maybe_elms = AllocateUninitializedFixedArray(capacity);
- } else {
- ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- maybe_elms = AllocateFixedArrayWithHoles(capacity);
- }
- }
- if (!maybe_elms->To(&elms)) return maybe_elms;
-
- array->set_elements(elms);
- array->set_length(Smi::FromInt(length));
- return array;
-}
-
-
-MaybeObject* Heap::AllocateJSArrayStorage(
- JSArray* array,
- int length,
- int capacity,
- ArrayStorageAllocationMode mode) {
- ASSERT(capacity >= length);
-
- if (capacity == 0) {
- array->set_length(Smi::FromInt(0));
- array->set_elements(empty_fixed_array());
- return array;
- }
-
- FixedArrayBase* elms;
- MaybeObject* maybe_elms = NULL;
- ElementsKind elements_kind = array->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
- if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
- } else {
- ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
- }
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
- if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- maybe_elms = AllocateUninitializedFixedArray(capacity);
- } else {
- ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- maybe_elms = AllocateFixedArrayWithHoles(capacity);
- }
- }
- if (!maybe_elms->To(&elms)) return maybe_elms;
-
- array->set_elements(elms);
- array->set_length(Smi::FromInt(length));
- return array;
-}
-
-
-MaybeObject* Heap::AllocateJSArrayWithElements(
- FixedArrayBase* elements,
- ElementsKind elements_kind,
- int length,
- PretenureFlag pretenure) {
- MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
- JSArray* array;
- if (!maybe_array->To(&array)) return maybe_array;
-
- array->set_elements(elements);
- array->set_length(Smi::FromInt(length));
- array->ValidateElements();
- return array;
+ return allocation;
}
-MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
- // Allocate map.
- // TODO(rossberg): Once we optimize proxies, think about a scheme to share
- // maps. Will probably depend on the identity of the handler object, too.
- Map* map;
- MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
- if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
- map->set_prototype(prototype);
-
- // Allocate the proxy object.
- JSProxy* result;
- MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
- result->InitializeBody(map->instance_size(), Smi::FromInt(0));
- result->set_handler(handler);
- result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
- Object* call_trap,
- Object* construct_trap,
- Object* prototype) {
- // Allocate map.
- // TODO(rossberg): Once we optimize proxies, think about a scheme to share
- // maps. Will probably depend on the identity of the handler object, too.
- Map* map;
- MaybeObject* maybe_map_obj =
- AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
- if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
- map->set_prototype(prototype);
-
- // Allocate the proxy object.
- JSFunctionProxy* result;
- MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
- result->InitializeBody(map->instance_size(), Smi::FromInt(0));
- result->set_handler(handler);
- result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
- result->set_call_trap(call_trap);
- result->set_construct_trap(construct_trap);
- return result;
-}
-
-
-MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
+AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
SLOW_ASSERT(!source->IsJSFunction());
@@ -4848,7 +3673,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Make the clone.
Map* map = source->map();
int object_size = map->instance_size();
- Object* clone;
+ HeapObject* clone;
ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
@@ -4857,11 +3682,11 @@ MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// If we're forced to always allocate, we use the general allocation
// functions which may leave us with an object in old space.
if (always_allocate()) {
- { MaybeObject* maybe_clone =
+ { AllocationResult allocation =
AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
+ if (!allocation.To(&clone)) return allocation;
}
- Address clone_address = HeapObject::cast(clone)->address();
+ Address clone_address = clone->address();
CopyBlock(clone_address,
source->address(),
object_size);
@@ -4875,14 +3700,14 @@ MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
{ int adjusted_object_size = site != NULL
? object_size + AllocationMemento::kSize
: object_size;
- MaybeObject* maybe_clone =
+ AllocationResult allocation =
AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
+ if (!allocation.To(&clone)) return allocation;
}
SLOW_ASSERT(InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
- CopyBlock(HeapObject::cast(clone)->address(),
+ CopyBlock(clone->address(),
source->address(),
object_size);
@@ -4899,225 +3724,38 @@ MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
if (elements->length() > 0) {
- Object* elem;
- { MaybeObject* maybe_elem;
+ FixedArrayBase* elem;
+ { AllocationResult allocation;
if (elements->map() == fixed_cow_array_map()) {
- maybe_elem = FixedArray::cast(elements);
+ allocation = FixedArray::cast(elements);
} else if (source->HasFastDoubleElements()) {
- maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
+ allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
} else {
- maybe_elem = CopyFixedArray(FixedArray::cast(elements));
+ allocation = CopyFixedArray(FixedArray::cast(elements));
}
- if (!maybe_elem->ToObject(&elem)) return maybe_elem;
+ if (!allocation.To(&elem)) return allocation;
}
- JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
+ JSObject::cast(clone)->set_elements(elem, wb_mode);
}
// Update properties if necessary.
if (properties->length() > 0) {
- Object* prop;
- { MaybeObject* maybe_prop = CopyFixedArray(properties);
- if (!maybe_prop->ToObject(&prop)) return maybe_prop;
+ FixedArray* prop;
+ { AllocationResult allocation = CopyFixedArray(properties);
+ if (!allocation.To(&prop)) return allocation;
}
- JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
+ JSObject::cast(clone)->set_properties(prop, wb_mode);
}
// Return the new clone.
return clone;
}
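// ---------------------------------------------------------------------------
// Aside: note the elements handling above: copy-on-write backing stores are
// shared with the clone, everything else is deep-copied. A tiny sketch of
// that decision (model types invented here):
#include <cassert>

struct ElementsModel {
  bool is_cow;  // stands in for "map() == fixed_cow_array_map()"
  int data[4];
};

ElementsModel* ElementsForClone(ElementsModel* src) {
  if (src->is_cow) return src;     // safe to share: writers must copy first
  return new ElementsModel(*src);  // otherwise deep-copy for the clone
}
// ---------------------------------------------------------------------------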
-MaybeObject* Heap::ReinitializeJSReceiver(
- JSReceiver* object, InstanceType type, int size) {
- ASSERT(type >= FIRST_JS_OBJECT_TYPE);
-
- // Allocate fresh map.
- // TODO(rossberg): Once we optimize proxies, cache these maps.
- Map* map;
- MaybeObject* maybe = AllocateMap(type, size);
- if (!maybe->To<Map>(&map)) return maybe;
-
- // Check that the receiver has at least the size of the fresh object.
- int size_difference = object->map()->instance_size() - map->instance_size();
- ASSERT(size_difference >= 0);
-
- map->set_prototype(object->map()->prototype());
-
- // Allocate the backing storage for the properties.
- int prop_size = map->unused_property_fields() - map->inobject_properties();
- Object* properties;
- maybe = AllocateFixedArray(prop_size, TENURED);
- if (!maybe->ToObject(&properties)) return maybe;
-
- // Functions require some allocation, which might fail here.
- SharedFunctionInfo* shared = NULL;
- if (type == JS_FUNCTION_TYPE) {
- String* name;
- maybe =
- InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
- if (!maybe->To<String>(&name)) return maybe;
- maybe = AllocateSharedFunctionInfo(name);
- if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
- }
-
- // Because of possible retries of this function after failure,
- // we must NOT fail after this point, where we have changed the type!
-
- // Reset the map for the object.
- object->set_map(map);
- JSObject* jsobj = JSObject::cast(object);
-
- // Reinitialize the object from the constructor map.
- InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
-
- // Functions require some minimal initialization.
- if (type == JS_FUNCTION_TYPE) {
- map->set_function_with_prototype(true);
- InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
- JSFunction::cast(object)->set_context(
- isolate()->context()->native_context());
- }
-
- // Put in filler if the new object is smaller than the old.
- if (size_difference > 0) {
- CreateFillerObjectAt(
- object->address() + map->instance_size(), size_difference);
- }
-
- return object;
-}
-
-
-MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
- JSGlobalProxy* object) {
- ASSERT(constructor->has_initial_map());
- Map* map = constructor->initial_map();
-
- // Check that the already allocated object has the same size and type as
- // objects allocated using the constructor.
- ASSERT(map->instance_size() == object->map()->instance_size());
- ASSERT(map->instance_type() == object->map()->instance_type());
-
- // Allocate the backing storage for the properties.
- int prop_size = map->unused_property_fields() - map->inobject_properties();
- Object* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
- }
-
- // Reset the map for the object.
- object->set_map(constructor->initial_map());
-
- // Reinitialize the object from the constructor map.
- InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
- return object;
-}
-
-
-MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
- PretenureFlag pretenure) {
- int length = string.length();
- if (length == 1) {
- return Heap::LookupSingleCharacterStringFromCode(string[0]);
- }
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRawOneByteString(string.length(), pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- // Copy the characters into the new object.
- CopyChars(SeqOneByteString::cast(result)->GetChars(),
- string.start(),
- length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
- int non_ascii_start,
- PretenureFlag pretenure) {
- // Continue counting the number of characters in the UTF-8 string, starting
- // from the first non-ascii character or word.
- Access<UnicodeCache::Utf8Decoder>
- decoder(isolate_->unicode_cache()->utf8_decoder());
- decoder->Reset(string.start() + non_ascii_start,
- string.length() - non_ascii_start);
- int utf16_length = decoder->Utf16Length();
- ASSERT(utf16_length > 0);
- // Allocate string.
- Object* result;
- {
- int chars = non_ascii_start + utf16_length;
- MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Convert and copy the characters into the new object.
- SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
- // Copy ascii portion.
- uint16_t* data = twobyte->GetChars();
- if (non_ascii_start != 0) {
- const char* ascii_data = string.start();
- for (int i = 0; i < non_ascii_start; i++) {
- *data++ = *ascii_data++;
- }
- }
- // Now write the remainder.
- decoder->WriteUtf16(data, utf16_length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
- PretenureFlag pretenure) {
- // Check if the string is an ASCII string.
- Object* result;
- int length = string.length();
- const uc16* start = string.start();
-
- if (String::IsOneByte(start, length)) {
- MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
- } else { // It's not a one byte string.
- MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
- }
- return result;
-}
-
-
-Map* Heap::InternalizedStringMapForString(String* string) {
- // If the string is in new space it cannot be used as internalized.
- if (InNewSpace(string)) return NULL;
-
- // Find the corresponding internalized string map for strings.
- switch (string->map()->instance_type()) {
- case STRING_TYPE: return internalized_string_map();
- case ASCII_STRING_TYPE: return ascii_internalized_string_map();
- case CONS_STRING_TYPE: return cons_internalized_string_map();
- case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
- case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
- case EXTERNAL_ASCII_STRING_TYPE:
- return external_ascii_internalized_string_map();
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return external_internalized_string_with_one_byte_data_map();
- case SHORT_EXTERNAL_STRING_TYPE:
- return short_external_internalized_string_map();
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- return short_external_ascii_internalized_string_map();
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return short_external_internalized_string_with_one_byte_data_map();
- default: return NULL; // No match found.
- }
-}
-
-
static inline void WriteOneByteData(Vector<const char> vector,
uint8_t* chars,
int len) {
// Only works for ascii.
ASSERT(vector.length() == len);
- OS::MemCopy(chars, vector.start(), len);
+ MemCopy(chars, vector.start(), len);
}
static inline void WriteTwoByteData(Vector<const char> vector,
@@ -5161,35 +3799,31 @@ static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
template<bool is_one_byte, typename T>
-MaybeObject* Heap::AllocateInternalizedStringImpl(
+AllocationResult Heap::AllocateInternalizedStringImpl(
T t, int chars, uint32_t hash_field) {
ASSERT(chars >= 0);
// Compute map and object size.
int size;
Map* map;
+ ASSERT_LE(0, chars);
+ ASSERT_GE(String::kMaxLength, chars);
if (is_one_byte) {
- if (chars > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x9);
- }
map = ascii_internalized_string_map();
size = SeqOneByteString::SizeFor(chars);
} else {
- if (chars > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xa);
- }
map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&result)) return allocation;
}
- reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
+ result->set_map_no_write_barrier(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(chars);
@@ -5208,31 +3842,31 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
// Need explicit instantiations.
template
-MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
+AllocationResult Heap::AllocateInternalizedStringImpl<true>(
+ String*, int, uint32_t);
template
-MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
+AllocationResult Heap::AllocateInternalizedStringImpl<false>(
String*, int, uint32_t);
template
-MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
+AllocationResult Heap::AllocateInternalizedStringImpl<false>(
Vector<const char>, int, uint32_t);
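// ---------------------------------------------------------------------------
// Aside: the "template ..." lines above are explicit instantiations. The
// template body lives in this .cc file, so each combination used elsewhere
// must be instantiated here, or callers in other translation units get
// undefined-symbol link errors. A self-contained sketch of the same
// pattern (names invented):
template <bool kOneByte, typename T>
int SizeOfImpl(T /*t*/, int chars) {
  return chars * (kOneByte ? 1 : 2);
}

// Explicit instantiations, mirroring the three above.
template int SizeOfImpl<true, const char*>(const char*, int);
template int SizeOfImpl<false, const char*>(const char*, int);
template int SizeOfImpl<false, int>(int, int);
// ---------------------------------------------------------------------------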
-MaybeObject* Heap::AllocateRawOneByteString(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xb);
- }
+AllocationResult Heap::AllocateRawOneByteString(int length,
+ PretenureFlag pretenure) {
+ ASSERT_LE(0, length);
+ ASSERT_GE(String::kMaxLength, length);
int size = SeqOneByteString::SizeFor(length);
ASSERT(size <= SeqOneByteString::kMaxSize);
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&result)) return allocation;
}
// Partially initialize the object.
- HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
+ result->set_map_no_write_barrier(ascii_string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -5241,22 +3875,21 @@ MaybeObject* Heap::AllocateRawOneByteString(int length,
}
-MaybeObject* Heap::AllocateRawTwoByteString(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xc);
- }
+AllocationResult Heap::AllocateRawTwoByteString(int length,
+ PretenureFlag pretenure) {
+ ASSERT_LE(0, length);
+ ASSERT_GE(String::kMaxLength, length);
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&result)) return allocation;
}
// Partially initialize the object.
- HeapObject::cast(result)->set_map_no_write_barrier(string_map());
+ result->set_map_no_write_barrier(string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -5264,53 +3897,73 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
}
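// ---------------------------------------------------------------------------
// Aside: both raw-string allocators "partially initialize" the result: map,
// length and hash are written before returning, while the character payload
// stays uninitialized. Installing the map first keeps the heap parsable,
// since the map is what lets a heap walker compute the object's size. A
// sketch with invented names:
#include <cstdint>
#include <cstdlib>

struct StringHeaderModel {
  const void* map;  // written first: makes the object's size computable
  int32_t length;
  uint32_t hash;    // stands in for String::kEmptyHashField
  // ... character payload follows, deliberately left uninitialized ...
};

StringHeaderModel* AllocateRawStringSketch(const void* string_map,
                                           int32_t length) {
  StringHeaderModel* s = static_cast<StringHeaderModel*>(
      std::malloc(sizeof(StringHeaderModel)));
  if (s == nullptr) return nullptr;
  s->map = string_map;
  s->length = length;
  s->hash = 0;
  return s;
}
// ---------------------------------------------------------------------------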
-MaybeObject* Heap::AllocateJSArray(
- ElementsKind elements_kind,
- PretenureFlag pretenure) {
- Context* native_context = isolate()->context()->native_context();
- JSFunction* array_function = native_context->array_function();
- Map* map = array_function->initial_map();
- Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
- if (transition_map != NULL) map = transition_map;
- return AllocateJSObjectFromMap(map, pretenure);
-}
-
-
-MaybeObject* Heap::AllocateEmptyFixedArray() {
+AllocationResult Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
- Object* result;
- { MaybeObject* maybe_result =
+ HeapObject* result;
+ { AllocationResult allocation =
AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (!allocation.To(&result)) return allocation;
}
// Initialize the object.
- reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
- fixed_array_map());
- reinterpret_cast<FixedArray*>(result)->set_length(0);
+ result->set_map_no_write_barrier(fixed_array_map());
+ FixedArray::cast(result)->set_length(0);
return result;
}
-MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
+AllocationResult Heap::AllocateEmptyExternalArray(
+ ExternalArrayType array_type) {
return AllocateExternalArray(0, array_type, NULL, TENURED);
}
-MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
+AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
+ if (!InNewSpace(src)) {
+ return src;
+ }
+
int len = src->length();
- Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ HeapObject* obj;
+ { AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
+ if (!allocation.To(&obj)) return allocation;
+ }
+ obj->set_map_no_write_barrier(fixed_array_map());
+ FixedArray* result = FixedArray::cast(obj);
+ result->set_length(len);
+
+ // Copy the content.
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+
+ // TODO(mvstanton): The map is set twice because of protection against calling
+ // set() on a COW FixedArray. Issue v8:3221 created to track this, and
+ // we might then be able to remove this whole method.
+ HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
+ return result;
+}
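// ---------------------------------------------------------------------------
// Aside: the double map write in CopyAndTenureFixedCOWArray (see the TODO)
// exists because set() is forbidden on a COW array. A standalone sketch of
// the same two-step trick, with an assert modeling the debug check:
#include <cassert>

enum MapKindModel { kFixedArrayMapModel, kFixedCowArrayMapModel };

struct ArrayModel {
  MapKindModel map;
  int slots[4];
  void set(int i, int v) {
    assert(map != kFixedCowArrayMapModel);  // COW arrays must not be mutated
    slots[i] = v;
  }
};

void FillThenSeal(ArrayModel* a) {
  a->map = kFixedArrayMapModel;    // writable while filling
  for (int i = 0; i < 4; i++) a->set(i, i);
  a->map = kFixedCowArrayMapModel;  // seal as copy-on-write
}
// ---------------------------------------------------------------------------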
+
+
+AllocationResult Heap::AllocateEmptyFixedTypedArray(
+ ExternalArrayType array_type) {
+ return AllocateFixedTypedArray(0, array_type, TENURED);
+}
+
+
+AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
+ int len = src->length();
+ HeapObject* obj;
+ { AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
+ if (!allocation.To(&obj)) return allocation;
}
if (InNewSpace(obj)) {
- HeapObject* dst = HeapObject::cast(obj);
- dst->set_map_no_write_barrier(map);
- CopyBlock(dst->address() + kPointerSize,
+ obj->set_map_no_write_barrier(map);
+ CopyBlock(obj->address() + kPointerSize,
src->address() + kPointerSize,
FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
- HeapObject::cast(obj)->set_map_no_write_barrier(map);
+ obj->set_map_no_write_barrier(map);
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
@@ -5322,47 +3975,52 @@ MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
}
-MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
- Map* map) {
+AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
+ Map* map) {
int len = src->length();
- Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ HeapObject* obj;
+ { AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
+ if (!allocation.To(&obj)) return allocation;
}
- HeapObject* dst = HeapObject::cast(obj);
- dst->set_map_no_write_barrier(map);
+ obj->set_map_no_write_barrier(map);
CopyBlock(
- dst->address() + FixedDoubleArray::kLengthOffset,
+ obj->address() + FixedDoubleArray::kLengthOffset,
src->address() + FixedDoubleArray::kLengthOffset,
FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
return obj;
}
-MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
- Map* map) {
- int int64_entries = src->count_of_int64_entries();
- int ptr_entries = src->count_of_ptr_entries();
- int int32_entries = src->count_of_int32_entries();
- Object* obj;
- { MaybeObject* maybe_obj =
- AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
+ Map* map) {
+ HeapObject* obj;
+ if (src->is_extended_layout()) {
+ ConstantPoolArray::NumberOfEntries small(src,
+ ConstantPoolArray::SMALL_SECTION);
+ ConstantPoolArray::NumberOfEntries extended(src,
+ ConstantPoolArray::EXTENDED_SECTION);
+ AllocationResult allocation =
+ AllocateExtendedConstantPoolArray(small, extended);
+ if (!allocation.To(&obj)) return allocation;
+ } else {
+ ConstantPoolArray::NumberOfEntries small(src,
+ ConstantPoolArray::SMALL_SECTION);
+ AllocationResult allocation = AllocateConstantPoolArray(small);
+ if (!allocation.To(&obj)) return allocation;
}
- HeapObject* dst = HeapObject::cast(obj);
- dst->set_map_no_write_barrier(map);
+ obj->set_map_no_write_barrier(map);
CopyBlock(
- dst->address() + ConstantPoolArray::kLengthOffset,
- src->address() + ConstantPoolArray::kLengthOffset,
- ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
- - ConstantPoolArray::kLengthOffset);
+ obj->address() + ConstantPoolArray::kFirstEntryOffset,
+ src->address() + ConstantPoolArray::kFirstEntryOffset,
+ src->size() - ConstantPoolArray::kFirstEntryOffset);
return obj;
}
-MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
+AllocationResult Heap::AllocateRawFixedArray(int length,
+ PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xe);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
int size = FixedArray::SizeFor(length);
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
@@ -5371,20 +4029,20 @@ MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
}
-MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
- PretenureFlag pretenure,
- Object* filler) {
+AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
+ PretenureFlag pretenure,
+ Object* filler) {
ASSERT(length >= 0);
ASSERT(empty_fixed_array()->IsFixedArray());
if (length == 0) return empty_fixed_array();
ASSERT(!InNewSpace(filler));
- Object* result;
- { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ HeapObject* result;
+ { AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
+ if (!allocation.To(&result)) return allocation;
}
- HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
+ result->set_map_no_write_barrier(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -5392,162 +4050,132 @@ MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
}
-MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
}
-MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
- PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
-}
-
-
-MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
+AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
- Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ HeapObject* obj;
+ { AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
+ if (!allocation.To(&obj)) return allocation;
}
- reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
- fixed_array_map());
+ obj->set_map_no_write_barrier(fixed_array_map());
FixedArray::cast(obj)->set_length(length);
return obj;
}
-MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
- int size = FixedDoubleArray::SizeFor(0);
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Initialize the object.
- reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
- fixed_double_array_map());
- reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
+AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
int length,
PretenureFlag pretenure) {
if (length == 0) return empty_fixed_array();
- Object* elements_object;
- MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
- if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
- FixedDoubleArray* elements =
- reinterpret_cast<FixedDoubleArray*>(elements_object);
+ HeapObject* elements;
+ AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
+ if (!allocation.To(&elements)) return allocation;
elements->set_map_no_write_barrier(fixed_double_array_map());
- elements->set_length(length);
+ FixedDoubleArray::cast(elements)->set_length(length);
return elements;
}
-MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
- int length,
- PretenureFlag pretenure) {
- if (length == 0) return empty_fixed_array();
-
- Object* elements_object;
- MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
- if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
- FixedDoubleArray* elements =
- reinterpret_cast<FixedDoubleArray*>(elements_object);
+AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > FixedDoubleArray::kMaxLength) {
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+ }
+ int size = FixedDoubleArray::SizeFor(length);
+#ifndef V8_HOST_ARCH_64_BIT
+ size += kPointerSize;
+#endif
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- for (int i = 0; i < length; ++i) {
- elements->set_the_hole(i);
+ HeapObject* object;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!allocation.To(&object)) return allocation;
}
- elements->set_map_no_write_barrier(fixed_double_array_map());
- elements->set_length(length);
- return elements;
+ return EnsureDoubleAligned(this, object, size);
}
-MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xf);
- }
- int size = FixedDoubleArray::SizeFor(length);
+AllocationResult Heap::AllocateConstantPoolArray(
+ const ConstantPoolArray::NumberOfEntries& small) {
+ CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
+ int size = ConstantPoolArray::SizeFor(small);
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
#endif
- AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
HeapObject* object;
- { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
- if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ if (!allocation.To(&object)) return allocation;
}
+ object = EnsureDoubleAligned(this, object, size);
+ object->set_map_no_write_barrier(constant_pool_array_map());
- return EnsureDoubleAligned(this, object, size);
+ ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
+ constant_pool->Init(small);
+ constant_pool->ClearPtrEntries(isolate());
+ return constant_pool;
}
-MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
- int number_of_ptr_entries,
- int number_of_int32_entries) {
- ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
- number_of_int32_entries > 0);
- int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
- number_of_ptr_entries,
- number_of_int32_entries);
+AllocationResult Heap::AllocateExtendedConstantPoolArray(
+ const ConstantPoolArray::NumberOfEntries& small,
+ const ConstantPoolArray::NumberOfEntries& extended) {
+ CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
+ CHECK(extended.are_in_range(0, kMaxInt));
+ int size = ConstantPoolArray::SizeForExtended(small, extended);
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
#endif
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
HeapObject* object;
- { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
- if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
+ { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ if (!allocation.To(&object)) return allocation;
}
object = EnsureDoubleAligned(this, object, size);
- HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
-
- ConstantPoolArray* constant_pool =
- reinterpret_cast<ConstantPoolArray*>(object);
- constant_pool->SetEntryCounts(number_of_int64_entries,
- number_of_ptr_entries,
- number_of_int32_entries);
- MemsetPointer(
- HeapObject::RawField(
- constant_pool,
- constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
- undefined_value(),
- number_of_ptr_entries);
+ object->set_map_no_write_barrier(constant_pool_array_map());
+
+ ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
+ constant_pool->InitExtended(small, extended);
+ constant_pool->ClearPtrEntries(isolate());
return constant_pool;
}
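Both constant pool allocators share one shape: size the backing store from a NumberOfEntries descriptor, allocate tenured in a pointer space (the pool holds heap pointers), double-align, then Init/InitExtended and clear the pointer slots. A hedged sketch of a request, where the entry-type order (int64, code pointer, heap pointer, int32) is an assumption based on the four-argument constructor used below in AllocateEmptyConstantPoolArray:

  AllocationResult AllocateSmallPool(Heap* heap) {
    // Two int64 slots, no code pointers, one heap pointer, three int32s.
    ConstantPoolArray::NumberOfEntries small(2, 0, 1, 3);
    ConstantPoolArray* pool;
    AllocationResult allocation = heap->AllocateConstantPoolArray(small);
    if (!allocation.To(&pool)) return allocation;
    return pool;  // ptr entries already cleared to undefined
  }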
-MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+AllocationResult Heap::AllocateEmptyConstantPoolArray() {
+ ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
+ int size = ConstantPoolArray::SizeFor(small);
+ HeapObject* result;
+ { AllocationResult allocation =
+ AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+ if (!allocation.To(&result)) return allocation;
}
- reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
- hash_table_map());
- ASSERT(result->IsHashTable());
+ result->set_map_no_write_barrier(constant_pool_array_map());
+ ConstantPoolArray::cast(result)->Init(small);
return result;
}
-MaybeObject* Heap::AllocateSymbol() {
+AllocationResult Heap::AllocateSymbol() {
// Statically ensure that it is safe to allocate symbols in paged spaces.
- STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
+ STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
- Object* result;
- MaybeObject* maybe =
+ HeapObject* result;
+ AllocationResult allocation =
AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
- if (!maybe->ToObject(&result)) return maybe;
+ if (!allocation.To(&result)) return allocation;
- HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
+ result->set_map_no_write_barrier(symbol_map());
// Generate a random hash value.
int hash;
@@ -5568,159 +4196,7 @@ MaybeObject* Heap::AllocateSymbol() {
}
-MaybeObject* Heap::AllocatePrivateSymbol() {
- MaybeObject* maybe = AllocateSymbol();
- Symbol* symbol;
- if (!maybe->To(&symbol)) return maybe;
- symbol->set_is_private(true);
- return symbol;
-}
-
-
-MaybeObject* Heap::AllocateNativeContext() {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(native_context_map());
- context->set_js_array_maps(undefined_value());
- ASSERT(context->IsNativeContext());
- ASSERT(result->IsContext());
- return result;
-}
-
-
-MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
- ScopeInfo* scope_info) {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArray(scope_info->ContextLength(), TENURED);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(global_context_map());
- context->set_closure(function);
- context->set_previous(function->context());
- context->set_extension(scope_info);
- context->set_global_object(function->context()->global_object());
- ASSERT(context->IsGlobalContext());
- ASSERT(result->IsContext());
- return context;
-}
-
-
-MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArray(scope_info->ContextLength(), TENURED);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(module_context_map());
- // Instance link will be set later.
- context->set_extension(Smi::FromInt(0));
- return context;
-}
-
-
-MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
- ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(function_context_map());
- context->set_closure(function);
- context->set_previous(function->context());
- context->set_extension(Smi::FromInt(0));
- context->set_global_object(function->context()->global_object());
- return context;
-}
-
-
-MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
- Context* previous,
- String* name,
- Object* thrown_object) {
- STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(catch_context_map());
- context->set_closure(function);
- context->set_previous(previous);
- context->set_extension(name);
- context->set_global_object(previous->global_object());
- context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
- return context;
-}
-
-
-MaybeObject* Heap::AllocateWithContext(JSFunction* function,
- Context* previous,
- JSReceiver* extension) {
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(with_context_map());
- context->set_closure(function);
- context->set_previous(previous);
- context->set_extension(extension);
- context->set_global_object(previous->global_object());
- return context;
-}
-
-
-MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
- Context* previous,
- ScopeInfo* scope_info) {
- Object* result;
- { MaybeObject* maybe_result =
- AllocateFixedArrayWithHoles(scope_info->ContextLength());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(block_context_map());
- context->set_closure(function);
- context->set_previous(previous);
- context->set_extension(scope_info);
- context->set_global_object(previous->global_object());
- return context;
-}
-
-
-MaybeObject* Heap::AllocateScopeInfo(int length) {
- FixedArray* scope_info;
- MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
- if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
- scope_info->set_map_no_write_barrier(scope_info_map());
- return scope_info;
-}
-
-
-MaybeObject* Heap::AllocateExternal(void* value) {
- Foreign* foreign;
- { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
- if (!maybe_result->To(&foreign)) return maybe_result;
- }
- JSObject* external;
- { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
- if (!maybe_result->To(&external)) return maybe_result;
- }
- external->SetInternalField(0, foreign);
- return external;
-}
-
-
-MaybeObject* Heap::AllocateStruct(InstanceType type) {
+AllocationResult Heap::AllocateStruct(InstanceType type) {
Map* map;
switch (type) {
#define MAKE_CASE(NAME, Name, name) \
@@ -5729,29 +4205,30 @@ STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
default:
UNREACHABLE();
- return Failure::InternalError();
+ return exception();
}
int size = map->instance_size();
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ Struct* result;
+ { AllocationResult allocation = Allocate(map, space);
+ if (!allocation.To(&result)) return allocation;
}
- Struct::cast(result)->InitializeBody(size);
+ result->InitializeBody(size);
return result;
}
bool Heap::IsHeapIterable() {
return (!old_pointer_space()->was_swept_conservatively() &&
- !old_data_space()->was_swept_conservatively());
+ !old_data_space()->was_swept_conservatively() &&
+ new_space_top_after_last_gc_ == new_space()->top());
}
-void Heap::EnsureHeapIsIterable() {
+void Heap::MakeHeapIterable() {
ASSERT(AllowHeapAllocation::IsAllowed());
if (!IsHeapIterable()) {
- CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
+ CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
}
ASSERT(IsHeapIterable());
}
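Callers rarely invoke MakeHeapIterable() directly: the HeapIterator changes later in this patch add a helper member that runs it on construction. A minimal usage sketch, assuming the iterator's usual next() interface:

  void CountFunctions(Heap* heap) {
    int count = 0;
    HeapIterator iterator(heap);  // construction makes the heap iterable
    for (HeapObject* obj = iterator.next(); obj != NULL;
         obj = iterator.next()) {
      if (obj->IsJSFunction()) count++;
    }
    PrintF("%d JSFunctions\n", count);
  }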
@@ -5768,7 +4245,8 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
isolate_->compilation_cache()->Clear();
uncommit = true;
}
- CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ "idle notification: finalize incremental");
mark_sweeps_since_idle_round_started_++;
gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
@@ -5812,7 +4290,7 @@ bool Heap::IdleNotification(int hint) {
return false;
}
- if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
+ if (!FLAG_incremental_marking || isolate_->serializer_enabled()) {
return IdleGlobalGC();
}
@@ -5822,17 +4300,8 @@ bool Heap::IdleNotification(int hint) {
// An incremental GC progresses as follows:
// 1. many incremental marking steps,
// 2. one old space mark-sweep-compact,
- // 3. many lazy sweep steps.
// Use mark-sweep-compact events to count incremental GCs in a round.
- if (incremental_marking()->IsStopped()) {
- if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
- !IsSweepingComplete() &&
- !AdvanceSweepers(static_cast<int>(step_size))) {
- return false;
- }
- }
-
if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
if (EnoughGarbageSinceLastIdleRound()) {
StartIdleRound();
@@ -5868,6 +4337,13 @@ bool Heap::IdleNotification(int hint) {
return true;
}
+  // If IdleNotification is called with a large hint, we will wait for
+  // the sweeper threads here.
+ if (hint >= kMinHintForFullGC &&
+ mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ mark_compact_collector()->WaitUntilSweepingCompleted();
+ }
+
return false;
}
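On the embedder side these heuristics are driven through v8::V8::IdleNotification(hint), where the hint is a heuristic measure of available idle time (roughly milliseconds) and a true return means V8 considers the current idle round finished. A sketched event-loop hook; StopIdleTimer is a hypothetical embedder helper:

  void OnEventLoopIdle(int idle_time_in_ms) {
    // Keep notifying during quiet periods; once this returns true, further
    // calls are pointless until more JavaScript has run.
    if (v8::V8::IdleNotification(idle_time_in_ms)) StopIdleTimer();
  }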
@@ -6038,8 +4514,10 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
return property_cell_space_->Contains(addr);
case LO_SPACE:
return lo_space_->SlowContains(addr);
+ case INVALID_SPACE:
+ break;
}
-
+ UNREACHABLE();
return false;
}
@@ -6047,12 +4525,16 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
#ifdef VERIFY_HEAP
void Heap::Verify() {
CHECK(HasBeenSetUp());
+ HandleScope scope(isolate());
store_buffer()->Verify();
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
+ VerifySmisVisitor smis_visitor;
+ IterateSmiRoots(&smis_visitor);
+
new_space_.Verify();
old_pointer_space_->Verify(&visitor);
@@ -6069,96 +4551,6 @@ void Heap::Verify() {
#endif
-MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupUtf8String(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupOneByteString(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
- int from,
- int length) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupSubStringOneByteString(string,
- from,
- length,
- &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupTwoByteString(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-MaybeObject* Heap::InternalizeString(String* string) {
- if (string->IsInternalizedString()) return string;
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupString(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-bool Heap::InternalizeStringIfExists(String* string, String** result) {
- if (string->IsInternalizedString()) {
- *result = string;
- return true;
- }
- return string_table()->LookupStringIfExists(string, result);
-}
-
-
void Heap::ZapFromSpace() {
NewSpacePageIterator it(new_space_.FromSpaceStart(),
new_space_.FromSpaceEnd());
@@ -6394,6 +4786,14 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
}
+void Heap::IterateSmiRoots(ObjectVisitor* v) {
+ // Acquire execution access since we are going to read stack limit values.
+ ExecutionAccess access(isolate());
+ v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
+ v->Synchronize(VisitorSynchronization::kSmiRootList);
+}
+
+
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -6408,12 +4808,9 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Relocatable::Iterate(isolate_, v);
v->Synchronize(VisitorSynchronization::kRelocatable);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- isolate_->debug()->Iterate(v);
if (isolate_->deoptimizer_data() != NULL) {
isolate_->deoptimizer_data()->Iterate(v);
}
-#endif
v->Synchronize(VisitorSynchronization::kDebug);
isolate_->compilation_cache()->Iterate(v);
v->Synchronize(VisitorSynchronization::kCompilationCache);
@@ -6477,25 +4874,37 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semispace_size,
- intptr_t max_old_gen_size,
- intptr_t max_executable_size) {
+bool Heap::ConfigureHeap(int max_semi_space_size,
+ int max_old_space_size,
+ int max_executable_size,
+ size_t code_range_size) {
if (HasBeenSetUp()) return false;
- if (FLAG_stress_compaction) {
- // This will cause more frequent GCs when stressing.
- max_semispace_size_ = Page::kPageSize;
+ // Overwrite default configuration.
+ if (max_semi_space_size > 0) {
+ max_semi_space_size_ = max_semi_space_size * MB;
+ }
+ if (max_old_space_size > 0) {
+ max_old_generation_size_ = max_old_space_size * MB;
+ }
+ if (max_executable_size > 0) {
+ max_executable_size_ = max_executable_size * MB;
}
- if (max_semispace_size > 0) {
- if (max_semispace_size < Page::kPageSize) {
- max_semispace_size = Page::kPageSize;
- if (FLAG_trace_gc) {
- PrintPID("Max semispace size cannot be less than %dkbytes\n",
- Page::kPageSize >> 10);
- }
- }
- max_semispace_size_ = max_semispace_size;
+ // If max space size flags are specified overwrite the configuration.
+ if (FLAG_max_semi_space_size > 0) {
+ max_semi_space_size_ = FLAG_max_semi_space_size * MB;
+ }
+ if (FLAG_max_old_space_size > 0) {
+ max_old_generation_size_ = FLAG_max_old_space_size * MB;
+ }
+ if (FLAG_max_executable_size > 0) {
+ max_executable_size_ = FLAG_max_executable_size * MB;
+ }
+
+ if (FLAG_stress_compaction) {
+ // This will cause more frequent GCs when stressing.
+ max_semi_space_size_ = Page::kPageSize;
}
if (Snapshot::IsEnabled()) {
@@ -6504,22 +4913,17 @@ bool Heap::ConfigureHeap(int max_semispace_size,
// write-barrier code that relies on the size and alignment of new
// space. We therefore cannot use a larger max semispace size
// than the default reserved semispace size.
- if (max_semispace_size_ > reserved_semispace_size_) {
- max_semispace_size_ = reserved_semispace_size_;
+ if (max_semi_space_size_ > reserved_semispace_size_) {
+ max_semi_space_size_ = reserved_semispace_size_;
if (FLAG_trace_gc) {
- PrintPID("Max semispace size cannot be more than %dkbytes\n",
+ PrintPID("Max semi-space size cannot be more than %d kbytes\n",
reserved_semispace_size_ >> 10);
}
}
} else {
// If we are not using snapshots we reserve space for the actual
// max semispace size.
- reserved_semispace_size_ = max_semispace_size_;
- }
-
- if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
- if (max_executable_size > 0) {
- max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
+ reserved_semispace_size_ = max_semi_space_size_;
}
// The max executable size must be less than or equal to the max old
@@ -6530,38 +4934,45 @@ bool Heap::ConfigureHeap(int max_semispace_size,
// The new space size must be a power of two to support single-bit testing
// for containment.
- max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
+ max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_);
reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
- initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
- // The external allocation limit should be below 256 MB on all architectures
- // to avoid unnecessary low memory notifications, as that is the threshold
- // for some embedders.
- external_allocation_limit_ = 12 * max_semispace_size_;
- ASSERT(external_allocation_limit_ <= 256 * MB);
+ if (FLAG_min_semi_space_size > 0) {
+ int initial_semispace_size = FLAG_min_semi_space_size * MB;
+ if (initial_semispace_size > max_semi_space_size_) {
+ initial_semispace_size_ = max_semi_space_size_;
+ if (FLAG_trace_gc) {
+        PrintPID("Min semi-space size cannot be more than the maximum "
+                 "semi-space size of %d MB\n", max_semi_space_size_);
+ }
+ } else {
+ initial_semispace_size_ = initial_semispace_size;
+ }
+ }
+
+ initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
- max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
- Page::kPageSize),
- RoundUp(max_old_generation_size_,
- Page::kPageSize));
+ max_old_generation_size_ =
+ Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
+ max_old_generation_size_);
// We rely on being able to allocate new arrays in paged spaces.
- ASSERT(MaxRegularSpaceAllocationSize() >=
+ ASSERT(Page::kMaxRegularHeapObjectSize >=
(JSArray::kSize +
FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
+ code_range_size_ = code_range_size * MB;
+
configured_ = true;
return true;
}
bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
- static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
- static_cast<intptr_t>(FLAG_max_executable_size) * MB);
+ return ConfigureHeap(0, 0, 0, 0);
}
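After this change the four arguments are megabyte counts, a zero keeps the built-in default for that dimension, and the --max-semi-space-size, --max-old-space-size and --max-executable-size flags still override whatever was passed. A sketched explicit configuration with illustrative values:

  bool ConfigureForDesktop(Heap* heap) {
    // 8 MB semispaces, 700 MB old generation, a 256 MB executable limit
    // and a 512 MB code range; a zero in any slot keeps that default.
    return heap->ConfigureHeap(8, 700, 256, 512);
  }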
@@ -6615,14 +5026,6 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
}
-bool Heap::AdvanceSweepers(int step_size) {
- ASSERT(isolate()->num_sweeper_threads() == 0);
- bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
- sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
- return sweeping_complete;
-}
-
-
int64_t Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
@@ -6631,8 +5034,49 @@ int64_t Heap::PromotedExternalMemorySize() {
}
+intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
+ int freed_global_handles) {
+ const int kMaxHandles = 1000;
+ const int kMinHandles = 100;
+ double min_factor = 1.1;
+ double max_factor = 4;
+ // We set the old generation growing factor to 2 to grow the heap slower on
+ // memory-constrained devices.
+ if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
+ max_factor = 2;
+ }
+ // If there are many freed global handles, then the next full GC will
+ // likely collect a lot of garbage. Choose the heap growing factor
+ // depending on freed global handles.
+ // TODO(ulan, hpayer): Take into account mutator utilization.
+ double factor;
+ if (freed_global_handles <= kMinHandles) {
+ factor = max_factor;
+ } else if (freed_global_handles >= kMaxHandles) {
+ factor = min_factor;
+ } else {
+ // Compute factor using linear interpolation between points
+ // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
+ factor = max_factor -
+ (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
+ (kMaxHandles - kMinHandles);
+ }
+
+ if (FLAG_stress_compaction ||
+ mark_compact_collector()->reduce_memory_footprint_) {
+ factor = min_factor;
+ }
+
+ intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+ limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+ limit += new_space_.Capacity();
+ intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+ return Min(limit, halfway_to_the_max);
+}
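Worked example: with 550 freed handles (halfway between kMinHandles and kMaxHandles) the interpolation gives factor = 4 - 450 * 2.9 / 900 = 2.55, so a 100 MB old generation may grow to 255 MB plus new-space capacity, capped halfway to the configured maximum. A standalone reimplementation of just the factor computation, for inspection:

  double GrowingFactor(int freed_global_handles) {
    const int kMaxHandles = 1000, kMinHandles = 100;
    const double min_factor = 1.1, max_factor = 4;
    if (freed_global_handles <= kMinHandles) return max_factor;
    if (freed_global_handles >= kMaxHandles) return min_factor;
    // Linear interpolation between (kMinHandles, max_factor) and
    // (kMaxHandles, min_factor).
    return max_factor -
           (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
               (kMaxHandles - kMinHandles);
  }
  // GrowingFactor(550) == 2.55; GrowingFactor(100) == 4.0.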
+
+
void Heap::EnableInlineAllocation() {
- ASSERT(inline_allocation_disabled_);
+ if (!inline_allocation_disabled_) return;
inline_allocation_disabled_ = false;
// Update inline allocation limit for new space.
@@ -6641,7 +5085,7 @@ void Heap::EnableInlineAllocation() {
void Heap::DisableInlineAllocation() {
- ASSERT(!inline_allocation_disabled_);
+ if (inline_allocation_disabled_) return;
inline_allocation_disabled_ = true;
// Update inline allocation limit for new space.
@@ -6683,7 +5127,7 @@ bool Heap::SetUp() {
if (!ConfigureHeapDefault()) return false;
}
- CallOnce(&initialize_gc_once, &InitializeGCOnce);
+ base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
MarkMapPointersAsEncoded(false);
@@ -6692,9 +5136,10 @@ bool Heap::SetUp() {
return false;
// Set up new space.
- if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
+ if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
return false;
}
+ new_space_top_after_last_gc_ = new_space()->top();
// Initialize old pointer space.
old_pointer_space_ =
@@ -6714,16 +5159,10 @@ bool Heap::SetUp() {
if (old_data_space_ == NULL) return false;
if (!old_data_space_->SetUp()) return false;
+ if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
+
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
- // On 64-bit platform(s), we put all code objects in a 2 GB range of
- // virtual address space, so that they can call each other with near calls.
- if (code_range_size_ > 0) {
- if (!isolate_->code_range()->SetUp(code_range_size_)) {
- return false;
- }
- }
-
code_space_ =
new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
@@ -6768,7 +5207,7 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
- if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
+ mark_compact_collector()->SetUp();
return true;
}
@@ -6777,14 +5216,15 @@ bool Heap::SetUp() {
bool Heap::CreateHeapObjects() {
// Create initial maps.
if (!CreateInitialMaps()) return false;
- if (!CreateApiObjects()) return false;
+ CreateApiObjects();
// Create initial objects
- if (!CreateInitialObjects()) return false;
+ CreateInitialObjects();
+ CHECK_EQ(0, gc_count_);
- native_contexts_list_ = undefined_value();
- array_buffers_list_ = undefined_value();
- allocation_sites_list_ = undefined_value();
+ set_native_contexts_list(undefined_value());
+ set_array_buffers_list(undefined_value());
+ set_allocation_sites_list(undefined_value());
weak_object_to_code_table_ = undefined_value();
return true;
}
@@ -6911,9 +5351,6 @@ void Heap::TearDown() {
incremental_marking()->TearDown();
isolate_->memory_allocator()->TearDown();
-
- delete relocation_mutex_;
- relocation_mutex_ = NULL;
}
@@ -6961,24 +5398,27 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
}
-MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
- DependentCode* dep) {
- ASSERT(!InNewSpace(obj));
- ASSERT(!InNewSpace(dep));
- MaybeObject* maybe_obj =
- WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
- WeakHashTable* table;
- if (!maybe_obj->To(&table)) return maybe_obj;
- if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
+// TODO(ishell): Find a better place for this.
+void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
+ Handle<DependentCode> dep) {
+ ASSERT(!InNewSpace(*obj));
+ ASSERT(!InNewSpace(*dep));
+ // This handle scope keeps the table handle local to this function, which
+ // allows us to safely skip write barriers in table update operations.
+ HandleScope scope(isolate());
+ Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
+ isolate());
+ table = WeakHashTable::Put(table, obj, dep);
+
+ if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
}
- set_weak_object_to_code_table(table);
- ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
- return weak_object_to_code_table_;
+ set_weak_object_to_code_table(*table);
+ ASSERT_EQ(*dep, table->Lookup(obj));
}
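Because WeakHashTable::Put may grow the table and therefore allocate, the function now works on handles throughout and only writes the raw table pointer back at the end. A sketched call site, assuming the caller already holds handlified arguments:

  void RecordDependency(Isolate* isolate, Handle<Object> object,
                        Handle<DependentCode> dep) {
    Heap* heap = isolate->heap();
    heap->EnsureWeakObjectToCodeTable();
    heap->AddWeakObjectToCodeDependency(object, dep);
    // Raw pointers taken before the call may be stale; re-read instead.
    DependentCode* current = heap->LookupWeakObjectToCodeDependency(object);
    USE(current);
  }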
-DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
if (dep->IsDependentCode()) return DependentCode::cast(dep);
return DependentCode::cast(empty_fixed_array());
@@ -6987,11 +5427,16 @@ DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
void Heap::EnsureWeakObjectToCodeTable() {
if (!weak_object_to_code_table()->IsHashTable()) {
- set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
+ set_weak_object_to_code_table(*WeakHashTable::New(
+ isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY, TENURED));
}
}
+void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
+ v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
+}
+
#ifdef DEBUG
class PrintHandleVisitor: public ObjectVisitor {
@@ -7218,7 +5663,9 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
HeapIterator::HeapIterator(Heap* heap)
- : heap_(heap),
+ : make_heap_iterable_helper_(heap),
+ no_heap_allocation_(),
+ heap_(heap),
filtering_(HeapIterator::kNoFiltering),
filter_(NULL) {
Init();
@@ -7227,7 +5674,9 @@ HeapIterator::HeapIterator(Heap* heap)
HeapIterator::HeapIterator(Heap* heap,
HeapIterator::HeapObjectsFiltering filtering)
- : heap_(heap),
+ : make_heap_iterable_helper_(heap),
+ no_heap_allocation_(),
+ heap_(heap),
filtering_(filtering),
filter_(NULL) {
Init();
@@ -7388,9 +5837,8 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
HeapObject* obj = HeapObject::cast(*p);
- Object* map = obj->map();
-
- if (!map->IsHeapObject()) return; // visited before
+ MapWord map_word = obj->map_word();
+ if (!map_word.ToMap()->IsHeapObject()) return; // visited before
if (found_target_in_trace_) return; // stop if target found
object_stack_.Add(obj);
@@ -7404,11 +5852,11 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
bool is_native_context = SafeIsNativeContext(obj);
// not visited yet
- Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
-
- Address map_addr = map_p->address();
+ Map* map = Map::cast(map_word.ToMap());
- obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
+ MapWord marked_map_word =
+ MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
+ obj->set_map_word(marked_map_word);
// Scan the object body.
if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
@@ -7419,17 +5867,16 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
mark_visitor->VisitPointers(start, end);
} else {
- obj->IterateBody(map_p->instance_type(),
- obj->SizeFromMap(map_p),
- mark_visitor);
+ obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
}
// Scan the map after the body because the body is a lot more interesting
// when doing leak detection.
- MarkRecursively(&map, mark_visitor);
+ MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
- if (!found_target_in_trace_) // don't pop if found the target
+ if (!found_target_in_trace_) { // don't pop if found the target
object_stack_.RemoveLast();
+ }
}
@@ -7438,25 +5885,18 @@ void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
HeapObject* obj = HeapObject::cast(*p);
- Object* map = obj->map();
-
- if (map->IsHeapObject()) return; // unmarked already
-
- Address map_addr = reinterpret_cast<Address>(map);
-
- map_addr -= kMarkTag;
+ MapWord map_word = obj->map_word();
+ if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
- ASSERT_TAG_ALIGNED(map_addr);
+ MapWord unmarked_map_word =
+ MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
+ obj->set_map_word(unmarked_map_word);
- HeapObject* map_p = HeapObject::FromAddress(map_addr);
+ Map* map = Map::cast(unmarked_map_word.ToMap());
- obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
+ UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
- UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
-
- obj->IterateBody(Map::cast(map_p)->instance_type(),
- obj->SizeFromMap(Map::cast(map_p)),
- unmark_visitor);
+ obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
}
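The mark/unmark pair exploits pointer tagging: adding kMarkTag to the raw map word makes it stop decoding to a valid HeapObject pointer, so "map word is not a heap object" doubles as the visited bit and no side table is needed. A compact sketch of the round-trip, using the MapWord raw-value accessors introduced above:

  void ToggleVisitedSketch(HeapObject* obj) {
    // Mark: the tagged map word no longer decodes to a HeapObject.
    MapWord word = obj->map_word();
    obj->set_map_word(MapWord::FromRawValue(word.ToRawValue() + kMarkTag));
    ASSERT(!obj->map_word().ToMap()->IsHeapObject());  // reads as visited
    // Unmark: subtracting the tag restores the original map.
    obj->set_map_word(
        MapWord::FromRawValue(obj->map_word().ToRawValue() - kMarkTag));
  }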
@@ -7528,7 +5968,6 @@ GCTracer::GCTracer(Heap* heap,
full_gc_count_(0),
allocated_since_last_gc_(0),
spent_in_mutator_(0),
- promoted_objects_size_(0),
nodes_died_in_new_space_(0),
nodes_copied_in_new_space_(0),
nodes_promoted_(0),
@@ -7651,8 +6090,9 @@ GCTracer::~GCTracer() {
PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
- PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
- PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
+ PrintF("sweep=%.2f ", scopes_[Scope::MC_SWEEP]);
+ PrintF("sweepns=%.2f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
+ PrintF("sweepos=%.2f ", scopes_[Scope::MC_SWEEP_OLDSPACE]);
PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
@@ -7674,10 +6114,14 @@ GCTracer::~GCTracer() {
PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
- PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
+ PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_);
+ PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
+ heap_->semi_space_copied_object_size_);
PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
PrintF("nodes_promoted=%d ", nodes_promoted_);
+ PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
+ PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
if (collector_ == SCAVENGER) {
PrintF("stepscount=%d ", steps_count_since_last_gc_);
@@ -7706,19 +6150,21 @@ const char* GCTracer::CollectorString() {
}
-int KeyedLookupCache::Hash(Map* map, Name* name) {
+int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
+ DisallowHeapAllocation no_gc;
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}
-int KeyedLookupCache::Lookup(Map* map, Name* name) {
+int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
+ DisallowHeapAllocation no_gc;
int index = (Hash(map, name) & kHashMask);
for (int i = 0; i < kEntriesPerBucket; i++) {
Key& key = keys_[index + i];
- if ((key.map == map) && key.name->Equals(name)) {
+ if ((key.map == *map) && key.name->Equals(*name)) {
return field_offsets_[index + i];
}
}
@@ -7726,18 +6172,20 @@ int KeyedLookupCache::Lookup(Map* map, Name* name) {
}
-void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
+void KeyedLookupCache::Update(Handle<Map> map,
+ Handle<Name> name,
+ int field_offset) {
+ DisallowHeapAllocation no_gc;
if (!name->IsUniqueName()) {
- String* internalized_string;
- if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
- String::cast(name), &internalized_string)) {
+ if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
+ Handle<String>::cast(name)).
+ ToHandle(&name)) {
return;
}
- name = internalized_string;
}
// This cache is cleared only between mark compact passes, so we expect the
// cache to only contain old space names.
- ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));
+ ASSERT(!map->GetIsolate()->heap()->InNewSpace(*name));
int index = (Hash(map, name) & kHashMask);
// After a GC there will be free slots, so we use them in order (this may
@@ -7746,8 +6194,8 @@ void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
Key& key = keys_[index];
Object* free_entry_indicator = NULL;
if (key.map == free_entry_indicator) {
- key.map = map;
- key.name = name;
+ key.map = *map;
+ key.name = *name;
field_offsets_[index + i] = field_offset;
return;
}
@@ -7763,8 +6211,8 @@ void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
// Write the new first entry.
Key& key = keys_[index];
- key.map = map;
- key.name = name;
+ key.map = *map;
+ key.name = *name;
field_offsets_[index] = field_offset;
}
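KeyedLookupCache is a small fixed-size cache keyed by (map, name): Lookup probes the kEntriesPerBucket ways of one hash bucket, and Update shifts older entries down before writing the new pair at the front. A usage sketch from a hypothetical IC slow path; SlowFieldLookup stands in for the real property walk:

  int CachedFieldOffset(Isolate* isolate, Handle<Map> map, Handle<Name> name) {
    KeyedLookupCache* cache = isolate->keyed_lookup_cache();
    int offset = cache->Lookup(map, name);
    if (offset == KeyedLookupCache::kNotFound) {
      offset = SlowFieldLookup(map, name);  // hypothetical slow path
      cache->Update(map, name, offset);
    }
    return offset;
  }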
@@ -7779,39 +6227,6 @@ void DescriptorLookupCache::Clear() {
}
-#ifdef DEBUG
-void Heap::GarbageCollectionGreedyCheck() {
- ASSERT(FLAG_gc_greedy);
- if (isolate_->bootstrapper()->IsActive()) return;
- if (disallow_allocation_failure()) return;
- CollectGarbage(NEW_SPACE);
-}
-#endif
-
-
-TranscendentalCache::SubCache::SubCache(Isolate* isolate, Type t)
- : type_(t),
- isolate_(isolate) {
- uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
- uint32_t in1 = 0xffffffffu; // generated by the FPU.
- for (int i = 0; i < kCacheSize; i++) {
- elements_[i].in[0] = in0;
- elements_[i].in[1] = in1;
- elements_[i].output = NULL;
- }
-}
-
-
-void TranscendentalCache::Clear() {
- for (int i = 0; i < kNumberOfCaches; i++) {
- if (caches_[i] != NULL) {
- delete caches_[i];
- caches_[i] = NULL;
- }
- }
-}
-
-
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
@@ -7994,8 +6409,8 @@ void Heap::CheckpointObjectStats() {
CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
- OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
- OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+ MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+ MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
}
diff --git a/chromium/v8/src/heap.h b/chromium/v8/src/heap.h
index 1c8e0e16e60..d05e3502518 100644
--- a/chromium/v8/src/heap.h
+++ b/chromium/v8/src/heap.h
@@ -1,47 +1,23 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HEAP_H_
#define V8_HEAP_H_
#include <cmath>
-#include "allocation.h"
-#include "assert-scope.h"
-#include "globals.h"
-#include "incremental-marking.h"
-#include "list.h"
-#include "mark-compact.h"
-#include "objects-visiting.h"
-#include "spaces.h"
-#include "splay-tree-inl.h"
-#include "store-buffer.h"
-#include "v8-counters.h"
-#include "v8globals.h"
+#include "src/allocation.h"
+#include "src/assert-scope.h"
+#include "src/counters.h"
+#include "src/globals.h"
+#include "src/incremental-marking.h"
+#include "src/list.h"
+#include "src/mark-compact.h"
+#include "src/objects-visiting.h"
+#include "src/spaces.h"
+#include "src/splay-tree-inl.h"
+#include "src/store-buffer.h"
namespace v8 {
namespace internal {
@@ -60,6 +36,7 @@ namespace internal {
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(Oddball, uninitialized_value, UninitializedValue) \
+ V(Oddball, exception, Exception) \
V(Map, cell_map, CellMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
@@ -72,12 +49,13 @@ namespace internal {
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
- V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
+ V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Map, hash_table_map, HashTableMap) \
+ V(Map, ordered_hash_table_map, OrderedHashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
- V(Smi, stack_limit, StackLimit) \
+ V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
@@ -89,7 +67,7 @@ namespace internal {
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
- V(Object, termination_exception, TerminationException) \
+ V(Oddball, termination_exception, TerminationException) \
V(Smi, hash_seed, HashSeed) \
V(Map, symbol_map, SymbolMap) \
V(Map, string_map, StringMap) \
@@ -109,8 +87,6 @@ namespace internal {
ShortExternalStringWithOneByteDataMap) \
V(Map, internalized_string_map, InternalizedStringMap) \
V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \
- V(Map, cons_internalized_string_map, ConsInternalizedStringMap) \
- V(Map, cons_ascii_internalized_string_map, ConsAsciiInternalizedStringMap) \
V(Map, \
external_internalized_string_map, \
ExternalInternalizedStringMap) \
@@ -132,37 +108,64 @@ namespace internal {
V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
- V(Map, external_byte_array_map, ExternalByteArrayMap) \
- V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
- V(Map, external_short_array_map, ExternalShortArrayMap) \
- V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap) \
- V(Map, external_int_array_map, ExternalIntArrayMap) \
- V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
- V(Map, external_float_array_map, ExternalFloatArrayMap) \
- V(Map, external_double_array_map, ExternalDoubleArrayMap) \
- V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
- V(ExternalArray, empty_external_byte_array, \
- EmptyExternalByteArray) \
- V(ExternalArray, empty_external_unsigned_byte_array, \
- EmptyExternalUnsignedByteArray) \
- V(ExternalArray, empty_external_short_array, EmptyExternalShortArray) \
- V(ExternalArray, empty_external_unsigned_short_array, \
- EmptyExternalUnsignedShortArray) \
- V(ExternalArray, empty_external_int_array, EmptyExternalIntArray) \
- V(ExternalArray, empty_external_unsigned_int_array, \
- EmptyExternalUnsignedIntArray) \
- V(ExternalArray, empty_external_float_array, EmptyExternalFloatArray) \
- V(ExternalArray, empty_external_double_array, EmptyExternalDoubleArray) \
- V(ExternalArray, empty_external_pixel_array, \
- EmptyExternalPixelArray) \
- V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
+ V(Map, external_int8_array_map, ExternalInt8ArrayMap) \
+ V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \
+ V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
+ V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \
+ V(Map, external_int32_array_map, ExternalInt32ArrayMap) \
+ V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \
+ V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \
+ V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \
+ V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \
+ V(ExternalArray, empty_external_int8_array, \
+ EmptyExternalInt8Array) \
+ V(ExternalArray, empty_external_uint8_array, \
+ EmptyExternalUint8Array) \
+ V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \
+ V(ExternalArray, empty_external_uint16_array, \
+ EmptyExternalUint16Array) \
+ V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \
+ V(ExternalArray, empty_external_uint32_array, \
+ EmptyExternalUint32Array) \
+ V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \
+ V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \
+ V(ExternalArray, empty_external_uint8_clamped_array, \
+ EmptyExternalUint8ClampedArray) \
+ V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
+ V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
+ V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
+ V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
+ V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
+ V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
+ V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
+ V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
+ V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
+ V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
+ V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
+ V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
+ V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
+ EmptyFixedUint8ClampedArray) \
+ V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
V(Map, with_context_map, WithContextMap) \
V(Map, block_context_map, BlockContextMap) \
V(Map, module_context_map, ModuleContextMap) \
V(Map, global_context_map, GlobalContextMap) \
- V(Map, oddball_map, OddballMap) \
+ V(Map, undefined_map, UndefinedMap) \
+ V(Map, the_hole_map, TheHoleMap) \
+ V(Map, null_map, NullMap) \
+ V(Map, boolean_map, BooleanMap) \
+ V(Map, uninitialized_map, UninitializedMap) \
+ V(Map, arguments_marker_map, ArgumentsMarkerMap) \
+ V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
+ V(Map, exception_map, ExceptionMap) \
+ V(Map, termination_exception_map, TerminationExceptionMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
V(HeapNumber, nan_value, NanValue) \
@@ -176,26 +179,89 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Smi, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
- V(Smi, real_stack_limit, RealStackLimit) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
- V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
+  V(Cell, undefined_cell, UndefinedCell)                                      \
V(JSObject, observation_state, ObservationState) \
V(Map, external_map, ExternalMap) \
+ V(Object, symbol_registry, SymbolRegistry) \
V(Symbol, frozen_symbol, FrozenSymbol) \
+ V(Symbol, nonexistent_symbol, NonExistentSymbol) \
V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
- V(Symbol, observed_symbol, ObservedSymbol)
+ V(Symbol, observed_symbol, ObservedSymbol) \
+ V(Symbol, uninitialized_symbol, UninitializedSymbol) \
+ V(Symbol, megamorphic_symbol, MegamorphicSymbol) \
+ V(FixedArray, materialized_objects, MaterializedObjects) \
+ V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
+ V(FixedArray, microtask_queue, MicrotaskQueue)
+
+// Entries in this list are limited to Smis and are not visited during GC.
+#define SMI_ROOT_LIST(V) \
+ V(Smi, stack_limit, StackLimit) \
+ V(Smi, real_stack_limit, RealStackLimit) \
+ V(Smi, last_script_id, LastScriptId) \
+ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
+ SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
+// Heap roots that are known to be immortal immovable, for which we can safely
+// skip write barriers.
+#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
+ V(byte_array_map) \
+ V(free_space_map) \
+ V(one_pointer_filler_map) \
+ V(two_pointer_filler_map) \
+ V(undefined_value) \
+ V(the_hole_value) \
+ V(null_value) \
+ V(true_value) \
+ V(false_value) \
+ V(uninitialized_value) \
+ V(cell_map) \
+ V(global_property_cell_map) \
+ V(shared_function_info_map) \
+ V(meta_map) \
+ V(heap_number_map) \
+ V(native_context_map) \
+ V(fixed_array_map) \
+ V(code_map) \
+ V(scope_info_map) \
+ V(fixed_cow_array_map) \
+ V(fixed_double_array_map) \
+ V(constant_pool_array_map) \
+ V(no_interceptor_result_sentinel) \
+ V(hash_table_map) \
+ V(ordered_hash_table_map) \
+ V(empty_fixed_array) \
+ V(empty_byte_array) \
+ V(empty_descriptor_array) \
+ V(empty_constant_pool_array) \
+ V(arguments_marker) \
+ V(symbol_map) \
+ V(sloppy_arguments_elements_map) \
+ V(function_context_map) \
+ V(catch_context_map) \
+ V(with_context_map) \
+ V(block_context_map) \
+ V(module_context_map) \
+ V(global_context_map) \
+ V(undefined_map) \
+ V(the_hole_map) \
+ V(null_map) \
+ V(boolean_map) \
+ V(uninitialized_map) \
+ V(message_object_map) \
+ V(foreign_map) \
+ V(neander_map)
+
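Lists like this expand through a caller-supplied V, so consumers can be generated mechanically from the single source of truth above. A hedged sketch of such a consumer (the function is hypothetical, but each lowercase entry matches a generated Heap root accessor):

  static bool IsImmortalImmovable(Heap* heap, Object* object) {
  #define CHECK_ROOT(name) if (object == heap->name()) return true;
    IMMORTAL_IMMOVABLE_ROOT_LIST(CHECK_ROOT)
  #undef CHECK_ROOT
    return false;
  }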
#define INTERNALIZED_STRING_LIST(V) \
V(Array_string, "Array") \
V(Object_string, "Object") \
@@ -211,6 +277,7 @@ namespace internal {
V(constructor_string, "constructor") \
V(dot_result_string, ".result") \
V(dot_for_string, ".for.") \
+ V(dot_iterable_string, ".iterable") \
V(dot_iterator_string, ".iterator") \
V(dot_generator_object_string, ".generator_object") \
V(eval_string, "eval") \
@@ -239,6 +306,11 @@ namespace internal {
V(String_string, "String") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
+ V(for_string, "for") \
+ V(for_api_string, "for_api") \
+ V(for_intern_string, "for_intern") \
+ V(private_api_string, "private_api") \
+ V(private_intern_string, "private_intern") \
V(Date_string, "Date") \
V(this_string, "this") \
V(to_string_string, "toString") \
@@ -255,7 +327,6 @@ namespace internal {
"KeyedStoreElementMonomorphic") \
V(stack_overflow_string, "kStackOverflowBoilerplate") \
V(illegal_access_string, "illegal access") \
- V(illegal_execution_state_string, "illegal execution state") \
V(get_string, "get") \
V(set_string, "set") \
V(map_field_string, "%map") \
@@ -267,15 +338,7 @@ namespace internal {
V(MakeReferenceError_string, "MakeReferenceError") \
V(MakeSyntaxError_string, "MakeSyntaxError") \
V(MakeTypeError_string, "MakeTypeError") \
- V(invalid_lhs_in_assignment_string, "invalid_lhs_in_assignment") \
- V(invalid_lhs_in_for_in_string, "invalid_lhs_in_for_in") \
- V(invalid_lhs_in_postfix_op_string, "invalid_lhs_in_postfix_op") \
- V(invalid_lhs_in_prefix_op_string, "invalid_lhs_in_prefix_op") \
- V(illegal_return_string, "illegal_return") \
- V(illegal_break_string, "illegal_break") \
- V(illegal_continue_string, "illegal_continue") \
V(unknown_label_string, "unknown_label") \
- V(redeclaration_string, "redeclaration") \
V(space_string, " ") \
V(exec_string, "exec") \
V(zero_string, "0") \
@@ -298,7 +361,9 @@ namespace internal {
V(next_string, "next") \
V(byte_length_string, "byteLength") \
V(byte_offset_string, "byteOffset") \
- V(buffer_string, "buffer")
+ V(buffer_string, "buffer") \
+ V(intl_initialized_marker_string, "v8::intl_initialized_marker") \
+ V(intl_impl_object_string, "v8::intl_object")
// Forward declarations.
class GCTracer;
@@ -451,7 +516,7 @@ class ExternalStringTable {
void TearDown();
private:
- ExternalStringTable() { }
+ explicit ExternalStringTable(Heap* heap) : heap_(heap) { }
friend class Heap;
@@ -481,11 +546,12 @@ enum ArrayStorageAllocationMode {
class Heap {
public:
- // Configure heap size before setup. Return false if the heap has been
+ // Configure heap size in MB before setup. Return false if the heap has been
// set up already.
- bool ConfigureHeap(int max_semispace_size,
- intptr_t max_old_gen_size,
- intptr_t max_executable_size);
+ bool ConfigureHeap(int max_semi_space_size,
+ int max_old_space_size,
+ int max_executable_size,
+ size_t code_range_size);
bool ConfigureHeapDefault();
// Prepares the heap, setting up memory areas that are needed in the isolate
@@ -515,12 +581,11 @@ class Heap {
intptr_t MaxReserved() {
return 4 * reserved_semispace_size_ + max_old_generation_size_;
}
- int MaxSemiSpaceSize() { return max_semispace_size_; }
+ int MaxSemiSpaceSize() { return max_semi_space_size_; }
int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
- int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 4/5; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -617,146 +682,11 @@ class Heap {
return old_data_space_->allocation_limit_address();
}
- // Allocates and initializes a new JavaScript object based on a
- // constructor.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSObject(
- JSFunction* constructor,
- PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite(
- JSFunction* constructor,
- Handle<AllocationSite> allocation_site);
-
- MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
- ScopeInfo* scope_info);
-
- // Allocate a JSArray with no elements
- MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
- ElementsKind elements_kind,
- PretenureFlag pretenure = NOT_TENURED) {
- return AllocateJSArrayAndStorage(elements_kind, 0, 0,
- DONT_INITIALIZE_ARRAY_ELEMENTS,
- pretenure);
- }
-
- // Allocate a JSArray with a specified length but elements that are left
- // uninitialized.
- MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
- ElementsKind elements_kind,
- int length,
- int capacity,
- ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
- PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
- JSArray* array,
- int length,
- int capacity,
- ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
-
- // Allocate a JSArray with no elements
- MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
- FixedArrayBase* array_base,
- ElementsKind elements_kind,
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
- // Returns failure if allocation failed.
// Optionally takes an AllocationSite to be appended in an AllocationMemento.
- MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source,
- AllocationSite* site = NULL);
-
- // Allocates a JS ArrayBuffer object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSArrayBuffer();
-
- // Allocates a Harmony proxy or function proxy.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSProxy(Object* handler,
- Object* prototype);
-
- MUST_USE_RESULT MaybeObject* AllocateJSFunctionProxy(Object* handler,
- Object* call_trap,
- Object* construct_trap,
- Object* prototype);
-
- // Reinitialize a JSReceiver into an (empty) JS object of respective type and
- // size, but keeping the original prototype. The receiver must have at least
- // the size of the new object. The object is reinitialized and behaves as an
- // object that has been freshly allocated.
- // Returns failure if an error occurred, otherwise object.
- MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
- InstanceType type,
- int size);
-
- // Reinitialize a JSGlobalProxy based on a constructor. The object
- // must have the same size as objects allocated using the
- // constructor. The object is reinitialized and behaves as an
- // object that has been freshly allocated using the constructor.
- MUST_USE_RESULT MaybeObject* ReinitializeJSGlobalProxy(
- JSFunction* constructor, JSGlobalProxy* global);
-
- // Allocates and initializes a new JavaScript object based on a map.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure = NOT_TENURED, bool alloc_props = true);
-
- MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
- Map* map, Handle<AllocationSite> allocation_site);
-
- // Allocates a heap object based on the map.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
-
- MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map,
- AllocationSpace space, Handle<AllocationSite> allocation_site);
-
- // Allocates a JS Map in the heap.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateMap(
- InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
-
- // Allocates a partial map for bootstrapping.
- MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
- int instance_size);
-
- // Allocates an empty code cache.
- MUST_USE_RESULT MaybeObject* AllocateCodeCache();
-
- // Allocates a serialized scope info.
- MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
-
- // Allocates an External object for v8's external API.
- MUST_USE_RESULT MaybeObject* AllocateExternal(void* value);
-
- // Allocates an empty PolymorphicCodeCache.
- MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
-
- // Allocates a pre-tenured empty AccessorPair.
- MUST_USE_RESULT MaybeObject* AllocateAccessorPair();
-
- // Allocates an empty TypeFeedbackInfo.
- MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo();
-
- // Allocates an AliasedArgumentsEntry.
- MUST_USE_RESULT MaybeObject* AllocateAliasedArgumentsEntry(int slot);
+ MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
+ AllocationSite* site = NULL);
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
@@ -767,407 +697,38 @@ class Heap {
// For use during bootup.
void RepairFreeListsAfterBoot();
- // Allocates and fully initializes a String. There are two String
- // encodings: ASCII and two byte. One should choose between the three string
- // allocation functions based on the encoding of the string buffer used to
- // initialize the string.
- // - ...FromAscii initializes the string from a buffer that is ASCII
- // encoded (it does not check that the buffer is ASCII encoded) and the
- // result will be ASCII encoded.
- // - ...FromUTF8 initializes the string from a buffer that is UTF-8
- // encoded. If the characters are all single-byte characters, the
- // result will be ASCII encoded, otherwise it will be converted to two
- // byte.
- // - ...FromTwoByte initializes the string from a buffer that is two-byte
- // encoded. If the characters are all single-byte characters, the
- // result will be converted to ASCII, otherwise it will be left as
- // two-byte.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateStringFromOneByte(
- Vector<const uint8_t> str,
- PretenureFlag pretenure = NOT_TENURED);
- // TODO(dcarney): remove this function.
- MUST_USE_RESULT inline MaybeObject* AllocateStringFromOneByte(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED) {
- return AllocateStringFromOneByte(Vector<const uint8_t>::cast(str),
- pretenure);
- }
- MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow(
- Vector<const char> str,
- int non_ascii_start,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte(
- Vector<const uc16> str,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates an internalized string in old space based on the character
- // stream. Returns Failure::RetryAfterGC(requested_bytes, space) if the
- // allocation failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringFromUtf8(
- Vector<const char> str,
- int chars,
- uint32_t hash_field);
-
- MUST_USE_RESULT inline MaybeObject* AllocateOneByteInternalizedString(
- Vector<const uint8_t> str,
- uint32_t hash_field);
-
- MUST_USE_RESULT inline MaybeObject* AllocateTwoByteInternalizedString(
- Vector<const uc16> str,
- uint32_t hash_field);
-
template<typename T>
static inline bool IsOneByte(T t, int chars);
- template<typename T>
- MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field);
-
- template<bool is_one_byte, typename T>
- MUST_USE_RESULT MaybeObject* AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field);
-
- // Allocates and partially initializes a String. There are two String
- // encodings: ASCII and two byte. These functions allocate a string of the
- // given length and set its map and length fields. The characters of the
- // string are uninitialized.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateRawOneByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Computes a single character string where the character has code.
- // A cache is used for ASCII codes.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed. Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
- uint16_t code);
-
- // Allocate a byte array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates an external array of the specified length and type.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateExternalArray(
- int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure);
-
- // Allocate a symbol in old space.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateSymbol();
- MUST_USE_RESULT MaybeObject* AllocatePrivateSymbol();
-
- // Allocate a tenured AllocationSite. Its payload is null.
- MUST_USE_RESULT MaybeObject* AllocateAllocationSite();
-
- // Allocates a fixed array initialized with undefined values
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates an uninitialized fixed array. It must be filled by the caller.
- //
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length);
-
// Move len elements within a given array from index src_index to index
// dst_index.
void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
- // Make a copy of src and return it. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);
-
- // Make a copy of src, set the map, and return the copy. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);
-
- // Make a copy of src and return it. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT inline MaybeObject* CopyFixedDoubleArray(
- FixedDoubleArray* src);
-
- // Make a copy of src, set the map, and return the copy. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap(
- FixedDoubleArray* src, Map* map);
-
- // Make a copy of src and return it. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT inline MaybeObject* CopyConstantPoolArray(
- ConstantPoolArray* src);
-
- // Make a copy of src, set the map, and return the copy. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT MaybeObject* CopyConstantPoolArrayWithMap(
- ConstantPoolArray* src, Map* map);
-
- // Allocates a fixed array initialized with the hole values.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithHoles(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateConstantPoolArray(
- int first_int64_index,
- int first_ptr_index,
- int first_int32_index);
-
- // Allocates a fixed double array with uninitialized values. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedDoubleArray(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a fixed double array with hole values. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedDoubleArrayWithHoles(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // AllocateHashTable is identical to AllocateFixedArray except
- // that the resulting object has hash_table_map as map.
- MUST_USE_RESULT MaybeObject* AllocateHashTable(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a native (but otherwise uninitialized) context.
- MUST_USE_RESULT MaybeObject* AllocateNativeContext();
-
- // Allocate a global context.
- MUST_USE_RESULT MaybeObject* AllocateGlobalContext(JSFunction* function,
- ScopeInfo* scope_info);
-
- // Allocate a module context.
- MUST_USE_RESULT MaybeObject* AllocateModuleContext(ScopeInfo* scope_info);
-
- // Allocate a function context.
- MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
- JSFunction* function);
-
- // Allocate a catch context.
- MUST_USE_RESULT MaybeObject* AllocateCatchContext(JSFunction* function,
- Context* previous,
- String* name,
- Object* thrown_object);
- // Allocate a 'with' context.
- MUST_USE_RESULT MaybeObject* AllocateWithContext(JSFunction* function,
- Context* previous,
- JSReceiver* extension);
-
- // Allocate a block context.
- MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
- Context* previous,
- ScopeInfo* info);
-
- // Allocates a new utility object in the old generation.
- MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
-
- // Allocates a function initialized with a shared part.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFunction(
- Map* function_map,
- SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure = TENURED);
-
- // Arguments object size.
- static const int kArgumentsObjectSize =
+ // Sloppy mode arguments object size.
+ static const int kSloppyArgumentsObjectSize =
JSObject::kHeaderSize + 2 * kPointerSize;
// Strict mode arguments has no callee so it is smaller.
- static const int kArgumentsObjectSizeStrict =
+ static const int kStrictArgumentsObjectSize =
JSObject::kHeaderSize + 1 * kPointerSize;
// Indices for direct access into argument objects.
static const int kArgumentsLengthIndex = 0;
- // callee is only valid in non-strict mode.
+ // callee is only valid in sloppy mode.
static const int kArgumentsCalleeIndex = 1;
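
Worked size arithmetic for these constants, assuming a 64-bit build (kPointerSize == 8) and the usual three-word JSObject header (map, properties, elements):

    // Illustrative arithmetic only:
    //   kSloppyArgumentsObjectSize = 3*8 + 2*8 = 40 bytes (length + callee)
    //   kStrictArgumentsObjectSize = 3*8 + 1*8 = 32 bytes (length only)
    // kArgumentsLengthIndex and kArgumentsCalleeIndex address the in-object
    // slots that follow that header.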
- // Allocates an arguments object - optionally with an elements array.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateArgumentsObject(
- Object* callee, int length);
-
- // Same as NewNumberFromDouble, but may return a preallocated/immutable
- // number object (e.g., minus_zero_value_, nan_value_)
- MUST_USE_RESULT MaybeObject* NumberFromDouble(
- double value, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a HeapNumber from value.
- MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
- double value, PretenureFlag pretenure = NOT_TENURED);
-
- // Converts an int into either a Smi or a HeapNumber object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* NumberFromInt32(
- int32_t value, PretenureFlag pretenure = NOT_TENURED);
-
- // Converts an int into either a Smi or a HeapNumber object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* NumberFromUint32(
- uint32_t value, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a new foreign object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateForeign(
- Address address, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a new SharedFunctionInfo object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateSharedFunctionInfo(Object* name);
-
- // Allocates a new JSMessageObject object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note that this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateJSMessageObject(
- String* type,
- JSArray* arguments,
- int start_position,
- int end_position,
- Object* script,
- Object* stack_trace,
- Object* stack_frames);
-
- // Allocates a new cons string object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateConsString(String* first,
- String* second);
-
- // Allocates a new sub string object which is a substring of an underlying
- // string buffer stretching from the index start (inclusive) to the index
- // end (exclusive).
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateSubString(
- String* buffer,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a new external string object, which is backed by a string
- // resource that resides outside the V8 heap.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource);
- MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource);
-
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
inline void FinalizeExternalString(String* string);
- // Allocates an uninitialized object. The memory is non-executable if the
- // hardware and OS allow.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space);
-
// Initialize a filler object to keep the ability to iterate over the heap
// when shortening objects.
void CreateFillerObjectAt(Address addr, int size);
- // Makes a new native code object
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed. On success, the pointer to the Code object is stored in the
- // self_reference. This allows generated code to reference its own Code
- // object by containing this pointer.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateCode(
- const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false,
- bool crankshafted = false,
- int prologue_offset = Code::kPrologueOffsetNotSet);
-
- MUST_USE_RESULT MaybeObject* CopyCode(Code* code);
+ bool CanMoveObjectStart(HeapObject* object);
- // Copy the code and scope info part of the code object, but insert
- // the provided data as the relocation information.
- MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info);
-
- // Finds the internalized copy for string in the string table.
- // If not found, a new string is added to the table and returned.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* InternalizeUtf8String(Vector<const char> str);
- MUST_USE_RESULT MaybeObject* InternalizeUtf8String(const char* str) {
- return InternalizeUtf8String(CStrVector(str));
- }
- MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
- Vector<const uint8_t> str);
- MUST_USE_RESULT MaybeObject* InternalizeTwoByteString(Vector<const uc16> str);
- MUST_USE_RESULT MaybeObject* InternalizeString(String* str);
- MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
- Handle<SeqOneByteString> string, int from, int length);
-
- bool InternalizeStringIfExists(String* str, String** result);
- bool InternalizeTwoCharsStringIfExists(String* str, String** result);
-
- // Compute the matching internalized string map for a string if possible.
- // NULL is returned if string is in new space or not flattened.
- Map* InternalizedStringMapForString(String* str);
-
- // Tries to flatten a string before compare operation.
- //
- // Returns a failure in case it was decided that flattening was
- // necessary and failed. Note, if flattening is not necessary the
- // string might stay non-flat even when not a failure is returned.
- //
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* PrepareForCompare(String* str);
+ enum InvocationMode { FROM_GC, FROM_MUTATOR };
+
+ // Maintain marking consistency for IncrementalMarking.
+ void AdjustLiveBytes(Address address, int by, InvocationMode mode);
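
A sketch of how these two pieces pair up when runtime code shortens a live object (hypothetical helper; only the two Heap methods are from this header):

    void ShrinkObject(Heap* heap, HeapObject* object, int old_size, int delta) {
      // Fill the freed tail so heap iteration still sees valid objects.
      Address new_end = object->address() + old_size - delta;
      heap->CreateFillerObjectAt(new_end, delta);
      // Keep incremental marking's live-byte accounting consistent.
      heap->AdjustLiveBytes(object->address(), -delta, Heap::FROM_MUTATOR);
    }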
// Converts the given boolean condition to JavaScript boolean value.
inline Object* ToBoolean(bool condition);
@@ -1175,8 +736,10 @@ class Heap {
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- inline bool CollectGarbage(AllocationSpace space,
- const char* gc_reason = NULL);
+ inline bool CollectGarbage(
+ AllocationSpace space,
+ const char* gc_reason = NULL,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
static const int kNoGCFlags = 0;
static const int kSweepPreciselyMask = 1;
@@ -1191,7 +754,10 @@ class Heap {
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
- void CollectAllGarbage(int flags, const char* gc_reason = NULL);
+ void CollectAllGarbage(
+ int flags,
+ const char* gc_reason = NULL,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
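
Usage sketch for the extended entry points; existing call sites compile unchanged because the new parameter defaults to kNoGCCallbackFlags:

    heap->CollectGarbage(NEW_SPACE, "test scavenge");  // minor GC, no flags
    heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                            "heap snapshot",
                            kGCCallbackFlagConstructRetainedObjectInfos);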
// Last-resort GC; should try to squeeze out as much garbage as possible.
void CollectAllAvailableGarbage(const char* gc_reason = NULL);
@@ -1199,17 +765,9 @@ class Heap {
// Check whether the heap is currently iterable.
bool IsHeapIterable();
- // Ensure that we have swept all spaces in such a way that we can iterate
- // over all objects. May cause a GC.
- void EnsureHeapIsIterable();
-
// Notify the heap that a context has been disposed.
int NotifyContextDisposed();
- // Utility to invoke the scavenger. This is needed in test code to
- // ensure correct callback for weak global handles.
- void PerformScavenge();
-
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
@@ -1226,11 +784,6 @@ class Heap {
PromotionQueue* promotion_queue() { return &promotion_queue_; }
-#ifdef DEBUG
- // Utility used with flag gc-greedy.
- void GarbageCollectionGreedyCheck();
-#endif
-
void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
GCType gc_type_filter,
bool pass_isolate = true);
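
Registration sketch (assuming the v8::Isolate::GCPrologueCallback signature, which receives the isolate because pass_isolate defaults to true):

    static void OnMarkSweepStart(v8::Isolate* isolate, v8::GCType type,
                                 v8::GCCallbackFlags flags) {
      // e.g. drop caches of raw heap pointers before objects may move
    }
    // ...later, during setup:
    heap->AddGCPrologueCallback(OnMarkSweepStart, kGCTypeMarkSweepCompact);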
@@ -1276,21 +829,30 @@ class Heap {
void set_native_contexts_list(Object* object) {
native_contexts_list_ = object;
}
- Object* native_contexts_list() { return native_contexts_list_; }
+ Object* native_contexts_list() const { return native_contexts_list_; }
void set_array_buffers_list(Object* object) {
array_buffers_list_ = object;
}
- Object* array_buffers_list() { return array_buffers_list_; }
+ Object* array_buffers_list() const { return array_buffers_list_; }
void set_allocation_sites_list(Object* object) {
allocation_sites_list_ = object;
}
Object* allocation_sites_list() { return allocation_sites_list_; }
+
+ // Used in CreateAllocationSiteStub and the (de)serializer.
Object** allocation_sites_list_address() { return &allocation_sites_list_; }
Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
+ void set_encountered_weak_collections(Object* weak_collection) {
+ encountered_weak_collections_ = weak_collection;
+ }
+ Object* encountered_weak_collections() const {
+ return encountered_weak_collections_;
+ }
+
// Number of mark-sweeps.
unsigned int ms_count() { return ms_count_; }
@@ -1298,6 +860,9 @@ class Heap {
void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over entries in the smi roots list. Only interesting to the
+ // serializer/deserializer, since GC does not care about smis.
+ void IterateSmiRoots(ObjectVisitor* v);
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
@@ -1365,6 +930,10 @@ class Heap {
roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
}
+ void public_set_materialized_objects(FixedArray* objects) {
+ roots_[kMaterializedObjectsRootIndex] = objects;
+ }
+
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
@@ -1372,11 +941,6 @@ class Heap {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
- // Get address of native contexts list for serialization support.
- Object** native_contexts_list_address() {
- return &native_contexts_list_;
- }
-
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
void Verify();
@@ -1416,14 +980,6 @@ class Heap {
// Print short heap statistics.
void PrintShortHeapStatistics();
- // Makes a new internalized string object
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateInternalizedString(
- const char* str, int length, int hash);
- MUST_USE_RESULT MaybeObject* CreateInternalizedString(String* str);
-
// Write barrier support for address[offset] = o.
INLINE(void RecordWrite(Address address, int offset));
@@ -1440,10 +996,6 @@ class Heap {
allocation_timeout_ = timeout;
}
- bool disallow_allocation_failure() {
- return disallow_allocation_failure_;
- }
-
void TracePathToObjectFrom(Object* target, Object* root);
void TracePathToObject(Object* target);
void TracePathToGlobal();
@@ -1456,10 +1008,20 @@ class Heap {
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ enum ScratchpadSlotMode {
+ IGNORE_SCRATCHPAD_SLOT,
+ RECORD_SCRATCHPAD_SLOT
+ };
+
+ // If an object has an AllocationMemento trailing it, return it, otherwise
+ // return NULL.
+ inline AllocationMemento* FindAllocationMemento(HeapObject* object);
+
// An object may have an AllocationSite associated with it through a trailing
// AllocationMemento. Its feedback should be updated when objects are found
// in the heap.
- static inline void UpdateAllocationSiteFeedback(HeapObject* object);
+ static inline void UpdateAllocationSiteFeedback(
+ HeapObject* object, ScratchpadSlotMode mode);
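
Call-pattern sketch for the two scratchpad modes (hypothetical call site):

    // Record pretenuring feedback for a surviving object; only pay the
    // scratchpad bookkeeping cost when the caller asks for it.
    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
    // ...or, where scratchpad updates are undesirable:
    Heap::UpdateAllocationSiteFeedback(object, Heap::IGNORE_SCRATCHPAD_SLOT);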
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
@@ -1469,35 +1031,7 @@ class Heap {
// Support for the API.
//
- bool CreateApiObjects();
-
- // Attempt to find the number in a small cache. If we find it, return
- // the string representation of the number. Otherwise return undefined.
- Object* GetNumberStringCache(Object* number);
-
- // Update the cache with a new number-string pair.
- void SetNumberStringCache(Object* number, String* str);
-
- // Adjusts the amount of registered external memory.
- // Returns the adjusted value.
- inline int64_t AdjustAmountOfExternalAllocatedMemory(
- int64_t change_in_bytes);
-
- // This is only needed for testing high promotion mode.
- void SetNewSpaceHighPromotionModeActive(bool mode) {
- new_space_high_promotion_mode_active_ = mode;
- }
-
- // Returns the allocation mode (pre-tenuring) based on observed promotion
- // rates of previous collections.
- inline PretenureFlag GetPretenureMode() {
- return FLAG_pretenuring && new_space_high_promotion_mode_active_
- ? TENURED : NOT_TENURED;
- }
-
- inline Address* NewSpaceHighPromotionModeActiveAddress() {
- return reinterpret_cast<Address*>(&new_space_high_promotion_mode_active_);
- }
+ void CreateApiObjects();
inline intptr_t PromotedTotalSize() {
int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
@@ -1517,21 +1051,41 @@ class Heap {
static const intptr_t kMinimumOldGenerationAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
- intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
- const int divisor = FLAG_stress_compaction ? 10 :
- new_space_high_promotion_mode_active_ ? 1 : 3;
- intptr_t limit =
- Max(old_gen_size + old_gen_size / divisor,
- kMinimumOldGenerationAllocationLimit);
- limit += new_space_.Capacity();
- // TODO(hpayer): Can be removed when pretenuring is supported for all
- // allocation sites.
- if (IsHighSurvivalRate() && IsStableOrIncreasingSurvivalTrend()) {
- limit *= 2;
- }
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
- }
+ static const int kPointerMultiplier = i::kPointerSize / 4;
+
+ // The new space size has to be a power of 2. Sizes are in MB.
+ static const int kMaxSemiSpaceSizeLowMemoryDevice =
+ 1 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeMediumMemoryDevice =
+ 4 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeHighMemoryDevice =
+ 8 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeHugeMemoryDevice =
+ 8 * kPointerMultiplier;
+
+ // The old space size has to be a multiple of Page::kPageSize.
+ // Sizes are in MB.
+ static const int kMaxOldSpaceSizeLowMemoryDevice =
+ 128 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeMediumMemoryDevice =
+ 256 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHighMemoryDevice =
+ 512 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHugeMemoryDevice =
+ 700 * kPointerMultiplier;
+
+ // The executable size has to be a multiple of Page::kPageSize.
+ // Sizes are in MB.
+ static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
+ static const int kMaxExecutableSizeMediumMemoryDevice =
+ 192 * kPointerMultiplier;
+ static const int kMaxExecutableSizeHighMemoryDevice =
+ 256 * kPointerMultiplier;
+ static const int kMaxExecutableSizeHugeMemoryDevice =
+ 256 * kPointerMultiplier;
+
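
Worked example of the multiplier (editorial arithmetic, not in the patch):

    // kPointerMultiplier = kPointerSize / 4, so a 64-bit build doubles each
    // class limit relative to a 32-bit build:
    //   kMaxSemiSpaceSizeHighMemoryDevice = 8 * 2 = 16 MB    (8 MB on 32-bit)
    //   kMaxOldSpaceSizeHugeMemoryDevice  = 700 * 2 = 1400 MB (700 MB on 32-bit)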
+ intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
+ int freed_global_handles);
// Indicates whether inline bump-pointer allocation has been disabled.
bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1543,7 +1097,7 @@ class Heap {
// Implements the corresponding V8 API function.
bool IdleNotification(int hint);
- // Declare all the root indices.
+ // Declare all the root indices. This defines the root list order.
enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
@@ -1559,15 +1113,22 @@ class Heap {
#undef DECLARE_STRUCT_MAP
kStringTableRootIndex,
+
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+ SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+
+ kRootListLength,
kStrongRootListLength = kStringTableRootIndex,
- kRootListLength
+ kSmiRootsStart = kStringTableRootIndex + 1
};
- STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
- STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex);
- STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
- STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
- STATIC_CHECK(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
+ STATIC_ASSERT(kUndefinedValueRootIndex ==
+ Internals::kUndefinedValueRootIndex);
+ STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
+ STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
+ STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
+ STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
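
Sketch of the assumed macro expansion that produces this layout (the example entry is hypothetical):

    // A SMI_ROOT_LIST entry such as V(Smi, stack_limit, StackLimit) expands,
    // via ROOT_INDEX_DECLARATION above, to
    //   kStackLimitRootIndex,
    // so all smi roots land in [kSmiRootsStart, kRootListLength), the range
    // IterateSmiRoots() walks for the serializer/deserializer.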
// Generated code can embed direct references to non-writable roots if
// they are in new space.
@@ -1575,18 +1136,18 @@ class Heap {
// Generated code can treat direct references to this root as constant.
bool RootCanBeTreatedAsConstant(RootListIndex root_index);
- MUST_USE_RESULT MaybeObject* NumberToString(
- Object* number, bool check_number_string_cache = true,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* Uint32ToString(
- uint32_t value, bool check_number_string_cache = true);
+ Map* MapForFixedTypedArray(ExternalArrayType array_type);
+ RootListIndex RootIndexForFixedTypedArray(
+ ExternalArrayType array_type);
Map* MapForExternalArrayType(ExternalArrayType array_type);
RootListIndex RootIndexForExternalArrayType(
ExternalArrayType array_type);
RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
+ RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
ExternalArray* EmptyExternalArrayForMap(Map* map);
+ FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -1601,9 +1162,18 @@ class Heap {
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
+ inline void IncrementPromotedObjectsSize(int object_size) {
+ ASSERT(object_size > 0);
+ promoted_objects_size_ += object_size;
+ }
+
+ inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
+ ASSERT(object_size > 0);
+ semi_space_copied_object_size_ += object_size;
+ }
+
inline void IncrementYoungSurvivorsCounter(int survived) {
ASSERT(survived >= 0);
- young_survivors_after_last_gc_ = survived;
survived_since_last_expansion_ += survived;
}
@@ -1701,20 +1271,6 @@ class Heap {
return &incremental_marking_;
}
- bool IsSweepingComplete() {
- return !mark_compact_collector()->IsConcurrentSweepingInProgress() &&
- old_data_space()->IsLazySweepingComplete() &&
- old_pointer_space()->IsLazySweepingComplete();
- }
-
- bool AdvanceSweepers(int step_size);
-
- bool EnsureSweepersProgressed(int step_size) {
- bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
- sweeping_complete &= old_pointer_space()->EnsureSweeperProgress(step_size);
- return sweeping_complete;
- }
-
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
@@ -1727,7 +1283,7 @@ class Heap {
inline Isolate* isolate();
void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
- void CallGCEpilogueCallbacks(GCType gc_type);
+ void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
inline bool OldGenerationAllocationLimitReached();
@@ -1792,6 +1348,16 @@ class Heap {
return amount_of_external_allocated_memory_;
}
+ void DeoptMarkedAllocationSites();
+
+ bool MaximumSizeScavenge() {
+ return maximum_size_scavenges_ > 0;
+ }
+
+ bool DeoptMaybeTenuredAllocationSites() {
+ return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+ }
+
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
@@ -1837,25 +1403,22 @@ class Heap {
class RelocationLock {
public:
explicit RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Lock();
- }
+ heap_->relocation_mutex_.Lock();
}
~RelocationLock() {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Unlock();
- }
+ heap_->relocation_mutex_.Unlock();
}
private:
Heap* heap_;
};
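
Usage sketch; with the mutex now an unconditional member, locking no longer depends on FLAG_concurrent_recompilation (an uncontended Mutex lock is cheap):

    {
      Heap::RelocationLock relocation_lock(heap);
      // ...read object addresses that compaction could otherwise rewrite...
    }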
- MaybeObject* AddWeakObjectToCodeDependency(Object* obj, DependentCode* dep);
+ void AddWeakObjectToCodeDependency(Handle<Object> obj,
+ Handle<DependentCode> dep);
- DependentCode* LookupWeakObjectToCodeDependency(Object* obj);
+ DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj);
void InitializeWeakObjectToCodeTable() {
set_weak_object_to_code_table(undefined_value());
@@ -1863,18 +1426,80 @@ class Heap {
void EnsureWeakObjectToCodeTable();
+ static void FatalProcessOutOfMemory(const char* location,
+ bool take_snapshot = false);
+
+ protected:
+ // Methods made available to tests.
+
+ // Allocates a JS Map in the heap.
+ MUST_USE_RESULT AllocationResult AllocateMap(
+ InstanceType instance_type,
+ int instance_size,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+
+ // Allocates and initializes a new JavaScript object based on a
+ // constructor.
+ // If allocation_site is non-null, then a memento is emitted after the object
+ // that points to the site.
+ MUST_USE_RESULT AllocationResult AllocateJSObject(
+ JSFunction* constructor,
+ PretenureFlag pretenure = NOT_TENURED,
+ AllocationSite* allocation_site = NULL);
+
+ // Allocates and initializes a new JavaScript object based on a map.
+ // Passing an allocation site means that a memento will be created that
+ // points to the site.
+ MUST_USE_RESULT AllocationResult AllocateJSObjectFromMap(
+ Map* map,
+ PretenureFlag pretenure = NOT_TENURED,
+ bool alloc_props = true,
+ AllocationSite* allocation_site = NULL);
+
+ // Allocates a HeapNumber from value.
+ MUST_USE_RESULT AllocationResult AllocateHeapNumber(
+ double value, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocate a byte array of the specified length
+ MUST_USE_RESULT AllocationResult AllocateByteArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates an arguments object - optionally with an elements array.
+ MUST_USE_RESULT AllocationResult AllocateArgumentsObject(
+ Object* callee, int length);
+
+ // Copy the code and scope info part of the code object, but insert
+ // the provided data as the relocation information.
+ MUST_USE_RESULT AllocationResult CopyCode(Code* code,
+ Vector<byte> reloc_info);
+
+ MUST_USE_RESULT AllocationResult CopyCode(Code* code);
+
+ // Allocates a fixed array initialized with undefined values
+ MUST_USE_RESULT AllocationResult AllocateFixedArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
private:
Heap();
+ // The amount of external memory registered through the API and kept
+ // alive by global handles.
+ int64_t amount_of_external_allocated_memory_;
+
+ // Caches the amount of external memory registered at the last global gc.
+ int64_t amount_of_external_allocated_memory_at_last_global_gc_;
+
// This can be calculated directly from a pointer to the heap; however, it is
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_;
Object* roots_[kRootListLength];
- intptr_t code_range_size_;
+ size_t code_range_size_;
int reserved_semispace_size_;
- int max_semispace_size_;
+ int max_semi_space_size_;
int initial_semispace_size_;
intptr_t max_old_generation_size_;
intptr_t max_executable_size_;
@@ -1909,6 +1534,7 @@ class Heap {
LargeObjectSpace* lo_space_;
HeapState gc_state_;
int gc_post_processing_depth_;
+ Address new_space_top_after_last_gc_;
// Returns the amount of external memory registered since last global gc.
int64_t PromotedExternalMemorySize();
@@ -1939,37 +1565,14 @@ class Heap {
// variable holds the value indicating the number of allocations
// remain until the next failure and garbage collection.
int allocation_timeout_;
-
- // Do we expect to be able to handle allocation failure at this
- // time?
- bool disallow_allocation_failure_;
#endif // DEBUG
- // Indicates that the new space should be kept small due to high promotion
- // rates caused by the mutator allocating a lot of long-lived objects.
- // TODO(hpayer): change to bool if no longer accessed from generated code
- intptr_t new_space_high_promotion_mode_active_;
-
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
- // Used to adjust the limits that control the timing of the next GC.
- intptr_t size_of_old_gen_at_last_old_space_gc_;
-
- // Limit on the amount of externally allocated memory allowed
- // between global GCs. If reached a global GC is forced.
- intptr_t external_allocation_limit_;
-
- // The amount of external memory registered through the API kept alive
- // by global handles
- int64_t amount_of_external_allocated_memory_;
-
- // Caches the amount of external memory registered at the last global gc.
- int64_t amount_of_external_allocated_memory_at_last_global_gc_;
-
// Indicates that an allocation has failed in the old generation since the
// last GC.
bool old_gen_exhausted_;
@@ -1989,6 +1592,11 @@ class Heap {
// start.
Object* weak_object_to_code_table_;
+ // List of encountered weak collections (JSWeakMap and JSWeakSet) during
+ // marking. It is initialized during marking, destroyed after marking and
+ // contains Smi(0) while marking is not active.
+ Object* encountered_weak_collections_;
+
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable {
@@ -2065,23 +1673,41 @@ class Heap {
void GarbageCollectionPrologue();
void GarbageCollectionEpilogue();
+ // Pretenuring decisions are made based on feedback collected during new
+ // space evacuation. Note that between feedback collection and calling this
+ // method object in old space must not move.
+ // Right now we only process pretenuring feedback in high promotion mode.
+ void ProcessPretenuringFeedback();
+
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
+ // Make sure there is a filler value behind the top of the new space
+ // so that the GC does not confuse some uninitialized/stale memory
+ // with the allocation memento of the object at the top.
+ void EnsureFillerObjectAtTop();
+
+ // Ensure that we have swept all spaces in such a way that we can iterate
+ // over all objects. May cause a GC.
+ void MakeHeapIterable();
+
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- bool CollectGarbage(AllocationSpace space,
- GarbageCollector collector,
- const char* gc_reason,
- const char* collector_reason);
+ bool CollectGarbage(
+ GarbageCollector collector,
+ const char* gc_reason,
+ const char* collector_reason,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Performs garbage collection
// Returns whether there is a chance another major GC could
// collect more garbage.
- bool PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer);
+ bool PerformGarbageCollection(
+ GarbageCollector collector,
+ GCTracer* tracer,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
inline void UpdateOldSpaceLimits();
@@ -2092,21 +1718,29 @@ class Heap {
PretenureFlag pretenure) {
ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
preferred_old_space == OLD_DATA_SPACE);
- if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
+ if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
}
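
Illustrative outcomes of the updated size check (comment sketch only):

    //   SelectSpace(Page::kMaxRegularHeapObjectSize + 1,
    //               OLD_POINTER_SPACE, TENURED)          -> LO_SPACE
    //   SelectSpace(64, OLD_POINTER_SPACE, TENURED)      -> OLD_POINTER_SPACE
    //   SelectSpace(64, OLD_POINTER_SPACE, NOT_TENURED)  -> NEW_SPACE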
- // Allocate an uninitialized fixed array.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(
- int length, PretenureFlag pretenure);
+ // Allocate an uninitialized object. The memory is non-executable if the
+ // hardware and OS allow. This is the single choke-point for allocations
+ // performed by the runtime and should not be bypassed (to extend this to
+ // inlined allocations, use the Heap::DisableInlineAllocation() support).
+ MUST_USE_RESULT inline AllocationResult AllocateRaw(
+ int size_in_bytes,
+ AllocationSpace space,
+ AllocationSpace retry_space);
- // Allocate an uninitialized fixed double array.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
- int length, PretenureFlag pretenure);
+ // Allocates a heap object based on the map.
+ MUST_USE_RESULT AllocationResult Allocate(
+ Map* map,
+ AllocationSpace space,
+ AllocationSite* allocation_site = NULL);
- // Allocate an initialized fixed array with the given filler value.
- MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithFiller(
- int length, PretenureFlag pretenure, Object* filler);
+ // Allocates a partial map for bootstrapping.
+ MUST_USE_RESULT AllocationResult AllocatePartialMap(
+ InstanceType instance_type,
+ int instance_size);
// Initializes a JSObject based on its map.
void InitializeJSObjectFromMap(JSObject* obj,
@@ -2115,8 +1749,125 @@ class Heap {
void InitializeAllocationMemento(AllocationMemento* memento,
AllocationSite* allocation_site);
+ // Allocate a block of memory in the given space (filled with a filler).
+ // Used as a fall-back for generated code when the space is full.
+ MUST_USE_RESULT AllocationResult AllocateFillerObject(int size,
+ bool double_align,
+ AllocationSpace space);
+
+ // Allocate an uninitialized fixed array.
+ MUST_USE_RESULT AllocationResult AllocateRawFixedArray(
+ int length, PretenureFlag pretenure);
+
+ // Allocate an uninitialized fixed double array.
+ MUST_USE_RESULT AllocationResult AllocateRawFixedDoubleArray(
+ int length, PretenureFlag pretenure);
+
+ // Allocate an initialized fixed array with the given filler value.
+ MUST_USE_RESULT AllocationResult AllocateFixedArrayWithFiller(
+ int length, PretenureFlag pretenure, Object* filler);
+
+ // Allocates and partially initializes a String. There are two String
+ // encodings: ASCII and two byte. These functions allocate a string of the
+ // given length and set its map and length fields. The characters of the
+ // string are uninitialized.
+ MUST_USE_RESULT AllocationResult AllocateRawOneByteString(
+ int length, PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult AllocateRawTwoByteString(
+ int length, PretenureFlag pretenure);
+
bool CreateInitialMaps();
- bool CreateInitialObjects();
+ void CreateInitialObjects();
+
+ // Allocates an internalized string in old space based on the character
+ // stream.
+ MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
+ Vector<const char> str,
+ int chars,
+ uint32_t hash_field);
+
+ MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
+ Vector<const uint8_t> str,
+ uint32_t hash_field);
+
+ MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
+ Vector<const uc16> str,
+ uint32_t hash_field);
+
+ template<bool is_one_byte, typename T>
+ MUST_USE_RESULT AllocationResult AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field);
+
+ template<typename T>
+ MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field);
+
+ // Allocates an uninitialized fixed array. It must be filled by the caller.
+ MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
+
+ // Make a copy of src and return it. Returns a retry-after-GC failure
+ // if the allocation failed.
+ MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
+
+ // Make a copy of src, set the map, and return the copy. Returns a
+ // retry-after-GC failure if the allocation failed.
+ MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray* src,
+ Map* map);
+
+ // Make a copy of src and return it. Returns a retry-after-GC failure
+ // if the allocation failed.
+ MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
+ FixedDoubleArray* src);
+
+ // Make a copy of src and return it. Returns a retry-after-GC failure
+ // if the allocation failed.
+ MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray(
+ ConstantPoolArray* src);
+
+
+ // Computes a single character string where the character has code.
+ // A cache is used for ASCII codes.
+ MUST_USE_RESULT AllocationResult LookupSingleCharacterStringFromCode(
+ uint16_t code);
+
+ // Allocate a symbol in old space.
+ MUST_USE_RESULT AllocationResult AllocateSymbol();
+
+ // Make a copy of src, set the map, and return the copy.
+ MUST_USE_RESULT AllocationResult CopyConstantPoolArrayWithMap(
+ ConstantPoolArray* src, Map* map);
+
+ MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
+ const ConstantPoolArray::NumberOfEntries& small);
+
+ MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(
+ const ConstantPoolArray::NumberOfEntries& small,
+ const ConstantPoolArray::NumberOfEntries& extended);
+
+ // Allocates an external array of the specified length and type.
+ MUST_USE_RESULT AllocationResult AllocateExternalArray(
+ int length,
+ ExternalArrayType array_type,
+ void* external_pointer,
+ PretenureFlag pretenure);
+
+ // Allocates a fixed typed array of the specified length and type.
+ MUST_USE_RESULT AllocationResult AllocateFixedTypedArray(
+ int length,
+ ExternalArrayType array_type,
+ PretenureFlag pretenure);
+
+ // Make a copy of src and return it.
+ MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
+
+ // Make a copy of src, set the map, and return the copy.
+ MUST_USE_RESULT AllocationResult CopyFixedDoubleArrayWithMap(
+ FixedDoubleArray* src, Map* map);
+
+ // Allocates a fixed double array with uninitialized values.
+ MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
// These five Create*EntryStub functions are here and forced to not be inlined
// because of a gcc-4.4 bug that assigns wrong vtable entries.
@@ -2124,36 +1875,40 @@ class Heap {
NO_INLINE(void CreateJSConstructEntryStub());
void CreateFixedStubs();
- void CreateStubsRequiringBuiltins();
-
- MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
- Object* to_number,
- byte kind);
-
- // Allocate a JSArray with no elements
- MUST_USE_RESULT MaybeObject* AllocateJSArray(
- ElementsKind elements_kind,
- PretenureFlag pretenure = NOT_TENURED);
// Allocate empty fixed array.
- MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
+ MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
// Allocate empty external array of given type.
- MUST_USE_RESULT MaybeObject* AllocateEmptyExternalArray(
+ MUST_USE_RESULT AllocationResult AllocateEmptyExternalArray(
+ ExternalArrayType array_type);
+
+ // Allocate empty fixed typed array of given type.
+ MUST_USE_RESULT AllocationResult AllocateEmptyFixedTypedArray(
ExternalArrayType array_type);
- // Allocate empty fixed double array.
- MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
+ // Allocate empty constant pool array.
+ MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();
// Allocate a tenured simple cell.
- MUST_USE_RESULT MaybeObject* AllocateCell(Object* value);
+ MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
// Allocate a tenured JS global property cell initialized with the hole.
- MUST_USE_RESULT MaybeObject* AllocatePropertyCell();
+ MUST_USE_RESULT AllocationResult AllocatePropertyCell();
+
+ // Allocates a new utility object in the old generation.
+ MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
+
+ // Allocates a new foreign object.
+ MUST_USE_RESULT AllocationResult AllocateForeign(
+ Address address, PretenureFlag pretenure = NOT_TENURED);
+
+ MUST_USE_RESULT AllocationResult AllocateCode(int object_size,
+ bool immovable);
+
+ MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
- // Allocate Box.
- MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
- PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult InternalizeString(String* str);
// Performs a minor collection in new generation.
void Scavenge();
@@ -2182,9 +1937,18 @@ class Heap {
// Code to be run before and after mark-compact.
void MarkCompactPrologue();
- void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots);
- void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
- void ProcessAllocationSites(WeakObjectRetainer* retainer, bool record_slots);
+ void ProcessNativeContexts(WeakObjectRetainer* retainer);
+ void ProcessArrayBuffers(WeakObjectRetainer* retainer);
+ void ProcessAllocationSites(WeakObjectRetainer* retainer);
+
+ // Deopts all code that contains allocation instructions which are tenured
+ // or not tenured. Moreover, it clears the pretenuring allocation site
+ // statistics.
+ void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+
+ // Evaluates local pretenuring for the old space and calls
+ // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
+ // the old space.
+ void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
// Called on heap tear-down.
void TearDownArrayBuffers();
@@ -2196,96 +1960,51 @@ class Heap {
// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
- // Initializes a function with a shared part and prototype.
- // Note: this code was factored out of AllocateFunction such that
- // other parts of the VM could use it. Specifically, a function that creates
- // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
- // Please note this does not perform a garbage collection.
- inline void InitializeFunction(
- JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype);
-
// Total RegExp code ever generated
double total_regexp_code_generated_;
GCTracer* tracer_;
- // Allocates a small number to string cache.
- MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
// Creates and installs the full-sized number string cache.
- void AllocateFullSizeNumberStringCache();
- // Get the length of the number to string cache based on the max semispace
- // size.
int FullSizeNumberStringCacheLength();
// Flush the number to string cache.
void FlushNumberStringCache();
- void UpdateSurvivalRateTrend(int start_new_space_size);
+ // Sets used allocation sites entries to undefined.
+ void FlushAllocationSitesScratchpad();
- enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
+ // Initializes the allocation sites scratchpad with undefined values.
+ void InitializeAllocationSitesScratchpad();
- static const int kYoungSurvivalRateHighThreshold = 90;
- static const int kYoungSurvivalRateLowThreshold = 10;
- static const int kYoungSurvivalRateAllowedDeviation = 15;
-
- int young_survivors_after_last_gc_;
- int high_survival_rate_period_length_;
- int low_survival_rate_period_length_;
- double survival_rate_;
- SurvivalRateTrend previous_survival_rate_trend_;
- SurvivalRateTrend survival_rate_trend_;
-
- void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
- ASSERT(survival_rate_trend != FLUCTUATING);
- previous_survival_rate_trend_ = survival_rate_trend_;
- survival_rate_trend_ = survival_rate_trend;
- }
-
- SurvivalRateTrend survival_rate_trend() {
- if (survival_rate_trend_ == STABLE) {
- return STABLE;
- } else if (previous_survival_rate_trend_ == STABLE) {
- return survival_rate_trend_;
- } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
- return FLUCTUATING;
- } else {
- return survival_rate_trend_;
- }
- }
+ // Adds an allocation site to the scratchpad if there is space left.
+ void AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode);
- bool IsStableOrIncreasingSurvivalTrend() {
- switch (survival_rate_trend()) {
- case STABLE:
- case INCREASING:
- return true;
- default:
- return false;
- }
- }
+ void UpdateSurvivalStatistics(int start_new_space_size);
- bool IsStableOrDecreasingSurvivalTrend() {
- switch (survival_rate_trend()) {
- case STABLE:
- case DECREASING:
- return true;
- default:
- return false;
- }
- }
+ static const int kYoungSurvivalRateHighThreshold = 90;
+ static const int kYoungSurvivalRateAllowedDeviation = 15;
- bool IsIncreasingSurvivalTrend() {
- return survival_rate_trend() == INCREASING;
- }
+ static const int kOldSurvivalRateLowThreshold = 10;
+ int high_survival_rate_period_length_;
+ intptr_t promoted_objects_size_;
+ double promotion_rate_;
+ intptr_t semi_space_copied_object_size_;
+ double semi_space_copied_rate_;
+
+ // This is the pretenuring trigger for allocation sites that are in the
+ // maybe-tenure state. Once we have switched to the maximum new space
+ // size, we deoptimize the code that belongs to the allocation site and
+ // derive the lifetime of the allocation site.
+ unsigned int maximum_size_scavenges_;
+
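// Editorial sketch (not part of this patch): one way the maybe-tenure
// trigger described above could be queried. The helper name and the
// NewSpaceIsAtMaximumCapacity() accessor are assumptions for exposition.
static bool ShouldForceTenuringDecision(Heap* heap) {
  // Scavenges at the maximum new space size no longer refine the survival
  // profile, so sites stuck in the maybe-tenure state must decide.
  return heap->NewSpaceIsAtMaximumCapacity() &&
         heap->maximum_size_scavenges_ > 0;
}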
+ // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
+ // Re-visit incremental marking heuristics.
bool IsHighSurvivalRate() {
return high_survival_rate_period_length_ > 0;
}
- bool IsLowSurvivalRate() {
- return low_survival_rate_period_length_ > 0;
- }
-
void SelectScavengingVisitorsTable();
void StartIdleRound() {
@@ -2391,6 +2110,9 @@ class Heap {
int no_weak_object_verification_scope_depth_;
#endif
+ static const int kAllocationSiteScratchpadSize = 256;
+ int allocation_sites_scratchpad_length_;
+
static const int kMaxMarkSweepsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
@@ -2407,16 +2129,15 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
- Mutex* relocation_mutex_;
-#ifdef DEBUG
- bool relocation_mutex_locked_by_optimizer_thread_;
-#endif // DEBUG;
+ Mutex relocation_mutex_;
+
+ int gc_callbacks_depth_;
+ friend class AlwaysAllocateScope;
friend class Factory;
+ friend class GCCallbacksScope;
friend class GCTracer;
- friend class DisallowAllocationFailure;
- friend class AlwaysAllocateScope;
- friend class Page;
+ friend class HeapIterator;
friend class Isolate;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
@@ -2424,6 +2145,7 @@ class Heap {
#ifdef VERIFY_HEAP
friend class NoWeakObjectVerificationScope;
#endif
+ friend class Page;
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2464,26 +2186,15 @@ class HeapStats {
};
-class DisallowAllocationFailure {
- public:
- inline DisallowAllocationFailure();
- inline ~DisallowAllocationFailure();
-
-#ifdef DEBUG
- private:
- bool old_state_;
-#endif
-};
-
-
class AlwaysAllocateScope {
public:
- inline AlwaysAllocateScope();
+ explicit inline AlwaysAllocateScope(Isolate* isolate);
inline ~AlwaysAllocateScope();
private:
// Implicitly disable artificial allocation failures.
- DisallowAllocationFailure disallow_allocation_failure_;
+ Heap* heap_;
+ DisallowAllocationFailure daf_;
};
@@ -2496,6 +2207,18 @@ class NoWeakObjectVerificationScope {
#endif
+class GCCallbacksScope {
+ public:
+ explicit inline GCCallbacksScope(Heap* heap);
+ inline ~GCCallbacksScope();
+
+ inline bool CheckReenter();
+
+ private:
+ Heap* heap_;
+};
+
+
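// Editorial sketch (not part of this patch): intended use of
// GCCallbacksScope as a re-entrancy guard around GC prologue/epilogue
// callbacks; the dispatch body is left abstract here.
void CallGCCallbacksOnceSketch(Heap* heap) {
  GCCallbacksScope scope(heap);
  if (scope.CheckReenter()) {
    // Only the outermost scope gets here; a GC triggered from inside a
    // callback sees CheckReenter() == false and skips dispatch.
  }
}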
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -2507,6 +2230,13 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
+// Verify that all objects are Smis.
+class VerifySmisVisitor: public ObjectVisitor {
+ public:
+ inline void VisitPointers(Object** start, Object** end);
+};
+
+
// Space iterator for iterating over all spaces of the heap. Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
@@ -2571,6 +2301,9 @@ class SpaceIterator : public Malloced {
// aggregates the specific iterators for the different spaces, as
// each of these can iterate over one space only.
//
+// HeapIterator ensures there is no allocation during its lifetime
+// (using an embedded DisallowHeapAllocation instance).
+//
// HeapIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As implementation of free
// nodes filtering uses GC marks, it can't be used during MS/MC GC
@@ -2593,12 +2326,18 @@ class HeapIterator BASE_EMBEDDED {
void reset();
private:
+ struct MakeHeapIterableHelper {
+ explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
+ };
+
// Perform the initialization.
void Init();
// Perform all necessary shutdown (destruction) work.
void Shutdown();
HeapObject* NextObject();
+ MakeHeapIterableHelper make_heap_iterable_helper_;
+ DisallowHeapAllocation no_heap_allocation_;
Heap* heap_;
HeapObjectsFiltering filtering_;
HeapObjectsFilter* filter_;
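// Editorial sketch (not part of this patch): typical use of HeapIterator
// in this revision, assuming its next() accessor. The
// MakeHeapIterableHelper member is initialized before no_heap_allocation_,
// so the heap is made iterable before allocation is disallowed.
HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
  // Inspect obj; any allocation here would trip the embedded
  // DisallowHeapAllocation guard.
}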
@@ -2614,10 +2353,10 @@ class HeapIterator BASE_EMBEDDED {
class KeyedLookupCache {
public:
// Lookup field offset for (map, name). If absent, -1 is returned.
- int Lookup(Map* map, Name* name);
+ int Lookup(Handle<Map> map, Handle<Name> name);
// Update an element in the cache.
- void Update(Map* map, Name* name, int field_offset);
+ void Update(Handle<Map> map, Handle<Name> name, int field_offset);
// Clear the cache.
void Clear();
@@ -2627,6 +2366,9 @@ class KeyedLookupCache {
static const int kMapHashShift = 5;
static const int kHashMask = -4; // Zero the last two bits.
static const int kEntriesPerBucket = 4;
+ static const int kEntryLength = 2;
+ static const int kMapIndex = 0;
+ static const int kKeyIndex = 1;
static const int kNotFound = -1;
// kEntriesPerBucket should be a power of 2.
@@ -2642,7 +2384,7 @@ class KeyedLookupCache {
}
}
- static inline int Hash(Map* map, Name* name);
+ static inline int Hash(Handle<Map> map, Handle<Name> name);
// Get the address of the keys and field_offsets arrays. Used in
// generated code to perform cache lookups.
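// Editorial sketch (not part of this patch) of a lookup over the bucket
// layout implied by the constants above; the keys_/field_offsets_ members
// and their fields are assumptions based on the accessors.
int LookupSketch(Handle<Map> map, Handle<Name> name) {
  int index = Hash(map, name) & kHashMask;  // start of a 4-entry bucket
  for (int i = 0; i < kEntriesPerBucket; i++) {
    // Conceptually each entry is kEntryLength words: the map at kMapIndex
    // and the name at kKeyIndex; field_offsets_ holds the cached offset.
    if (keys_[index + i].map == *map && keys_[index + i].name == *name) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}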
@@ -2747,6 +2489,7 @@ class GCTracer BASE_EMBEDDED {
MC_MARK,
MC_SWEEP,
MC_SWEEP_NEWSPACE,
+ MC_SWEEP_OLDSPACE,
MC_EVACUATE_PAGES,
MC_UPDATE_NEW_TO_NEW_POINTERS,
MC_UPDATE_ROOT_TO_NEW_POINTERS,
@@ -2791,10 +2534,6 @@ class GCTracer BASE_EMBEDDED {
// Sets the full GC count.
void set_full_gc_count(int count) { full_gc_count_ = count; }
- void increment_promoted_objects_size(int object_size) {
- promoted_objects_size_ += object_size;
- }
-
void increment_nodes_died_in_new_space() {
nodes_died_in_new_space_++;
}
@@ -2848,9 +2587,6 @@ class GCTracer BASE_EMBEDDED {
// previous collection and the beginning of the current one.
double spent_in_mutator_;
- // Size of objects promoted during the current collection.
- intptr_t promoted_objects_size_;
-
// Number of died nodes in the new space.
int nodes_died_in_new_space_;
@@ -2886,10 +2622,10 @@ class RegExpResultsCache {
ResultsCacheType type);
// Attempt to add value_array to the cache specified by type. On success,
// value_array is turned into a COW-array.
- static void Enter(Heap* heap,
- String* key_string,
- Object* key_pattern,
- FixedArray* value_array,
+ static void Enter(Isolate* isolate,
+ Handle<String> key_string,
+ Handle<Object> key_pattern,
+ Handle<FixedArray> value_array,
ResultsCacheType type);
static void Clear(FixedArray* cache);
static const int kRegExpResultsCacheSize = 0x100;
@@ -2902,85 +2638,6 @@ class RegExpResultsCache {
};
-class TranscendentalCache {
- public:
- enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
- static const int kTranscendentalTypeBits = 3;
- STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);
-
- // Returns a heap number with f(input), where f is a math function specified
- // by the 'type' argument.
- MUST_USE_RESULT inline MaybeObject* Get(Type type, double input);
-
- // The cache contains raw Object pointers. This method disposes of
- // them before a garbage collection.
- void Clear();
-
- private:
- class SubCache {
- static const int kCacheSize = 512;
-
- explicit SubCache(Isolate* isolate, Type t);
-
- MUST_USE_RESULT inline MaybeObject* Get(double input);
-
- inline double Calculate(double input);
-
- struct Element {
- uint32_t in[2];
- Object* output;
- };
-
- union Converter {
- double dbl;
- uint32_t integers[2];
- };
-
- inline static int Hash(const Converter& c) {
- uint32_t hash = (c.integers[0] ^ c.integers[1]);
- hash ^= static_cast<int32_t>(hash) >> 16;
- hash ^= static_cast<int32_t>(hash) >> 8;
- return (hash & (kCacheSize - 1));
- }
-
- Element elements_[kCacheSize];
- Type type_;
- Isolate* isolate_;
-
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
- // Inline implementation of the cache.
- friend class TranscendentalCacheStub;
- // For evaluating value.
- friend class TranscendentalCache;
-
- DISALLOW_COPY_AND_ASSIGN(SubCache);
- };
-
- explicit TranscendentalCache(Isolate* isolate) : isolate_(isolate) {
- for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
- }
-
- ~TranscendentalCache() {
- for (int i = 0; i < kNumberOfCaches; ++i) delete caches_[i];
- }
-
- // Used to create an external reference.
- inline Address cache_array_address();
-
- // Instantiation
- friend class Isolate;
- // Inline implementation of the caching.
- friend class TranscendentalCacheStub;
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
-
- Isolate* isolate_;
- SubCache* caches_[kNumberOfCaches];
- DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
-};
-
-
// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
public:
@@ -3029,7 +2686,7 @@ class IntrusiveMarking {
private:
static const uintptr_t kNotMarkedBit = 0x1;
- STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
+ STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0); // NOLINT
};
@@ -3044,6 +2701,9 @@ class PathTracer : public ObjectVisitor {
FIND_FIRST // Will stop the search after first match.
};
+ // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
+ static const int kMarkTag = 2;
+
// For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
// after the first match. If FIND_ALL is specified, then tracing will be
// done for all matches.
@@ -3075,9 +2735,6 @@ class PathTracer : public ObjectVisitor {
void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
virtual void ProcessResults();
- // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
- static const int kMarkTag = 2;
-
Object* search_target_;
bool found_target_;
bool found_target_in_trace_;
diff --git a/chromium/v8/src/hydrogen-alias-analysis.h b/chromium/v8/src/hydrogen-alias-analysis.h
index 21a54625ff8..368dd5f020d 100644
--- a/chromium/v8/src/hydrogen-alias-analysis.h
+++ b/chromium/v8/src/hydrogen-alias-analysis.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_ALIAS_ANALYSIS_H_
#define V8_HYDROGEN_ALIAS_ANALYSIS_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-bce.cc b/chromium/v8/src/hydrogen-bce.cc
index e1a28471273..5b134290ee7 100644
--- a/chromium/v8/src/hydrogen-bce.cc
+++ b/chromium/v8/src/hydrogen-bce.cc
@@ -1,35 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-bce.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-bce.h"
namespace v8 {
namespace internal {
+
// We try to "factor up" HBoundsCheck instructions towards the root of the
// dominator tree.
// For now we handle checks where the index is like "exp + int32value".
@@ -69,13 +47,13 @@ class BoundsCheckKey : public ZoneObject {
} else if (check->index()->IsSub()) {
HSub* index = HSub::cast(check->index());
is_sub = true;
- if (index->left()->IsConstant()) {
- constant = HConstant::cast(index->left());
- index_base = index->right();
- } else if (index->right()->IsConstant()) {
+ if (index->right()->IsConstant()) {
constant = HConstant::cast(index->right());
index_base = index->left();
}
+ } else if (check->index()->IsConstant()) {
+ index_base = check->block()->graph()->GetConstant0();
+ constant = HConstant::cast(check->index());
}
if (constant != NULL && constant->HasInteger32Value()) {
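// Worked examples of the decomposition above (editorial sketch):
//   index = i + 3 -> index_base = i, constant = 3
//   index = i - 2 -> index_base = i, constant = 2, is_sub = true
//   index = 7     -> index_base = GetConstant0(), constant = 7
// The resulting key is only usable when the constant has an Integer32
// value, which is what the line above checks.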
@@ -132,6 +110,24 @@ class BoundsCheckBbData: public ZoneObject {
bool HasSingleCheck() { return lower_check_ == upper_check_; }
+ void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) {
+ BoundsCheckBbData* data = FatherInDominatorTree();
+ while (data != NULL && data->UpperCheck() == check) {
+ ASSERT(data->upper_offset_ < offset);
+ data->upper_offset_ = offset;
+ data = data->FatherInDominatorTree();
+ }
+ }
+
+ void UpdateLowerOffsets(HBoundsCheck* check, int32_t offset) {
+ BoundsCheckBbData* data = FatherInDominatorTree();
+ while (data != NULL && data->LowerCheck() == check) {
+ ASSERT(data->lower_offset_ > offset);
+ data->lower_offset_ = offset;
+ data = data->FatherInDominatorTree();
+ }
+ }
+
// The goal of this method is to modify either upper_offset_ or
// lower_offset_ so that new_offset is also covered (the covered
// range grows).
@@ -155,7 +151,8 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
upper_check_ = new_check;
} else {
- TightenCheck(upper_check_, new_check);
+ TightenCheck(upper_check_, new_check, new_offset);
+ UpdateUpperOffsets(upper_check_, upper_offset_);
}
} else if (new_offset < lower_offset_) {
lower_offset_ = new_offset;
@@ -163,7 +160,8 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
lower_check_ = new_check;
} else {
- TightenCheck(lower_check_, new_check);
+ TightenCheck(lower_check_, new_check, new_offset);
+ UpdateLowerOffsets(lower_check_, lower_offset_);
}
} else {
// Should never have called CoverCheck() in this case.
@@ -171,12 +169,20 @@ class BoundsCheckBbData: public ZoneObject {
}
if (!keep_new_check) {
+ if (FLAG_trace_bce) {
+ OS::Print("Eliminating check #%d after tightening\n",
+ new_check->id());
+ }
new_check->block()->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
new_check->DeleteAndReplaceWith(new_check->ActualValue());
} else {
HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
: lower_check_;
+ if (FLAG_trace_bce) {
+ OS::Print("Moving second check #%d after first check #%d\n",
+ new_check->id(), first_check->id());
+ }
// The length is guaranteed to be live at first_check.
ASSERT(new_check->length() == first_check->length());
HInstruction* old_position = new_check->next();
@@ -216,50 +222,70 @@ class BoundsCheckBbData: public ZoneObject {
void MoveIndexIfNecessary(HValue* index_raw,
HBoundsCheck* insert_before,
HInstruction* end_of_scan_range) {
- if (!index_raw->IsAdd() && !index_raw->IsSub()) {
- // index_raw can be HAdd(index_base, offset), HSub(index_base, offset),
- // or index_base directly. In the latter case, no need to move anything.
- return;
- }
- HArithmeticBinaryOperation* index =
- HArithmeticBinaryOperation::cast(index_raw);
- HValue* left_input = index->left();
- HValue* right_input = index->right();
- bool must_move_index = false;
- bool must_move_left_input = false;
- bool must_move_right_input = false;
- for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
- if (cursor == left_input) must_move_left_input = true;
- if (cursor == right_input) must_move_right_input = true;
- if (cursor == index) must_move_index = true;
- if (cursor->previous() == NULL) {
- cursor = cursor->block()->dominator()->end();
- } else {
- cursor = cursor->previous();
+ // index_raw can be HAdd(index_base, offset), HSub(index_base, offset),
+ // HConstant(offset), or index_base directly.
+ // In the last case, there is no need to move anything.
+ if (index_raw->IsAdd() || index_raw->IsSub()) {
+ HArithmeticBinaryOperation* index =
+ HArithmeticBinaryOperation::cast(index_raw);
+ HValue* left_input = index->left();
+ HValue* right_input = index->right();
+ bool must_move_index = false;
+ bool must_move_left_input = false;
+ bool must_move_right_input = false;
+ for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
+ if (cursor == left_input) must_move_left_input = true;
+ if (cursor == right_input) must_move_right_input = true;
+ if (cursor == index) must_move_index = true;
+ if (cursor->previous() == NULL) {
+ cursor = cursor->block()->dominator()->end();
+ } else {
+ cursor = cursor->previous();
+ }
+ }
+ if (must_move_index) {
+ index->Unlink();
+ index->InsertBefore(insert_before);
+ }
+ // The BCE algorithm only selects mergeable bounds checks that share
+ // the same "index_base", so we'll only ever have to move constants.
+ if (must_move_left_input) {
+ HConstant::cast(left_input)->Unlink();
+ HConstant::cast(left_input)->InsertBefore(index);
+ }
+ if (must_move_right_input) {
+ HConstant::cast(right_input)->Unlink();
+ HConstant::cast(right_input)->InsertBefore(index);
+ }
+ } else if (index_raw->IsConstant()) {
+ HConstant* index = HConstant::cast(index_raw);
+ bool must_move = false;
+ for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
+ if (cursor == index) must_move = true;
+ if (cursor->previous() == NULL) {
+ cursor = cursor->block()->dominator()->end();
+ } else {
+ cursor = cursor->previous();
+ }
+ }
+ if (must_move) {
+ index->Unlink();
+ index->InsertBefore(insert_before);
}
- }
- if (must_move_index) {
- index->Unlink();
- index->InsertBefore(insert_before);
- }
- // The BCE algorithm only selects mergeable bounds checks that share
- // the same "index_base", so we'll only ever have to move constants.
- if (must_move_left_input) {
- HConstant::cast(left_input)->Unlink();
- HConstant::cast(left_input)->InsertBefore(index);
- }
- if (must_move_right_input) {
- HConstant::cast(right_input)->Unlink();
- HConstant::cast(right_input)->InsertBefore(index);
}
}
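// Editorial example of the movement above: if a check of a[i + 3] is
// hoisted, the HAdd (and, when it also sits in the scanned range, the
// HConstant 3) is unlinked and re-inserted before the surviving check so
// that all of its inputs dominate it; a bare HConstant index is moved in
// the same way.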
void TightenCheck(HBoundsCheck* original_check,
- HBoundsCheck* tighter_check) {
+ HBoundsCheck* tighter_check,
+ int32_t new_offset) {
ASSERT(original_check->length() == tighter_check->length());
MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
original_check->ReplaceAllUsesWith(original_check->index());
original_check->SetOperandAt(0, tighter_check->index());
+ if (FLAG_trace_bce) {
+ OS::Print("Tightened check #%d with offset %d from #%d\n",
+ original_check->id(), new_offset, tighter_check->id());
+ }
}
DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
@@ -369,11 +395,32 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
bb_data_list,
NULL);
*data_p = bb_data_list;
+ if (FLAG_trace_bce) {
+ OS::Print("Fresh bounds check data for block #%d: [%d]\n",
+ bb->block_id(), offset);
+ }
} else if (data->OffsetIsCovered(offset)) {
bb->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
+ if (FLAG_trace_bce) {
+ OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
+ check->id(), offset);
+ }
check->DeleteAndReplaceWith(check->ActualValue());
} else if (data->BasicBlock() == bb) {
+ // TODO(jkummerow): I think the following logic would be preferable:
+ //  if (data->BasicBlock() == bb ||
+ // graph()->use_optimistic_licm() ||
+ // bb->IsLoopSuccessorDominator()) {
+ // data->CoverCheck(check, offset)
+ // } else {
+ // /* add pristine BCBbData like in (data == NULL) case above */
+ // }
+ // Even better would be: distinguish between read-only dominator-imposed
+ // knowledge and modifiable upper/lower checks.
+ // What happens currently is that the first bounds check in a dominated
+ // block will stay around while any further checks are hoisted out,
+ // which doesn't make sense. Investigate/fix this in a future CL.
data->CoverCheck(check, offset);
} else if (graph()->use_optimistic_licm() ||
bb->IsLoopSuccessorDominator()) {
@@ -391,6 +438,10 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
data->UpperCheck(),
bb_data_list,
data);
+ if (FLAG_trace_bce) {
+ OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
+ bb->block_id(), new_lower_offset, new_upper_offset);
+ }
table_.Insert(key, bb_data_list, zone());
}
}
diff --git a/chromium/v8/src/hydrogen-bce.h b/chromium/v8/src/hydrogen-bce.h
index c55dea7b7a5..70c0a07d066 100644
--- a/chromium/v8/src/hydrogen-bce.h
+++ b/chromium/v8/src/hydrogen-bce.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_BCE_H_
#define V8_HYDROGEN_BCE_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-bch.cc b/chromium/v8/src/hydrogen-bch.cc
index a0a0fee7105..34216c66d32 100644
--- a/chromium/v8/src/hydrogen-bch.cc
+++ b/chromium/v8/src/hydrogen-bch.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-bch.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-bch.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-bch.h b/chromium/v8/src/hydrogen-bch.h
index a22dacdd422..852c264c4f1 100644
--- a/chromium/v8/src/hydrogen-bch.h
+++ b/chromium/v8/src/hydrogen-bch.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_BCH_H_
#define V8_HYDROGEN_BCH_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-canonicalize.cc b/chromium/v8/src/hydrogen-canonicalize.cc
index d3f72e93398..c15b8d99c09 100644
--- a/chromium/v8/src/hydrogen-canonicalize.cc
+++ b/chromium/v8/src/hydrogen-canonicalize.cc
@@ -1,32 +1,9 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "hydrogen-canonicalize.h"
-#include "hydrogen-redundant-phi.h"
+#include "src/hydrogen-canonicalize.h"
+#include "src/hydrogen-redundant-phi.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-canonicalize.h b/chromium/v8/src/hydrogen-canonicalize.h
index d2b289bc212..eb230332fdd 100644
--- a/chromium/v8/src/hydrogen-canonicalize.h
+++ b/chromium/v8/src/hydrogen-canonicalize.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_CANONICALIZE_H_
#define V8_HYDROGEN_CANONICALIZE_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-check-elimination.cc b/chromium/v8/src/hydrogen-check-elimination.cc
index bbd3042fb7a..98e3d3d96fb 100644
--- a/chromium/v8/src/hydrogen-check-elimination.cc
+++ b/chromium/v8/src/hydrogen-check-elimination.cc
@@ -1,33 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-check-elimination.h"
-#include "hydrogen-alias-analysis.h"
-#include "hydrogen-flow-engine.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-check-elimination.h"
+
+#include "src/hydrogen-alias-analysis.h"
+#include "src/hydrogen-flow-engine.h"
#define GLOBAL 1
@@ -44,20 +22,58 @@
namespace v8 {
namespace internal {
-typedef UniqueSet<Map>* MapSet;
+typedef const UniqueSet<Map>* MapSet;
struct HCheckTableEntry {
+ enum State {
+ // We have seen a map check (i.e. an HCheckMaps) for these maps, so we can
+ // use this information to eliminate further map checks, elements kind
+ // transitions, etc.
+ CHECKED,
+ // Same as CHECKED, but we also know that these maps are stable.
+ CHECKED_STABLE,
+ // These maps are stable but have not been checked: we learned them via
+ // field type tracking or from a constant, or they were initially
+ // CHECKED_STABLE but became UNCHECKED_STABLE because of an instruction
+ // that changes maps or elements kind. To use this information for check
+ // elimination we need a stability check, which turns the entry back
+ // into CHECKED_STABLE.
+ UNCHECKED_STABLE
+ };
+
+ static const char* State2String(State state) {
+ switch (state) {
+ case CHECKED: return "checked";
+ case CHECKED_STABLE: return "checked stable";
+ case UNCHECKED_STABLE: return "unchecked stable";
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
+ static State StateMerge(State state1, State state2) {
+ if (state1 == state2) return state1;
+ if ((state1 == CHECKED && state2 == CHECKED_STABLE) ||
+ (state2 == CHECKED && state1 == CHECKED_STABLE)) {
+ return CHECKED;
+ }
+ ASSERT((state1 == CHECKED_STABLE && state2 == UNCHECKED_STABLE) ||
+ (state2 == CHECKED_STABLE && state1 == UNCHECKED_STABLE));
+ return UNCHECKED_STABLE;
+ }
+
HValue* object_; // The object being approximated. NULL => invalid entry.
- HValue* check_; // The last check instruction.
- MapSet maps_; // The set of known maps for the object.
+ HInstruction* check_; // The last check instruction.
+ MapSet maps_; // The set of known maps for the object.
+ State state_; // The state of this entry.
};
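// Editorial sketch: StateMerge acts as a meet over the three states, e.g.
//   StateMerge(CHECKED, CHECKED)                 == CHECKED
//   StateMerge(CHECKED, CHECKED_STABLE)          == CHECKED
//   StateMerge(CHECKED_STABLE, UNCHECKED_STABLE) == UNCHECKED_STABLE
// Merging CHECKED with UNCHECKED_STABLE is deliberately unsupported (see
// the ASSERT); HCheckTable::Merge drops such entries instead.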
-// The main datastructure used during check elimination, which stores a
+// The main data structure used during check elimination, which stores a
// set of known maps for each object.
class HCheckTable : public ZoneObject {
public:
- static const int kMaxTrackedObjects = 10;
+ static const int kMaxTrackedObjects = 16;
explicit HCheckTable(HCheckEliminationPhase* phase)
: phase_(phase),
@@ -72,10 +88,6 @@ class HCheckTable : public ZoneObject {
ReduceCheckMaps(HCheckMaps::cast(instr));
break;
}
- case HValue::kCheckValue: {
- ReduceCheckValue(HCheckValue::cast(instr));
- break;
- }
case HValue::kLoadNamedField: {
ReduceLoadNamedField(HLoadNamedField::cast(instr));
break;
@@ -88,88 +100,261 @@ class HCheckTable : public ZoneObject {
ReduceCompareMap(HCompareMap::cast(instr));
break;
}
+ case HValue::kCompareObjectEqAndBranch: {
+ ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch::cast(instr));
+ break;
+ }
+ case HValue::kIsStringAndBranch: {
+ ReduceIsStringAndBranch(HIsStringAndBranch::cast(instr));
+ break;
+ }
case HValue::kTransitionElementsKind: {
ReduceTransitionElementsKind(
HTransitionElementsKind::cast(instr));
break;
}
- case HValue::kCheckMapValue: {
- ReduceCheckMapValue(HCheckMapValue::cast(instr));
- break;
- }
case HValue::kCheckHeapObject: {
ReduceCheckHeapObject(HCheckHeapObject::cast(instr));
break;
}
+ case HValue::kCheckInstanceType: {
+ ReduceCheckInstanceType(HCheckInstanceType::cast(instr));
+ break;
+ }
default: {
// If the instruction changes maps uncontrollably, drop everything.
- if (instr->CheckGVNFlag(kChangesMaps) ||
- instr->CheckGVNFlag(kChangesOsrEntries)) {
+ if (instr->CheckChangesFlag(kOsrEntries)) {
Kill();
+ break;
+ }
+ if (instr->CheckChangesFlag(kElementsKind) ||
+ instr->CheckChangesFlag(kMaps)) {
+ KillUnstableEntries();
}
}
// Improvements possible:
- // - eliminate redundant HCheckSmi, HCheckInstanceType instructions
+ // - eliminate redundant HCheckSmi instructions
// - track which values have been HCheckHeapObject'd
}
return this;
}
- // Global analysis: Copy state to successor block.
- HCheckTable* Copy(HBasicBlock* succ, Zone* zone) {
- HCheckTable* copy = new(phase_->zone()) HCheckTable(phase_);
+ // Support for global analysis with HFlowEngine: Merge given state with
+ // the other incoming state.
+ static HCheckTable* Merge(HCheckTable* succ_state, HBasicBlock* succ_block,
+ HCheckTable* pred_state, HBasicBlock* pred_block,
+ Zone* zone) {
+ if (pred_state == NULL || pred_block->IsUnreachable()) {
+ return succ_state;
+ }
+ if (succ_state == NULL) {
+ return pred_state->Copy(succ_block, pred_block, zone);
+ } else {
+ return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+ }
+ }
+
+ // Support for global analysis with HFlowEngine: Given state merged with all
+ // the other incoming states, prepare it for use.
+ static HCheckTable* Finish(HCheckTable* state, HBasicBlock* block,
+ Zone* zone) {
+ if (state == NULL) {
+ block->MarkUnreachable();
+ } else if (block->IsUnreachable()) {
+ state = NULL;
+ }
+ if (FLAG_trace_check_elimination) {
+ PrintF("Processing B%d, checkmaps-table:\n", block->block_id());
+ Print(state);
+ }
+ return state;
+ }
+
+ private:
+ // Copy state to successor block.
+ HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) {
+ HCheckTable* copy = new(zone) HCheckTable(phase_);
for (int i = 0; i < size_; i++) {
HCheckTableEntry* old_entry = &entries_[i];
+ ASSERT(old_entry->maps_->size() > 0);
HCheckTableEntry* new_entry = &copy->entries_[i];
- // TODO(titzer): keep the check if this block dominates the successor?
new_entry->object_ = old_entry->object_;
- new_entry->check_ = NULL;
- new_entry->maps_ = old_entry->maps_->Copy(phase_->zone());
+ new_entry->maps_ = old_entry->maps_;
+ new_entry->state_ = old_entry->state_;
+ // Keep the check if the existing check's block dominates the successor.
+ if (old_entry->check_ != NULL &&
+ old_entry->check_->block()->Dominates(succ)) {
+ new_entry->check_ = old_entry->check_;
+ } else {
+ // Leave it NULL till we meet a new check instruction for this object
+ // in the control flow.
+ new_entry->check_ = NULL;
+ }
+ }
+ copy->cursor_ = cursor_;
+ copy->size_ = size_;
+
+ // Create entries for succ block's phis.
+ if (!succ->IsLoopHeader() && succ->phis()->length() > 0) {
+ int pred_index = succ->PredecessorIndexOf(from_block);
+ for (int phi_index = 0;
+ phi_index < succ->phis()->length();
+ ++phi_index) {
+ HPhi* phi = succ->phis()->at(phi_index);
+ HValue* phi_operand = phi->OperandAt(pred_index);
+
+ HCheckTableEntry* pred_entry = copy->Find(phi_operand);
+ if (pred_entry != NULL) {
+ // Create an entry for a phi in the table.
+ copy->Insert(phi, NULL, pred_entry->maps_, pred_entry->state_);
+ }
+ }
}
+
+ // Branch-sensitive analysis for certain comparisons can add facts to
+ // the state of the successor on either branch outcome.
+ bool learned = false;
if (succ->predecessors()->length() == 1) {
HControlInstruction* end = succ->predecessors()->at(0)->end();
- if (end->IsCompareMap() && end->SuccessorAt(0) == succ) {
- // Learn on the true branch of if(CompareMap(x)).
+ bool is_true_branch = end->SuccessorAt(0) == succ;
+ if (end->IsCompareMap()) {
HCompareMap* cmp = HCompareMap::cast(end);
HValue* object = cmp->value()->ActualValue();
HCheckTableEntry* entry = copy->Find(object);
- if (entry == NULL) {
- copy->Insert(object, cmp->map());
+ if (is_true_branch) {
+ HCheckTableEntry::State state = cmp->map_is_stable()
+ ? HCheckTableEntry::CHECKED_STABLE
+ : HCheckTableEntry::CHECKED;
+ // Learn on the true branch of if(CompareMap(x)).
+ if (entry == NULL) {
+ copy->Insert(object, cmp, cmp->map(), state);
+ } else {
+ entry->maps_ = new(zone) UniqueSet<Map>(cmp->map(), zone);
+ entry->check_ = cmp;
+ entry->state_ = state;
+ }
} else {
- MapSet list = new(phase_->zone()) UniqueSet<Map>();
- list->Add(cmp->map(), phase_->zone());
- entry->maps_ = list;
+ // Learn on the false branch of if(CompareMap(x)).
+ if (entry != NULL) {
+ EnsureChecked(entry, object, cmp);
+ UniqueSet<Map>* maps = entry->maps_->Copy(zone);
+ maps->Remove(cmp->map());
+ entry->maps_ = maps;
+ ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+ }
+ }
+ learned = true;
+ } else if (is_true_branch && end->IsCompareObjectEqAndBranch()) {
+ // Learn on the true branch of if(CmpObjectEq(x, y)).
+ HCompareObjectEqAndBranch* cmp =
+ HCompareObjectEqAndBranch::cast(end);
+ HValue* left = cmp->left()->ActualValue();
+ HValue* right = cmp->right()->ActualValue();
+ HCheckTableEntry* le = copy->Find(left);
+ HCheckTableEntry* re = copy->Find(right);
+ if (le == NULL) {
+ if (re != NULL) {
+ copy->Insert(left, NULL, re->maps_, re->state_);
+ }
+ } else if (re == NULL) {
+ copy->Insert(right, NULL, le->maps_, le->state_);
+ } else {
+ EnsureChecked(le, cmp->left(), cmp);
+ EnsureChecked(re, cmp->right(), cmp);
+ le->maps_ = re->maps_ = le->maps_->Intersect(re->maps_, zone);
+ le->state_ = re->state_ = HCheckTableEntry::StateMerge(
+ le->state_, re->state_);
+ ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, le->state_);
+ ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, re->state_);
+ }
+ learned = true;
+ } else if (end->IsIsStringAndBranch()) {
+ HIsStringAndBranch* cmp = HIsStringAndBranch::cast(end);
+ HValue* object = cmp->value()->ActualValue();
+ HCheckTableEntry* entry = copy->Find(object);
+ if (is_true_branch) {
+ // Learn on the true branch of if(IsString(x)).
+ if (entry == NULL) {
+ copy->Insert(object, NULL, string_maps(),
+ HCheckTableEntry::CHECKED);
+ } else {
+ EnsureChecked(entry, object, cmp);
+ entry->maps_ = entry->maps_->Intersect(string_maps(), zone);
+ ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+ }
+ } else {
+ // Learn on the false branch of if(IsString(x)).
+ if (entry != NULL) {
+ EnsureChecked(entry, object, cmp);
+ entry->maps_ = entry->maps_->Subtract(string_maps(), zone);
+ ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+ }
}
}
- // TODO(titzer): is it worthwhile to learn on false branch too?
+ // Learning on false branches requires storing negative facts.
}
+
+ if (FLAG_trace_check_elimination) {
+ PrintF("B%d checkmaps-table %s from B%d:\n",
+ succ->block_id(),
+ learned ? "learned" : "copied",
+ from_block->block_id());
+ Print(copy);
+ }
+
return copy;
}
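// Editorial example of the branch-sensitive copying above: for
//   if (CompareMap(x, M)) goto B1; else goto B2;
// the copy for B1 records x -> {M} (CHECKED, or CHECKED_STABLE when the
// map is stable), while the copy for B2 removes M from x's known map set.
// CompareObjectEqAndBranch and IsStringAndBranch are handled analogously.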
- // Global analysis: Merge this state with the other incoming state.
- HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that, Zone* zone) {
+ // Merge this state with the other incoming state.
+ HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that,
+ HBasicBlock* pred_block, Zone* zone) {
if (that->size_ == 0) {
// If the other state is empty, simply reset.
size_ = 0;
cursor_ = 0;
- return this;
- }
- bool compact = false;
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* this_entry = &entries_[i];
- HCheckTableEntry* that_entry = that->Find(this_entry->object_);
- if (that_entry == NULL) {
- this_entry->object_ = NULL;
- compact = true;
- } else {
- this_entry->maps_ = this_entry->maps_->Union(
- that_entry->maps_, phase_->zone());
- if (this_entry->check_ != that_entry->check_) this_entry->check_ = NULL;
- ASSERT(this_entry->maps_->size() > 0);
+ } else {
+ int pred_index = succ->PredecessorIndexOf(pred_block);
+ bool compact = false;
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* this_entry = &entries_[i];
+ HCheckTableEntry* that_entry;
+ if (this_entry->object_->IsPhi() &&
+ this_entry->object_->block() == succ) {
+ HPhi* phi = HPhi::cast(this_entry->object_);
+ HValue* phi_operand = phi->OperandAt(pred_index);
+ that_entry = that->Find(phi_operand);
+
+ } else {
+ that_entry = that->Find(this_entry->object_);
+ }
+
+ if (that_entry == NULL ||
+ (that_entry->state_ == HCheckTableEntry::CHECKED &&
+ this_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) ||
+ (this_entry->state_ == HCheckTableEntry::CHECKED &&
+ that_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE)) {
+ this_entry->object_ = NULL;
+ compact = true;
+ } else {
+ this_entry->maps_ =
+ this_entry->maps_->Union(that_entry->maps_, zone);
+ this_entry->state_ = HCheckTableEntry::StateMerge(
+ this_entry->state_, that_entry->state_);
+ if (this_entry->check_ != that_entry->check_) {
+ this_entry->check_ = NULL;
+ }
+ ASSERT(this_entry->maps_->size() > 0);
+ }
}
+ if (compact) Compact();
+ }
+
+ if (FLAG_trace_check_elimination) {
+ PrintF("B%d checkmaps-table merged with B%d table:\n",
+ succ->block_id(), pred_block->block_id());
+ Print(this);
}
- if (compact) Compact();
return this;
}
@@ -178,90 +363,162 @@ class HCheckTable : public ZoneObject {
HCheckTableEntry* entry = Find(object);
if (entry != NULL) {
// entry found;
- MapSet a = entry->maps_;
- MapSet i = instr->map_set().Copy(phase_->zone());
- if (a->IsSubset(i)) {
+ HGraph* graph = instr->block()->graph();
+ if (entry->maps_->IsSubset(instr->maps())) {
// The first check is more strict; the second is redundant.
if (entry->check_ != NULL) {
+ ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+ TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
+ instr->id(), instr->block()->block_id(), entry->check_->id()));
instr->DeleteAndReplaceWith(entry->check_);
INC_STAT(redundant_);
- } else {
- instr->DeleteAndReplaceWith(instr->value());
+ } else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
+ ASSERT_EQ(NULL, entry->check_);
+ TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n",
+ instr->id(), instr->block()->block_id()));
+ instr->set_maps(entry->maps_->Copy(graph->zone()));
+ instr->MarkAsStabilityCheck();
+ entry->state_ = HCheckTableEntry::CHECKED_STABLE;
+ } else if (!instr->IsStabilityCheck()) {
+ TRACE(("Marking redundant CheckMaps #%d at B%d as dead\n",
+ instr->id(), instr->block()->block_id()));
+ // Mark check as dead but leave it in the graph as a checkpoint for
+ // subsequent checks.
+ instr->SetFlag(HValue::kIsDead);
+ entry->check_ = instr;
INC_STAT(removed_);
}
return;
}
- i = i->Intersect(a, phase_->zone());
- if (i->size() == 0) {
- // Intersection is empty; probably megamorphic, which is likely to
- // deopt anyway, so just leave things as they are.
+ MapSet intersection = instr->maps()->Intersect(
+ entry->maps_, graph->zone());
+ if (intersection->size() == 0) {
+ // Intersection is empty; probably megamorphic.
INC_STAT(empty_);
+ entry->object_ = NULL;
+ Compact();
} else {
- // TODO(titzer): replace the first check with a more strict check
- INC_STAT(narrowed_);
+ // Update set of maps in the entry.
+ entry->maps_ = intersection;
+ // Update state of the entry.
+ if (instr->maps_are_stable() ||
+ entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
+ entry->state_ = HCheckTableEntry::CHECKED_STABLE;
+ }
+ if (intersection->size() != instr->maps()->size()) {
+ // Narrow set of maps in the second check maps instruction.
+ if (entry->check_ != NULL &&
+ entry->check_->block() == instr->block() &&
+ entry->check_->IsCheckMaps()) {
+ // There is a check in the same block so replace it with a more
+ // strict check and eliminate the second check entirely.
+ HCheckMaps* check = HCheckMaps::cast(entry->check_);
+ ASSERT(!check->IsStabilityCheck());
+ TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(),
+ check->block()->block_id()));
+ // Update map set and ensure that the check is alive.
+ check->set_maps(intersection);
+ check->ClearFlag(HValue::kIsDead);
+ TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
+ instr->id(), instr->block()->block_id(), entry->check_->id()));
+ instr->DeleteAndReplaceWith(entry->check_);
+ } else {
+ TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(),
+ instr->block()->block_id()));
+ instr->set_maps(intersection);
+ entry->check_ = instr->IsStabilityCheck() ? NULL : instr;
+ }
+
+ if (FLAG_trace_check_elimination) {
+ Print(this);
+ }
+ INC_STAT(narrowed_);
+ }
}
} else {
// No entry; insert a new one.
- Insert(object, instr, instr->map_set().Copy(phase_->zone()));
+ HCheckTableEntry::State state = instr->maps_are_stable()
+ ? HCheckTableEntry::CHECKED_STABLE
+ : HCheckTableEntry::CHECKED;
+ HCheckMaps* check = instr->IsStabilityCheck() ? NULL : instr;
+ Insert(object, check, instr->maps(), state);
}
}
- void ReduceCheckValue(HCheckValue* instr) {
- // Canonicalize HCheckValues; they might have their values load-eliminated.
- HValue* value = instr->Canonicalize();
- if (value == NULL) {
- instr->DeleteAndReplaceWith(instr->value());
- INC_STAT(removed_);
- } else if (value != instr) {
+ void ReduceCheckInstanceType(HCheckInstanceType* instr) {
+ HValue* value = instr->value()->ActualValue();
+ HCheckTableEntry* entry = Find(value);
+ if (entry == NULL) {
+ if (instr->check() == HCheckInstanceType::IS_STRING) {
+ Insert(value, NULL, string_maps(), HCheckTableEntry::CHECKED);
+ }
+ return;
+ }
+ UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>(
+ entry->maps_->size(), zone());
+ for (int i = 0; i < entry->maps_->size(); ++i) {
+ InstanceType type;
+ Unique<Map> map = entry->maps_->at(i);
+ {
+ // This is safe, because maps don't move and their instance type does
+ // not change.
+ AllowHandleDereference allow_deref;
+ type = map.handle()->instance_type();
+ }
+ if (instr->is_interval_check()) {
+ InstanceType first_type, last_type;
+ instr->GetCheckInterval(&first_type, &last_type);
+ if (first_type <= type && type <= last_type) maps->Add(map, zone());
+ } else {
+ uint8_t mask, tag;
+ instr->GetCheckMaskAndTag(&mask, &tag);
+ if ((type & mask) == tag) maps->Add(map, zone());
+ }
+ }
+ if (maps->size() == entry->maps_->size()) {
+ TRACE(("Removing redundant CheckInstanceType #%d at B%d\n",
+ instr->id(), instr->block()->block_id()));
+ EnsureChecked(entry, value, instr);
instr->DeleteAndReplaceWith(value);
- INC_STAT(redundant_);
+ INC_STAT(removed_cit_);
+ } else if (maps->size() != 0) {
+ entry->maps_ = maps;
+ if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
+ entry->state_ = HCheckTableEntry::CHECKED_STABLE;
+ }
}
}
void ReduceLoadNamedField(HLoadNamedField* instr) {
// Reduce a load of the map field when it is known to be a constant.
- if (!IsMapAccess(instr->access())) return;
+ if (!instr->access().IsMap()) {
+ // Check if we introduce field maps here.
+ MapSet maps = instr->maps();
+ if (maps != NULL) {
+ ASSERT_NE(0, maps->size());
+ Insert(instr, NULL, maps, HCheckTableEntry::UNCHECKED_STABLE);
+ }
+ return;
+ }
HValue* object = instr->object()->ActualValue();
- MapSet maps = FindMaps(object);
- if (maps == NULL || maps->size() != 1) return; // Not a constant.
+ HCheckTableEntry* entry = Find(object);
+ if (entry == NULL || entry->maps_->size() != 1) return; // Not a constant.
- Unique<Map> map = maps->at(0);
+ EnsureChecked(entry, object, instr);
+ Unique<Map> map = entry->maps_->at(0);
+ bool map_is_stable = (entry->state_ != HCheckTableEntry::CHECKED);
HConstant* constant = HConstant::CreateAndInsertBefore(
- instr->block()->graph()->zone(), map, true, instr);
+ instr->block()->graph()->zone(), map, map_is_stable, instr);
instr->DeleteAndReplaceWith(constant);
INC_STAT(loads_);
}
- void ReduceCheckMapValue(HCheckMapValue* instr) {
- if (!instr->map()->IsConstant()) return; // Nothing to learn.
-
- HValue* object = instr->value()->ActualValue();
- // Match a HCheckMapValue(object, HConstant(map))
- Unique<Map> map = MapConstant(instr->map());
- MapSet maps = FindMaps(object);
- if (maps != NULL) {
- if (maps->Contains(map)) {
- if (maps->size() == 1) {
- // Object is known to have exactly this map.
- instr->DeleteAndReplaceWith(NULL);
- INC_STAT(removed_);
- } else {
- // Only one map survives the check.
- maps->Clear();
- maps->Add(map, phase_->zone());
- }
- }
- } else {
- // No prior information.
- Insert(object, map);
- }
- }
-
void ReduceCheckHeapObject(HCheckHeapObject* instr) {
- if (FindMaps(instr->value()->ActualValue()) != NULL) {
+ HValue* value = instr->value()->ActualValue();
+ if (Find(value) != NULL) {
// If the object has known maps, it's definitely a heap object.
- instr->DeleteAndReplaceWith(instr->value());
+ instr->DeleteAndReplaceWith(value);
INC_STAT(removed_cho_);
}
}
@@ -271,53 +528,157 @@ class HCheckTable : public ZoneObject {
if (instr->has_transition()) {
// This store transitions the object to a new map.
Kill(object);
- Insert(object, MapConstant(instr->transition()));
- } else if (IsMapAccess(instr->access())) {
+ HConstant* c_transition = HConstant::cast(instr->transition());
+ HCheckTableEntry::State state = c_transition->HasStableMapValue()
+ ? HCheckTableEntry::CHECKED_STABLE
+ : HCheckTableEntry::CHECKED;
+ Insert(object, NULL, c_transition->MapValue(), state);
+ } else if (instr->access().IsMap()) {
// This is a store directly to the map field of the object.
Kill(object);
if (!instr->value()->IsConstant()) return;
- Insert(object, MapConstant(instr->value()));
+ HConstant* c_value = HConstant::cast(instr->value());
+ HCheckTableEntry::State state = c_value->HasStableMapValue()
+ ? HCheckTableEntry::CHECKED_STABLE
+ : HCheckTableEntry::CHECKED;
+ Insert(object, NULL, c_value->MapValue(), state);
} else {
// If the instruction changes maps, it should be handled above.
- CHECK(!instr->CheckGVNFlag(kChangesMaps));
+ CHECK(!instr->CheckChangesFlag(kMaps));
}
}
void ReduceCompareMap(HCompareMap* instr) {
- MapSet maps = FindMaps(instr->value()->ActualValue());
- if (maps == NULL) return;
- if (maps->Contains(instr->map())) {
- if (maps->size() == 1) {
- // TODO(titzer): replace with goto true branch
- INC_STAT(compares_true_);
+ HCheckTableEntry* entry = Find(instr->value()->ActualValue());
+ if (entry == NULL) return;
+
+ EnsureChecked(entry, instr->value(), instr);
+
+ int succ;
+ if (entry->maps_->Contains(instr->map())) {
+ if (entry->maps_->size() != 1) {
+ TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: "
+ "ambiguous set of maps\n", instr->id(), instr->value()->id(),
+ instr->block()->block_id()));
+ return;
}
+ succ = 0;
+ INC_STAT(compares_true_);
} else {
- // TODO(titzer): replace with goto false branch
+ succ = 1;
INC_STAT(compares_false_);
}
+
+ TRACE(("Marking redundant CompareMap #%d for #%d at B%d as %s\n",
+ instr->id(), instr->value()->id(), instr->block()->block_id(),
+ succ == 0 ? "true" : "false"));
+ instr->set_known_successor_index(succ);
+
+ int unreachable_succ = 1 - succ;
+ instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
+ }
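
ReduceCompareMap now folds the branch direction instead of only counting: containment of the compared map in a singleton set decides the true successor, absence decides the false one, and an ambiguous multi-map set leaves the compare alone. The three-way decision in isolation (plain std::set; kAmbiguous is an illustrative sentinel, not a V8 constant):

#include <cstdio>
#include <set>

typedef int Map;
const int kAmbiguous = -1;

// Returns 0 (true branch), 1 (false branch), or kAmbiguous when the
// compare cannot be decided statically.
int KnownSuccessor(const std::set<Map>& maps, Map compared) {
  if (maps.count(compared) == 0) return 1;  // Map impossible: false branch.
  if (maps.size() == 1) return 0;           // Only this map: true branch.
  return kAmbiguous;                        // Several candidates survive.
}

int main() {
  std::set<Map> maps;
  maps.insert(1);
  std::printf("%d\n", KnownSuccessor(maps, 1));  // prints 0
  std::printf("%d\n", KnownSuccessor(maps, 2));  // prints 1
  return 0;
}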
+
+ void ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch* instr) {
+ HValue* left = instr->left()->ActualValue();
+ HCheckTableEntry* le = Find(left);
+ if (le == NULL) return;
+ HValue* right = instr->right()->ActualValue();
+ HCheckTableEntry* re = Find(right);
+ if (re == NULL) return;
+
+ EnsureChecked(le, left, instr);
+ EnsureChecked(re, right, instr);
+
+    // TODO(bmeurer): Add a predicate here instead of computing the intersection.
+ MapSet intersection = le->maps_->Intersect(re->maps_, zone());
+ if (intersection->size() > 0) return;
+
+ TRACE(("Marking redundant CompareObjectEqAndBranch #%d at B%d as false\n",
+ instr->id(), instr->block()->block_id()));
+ int succ = 1;
+ instr->set_known_successor_index(succ);
+
+ int unreachable_succ = 1 - succ;
+ instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
+ }
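
The new ReduceCompareObjectEqAndBranch rule: two references whose possible-map sets are disjoint can never be the same object, so the equality branch is statically false (the TODO above notes that a dedicated disjointness predicate would avoid allocating the intersection). A self-contained version of the test:

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <set>

typedef int Map;

// Disjoint map sets imply distinct objects, so an equality check on the
// two references is statically false.
bool DefinitelyNotEqual(const std::set<Map>& a, const std::set<Map>& b) {
  std::set<Map> both;
  std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                        std::inserter(both, both.begin()));
  return both.empty();
}

int main() {
  std::set<Map> a, b;
  a.insert(1); a.insert(2);
  b.insert(3);
  std::printf("%d\n", DefinitelyNotEqual(a, b));  // prints 1
  return 0;
}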
+
+ void ReduceIsStringAndBranch(HIsStringAndBranch* instr) {
+ HValue* value = instr->value()->ActualValue();
+ HCheckTableEntry* entry = Find(value);
+ if (entry == NULL) return;
+ EnsureChecked(entry, value, instr);
+ int succ;
+ if (entry->maps_->IsSubset(string_maps())) {
+ TRACE(("Marking redundant IsStringAndBranch #%d at B%d as true\n",
+ instr->id(), instr->block()->block_id()));
+ succ = 0;
+ } else {
+ MapSet intersection = entry->maps_->Intersect(string_maps(), zone());
+ if (intersection->size() > 0) return;
+ TRACE(("Marking redundant IsStringAndBranch #%d at B%d as false\n",
+ instr->id(), instr->block()->block_id()));
+ succ = 1;
+ }
+ instr->set_known_successor_index(succ);
+ int unreachable_succ = 1 - succ;
+ instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
}
void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
- MapSet maps = FindMaps(instr->object()->ActualValue());
+ HValue* object = instr->object()->ActualValue();
+ HCheckTableEntry* entry = Find(object);
// Can only learn more about an object that already has a known set of maps.
- if (maps == NULL) return;
- if (maps->Contains(instr->original_map())) {
+ if (entry == NULL) return;
+ EnsureChecked(entry, object, instr);
+ if (entry->maps_->Contains(instr->original_map())) {
// If the object has the original map, it will be transitioned.
+ UniqueSet<Map>* maps = entry->maps_->Copy(zone());
maps->Remove(instr->original_map());
- maps->Add(instr->transitioned_map(), phase_->zone());
+ maps->Add(instr->transitioned_map(), zone());
+ entry->maps_ = maps;
} else {
// Object does not have the given map, thus the transition is redundant.
- instr->DeleteAndReplaceWith(instr->object());
+ instr->DeleteAndReplaceWith(object);
INC_STAT(transitions_);
}
}
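
Note the copy before mutation in the transition case above: map sets may be shared between table entries and between per-block copies of the table, so the entry is repointed at a fresh copy with original_map swapped for transitioned_map rather than mutated in place. The same copy-on-write step, with plain containers:

#include <set>

typedef int Map;

// Copy-on-write update: never mutate a possibly shared set in place.
const std::set<Map>* TransitionMaps(const std::set<Map>* maps,
                                    Map original, Map transitioned) {
  std::set<Map>* copy = new std::set<Map>(*maps);
  copy->erase(original);
  copy->insert(transitioned);
  return copy;  // The caller's entry now points at the private copy.
}

int main() {
  std::set<Map> shared;
  shared.insert(1);
  const std::set<Map>* updated = TransitionMaps(&shared, 1, 2);
  bool ok = (shared.count(1) == 1) && (updated->count(2) == 1);
  delete updated;
  return ok ? 0 : 1;
}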
+ void EnsureChecked(HCheckTableEntry* entry,
+ HValue* value,
+ HInstruction* instr) {
+ if (entry->state_ != HCheckTableEntry::UNCHECKED_STABLE) return;
+ HGraph* graph = instr->block()->graph();
+ HCheckMaps* check = HCheckMaps::CreateAndInsertBefore(
+ graph->zone(), value, entry->maps_->Copy(graph->zone()), true, instr);
+ check->MarkAsStabilityCheck();
+ entry->state_ = HCheckTableEntry::CHECKED_STABLE;
+ entry->check_ = NULL;
+ }
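
EnsureChecked lazily materializes a map check the first time an UNCHECKED_STABLE entry is actually relied on, then upgrades the entry so later uses are free. The state transition in isolation (the enum and struct are illustrative stand-ins for HCheckTableEntry):

#include <cstdio>

enum State { UNCHECKED_STABLE, CHECKED_STABLE, CHECKED };

struct Entry { State state; };

// A stability check is emitted only on first use of an unchecked stable
// entry; afterwards the entry is CHECKED_STABLE and later uses pay nothing.
void EnsureChecked(Entry* entry) {
  if (entry->state != UNCHECKED_STABLE) return;
  std::printf("emit stability check\n");  // Stands in for the HCheckMaps insert.
  entry->state = CHECKED_STABLE;
}

int main() {
  Entry e = { UNCHECKED_STABLE };
  EnsureChecked(&e);  // Emits the check.
  EnsureChecked(&e);  // No-op.
  return 0;
}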
+
// Kill everything in the table.
void Kill() {
size_ = 0;
cursor_ = 0;
}
+ // Kill all unstable entries in the table.
+ void KillUnstableEntries() {
+ bool compact = false;
+ for (int i = 0; i < size_; ++i) {
+ HCheckTableEntry* entry = &entries_[i];
+ ASSERT_NOT_NULL(entry->object_);
+ if (entry->state_ == HCheckTableEntry::CHECKED) {
+ entry->object_ = NULL;
+ compact = true;
+ } else {
+ // All checked stable entries become unchecked stable.
+ entry->state_ = HCheckTableEntry::UNCHECKED_STABLE;
+ entry->check_ = NULL;
+ }
+ }
+ if (compact) Compact();
+ }
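
KillUnstableEntries implements the asymmetry that makes stable maps useful: when maps may have changed somewhere, entries that were only CHECKED are dropped outright, while stable entries keep their map facts but are demoted to UNCHECKED_STABLE (the materialized check is discarded). The same pass over a toy table:

#include <vector>

enum State { UNCHECKED_STABLE, CHECKED_STABLE, CHECKED };

struct Entry { int object; State state; };

// CHECKED entries are killed; stable ones survive with their facts intact
// but drop back to UNCHECKED_STABLE.
void KillUnstableEntries(std::vector<Entry>* table) {
  std::vector<Entry> kept;
  for (size_t i = 0; i < table->size(); ++i) {
    Entry e = (*table)[i];
    if (e.state == CHECKED) continue;  // Killed.
    e.state = UNCHECKED_STABLE;        // Demoted.
    kept.push_back(e);
  }
  table->swap(kept);  // Plays the role of Compact().
}

int main() {
  std::vector<Entry> table;
  Entry a = { 1, CHECKED };        table.push_back(a);
  Entry b = { 2, CHECKED_STABLE }; table.push_back(b);
  KillUnstableEntries(&table);     // Only object 2 survives.
  return table.size() == 1 ? 0 : 1;
}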
+
// Kill everything in the table that may alias {object}.
void Kill(HValue* object) {
bool compact = false;
@@ -357,24 +718,31 @@ class HCheckTable : public ZoneObject {
int L = cursor_;
int R = size_ - cursor_;
- OS::MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry));
- OS::MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry));
- OS::MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry));
+ MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry));
+ MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry));
+ MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry));
}
cursor_ = size_; // Move cursor to end.
}
- void Print() {
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* entry = &entries_[i];
+ static void Print(HCheckTable* table) {
+ if (table == NULL) {
+ PrintF(" unreachable\n");
+ return;
+ }
+
+ for (int i = 0; i < table->size_; i++) {
+ HCheckTableEntry* entry = &table->entries_[i];
ASSERT(entry->object_ != NULL);
- PrintF(" checkmaps-table @%d: object #%d ", i, entry->object_->id());
+ PrintF(" checkmaps-table @%d: %s #%d ", i,
+ entry->object_->IsPhi() ? "phi" : "object", entry->object_->id());
if (entry->check_ != NULL) {
PrintF("check #%d ", entry->check_->id());
}
MapSet list = entry->maps_;
- PrintF("%d maps { ", list->size());
+ PrintF("%d %s maps { ", list->size(),
+ HCheckTableEntry::State2String(entry->state_));
for (int j = 0; j < list->size(); j++) {
if (j > 0) PrintF(", ");
PrintF("%" V8PRIxPTR, list->at(j).Hashcode());
@@ -383,7 +751,6 @@ class HCheckTable : public ZoneObject {
}
}
- private:
HCheckTableEntry* Find(HValue* object) {
for (int i = size_ - 1; i >= 0; i--) {
// Search from most-recently-inserted to least-recently-inserted.
@@ -394,42 +761,39 @@ class HCheckTable : public ZoneObject {
return NULL;
}
- MapSet FindMaps(HValue* object) {
- HCheckTableEntry* entry = Find(object);
- return entry == NULL ? NULL : entry->maps_;
+ void Insert(HValue* object,
+ HInstruction* check,
+ Unique<Map> map,
+ HCheckTableEntry::State state) {
+ Insert(object, check, new(zone()) UniqueSet<Map>(map, zone()), state);
}
- void Insert(HValue* object, Unique<Map> map) {
- MapSet list = new(phase_->zone()) UniqueSet<Map>();
- list->Add(map, phase_->zone());
- Insert(object, NULL, list);
- }
-
- void Insert(HValue* object, HCheckMaps* check, MapSet maps) {
+ void Insert(HValue* object,
+ HInstruction* check,
+ MapSet maps,
+ HCheckTableEntry::State state) {
+ ASSERT(state != HCheckTableEntry::UNCHECKED_STABLE || check == NULL);
HCheckTableEntry* entry = &entries_[cursor_++];
entry->object_ = object;
entry->check_ = check;
entry->maps_ = maps;
+ entry->state_ = state;
// If the table becomes full, wrap around and overwrite older entries.
if (cursor_ == kMaxTrackedObjects) cursor_ = 0;
if (size_ < kMaxTrackedObjects) size_++;
}
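
Insert writes into a fixed-size ring: the cursor wraps at kMaxTrackedObjects and overwrites the oldest entries, so the table stays O(1) in space and simply forgets old facts under pressure, which is safe but less precise. The wraparound in isolation (kMax is illustrative):

#include <cstdio>

const int kMax = 4;

struct Table {
  int entries[kMax];
  int cursor;  // Next slot to write; wraps around.
  int size;    // Number of valid entries, capped at kMax.
};

// Fixed-capacity insert: once full, the oldest entries are overwritten.
void Insert(Table* t, int value) {
  t->entries[t->cursor++] = value;
  if (t->cursor == kMax) t->cursor = 0;
  if (t->size < kMax) t->size++;
}

int main() {
  Table t = { {0}, 0, 0 };
  for (int i = 1; i <= 6; ++i) Insert(&t, i);  // 5 and 6 overwrite 1 and 2.
  std::printf("size=%d cursor=%d\n", t.size, t.cursor);  // size=4 cursor=2
  return 0;
}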
- bool IsMapAccess(HObjectAccess access) {
- return access.IsInobject() && access.offset() == JSObject::kMapOffset;
- }
-
- Unique<Map> MapConstant(HValue* value) {
- return Unique<Map>::cast(HConstant::cast(value)->GetUnique());
- }
+ Zone* zone() const { return phase_->zone(); }
+ MapSet string_maps() const { return phase_->string_maps(); }
friend class HCheckMapsEffects;
+ friend class HCheckEliminationPhase;
HCheckEliminationPhase* phase_;
HCheckTableEntry entries_[kMaxTrackedObjects];
int16_t cursor_; // Must be <= kMaxTrackedObjects
int16_t size_; // Must be <= kMaxTrackedObjects
- // TODO(titzer): STATIC_ASSERT kMaxTrackedObjects < max(cursor_)
+ STATIC_ASSERT(kMaxTrackedObjects < (1 << 15));
};
@@ -437,60 +801,62 @@ class HCheckTable : public ZoneObject {
// needed for check elimination.
class HCheckMapsEffects : public ZoneObject {
public:
- explicit HCheckMapsEffects(Zone* zone)
- : maps_stored_(false),
- stores_(5, zone) { }
+ explicit HCheckMapsEffects(Zone* zone) : objects_(0, zone) { }
- inline bool Disabled() {
- return false; // Effects are _not_ disabled.
- }
+ // Effects are _not_ disabled.
+ inline bool Disabled() const { return false; }
// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
switch (instr->opcode()) {
case HValue::kStoreNamedField: {
- stores_.Add(HStoreNamedField::cast(instr), zone);
+ HStoreNamedField* store = HStoreNamedField::cast(instr);
+ if (store->access().IsMap() || store->has_transition()) {
+ objects_.Add(store->object(), zone);
+ }
break;
}
- case HValue::kOsrEntry: {
- // Kill everything. Loads must not be hoisted past the OSR entry.
- maps_stored_ = true;
+ case HValue::kTransitionElementsKind: {
+ objects_.Add(HTransitionElementsKind::cast(instr)->object(), zone);
+ break;
}
default: {
- maps_stored_ |= (instr->CheckGVNFlag(kChangesMaps) |
- instr->CheckGVNFlag(kChangesElementsKind));
+ flags_.Add(instr->ChangesFlags());
+ break;
}
}
}
// Apply these effects to the given check elimination table.
void Apply(HCheckTable* table) {
- if (maps_stored_) {
+ if (flags_.Contains(kOsrEntries)) {
// Uncontrollable map modifications; kill everything.
table->Kill();
return;
}
- // Kill maps for each store contained in these effects.
- for (int i = 0; i < stores_.length(); i++) {
- HStoreNamedField* s = stores_[i];
- if (table->IsMapAccess(s->access()) || s->has_transition()) {
- table->Kill(s->object()->ActualValue());
- }
+ // Kill all unstable entries.
+ if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
+ table->KillUnstableEntries();
+ }
+
+ // Kill maps for each object contained in these effects.
+ for (int i = 0; i < objects_.length(); ++i) {
+ table->Kill(objects_[i]->ActualValue());
}
}
// Union these effects with the other effects.
void Union(HCheckMapsEffects* that, Zone* zone) {
- maps_stored_ |= that->maps_stored_;
- for (int i = 0; i < that->stores_.length(); i++) {
- stores_.Add(that->stores_[i], zone);
+ flags_.Add(that->flags_);
+ for (int i = 0; i < that->objects_.length(); ++i) {
+ objects_.Add(that->objects_[i], zone);
}
}
private:
- bool maps_stored_ : 1;
- ZoneList<HStoreNamedField*> stores_;
+ ZoneList<HValue*> objects_;
+ GVNFlagSet flags_;
};
@@ -525,6 +891,7 @@ void HCheckEliminationPhase::PrintStats() {
PRINT_STAT(redundant);
PRINT_STAT(removed);
PRINT_STAT(removed_cho);
+ PRINT_STAT(removed_cit);
PRINT_STAT(narrowed);
PRINT_STAT(loads);
PRINT_STAT(empty);
diff --git a/chromium/v8/src/hydrogen-check-elimination.h b/chromium/v8/src/hydrogen-check-elimination.h
index b429b174623..16f758b544c 100644
--- a/chromium/v8/src/hydrogen-check-elimination.h
+++ b/chromium/v8/src/hydrogen-check-elimination.h
@@ -1,35 +1,12 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_CHECK_ELIMINATION_H_
#define V8_HYDROGEN_CHECK_ELIMINATION_H_
-#include "hydrogen.h"
-#include "hydrogen-alias-analysis.h"
+#include "src/hydrogen.h"
+#include "src/hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
@@ -39,11 +16,20 @@ namespace internal {
class HCheckEliminationPhase : public HPhase {
public:
explicit HCheckEliminationPhase(HGraph* graph)
- : HPhase("H_Check Elimination", graph), aliasing_() {
+ : HPhase("H_Check Elimination", graph), aliasing_(),
+ string_maps_(kStringMapsSize, zone()) {
+ // Compute the set of string maps.
+ #define ADD_STRING_MAP(type, size, name, Name) \
+ string_maps_.Add(Unique<Map>::CreateImmovable( \
+ graph->isolate()->factory()->name##_map()), zone());
+ STRING_TYPE_LIST(ADD_STRING_MAP)
+ #undef ADD_STRING_MAP
+ ASSERT_EQ(kStringMapsSize, string_maps_.size());
#ifdef DEBUG
redundant_ = 0;
removed_ = 0;
removed_cho_ = 0;
+ removed_cit_ = 0;
narrowed_ = 0;
loads_ = 0;
empty_ = 0;
@@ -58,13 +44,20 @@ class HCheckEliminationPhase : public HPhase {
friend class HCheckTable;
private:
+ const UniqueSet<Map>* string_maps() const { return &string_maps_; }
+
void PrintStats();
HAliasAnalyzer* aliasing_;
+ #define COUNT(type, size, name, Name) + 1
+ static const int kStringMapsSize = 0 STRING_TYPE_LIST(COUNT);
+ #undef COUNT
+ UniqueSet<Map> string_maps_;
#ifdef DEBUG
int redundant_;
int removed_;
int removed_cho_;
+ int removed_cit_;
int narrowed_;
int loads_;
int empty_;
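
kStringMapsSize above is computed with a classic X-macro counting trick: every STRING_TYPE_LIST entry expands to `+ 1`, so the whole expansion is a compile-time constant that automatically tracks the list. The same trick on a toy list (COLOR_LIST is made up for illustration):

#define COLOR_LIST(V) \
  V(Red)              \
  V(Green)            \
  V(Blue)

#define COUNT(Name) + 1
static const int kNumColors = 0 COLOR_LIST(COUNT);  // 0 + 1 + 1 + 1 == 3.
#undef COUNT

int main() { return kNumColors == 3 ? 0 : 1; }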
diff --git a/chromium/v8/src/hydrogen-dce.cc b/chromium/v8/src/hydrogen-dce.cc
index e101ee5bcc5..96f088d0c07 100644
--- a/chromium/v8/src/hydrogen-dce.cc
+++ b/chromium/v8/src/hydrogen-dce.cc
@@ -1,32 +1,9 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-dce.h"
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-dce.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -64,7 +41,7 @@ void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
}
stream.Add(" -> ");
instr->PrintTo(&stream);
- PrintF("[MarkLive %s]\n", *stream.ToCString());
+ PrintF("[MarkLive %s]\n", stream.ToCString().get());
}
diff --git a/chromium/v8/src/hydrogen-dce.h b/chromium/v8/src/hydrogen-dce.h
index 2d73b380e40..af3679d9d39 100644
--- a/chromium/v8/src/hydrogen-dce.h
+++ b/chromium/v8/src/hydrogen-dce.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_DCE_H_
#define V8_HYDROGEN_DCE_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-dehoist.cc b/chromium/v8/src/hydrogen-dehoist.cc
index bdf2cfb2584..fe0ae764ad6 100644
--- a/chromium/v8/src/hydrogen-dehoist.cc
+++ b/chromium/v8/src/hydrogen-dehoist.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "hydrogen-dehoist.h"
+#include "src/hydrogen-dehoist.h"
namespace v8 {
namespace internal {
@@ -53,12 +30,13 @@ static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
int32_t value = constant->Integer32Value() * sign;
// We limit offset values to 30 bits because we want to avoid the risk of
// overflows when the offset is added to the object header size.
- if (value >= 1 << array_operation->MaxIndexOffsetBits() || value < 0) return;
+ if (value >= 1 << array_operation->MaxBaseOffsetBits() || value < 0) return;
array_operation->SetKey(subexpression);
if (binary_operation->HasNoUses()) {
binary_operation->DeleteAndReplaceWith(NULL);
}
- array_operation->SetIndexOffset(static_cast<uint32_t>(value));
+ value <<= ElementsKindToShiftSize(array_operation->elements_kind());
+ array_operation->IncreaseBaseOffset(static_cast<uint32_t>(value));
array_operation->SetDehoisted(true);
}
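
The dehoist change switches from element-index offsets to byte-based base offsets: the constant folded out of the key is now shifted left by the element size (log2) for the array's elements kind before being added to the base offset. The arithmetic in isolation (the shift value is illustrative; in V8 it comes from ElementsKindToShiftSize):

#include <cstdio>

int main() {
  int index = 3;   // Constant extracted from a key like "i + 3".
  int shift = 3;   // log2 of the element size, e.g. 8-byte doubles.
  unsigned byte_offset = static_cast<unsigned>(index) << shift;
  std::printf("base offset grows by %u bytes\n", byte_offset);  // 24
  return 0;
}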
diff --git a/chromium/v8/src/hydrogen-dehoist.h b/chromium/v8/src/hydrogen-dehoist.h
index 140dc6e0e22..4aab30fafa1 100644
--- a/chromium/v8/src/hydrogen-dehoist.h
+++ b/chromium/v8/src/hydrogen-dehoist.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_DEHOIST_H_
#define V8_HYDROGEN_DEHOIST_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-environment-liveness.cc b/chromium/v8/src/hydrogen-environment-liveness.cc
index d7501ac49e6..a72dfda01ae 100644
--- a/chromium/v8/src/hydrogen-environment-liveness.cc
+++ b/chromium/v8/src/hydrogen-environment-liveness.cc
@@ -1,32 +1,9 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "hydrogen-environment-liveness.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "src/hydrogen-environment-liveness.h"
namespace v8 {
@@ -84,8 +61,8 @@ void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsInSuccessors(
}
HSimulate* simulate = first_simulate_.at(successor_id);
if (simulate == NULL) continue;
- ASSERT(simulate->closure().is_identical_to(
- block->last_environment()->closure()));
+ ASSERT(VerifyClosures(simulate->closure(),
+ block->last_environment()->closure()));
ZapEnvironmentSlot(i, simulate);
}
}
@@ -97,7 +74,7 @@ void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsForInstruction(
if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
HSimulate* simulate = marker->next_simulate();
if (simulate != NULL) {
- ASSERT(simulate->closure().is_identical_to(marker->closure()));
+ ASSERT(VerifyClosures(simulate->closure(), marker->closure()));
ZapEnvironmentSlot(marker->index(), simulate);
}
}
@@ -241,4 +218,14 @@ void HEnvironmentLivenessAnalysisPhase::Run() {
}
}
+
+#ifdef DEBUG
+bool HEnvironmentLivenessAnalysisPhase::VerifyClosures(
+ Handle<JSFunction> a, Handle<JSFunction> b) {
+ Heap::RelocationLock for_heap_access(isolate()->heap());
+ AllowHandleDereference for_verification;
+ return a.is_identical_to(b);
+}
+#endif
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/hydrogen-environment-liveness.h b/chromium/v8/src/hydrogen-environment-liveness.h
index 248ec5ce5d7..e595927f9d4 100644
--- a/chromium/v8/src/hydrogen-environment-liveness.h
+++ b/chromium/v8/src/hydrogen-environment-liveness.h
@@ -1,35 +1,12 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
#define V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
@@ -55,6 +32,9 @@ class HEnvironmentLivenessAnalysisPhase : public HPhase {
void ZapEnvironmentSlotsForInstruction(HEnvironmentMarker* marker);
void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live);
void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live);
+#ifdef DEBUG
+ bool VerifyClosures(Handle<JSFunction> a, Handle<JSFunction> b);
+#endif
int block_count_;
diff --git a/chromium/v8/src/hydrogen-escape-analysis.cc b/chromium/v8/src/hydrogen-escape-analysis.cc
index 10230199233..23ca468ff92 100644
--- a/chromium/v8/src/hydrogen-escape-analysis.cc
+++ b/chromium/v8/src/hydrogen-escape-analysis.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-escape-analysis.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-escape-analysis.h"
namespace v8 {
namespace internal {
@@ -155,12 +132,29 @@ HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
// TODO(mstarzinger): This will narrow a map check against a set of maps
// down to the first element in the set. Revisit and fix this.
HCheckValue* check = HCheckValue::New(
- zone, NULL, value, mapcheck->first_map(), false);
+ zone, NULL, value, mapcheck->maps()->at(0), false);
check->InsertBefore(mapcheck);
return check;
}
+// Replace a field load with a given value, forcing Smi representation if
+// necessary.
+HValue* HEscapeAnalysisPhase::NewLoadReplacement(
+ HLoadNamedField* load, HValue* load_value) {
+ HValue* replacement = load_value;
+ Representation representation = load->representation();
+ if (representation.IsSmiOrInteger32() || representation.IsDouble()) {
+ Zone* zone = graph()->zone();
+ HInstruction* new_instr =
+ HForceRepresentation::New(zone, NULL, load_value, representation);
+ new_instr->InsertAfter(load);
+ replacement = new_instr;
+ }
+ return replacement;
+}
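
Scalar replacement can substitute a tracked value for a load, but the load's consumers may require a specific representation (Smi, Int32, Double); the new helper inserts a forcing conversion in exactly that case. Sketched with an enum standing in for Representation (all names hypothetical):

#include <cstdio>

enum Rep { kTagged, kSmi, kInt32, kDouble };

struct Value { Rep rep; };

// Hypothetical stand-in for HForceRepresentation::New + InsertAfter.
Value ForceRepresentation(Value v, Rep required) {
  v.rep = required;
  return v;
}

// Mirrors NewLoadReplacement: only numeric representations need forcing;
// tagged replacements can be used as-is.
Value NewLoadReplacement(Rep load_rep, Value replacement) {
  if (load_rep == kSmi || load_rep == kInt32 || load_rep == kDouble) {
    return ForceRepresentation(replacement, load_rep);
  }
  return replacement;
}

int main() {
  Value v = { kTagged };
  Value r = NewLoadReplacement(kDouble, v);
  std::printf("%d\n", r.rep == kDouble);  // prints 1
  return 0;
}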
+
+
// Performs a forward data-flow analysis of all loads and stores on the
// given captured allocation. This uses a reverse post-order iteration
// over affected basic blocks. All non-escaping instructions are handled
@@ -196,10 +190,11 @@ void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) {
int index = load->access().offset() / kPointerSize;
if (load->object() != allocate) continue;
ASSERT(load->access().IsInobject());
- HValue* replacement = state->OperandAt(index);
+ HValue* replacement =
+ NewLoadReplacement(load, state->OperandAt(index));
load->DeleteAndReplaceWith(replacement);
if (FLAG_trace_escape_analysis) {
- PrintF("Replacing load #%d with #%d (%s)\n", instr->id(),
+ PrintF("Replacing load #%d with #%d (%s)\n", load->id(),
replacement->id(), replacement->Mnemonic());
}
break;
@@ -304,7 +299,7 @@ void HEscapeAnalysisPhase::PerformScalarReplacement() {
int size_in_bytes = allocate->size()->GetInteger32Constant();
number_of_values_ = size_in_bytes / kPointerSize;
number_of_objects_++;
- block_states_.Clear();
+ block_states_.Rewind(0);
// Perform actual analysis step.
AnalyzeDataFlow(allocate);
@@ -325,7 +320,7 @@ void HEscapeAnalysisPhase::Run() {
CollectCapturedValues();
if (captured_.is_empty()) break;
PerformScalarReplacement();
- captured_.Clear();
+ captured_.Rewind(0);
}
}
diff --git a/chromium/v8/src/hydrogen-escape-analysis.h b/chromium/v8/src/hydrogen-escape-analysis.h
index 3e27cc1b48b..0726b8edbe4 100644
--- a/chromium/v8/src/hydrogen-escape-analysis.h
+++ b/chromium/v8/src/hydrogen-escape-analysis.h
@@ -1,35 +1,12 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_ESCAPE_ANALYSIS_H_
#define V8_HYDROGEN_ESCAPE_ANALYSIS_H_
-#include "allocation.h"
-#include "hydrogen.h"
+#include "src/allocation.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
@@ -62,6 +39,8 @@ class HEscapeAnalysisPhase : public HPhase {
HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck);
+ HValue* NewLoadReplacement(HLoadNamedField* load, HValue* load_value);
+
HCapturedObject* StateAt(HBasicBlock* block) {
return block_states_.at(block->block_id());
}
diff --git a/chromium/v8/src/hydrogen-flow-engine.h b/chromium/v8/src/hydrogen-flow-engine.h
index 4e1275546f6..5ce320a4dbd 100644
--- a/chromium/v8/src/hydrogen-flow-engine.h
+++ b/chromium/v8/src/hydrogen-flow-engine.h
@@ -1,36 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_FLOW_ENGINE_H_
#define V8_HYDROGEN_FLOW_ENGINE_H_
-#include "hydrogen.h"
-#include "hydrogen-instructions.h"
-#include "zone.h"
+#include "src/hydrogen.h"
+#include "src/hydrogen-instructions.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -122,19 +99,22 @@ class HFlowEngine {
// Skip blocks not dominated by the root node.
if (SkipNonDominatedBlock(root, block)) continue;
- State* state = StateAt(block);
+ State* state = State::Finish(StateAt(block), block, zone_);
- if (block->IsLoopHeader()) {
- // Apply loop effects before analyzing loop body.
- ComputeLoopEffects(block)->Apply(state);
- } else {
- // Must have visited all predecessors before this block.
- CheckPredecessorCount(block);
- }
+ if (block->IsReachable()) {
+ ASSERT(state != NULL);
+ if (block->IsLoopHeader()) {
+ // Apply loop effects before analyzing loop body.
+ ComputeLoopEffects(block)->Apply(state);
+ } else {
+ // Must have visited all predecessors before this block.
+ CheckPredecessorCount(block);
+ }
- // Go through all instructions of the current block, updating the state.
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- state = state->Process(it.Current(), zone_);
+ // Go through all instructions of the current block, updating the state.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ state = state->Process(it.Current(), zone_);
+ }
}
// Propagate the block state forward to all successor blocks.
@@ -142,18 +122,14 @@ class HFlowEngine {
for (int i = 0; i < max; i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
IncrementPredecessorCount(succ);
- if (StateAt(succ) == NULL) {
- // This is the first state to reach the successor.
- if (max == 1 && succ->predecessors()->length() == 1) {
- // Optimization: successor can inherit this state.
- SetStateAt(succ, state);
- } else {
- // Successor needs a copy of the state.
- SetStateAt(succ, state->Copy(succ, zone_));
- }
+
+ if (max == 1 && succ->predecessors()->length() == 1) {
+ // Optimization: successor can inherit this state.
+ SetStateAt(succ, state);
} else {
// Merge the current state with the state already at the successor.
- SetStateAt(succ, state->Merge(succ, StateAt(succ), zone_));
+ SetStateAt(succ,
+ State::Merge(StateAt(succ), succ, state, block, zone_));
}
}
}
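
The rewritten edge handling drops the separate first-arrival branch: a sole successor with a sole predecessor simply inherits the state object without copying, and every other edge merges the incoming state into whatever is already at the successor (the new Merge also absorbs the NULL first-arrival case). A sketch of that decision, with a bitwise AND standing in for the real state join:

#include <cstddef>
#include <cstdio>

struct State { int facts; };

State* Copy(State* s) { return new State(*s); }

// Keep only facts both paths agree on (modeled as a bitwise AND).
State* Merge(State* at_succ, State* incoming) {
  if (at_succ == NULL) return Copy(incoming);  // First state to arrive.
  at_succ->facts &= incoming->facts;
  return at_succ;
}

// A sole edge between sole-successor and sole-predecessor blocks can pass
// the state along by pointer; all other edges merge.
State* PropagateToSuccessor(State* at_succ, State* incoming, bool sole_edge) {
  if (sole_edge) return incoming;  // Inherit without copying.
  return Merge(at_succ, incoming);
}

int main() {
  State a = { 0x6 }, b = { 0x3 };
  State* merged = PropagateToSuccessor(&a, &b, false);
  std::printf("%d\n", merged->facts);  // 2: only the shared fact survives.
  return 0;
}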
@@ -185,6 +161,7 @@ class HFlowEngine {
i = member->loop_information()->GetLastBackEdge()->block_id();
} else {
// Process all the effects of the block.
+ if (member->IsUnreachable()) continue;
ASSERT(member->current_loop() == loop);
for (HInstructionIterator it(member); !it.Done(); it.Advance()) {
effects->Process(it.Current(), zone_);
diff --git a/chromium/v8/src/hydrogen-gvn.cc b/chromium/v8/src/hydrogen-gvn.cc
index 7553abe206c..e6f1ae90f39 100644
--- a/chromium/v8/src/hydrogen-gvn.cc
+++ b/chromium/v8/src/hydrogen-gvn.cc
@@ -1,70 +1,47 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen.h"
-#include "hydrogen-gvn.h"
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen.h"
+#include "src/hydrogen-gvn.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
-class HValueMap: public ZoneObject {
+class HInstructionMap V8_FINAL : public ZoneObject {
public:
- explicit HValueMap(Zone* zone)
+ HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
: array_size_(0),
lists_size_(0),
count_(0),
- present_flags_(0),
array_(NULL),
lists_(NULL),
- free_list_head_(kNil) {
+ free_list_head_(kNil),
+ side_effects_tracker_(side_effects_tracker) {
ResizeLists(kInitialSize, zone);
Resize(kInitialSize, zone);
}
- void Kill(GVNFlagSet flags);
+ void Kill(SideEffects side_effects);
- void Add(HValue* value, Zone* zone) {
- present_flags_.Add(value->gvn_flags());
- Insert(value, zone);
+ void Add(HInstruction* instr, Zone* zone) {
+ present_depends_on_.Add(side_effects_tracker_->ComputeDependsOn(instr));
+ Insert(instr, zone);
}
- HValue* Lookup(HValue* value) const;
+ HInstruction* Lookup(HInstruction* instr) const;
- HValueMap* Copy(Zone* zone) const {
- return new(zone) HValueMap(zone, this);
+ HInstructionMap* Copy(Zone* zone) const {
+ return new(zone) HInstructionMap(zone, this);
}
bool IsEmpty() const { return count_ == 0; }
private:
- // A linked list of HValue* values. Stored in arrays.
- struct HValueMapListElement {
- HValue* value;
+ // A linked list of HInstruction* values. Stored in arrays.
+ struct HInstructionMapListElement {
+ HInstruction* instr;
int next; // Index in the array of the next list element.
};
static const int kNil = -1; // The end of a linked list
@@ -72,34 +49,36 @@ class HValueMap: public ZoneObject {
// Must be a power of 2.
static const int kInitialSize = 16;
- HValueMap(Zone* zone, const HValueMap* other);
+ HInstructionMap(Zone* zone, const HInstructionMap* other);
void Resize(int new_size, Zone* zone);
void ResizeLists(int new_size, Zone* zone);
- void Insert(HValue* value, Zone* zone);
+ void Insert(HInstruction* instr, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
int lists_size_;
- int count_; // The number of values stored in the HValueMap.
- GVNFlagSet present_flags_; // All flags that are in any value in the
- // HValueMap.
- HValueMapListElement* array_; // Primary store - contains the first value
+ int count_; // The number of values stored in the HInstructionMap.
+ SideEffects present_depends_on_;
+ HInstructionMapListElement* array_;
+ // Primary store - contains the first value
// with a given hash. Colliding elements are stored in linked lists.
- HValueMapListElement* lists_; // The linked lists containing hash collisions.
+ HInstructionMapListElement* lists_;
+ // The linked lists containing hash collisions.
int free_list_head_; // Unused elements in lists_ are on the free list.
+ SideEffectsTracker* side_effects_tracker_;
};
-class HSideEffectMap BASE_EMBEDDED {
+class HSideEffectMap V8_FINAL BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
HSideEffectMap& operator= (const HSideEffectMap& other);
- void Kill(GVNFlagSet flags);
+ void Kill(SideEffects side_effects);
- void Store(GVNFlagSet flags, HInstruction* instr);
+ void Store(SideEffects side_effects, HInstruction* instr);
bool IsEmpty() const { return count_ == 0; }
@@ -152,35 +131,36 @@ void TraceGVN(const char* msg, ...) {
}
-HValueMap::HValueMap(Zone* zone, const HValueMap* other)
+HInstructionMap::HInstructionMap(Zone* zone, const HInstructionMap* other)
: array_size_(other->array_size_),
lists_size_(other->lists_size_),
count_(other->count_),
- present_flags_(other->present_flags_),
- array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
- lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_) {
- OS::MemCopy(
- array_, other->array_, array_size_ * sizeof(HValueMapListElement));
- OS::MemCopy(
- lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+ present_depends_on_(other->present_depends_on_),
+ array_(zone->NewArray<HInstructionMapListElement>(other->array_size_)),
+ lists_(zone->NewArray<HInstructionMapListElement>(other->lists_size_)),
+ free_list_head_(other->free_list_head_),
+ side_effects_tracker_(other->side_effects_tracker_) {
+ MemCopy(array_, other->array_,
+ array_size_ * sizeof(HInstructionMapListElement));
+ MemCopy(lists_, other->lists_,
+ lists_size_ * sizeof(HInstructionMapListElement));
}
-void HValueMap::Kill(GVNFlagSet flags) {
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
- if (!present_flags_.ContainsAnyOf(depends_flags)) return;
- present_flags_.RemoveAll();
+void HInstructionMap::Kill(SideEffects changes) {
+ if (!present_depends_on_.ContainsAnyOf(changes)) return;
+ present_depends_on_.RemoveAll();
for (int i = 0; i < array_size_; ++i) {
- HValue* value = array_[i].value;
- if (value != NULL) {
+ HInstruction* instr = array_[i].instr;
+ if (instr != NULL) {
// Clear list of collisions first, so we know if it becomes empty.
int kept = kNil; // List of kept elements.
int next;
for (int current = array_[i].next; current != kNil; current = next) {
next = lists_[current].next;
- HValue* value = lists_[current].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
+ HInstruction* instr = lists_[current].instr;
+ SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+ if (depends_on.ContainsAnyOf(changes)) {
// Drop it.
count_--;
lists_[current].next = free_list_head_;
@@ -189,40 +169,41 @@ void HValueMap::Kill(GVNFlagSet flags) {
// Keep it.
lists_[current].next = kept;
kept = current;
- present_flags_.Add(value->gvn_flags());
+ present_depends_on_.Add(depends_on);
}
}
array_[i].next = kept;
// Now possibly drop directly indexed element.
- value = array_[i].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
+ instr = array_[i].instr;
+ SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+ if (depends_on.ContainsAnyOf(changes)) { // Drop it.
count_--;
int head = array_[i].next;
if (head == kNil) {
- array_[i].value = NULL;
+ array_[i].instr = NULL;
} else {
- array_[i].value = lists_[head].value;
+ array_[i].instr = lists_[head].instr;
array_[i].next = lists_[head].next;
lists_[head].next = free_list_head_;
free_list_head_ = head;
}
} else {
- present_flags_.Add(value->gvn_flags()); // Keep it.
+ present_depends_on_.Add(depends_on); // Keep it.
}
}
}
}
-HValue* HValueMap::Lookup(HValue* value) const {
- uint32_t hash = static_cast<uint32_t>(value->Hashcode());
+HInstruction* HInstructionMap::Lookup(HInstruction* instr) const {
+ uint32_t hash = static_cast<uint32_t>(instr->Hashcode());
uint32_t pos = Bound(hash);
- if (array_[pos].value != NULL) {
- if (array_[pos].value->Equals(value)) return array_[pos].value;
+ if (array_[pos].instr != NULL) {
+ if (array_[pos].instr->Equals(instr)) return array_[pos].instr;
int next = array_[pos].next;
while (next != kNil) {
- if (lists_[next].value->Equals(value)) return lists_[next].value;
+ if (lists_[next].instr->Equals(instr)) return lists_[next].instr;
next = lists_[next].next;
}
}
@@ -230,7 +211,7 @@ HValue* HValueMap::Lookup(HValue* value) const {
}
-void HValueMap::Resize(int new_size, Zone* zone) {
+void HInstructionMap::Resize(int new_size, Zone* zone) {
ASSERT(new_size > count_);
// Hashing the values into the new array has no more collisions than in the
// old hash map, so we can use the existing lists_ array, if we are careful.
@@ -240,33 +221,33 @@ void HValueMap::Resize(int new_size, Zone* zone) {
ResizeLists(lists_size_ << 1, zone);
}
- HValueMapListElement* new_array =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
+ HInstructionMapListElement* new_array =
+ zone->NewArray<HInstructionMapListElement>(new_size);
+ memset(new_array, 0, sizeof(HInstructionMapListElement) * new_size);
- HValueMapListElement* old_array = array_;
+ HInstructionMapListElement* old_array = array_;
int old_size = array_size_;
int old_count = count_;
count_ = 0;
- // Do not modify present_flags_. It is currently correct.
+ // Do not modify present_depends_on_. It is currently correct.
array_size_ = new_size;
array_ = new_array;
if (old_array != NULL) {
// Iterate over all the elements in lists, rehashing them.
for (int i = 0; i < old_size; ++i) {
- if (old_array[i].value != NULL) {
+ if (old_array[i].instr != NULL) {
int current = old_array[i].next;
while (current != kNil) {
- Insert(lists_[current].value, zone);
+ Insert(lists_[current].instr, zone);
int next = lists_[current].next;
lists_[current].next = free_list_head_;
free_list_head_ = current;
current = next;
}
- // Rehash the directly stored value.
- Insert(old_array[i].value, zone);
+ // Rehash the directly stored instruction.
+ Insert(old_array[i].instr, zone);
}
}
}
@@ -275,21 +256,21 @@ void HValueMap::Resize(int new_size, Zone* zone) {
}
-void HValueMap::ResizeLists(int new_size, Zone* zone) {
+void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
ASSERT(new_size > lists_size_);
- HValueMapListElement* new_lists =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
+ HInstructionMapListElement* new_lists =
+ zone->NewArray<HInstructionMapListElement>(new_size);
+ memset(new_lists, 0, sizeof(HInstructionMapListElement) * new_size);
- HValueMapListElement* old_lists = lists_;
+ HInstructionMapListElement* old_lists = lists_;
int old_size = lists_size_;
lists_size_ = new_size;
lists_ = new_lists;
if (old_lists != NULL) {
- OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+ MemCopy(lists_, old_lists, old_size * sizeof(HInstructionMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
@@ -298,15 +279,15 @@ void HValueMap::ResizeLists(int new_size, Zone* zone) {
}
-void HValueMap::Insert(HValue* value, Zone* zone) {
- ASSERT(value != NULL);
+void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
+ ASSERT(instr != NULL);
// Resizing when half of the hashtable is filled up.
if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
ASSERT(count_ < array_size_);
count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
- if (array_[pos].value == NULL) {
- array_[pos].value = value;
+ uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode()));
+ if (array_[pos].instr == NULL) {
+ array_[pos].instr = instr;
array_[pos].next = kNil;
} else {
if (free_list_head_ == kNil) {
@@ -315,9 +296,9 @@ void HValueMap::Insert(HValue* value, Zone* zone) {
int new_element_pos = free_list_head_;
ASSERT(new_element_pos != kNil);
free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].value = value;
+ lists_[new_element_pos].instr = instr;
lists_[new_element_pos].next = array_[pos].next;
- ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
+ ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
array_[pos].next = new_element_pos;
}
}
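
The map (renamed from HValueMap) is a chained hash table whose collision chains live in a parallel lists_ array threaded through a free list, so the whole structure is two flat arrays and can be copied with two MemCopy calls. A minimal version of the bucket-plus-free-list insert (fixed sizes, no resizing, illustrative names):

#include <cstdio>

const int kSize = 8;   // Power of two so Bound() can mask.
const int kNil = -1;

struct Slot { int key; int next; };

struct HashMap {
  Slot array[kSize];  // First entry for each bucket.
  Slot lists[kSize];  // Collision chains, threaded through a free list.
  int free_head;
};

void Init(HashMap* m) {
  for (int i = 0; i < kSize; ++i) {
    m->array[i].key = kNil;
    m->array[i].next = kNil;
    m->lists[i].next = (i + 1 < kSize) ? i + 1 : kNil;
  }
  m->free_head = 0;
}

int Bound(int hash) { return hash & (kSize - 1); }

// A colliding key takes a slot off the free list and is pushed onto the
// front of its bucket's chain.
void Insert(HashMap* m, int key) {
  int pos = Bound(key);
  if (m->array[pos].key == kNil) {
    m->array[pos].key = key;
    return;
  }
  int slot = m->free_head;
  m->free_head = m->lists[slot].next;
  m->lists[slot].key = key;
  m->lists[slot].next = m->array[pos].next;
  m->array[pos].next = slot;
}

int main() {
  HashMap m;
  Init(&m);
  Insert(&m, 1);
  Insert(&m, 9);  // 9 & 7 == 1: collides with 1, goes onto the chain.
  std::printf("%d\n", m.lists[m.array[1].next].key);  // prints 9
  return 0;
}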
@@ -333,18 +314,17 @@ HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
}
-HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
+HSideEffectMap& HSideEffectMap::operator=(const HSideEffectMap& other) {
if (this != &other) {
- OS::MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
+ MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
}
return *this;
}
-void HSideEffectMap::Kill(GVNFlagSet flags) {
+void HSideEffectMap::Kill(SideEffects side_effects) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
+ if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] != NULL) count_--;
data_[i] = NULL;
}
@@ -352,10 +332,9 @@ void HSideEffectMap::Kill(GVNFlagSet flags) {
}
-void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
+void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
+ if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] == NULL) count_++;
data_[i] = instr;
}
@@ -363,45 +342,204 @@ void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
}
+SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) {
+ int index;
+ SideEffects result(instr->ChangesFlags());
+ if (result.ContainsFlag(kGlobalVars)) {
+ if (instr->IsStoreGlobalCell() &&
+ ComputeGlobalVar(HStoreGlobalCell::cast(instr)->cell(), &index)) {
+ result.RemoveFlag(kGlobalVars);
+ result.AddSpecial(GlobalVar(index));
+ } else {
+ for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ result.AddSpecial(GlobalVar(index));
+ }
+ }
+ }
+ if (result.ContainsFlag(kInobjectFields)) {
+ if (instr->IsStoreNamedField() &&
+ ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) {
+ result.RemoveFlag(kInobjectFields);
+ result.AddSpecial(InobjectField(index));
+ } else {
+ for (index = 0; index < kNumberOfInobjectFields; ++index) {
+ result.AddSpecial(InobjectField(index));
+ }
+ }
+ }
+ return result;
+}
+
+
+SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) {
+ int index;
+ SideEffects result(instr->DependsOnFlags());
+ if (result.ContainsFlag(kGlobalVars)) {
+ if (instr->IsLoadGlobalCell() &&
+ ComputeGlobalVar(HLoadGlobalCell::cast(instr)->cell(), &index)) {
+ result.RemoveFlag(kGlobalVars);
+ result.AddSpecial(GlobalVar(index));
+ } else {
+ for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ result.AddSpecial(GlobalVar(index));
+ }
+ }
+ }
+ if (result.ContainsFlag(kInobjectFields)) {
+ if (instr->IsLoadNamedField() &&
+ ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) {
+ result.RemoveFlag(kInobjectFields);
+ result.AddSpecial(InobjectField(index));
+ } else {
+ for (index = 0; index < kNumberOfInobjectFields; ++index) {
+ result.AddSpecial(InobjectField(index));
+ }
+ }
+ }
+ return result;
+}
+
+
+void SideEffectsTracker::PrintSideEffectsTo(StringStream* stream,
+ SideEffects side_effects) const {
+ const char* separator = "";
+ stream->Add("[");
+ for (int bit = 0; bit < kNumberOfFlags; ++bit) {
+ GVNFlag flag = GVNFlagFromInt(bit);
+ if (side_effects.ContainsFlag(flag)) {
+ stream->Add(separator);
+ separator = ", ";
+ switch (flag) {
+#define DECLARE_FLAG(Type) \
+ case k##Type: \
+ stream->Add(#Type); \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ for (int index = 0; index < num_global_vars_; ++index) {
+ if (side_effects.ContainsSpecial(GlobalVar(index))) {
+ stream->Add(separator);
+ separator = ", ";
+ stream->Add("[%p]", *global_vars_[index].handle());
+ }
+ }
+ for (int index = 0; index < num_inobject_fields_; ++index) {
+ if (side_effects.ContainsSpecial(InobjectField(index))) {
+ stream->Add(separator);
+ separator = ", ";
+ inobject_fields_[index].PrintTo(stream);
+ }
+ }
+ stream->Add("]");
+}
+
+
+bool SideEffectsTracker::ComputeGlobalVar(Unique<Cell> cell, int* index) {
+ for (int i = 0; i < num_global_vars_; ++i) {
+ if (cell == global_vars_[i]) {
+ *index = i;
+ return true;
+ }
+ }
+ if (num_global_vars_ < kNumberOfGlobalVars) {
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Tracking global var [%p] (mapped to index %d)\n",
+ *cell.handle(), num_global_vars_);
+ stream.OutputToStdOut();
+ }
+ *index = num_global_vars_;
+ global_vars_[num_global_vars_++] = cell;
+ return true;
+ }
+ return false;
+}
+
+
+bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access,
+ int* index) {
+ for (int i = 0; i < num_inobject_fields_; ++i) {
+ if (access.Equals(inobject_fields_[i])) {
+ *index = i;
+ return true;
+ }
+ }
+ if (num_inobject_fields_ < kNumberOfInobjectFields) {
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Tracking inobject field access ");
+ access.PrintTo(&stream);
+ stream.Add(" (mapped to index %d)\n", num_inobject_fields_);
+ stream.OutputToStdOut();
+ }
+ *index = num_inobject_fields_;
+ inobject_fields_[num_inobject_fields_++] = access;
+ return true;
+ }
+ return false;
+}
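ComputeGlobalVar and ComputeInobjectField above are the same fixed-capacity interning pattern instantiated twice; a minimal standalone sketch (the generic Key is an assumption; the patch uses Unique<Cell> and HObjectAccess):

// Map a key to a small dense index, registering it on first sight.
// Once the fixed-size table is full, Lookup fails and callers fall
// back to the conservative coarse flag.
template <typename Key, int kCapacity>
class InternTable {
 public:
  InternTable() : size_(0) {}
  bool Lookup(const Key& key, int* index) {
    for (int i = 0; i < size_; ++i) {
      if (keys_[i] == key) { *index = i; return true; }
    }
    if (size_ < kCapacity) {
      keys_[size_] = key;
      *index = size_++;
      return true;
    }
    return false;  // table full: stay conservative
  }
 private:
  Key keys_[kCapacity];  // Key needs a default constructor and operator==
  int size_;
};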
+
+
HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
- : HPhase("H_Global value numbering", graph),
- removed_side_effects_(false),
- block_side_effects_(graph->blocks()->length(), zone()),
- loop_side_effects_(graph->blocks()->length(), zone()),
- visited_on_paths_(graph->blocks()->length(), zone()) {
- ASSERT(!AllowHandleAllocation::IsAllowed());
- block_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
- zone());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
- zone());
- }
-
-void HGlobalValueNumberingPhase::Analyze() {
- removed_side_effects_ = false;
- ComputeBlockSideEffects();
- if (FLAG_loop_invariant_code_motion) {
- LoopInvariantCodeMotion();
- }
- AnalyzeGraph();
+ : HPhase("H_Global value numbering", graph),
+ removed_side_effects_(false),
+ block_side_effects_(graph->blocks()->length(), zone()),
+ loop_side_effects_(graph->blocks()->length(), zone()),
+ visited_on_paths_(graph->blocks()->length(), zone()) {
+ ASSERT(!AllowHandleAllocation::IsAllowed());
+ block_side_effects_.AddBlock(
+ SideEffects(), graph->blocks()->length(), zone());
+ loop_side_effects_.AddBlock(
+ SideEffects(), graph->blocks()->length(), zone());
}
-void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
- // The Analyze phase of GVN can be called multiple times. Clear loop side
- // effects before computing them to erase the contents from previous Analyze
- // passes.
- for (int i = 0; i < loop_side_effects_.length(); ++i) {
- loop_side_effects_[i].RemoveAll();
+void HGlobalValueNumberingPhase::Run() {
+ ASSERT(!removed_side_effects_);
+ for (int i = FLAG_gvn_iterations; i > 0; --i) {
+ // Compute the side effects.
+ ComputeBlockSideEffects();
+
+ // Perform loop invariant code motion if requested.
+ if (FLAG_loop_invariant_code_motion) LoopInvariantCodeMotion();
+
+ // Perform the actual value numbering.
+ AnalyzeGraph();
+
+ // Continue GVN if we removed any side effects.
+ if (!removed_side_effects_) break;
+ removed_side_effects_ = false;
+
+    // Clear the accumulated per-block side-effect state for the next pass.
+ ASSERT_EQ(block_side_effects_.length(), graph()->blocks()->length());
+ ASSERT_EQ(loop_side_effects_.length(), graph()->blocks()->length());
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ block_side_effects_[i].RemoveAll();
+ loop_side_effects_[i].RemoveAll();
+ }
+ visited_on_paths_.Clear();
}
+}
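The new Run() replaces the old two-pass heuristic with a bounded fixpoint iteration. Its control flow in isolation, as a hedged sketch where run_one_pass and clear_pass_state stand in for the calls above:

// Bounded fixpoint: rerun the pass while it keeps removing side effects,
// up to a fixed iteration budget (FLAG_gvn_iterations in the patch).
void RunToFixpoint(int max_iterations,
                   bool (*run_one_pass)(),      // side effects + LICM + GVN
                   void (*clear_pass_state)()) {
  for (int i = max_iterations; i > 0; --i) {
    if (!run_one_pass()) break;  // nothing removed: fixpoint reached
    clear_pass_state();          // erase per-pass state, then iterate
  }
}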
+
+
+void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
HBasicBlock* block = graph()->blocks()->at(i);
- GVNFlagSet side_effects;
+ SideEffects side_effects;
if (block->IsReachable() && !block->IsDeoptimizing()) {
int id = block->block_id();
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- side_effects.Add(instr->ChangesFlags());
+ side_effects.Add(side_effects_tracker_.ComputeChanges(instr));
}
block_side_effects_[id].Add(side_effects);
@@ -425,110 +563,25 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
}
-SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
- char underlying_buffer[kLastFlag * 128];
- Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
-#if DEBUG
- int offset = 0;
- const char* separator = "";
- const char* comma = ", ";
- buffer[0] = 0;
- uint32_t set_depends_on = 0;
- uint32_t set_changes = 0;
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (flags.Contains(static_cast<GVNFlag>(bit))) {
- if (bit % 2 == 0) {
- set_changes++;
- } else {
- set_depends_on++;
- }
- }
- }
- bool positive_changes = set_changes < (kLastFlag / 2);
- bool positive_depends_on = set_depends_on < (kLastFlag / 2);
- if (set_changes > 0) {
- if (positive_changes) {
- offset += OS::SNPrintF(buffer + offset, "changes [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "changes all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_changes) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kChanges##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
- if (set_depends_on > 0) {
- separator = "";
- if (set_changes > 0) {
- offset += OS::SNPrintF(buffer + offset, ", ");
- }
- if (positive_depends_on) {
- offset += OS::SNPrintF(buffer + offset, "depends on [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "depends on all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_depends_on) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kDependsOn##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
-#else
- OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
-#endif
- size_t string_len = strlen(underlying_buffer) + 1;
- ASSERT(string_len <= sizeof(underlying_buffer));
- char* result = new char[strlen(underlying_buffer) + 1];
- OS::MemCopy(result, underlying_buffer, string_len);
- return SmartArrayPointer<char>(result);
-}
-
-
void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
graph()->use_optimistic_licm() ? "yes" : "no");
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph()->blocks()->at(i);
if (block->IsLoopHeader()) {
- GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
- TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(side_effects));
-
- GVNFlagSet accumulated_first_time_depends;
- GVNFlagSet accumulated_first_time_changes;
+ SideEffects side_effects = loop_side_effects_[block->block_id()];
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Try loop invariant motion for block B%d changes ",
+ block->block_id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, side_effects);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
HBasicBlock* last = block->loop_information()->GetLastBackEdge();
for (int j = block->block_id(); j <= last->block_id(); ++j) {
- ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects,
- &accumulated_first_time_depends,
- &accumulated_first_time_changes);
+ ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects);
}
}
}
@@ -538,25 +591,37 @@ void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
void HGlobalValueNumberingPhase::ProcessLoopBlock(
HBasicBlock* block,
HBasicBlock* loop_header,
- GVNFlagSet loop_kills,
- GVNFlagSet* first_time_depends,
- GVNFlagSet* first_time_changes) {
+ SideEffects loop_kills) {
HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
- TRACE_GVN_2("Loop invariant motion for B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(depends_flags));
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Loop invariant code motion for B%d depends on ",
+ block->block_id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
- bool hoisted = false;
if (instr->CheckFlag(HValue::kUseGVN)) {
- TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
- instr->id(),
- instr->Mnemonic(),
- *GetGVNFlagsString(instr->gvn_flags()),
- *GetGVNFlagsString(loop_kills));
- bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
+ SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+ SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr);
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Checking instruction i%d (%s) changes ",
+ instr->id(), instr->Mnemonic());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
+ stream.Add(", depends on ");
+ side_effects_tracker_.PrintSideEffectsTo(&stream, depends_on);
+ stream.Add(". Loop changes ");
+ side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
+ bool can_hoist = !depends_on.ContainsAnyOf(loop_kills);
if (can_hoist && !graph()->use_optimistic_licm()) {
can_hoist = block->IsLoopSuccessorDominator();
}
@@ -576,26 +641,9 @@ void HGlobalValueNumberingPhase::ProcessLoopBlock(
instr->Unlink();
instr->InsertBefore(pre_header->end());
if (instr->HasSideEffects()) removed_side_effects_ = true;
- hoisted = true;
}
}
}
- if (!hoisted) {
- // If an instruction is not hoisted, we have to account for its side
- // effects when hoisting later HTransitionElementsKind instructions.
- GVNFlagSet previous_depends = *first_time_depends;
- GVNFlagSet previous_changes = *first_time_changes;
- first_time_depends->Add(instr->DependsOnFlags());
- first_time_changes->Add(instr->ChangesFlags());
- if (!(previous_depends == *first_time_depends)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_depends));
- }
- if (!(previous_changes == *first_time_changes)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_changes));
- }
- }
instr = next;
}
}
@@ -615,10 +663,10 @@ bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
}
-GVNFlagSet
+SideEffects
HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
- GVNFlagSet side_effects;
+ SideEffects side_effects;
for (int i = 0; i < dominated->predecessors()->length(); ++i) {
HBasicBlock* block = dominated->predecessors()->at(i);
if (dominator->block_id() < block->block_id() &&
@@ -647,13 +695,13 @@ class GvnBasicBlockState: public ZoneObject {
public:
static GvnBasicBlockState* CreateEntry(Zone* zone,
HBasicBlock* entry_block,
- HValueMap* entry_map) {
+ HInstructionMap* entry_map) {
return new(zone)
GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
}
HBasicBlock* block() { return block_; }
- HValueMap* map() { return map_; }
+ HInstructionMap* map() { return map_; }
HSideEffectMap* dominators() { return &dominators_; }
GvnBasicBlockState* next_in_dominator_tree_traversal(
@@ -680,7 +728,7 @@ class GvnBasicBlockState: public ZoneObject {
private:
void Initialize(HBasicBlock* block,
- HValueMap* map,
+ HInstructionMap* map,
HSideEffectMap* dominators,
bool copy_map,
Zone* zone) {
@@ -696,7 +744,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState(GvnBasicBlockState* previous,
HBasicBlock* block,
- HValueMap* map,
+ HInstructionMap* map,
HSideEffectMap* dominators,
Zone* zone)
: previous_(previous), next_(NULL) {
@@ -743,7 +791,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState* previous_;
GvnBasicBlockState* next_;
HBasicBlock* block_;
- HValueMap* map_;
+ HInstructionMap* map_;
HSideEffectMap dominators_;
int dominated_index_;
int length_;
@@ -756,13 +804,14 @@ class GvnBasicBlockState: public ZoneObject {
// GvnBasicBlockState instances.
void HGlobalValueNumberingPhase::AnalyzeGraph() {
HBasicBlock* entry_block = graph()->entry_block();
- HValueMap* entry_map = new(zone()) HValueMap(zone());
+ HInstructionMap* entry_map =
+ new(zone()) HInstructionMap(zone(), &side_effects_tracker_);
GvnBasicBlockState* current =
GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
while (current != NULL) {
HBasicBlock* block = current->block();
- HValueMap* map = current->map();
+ HInstructionMap* map = current->map();
HSideEffectMap* dominators = current->dominators();
TRACE_GVN_2("Analyzing block B%d%s\n",
@@ -781,38 +830,45 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
HValue* other = dominators->at(i);
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(depends_on_flag) &&
- (other != NULL)) {
+ GVNFlag flag = GVNFlagFromInt(i);
+ if (instr->DependsOnFlags().Contains(flag) && other != NULL) {
TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
i,
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
- instr->HandleSideEffectDominator(changes_flag, other);
+ if (instr->HandleSideEffectDominator(flag, other)) {
+ removed_side_effects_ = true;
+ }
}
}
}
// Instruction was unlinked during graph traversal.
if (!instr->IsLinked()) continue;
- GVNFlagSet flags = instr->ChangesFlags();
- if (!flags.IsEmpty()) {
+ SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+ if (!changes.IsEmpty()) {
// Clear all instructions in the map that are affected by side effects.
// Store instruction as the dominating one for tracked side effects.
- map->Kill(flags);
- dominators->Store(flags, instr);
- TRACE_GVN_2("Instruction %d %s\n", instr->id(),
- *GetGVNFlagsString(flags));
+ map->Kill(changes);
+ dominators->Store(changes, instr);
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Instruction i%d changes ", instr->id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
}
- if (instr->CheckFlag(HValue::kUseGVN)) {
+ if (instr->CheckFlag(HValue::kUseGVN) &&
+ !instr->CheckFlag(HValue::kCantBeReplaced)) {
ASSERT(!instr->HasObservableSideEffects());
- HValue* other = map->Lookup(instr);
+ HInstruction* other = map->Lookup(instr);
if (other != NULL) {
ASSERT(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
+ TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n",
instr->id(),
instr->Mnemonic(),
other->id(),
@@ -832,7 +888,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (next != NULL) {
HBasicBlock* dominated = next->block();
- HValueMap* successor_map = next->map();
+ HInstructionMap* successor_map = next->map();
HSideEffectMap* successor_dominators = next->dominators();
// Kill everything killed on any path between this block and the
@@ -843,7 +899,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
dominator_block->block_id() + 1 < dominated->block_id()) {
visited_on_paths_.Clear();
- GVNFlagSet side_effects_on_all_paths =
+ SideEffects side_effects_on_all_paths =
CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
dominated);
successor_map->Kill(side_effects_on_all_paths);
diff --git a/chromium/v8/src/hydrogen-gvn.h b/chromium/v8/src/hydrogen-gvn.h
index fdbad99c6bb..ad97c155887 100644
--- a/chromium/v8/src/hydrogen-gvn.h
+++ b/chromium/v8/src/hydrogen-gvn.h
@@ -1,60 +1,109 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_GVN_H_
#define V8_HYDROGEN_GVN_H_
-#include "hydrogen.h"
-#include "hydrogen-instructions.h"
-#include "compiler.h"
-#include "zone.h"
+#include "src/hydrogen.h"
+#include "src/hydrogen-instructions.h"
+#include "src/compiler.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
+// This class extends GVNFlagSet with additional "special" dynamic side effects,
+// which can be used to represent side effects that cannot be expressed using
+// the GVNFlags of an HInstruction. These special side effects are tracked by a
+// SideEffectsTracker (see below).
+class SideEffects V8_FINAL {
+ public:
+ static const int kNumberOfSpecials = 64 - kNumberOfFlags;
+
+ SideEffects() : bits_(0) {
+ ASSERT(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
+ }
+ explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
+ bool IsEmpty() const { return bits_ == 0; }
+ bool ContainsFlag(GVNFlag flag) const {
+ return (bits_ & MaskFlag(flag)) != 0;
+ }
+ bool ContainsSpecial(int special) const {
+ return (bits_ & MaskSpecial(special)) != 0;
+ }
+ bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; }
+ void Add(SideEffects set) { bits_ |= set.bits_; }
+ void AddSpecial(int special) { bits_ |= MaskSpecial(special); }
+ void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
+ void RemoveAll() { bits_ = 0; }
+ uint64_t ToIntegral() const { return bits_; }
+ void PrintTo(StringStream* stream) const;
+
+ private:
+ uint64_t MaskFlag(GVNFlag flag) const {
+ return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
+ }
+ uint64_t MaskSpecial(int special) const {
+ ASSERT(special >= 0);
+ ASSERT(special < kNumberOfSpecials);
+ return static_cast<uint64_t>(1) << static_cast<unsigned>(
+ special + kNumberOfFlags);
+ }
+
+ uint64_t bits_;
+};
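The 64-bit split asserted in the SideEffects constructor can be pictured standalone; in this sketch the value 20 for kNumberOfFlags is a placeholder, since the real count comes from the GVN flag lists:

#include <cassert>
#include <cstdint>

static const int kNumberOfFlags = 20;  // placeholder (assumption)
static const int kNumberOfSpecials = 64 - kNumberOfFlags;

// Bit layout of SideEffects::bits_:
//   bits [0 .. kNumberOfFlags)   coarse GVN flags
//   bits [kNumberOfFlags .. 64)  "special" dynamic side effects
uint64_t MaskFlag(int flag) {
  assert(0 <= flag && flag < kNumberOfFlags);
  return static_cast<uint64_t>(1) << flag;
}

uint64_t MaskSpecial(int special) {
  assert(0 <= special && special < kNumberOfSpecials);
  // Specials live above the flags, so the two ranges never collide.
  return static_cast<uint64_t>(1) << (special + kNumberOfFlags);
}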
+
+
+// Tracks global variable and inobject field loads/stores in a fine-grained
+// fashion, and represents them using the "special" dynamic side effects of the
+// SideEffects class (see above). This way unrelated global variable/inobject
+// field stores don't prevent hoisting and merging of global variable/inobject
+// field loads.
+class SideEffectsTracker V8_FINAL BASE_EMBEDDED {
+ public:
+ SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {}
+ SideEffects ComputeChanges(HInstruction* instr);
+ SideEffects ComputeDependsOn(HInstruction* instr);
+ void PrintSideEffectsTo(StringStream* stream, SideEffects side_effects) const;
+
+ private:
+ bool ComputeGlobalVar(Unique<Cell> cell, int* index);
+ bool ComputeInobjectField(HObjectAccess access, int* index);
+
+ static int GlobalVar(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < kNumberOfGlobalVars);
+ return index;
+ }
+ static int InobjectField(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < kNumberOfInobjectFields);
+ return index + kNumberOfGlobalVars;
+ }
+
+ // Track up to four global vars.
+ static const int kNumberOfGlobalVars = 4;
+ Unique<Cell> global_vars_[kNumberOfGlobalVars];
+ int num_global_vars_;
+
+  // Track inobject fields using the remaining special bits.
+ static const int kNumberOfInobjectFields =
+ SideEffects::kNumberOfSpecials - kNumberOfGlobalVars;
+ HObjectAccess inobject_fields_[kNumberOfInobjectFields];
+ int num_inobject_fields_;
+};
+
+
// Perform common subexpression elimination and loop-invariant code motion.
-class HGlobalValueNumberingPhase : public HPhase {
+class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
public:
explicit HGlobalValueNumberingPhase(HGraph* graph);
- void Run() {
- Analyze();
- // Trigger a second analysis pass to further eliminate duplicate values
- // that could only be discovered by removing side-effect-generating
- // instructions during the first pass.
- if (FLAG_smi_only_arrays && removed_side_effects_) {
- Analyze();
- // TODO(danno): Turn this into a fixpoint iteration.
- }
- }
+ void Run();
private:
- void Analyze();
- GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
+ SideEffects CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
void AnalyzeGraph();
@@ -62,19 +111,18 @@ class HGlobalValueNumberingPhase : public HPhase {
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
- GVNFlagSet loop_kills,
- GVNFlagSet* accumulated_first_time_depends,
- GVNFlagSet* accumulated_first_time_changes);
+ SideEffects loop_kills);
bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+ SideEffectsTracker side_effects_tracker_;
bool removed_side_effects_;
// A map of block IDs to their side effects.
- ZoneList<GVNFlagSet> block_side_effects_;
+ ZoneList<SideEffects> block_side_effects_;
// A map of loop header block IDs to their loop's side effects.
- ZoneList<GVNFlagSet> loop_side_effects_;
+ ZoneList<SideEffects> loop_side_effects_;
// Used when collecting side effects on paths from dominator to
// dominated.
@@ -83,7 +131,6 @@ class HGlobalValueNumberingPhase : public HPhase {
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
-
} } // namespace v8::internal
#endif // V8_HYDROGEN_GVN_H_
diff --git a/chromium/v8/src/hydrogen-infer-representation.cc b/chromium/v8/src/hydrogen-infer-representation.cc
index f61649a68f4..3815ba514e6 100644
--- a/chromium/v8/src/hydrogen-infer-representation.cc
+++ b/chromium/v8/src/hydrogen-infer-representation.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-infer-representation.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-infer-representation.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-infer-representation.h b/chromium/v8/src/hydrogen-infer-representation.h
index 7c605696c4b..d07f89d973f 100644
--- a/chromium/v8/src/hydrogen-infer-representation.h
+++ b/chromium/v8/src/hydrogen-infer-representation.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_INFER_REPRESENTATION_H_
#define V8_HYDROGEN_INFER_REPRESENTATION_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-infer-types.cc b/chromium/v8/src/hydrogen-infer-types.cc
index 01c60847367..0b7c24bdc63 100644
--- a/chromium/v8/src/hydrogen-infer-types.cc
+++ b/chromium/v8/src/hydrogen-infer-types.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "hydrogen-infer-types.h"
+#include "src/hydrogen-infer-types.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-infer-types.h b/chromium/v8/src/hydrogen-infer-types.h
index cfcbf3549ba..41337ac5c0d 100644
--- a/chromium/v8/src/hydrogen-infer-types.h
+++ b/chromium/v8/src/hydrogen-infer-types.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_INFER_TYPES_H_
#define V8_HYDROGEN_INFER_TYPES_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-instructions.cc b/chromium/v8/src/hydrogen-instructions.cc
index 6bf662a638f..8b40a249d34 100644
--- a/chromium/v8/src/hydrogen-instructions.cc
+++ b/chromium/v8/src/hydrogen-instructions.cc
@@ -1,44 +1,26 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "double.h"
-#include "factory.h"
-#include "hydrogen-infer-representation.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/double.h"
+#include "src/factory.h"
+#include "src/hydrogen-infer-representation.h"
+#include "src/property-details-inl.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
+#include "src/ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
+#include "src/x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
+#include "src/arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
+#include "src/mips/lithium-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/lithium-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -54,14 +36,6 @@ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-int HValue::LoopWeight() const {
- const int w = FLAG_loop_weight;
- static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
- return weights[Min(block()->LoopNestingDepth(),
- static_cast<int>(ARRAY_SIZE(weights)-1))];
-}
-
-
Isolate* HValue::isolate() const {
ASSERT(block() != NULL);
return block()->isolate();
@@ -106,7 +80,7 @@ Representation HValue::RepresentationFromUses() {
id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
(use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
}
- use_count[rep.kind()] += use->LoopWeight();
+ use_count[rep.kind()] += 1;
}
if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
int tagged_count = use_count[Representation::kTagged];
@@ -334,46 +308,6 @@ bool Range::MulAndCheckOverflow(const Representation& r, Range* other) {
}
-const char* HType::ToString() {
- // Note: The c1visualizer syntax for locals allows only a sequence of the
- // following characters: A-Za-z0-9_-|:
- switch (type_) {
- case kNone: return "none";
- case kTagged: return "tagged";
- case kTaggedPrimitive: return "primitive";
- case kTaggedNumber: return "number";
- case kSmi: return "smi";
- case kHeapNumber: return "heap-number";
- case kString: return "string";
- case kBoolean: return "boolean";
- case kNonPrimitive: return "non-primitive";
- case kJSArray: return "array";
- case kJSObject: return "object";
- }
- UNREACHABLE();
- return "unreachable";
-}
-
-
-HType HType::TypeFromValue(Handle<Object> value) {
- HType result = HType::Tagged();
- if (value->IsSmi()) {
- result = HType::Smi();
- } else if (value->IsHeapNumber()) {
- result = HType::HeapNumber();
- } else if (value->IsString()) {
- result = HType::String();
- } else if (value->IsBoolean()) {
- result = HType::Boolean();
- } else if (value->IsJSObject()) {
- result = HType::JSObject();
- } else if (value->IsJSArray()) {
- result = HType::JSArray();
- }
- return result;
-}
-
-
bool HValue::IsDefinedAfter(HBasicBlock* other) const {
return block()->block_id() > other->block_id();
}
@@ -514,6 +448,8 @@ bool HValue::CanReplaceWithDummyUses() {
!(block()->IsReachable() ||
IsBlockEntry() ||
IsControlInstruction() ||
+ IsArgumentsObject() ||
+ IsCapturedObject() ||
IsSimulate() ||
IsEnterInlined() ||
IsLeaveInlined());
@@ -593,17 +529,6 @@ void HValue::PrintTypeTo(StringStream* stream) {
}
-void HValue::PrintRangeTo(StringStream* stream) {
- if (range() == NULL || range()->IsMostGeneric()) return;
- // Note: The c1visualizer syntax for locals allows only a sequence of the
- // following characters: A-Za-z0-9_-|:
- stream->Add(" range:%d_%d%s",
- range()->lower(),
- range()->upper(),
- range()->CanBeMinusZero() ? "_m0" : "");
-}
-
-
void HValue::PrintChangesTo(StringStream* stream) {
GVNFlagSet changes_flags = ChangesFlags();
if (changes_flags.IsEmpty()) return;
@@ -612,11 +537,11 @@ void HValue::PrintChangesTo(StringStream* stream) {
stream->Add("*");
} else {
bool add_comma = false;
-#define PRINT_DO(type) \
- if (changes_flags.Contains(kChanges##type)) { \
- if (add_comma) stream->Add(","); \
- add_comma = true; \
- stream->Add(#type); \
+#define PRINT_DO(Type) \
+ if (changes_flags.Contains(k##Type)) { \
+ if (add_comma) stream->Add(","); \
+ add_comma = true; \
+ stream->Add(#Type); \
}
GVN_TRACKED_FLAG_LIST(PRINT_DO);
GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
@@ -688,15 +613,30 @@ void HValue::ComputeInitialRange(Zone* zone) {
}
+void HSourcePosition::PrintTo(FILE* out) {
+ if (IsUnknown()) {
+ PrintF(out, "<?>");
+ } else {
+ if (FLAG_hydrogen_track_positions) {
+ PrintF(out, "<%d:%d>", inlining_id(), position());
+ } else {
+ PrintF(out, "<0:%d>", raw());
+ }
+ }
+}
+
+
void HInstruction::PrintTo(StringStream* stream) {
PrintMnemonicTo(stream);
PrintDataTo(stream);
- PrintRangeTo(stream);
PrintChangesTo(stream);
PrintTypeTo(stream);
if (CheckFlag(HValue::kHasNoObservableSideEffects)) {
stream->Add(" [noOSE]");
}
+ if (CheckFlag(HValue::kIsDead)) {
+ stream->Add(" [dead]");
+ }
}
@@ -741,8 +681,7 @@ void HInstruction::InsertBefore(HInstruction* next) {
next_ = next;
previous_ = prev;
SetBlock(next->block());
- if (position() == RelocInfo::kNoPosition &&
- next->position() != RelocInfo::kNoPosition) {
+ if (!has_position() && next->has_position()) {
set_position(next->position());
}
}
@@ -779,13 +718,27 @@ void HInstruction::InsertAfter(HInstruction* previous) {
if (block->last() == previous) {
block->set_last(this);
}
- if (position() == RelocInfo::kNoPosition &&
- previous->position() != RelocInfo::kNoPosition) {
+ if (!has_position() && previous->has_position()) {
set_position(previous->position());
}
}
+bool HInstruction::Dominates(HInstruction* other) {
+ if (block() != other->block()) {
+ return block()->Dominates(other->block());
+ }
+ // Both instructions are in the same basic block. This instruction
+ // should precede the other one in order to dominate it.
+ for (HInstruction* instr = next(); instr != NULL; instr = instr->next()) {
+ if (instr == other) {
+ return true;
+ }
+ }
+ return false;
+}
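The same-block case of Dominates reduces to list order on the intrusive instruction chain; a standalone sketch of just that case, where IntrusiveNode stands in for HInstruction:

#include <cstddef>

struct IntrusiveNode { IntrusiveNode* next; };

// Walk the successor chain from a; reaching b means a precedes (and,
// within one basic block, dominates) b. Linear in the block length.
bool PrecedesInBlock(const IntrusiveNode* a, const IntrusiveNode* b) {
  for (const IntrusiveNode* n = a->next; n != NULL; n = n->next) {
    if (n == b) return true;
  }
  return false;
}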
+
+
#ifdef DEBUG
void HInstruction::Verify() {
// Verify that input operands are defined before use.
@@ -832,6 +785,138 @@ void HInstruction::Verify() {
#endif
+bool HInstruction::CanDeoptimize() {
+ // TODO(titzer): make this a virtual method?
+ switch (opcode()) {
+ case HValue::kAbnormalExit:
+ case HValue::kAccessArgumentsAt:
+ case HValue::kAllocate:
+ case HValue::kArgumentsElements:
+ case HValue::kArgumentsLength:
+ case HValue::kArgumentsObject:
+ case HValue::kBlockEntry:
+ case HValue::kBoundsCheckBaseIndexInformation:
+ case HValue::kCallFunction:
+ case HValue::kCallNew:
+ case HValue::kCallNewArray:
+ case HValue::kCallStub:
+ case HValue::kCallWithDescriptor:
+ case HValue::kCapturedObject:
+ case HValue::kClassOfTestAndBranch:
+ case HValue::kCompareGeneric:
+ case HValue::kCompareHoleAndBranch:
+ case HValue::kCompareMap:
+ case HValue::kCompareMinusZeroAndBranch:
+ case HValue::kCompareNumericAndBranch:
+ case HValue::kCompareObjectEqAndBranch:
+ case HValue::kConstant:
+ case HValue::kConstructDouble:
+ case HValue::kContext:
+ case HValue::kDebugBreak:
+ case HValue::kDeclareGlobals:
+ case HValue::kDoubleBits:
+ case HValue::kDummyUse:
+ case HValue::kEnterInlined:
+ case HValue::kEnvironmentMarker:
+ case HValue::kForceRepresentation:
+ case HValue::kGetCachedArrayIndex:
+ case HValue::kGoto:
+ case HValue::kHasCachedArrayIndexAndBranch:
+ case HValue::kHasInstanceTypeAndBranch:
+ case HValue::kInnerAllocatedObject:
+ case HValue::kInstanceOf:
+ case HValue::kInstanceOfKnownGlobal:
+ case HValue::kIsConstructCallAndBranch:
+ case HValue::kIsObjectAndBranch:
+ case HValue::kIsSmiAndBranch:
+ case HValue::kIsStringAndBranch:
+ case HValue::kIsUndetectableAndBranch:
+ case HValue::kLeaveInlined:
+ case HValue::kLoadFieldByIndex:
+ case HValue::kLoadGlobalGeneric:
+ case HValue::kLoadNamedField:
+ case HValue::kLoadNamedGeneric:
+ case HValue::kLoadRoot:
+ case HValue::kMapEnumLength:
+ case HValue::kMathMinMax:
+ case HValue::kParameter:
+ case HValue::kPhi:
+ case HValue::kPushArguments:
+ case HValue::kRegExpLiteral:
+ case HValue::kReturn:
+ case HValue::kSeqStringGetChar:
+ case HValue::kStoreCodeEntry:
+ case HValue::kStoreFrameContext:
+ case HValue::kStoreKeyed:
+ case HValue::kStoreNamedField:
+ case HValue::kStoreNamedGeneric:
+ case HValue::kStringCharCodeAt:
+ case HValue::kStringCharFromCode:
+ case HValue::kThisFunction:
+ case HValue::kTypeofIsAndBranch:
+ case HValue::kUnknownOSRValue:
+ case HValue::kUseConst:
+ return false;
+
+ case HValue::kAdd:
+ case HValue::kAllocateBlockContext:
+ case HValue::kApplyArguments:
+ case HValue::kBitwise:
+ case HValue::kBoundsCheck:
+ case HValue::kBranch:
+ case HValue::kCallJSFunction:
+ case HValue::kCallRuntime:
+ case HValue::kChange:
+ case HValue::kCheckHeapObject:
+ case HValue::kCheckInstanceType:
+ case HValue::kCheckMapValue:
+ case HValue::kCheckMaps:
+ case HValue::kCheckSmi:
+ case HValue::kCheckValue:
+ case HValue::kClampToUint8:
+ case HValue::kDateField:
+ case HValue::kDeoptimize:
+ case HValue::kDiv:
+ case HValue::kForInCacheArray:
+ case HValue::kForInPrepareMap:
+ case HValue::kFunctionLiteral:
+ case HValue::kInvokeFunction:
+ case HValue::kLoadContextSlot:
+ case HValue::kLoadFunctionPrototype:
+ case HValue::kLoadGlobalCell:
+ case HValue::kLoadKeyed:
+ case HValue::kLoadKeyedGeneric:
+ case HValue::kMathFloorOfDiv:
+ case HValue::kMod:
+ case HValue::kMul:
+ case HValue::kOsrEntry:
+ case HValue::kPower:
+ case HValue::kRor:
+ case HValue::kSar:
+ case HValue::kSeqStringSetChar:
+ case HValue::kShl:
+ case HValue::kShr:
+ case HValue::kSimulate:
+ case HValue::kStackCheck:
+ case HValue::kStoreContextSlot:
+ case HValue::kStoreGlobalCell:
+ case HValue::kStoreKeyedGeneric:
+ case HValue::kStringAdd:
+ case HValue::kStringCompareAndBranch:
+ case HValue::kSub:
+ case HValue::kToFastProperties:
+ case HValue::kTransitionElementsKind:
+ case HValue::kTrapAllocationMemento:
+ case HValue::kTypeof:
+ case HValue::kUnaryMathOperation:
+ case HValue::kWrapReceiver:
+ return true;
+ }
+ UNREACHABLE();
+ return true;
+}
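The TODO above asks whether CanDeoptimize should be virtual; a hedged sketch of that alternative shape, with invented class names, purely for comparison. The exhaustive switch kept by the patch has the advantage that a newly added opcode without a case triggers a compiler warning:

class HInstructionBase {  // hypothetical, not the V8 class
 public:
  virtual ~HInstructionBase() {}
  // Conservative default: assume the instruction can deoptimize.
  virtual bool CanDeoptimize() const { return true; }
};

class HConstantLike : public HInstructionBase {  // hypothetical
 public:
  virtual bool CanDeoptimize() const { return false; }
};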
+
+
void HDummyUse::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@@ -849,6 +934,37 @@ void HUnaryCall::PrintDataTo(StringStream* stream) {
}
+void HCallJSFunction::PrintDataTo(StringStream* stream) {
+ function()->PrintNameTo(stream);
+ stream->Add(" ");
+ stream->Add("#%d", argument_count());
+}
+
+
+HCallJSFunction* HCallJSFunction::New(
+ Zone* zone,
+ HValue* context,
+ HValue* function,
+ int argument_count,
+ bool pass_argument_count) {
+ bool has_stack_check = false;
+ if (function->IsConstant()) {
+ HConstant* fun_const = HConstant::cast(function);
+ Handle<JSFunction> jsfun =
+ Handle<JSFunction>::cast(fun_const->handle(zone->isolate()));
+ has_stack_check = !jsfun.is_null() &&
+ (jsfun->code()->kind() == Code::FUNCTION ||
+ jsfun->code()->kind() == Code::OPTIMIZED_FUNCTION);
+ }
+
+ return new(zone) HCallJSFunction(
+ function, argument_count, pass_argument_count,
+ has_stack_check);
+}
+
+
void HBinaryCall::PrintDataTo(StringStream* stream) {
first()->PrintNameTo(stream);
stream->Add(" ");
@@ -974,34 +1090,15 @@ void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) {
}
-void HCallConstantFunction::PrintDataTo(StringStream* stream) {
- if (IsApplyFunction()) {
- stream->Add("optimized apply ");
- } else {
- stream->Add("%o ", function()->shared()->DebugName());
+void HCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < OperandCount(); i++) {
+ OperandAt(i)->PrintNameTo(stream);
+ stream->Add(" ");
}
stream->Add("#%d", argument_count());
}
-void HCallNamed::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- HUnaryCall::PrintDataTo(stream);
-}
-
-
-void HCallGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- HUnaryCall::PrintDataTo(stream);
-}
-
-
-void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", target()->shared()->DebugName());
- stream->Add("#%d", argument_count());
-}
-
-
void HCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add(ElementsKindToString(elements_kind()));
stream->Add(" ");
@@ -1041,6 +1138,13 @@ void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
+void HAllocateBlockContext::PrintDataTo(StringStream* stream) {
+ context()->PrintNameTo(stream);
+ stream->Add(" ");
+ function()->PrintNameTo(stream);
+}
+
+
void HControlInstruction::PrintDataTo(StringStream* stream) {
stream->Add(" goto (");
bool first_block = true;
@@ -1106,10 +1210,22 @@ bool HBranch::KnownSuccessorBlock(HBasicBlock** block) {
}
+void HBranch::PrintDataTo(StringStream* stream) {
+ HUnaryControlInstruction::PrintDataTo(stream);
+ stream->Add(" ");
+ expected_input_types().Print(stream);
+}
+
+
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map().handle());
HControlInstruction::PrintDataTo(stream);
+ if (known_successor_index() == 0) {
+ stream->Add(" [true]");
+ } else if (known_successor_index() == 1) {
+ stream->Add(" [false]");
+ }
}
@@ -1119,12 +1235,10 @@ const char* HUnaryMathOperation::OpName() const {
case kMathRound: return "round";
case kMathAbs: return "abs";
case kMathLog: return "log";
- case kMathSin: return "sin";
- case kMathCos: return "cos";
- case kMathTan: return "tan";
case kMathExp: return "exp";
case kMathSqrt: return "sqrt";
case kMathPowHalf: return "pow-half";
+ case kMathClz32: return "clz32";
default:
UNREACHABLE();
return NULL;
@@ -1134,6 +1248,7 @@ const char* HUnaryMathOperation::OpName() const {
Range* HUnaryMathOperation::InferRange(Zone* zone) {
Representation r = representation();
+ if (op() == kMathClz32) return new(zone) Range(0, 32);
if (r.IsSmiOrInteger32() && value()->HasRange()) {
if (op() == kMathAbs) {
int upper = value()->range()->upper();
@@ -1191,18 +1306,52 @@ void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" == %o", *type_literal_);
+ stream->Add(" == %o", *type_literal_.handle());
HControlInstruction::PrintDataTo(stream);
}
-bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (value()->representation().IsSpecialization()) {
- if (compares_number_type()) {
- *block = FirstSuccessor();
- } else {
- *block = SecondSuccessor();
+static String* TypeOfString(HConstant* constant, Isolate* isolate) {
+ Heap* heap = isolate->heap();
+ if (constant->HasNumberValue()) return heap->number_string();
+ if (constant->IsUndetectable()) return heap->undefined_string();
+ if (constant->HasStringValue()) return heap->string_string();
+ switch (constant->GetInstanceType()) {
+ case ODDBALL_TYPE: {
+ Unique<Object> unique = constant->GetUnique();
+ if (unique.IsKnownGlobal(heap->true_value()) ||
+ unique.IsKnownGlobal(heap->false_value())) {
+ return heap->boolean_string();
+ }
+ if (unique.IsKnownGlobal(heap->null_value())) {
+ return FLAG_harmony_typeof ? heap->null_string()
+ : heap->object_string();
+ }
+ ASSERT(unique.IsKnownGlobal(heap->undefined_value()));
+ return heap->undefined_string();
}
+ case SYMBOL_TYPE:
+ return heap->symbol_string();
+ case JS_FUNCTION_TYPE:
+ case JS_FUNCTION_PROXY_TYPE:
+ return heap->function_string();
+ default:
+ return heap->object_string();
+ }
+}
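For reference, the constant cases above fold as follows (a worked example of the semantics shown; the actual branch selection happens in KnownSuccessorBlock below):

// typeof 1          -> "number"     (HasNumberValue)
// typeof "x"        -> "string"     (HasStringValue)
// typeof true       -> "boolean"    (ODDBALL_TYPE, true/false values)
// typeof null       -> "object"     ("null" only under FLAG_harmony_typeof)
// typeof undefined  -> "undefined"
// So a branch such as `if (typeof x == "number")` on a constant x picks
// its successor at compile time instead of emitting a runtime test.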
+
+
+bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ HConstant* constant = HConstant::cast(value());
+ String* type_string = TypeOfString(constant, isolate());
+ bool same_type = type_literal_.IsKnownGlobal(type_string);
+ *block = same_type ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ } else if (value()->representation().IsSpecialization()) {
+ bool number_type =
+ type_literal_.IsKnownGlobal(isolate()->heap()->number_string());
+ *block = number_type ? FirstSuccessor() : SecondSuccessor();
return true;
}
*block = NULL;
@@ -1217,6 +1366,17 @@ void HCheckMapValue::PrintDataTo(StringStream* stream) {
}
+HValue* HCheckMapValue::Canonicalize() {
+ if (map()->IsConstant()) {
+ HConstant* c_map = HConstant::cast(map());
+ return HCheckMaps::CreateAndInsertAfter(
+ block()->graph()->zone(), value(), c_map->MapValue(),
+ c_map->HasStableMapValue(), this);
+ }
+ return this;
+}
+
+
void HForInPrepareMap::PrintDataTo(StringStream* stream) {
enumerable()->PrintNameTo(stream);
}
@@ -1375,19 +1535,22 @@ void HTypeof::PrintDataTo(StringStream* stream) {
HInstruction* HForceRepresentation::New(Zone* zone, HValue* context,
- HValue* value, Representation required_representation) {
+ HValue* value, Representation representation) {
if (FLAG_fold_constants && value->IsConstant()) {
HConstant* c = HConstant::cast(value);
if (c->HasNumberValue()) {
double double_res = c->DoubleValue();
- if (TypeInfo::IsInt32Double(double_res)) {
+ if (representation.IsDouble()) {
+ return HConstant::New(zone, context, double_res);
+ } else if (representation.CanContainDouble(double_res)) {
return HConstant::New(zone, context,
static_cast<int32_t>(double_res),
- required_representation);
+ representation);
}
}
}
- return new(zone) HForceRepresentation(value, required_representation);
+ return new(zone) HForceRepresentation(value, representation);
}
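The int32 folding decision above, sketched standalone; it is an assumption here that CanContainDouble tests exact int32 representability, which matches its use:

#include <cstdint>

// Fold a numeric constant into an integer representation only when the
// double is exactly an int32 (and not -0.0, which int32 cannot encode).
bool TryFoldToInt32(double value, int32_t* out) {
  if (value != value) return false;  // NaN
  if (value < -2147483648.0 || value > 2147483647.0) return false;
  int32_t as_int = static_cast<int32_t>(value);
  if (static_cast<double>(as_int) != value) return false;  // not exact
  if (value == 0 && 1.0 / value < 0) return false;         // -0.0
  *out = as_int;
  return true;
}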
@@ -1401,94 +1564,67 @@ void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
+ if (CanTruncateToSmi()) stream->Add(" truncating-smi");
if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
if (CheckFlag(kAllowUndefinedAsNaN)) stream->Add(" allow-undefined-as-nan");
}
-static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- }
- // A change from an integer32 can be replaced by the integer32 value.
- if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
HValue* HUnaryMathOperation::Canonicalize() {
if (op() == kMathRound || op() == kMathFloor) {
HValue* val = value();
if (val->IsChange()) val = HChange::cast(val)->value();
-
- // If the input is smi or integer32 then we replace the instruction with its
- // input.
if (val->representation().IsSmiOrInteger32()) {
- if (!val->representation().Equals(representation())) {
- HChange* result = new(block()->zone()) HChange(
- val, representation(), false, false);
- result->InsertBefore(this);
- return result;
- }
- return val;
+ if (val->representation().Equals(representation())) return val;
+ return Prepend(new(block()->zone()) HChange(
+ val, representation(), false, false));
}
}
+ if (op() == kMathFloor && value()->IsDiv() && value()->UseCount() == 1) {
+ HDiv* hdiv = HDiv::cast(value());
+
+ HValue* left = hdiv->left();
+ if (left->representation().IsInteger32()) {
+ // A value with an integer representation does not need to be transformed.
+ } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32()) {
+ // A change from an integer32 can be replaced by the integer32 value.
+ left = HChange::cast(left)->value();
+ } else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
+ left = Prepend(new(block()->zone()) HChange(
+ left, Representation::Integer32(), false, false));
+ } else {
+ return this;
+ }
- if (op() == kMathFloor) {
- HValue* val = value();
- if (val->IsDiv() && (val->UseCount() == 1)) {
- HDiv* hdiv = HDiv::cast(val);
- HValue* left = hdiv->left();
- HValue* right = hdiv->right();
- // Try to simplify left and right values of the division.
- HValue* new_left = SimplifiedDividendForMathFloorOfDiv(left);
- if (new_left == NULL &&
- hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
- new_left = new(block()->zone()) HChange(
- left, Representation::Integer32(), false, false);
- HChange::cast(new_left)->InsertBefore(this);
- }
- HValue* new_right =
- LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
- if (new_right == NULL &&
-#if V8_TARGET_ARCH_ARM
- CpuFeatures::IsSupported(SUDIV) &&
-#endif
- hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
- new_right = new(block()->zone()) HChange(
- right, Representation::Integer32(), false, false);
- HChange::cast(new_right)->InsertBefore(this);
- }
-
- // Return if left or right are not optimizable.
- if ((new_left == NULL) || (new_right == NULL)) return this;
-
- // Insert the new values in the graph.
- if (new_left->IsInstruction() &&
- !HInstruction::cast(new_left)->IsLinked()) {
- HInstruction::cast(new_left)->InsertBefore(this);
- }
- if (new_right->IsInstruction() &&
- !HInstruction::cast(new_right)->IsLinked()) {
- HInstruction::cast(new_right)->InsertBefore(this);
- }
- HMathFloorOfDiv* instr =
- HMathFloorOfDiv::New(block()->zone(), context(), new_left, new_right);
- instr->InsertBefore(this);
- return instr;
+ HValue* right = hdiv->right();
+ if (right->IsInteger32Constant()) {
+ right = Prepend(HConstant::cast(right)->CopyToRepresentation(
+ Representation::Integer32(), right->block()->zone()));
+ } else if (right->representation().IsInteger32()) {
+ // A value with an integer representation does not need to be transformed.
+ } else if (right->IsChange() &&
+ HChange::cast(right)->from().IsInteger32()) {
+ // A change from an integer32 can be replaced by the integer32 value.
+ right = HChange::cast(right)->value();
+ } else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
+ right = Prepend(new(block()->zone()) HChange(
+ right, Representation::Integer32(), false, false));
+ } else {
+ return this;
}
+
+ return Prepend(HMathFloorOfDiv::New(
+ block()->zone(), context(), left, right));
}
return this;
}
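The rewrite above folds the JavaScript pattern Math.floor(a / b) into a single HMathFloorOfDiv once both operands can be viewed as int32. The underlying numeric identity, as a standalone sketch:

#include <cstdint>

// For int32 a, b with b != 0 and no INT_MIN / -1 overflow, JavaScript's
// Math.floor(a / b) equals integer floor-division:
int32_t FloorDiv(int32_t a, int32_t b) {
  int32_t q = a / b;  // C++ division truncates toward zero
  if ((a % b != 0) && ((a < 0) != (b < 0))) --q;  // adjust toward -infinity
  return q;
}
// e.g. FloorDiv(-7, 2) == -4, matching Math.floor(-7 / 2).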
HValue* HCheckInstanceType::Canonicalize() {
- if (check_ == IS_STRING && value()->type().IsString()) {
+ if ((check_ == IS_SPEC_OBJECT && value()->type().IsJSObject()) ||
+ (check_ == IS_JS_ARRAY && value()->type().IsJSArray()) ||
+ (check_ == IS_STRING && value()->type().IsString())) {
return value();
}
@@ -1526,7 +1662,7 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
*tag = kStringTag;
return;
case IS_INTERNALIZED_STRING:
- *mask = kIsNotInternalizedMask;
+ *mask = kIsNotStringMask | kIsNotInternalizedMask;
*tag = kInternalizedTag;
return;
default:
@@ -1535,31 +1671,33 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
}
-void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) {
- ASSERT(side_effect == kChangesMaps);
- // TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
- // type information is rich enough we should generalize this to any HType
- // for which the map is known.
- if (HasNoUses() && dominator->IsStoreNamedField()) {
- HStoreNamedField* store = HStoreNamedField::cast(dominator);
- if (!store->has_transition() || store->object() != value()) return;
- HConstant* transition = HConstant::cast(store->transition());
- if (map_set_.Contains(transition->GetUnique())) {
- DeleteAndReplaceWith(NULL);
- return;
- }
+void HCheckMaps::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
+ stream->Add(" [%p", *maps()->at(0).handle());
+ for (int i = 1; i < maps()->size(); ++i) {
+ stream->Add(",%p", *maps()->at(i).handle());
}
+ stream->Add("]%s", IsStabilityCheck() ? "(stability-check)" : "");
}
-void HCheckMaps::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(" [%p", *map_set_.at(0).handle());
- for (int i = 1; i < map_set_.size(); ++i) {
- stream->Add(",%p", *map_set_.at(i).handle());
+HValue* HCheckMaps::Canonicalize() {
+ if (!IsStabilityCheck() && maps_are_stable() && value()->IsConstant()) {
+ HConstant* c_value = HConstant::cast(value());
+ if (c_value->HasObjectMap()) {
+ for (int i = 0; i < maps()->size(); ++i) {
+ if (c_value->ObjectMap() == maps()->at(i)) {
+ if (maps()->size() > 1) {
+ set_maps(new(block()->graph()->zone()) UniqueSet<Map>(
+ maps()->at(i), block()->graph()->zone()));
+ }
+ MarkAsStabilityCheck();
+ break;
+ }
+ }
+ }
}
- stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : "");
+ return this;
}
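
Canonicalize here narrows a multi-map check to the constant's own stable map and downgrades it to a pure stability check. A toy model of that narrowing, with maps reduced to plain ints, might look like this.

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> maps = {1, 2, 3};  // the maps() set of the HCheckMaps
  int constant_map = 2;               // ObjectMap() of the constant input
  for (int m : maps) {
    if (m == constant_map) {
      maps.assign(1, m);  // narrow the set to the single matching map
      printf("now a stability check on map %d\n", m);
      break;
    }
  }
  return 0;
}
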
@@ -1572,9 +1710,7 @@ void HCheckValue::PrintDataTo(StringStream* stream) {
HValue* HCheckValue::Canonicalize() {
return (value()->IsConstant() &&
- HConstant::cast(value())->GetUnique() == object_)
- ? NULL
- : this;
+ HConstant::cast(value())->EqualsUnique(object_)) ? NULL : this;
}
@@ -1644,7 +1780,17 @@ Range* HChange::InferRange(Zone* zone) {
input_range != NULL &&
input_range->IsInSmiRange()))) {
set_type(HType::Smi());
- ClearGVNFlag(kChangesNewSpacePromotion);
+ ClearChangesFlag(kNewSpacePromotion);
+ }
+ if (to().IsSmiOrTagged() &&
+ input_range != NULL &&
+ input_range->IsInSmiRange() &&
+ (!SmiValuesAre32Bits() ||
+ !value()->CheckFlag(HValue::kUint32) ||
+ input_range->upper() != kMaxInt)) {
+ // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
+ // interval, so we treat kMaxInt as a sentinel for this entire interval.
+ ClearFlag(kCanOverflow);
}
Range* result = (input_range != NULL)
? input_range->Copy(zone)
@@ -1667,7 +1813,7 @@ Range* HConstant::InferRange(Zone* zone) {
}
-int HPhi::position() const {
+HSourcePosition HPhi::position() const {
return block()->first()->position();
}
@@ -1769,15 +1915,46 @@ Range* HDiv::InferRange(Zone* zone) {
result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
(a->CanBeMinusZero() ||
(a->CanBeZero() && b->CanBeNegative())));
- if (!a->Includes(kMinInt) ||
- !b->Includes(-1) ||
- CheckFlag(kAllUsesTruncatingToInt32)) {
- // It is safe to clear kCanOverflow when kAllUsesTruncatingToInt32.
- ClearFlag(HValue::kCanOverflow);
+ if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+ ClearFlag(kCanOverflow);
}
if (!b->CanBeZero()) {
- ClearFlag(HValue::kCanBeDivByZero);
+ ClearFlag(kCanBeDivByZero);
+ }
+ return result;
+ } else {
+ return HValue::InferRange(zone);
+ }
+}
+
+
+Range* HMathFloorOfDiv::InferRange(Zone* zone) {
+ if (representation().IsInteger32()) {
+ Range* a = left()->range();
+ Range* b = right()->range();
+ Range* result = new(zone) Range();
+ result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ (a->CanBeMinusZero() ||
+ (a->CanBeZero() && b->CanBeNegative())));
+ if (!a->Includes(kMinInt)) {
+ ClearFlag(kLeftCanBeMinInt);
+ }
+
+ if (!a->CanBeNegative()) {
+ ClearFlag(HValue::kLeftCanBeNegative);
+ }
+
+ if (!a->CanBePositive()) {
+ ClearFlag(HValue::kLeftCanBePositive);
+ }
+
+ if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+ ClearFlag(kCanOverflow);
+ }
+
+ if (!b->CanBeZero()) {
+ ClearFlag(kCanBeDivByZero);
}
return result;
} else {
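
Both InferRange bodies above clear kCanOverflow unless the ranges still admit the one overflowing case, kMinInt / -1, whose true quotient exceeds kMaxInt. A quick stand-alone demonstration:

#include <climits>
#include <cstdio>

int main() {
  // Computed in 64 bits because INT_MIN / -1 is undefined behavior in C++,
  // which is precisely the overflow the kCanOverflow flag guards against.
  long long q = static_cast<long long>(INT_MIN) / -1;
  printf("%lld > %d\n", q, INT_MAX);  // 2147483648 > 2147483647
  return 0;
}
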
@@ -1804,6 +1981,10 @@ Range* HMod::InferRange(Zone* zone) {
result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
left_can_be_negative);
+ if (!a->CanBeNegative()) {
+ ClearFlag(HValue::kLeftCanBeNegative);
+ }
+
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
ClearFlag(HValue::kCanOverflow);
}
@@ -2279,6 +2460,12 @@ Range* HMathMinMax::InferRange(Zone* zone) {
}
+void HPushArguments::AddInput(HValue* value) {
+ inputs_.Add(NULL, value->block()->zone());
+ SetOperandAt(OperandCount() - 1, value);
+}
+
+
void HPhi::PrintTo(StringStream* stream) {
stream->Add("[");
for (int i = 0; i < OperandCount(); ++i) {
@@ -2293,7 +2480,6 @@ void HPhi::PrintTo(StringStream* stream) {
int32_non_phi_uses() + int32_indirect_uses(),
double_non_phi_uses() + double_indirect_uses(),
tagged_non_phi_uses() + tagged_indirect_uses());
- PrintRangeTo(stream);
PrintTypeTo(stream);
stream->Add("]");
}
@@ -2353,7 +2539,7 @@ void HPhi::InitRealUses(int phi_id) {
HValue* value = it.value();
if (!value->IsPhi()) {
Representation rep = value->observed_input_representation(it.index());
- non_phi_uses_[rep.kind()] += value->LoopWeight();
+ non_phi_uses_[rep.kind()] += 1;
if (FLAG_trace_representation) {
PrintF("#%d Phi is used by real #%d %s as %s\n",
id(), value->id(), value->Mnemonic(), rep.Mnemonic());
@@ -2493,7 +2679,7 @@ void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name = function()->debug_name()->ToCString();
- stream->Add("%s, id=%d", *name, function()->id().ToInt());
+ stream->Add("%s, id=%d", name.get(), function()->id().ToInt());
}
@@ -2503,58 +2689,67 @@ static bool IsInteger32(double value) {
}
-HConstant::HConstant(Handle<Object> handle, Representation r)
- : HTemplateInstruction<0>(HType::TypeFromValue(handle)),
- object_(Unique<Object>::CreateUninitialized(handle)),
+HConstant::HConstant(Handle<Object> object, Representation r)
+ : HTemplateInstruction<0>(HType::FromValue(object)),
+ object_(Unique<Object>::CreateUninitialized(object)),
+ object_map_(Handle<Map>::null()),
+ has_stable_map_value_(false),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
has_external_reference_value_(false),
- is_internalized_string_(false),
is_not_in_new_space_(true),
- is_cell_(false),
- boolean_value_(handle->BooleanValue()) {
- if (handle->IsHeapObject()) {
- Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
- is_not_in_new_space_ = !heap->InNewSpace(*handle);
- }
- if (handle->IsNumber()) {
- double n = handle->Number();
+ boolean_value_(object->BooleanValue()),
+ is_undetectable_(false),
+ instance_type_(kUnknownInstanceType) {
+ if (object->IsHeapObject()) {
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+ Isolate* isolate = heap_object->GetIsolate();
+ Handle<Map> map(heap_object->map(), isolate);
+ is_not_in_new_space_ = !isolate->heap()->InNewSpace(*object);
+ instance_type_ = map->instance_type();
+ is_undetectable_ = map->is_undetectable();
+ if (map->is_stable()) object_map_ = Unique<Map>::CreateImmovable(map);
+ has_stable_map_value_ = (instance_type_ == MAP_TYPE &&
+ Handle<Map>::cast(heap_object)->is_stable());
+ }
+ if (object->IsNumber()) {
+ double n = object->Number();
has_int32_value_ = IsInteger32(n);
int32_value_ = DoubleToInt32(n);
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
double_value_ = n;
has_double_value_ = true;
    // TODO(titzer): if this heap number is in new space, tenure a new one.
- } else {
- is_internalized_string_ = handle->IsInternalizedString();
}
- is_cell_ = !handle.is_null() &&
- (handle->IsCell() || handle->IsPropertyCell());
Initialize(r);
}
-HConstant::HConstant(Unique<Object> unique,
+HConstant::HConstant(Unique<Object> object,
+ Unique<Map> object_map,
+ bool has_stable_map_value,
Representation r,
HType type,
- bool is_internalize_string,
bool is_not_in_new_space,
- bool is_cell,
- bool boolean_value)
+ bool boolean_value,
+ bool is_undetectable,
+ InstanceType instance_type)
: HTemplateInstruction<0>(type),
- object_(unique),
+ object_(object),
+ object_map_(object_map),
+ has_stable_map_value_(has_stable_map_value),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
has_external_reference_value_(false),
- is_internalized_string_(is_internalize_string),
is_not_in_new_space_(is_not_in_new_space),
- is_cell_(is_cell),
- boolean_value_(boolean_value) {
- ASSERT(!unique.handle().is_null());
- ASSERT(!type.IsTaggedNumber());
+ boolean_value_(boolean_value),
+ is_undetectable_(is_undetectable),
+ instance_type_(instance_type) {
+ ASSERT(!object.handle().is_null());
+ ASSERT(!type.IsTaggedNumber() || type.IsNone());
Initialize(r);
}
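
Both constructors now snapshot map-derived facts (instance type, undetectability, and a stable map if there is one) at construction time, so later passes can query the constant without touching the heap. A toy model of that eager caching, with stand-in types throughout:

#include <cstdio>

// All types below are stand-ins for the real Map/Object/HConstant.
struct Map    { int instance_type; bool is_stable; bool is_undetectable; };
struct Object { const Map* map; };

struct Constant {
  int instance_type_;
  bool is_undetectable_;
  bool has_stable_map_;
  explicit Constant(const Object& o)  // snapshot the map facts eagerly
      : instance_type_(o.map->instance_type),
        is_undetectable_(o.map->is_undetectable),
        has_stable_map_(o.map->is_stable) {}
};

int main() {
  Map m = {42, true, false};
  Object o = {&m};
  Constant c(o);
  printf("type=%d undetectable=%d stable=%d\n",
         c.instance_type_, c.is_undetectable_, c.has_stable_map_);
  return 0;
}
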
@@ -2564,16 +2759,18 @@ HConstant::HConstant(int32_t integer_value,
bool is_not_in_new_space,
Unique<Object> object)
: object_(object),
+ object_map_(Handle<Map>::null()),
+ has_stable_map_value_(false),
has_smi_value_(Smi::IsValid(integer_value)),
has_int32_value_(true),
has_double_value_(true),
has_external_reference_value_(false),
- is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
- is_cell_(false),
boolean_value_(integer_value != 0),
+ is_undetectable_(false),
int32_value_(integer_value),
- double_value_(FastI2D(integer_value)) {
+ double_value_(FastI2D(integer_value)),
+ instance_type_(kUnknownInstanceType) {
// It's possible to create a constant with a value in Smi-range but stored
// in a (pre-existing) HeapNumber. See crbug.com/349878.
bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
@@ -2588,15 +2785,17 @@ HConstant::HConstant(double double_value,
bool is_not_in_new_space,
Unique<Object> object)
: object_(object),
+ object_map_(Handle<Map>::null()),
+ has_stable_map_value_(false),
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
has_external_reference_value_(false),
- is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
- is_cell_(false),
boolean_value_(double_value != 0 && !std::isnan(double_value)),
+ is_undetectable_(false),
int32_value_(DoubleToInt32(double_value)),
- double_value_(double_value) {
+ double_value_(double_value),
+ instance_type_(kUnknownInstanceType) {
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
// It's possible to create a constant with a value in Smi-range but stored
// in a (pre-existing) HeapNumber. See crbug.com/349878.
@@ -2608,17 +2807,19 @@ HConstant::HConstant(double double_value,
HConstant::HConstant(ExternalReference reference)
- : HTemplateInstruction<0>(HType::None()),
+ : HTemplateInstruction<0>(HType::Any()),
object_(Unique<Object>(Handle<Object>::null())),
+ object_map_(Handle<Map>::null()),
+ has_stable_map_value_(false),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
has_external_reference_value_(true),
- is_internalized_string_(false),
is_not_in_new_space_(true),
- is_cell_(false),
boolean_value_(true),
- external_reference_value_(reference) {
+ is_undetectable_(false),
+ external_reference_value_(reference),
+ instance_type_(kUnknownInstanceType) {
Initialize(Representation::External());
}
@@ -2650,6 +2851,41 @@ void HConstant::Initialize(Representation r) {
}
+bool HConstant::ImmortalImmovable() const {
+ if (has_int32_value_) {
+ return false;
+ }
+ if (has_double_value_) {
+ if (IsSpecialDouble()) {
+ return true;
+ }
+ return false;
+ }
+ if (has_external_reference_value_) {
+ return false;
+ }
+
+ ASSERT(!object_.handle().is_null());
+ Heap* heap = isolate()->heap();
+ ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
+ ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
+ return
+#define IMMORTAL_IMMOVABLE_ROOT(name) \
+ object_.IsKnownGlobal(heap->name()) ||
+ IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
+#undef IMMORTAL_IMMOVABLE_ROOT
+#define INTERNALIZED_STRING(name, value) \
+ object_.IsKnownGlobal(heap->name()) ||
+ INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
+#undef INTERNALIZED_STRING
+#define STRING_TYPE(NAME, size, name, Name) \
+ object_.IsKnownGlobal(heap->name##_map()) ||
+ STRING_TYPE_LIST(STRING_TYPE)
+#undef STRING_TYPE
+ false;
+}
+
+
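
ImmortalImmovable above expands three X-macro lists into one long chain of IsKnownGlobal checks ORed together and terminated by false. A compilable miniature of the same macro trick, with a made-up root list:

#include <cstdio>
#include <cstring>

// Made-up root list; V8's real lists are much longer.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) V(undefined) V(null) V(true_value)

bool IsKnownGlobal(const char* handle, const char* root) {
  return strcmp(handle, root) == 0;
}

bool ImmortalImmovable(const char* handle) {
  return
#define IMMORTAL_IMMOVABLE_ROOT(name) IsKnownGlobal(handle, #name) ||
      IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
#undef IMMORTAL_IMMOVABLE_ROOT
      false;  // terminates the generated "... ||" chain
}

int main() {
  printf("%d %d\n", ImmortalImmovable("null"), ImmortalImmovable("foo"));
  return 0;
}
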
bool HConstant::EmitAtUses() {
ASSERT(IsLinked());
if (block()->graph()->has_osr() &&
@@ -2660,6 +2896,7 @@ bool HConstant::EmitAtUses() {
if (UseCount() == 0) return true;
if (IsCell()) return false;
if (representation().IsDouble()) return false;
+ if (representation().IsExternal()) return false;
return true;
}
@@ -2680,12 +2917,14 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
}
ASSERT(!object_.handle().is_null());
return new(zone) HConstant(object_,
+ object_map_,
+ has_stable_map_value_,
r,
type_,
- is_internalized_string_,
is_not_in_new_space_,
- is_cell_,
- boolean_value_);
+ boolean_value_,
+ is_undetectable_,
+ instance_type_);
}
@@ -2731,6 +2970,13 @@ void HConstant::PrintDataTo(StringStream* stream) {
external_reference_value_.address()));
} else {
handle(Isolate::Current())->ShortPrint(stream);
+ stream->Add(" ");
+ if (HasStableMapValue()) {
+ stream->Add("[stable-map] ");
+ }
+ if (HasObjectMap()) {
+ stream->Add("[map %p] ", *ObjectMap().handle());
+ }
}
if (!is_not_in_new_space_) {
stream->Add("[new space] ");
@@ -2951,14 +3197,14 @@ Range* HLoadNamedField::InferRange(Zone* zone) {
Range* HLoadKeyed::InferRange(Zone* zone) {
switch (elements_kind()) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
return new(zone) Range(kMinInt8, kMaxInt8);
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
return new(zone) Range(kMinUInt8, kMaxUInt8);
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
return new(zone) Range(kMinInt16, kMaxInt16);
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
return new(zone) Range(kMinUInt16, kMaxUInt16);
default:
return HValue::InferRange(zone);
@@ -2999,12 +3245,86 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (left()->IsConstant() && right()->IsConstant()) {
- bool comparison_result =
- HConstant::cast(left())->DataEquals(HConstant::cast(right()));
- *block = comparison_result
- ? FirstSuccessor()
- : SecondSuccessor();
+ if (known_successor_index() != kNoKnownSuccessorIndex) {
+ *block = SuccessorAt(known_successor_index());
+ return true;
+ }
+ if (FLAG_fold_constants && left()->IsConstant() && right()->IsConstant()) {
+ *block = HConstant::cast(left())->DataEquals(HConstant::cast(right()))
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool ConstantIsObject(HConstant* constant, Isolate* isolate) {
+ if (constant->HasNumberValue()) return false;
+ if (constant->GetUnique().IsKnownGlobal(isolate->heap()->null_value())) {
+ return true;
+ }
+ if (constant->IsUndetectable()) return false;
+ InstanceType type = constant->GetInstanceType();
+ return (FIRST_NONCALLABLE_SPEC_OBJECT_TYPE <= type) &&
+ (type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+}
+
+
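
Each KnownSuccessorBlock below implements the same contract: if the branch can be decided statically, write the winning successor into *block and return true; otherwise store NULL and return false so both arms stay live. A stand-alone sketch of that contract (all names hypothetical):

#include <cstddef>
#include <cstdio>

struct Block { const char* label; };

// Hypothetical mirror of the KnownSuccessorBlock contract.
bool KnownSuccessor(bool is_constant, bool constant_is_true,
                    Block* first, Block* second, Block** block) {
  if (is_constant) {
    *block = constant_is_true ? first : second;  // decided at compile time
    return true;
  }
  *block = NULL;  // undecidable: keep both successors alive
  return false;
}

int main() {
  Block then_block = {"then"}, else_block = {"else"};
  Block* out = NULL;
  if (KnownSuccessor(true, false, &then_block, &else_block, &out)) {
    printf("always jumps to %s\n", out->label);  // always jumps to else
  }
  return 0;
}
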
+bool HIsObjectAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ *block = ConstantIsObject(HConstant::cast(value()), isolate())
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (known_successor_index() != kNoKnownSuccessorIndex) {
+ *block = SuccessorAt(known_successor_index());
+ return true;
+ }
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ *block = HConstant::cast(value())->HasStringValue()
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ if (value()->type().IsString()) {
+ *block = FirstSuccessor();
+ return true;
+ }
+ if (value()->type().IsSmi() ||
+ value()->type().IsNull() ||
+ value()->type().IsBoolean() ||
+ value()->type().IsUndefined() ||
+ value()->type().IsJSObject()) {
+ *block = SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ *block = HConstant::cast(value())->IsUndetectable()
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool HHasInstanceTypeAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ InstanceType type = HConstant::cast(value())->GetInstanceType();
+ *block = (from_ <= type) && (type <= to_)
+ ? FirstSuccessor() : SecondSuccessor();
return true;
}
*block = NULL;
@@ -3018,7 +3338,30 @@ void HCompareHoleAndBranch::InferRepresentation(
}
+bool HCompareNumericAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (left() == right() &&
+ left()->representation().IsSmiOrInteger32()) {
+ *block = (token() == Token::EQ ||
+ token() == Token::EQ_STRICT ||
+ token() == Token::LTE ||
+ token() == Token::GTE)
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
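
The self-comparison fold above is deliberately limited to Smi/Integer32 inputs: for doubles, x == x and x <= x are false when x is NaN, so left() == right() alone proves nothing. A two-line demonstration:

#include <cmath>
#include <cstdio>

int main() {
  double x = std::nan("");
  printf("x == x: %d, x <= x: %d\n", x == x, x <= x);  // 0, 0 for NaN
  int y = 7;
  printf("y <= y: %d\n", y <= y);                      // 1, for any int
  return 0;
}
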
bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ HConstant* constant = HConstant::cast(value());
+ if (constant->HasDoubleValue()) {
+ *block = IsMinusZero(constant->DoubleValue())
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ }
if (value()->representation().IsSmiOrInteger32()) {
// A Smi or Integer32 cannot contain minus zero.
*block = SecondSuccessor();
@@ -3088,35 +3431,26 @@ void HParameter::PrintDataTo(StringStream* stream) {
void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
access_.PrintTo(stream);
-}
-
-HCheckMaps* HCheckMaps::New(Zone* zone,
- HValue* context,
- HValue* value,
- Handle<Map> map,
- CompilationInfo* info,
- HValue* typecheck) {
- HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
- check_map->Add(map, zone);
- if (map->CanOmitMapChecks() &&
- value->IsConstant() &&
- HConstant::cast(value)->HasMap(map)) {
- // TODO(titzer): collect dependent map checks into a list.
- check_map->omit_ = true;
- if (map->CanTransition()) {
- map->AddDependentCompilationInfo(
- DependentCode::kPrototypeCheckGroup, info);
+ if (maps() != NULL) {
+ stream->Add(" [%p", *maps()->at(0).handle());
+ for (int i = 1; i < maps()->size(); ++i) {
+ stream->Add(",%p", *maps()->at(i).handle());
}
+ stream->Add("]");
+ }
+
+ if (HasDependency()) {
+ stream->Add(" ");
+ dependency()->PrintNameTo(stream);
}
- return check_map;
}
void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
}
@@ -3134,7 +3468,7 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintNameTo(stream);
if (IsDehoisted()) {
- stream->Add(" + %d]", index_offset());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
@@ -3238,10 +3572,8 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
key_load->elements_kind());
map_check->InsertBefore(this);
index->InsertBefore(this);
- HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
- object(), index);
- load->InsertBefore(this);
- return load;
+ return Prepend(new(block()->zone()) HLoadFieldByIndex(
+ object(), index));
}
}
}
@@ -3254,7 +3586,7 @@ void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
ASSERT(name()->IsString());
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" = ");
value()->PrintNameTo(stream);
}
@@ -3288,7 +3620,7 @@ void HStoreKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintNameTo(stream);
if (IsDehoisted()) {
- stream->Add(" + %d] = ", index_offset());
+ stream->Add(" + %d] = ", base_offset());
} else {
stream->Add("] = ");
}
@@ -3343,7 +3675,8 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
void HInnerAllocatedObject::PrintDataTo(StringStream* stream) {
base_object()->PrintNameTo(stream);
- stream->Add(" offset %d", offset());
+ stream->Add(" offset ");
+ offset()->PrintTo(stream);
}
@@ -3355,12 +3688,6 @@ void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
}
-void HStoreGlobalGeneric::PrintDataTo(StringStream* stream) {
- stream->Add("%o = ", *name());
- value()->PrintNameTo(stream);
-}
-
-
void HLoadContextSlot::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add("[%d]", slot_index());
@@ -3400,6 +3727,12 @@ HType HChange::CalculateInferredType() {
Representation HUnaryMathOperation::RepresentationFromInputs() {
+ if (SupportsFlexibleFloorAndRound() &&
+ (op_ == kMathFloor || op_ == kMathRound)) {
+ // Floor and Round always take a double input. The integral result can be
+ // used as an integer or a double. Infer the representation from the uses.
+ return Representation::None();
+ }
Representation rep = representation();
// If any of the actual input representation is more general than what we
// have so far but not Tagged, use that representation instead.
@@ -3411,11 +3744,11 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
}
-void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
+bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
Zone* zone = block()->zone();
- if (!FLAG_use_allocation_folding) return;
+ if (!FLAG_use_allocation_folding) return false;
// Try to fold allocations together with their dominating allocations.
if (!dominator->IsAllocate()) {
@@ -3423,7 +3756,16 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
PrintF("#%d (%s) cannot fold into #%d (%s)\n",
id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
}
- return;
+ return false;
+ }
+
+ // Check whether we are folding within the same block for local folding.
+ if (FLAG_use_local_allocation_folding && dominator->block() != block()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return false;
}
HAllocate* dominator_allocate = HAllocate::cast(dominator);
@@ -3431,18 +3773,44 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* current_size = size();
// TODO(hpayer): Add support for non-constant allocation in dominator.
- if (!current_size->IsInteger32Constant() ||
- !dominator_size->IsInteger32Constant()) {
+ if (!dominator_size->IsInteger32Constant()) {
if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), dynamic allocation size\n",
+ PrintF("#%d (%s) cannot fold into #%d (%s), "
+ "dynamic allocation size in dominator\n",
id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
}
- return;
+ return false;
}
dominator_allocate = GetFoldableDominator(dominator_allocate);
if (dominator_allocate == NULL) {
- return;
+ return false;
+ }
+
+ if (!has_size_upper_bound()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), "
+ "can't estimate total allocation size\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return false;
+ }
+
+ if (!current_size->IsInteger32Constant()) {
+    // If it's not a constant then it is a size_in_bytes calculation graph
+ // like this: (const_header_size + const_element_size * size).
+ ASSERT(current_size->IsInstruction());
+
+ HInstruction* current_instr = HInstruction::cast(current_size);
+ if (!current_instr->Dominates(dominator_allocate)) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), dynamic size "
+ "value does not dominate target allocation\n",
+ id(), Mnemonic(), dominator_allocate->id(),
+ dominator_allocate->Mnemonic());
+ }
+ return false;
+ }
}
ASSERT((IsNewSpaceAllocation() &&
@@ -3457,59 +3825,91 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
int32_t original_object_size =
HConstant::cast(dominator_size)->GetInteger32Constant();
int32_t dominator_size_constant = original_object_size;
- int32_t current_size_constant =
- HConstant::cast(current_size)->GetInteger32Constant();
- int32_t new_dominator_size = dominator_size_constant + current_size_constant;
if (MustAllocateDoubleAligned()) {
- if (!dominator_allocate->MustAllocateDoubleAligned()) {
- dominator_allocate->MakeDoubleAligned();
- }
if ((dominator_size_constant & kDoubleAlignmentMask) != 0) {
dominator_size_constant += kDoubleSize / 2;
- new_dominator_size += kDoubleSize / 2;
}
}
- if (new_dominator_size > isolate()->heap()->MaxRegularSpaceAllocationSize()) {
+ int32_t current_size_max_value = size_upper_bound()->GetInteger32Constant();
+ int32_t new_dominator_size = dominator_size_constant + current_size_max_value;
+
+  // Since we clear the first word after the folded memory, we cannot use
+  // the full Page::kMaxRegularHeapObjectSize.
+ if (new_dominator_size > Page::kMaxRegularHeapObjectSize - kPointerSize) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
id(), Mnemonic(), dominator_allocate->id(),
dominator_allocate->Mnemonic(), new_dominator_size);
}
- return;
+ return false;
}
- HInstruction* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
- zone,
- context(),
- new_dominator_size,
- Representation::None(),
- dominator_allocate);
- dominator_allocate->UpdateSize(new_dominator_size_constant);
+ HInstruction* new_dominator_size_value;
+
+ if (current_size->IsInteger32Constant()) {
+ new_dominator_size_value =
+ HConstant::CreateAndInsertBefore(zone,
+ context(),
+ new_dominator_size,
+ Representation::None(),
+ dominator_allocate);
+ } else {
+ HValue* new_dominator_size_constant =
+ HConstant::CreateAndInsertBefore(zone,
+ context(),
+ dominator_size_constant,
+ Representation::Integer32(),
+ dominator_allocate);
+
+ // Add old and new size together and insert.
+ current_size->ChangeRepresentation(Representation::Integer32());
+ new_dominator_size_value = HAdd::New(zone, context(),
+ new_dominator_size_constant, current_size);
+ new_dominator_size_value->ClearFlag(HValue::kCanOverflow);
+ new_dominator_size_value->ChangeRepresentation(Representation::Integer32());
+
+ new_dominator_size_value->InsertBefore(dominator_allocate);
+ }
+
+ dominator_allocate->UpdateSize(new_dominator_size_value);
+
+ if (MustAllocateDoubleAligned()) {
+ if (!dominator_allocate->MustAllocateDoubleAligned()) {
+ dominator_allocate->MakeDoubleAligned();
+ }
+ }
+
+ bool keep_new_space_iterable = FLAG_log_gc || FLAG_heap_stats;
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && dominator_allocate->IsNewSpaceAllocation()) {
+ keep_new_space_iterable = keep_new_space_iterable || FLAG_verify_heap;
+#endif
+
+ if (keep_new_space_iterable && dominator_allocate->IsNewSpaceAllocation()) {
dominator_allocate->MakePrefillWithFiller();
} else {
// TODO(hpayer): This is a short-term hack to make allocation mementos
// work again in new space.
dominator_allocate->ClearNextMapWord(original_object_size);
}
-#else
- // TODO(hpayer): This is a short-term hack to make allocation mementos
- // work again in new space.
- dominator_allocate->ClearNextMapWord(original_object_size);
-#endif
- dominator_allocate->clear_next_map_word_ = clear_next_map_word_;
+ dominator_allocate->UpdateClearNextMapWord(MustClearNextMapWord());
// After that replace the dominated allocate instruction.
+ HInstruction* inner_offset = HConstant::CreateAndInsertBefore(
+ zone,
+ context(),
+ dominator_size_constant,
+ Representation::None(),
+ this);
+
HInstruction* dominated_allocate_instr =
HInnerAllocatedObject::New(zone,
context(),
dominator_allocate,
- dominator_size,
+ inner_offset,
type());
dominated_allocate_instr->InsertBefore(this);
DeleteAndReplaceWith(dominated_allocate_instr);
@@ -3518,6 +3918,7 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
id(), Mnemonic(), dominator_allocate->id(),
dominator_allocate->Mnemonic());
}
+ return true;
}
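
HandleSideEffectDominator folds a dominated allocation into its dominator by growing the dominator's reservation by the dominated allocation's upper-bound size (plus any double-alignment padding) and turning the dominated allocate into an inner offset. The toy arithmetic below walks through one fold with made-up sizes:

#include <cstdio>

int main() {
  int dominator_size = 20;  // constant size of the dominating allocate
  int current_size   = 16;  // upper-bound size of the dominated allocate
  const int kDoubleSize = 8;

  // Double alignment may pad the fold point, as in the code above.
  if (dominator_size % kDoubleSize != 0) dominator_size += kDoubleSize / 2;

  int new_dominator_size = dominator_size + current_size;
  int inner_offset = dominator_size;  // the folded object starts here

  printf("reserve %d bytes once; inner object lives at offset %d\n",
         new_dominator_size, inner_offset);  // reserve 40 ... offset 24
  return 0;
}
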
@@ -3609,12 +4010,9 @@ void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
HInnerAllocatedObject::New(zone, context(), dominating_allocate_,
dominating_allocate_->size(), type());
free_space_instr->InsertBefore(this);
- HConstant* filler_map = HConstant::New(
- zone,
- context(),
- isolate()->factory()->free_space_map());
- filler_map->FinalizeUniqueness(); // TODO(titzer): should be init'd a'ready
- filler_map->InsertAfter(free_space_instr);
+ HConstant* filler_map = HConstant::CreateAndInsertAfter(
+ zone, Unique<Map>::CreateImmovable(
+ isolate()->factory()->free_space_map()), true, free_space_instr);
HInstruction* store_map = HStoreNamedField::New(zone, context(),
free_space_instr, HObjectAccess::ForMap(), filler_map);
store_map->SetFlag(HValue::kHasNoObservableSideEffects);
@@ -3627,8 +4025,9 @@ void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
zone, context(), free_space_size, Representation::Smi(), store_map);
// Must force Smi representation for x64 (see comment above).
HObjectAccess access =
- HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset,
- Representation::Smi());
+ HObjectAccess::ForMapAndOffset(isolate()->factory()->free_space_map(),
+ FreeSpace::kSizeOffset,
+ Representation::Smi());
HStoreNamedField* store_size = HStoreNamedField::New(zone, context(),
free_space_instr, access, filler_size);
store_size->SetFlag(HValue::kHasNoObservableSideEffects);
@@ -3638,12 +4037,13 @@ void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
void HAllocate::ClearNextMapWord(int offset) {
- if (clear_next_map_word_) {
+ if (MustClearNextMapWord()) {
Zone* zone = block()->zone();
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(offset);
+ HObjectAccess access =
+ HObjectAccess::ForObservableJSObjectOffset(offset);
HStoreNamedField* clear_next_map =
HStoreNamedField::New(zone, context(), this, access,
- block()->graph()->GetConstantNull());
+ block()->graph()->GetConstant0());
clear_next_map->ClearAllSideEffects();
clear_next_map->InsertAfter(this);
}
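
The filler machinery above keeps new space iterable: a free-space filler is just a map word identifying the hole followed by a size word telling the heap iterator how far to skip. A toy memory layout (the map value and sizes are illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kFreeSpaceMap = 0xF00D;  // stand-in for free_space_map()
  uint32_t heap[8] = {0};

  int free_offset = 2;
  uint32_t free_size = static_cast<uint32_t>(6 * sizeof(uint32_t));
  heap[free_offset]     = kFreeSpaceMap;  // what store_map writes
  heap[free_offset + 1] = free_size;      // what store_size writes

  printf("filler at word %d lets the iterator skip %u bytes\n",
         free_offset, heap[free_offset + 1]);
  return 0;
}
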
@@ -3662,99 +4062,6 @@ void HAllocate::PrintDataTo(StringStream* stream) {
}
-HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
- BitVector* visited) {
- visited->Add(id());
- if (representation().IsSmiOrInteger32() &&
- !value()->representation().Equals(representation())) {
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- }
- if (RequiredInputRepresentation(0).IsSmiOrInteger32() &&
- representation().Equals(RequiredInputRepresentation(0))) {
- return value();
- }
- return NULL;
-}
-
-
-HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (from().IsSmiOrInteger32()) return NULL;
- if (CanTruncateToInt32()) return NULL;
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- ASSERT(!from().IsSmiOrInteger32() || !to().IsSmiOrInteger32());
- return NULL;
-}
-
-
-HValue* HForceRepresentation::EnsureAndPropagateNotMinusZero(
- BitVector* visited) {
- visited->Add(id());
- return value();
-}
-
-
-HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- return left();
- }
- return NULL;
-}
-
-
-HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HMathFloorOfDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- SetFlag(kBailoutOnMinusZero);
- return NULL;
-}
-
-
-HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HSub::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the add operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
-HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the sub operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
bool HStoreKeyed::NeedsCanonicalization() {
// If value is an integer or smi or comes from the result of a keyed load or
// constant then it is either be a non-hole value or in the case of a constant
@@ -3797,7 +4104,7 @@ HInstruction* HInstr::New( \
HConstant* c_right = HConstant::cast(right); \
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
- if (TypeInfo::IsInt32Double(double_res)) { \
+ if (IsInt32Double(double_res)) { \
return H_CONSTANT_INT(double_res); \
} \
return H_CONSTANT_DOUBLE(double_res); \
@@ -3818,17 +4125,42 @@ HInstruction* HStringAdd::New(Zone* zone,
HValue* context,
HValue* left,
HValue* right,
- StringAddFlags flags) {
+ PretenureFlag pretenure_flag,
+ StringAddFlags flags,
+ Handle<AllocationSite> allocation_site) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_right = HConstant::cast(right);
HConstant* c_left = HConstant::cast(left);
if (c_left->HasStringValue() && c_right->HasStringValue()) {
- Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
- c_left->StringValue(), c_right->StringValue());
- return HConstant::New(zone, context, concat);
+ Handle<String> left_string = c_left->StringValue();
+ Handle<String> right_string = c_right->StringValue();
+      // Prevent a possible exception caused by an invalid string length.
+ if (left_string->length() + right_string->length() < String::kMaxLength) {
+ Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
+ c_left->StringValue(), c_right->StringValue());
+ ASSERT(!concat.is_null());
+ return HConstant::New(zone, context, concat);
+ }
}
}
- return new(zone) HStringAdd(context, left, right, flags);
+ return new(zone) HStringAdd(
+ context, left, right, pretenure_flag, flags, allocation_site);
+}
+
+
+void HStringAdd::PrintDataTo(StringStream* stream) {
+ if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ stream->Add("_CheckBoth");
+ } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_LEFT) {
+ stream->Add("_CheckLeft");
+ } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) {
+ stream->Add("_CheckRight");
+ }
+ HBinaryOperation::PrintDataTo(stream);
+ stream->Add(" (");
+ if (pretenure_flag() == NOT_TENURED) stream->Add("N");
+ else if (pretenure_flag() == TENURED) stream->Add("D");
+ stream->Add(")");
}
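
HStringAdd::New now guards the compile-time concatenation against overflowing the engine's maximum string length. The same guard in stand-alone form, with an illustrative limit in place of String::kMaxLength:

#include <cstdio>
#include <string>

int main() {
  const std::string::size_type kMaxLength = 1u << 28;  // illustrative limit
  std::string left = "foo", right = "bar";
  if (left.size() + right.size() < kMaxLength) {
    printf("%s\n", (left + right).c_str());  // the constant-folded result
  }
  return 0;
}
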
@@ -3841,7 +4173,7 @@ HInstruction* HStringCharFromCode::New(
if (std::isfinite(c_code->DoubleValue())) {
uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
return HConstant::New(zone, context,
- LookupSingleCharacterStringFromCode(isolate, code));
+ isolate->factory()->LookupSingleCharacterStringFromCode(code));
}
return HConstant::New(zone, context, isolate->factory()->empty_string());
}
@@ -3863,10 +4195,6 @@ HInstruction* HUnaryMathOperation::New(
}
if (std::isinf(d)) { // +Infinity and -Infinity.
switch (op) {
- case kMathSin:
- case kMathCos:
- case kMathTan:
- return H_CONSTANT_DOUBLE(OS::nan_value());
case kMathExp:
return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
case kMathLog:
@@ -3878,22 +4206,18 @@ HInstruction* HUnaryMathOperation::New(
case kMathRound:
case kMathFloor:
return H_CONSTANT_DOUBLE(d);
+ case kMathClz32:
+ return H_CONSTANT_INT(32);
default:
UNREACHABLE();
break;
}
}
switch (op) {
- case kMathSin:
- return H_CONSTANT_DOUBLE(fast_sin(d));
- case kMathCos:
- return H_CONSTANT_DOUBLE(fast_cos(d));
- case kMathTan:
- return H_CONSTANT_DOUBLE(fast_tan(d));
case kMathExp:
return H_CONSTANT_DOUBLE(fast_exp(d));
case kMathLog:
- return H_CONSTANT_DOUBLE(fast_log(d));
+ return H_CONSTANT_DOUBLE(std::log(d));
case kMathSqrt:
return H_CONSTANT_DOUBLE(fast_sqrt(d));
case kMathPowHalf:
@@ -3906,9 +4230,14 @@ HInstruction* HUnaryMathOperation::New(
      // Doubles are represented as Significand * 2 ^ Exponent. If the
// Exponent is not negative, the double value is already an integer.
if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d);
- return H_CONSTANT_DOUBLE(floor(d + 0.5));
+ return H_CONSTANT_DOUBLE(std::floor(d + 0.5));
case kMathFloor:
- return H_CONSTANT_DOUBLE(floor(d));
+ return H_CONSTANT_DOUBLE(std::floor(d));
+ case kMathClz32: {
+ uint32_t i = DoubleToUint32(d);
+ return H_CONSTANT_INT(
+ (i == 0) ? 32 : CompilerIntrinsics::CountLeadingZeros(i));
+ }
default:
UNREACHABLE();
break;
@@ -3918,6 +4247,43 @@ HInstruction* HUnaryMathOperation::New(
}
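
The new kMathClz32 cases fold Math.clz32 on constants: the input is converted to uint32 and its leading zeros counted, with 0 (including any non-finite input, which converts to 0) mapping to 32. A self-contained version of that fold; the cast here is a crude stand-in for DoubleToUint32:

#include <cstdint>
#include <cstdio>

int Clz32(double d) {
  uint32_t i = static_cast<uint32_t>(d);  // stand-in for DoubleToUint32
  if (i == 0) return 32;
  int n = 0;
  while (!(i & 0x80000000u)) { i <<= 1; ++n; }  // count leading zeros
  return n;
}

int main() {
  printf("%d %d %d\n", Clz32(1), Clz32(0), Clz32(65536));  // 31 32 15
  return 0;
}
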
+Representation HUnaryMathOperation::RepresentationFromUses() {
+ if (op_ != kMathFloor && op_ != kMathRound) {
+ return HValue::RepresentationFromUses();
+ }
+
+ // The instruction can have an int32 or double output. Prefer a double
+ // representation if there are double uses.
+ bool use_double = false;
+
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ int use_index = it.index();
+ Representation rep_observed = use->observed_input_representation(use_index);
+ Representation rep_required = use->RequiredInputRepresentation(use_index);
+ use_double |= (rep_observed.IsDouble() || rep_required.IsDouble());
+ if (use_double && !FLAG_trace_representation) {
+ // Having seen one double is enough.
+ break;
+ }
+ if (FLAG_trace_representation) {
+ if (!rep_required.IsDouble() || rep_observed.IsDouble()) {
+ PrintF("#%d %s is used by #%d %s as %s%s\n",
+ id(), Mnemonic(), use->id(),
+ use->Mnemonic(), rep_observed.Mnemonic(),
+ (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+ } else {
+ PrintF("#%d %s is required by #%d %s as %s%s\n",
+ id(), Mnemonic(), use->id(),
+ use->Mnemonic(), rep_required.Mnemonic(),
+ (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+ }
+ }
+ }
+ return use_double ? Representation::Double() : Representation::Integer32();
+}
+
+
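
RepresentationFromUses above applies one rule: scan the uses of a flexible floor/round and pick a double output as soon as any use observes or requires a double, otherwise integer32. Reduced to its essence (Rep is a stand-in enum):

#include <cstdio>
#include <vector>

enum Rep { kNone, kInteger32, kDouble };  // stand-in for Representation

Rep FromUses(const std::vector<Rep>& observed) {
  for (Rep r : observed) {
    if (r == kDouble) return kDouble;  // having seen one double is enough
  }
  return kInteger32;
}

int main() {
  std::vector<Rep> uses = {kInteger32, kDouble, kInteger32};
  printf("prefer double: %d\n", FromUses(uses) == kDouble);  // 1
  return 0;
}
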
HInstruction* HPower::New(Zone* zone,
HValue* context,
HValue* left,
@@ -4003,7 +4369,7 @@ HInstruction* HDiv::New(
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
if (c_right->DoubleValue() != 0) {
double double_res = c_left->DoubleValue() / c_right->DoubleValue();
- if (TypeInfo::IsInt32Double(double_res)) {
+ if (IsInt32Double(double_res)) {
return H_CONSTANT_INT(double_res);
}
return H_CONSTANT_DOUBLE(double_res);
@@ -4233,7 +4599,7 @@ void HPhi::Verify() {
void HSimulate::Verify() {
HInstruction::Verify();
- ASSERT(HasAstId());
+ ASSERT(HasAstId() || next()->IsEnterInlined());
}
@@ -4259,7 +4625,7 @@ HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
}
-HObjectAccess HObjectAccess::ForJSObjectOffset(int offset,
+HObjectAccess HObjectAccess::ForMapAndOffset(Handle<Map> map, int offset,
Representation representation) {
ASSERT(offset >= 0);
Portion portion = kInobject;
@@ -4269,7 +4635,34 @@ HObjectAccess HObjectAccess::ForJSObjectOffset(int offset,
} else if (offset == JSObject::kMapOffset) {
portion = kMaps;
}
- return HObjectAccess(portion, offset, representation);
+ bool existing_inobject_property = true;
+ if (!map.is_null()) {
+ existing_inobject_property = (offset <
+ map->instance_size() - map->unused_property_fields() * kPointerSize);
+ }
+ return HObjectAccess(portion, offset, representation, Handle<String>::null(),
+ false, existing_inobject_property);
+}
+
+
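
ForMapAndOffset decides whether an offset names an existing in-object property by comparing it against instance_size minus the unused property slots. With concrete (made-up) numbers:

#include <cstdio>

int main() {
  const int kPointerSize = 8;
  int instance_size = 64;
  int unused_property_fields = 2;
  int limit = instance_size - unused_property_fields * kPointerSize;  // 48

  int offsets[] = {40, 48, 56};
  for (int offset : offsets) {
    printf("offset %d: existing in-object property = %d\n",
           offset, offset < limit);  // 1, 0, 0
  }
  return 0;
}
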
+HObjectAccess HObjectAccess::ForAllocationSiteOffset(int offset) {
+ switch (offset) {
+ case AllocationSite::kTransitionInfoOffset:
+ return HObjectAccess(kInobject, offset, Representation::Tagged());
+ case AllocationSite::kNestedSiteOffset:
+ return HObjectAccess(kInobject, offset, Representation::Tagged());
+ case AllocationSite::kPretenureDataOffset:
+ return HObjectAccess(kInobject, offset, Representation::Smi());
+ case AllocationSite::kPretenureCreateCountOffset:
+ return HObjectAccess(kInobject, offset, Representation::Smi());
+ case AllocationSite::kDependentCodeOffset:
+ return HObjectAccess(kInobject, offset, Representation::Tagged());
+ case AllocationSite::kWeakNextOffset:
+ return HObjectAccess(kInobject, offset, Representation::Tagged());
+ default:
+ UNREACHABLE();
+ }
+ return HObjectAccess(kInobject, offset);
}
@@ -4300,12 +4693,14 @@ HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
Representation representation) {
ASSERT(offset >= 0);
- return HObjectAccess(kBackingStore, offset, representation);
+ return HObjectAccess(kBackingStore, offset, representation,
+ Handle<String>::null(), false, false);
}
HObjectAccess HObjectAccess::ForField(Handle<Map> map,
- LookupResult *lookup, Handle<String> name) {
+ LookupResult* lookup,
+ Handle<String> name) {
ASSERT(lookup->IsField() || lookup->IsTransitionToField());
int index;
Representation representation;
@@ -4325,11 +4720,12 @@ HObjectAccess HObjectAccess::ForField(Handle<Map> map,
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
int offset = (index * kPointerSize) + map->instance_size();
- return HObjectAccess(kInobject, offset, representation);
+ return HObjectAccess(kInobject, offset, representation, name, false, true);
} else {
// Non-negative property indices are in the properties array.
int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return HObjectAccess(kBackingStore, offset, representation, name);
+ return HObjectAccess(kBackingStore, offset, representation, name,
+ false, false);
}
}
@@ -4341,56 +4737,80 @@ HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
}
-void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
+void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
// set the appropriate GVN flags for a given load or store instruction
- if (is_store) {
+ if (access_type == STORE) {
// track dominating allocations in order to eliminate write barriers
- instr->SetGVNFlag(kDependsOnNewSpacePromotion);
+ instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion);
instr->SetFlag(HValue::kTrackSideEffectDominators);
} else {
// try to GVN loads, but don't hoist above map changes
instr->SetFlag(HValue::kUseGVN);
- instr->SetGVNFlag(kDependsOnMaps);
+ instr->SetDependsOnFlag(::v8::internal::kMaps);
}
switch (portion()) {
case kArrayLengths:
- instr->SetGVNFlag(is_store
- ? kChangesArrayLengths : kDependsOnArrayLengths);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kArrayLengths);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kArrayLengths);
+ }
break;
case kStringLengths:
- instr->SetGVNFlag(is_store
- ? kChangesStringLengths : kDependsOnStringLengths);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kStringLengths);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kStringLengths);
+ }
break;
case kInobject:
- instr->SetGVNFlag(is_store
- ? kChangesInobjectFields : kDependsOnInobjectFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kInobjectFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kInobjectFields);
+ }
break;
case kDouble:
- instr->SetGVNFlag(is_store
- ? kChangesDoubleFields : kDependsOnDoubleFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kDoubleFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kDoubleFields);
+ }
break;
case kBackingStore:
- instr->SetGVNFlag(is_store
- ? kChangesBackingStoreFields : kDependsOnBackingStoreFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kBackingStoreFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields);
+ }
break;
case kElementsPointer:
- instr->SetGVNFlag(is_store
- ? kChangesElementsPointer : kDependsOnElementsPointer);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kElementsPointer);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kElementsPointer);
+ }
break;
case kMaps:
- instr->SetGVNFlag(is_store
- ? kChangesMaps : kDependsOnMaps);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kMaps);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kMaps);
+ }
break;
case kExternalMemory:
- instr->SetGVNFlag(is_store
- ? kChangesExternalMemory : kDependsOnExternalMemory);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kExternalMemory);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kExternalMemory);
+ }
break;
}
}
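
Despite its length, the switch above encodes a single rule: a STORE sets the "changes" side of the portion's flag and a LOAD sets the "depends on" side, which is what lets GVN order and eliminate accesses. The rule in isolation (the flag names are stand-ins):

#include <cstdio>

enum AccessType { LOAD, STORE };
enum Flag { kMaps, kInobjectFields, kNumFlags };  // stand-in flag set

void SetGVNFlags(AccessType type, Flag f, bool* changes, bool* depends) {
  if (type == STORE) changes[f] = true;  // a store changes the portion
  else               depends[f] = true;  // a load depends on it
}

int main() {
  bool changes[kNumFlags] = {false}, depends[kNumFlags] = {false};
  SetGVNFlags(STORE, kMaps, changes, depends);
  SetGVNFlags(LOAD, kInobjectFields, changes, depends);
  printf("%d %d\n", changes[kMaps], depends[kInobjectFields]);  // 1 1
  return 0;
}
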
-void HObjectAccess::PrintTo(StringStream* stream) {
+void HObjectAccess::PrintTo(StringStream* stream) const {
stream->Add(".");
switch (portion()) {
@@ -4406,11 +4826,15 @@ void HObjectAccess::PrintTo(StringStream* stream) {
break;
case kDouble: // fall through
case kInobject:
- if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+ if (!name_.is_null()) {
+ stream->Add(String::cast(*name_)->ToCString().get());
+ }
stream->Add("[in-object]");
break;
case kBackingStore:
- if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+ if (!name_.is_null()) {
+ stream->Add(String::cast(*name_)->ToCString().get());
+ }
stream->Add("[backing-store]");
break;
case kExternalMemory:
diff --git a/chromium/v8/src/hydrogen-instructions.h b/chromium/v8/src/hydrogen-instructions.h
index 158e4c103ad..47ce499481d 100644
--- a/chromium/v8/src/hydrogen-instructions.h
+++ b/chromium/v8/src/hydrogen-instructions.h
@@ -1,51 +1,30 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_INSTRUCTIONS_H_
#define V8_HYDROGEN_INSTRUCTIONS_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "allocation.h"
-#include "code-stubs.h"
-#include "data-flow.h"
-#include "deoptimizer.h"
-#include "small-pointer-list.h"
-#include "string-stream.h"
-#include "unique.h"
-#include "v8conversions.h"
-#include "v8utils.h"
-#include "zone.h"
+#include "src/allocation.h"
+#include "src/code-stubs.h"
+#include "src/conversions.h"
+#include "src/data-flow.h"
+#include "src/deoptimizer.h"
+#include "src/hydrogen-types.h"
+#include "src/small-pointer-list.h"
+#include "src/string-stream.h"
+#include "src/unique.h"
+#include "src/utils.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
// Forward declarations.
class HBasicBlock;
+class HDiv;
class HEnvironment;
class HInferRepresentationPhase;
class HInstruction;
@@ -67,6 +46,7 @@ class LChunkBuilder;
V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
+ V(AllocateBlockContext) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -77,12 +57,9 @@ class LChunkBuilder;
V(BoundsCheck) \
V(BoundsCheckBaseIndexInformation) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallWithDescriptor) \
+ V(CallJSFunction) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -104,14 +81,15 @@ class LChunkBuilder;
V(CompareObjectEqAndBranch) \
V(CompareMap) \
V(Constant) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(Div) \
+ V(DoubleBits) \
V(DummyUse) \
- V(ElementsKind) \
V(EnterInlined) \
V(EnvironmentMarker) \
V(ForceRepresentation) \
@@ -119,8 +97,6 @@ class LChunkBuilder;
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -135,7 +111,6 @@ class LChunkBuilder;
V(IsUndetectableAndBranch) \
V(LeaveInlined) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -151,10 +126,9 @@ class LChunkBuilder;
V(Mod) \
V(Mul) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
- V(PushArgument) \
+ V(PushArguments) \
V(RegExpLiteral) \
V(Return) \
V(Ror) \
@@ -167,8 +141,8 @@ class LChunkBuilder;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
+ V(StoreFrameContext) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -179,7 +153,6 @@ class LChunkBuilder;
V(StringCompareAndBranch) \
V(Sub) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
@@ -188,11 +161,9 @@ class LChunkBuilder;
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(UseConst) \
- V(ValueOf) \
V(WrapReceiver)
#define GVN_TRACKED_FLAG_LIST(V) \
- V(Maps) \
V(NewSpacePromotion)
#define GVN_UNTRACKED_FLAG_LIST(V) \
@@ -208,9 +179,11 @@ class LChunkBuilder;
V(ElementsPointer) \
V(GlobalVars) \
V(InobjectFields) \
+ V(Maps) \
V(OsrEntries) \
V(ExternalMemory) \
- V(StringChars)
+ V(StringChars) \
+ V(TypedArrayElements)
#define DECLARE_ABSTRACT_INSTRUCTION(type) \
@@ -233,6 +206,9 @@ class LChunkBuilder;
}
+enum PropertyAccessType { LOAD, STORE };
+
+
class Range V8_FINAL : public ZoneObject {
public:
Range()
@@ -308,124 +284,6 @@ class Range V8_FINAL : public ZoneObject {
};
-class HType V8_FINAL {
- public:
- static HType None() { return HType(kNone); }
- static HType Tagged() { return HType(kTagged); }
- static HType TaggedPrimitive() { return HType(kTaggedPrimitive); }
- static HType TaggedNumber() { return HType(kTaggedNumber); }
- static HType Smi() { return HType(kSmi); }
- static HType HeapNumber() { return HType(kHeapNumber); }
- static HType String() { return HType(kString); }
- static HType Boolean() { return HType(kBoolean); }
- static HType NonPrimitive() { return HType(kNonPrimitive); }
- static HType JSArray() { return HType(kJSArray); }
- static HType JSObject() { return HType(kJSObject); }
-
- // Return the weakest (least precise) common type.
- HType Combine(HType other) {
- return HType(static_cast<Type>(type_ & other.type_));
- }
-
- bool Equals(const HType& other) const {
- return type_ == other.type_;
- }
-
- bool IsSubtypeOf(const HType& other) {
- return Combine(other).Equals(other);
- }
-
- bool IsTaggedPrimitive() const {
- return ((type_ & kTaggedPrimitive) == kTaggedPrimitive);
- }
-
- bool IsTaggedNumber() const {
- return ((type_ & kTaggedNumber) == kTaggedNumber);
- }
-
- bool IsSmi() const {
- return ((type_ & kSmi) == kSmi);
- }
-
- bool IsHeapNumber() const {
- return ((type_ & kHeapNumber) == kHeapNumber);
- }
-
- bool IsString() const {
- return ((type_ & kString) == kString);
- }
-
- bool IsNonString() const {
- return IsTaggedPrimitive() || IsSmi() || IsHeapNumber() ||
- IsBoolean() || IsJSArray();
- }
-
- bool IsBoolean() const {
- return ((type_ & kBoolean) == kBoolean);
- }
-
- bool IsNonPrimitive() const {
- return ((type_ & kNonPrimitive) == kNonPrimitive);
- }
-
- bool IsJSArray() const {
- return ((type_ & kJSArray) == kJSArray);
- }
-
- bool IsJSObject() const {
- return ((type_ & kJSObject) == kJSObject);
- }
-
- bool IsHeapObject() const {
- return IsHeapNumber() || IsString() || IsBoolean() || IsNonPrimitive();
- }
-
- bool ToStringOrToNumberCanBeObserved(Representation representation) {
- switch (type_) {
- case kTaggedPrimitive: // fallthru
- case kTaggedNumber: // fallthru
- case kSmi: // fallthru
- case kHeapNumber: // fallthru
- case kString: // fallthru
- case kBoolean:
- return false;
- case kJSArray: // fallthru
- case kJSObject:
- return true;
- case kTagged:
- break;
- }
- return !representation.IsSmiOrInteger32() && !representation.IsDouble();
- }
-
- static HType TypeFromValue(Handle<Object> value);
-
- const char* ToString();
-
- private:
- enum Type {
- kNone = 0x0, // 0000 0000 0000 0000
- kTagged = 0x1, // 0000 0000 0000 0001
- kTaggedPrimitive = 0x5, // 0000 0000 0000 0101
- kTaggedNumber = 0xd, // 0000 0000 0000 1101
- kSmi = 0x1d, // 0000 0000 0001 1101
- kHeapNumber = 0x2d, // 0000 0000 0010 1101
- kString = 0x45, // 0000 0000 0100 0101
- kBoolean = 0x85, // 0000 0000 1000 0101
- kNonPrimitive = 0x101, // 0000 0001 0000 0001
- kJSObject = 0x301, // 0000 0011 0000 0001
- kJSArray = 0x701 // 0000 0111 0000 0001
- };
-
- // Make sure type fits in int16.
- STATIC_ASSERT(kJSArray < (1 << (2 * kBitsPerByte)));
-
- explicit HType(Type t) : type_(t) { }
-
- int16_t type_;
-};
-
-
class HUseListNode: public ZoneObject {
public:
HUseListNode(HValue* value, int index, HUseListNode* tail)
@@ -482,23 +340,28 @@ class HUseIterator V8_FINAL BASE_EMBEDDED {
};
-// There must be one corresponding kDepends flag for every kChanges flag and
-// the order of the kChanges flags must be exactly the same as of the kDepends
-// flags. All tracked flags should appear before untracked ones.
+// All tracked flags should appear before untracked ones.
enum GVNFlag {
// Declare global value numbering flags.
-#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
+#define DECLARE_FLAG(Type) k##Type,
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
- kAfterLastFlag,
- kLastFlag = kAfterLastFlag - 1,
-#define COUNT_FLAG(type) + 1
- kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
+#define COUNT_FLAG(Type) + 1
+ kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG),
+ kNumberOfUntrackedSideEffects = 0 GVN_UNTRACKED_FLAG_LIST(COUNT_FLAG),
#undef COUNT_FLAG
+ kNumberOfFlags = kNumberOfTrackedSideEffects + kNumberOfUntrackedSideEffects
};
+static inline GVNFlag GVNFlagFromInt(int i) {
+ ASSERT(i >= 0);
+ ASSERT(i < kNumberOfFlags);
+ return static_cast<GVNFlag>(i);
+}
+
+
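
The rewritten enum counts its own members with the COUNT_FLAG X-macro, which expands each list entry to "+ 1". A compilable miniature with a made-up flag list:

#include <cstdio>

// Made-up flag lists; only the macro mechanics match the header above.
#define TRACKED_LIST(V) V(NewSpacePromotion)
#define UNTRACKED_LIST(V) V(Calls) V(Maps) V(ExternalMemory)

enum GVNFlag {
#define DECLARE_FLAG(Type) k##Type,
  TRACKED_LIST(DECLARE_FLAG)
  UNTRACKED_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
#define COUNT_FLAG(Type) + 1
  kNumberOfTrackedSideEffects = 0 TRACKED_LIST(COUNT_FLAG),
  kNumberOfUntrackedSideEffects = 0 UNTRACKED_LIST(COUNT_FLAG),
#undef COUNT_FLAG
  kNumberOfFlags = kNumberOfTrackedSideEffects + kNumberOfUntrackedSideEffects
};

int main() {
  printf("%d + %d = %d\n", kNumberOfTrackedSideEffects,
         kNumberOfUntrackedSideEffects, kNumberOfFlags);  // 1 + 3 = 4
  return 0;
}
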
class DecompositionResult V8_FINAL BASE_EMBEDDED {
public:
DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
@@ -544,7 +407,62 @@ class DecompositionResult V8_FINAL BASE_EMBEDDED {
};
-typedef EnumSet<GVNFlag, int64_t> GVNFlagSet;
+typedef EnumSet<GVNFlag, int32_t> GVNFlagSet;
+
+
+// This class encapsulates the encoding and decoding of the source positions
+// from which hydrogen values originated.
+// When FLAG_hydrogen_track_positions is set, this object encodes the
+// identifier of the inlining and the absolute offset from the start of the
+// inlined function.
+// When the flag is not set, we simply track the absolute offset from the
+// script start.
+class HSourcePosition {
+ public:
+ HSourcePosition(const HSourcePosition& other) : value_(other.value_) { }
+
+ static HSourcePosition Unknown() {
+ return HSourcePosition(RelocInfo::kNoPosition);
+ }
+
+ bool IsUnknown() const { return value_ == RelocInfo::kNoPosition; }
+
+ int position() const { return PositionField::decode(value_); }
+ void set_position(int position) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<int>(PositionField::update(value_, position));
+ } else {
+ value_ = position;
+ }
+ }
+
+ int inlining_id() const { return InliningIdField::decode(value_); }
+ void set_inlining_id(int inlining_id) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<int>(InliningIdField::update(value_, inlining_id));
+ }
+ }
+
+ int raw() const { return value_; }
+
+ void PrintTo(FILE* f);
+
+ private:
+ typedef BitField<int, 0, 9> InliningIdField;
+
+ // Offset from the start of the inlined function.
+ typedef BitField<int, 9, 22> PositionField;
+
+ // Only HPositionInfo can use this constructor.
+ explicit HSourcePosition(int value) : value_(value) { }
+
+ friend class HPositionInfo;
+
+ // If FLAG_hydrogen_track_positions is set, contains the bitfields
+ // InliningIdField and PositionField.
+ // Otherwise contains the absolute offset from the script start.
+ int value_;
+};
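A standalone sketch of how the two typedefs above pack an inlining id and an offset into a single int; the BitField helper here is a simplified stand-in for V8's:

#include <cassert>

// Simplified stand-in for V8's BitField<T, shift, size> helper.
template <class T, int shift, int size>
struct BitField {
  static unsigned mask() { return ((1U << size) - 1) << shift; }
  static unsigned update(unsigned previous, T value) {
    return (previous & ~mask()) | (static_cast<unsigned>(value) << shift);
  }
  static T decode(unsigned value) {
    return static_cast<T>((value & mask()) >> shift);
  }
};

// Same layout as HSourcePosition: 9 bits of inlining id, 22 bits of offset.
typedef BitField<int, 0, 9> InliningIdField;
typedef BitField<int, 9, 22> PositionField;

int main() {
  unsigned value = 0;
  value = InliningIdField::update(value, 3);   // third inlining record
  value = PositionField::update(value, 1234);  // offset 1234 in that function
  assert(InliningIdField::decode(value) == 3);
  assert(PositionField::decode(value) == 1234);
  return 0;
}
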
class HValue : public ZoneObject {
@@ -566,6 +484,9 @@ class HValue : public ZoneObject {
kCanOverflow,
kBailoutOnMinusZero,
kCanBeDivByZero,
+ kLeftCanBeMinInt,
+ kLeftCanBeNegative,
+ kLeftCanBePositive,
kAllowUndefinedAsNaN,
kIsArguments,
kTruncatingToInt32,
@@ -576,12 +497,16 @@ class HValue : public ZoneObject {
kIsDead,
// Instructions that are allowed to produce full range unsigned integer
// values are marked with the kUint32 flag. If an arithmetic shift or a load from
- // EXTERNAL_UNSIGNED_INT_ELEMENTS array is not marked with this flag
+ // EXTERNAL_UINT32_ELEMENTS array is not marked with this flag
// it will deoptimize if the result does not fit into the signed integer range.
// HGraph::ComputeSafeUint32Operations is responsible for setting this
// flag.
kUint32,
kHasNoObservableSideEffects,
+ // Indicates that an instruction shouldn't be replaced by optimization; this
+ // flag is useful to set in cases where recomputing a value is cheaper than
+ // extending the value's live range and spilling it.
+ kCantBeReplaced,
// Indicates the instruction is live during dead code elimination.
kIsLive,
@@ -595,18 +520,6 @@ class HValue : public ZoneObject {
STATIC_ASSERT(kLastFlag < kBitsPerInt);
- static const int kChangesToDependsFlagsLeftShift = 1;
-
- static GVNFlag ChangesFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2);
- }
- static GVNFlag DependsOnFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2 + 1);
- }
- static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
- return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
- }
-
static HValue* cast(HValue* value) { return value; }
enum Opcode {
@@ -631,21 +544,31 @@ class HValue : public ZoneObject {
HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE
+ bool IsBitwiseBinaryShift() {
+ return IsShl() || IsShr() || IsSar();
+ }
+
HValue(HType type = HType::Tagged())
: block_(NULL),
id_(kNoNumber),
type_(type),
use_list_(NULL),
range_(NULL),
+#ifdef DEBUG
+ range_poisoned_(false),
+#endif
flags_(0) {}
virtual ~HValue() {}
- virtual int position() const { return RelocInfo::kNoPosition; }
- virtual int operand_position(int index) const { return position(); }
+ virtual HSourcePosition position() const {
+ return HSourcePosition::Unknown();
+ }
+ virtual HSourcePosition operand_position(int index) const {
+ return position();
+ }
HBasicBlock* block() const { return block_; }
void SetBlock(HBasicBlock* block);
- int LoopWeight() const;
// Note: Never call this method for an unlinked value.
Isolate* isolate() const;
@@ -688,25 +611,6 @@ class HValue : public ZoneObject {
type_ = new_type;
}
- bool IsHeapObject() {
- return representation_.IsHeapObject() || type_.IsHeapObject();
- }
-
- // An operation needs to override this function iff:
- // 1) it can produce an int32 output.
- // 2) the true value of its output can potentially be minus zero.
- // The implementation must set a flag so that it bails out in the case where
- // it would otherwise output what should be a minus zero as an int32 zero.
- // If the operation also exists in a form that takes int32 and outputs int32
- // then the operation should return its input value so that we can propagate
- // back. There are three operations that need to propagate back to more than
- // one input. They are phi and binary div and mul. They always return NULL
- // and expect the caller to take care of things.
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- return NULL;
- }
-
// There are HInstructions that do not really change a value, they
// only add pieces of information to it (like bounds checks, map checks,
// smi checks...).
@@ -783,55 +687,53 @@ class HValue : public ZoneObject {
// of uses is non-empty.
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
- GVNFlagSet gvn_flags() const { return gvn_flags_; }
- void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
- void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
- bool CheckGVNFlag(GVNFlag f) const { return gvn_flags_.Contains(f); }
- void SetAllSideEffects() { gvn_flags_.Add(AllSideEffectsFlagSet()); }
+ GVNFlagSet ChangesFlags() const { return changes_flags_; }
+ GVNFlagSet DependsOnFlags() const { return depends_on_flags_; }
+ void SetChangesFlag(GVNFlag f) { changes_flags_.Add(f); }
+ void SetDependsOnFlag(GVNFlag f) { depends_on_flags_.Add(f); }
+ void ClearChangesFlag(GVNFlag f) { changes_flags_.Remove(f); }
+ void ClearDependsOnFlag(GVNFlag f) { depends_on_flags_.Remove(f); }
+ bool CheckChangesFlag(GVNFlag f) const {
+ return changes_flags_.Contains(f);
+ }
+ bool CheckDependsOnFlag(GVNFlag f) const {
+ return depends_on_flags_.Contains(f);
+ }
+ void SetAllSideEffects() { changes_flags_.Add(AllSideEffectsFlagSet()); }
void ClearAllSideEffects() {
- gvn_flags_.Remove(AllSideEffectsFlagSet());
+ changes_flags_.Remove(AllSideEffectsFlagSet());
}
bool HasSideEffects() const {
- return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
+ return changes_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
}
bool HasObservableSideEffects() const {
return !CheckFlag(kHasNoObservableSideEffects) &&
- gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
- }
-
- GVNFlagSet DependsOnFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllDependsOnFlagSet());
- return result;
+ changes_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
}
GVNFlagSet SideEffectFlags() const {
- GVNFlagSet result = gvn_flags_;
+ GVNFlagSet result = ChangesFlags();
result.Intersect(AllSideEffectsFlagSet());
return result;
}
- GVNFlagSet ChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
- return result;
- }
-
GVNFlagSet ObservableChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
+ GVNFlagSet result = ChangesFlags();
result.Intersect(AllObservableSideEffectsFlagSet());
return result;
}
- Range* range() const { return range_; }
- // TODO(svenpanne) We should really use the null object pattern here.
- bool HasRange() const { return range_ != NULL; }
- bool CanBeNegative() const { return !HasRange() || range()->CanBeNegative(); }
- bool CanBeZero() const { return !HasRange() || range()->CanBeZero(); }
- bool RangeCanInclude(int value) const {
- return !HasRange() || range()->Includes(value);
+ Range* range() const {
+ ASSERT(!range_poisoned_);
+ return range_;
+ }
+ bool HasRange() const {
+ ASSERT(!range_poisoned_);
+ return range_ != NULL;
}
+#ifdef DEBUG
+ void PoisonRange() { range_poisoned_ = true; }
+#endif
void AddNewRange(Range* r, Zone* zone);
void RemoveLastAddedRange();
void ComputeInitialRange(Zone* zone);
@@ -863,7 +765,6 @@ class HValue : public ZoneObject {
virtual void PrintTo(StringStream* stream) = 0;
void PrintNameTo(StringStream* stream);
void PrintTypeTo(StringStream* stream);
- void PrintRangeTo(StringStream* stream);
void PrintChangesTo(StringStream* stream);
const char* Mnemonic() const;
@@ -885,9 +786,11 @@ class HValue : public ZoneObject {
// This function must be overridden for instructions which have the
// kTrackSideEffectDominators flag set, to track instructions that dominate
// side effects.
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ // It returns true if it removed an instruction which had side effects.
+ virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
UNREACHABLE();
+ return false;
}
// Check if this instruction has some reason that prevents elimination.
@@ -910,13 +813,13 @@ class HValue : public ZoneObject {
// Returns true conservatively if the program might be able to observe a
// ToString() operation on this value.
bool ToStringCanBeObserved() const {
- return type().ToStringOrToNumberCanBeObserved(representation());
+ return ToStringOrToNumberCanBeObserved();
}
// Returns true conservatively if the program might be able to observe a
// ToNumber() operation on this value.
bool ToNumberCanBeObserved() const {
- return type().ToStringOrToNumberCanBeObserved(representation());
+ return ToStringOrToNumberCanBeObserved();
}
MinusZeroMode GetMinusZeroMode() {
@@ -932,10 +835,16 @@ class HValue : public ZoneObject {
return false;
}
+ bool ToStringOrToNumberCanBeObserved() const {
+ if (type().IsTaggedPrimitive()) return false;
+ if (type().IsJSObject()) return true;
+ return !representation().IsSmiOrInteger32() && !representation().IsDouble();
+ }
+
virtual Representation RepresentationFromInputs() {
return representation();
}
- Representation RepresentationFromUses();
+ virtual Representation RepresentationFromUses();
Representation RepresentationFromUseRequirements();
bool HasNonSmiUse();
virtual void UpdateRepresentation(Representation new_rep,
@@ -958,20 +867,9 @@ class HValue : public ZoneObject {
representation_ = r;
}
- static GVNFlagSet AllDependsOnFlagSet() {
+ static GVNFlagSet AllFlagSet() {
GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kDependsOn##type);
- GVN_TRACKED_FLAG_LIST(ADD_FLAG)
- GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
-#undef ADD_FLAG
- return result;
- }
-
- static GVNFlagSet AllChangesFlagSet() {
- GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kChanges##type);
+#define ADD_FLAG(Type) result.Add(k##Type);
GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
@@ -980,19 +878,19 @@ class HValue : public ZoneObject {
// A flag mask to mark an instruction as having arbitrary side effects.
static GVNFlagSet AllSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesOsrEntries);
+ GVNFlagSet result = AllFlagSet();
+ result.Remove(kOsrEntries);
return result;
}
// A flag mask of all side effects that can make observable changes in
// an executing program (i.e. are not safe to repeat, move or remove).
static GVNFlagSet AllObservableSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesNewSpacePromotion);
- result.Remove(kChangesElementsKind);
- result.Remove(kChangesElementsPointer);
- result.Remove(kChangesMaps);
+ GVNFlagSet result = AllFlagSet();
+ result.Remove(kNewSpacePromotion);
+ result.Remove(kElementsKind);
+ result.Remove(kElementsPointer);
+ result.Remove(kMaps);
return result;
}
@@ -1012,8 +910,12 @@ class HValue : public ZoneObject {
HType type_;
HUseListNode* use_list_;
Range* range_;
+#ifdef DEBUG
+ bool range_poisoned_;
+#endif
int flags_;
- GVNFlagSet gvn_flags_;
+ GVNFlagSet changes_flags_;
+ GVNFlagSet depends_on_flags_;
private:
virtual bool IsDeletable() const { return false; }
@@ -1063,6 +965,18 @@ class HValue : public ZoneObject {
return new(zone) I(p1, p2, p3, p4, p5); \
}
+#define DECLARE_INSTRUCTION_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4, \
+ P5 p5, \
+ P6 p6) { \
+ return new(zone) I(p1, p2, p3, p4, p5, p6); \
+ }
+
#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
static I* New(Zone* zone, HValue* context) { \
return new(zone) I(context); \
@@ -1112,25 +1026,22 @@ class HValue : public ZoneObject {
// In the first case it contains the instruction's position as a tagged value.
// In the second case it points to an array which contains the instruction's
// position and the operands' positions.
-// TODO(vegorov): what we really want to track here is a combination of
-// source position and a script id because cross script inlining can easily
-// result in optimized functions composed of several scripts.
class HPositionInfo {
public:
explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
- int position() const {
+ HSourcePosition position() const {
if (has_operand_positions()) {
- return static_cast<int>(operand_positions()[kInstructionPosIndex]);
+ return operand_positions()[kInstructionPosIndex];
}
- return static_cast<int>(UntagPosition(data_));
+ return HSourcePosition(static_cast<int>(UntagPosition(data_)));
}
- void set_position(int pos) {
+ void set_position(HSourcePosition pos) {
if (has_operand_positions()) {
operand_positions()[kInstructionPosIndex] = pos;
} else {
- data_ = TagPosition(pos);
+ data_ = TagPosition(pos.raw());
}
}
@@ -1140,27 +1051,27 @@ class HPositionInfo {
}
const int length = kFirstOperandPosIndex + operand_count;
- intptr_t* positions =
- zone->NewArray<intptr_t>(length);
+ HSourcePosition* positions =
+ zone->NewArray<HSourcePosition>(length);
for (int i = 0; i < length; i++) {
- positions[i] = RelocInfo::kNoPosition;
+ positions[i] = HSourcePosition::Unknown();
}
- const int pos = position();
+ const HSourcePosition pos = position();
data_ = reinterpret_cast<intptr_t>(positions);
set_position(pos);
ASSERT(has_operand_positions());
}
- int operand_position(int idx) const {
+ HSourcePosition operand_position(int idx) const {
if (!has_operand_positions()) {
return position();
}
- return static_cast<int>(*operand_position_slot(idx));
+ return *operand_position_slot(idx);
}
- void set_operand_position(int idx, int pos) {
+ void set_operand_position(int idx, HSourcePosition pos) {
*operand_position_slot(idx) = pos;
}
@@ -1168,7 +1079,7 @@ class HPositionInfo {
static const intptr_t kInstructionPosIndex = 0;
static const intptr_t kFirstOperandPosIndex = 1;
- intptr_t* operand_position_slot(int idx) const {
+ HSourcePosition* operand_position_slot(int idx) const {
ASSERT(has_operand_positions());
return &(operand_positions()[kFirstOperandPosIndex + idx]);
}
@@ -1177,9 +1088,9 @@ class HPositionInfo {
return !IsTaggedPosition(data_);
}
- intptr_t* operand_positions() const {
+ HSourcePosition* operand_positions() const {
ASSERT(has_operand_positions());
- return reinterpret_cast<intptr_t*>(data_);
+ return reinterpret_cast<HSourcePosition*>(data_);
}
static const intptr_t kPositionTag = 1;
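kPositionTag is ordinary low-bit tagging: a position is shifted left with bit 0 set, while a pointer to the operand-position array keeps bit 0 clear thanks to alignment, so one intptr_t can hold either. A self-contained sketch of the trick:

#include <assert.h>
#include <stdint.h>

const intptr_t kTag = 1;

static intptr_t TagValue(int value) {
  return (static_cast<intptr_t>(value) << 1) | kTag;
}
static bool IsTagged(intptr_t data) { return (data & kTag) != 0; }
static int UntagValue(intptr_t data) {
  assert(IsTagged(data));
  return static_cast<int>(data >> 1);
}

int main() {
  intptr_t data = TagValue(42);              // small integer: tagged
  assert(IsTagged(data) && UntagValue(data) == 42);

  int storage[4] = { 0 };                    // stands in for the operand array
  data = reinterpret_cast<intptr_t>(storage);
  assert(!IsTagged(data));                   // aligned pointer: bit 0 clear
  return 0;
}
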
@@ -1211,32 +1122,46 @@ class HInstruction : public HValue {
bool IsLinked() const { return block() != NULL; }
void Unlink();
+
void InsertBefore(HInstruction* next);
+
+ template<class T> T* Prepend(T* instr) {
+ instr->InsertBefore(this);
+ return instr;
+ }
+
void InsertAfter(HInstruction* previous);
+ template<class T> T* Append(T* instr) {
+ instr->InsertAfter(this);
+ return instr;
+ }
+
// The position is a write-once variable.
- virtual int position() const V8_OVERRIDE {
- return position_.position();
+ virtual HSourcePosition position() const V8_OVERRIDE {
+ return HSourcePosition(position_.position());
}
bool has_position() const {
- return position_.position() != RelocInfo::kNoPosition;
+ return !position().IsUnknown();
}
- void set_position(int position) {
+ void set_position(HSourcePosition position) {
ASSERT(!has_position());
- ASSERT(position != RelocInfo::kNoPosition);
+ ASSERT(!position.IsUnknown());
position_.set_position(position);
}
- virtual int operand_position(int index) const V8_OVERRIDE {
- const int pos = position_.operand_position(index);
- return (pos != RelocInfo::kNoPosition) ? pos : position();
+ virtual HSourcePosition operand_position(int index) const V8_OVERRIDE {
+ const HSourcePosition pos = position_.operand_position(index);
+ return pos.IsUnknown() ? position() : pos;
}
- void set_operand_position(Zone* zone, int index, int pos) {
+ void set_operand_position(Zone* zone, int index, HSourcePosition pos) {
ASSERT(0 <= index && index < OperandCount());
position_.ensure_storage_for_operand_positions(zone, OperandCount());
position_.set_operand_position(index, pos);
}
+ bool Dominates(HInstruction* other);
+ bool CanTruncateToSmi() const { return CheckFlag(kTruncatingToSmi); }
bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
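The Prepend/Append helpers added above return the inserted instruction with its concrete static type, which is what keeps call sites such as instr->Append(new(zone) HCheckMaps(...)) cast-free. A toy intrusive-list version of the same pattern:

#include <assert.h>
#include <stddef.h>

struct Node {
  Node() : next(NULL), prev(NULL) {}

  void InsertBefore(Node* n) {
    prev = n->prev;
    next = n;
    if (n->prev != NULL) n->prev->next = this;
    n->prev = this;
  }
  void InsertAfter(Node* n) {
    next = n->next;
    prev = n;
    if (n->next != NULL) n->next->prev = this;
    n->next = this;
  }

  // Returning T* (not Node*) preserves the inserted node's static type.
  template <class T> T* Prepend(T* node) {
    node->InsertBefore(this);
    return node;
  }
  template <class T> T* Append(T* node) {
    node->InsertAfter(this);
    return node;
  }

  Node* next;
  Node* prev;
};

struct CheckNode : Node { bool checked; };

int main() {
  Node anchor;
  CheckNode check;
  CheckNode* typed = anchor.Append(&check);  // no cast needed at the call site
  typed->checked = true;
  assert(anchor.next == typed && typed->prev == &anchor);
  return 0;
}
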
@@ -1245,7 +1170,9 @@ class HInstruction : public HValue {
virtual void Verify() V8_OVERRIDE;
#endif
- virtual bool IsCall() { return false; }
+ bool CanDeoptimize();
+
+ virtual bool HasStackCheck() { return false; }
DECLARE_ABSTRACT_INSTRUCTION(Instruction)
@@ -1255,7 +1182,7 @@ class HInstruction : public HValue {
next_(NULL),
previous_(NULL),
position_(RelocInfo::kNoPosition) {
- SetGVNFlag(kDependsOnOsrEntries);
+ SetDependsOnFlag(kOsrEntries);
}
virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
@@ -1501,6 +1428,8 @@ class HBranch V8_FINAL : public HUnaryControlInstruction {
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
}
@@ -1527,9 +1456,25 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
HBasicBlock*, HBasicBlock*);
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ if (known_successor_index() != kNoKnownSuccessorIndex) {
+ *block = SuccessorAt(known_successor_index());
+ return true;
+ }
+ *block = NULL;
+ return false;
+ }
+
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ static const int kNoKnownSuccessorIndex = -1;
+ int known_successor_index() const { return known_successor_index_; }
+ void set_known_successor_index(int known_successor_index) {
+ known_successor_index_ = known_successor_index;
+ }
+
Unique<Map> map() const { return map_; }
+ bool map_is_stable() const { return map_is_stable_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -1546,11 +1491,14 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
HBasicBlock* true_target = NULL,
HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
- map_(Unique<Map>(map)) {
- ASSERT(!map.is_null());
+ known_successor_index_(kNoKnownSuccessorIndex),
+ map_is_stable_(map->is_stable()),
+ map_(Unique<Map>::CreateImmovable(map)) {
set_representation(Representation::Tagged());
}
+ int known_successor_index_ : 31;
+ bool map_is_stable_ : 1;
Unique<Map> map_;
};
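The known_successor_index machinery lets a compare whose outcome is statically known report a single successor, so the graph builder can wire that edge directly. A self-contained sketch of the KnownSuccessorBlock protocol, with the types reduced to toys:

#include <assert.h>
#include <stddef.h>

struct Block { int id; };

class CompareMap {
 public:
  static const int kNoKnownSuccessorIndex = -1;

  CompareMap(Block* if_true, Block* if_false)
      : known_successor_index_(kNoKnownSuccessorIndex) {
    successors_[0] = if_true;
    successors_[1] = if_false;
  }

  void set_known_successor_index(int index) { known_successor_index_ = index; }

  // Reports a statically known successor, if any, via *block.
  bool KnownSuccessorBlock(Block** block) const {
    if (known_successor_index_ != kNoKnownSuccessorIndex) {
      *block = successors_[known_successor_index_];
      return true;
    }
    *block = NULL;
    return false;
  }

 private:
  int known_successor_index_;
  Block* successors_[2];
};

int main() {
  Block if_true = { 1 }, if_false = { 2 };
  CompareMap cmp(&if_true, &if_false);
  Block* target = NULL;
  assert(!cmp.KnownSuccessorBlock(&target));
  cmp.set_known_successor_index(0);  // e.g. the map check is known to pass
  assert(cmp.KnownSuccessorBlock(&target) && target == &if_true);
  return 0;
}
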
@@ -1638,28 +1586,6 @@ class HUnaryOperation : public HTemplateInstruction<1> {
};
-class HThrow V8_FINAL : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HThrow, HValue*);
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw)
-
- private:
- HThrow(HValue* context, HValue* value) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- SetAllSideEffects();
- }
-};
-
-
class HUseConst V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
@@ -1682,9 +1608,6 @@ class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
HValue* value() { return OperandAt(0); }
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation(); // Same as the output representation.
}
@@ -1713,7 +1636,8 @@ class HChange V8_FINAL : public HUnaryOperation {
ASSERT(!value->representation().Equals(to));
set_representation(to);
SetFlag(kUseGVN);
- if (is_truncating_to_smi) {
+ SetFlag(kCanOverflow);
+ if (is_truncating_to_smi && to.IsSmi()) {
SetFlag(kTruncatingToSmi);
SetFlag(kTruncatingToInt32);
}
@@ -1722,7 +1646,7 @@ class HChange V8_FINAL : public HUnaryOperation {
set_type(HType::Smi());
} else {
set_type(HType::TaggedNumber());
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
}
@@ -1730,8 +1654,6 @@ class HChange V8_FINAL : public HUnaryOperation {
return CheckUsesForFlag(kAllowUndefinedAsNaN);
}
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
virtual HType CalculateInferredType() V8_OVERRIDE;
virtual HValue* Canonicalize() V8_OVERRIDE;
@@ -1785,6 +1707,65 @@ class HClampToUint8 V8_FINAL : public HUnaryOperation {
};
+class HDoubleBits V8_FINAL : public HUnaryOperation {
+ public:
+ enum Bits { HIGH, LOW };
+ DECLARE_INSTRUCTION_FACTORY_P2(HDoubleBits, HValue*, Bits);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Double();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits)
+
+ Bits bits() { return bits_; }
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ return other->IsDoubleBits() && HDoubleBits::cast(other)->bits() == bits();
+ }
+
+ private:
+ HDoubleBits(HValue* value, Bits bits)
+ : HUnaryOperation(value), bits_(bits) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ Bits bits_;
+};
+
+
+class HConstructDouble V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P2(HConstructDouble, HValue*, HValue*);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Integer32();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble)
+
+ HValue* hi() { return OperandAt(0); }
+ HValue* lo() { return OperandAt(1); }
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HConstructDouble(HValue* hi, HValue* lo) {
+ set_representation(Representation::Double());
+ SetFlag(kUseGVN);
+ SetOperandAt(0, hi);
+ SetOperandAt(1, lo);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+};
+
+
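HDoubleBits and HConstructDouble simply split an IEEE-754 double into its high and low 32-bit words and reassemble it. The portable equivalent of what these instructions compute:

#include <assert.h>
#include <string.h>
#include <stdint.h>

static uint32_t DoubleBits(double value, bool high) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));  // portable raw reinterpretation
  return high ? static_cast<uint32_t>(bits >> 32)
              : static_cast<uint32_t>(bits);
}

static double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double value;
  memcpy(&value, &bits, sizeof(value));
  return value;
}

int main() {
  double pi = 3.1415926535897931;
  uint32_t hi = DoubleBits(pi, true);
  uint32_t lo = DoubleBits(pi, false);
  assert(ConstructDouble(hi, lo) == pi);  // bit-exact round trip
  return 0;
}
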
enum RemovableSimulate {
REMOVABLE_SIMULATE,
FIXED_SIMULATE
@@ -1970,7 +1951,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
private:
HStackCheck(HValue* context, Type type) : type_(type) {
SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
Type type_;
@@ -1978,8 +1959,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
enum InliningKind {
- NORMAL_RETURN, // Normal function/method call and return.
- DROP_EXTRA_ON_RETURN, // Drop an extra value from the environment on return.
+ NORMAL_RETURN, // Drop the function from the environment on return.
CONSTRUCT_CALL_RETURN, // Either use allocated receiver or return value.
GETTER_CALL_RETURN, // Returning from a getter, need to restore context.
SETTER_CALL_RETURN // Use the RHS of the assignment as the return value.
@@ -1993,16 +1973,16 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
public:
static HEnterInlined* New(Zone* zone,
HValue* context,
+ BailoutId return_id,
Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
InliningKind inlining_kind,
Variable* arguments_var,
- HArgumentsObject* arguments_object,
- bool undefined_receiver) {
- return new(zone) HEnterInlined(closure, arguments_count, function,
- inlining_kind, arguments_var,
- arguments_object, undefined_receiver, zone);
+ HArgumentsObject* arguments_object) {
+ return new(zone) HEnterInlined(return_id, closure, arguments_count,
+ function, inlining_kind, arguments_var,
+ arguments_object, zone);
}
void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
@@ -2016,7 +1996,7 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
void set_arguments_pushed() { arguments_pushed_ = true; }
FunctionLiteral* function() const { return function_; }
InliningKind inlining_kind() const { return inlining_kind_; }
- bool undefined_receiver() const { return undefined_receiver_; }
+ BailoutId ReturnId() const { return return_id_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
@@ -2028,25 +2008,26 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
private:
- HEnterInlined(Handle<JSFunction> closure,
+ HEnterInlined(BailoutId return_id,
+ Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
InliningKind inlining_kind,
Variable* arguments_var,
HArgumentsObject* arguments_object,
- bool undefined_receiver,
Zone* zone)
- : closure_(closure),
+ : return_id_(return_id),
+ closure_(closure),
arguments_count_(arguments_count),
arguments_pushed_(false),
function_(function),
inlining_kind_(inlining_kind),
arguments_var_(arguments_var),
arguments_object_(arguments_object),
- undefined_receiver_(undefined_receiver),
return_targets_(2, zone) {
}
+ BailoutId return_id_;
Handle<JSFunction> closure_;
int arguments_count_;
bool arguments_pushed_;
@@ -2054,7 +2035,6 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
InliningKind inlining_kind_;
Variable* arguments_var_;
HArgumentsObject* arguments_object_;
- bool undefined_receiver_;
ZoneList<HBasicBlock*> return_targets_;
};
@@ -2082,64 +2062,87 @@ class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> {
};
-class HPushArgument V8_FINAL : public HUnaryOperation {
+class HPushArguments V8_FINAL : public HInstruction {
public:
- DECLARE_INSTRUCTION_FACTORY_P1(HPushArgument, HValue*);
+ static HPushArguments* New(Zone* zone, HValue* context) {
+ return new(zone) HPushArguments(zone);
+ }
+ static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1) {
+ HPushArguments* instr = new(zone) HPushArguments(zone);
+ instr->AddInput(arg1);
+ return instr;
+ }
+ static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1,
+ HValue* arg2) {
+ HPushArguments* instr = new(zone) HPushArguments(zone);
+ instr->AddInput(arg1);
+ instr->AddInput(arg2);
+ return instr;
+ }
+ static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1,
+ HValue* arg2, HValue* arg3) {
+ HPushArguments* instr = new(zone) HPushArguments(zone);
+ instr->AddInput(arg1);
+ instr->AddInput(arg2);
+ instr->AddInput(arg3);
+ return instr;
+ }
+ static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1,
+ HValue* arg2, HValue* arg3, HValue* arg4) {
+ HPushArguments* instr = new(zone) HPushArguments(zone);
+ instr->AddInput(arg1);
+ instr->AddInput(arg2);
+ instr->AddInput(arg3);
+ instr->AddInput(arg4);
+ return instr;
+ }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual int argument_delta() const V8_OVERRIDE { return 1; }
- HValue* argument() { return OperandAt(0); }
+ virtual int argument_delta() const V8_OVERRIDE { return inputs_.length(); }
+ HValue* argument(int i) { return OperandAt(i); }
- DECLARE_CONCRETE_INSTRUCTION(PushArgument)
-
- private:
- explicit HPushArgument(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE {
+ return inputs_[i];
}
-};
-
-class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::None();
- }
+ void AddInput(HValue* value);
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
+ DECLARE_CONCRETE_INSTRUCTION(PushArguments)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE {
+ inputs_[i] = value;
+ }
private:
- HThisFunction() {
+ explicit HPushArguments(Zone* zone)
+ : HInstruction(HType::Tagged()), inputs_(4, zone) {
set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ ZoneList<HValue*> inputs_;
};
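HPushArguments folds what used to be a chain of single-value HPushArgument instructions into one instruction with a variable number of operands; the OperandCount, OperandAt and InternalSetOperandAt overrides back the generic operand protocol with a growable list. A compact sketch of that storage pattern, with std::vector standing in for the zone-allocated ZoneList:

#include <assert.h>
#include <stddef.h>
#include <vector>

class Value {};

class Instruction {
 public:
  virtual ~Instruction() {}
  virtual int OperandCount() const = 0;
  virtual Value* OperandAt(int i) const = 0;

 protected:
  virtual void InternalSetOperandAt(int i, Value* value) = 0;
  void SetOperandAt(int i, Value* value) {
    // The real SetOperandAt also maintains def-use lists; omitted here.
    InternalSetOperandAt(i, value);
  }
};

class PushArguments : public Instruction {
 public:
  void AddInput(Value* value) {
    inputs_.push_back(NULL);  // grow first, then route through SetOperandAt
    SetOperandAt(static_cast<int>(inputs_.size()) - 1, value);
  }
  virtual int OperandCount() const { return static_cast<int>(inputs_.size()); }
  virtual Value* OperandAt(int i) const { return inputs_[i]; }

 protected:
  virtual void InternalSetOperandAt(int i, Value* value) { inputs_[i] = value; }

 private:
  std::vector<Value*> inputs_;
};

int main() {
  Value a, b;
  PushArguments push;
  push.AddInput(&a);
  push.AddInput(&b);
  assert(push.OperandCount() == 2 && push.OperandAt(1) == &b);
  return 0;
}
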
-class HOuterContext V8_FINAL : public HUnaryOperation {
+class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
public:
- DECLARE_INSTRUCTION_FACTORY_P1(HOuterContext, HValue*);
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext);
+ DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ return Representation::None();
}
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
+
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
+ HThisFunction() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@@ -2180,53 +2183,6 @@ class HDeclareGlobals V8_FINAL : public HUnaryOperation {
};
-class HGlobalObject V8_FINAL : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(HGlobalObject);
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
-
- private:
- explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
-class HGlobalReceiver V8_FINAL : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HGlobalReceiver, HValue*);
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
-
- private:
- explicit HGlobalReceiver(HValue* global_object)
- : HUnaryOperation(global_object) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
template <int V>
class HCall : public HTemplateInstruction<V> {
public:
@@ -2248,8 +2204,6 @@ class HCall : public HTemplateInstruction<V> {
return -argument_count();
}
- virtual bool IsCall() V8_FINAL V8_OVERRIDE { return true; }
-
private:
int argument_count_;
};
@@ -2293,118 +2247,183 @@ class HBinaryCall : public HCall<2> {
};
-class HInvokeFunction V8_FINAL : public HBinaryCall {
+class HCallJSFunction V8_FINAL : public HCall<1> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
-
- HInvokeFunction(HValue* context,
- HValue* function,
- Handle<JSFunction> known_function,
- int argument_count)
- : HBinaryCall(context, function, argument_count),
- known_function_(known_function) {
- formal_parameter_count_ = known_function.is_null()
- ? 0 : known_function->shared()->formal_parameter_count();
- }
-
- static HInvokeFunction* New(Zone* zone,
+ static HCallJSFunction* New(Zone* zone,
HValue* context,
HValue* function,
- Handle<JSFunction> known_function,
- int argument_count) {
- return new(zone) HInvokeFunction(context, function,
- known_function, argument_count);
+ int argument_count,
+ bool pass_argument_count);
+
+ HValue* function() { return OperandAt(0); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
+ ASSERT(index == 0);
+ return Representation::Tagged();
}
- HValue* context() { return first(); }
- HValue* function() { return second(); }
- Handle<JSFunction> known_function() { return known_function_; }
- int formal_parameter_count() const { return formal_parameter_count_; }
+ bool pass_argument_count() const { return pass_argument_count_; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
+ virtual bool HasStackCheck() V8_FINAL V8_OVERRIDE {
+ return has_stack_check_;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction)
private:
- HInvokeFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
+ // The argument count includes the receiver.
+ HCallJSFunction(HValue* function,
+ int argument_count,
+ bool pass_argument_count,
+ bool has_stack_check)
+ : HCall<1>(argument_count),
+ pass_argument_count_(pass_argument_count),
+ has_stack_check_(has_stack_check) {
+ SetOperandAt(0, function);
}
- Handle<JSFunction> known_function_;
- int formal_parameter_count_;
+ bool pass_argument_count_;
+ bool has_stack_check_;
};
-class HCallConstantFunction V8_FINAL : public HCall<0> {
+class HCallWithDescriptor V8_FINAL : public HInstruction {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCallConstantFunction,
- Handle<JSFunction>,
- int);
+ static HCallWithDescriptor* New(Zone* zone, HValue* context,
+ HValue* target,
+ int argument_count,
+ const CallInterfaceDescriptor* descriptor,
+ const Vector<HValue*>& operands) {
+ ASSERT(operands.length() == descriptor->environment_length());
+ HCallWithDescriptor* res =
+ new(zone) HCallWithDescriptor(target, argument_count,
+ descriptor, operands, zone);
+ return res;
+ }
- Handle<JSFunction> function() const { return function_; }
- int formal_parameter_count() const { return formal_parameter_count_; }
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return values_.length(); }
+ virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE {
+ return values_[index];
+ }
- bool IsApplyFunction() const {
- return function_->code() ==
- function_->GetIsolate()->builtins()->builtin(Builtins::kFunctionApply);
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ int par_index = index - 1;
+ ASSERT(par_index < descriptor_->environment_length());
+ return descriptor_->GetParameterRepresentation(par_index);
+ }
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor)
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::None();
+ virtual HType CalculateInferredType() V8_FINAL V8_OVERRIDE {
+ return HType::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
+ virtual int argument_count() const {
+ return argument_count_;
+ }
- private:
- HCallConstantFunction(Handle<JSFunction> function, int argument_count)
- : HCall<0>(argument_count),
- function_(function),
- formal_parameter_count_(function->shared()->formal_parameter_count()) {}
+ virtual int argument_delta() const V8_OVERRIDE {
+ return -argument_count_;
+ }
- Handle<JSFunction> function_;
- int formal_parameter_count_;
-};
+ const CallInterfaceDescriptor* descriptor() const {
+ return descriptor_;
+ }
+ HValue* target() {
+ return OperandAt(0);
+ }
-class HCallKeyed V8_FINAL : public HBinaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallKeyed, HValue*, int);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- HValue* context() { return first(); }
- HValue* key() { return second(); }
+ private:
+ // The argument count includes the receiver.
+ HCallWithDescriptor(HValue* target,
+ int argument_count,
+ const CallInterfaceDescriptor* descriptor,
+ const Vector<HValue*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ values_(descriptor->environment_length() + 1, zone) {
+ argument_count_ = argument_count;
+ AddOperand(target, zone);
+ for (int i = 0; i < operands.length(); i++) {
+ AddOperand(operands[i], zone);
+ }
+ this->set_representation(Representation::Tagged());
+ this->SetAllSideEffects();
+ }
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
+ void AddOperand(HValue* v, Zone* zone) {
+ values_.Add(NULL, zone);
+ SetOperandAt(values_.length() - 1, v);
+ }
- private:
- HCallKeyed(HValue* context, HValue* key, int argument_count)
- : HBinaryCall(context, key, argument_count) {
+ void InternalSetOperandAt(int index,
+ HValue* value) V8_FINAL V8_OVERRIDE {
+ values_[index] = value;
}
+
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<HValue*> values_;
+ int argument_count_;
};
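HCallWithDescriptor moves knowledge of the calling convention out of the instruction: operand 0 is always the tagged call target, and every other operand's representation is looked up in a CallInterfaceDescriptor. A reduced sketch of that lookup, with Representation shrunk to an enum:

#include <assert.h>

enum Representation { kTagged, kInteger32, kDouble };

class CallInterfaceDescriptor {
 public:
  CallInterfaceDescriptor(const Representation* reps, int length)
      : reps_(reps), length_(length) {}
  int environment_length() const { return length_; }
  Representation GetParameterRepresentation(int index) const {
    assert(index >= 0 && index < length_);
    return reps_[index];
  }

 private:
  const Representation* reps_;
  int length_;
};

// Mirrors HCallWithDescriptor::RequiredInputRepresentation: operand 0 is
// the call target (always tagged), the rest are described parameters.
Representation RequiredInputRepresentation(
    const CallInterfaceDescriptor* descriptor, int index) {
  if (index == 0) return kTagged;
  return descriptor->GetParameterRepresentation(index - 1);
}

int main() {
  const Representation params[] = { kTagged, kInteger32 };
  CallInterfaceDescriptor descriptor(params, 2);
  assert(RequiredInputRepresentation(&descriptor, 0) == kTagged);
  assert(RequiredInputRepresentation(&descriptor, 2) == kInteger32);
  return 0;
}
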
-class HCallNamed V8_FINAL : public HUnaryCall {
+class HInvokeFunction V8_FINAL : public HBinaryCall {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNamed, Handle<String>, int);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ HInvokeFunction(HValue* context,
+ HValue* function,
+ Handle<JSFunction> known_function,
+ int argument_count)
+ : HBinaryCall(context, function, argument_count),
+ known_function_(known_function) {
+ formal_parameter_count_ = known_function.is_null()
+ ? 0 : known_function->shared()->formal_parameter_count();
+ has_stack_check_ = !known_function.is_null() &&
+ (known_function->code()->kind() == Code::FUNCTION ||
+ known_function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+ }
- HValue* context() { return value(); }
- Handle<String> name() const { return name_; }
+ static HInvokeFunction* New(Zone* zone,
+ HValue* context,
+ HValue* function,
+ Handle<JSFunction> known_function,
+ int argument_count) {
+ return new(zone) HInvokeFunction(context, function,
+ known_function, argument_count);
+ }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed)
+ HValue* context() { return first(); }
+ HValue* function() { return second(); }
+ Handle<JSFunction> known_function() { return known_function_; }
+ int formal_parameter_count() const { return formal_parameter_count_; }
- private:
- HCallNamed(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
+ virtual bool HasStackCheck() V8_FINAL V8_OVERRIDE {
+ return has_stack_check_;
}
- Handle<String> name_;
-};
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
+ private:
+ HInvokeFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count),
+ has_stack_check_(false) {
+ }
-enum CallMode {
- NORMAL_CALL,
- TAIL_CALL
+ Handle<JSFunction> known_function_;
+ int formal_parameter_count_;
+ bool has_stack_check_;
};
@@ -2412,74 +2431,24 @@ class HCallFunction V8_FINAL : public HBinaryCall {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(
- HCallFunction, HValue*, int, CallMode);
-
- bool IsTailCall() const { return call_mode_ == TAIL_CALL; }
+ HCallFunction, HValue*, int, CallFunctionFlags);
HValue* context() { return first(); }
HValue* function() { return second(); }
+ CallFunctionFlags function_flags() const { return function_flags_; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction)
- virtual int argument_delta() const V8_OVERRIDE {
- if (IsTailCall()) return 0;
- return -argument_count();
- }
+ virtual int argument_delta() const V8_OVERRIDE { return -argument_count(); }
private:
HCallFunction(HValue* context,
HValue* function,
int argument_count,
- CallMode mode = NORMAL_CALL)
- : HBinaryCall(context, function, argument_count), call_mode_(mode) {
+ CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS)
+ : HBinaryCall(context, function, argument_count), function_flags_(flags) {
}
- CallMode call_mode_;
-};
-
-
-class HCallGlobal V8_FINAL : public HUnaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallGlobal, Handle<String>, int);
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- HValue* context() { return value(); }
- Handle<String> name() const { return name_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
-
- private:
- HCallGlobal(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
-
- Handle<String> name_;
-};
-
-
-class HCallKnownGlobal V8_FINAL : public HCall<0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCallKnownGlobal, Handle<JSFunction>, int);
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<JSFunction> target() const { return target_; }
- int formal_parameter_count() const { return formal_parameter_count_; }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal)
-
- private:
- HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
- : HCall<0>(argument_count),
- target_(target),
- formal_parameter_count_(target->shared()->formal_parameter_count()) { }
-
- Handle<JSFunction> target_;
- int formal_parameter_count_;
+ CallFunctionFlags function_flags_;
};
@@ -2500,10 +2469,9 @@ class HCallNew V8_FINAL : public HBinaryCall {
class HCallNewArray V8_FINAL : public HBinaryCall {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray,
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallNewArray,
HValue*,
int,
- Handle<Cell>,
ElementsKind);
HValue* context() { return first(); }
@@ -2511,23 +2479,17 @@ class HCallNewArray V8_FINAL : public HBinaryCall {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Cell> property_cell() const {
- return type_cell_;
- }
-
ElementsKind elements_kind() const { return elements_kind_; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
private:
HCallNewArray(HValue* context, HValue* constructor, int argument_count,
- Handle<Cell> type_cell, ElementsKind elements_kind)
+ ElementsKind elements_kind)
: HBinaryCall(context, constructor, argument_count),
- elements_kind_(elements_kind),
- type_cell_(type_cell) {}
+ elements_kind_(elements_kind) {}
ElementsKind elements_kind_;
- Handle<Cell> type_cell_;
};
@@ -2588,31 +2550,9 @@ class HMapEnumLength V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value, HType::Smi()) {
set_representation(Representation::Smi());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- }
-
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
-class HElementsKind V8_FINAL : public HUnaryOperation {
- public:
- explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnElementsKind);
+ SetDependsOnFlag(kMaps);
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind)
-
- protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
-
- private:
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -2629,9 +2569,6 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
@@ -2643,12 +2580,11 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
case kMathPowHalf:
case kMathLog:
case kMathExp:
- case kMathSin:
- case kMathCos:
- case kMathTan:
return Representation::Double();
case kMathAbs:
return representation();
+ case kMathClz32:
+ return Representation::Integer32();
default:
UNREACHABLE();
return Representation::None();
@@ -2659,6 +2595,7 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual Representation RepresentationFromUses() V8_OVERRIDE;
virtual Representation RepresentationFromInputs() V8_OVERRIDE;
BuiltinFunctionId op() const { return op_; }
@@ -2673,6 +2610,15 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
}
private:
+ // Indicates whether we support a double (and int32) output for Math.floor
+ // and Math.round.
+ bool SupportsFlexibleFloorAndRound() const {
+#ifdef V8_TARGET_ARCH_ARM64
+ return true;
+#else
+ return false;
+#endif
+ }
HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
: HTemplateInstruction<2>(HType::TaggedNumber()), op_(op) {
SetOperandAt(0, context);
@@ -2680,6 +2626,13 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
switch (op) {
case kMathFloor:
case kMathRound:
+ if (SupportsFlexibleFloorAndRound()) {
+ SetFlag(kFlexibleRepresentation);
+ } else {
+ set_representation(Representation::Integer32());
+ }
+ break;
+ case kMathClz32:
set_representation(Representation::Integer32());
break;
case kMathAbs:
@@ -2687,16 +2640,9 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
SetFlag(kFlexibleRepresentation);
// TODO(svenpanne) This flag is actually only needed if representation()
// is tagged, and not when it is an unboxed double or unboxed integer.
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
break;
case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
- set_representation(Representation::Double());
- // These operations use the TranscendentalCache, so they may allocate.
- SetGVNFlag(kChangesNewSpacePromotion);
- break;
case kMathExp:
case kMathSqrt:
case kMathPowHalf:
@@ -2711,6 +2657,9 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ HValue* SimplifiedDividendForMathFloorOfDiv(HDiv* hdiv);
+ HValue* SimplifiedDivisorForMathFloorOfDiv(HDiv* hdiv);
+
BuiltinFunctionId op_;
};
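SupportsFlexibleFloorAndRound above gates the flexible-representation path on the target: at this point only the ARM64 backend implements both the double and int32 outputs. A standalone sketch of how such a gate drives the representation choice:

#include <stdio.h>

enum Representation { kFlexible, kInteger32 };

static bool SupportsFlexibleFloorAndRound() {
#ifdef V8_TARGET_ARCH_ARM64
  return true;   // only the ARM64 backend implements both outputs here
#else
  return false;
#endif
}

static Representation FloorRepresentation() {
  // Leave the representation open where the backend allows it,
  // otherwise pin Math.floor/Math.round to int32 up front.
  return SupportsFlexibleFloorAndRound() ? kFlexible : kInteger32;
}

int main() {
  printf("Math.floor representation: %s\n",
         FloorRepresentation() == kFlexible ? "flexible" : "int32");
  return 0;
}
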
@@ -2740,7 +2689,7 @@ class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
SetFlag(kUseGVN);
// TODO(bmeurer): We'll need kDependsOnRoots once we add the
// corresponding HStoreRoot instruction.
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -2749,107 +2698,122 @@ class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
};
-class HLoadExternalArrayPointer V8_FINAL : public HUnaryOperation {
+class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
public:
- DECLARE_INSTRUCTION_FACTORY_P1(HLoadExternalArrayPointer, HValue*);
+ static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
+ Handle<Map> map, HValue* typecheck = NULL) {
+ return new(zone) HCheckMaps(value, new(zone) UniqueSet<Map>(
+ Unique<Map>::CreateImmovable(map), zone), typecheck);
+ }
+ static HCheckMaps* New(Zone* zone, HValue* context,
+ HValue* value, SmallMapList* map_list,
+ HValue* typecheck = NULL) {
+ UniqueSet<Map>* maps = new(zone) UniqueSet<Map>(map_list->length(), zone);
+ for (int i = 0; i < map_list->length(); ++i) {
+ maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone);
+ }
+ return new(zone) HCheckMaps(value, maps, typecheck);
+ }
+ bool IsStabilityCheck() const { return is_stability_check_; }
+ void MarkAsStabilityCheck() {
+ maps_are_stable_ = true;
+ has_migration_target_ = false;
+ is_stability_check_ = true;
+ ClearChangesFlag(kNewSpacePromotion);
+ ClearDependsOnFlag(kElementsKind);
+ ClearDependsOnFlag(kMaps);
+ }
+
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
virtual HType CalculateInferredType() V8_OVERRIDE {
- return HType::None();
+ if (value()->type().IsHeapObject()) return value()->type();
+ return HType::HeapObject();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
-
- protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- private:
- explicit HLoadExternalArrayPointer(HValue* value)
- : HUnaryOperation(value) {
- set_representation(Representation::External());
- // The result of this instruction is idempotent as long as its inputs don't
- // change. The external array of a specialized array elements object cannot
- // change once set, so it's no necessary to introduce any additional
- // dependencies on top of the inputs.
- SetFlag(kUseGVN);
- }
+ HValue* value() const { return OperandAt(0); }
+ HValue* typecheck() const { return OperandAt(1); }
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
+ const UniqueSet<Map>* maps() const { return maps_; }
+ void set_maps(const UniqueSet<Map>* maps) { maps_ = maps; }
+ bool maps_are_stable() const { return maps_are_stable_; }
-class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
- public:
- static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
- Handle<Map> map, CompilationInfo* info,
- HValue *typecheck = NULL);
- static HCheckMaps* New(Zone* zone, HValue* context,
- HValue* value, SmallMapList* maps,
- HValue *typecheck = NULL) {
- HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
- for (int i = 0; i < maps->length(); i++) {
- check_map->Add(maps->at(i), zone);
- }
- return check_map;
- }
+ bool HasMigrationTarget() const { return has_migration_target_; }
- bool CanOmitMapChecks() { return omit_; }
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ static HCheckMaps* CreateAndInsertAfter(Zone* zone,
+ HValue* value,
+ Unique<Map> map,
+ bool map_is_stable,
+ HInstruction* instr) {
+ return instr->Append(new(zone) HCheckMaps(
+ value, new(zone) UniqueSet<Map>(map, zone), map_is_stable));
}
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- HValue* value() { return OperandAt(0); }
- Unique<Map> first_map() const { return map_set_.at(0); }
- UniqueSet<Map> map_set() const { return map_set_; }
-
- bool has_migration_target() const {
- return has_migration_target_;
+ static HCheckMaps* CreateAndInsertBefore(Zone* zone,
+ HValue* value,
+ const UniqueSet<Map>* maps,
+ bool maps_are_stable,
+ HInstruction* instr) {
+ return instr->Prepend(new(zone) HCheckMaps(value, maps, maps_are_stable));
}
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- return this->map_set_.Equals(&HCheckMaps::cast(other)->map_set_);
+ return this->maps()->Equals(HCheckMaps::cast(other)->maps());
}
virtual int RedefinedOperandIndex() { return 0; }
private:
- void Add(Handle<Map> map, Zone* zone) {
- map_set_.Add(Unique<Map>(map), zone);
- if (!has_migration_target_ && map->is_migration_target()) {
- has_migration_target_ = true;
- SetGVNFlag(kChangesNewSpacePromotion);
- }
+ HCheckMaps(HValue* value, const UniqueSet<Map>* maps, bool maps_are_stable)
+ : HTemplateInstruction<2>(HType::HeapObject()), maps_(maps),
+ has_migration_target_(false), is_stability_check_(false),
+ maps_are_stable_(maps_are_stable) {
+ ASSERT_NE(0, maps->size());
+ SetOperandAt(0, value);
+ // Use the object value for the dependency.
+ SetOperandAt(1, value);
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
}
- // Clients should use one of the static New* methods above.
- HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
- : HTemplateInstruction<2>(value->type()),
- omit_(false), has_migration_target_(false) {
+ HCheckMaps(HValue* value, const UniqueSet<Map>* maps, HValue* typecheck)
+ : HTemplateInstruction<2>(HType::HeapObject()), maps_(maps),
+ has_migration_target_(false), is_stability_check_(false),
+ maps_are_stable_(true) {
+ ASSERT_NE(0, maps->size());
SetOperandAt(0, value);
// Use the object value for the dependency if NULL is passed.
- SetOperandAt(1, typecheck != NULL ? typecheck : value);
+ SetOperandAt(1, typecheck ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
+ for (int i = 0; i < maps->size(); ++i) {
+ Handle<Map> map = maps->at(i).handle();
+ if (map->is_migration_target()) has_migration_target_ = true;
+ if (!map->is_stable()) maps_are_stable_ = false;
+ }
+ if (has_migration_target_) SetChangesFlag(kNewSpacePromotion);
}
- bool omit_;
- bool has_migration_target_;
- UniqueSet<Map> map_set_;
+ const UniqueSet<Map>* maps_;
+ bool has_migration_target_ : 1;
+ bool is_stability_check_ : 1;
+ bool maps_are_stable_ : 1;
};
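The constructor above makes one pass over the map set to decide two bits: whether any map is a migration target (which forces the kNewSpacePromotion changes flag) and whether all maps are stable (a precondition for MarkAsStabilityCheck). That scan in isolation:

#include <assert.h>
#include <vector>

// Toy stand-ins for the two Map properties the constructor inspects.
struct Map {
  bool is_migration_target;
  bool is_stable;
};

struct CheckMapsInfo {
  bool has_migration_target;
  bool maps_are_stable;
};

CheckMapsInfo ScanMaps(const std::vector<Map>& maps) {
  CheckMapsInfo info = { false, true };
  for (size_t i = 0; i < maps.size(); ++i) {
    if (maps[i].is_migration_target) info.has_migration_target = true;
    if (!maps[i].is_stable) info.maps_are_stable = false;
  }
  return info;
}

int main() {
  std::vector<Map> maps;
  Map stable = { false, true };
  Map unstable = { true, false };
  maps.push_back(stable);
  assert(ScanMaps(maps).maps_are_stable);
  maps.push_back(unstable);
  CheckMapsInfo info = ScanMaps(maps);
  assert(info.has_migration_target && !info.maps_are_stable);
  return 0;
}
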
@@ -2931,12 +2895,25 @@ class HCheckInstanceType V8_FINAL : public HUnaryOperation {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType() V8_OVERRIDE {
+ switch (check_) {
+ case IS_SPEC_OBJECT: return HType::JSObject();
+ case IS_JS_ARRAY: return HType::JSArray();
+ case IS_STRING: return HType::String();
+ case IS_INTERNALIZED_STRING: return HType::String();
+ }
+ UNREACHABLE();
+ return HType::Tagged();
+ }
+
virtual HValue* Canonicalize() V8_OVERRIDE;
bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
void GetCheckInterval(InstanceType* first, InstanceType* last);
void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag);
+ Check check() const { return check_; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType)
protected:
@@ -2954,7 +2931,7 @@ class HCheckInstanceType V8_FINAL : public HUnaryOperation {
const char* GetCheckName();
HCheckInstanceType(HValue* value, Check check)
- : HUnaryOperation(value), check_(check) {
+ : HUnaryOperation(value, HType::HeapObject()), check_(check) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@@ -3001,6 +2978,11 @@ class HCheckHeapObject V8_FINAL : public HUnaryOperation {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType() V8_OVERRIDE {
+ if (value()->type().IsHeapObject()) return value()->type();
+ return HType::HeapObject();
+ }
+
#ifdef DEBUG
virtual void Verify() V8_OVERRIDE;
#endif
@@ -3015,8 +2997,7 @@ class HCheckHeapObject V8_FINAL : public HUnaryOperation {
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- explicit HCheckHeapObject(HValue* value)
- : HUnaryOperation(value, HType::NonPrimitive()) {
+ explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@@ -3283,7 +3264,7 @@ class HPhi V8_FINAL : public HValue {
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
- virtual int position() const V8_OVERRIDE;
+ virtual HSourcePosition position() const V8_OVERRIDE;
int merged_index() const { return merged_index_; }
@@ -3422,8 +3403,6 @@ class HArgumentsObject V8_FINAL : public HDematerializedObject {
set_representation(Representation::Tagged());
SetFlag(kIsArguments);
}
-
- virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
};
@@ -3448,7 +3427,7 @@ class HCapturedObject V8_FINAL : public HDematerializedObject {
void ReuseSideEffectsFromStore(HInstruction* store) {
ASSERT(store->HasObservableSideEffects());
ASSERT(store->IsStoreNamedField());
- gvn_flags_.Add(store->gvn_flags());
+ changes_flags_.Add(store->ChangesFlags());
}
// Replay effects of this instruction on the given environment.
@@ -3481,10 +3460,8 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
int32_t value,
Representation representation,
HInstruction* instruction) {
- HConstant* new_constant =
- HConstant::New(zone, context, value, representation);
- new_constant->InsertAfter(instruction);
- return new_constant;
+ return instruction->Append(HConstant::New(
+ zone, context, value, representation));
}
static HConstant* CreateAndInsertBefore(Zone* zone,
@@ -3492,21 +3469,28 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
int32_t value,
Representation representation,
HInstruction* instruction) {
- HConstant* new_constant =
- HConstant::New(zone, context, value, representation);
- new_constant->InsertBefore(instruction);
- return new_constant;
+ return instruction->Prepend(HConstant::New(
+ zone, context, value, representation));
}
static HConstant* CreateAndInsertBefore(Zone* zone,
- Unique<Object> unique,
- bool is_not_in_new_space,
+ Unique<Map> map,
+ bool map_is_stable,
HInstruction* instruction) {
- HConstant* new_constant = new(zone) HConstant(unique,
- Representation::Tagged(), HType::Tagged(), false, is_not_in_new_space,
- false, false);
- new_constant->InsertBefore(instruction);
- return new_constant;
+ return instruction->Prepend(new(zone) HConstant(
+ map, Unique<Map>(Handle<Map>::null()), map_is_stable,
+ Representation::Tagged(), HType::HeapObject(), true,
+ false, false, MAP_TYPE));
+ }
+
+ static HConstant* CreateAndInsertAfter(Zone* zone,
+ Unique<Map> map,
+ bool map_is_stable,
+ HInstruction* instruction) {
+ return instruction->Append(new(zone) HConstant(
+ map, Unique<Map>(Handle<Map>::null()), map_is_stable,
+ Representation::Tagged(), HType::HeapObject(), true,
+ false, false, MAP_TYPE));
}
Handle<Object> handle(Isolate* isolate) {
@@ -3521,12 +3505,6 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return object_.handle();
}
- bool HasMap(Handle<Map> map) {
- Handle<Object> constant_object = handle(map->GetIsolate());
- return constant_object->IsHeapObject() &&
- Handle<HeapObject>::cast(constant_object)->map() == *map;
- }
-
bool IsSpecialDouble() const {
return has_double_value_ &&
(BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
@@ -3538,36 +3516,14 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return is_not_in_new_space_;
}
- bool ImmortalImmovable() const {
- if (has_int32_value_) {
- return false;
- }
- if (has_double_value_) {
- if (IsSpecialDouble()) {
- return true;
- }
- return false;
- }
- if (has_external_reference_value_) {
- return false;
- }
+ bool ImmortalImmovable() const;
- ASSERT(!object_.handle().is_null());
- Heap* heap = isolate()->heap();
- ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
- ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
- return
- object_.IsKnownGlobal(heap->undefined_value()) ||
- object_.IsKnownGlobal(heap->null_value()) ||
- object_.IsKnownGlobal(heap->true_value()) ||
- object_.IsKnownGlobal(heap->false_value()) ||
- object_.IsKnownGlobal(heap->the_hole_value()) ||
- object_.IsKnownGlobal(heap->empty_string()) ||
- object_.IsKnownGlobal(heap->empty_fixed_array());
+ bool IsCell() const {
+ return instance_type_ == CELL_TYPE || instance_type_ == PROPERTY_CELL_TYPE;
}
- bool IsCell() const {
- return is_cell_;
+ bool IsMap() const {
+ return instance_type_ == MAP_TYPE;
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -3615,14 +3571,14 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool HasStringValue() const {
if (has_double_value_ || has_int32_value_) return false;
ASSERT(!object_.handle().is_null());
- return type_.IsString();
+ return instance_type_ < FIRST_NONSTRING_TYPE;
}
Handle<String> StringValue() const {
ASSERT(HasStringValue());
return Handle<String>::cast(object_.handle());
}
bool HasInternalizedStringValue() const {
- return HasStringValue() && is_internalized_string_;
+ return HasStringValue() && StringShape(instance_type_).IsInternalized();
}
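HasStringValue and HasInternalizedStringValue now consult the cached instance type instead of the HType. This works because V8 orders its instance types with every string type below FIRST_NONSTRING_TYPE, so a plain comparison suffices:

// Standard V8 idiom: string types occupy the low end of InstanceType.
bool IsStringInstanceType(InstanceType type) {
  return type < FIRST_NONSTRING_TYPE;
}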
bool HasExternalReferenceValue() const {
@@ -3634,6 +3590,24 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool HasBooleanValue() const { return type_.IsBoolean(); }
bool BooleanValue() const { return boolean_value_; }
+ bool IsUndetectable() const { return is_undetectable_; }
+ InstanceType GetInstanceType() const { return instance_type_; }
+
+ bool HasMapValue() const { return instance_type_ == MAP_TYPE; }
+ Unique<Map> MapValue() const {
+ ASSERT(HasMapValue());
+ return Unique<Map>::cast(GetUnique());
+ }
+ bool HasStableMapValue() const {
+ ASSERT(HasMapValue() || !has_stable_map_value_);
+ return has_stable_map_value_;
+ }
+
+ bool HasObjectMap() const { return !object_map_.IsNull(); }
+ Unique<Map> ObjectMap() const {
+ ASSERT(HasObjectMap());
+ return object_map_;
+ }
virtual intptr_t Hashcode() V8_OVERRIDE {
if (has_int32_value_) {
@@ -3659,6 +3633,10 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return object_;
}
+ bool EqualsUnique(Unique<Object> other) const {
+ return object_.IsInitialized() && object_ == other;
+ }
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HConstant* other_constant = HConstant::cast(other);
if (has_int32_value_) {
@@ -3703,13 +3681,15 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
Representation r = Representation::None(),
bool is_not_in_new_space = true,
Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
- HConstant(Unique<Object> unique,
+ HConstant(Unique<Object> object,
+ Unique<Map> object_map,
+ bool has_stable_map_value,
Representation r,
HType type,
- bool is_internalized_string,
bool is_not_in_new_space,
- bool is_cell,
- bool boolean_value);
+ bool boolean_value,
+ bool is_undetectable,
+ InstanceType instance_type);
explicit HConstant(ExternalReference reference);
@@ -3723,6 +3703,12 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
// constant HeapObject.
Unique<Object> object_;
+ // If object_ is a heap object, this points to the stable map of the object.
+ Unique<Map> object_map_;
+
+ // If object_ is a map, this indicates whether the map is stable.
+ bool has_stable_map_value_ : 1;
+
// We store the HConstant in the most specific form that is safe.
// The two flags, has_int32_value_ and has_double_value_ tell us if
// int32_value_ and double_value_ hold valid, safe representations
@@ -3732,13 +3718,15 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
bool has_external_reference_value_ : 1;
- bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType.
bool is_not_in_new_space_ : 1;
- bool is_cell_ : 1;
bool boolean_value_ : 1;
+ bool is_undetectable_: 1;
int32_t int32_value_;
double double_value_;
ExternalReference external_reference_value_;
+
+ static const InstanceType kUnknownInstanceType = FILLER_TYPE;
+ InstanceType instance_type_;
};
@@ -3820,11 +3808,19 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return representation();
}
- void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+ void SetOperandPositions(Zone* zone,
+ HSourcePosition left_pos,
+ HSourcePosition right_pos) {
set_operand_position(zone, 1, left_pos);
set_operand_position(zone, 2, right_pos);
}
+ bool RightIsPowerOf2() {
+ if (!right()->IsInteger32Constant()) return false;
+ int32_t value = right()->GetInteger32Constant();
+ return IsPowerOf2(value) || IsPowerOf2(-value);
+ }
+
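RightIsPowerOf2 accepts both positive and negative powers of two because division and modulus by -2^k can be strength-reduced just like the positive case. A self-contained sketch of the underlying bit test, assuming IsPowerOf2 uses the usual x & (x - 1) trick (done on the unsigned value to sidestep signed overflow):

#include <stdint.h>

static bool IsPowerOf2Sketch(int32_t x) {
  uint32_t u = static_cast<uint32_t>(x);
  return u != 0 && (u & (u - 1)) == 0;  // exactly one bit set
}
// RightIsPowerOf2 is then IsPowerOf2Sketch(v) || IsPowerOf2Sketch(-v),
// matching e.g. v == 8 and v == -8 but not v == 12.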
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
private:
@@ -3839,6 +3835,8 @@ class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -3849,15 +3847,21 @@ class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ bool known_function() const { return known_function_; }
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
private:
HWrapReceiver(HValue* receiver, HValue* function) {
+ known_function_ = function->IsConstant() &&
+ HConstant::cast(function)->handle(function->isolate())->IsJSFunction();
set_representation(Representation::Tagged());
SetOperandAt(0, receiver);
SetOperandAt(1, function);
+ SetFlag(kUseGVN);
}
+
+ bool known_function_;
};
@@ -4096,7 +4100,7 @@ class HBoundsCheckBaseIndexInformation V8_FINAL
class HBitwiseBinaryOperation : public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
- HType type = HType::Tagged())
+ HType type = HType::TaggedNumber())
: HBinaryOperation(context, left, right, type) {
SetFlag(kFlexibleRepresentation);
SetFlag(kTruncatingToInt32);
@@ -4105,7 +4109,6 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4114,6 +4117,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
virtual void UpdateRepresentation(Representation new_rep,
@@ -4148,9 +4152,6 @@ class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
HValue*,
HValue*);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
protected:
@@ -4162,12 +4163,15 @@ class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetFlag(kCanOverflow);
- if (!right->IsConstant()) {
- SetFlag(kCanBeDivByZero);
- }
+ SetFlag(kCanBeDivByZero);
+ SetFlag(kLeftCanBeMinInt);
+ SetFlag(kLeftCanBeNegative);
+ SetFlag(kLeftCanBePositive);
SetFlag(kAllowUndefinedAsNaN);
}
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -4182,7 +4186,6 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4191,9 +4194,11 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
+
private:
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -4258,9 +4263,14 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return observed_input_representation_[index];
}
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+ void SetOperandPositions(Zone* zone,
+ HSourcePosition left_pos,
+ HSourcePosition right_pos) {
set_operand_position(zone, 0, left_pos);
set_operand_position(zone, 1, right_pos);
}
@@ -4343,6 +4353,12 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ static const int kNoKnownSuccessorIndex = -1;
+ int known_successor_index() const { return known_successor_index_; }
+ void set_known_successor_index(int known_successor_index) {
+ known_successor_index_ = known_successor_index;
+ }
+
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
@@ -4362,7 +4378,8 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
HCompareObjectEqAndBranch(HValue* left,
HValue* right,
HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL) {
+ HBasicBlock* false_target = NULL)
+ : known_successor_index_(kNoKnownSuccessorIndex) {
ASSERT(!left->IsConstant() ||
(!HConstant::cast(left)->HasInteger32Value() ||
HConstant::cast(left)->HasSmiValue()));
@@ -4374,6 +4391,8 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
SetSuccessorAt(0, true_target);
SetSuccessorAt(1, false_target);
}
+
+ int known_successor_index_;
};
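The known_successor_index machinery lets later passes record that the comparison outcome is statically decided; KnownSuccessorBlock then reports the surviving edge so the branch can be folded. Assumed usage, with names illustrative:

// Sketch: fold a branch whose outcome is known at compile time.
HBasicBlock* successor = NULL;
if (branch->KnownSuccessorBlock(&successor) && successor != NULL) {
  // Wire control flow straight to `successor`; the other edge (and the
  // comparison itself, if otherwise unused) becomes dead.
}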
@@ -4387,6 +4406,8 @@ class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
private:
@@ -4407,13 +4428,27 @@ class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
+ static const int kNoKnownSuccessorIndex = -1;
+ int known_successor_index() const { return known_successor_index_; }
+ void set_known_successor_index(int known_successor_index) {
+ known_successor_index_ = known_successor_index;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+ protected:
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
HIsStringAndBranch(HValue* value,
HBasicBlock* true_target = NULL,
HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target) {}
+ : HUnaryControlInstruction(value, true_target, false_target),
+ known_successor_index_(kNoKnownSuccessorIndex) { }
+
+ int known_successor_index_;
};
@@ -4431,12 +4466,15 @@ class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual int RedefinedOperandIndex() { return 0; }
private:
HIsSmiAndBranch(HValue* value,
HBasicBlock* true_target = NULL,
HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target) {}
+ : HUnaryControlInstruction(value, true_target, false_target) {
+ set_representation(Representation::Tagged());
+ }
};
@@ -4450,6 +4488,8 @@ class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
private:
@@ -4495,7 +4535,7 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
SetOperandAt(1, left);
SetOperandAt(2, right);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
Token::Value token_;
@@ -4532,6 +4572,8 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
private:
@@ -4613,8 +4655,7 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
- Handle<String> type_literal() { return type_literal_; }
- bool compares_number_type() { return compares_number_type_; }
+ Handle<String> type_literal() { return type_literal_.handle(); }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
@@ -4625,16 +4666,16 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ type_literal_ = Unique<String>(type_literal_.handle());
+ }
+
private:
HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
: HUnaryControlInstruction(value, NULL, NULL),
- type_literal_(type_literal) {
- Heap* heap = type_literal->GetHeap();
- compares_number_type_ = type_literal->Equals(heap->number_string());
- }
+ type_literal_(Unique<String>::CreateUninitialized(type_literal)) { }
- Handle<String> type_literal_;
- bool compares_number_type_ : 1;
+ Unique<String> type_literal_;
};
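Storing the type literal as a Unique<String> defers address capture: CreateUninitialized keeps only the handle, and FinalizeUniqueness (above) re-wraps it once the heap is in a state where raw addresses may be compared. A toy model of the idea, with all details assumed:

template <class T>
class UniqueSketch {
 public:
  // Uninitialized: comparisons are not yet allowed.
  explicit UniqueSketch(T** handle) : handle_(handle), raw_address_(0) {}
  void Finalize() { raw_address_ = *handle_; }  // capture when safe
  bool operator==(const UniqueSketch& other) const {
    // Valid only after both sides have been finalized.
    return raw_address_ == other.raw_address_;
  }
 private:
  T** handle_;         // stand-in for Handle<T>
  void* raw_address_;
};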
@@ -4720,7 +4761,7 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, right);
set_representation(Representation::Double());
SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -4743,9 +4784,6 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
return !representation().IsTagged() && !representation().IsExternal();
}
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
@@ -4761,10 +4799,6 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) {
- SetGVNFlag(kChangesNewSpacePromotion);
- ClearFlag(kAllowUndefinedAsNaN);
- }
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
@@ -4774,6 +4808,10 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
+ if (to.IsTagged()) {
+ SetChangesFlag(kNewSpacePromotion);
+ ClearFlag(kAllowUndefinedAsNaN);
+ }
}
virtual Representation RepresentationFromInputs() V8_OVERRIDE;
@@ -4802,9 +4840,6 @@ class HSub V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
@@ -4851,9 +4886,6 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
return mul;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
// Only commutative if it is certain that no two objects are multiplied.
@@ -4891,19 +4923,6 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- bool HasPowerOf2Divisor() {
- if (right()->IsConstant() &&
- HConstant::cast(right())->HasInteger32Value()) {
- int32_t value = HConstant::cast(right())->Integer32Value();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
- }
-
- return false;
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
@@ -4926,6 +4945,7 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
HValue* right) : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
+ SetFlag(kLeftCanBeNegative);
}
};
@@ -4937,18 +4957,6 @@ class HDiv V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- bool HasPowerOf2Divisor() {
- if (right()->IsInteger32Constant()) {
- int32_t value = right()->GetInteger32Constant();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
- }
-
- return false;
- }
-
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
@@ -5054,7 +5062,7 @@ class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
Token::Value op,
HValue* left,
HValue* right)
- : HBitwiseBinaryOperation(context, left, right, HType::TaggedNumber()),
+ : HBitwiseBinaryOperation(context, left, right),
op_(op) {
ASSERT(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
// BIT_AND with a smi-range positive value will always unset the
@@ -5238,8 +5246,8 @@ class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
private:
explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
- SetGVNFlag(kChangesOsrEntries);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kOsrEntries);
+ SetChangesFlag(kNewSpacePromotion);
}
BailoutId ast_id_;
@@ -5297,13 +5305,6 @@ class HCallStub V8_FINAL : public HUnaryCall {
HValue* context() { return value(); }
- void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
- transcendental_type_ = transcendental_type;
- }
- TranscendentalCache::Type transcendental_type() {
- return transcendental_type_;
- }
-
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CallStub)
@@ -5311,12 +5312,10 @@ class HCallStub V8_FINAL : public HUnaryCall {
private:
HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
: HUnaryCall(context, argument_count),
- major_key_(major_key),
- transcendental_type_(TranscendentalCache::kNumberOfCaches) {
+ major_key_(major_key) {
}
CodeStub::Major major_key_;
- TranscendentalCache::Type transcendental_type_;
};
@@ -5390,7 +5389,7 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
: cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnGlobalVars);
+ SetDependsOnFlag(kGlobalVars);
}
virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
@@ -5438,6 +5437,12 @@ class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
class HAllocate V8_FINAL : public HTemplateInstruction<2> {
public:
+ static bool CompatibleInstanceTypes(InstanceType type1,
+ InstanceType type2) {
+ return ComputeFlags(TENURED, type1) == ComputeFlags(TENURED, type2) &&
+ ComputeFlags(NOT_TENURED, type1) == ComputeFlags(NOT_TENURED, type2);
+ }
+
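CompatibleInstanceTypes asks whether two instance types would yield identical allocation flags under both tenuring decisions, which is the precondition for letting their allocations share a fold. For example (behavior inferred from ComputeFlags further down):

// FIXED_DOUBLE_ARRAY_TYPE adds ALLOCATE_DOUBLE_ALIGNED while
// FIXED_ARRAY_TYPE does not, so these two should not be compatible:
bool ok = HAllocate::CompatibleInstanceTypes(FIXED_ARRAY_TYPE,
                                             FIXED_DOUBLE_ARRAY_TYPE);
// whereas two types with identical flags in both spaces would be.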
static HAllocate* New(Zone* zone,
HValue* context,
HValue* size,
@@ -5456,6 +5461,13 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* size() { return OperandAt(1); }
+ bool has_size_upper_bound() { return size_upper_bound_ != NULL; }
+ HConstant* size_upper_bound() { return size_upper_bound_; }
+ void set_size_upper_bound(HConstant* value) {
+ ASSERT(size_upper_bound_ == NULL);
+ size_upper_bound_ = value;
+ }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
@@ -5496,11 +5508,15 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
}
+ bool MustClearNextMapWord() const {
+ return (flags_ & CLEAR_NEXT_MAP_WORD) != 0;
+ }
+
void MakeDoubleAligned() {
flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
}
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -5513,7 +5529,8 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
ALLOCATE_IN_OLD_DATA_SPACE = 1 << 1,
ALLOCATE_IN_OLD_POINTER_SPACE = 1 << 2,
ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
- PREFILL_WITH_FILLER = 1 << 4
+ PREFILL_WITH_FILLER = 1 << 4,
+ CLEAR_NEXT_MAP_WORD = 1 << 5
};
HAllocate(HValue* context,
@@ -5524,21 +5541,34 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
Handle<AllocationSite> allocation_site =
Handle<AllocationSite>::null())
: HTemplateInstruction<2>(type),
+ flags_(ComputeFlags(pretenure_flag, instance_type)),
dominating_allocate_(NULL),
filler_free_space_size_(NULL),
- clear_next_map_word_(false) {
+ size_upper_bound_(NULL) {
SetOperandAt(0, context);
- SetOperandAt(1, size);
+ UpdateSize(size);
set_representation(Representation::Tagged());
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kChangesNewSpacePromotion);
- SetGVNFlag(kDependsOnNewSpacePromotion);
- flags_ = pretenure_flag == TENURED
+ SetChangesFlag(kNewSpacePromotion);
+ SetDependsOnFlag(kNewSpacePromotion);
+
+ if (FLAG_trace_pretenuring) {
+ PrintF("HAllocate with AllocationSite %p %s\n",
+ allocation_site.is_null()
+ ? static_cast<void*>(NULL)
+ : static_cast<void*>(*allocation_site),
+ pretenure_flag == TENURED ? "tenured" : "not tenured");
+ }
+ }
+
+ static Flags ComputeFlags(PretenureFlag pretenure_flag,
+ InstanceType instance_type) {
+ Flags flags = pretenure_flag == TENURED
? (Heap::TargetSpaceId(instance_type) == OLD_POINTER_SPACE
? ALLOCATE_IN_OLD_POINTER_SPACE : ALLOCATE_IN_OLD_DATA_SPACE)
: ALLOCATE_IN_NEW_SPACE;
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
- flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
+ flags = static_cast<Flags>(flags | ALLOCATE_DOUBLE_ALIGNED);
}
// We have to fill the allocated object with one word fillers if we do
// not use allocation folding since some allocations may depend on each
@@ -5546,22 +5576,28 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
// allocations may leave such objects behind in a not completely initialized
// state.
if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
- flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
+ flags = static_cast<Flags>(flags | PREFILL_WITH_FILLER);
}
- clear_next_map_word_ = pretenure_flag == NOT_TENURED &&
- AllocationSite::CanTrack(instance_type);
-
- if (FLAG_trace_pretenuring) {
- PrintF("HAllocate with AllocationSite %p %s\n",
- allocation_site.is_null()
- ? static_cast<void*>(NULL)
- : static_cast<void*>(*allocation_site),
- pretenure_flag == TENURED ? "tenured" : "not tenured");
+ if (pretenure_flag == NOT_TENURED &&
+ AllocationSite::CanTrack(instance_type)) {
+ flags = static_cast<Flags>(flags | CLEAR_NEXT_MAP_WORD);
}
+ return flags;
+ }
+
+ void UpdateClearNextMapWord(bool clear_next_map_word) {
+ flags_ = static_cast<Flags>(clear_next_map_word
+ ? flags_ | CLEAR_NEXT_MAP_WORD
+ : flags_ & ~CLEAR_NEXT_MAP_WORD);
}
void UpdateSize(HValue* size) {
SetOperandAt(1, size);
+ if (size->IsInteger32Constant()) {
+ size_upper_bound_ = HConstant::cast(size);
+ } else {
+ size_upper_bound_ = NULL;
+ }
}
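UpdateSize caches the size as size_upper_bound_ whenever it is a constant; allocation folding can then reserve a statically known number of bytes in the dominating allocation. Assumed shape of that guard:

// Illustrative folding guard (names assumed):
if (dominated->has_size_upper_bound()) {
  int32_t bytes = dominated->size_upper_bound()->Integer32Value();
  // ... safe to pre-reserve `bytes` inside the dominating allocation ...
}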
HAllocate* GetFoldableDominator(HAllocate* dominator);
@@ -5583,7 +5619,7 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
Handle<Map> known_initial_map_;
HAllocate* dominating_allocate_;
HStoreNamedField* filler_free_space_size_;
- bool clear_next_map_word_;
+ HConstant* size_upper_bound_;
};
@@ -5619,7 +5655,7 @@ class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<2> {
HValue* context,
HValue* value,
HValue* offset,
- HType type = HType::Tagged()) {
+ HType type) {
return new(zone) HInnerAllocatedObject(value, offset, type);
}
@@ -5637,30 +5673,30 @@ class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<2> {
private:
HInnerAllocatedObject(HValue* value,
HValue* offset,
- HType type = HType::Tagged())
- : HTemplateInstruction<2>(type) {
+ HType type) : HTemplateInstruction<2>(type) {
ASSERT(value->IsAllocate());
+ ASSERT(type.IsHeapObject());
SetOperandAt(0, value);
SetOperandAt(1, offset);
- set_type(type);
set_representation(Representation::Tagged());
}
};
inline bool StoringValueNeedsWriteBarrier(HValue* value) {
- return !value->type().IsBoolean()
- && !value->type().IsSmi()
+ return !value->type().IsSmi()
+ && !value->type().IsNull()
+ && !value->type().IsBoolean()
+ && !value->type().IsUndefined()
&& !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
}
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
- HValue* new_space_dominator) {
- if (object->IsInnerAllocatedObject()) {
- return ReceiverObjectNeedsWriteBarrier(
- HInnerAllocatedObject::cast(object)->base_object(),
- new_space_dominator);
+ HValue* value,
+ HValue* dominator) {
+ while (object->IsInnerAllocatedObject()) {
+ object = HInnerAllocatedObject::cast(object)->base_object();
}
if (object->IsConstant() && HConstant::cast(object)->IsCell()) {
return false;
@@ -5670,14 +5706,46 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
// Stores to external references require no write barriers
return false;
}
- if (object != new_space_dominator) return true;
- if (object->IsAllocate()) {
- return !HAllocate::cast(object)->IsNewSpaceAllocation();
+ // We definitely need a write barrier unless the object is the allocation
+ // dominator.
+ if (object == dominator && object->IsAllocate()) {
+ // Stores to new space allocations require no write barriers.
+ if (HAllocate::cast(object)->IsNewSpaceAllocation()) {
+ return false;
+ }
+ // Stores to old space allocations require no write barriers if the value is
+ // a constant provably not in new space.
+ if (value->IsConstant() && HConstant::cast(value)->NotInNewSpace()) {
+ return false;
+ }
+ // Stores to old space allocations require no write barriers if the value is
+ // an old space allocation.
+ while (value->IsInnerAllocatedObject()) {
+ value = HInnerAllocatedObject::cast(value)->base_object();
+ }
+ if (value->IsAllocate() &&
+ !HAllocate::cast(value)->IsNewSpaceAllocation()) {
+ return false;
+ }
}
return true;
}
+inline PointersToHereCheck PointersToHereCheckForObject(HValue* object,
+ HValue* dominator) {
+ while (object->IsInnerAllocatedObject()) {
+ object = HInnerAllocatedObject::cast(object)->base_object();
+ }
+ if (object == dominator &&
+ object->IsAllocate() &&
+ HAllocate::cast(object)->IsNewSpaceAllocation()) {
+ return kPointersToHereAreAlwaysInteresting;
+ }
+ return kPointersToHereMaybeInteresting;
+}
+
+
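The rewritten ReceiverObjectNeedsWriteBarrier and the new PointersToHereCheckForObject both start by unwrapping inner-allocated objects to their base allocation, then special-case stores whose target is the dominating new-space allocation. A stand-alone toy model of that control flow (types and fields are illustrative, not V8's):

struct Alloc { Alloc* base; bool is_inner; bool in_new_space; };

static const Alloc* AllocationRootOf(const Alloc* object) {
  while (object->is_inner) object = object->base;  // peel inner objects
  return object;
}

static bool NeedsBarrierSketch(const Alloc* object, const Alloc* dominator) {
  const Alloc* root = AllocationRootOf(object);
  // No barrier only when the store target is the dominating allocation
  // itself and that allocation is known to live in new space.
  return !(root == dominator && root->in_new_space);
}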
class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
@@ -5709,7 +5777,7 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value),
cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
details_(details) {
- SetGVNFlag(kChangesGlobalVars);
+ SetChangesFlag(kGlobalVars);
}
Unique<PropertyCell> cell_;
@@ -5717,52 +5785,6 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
};
-class HStoreGlobalGeneric : public HTemplateInstruction<3> {
- public:
- inline static HStoreGlobalGeneric* New(Zone* zone,
- HValue* context,
- HValue* global_object,
- Handle<Object> name,
- HValue* value,
- StrictModeFlag strict_mode_flag) {
- return new(zone) HStoreGlobalGeneric(context, global_object,
- name, value, strict_mode_flag);
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* global_object() { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
- HValue* value() { return OperandAt(2); }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric)
-
- private:
- HStoreGlobalGeneric(HValue* context,
- HValue* global_object,
- Handle<Object> name,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : name_(name),
- strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, context);
- SetOperandAt(1, global_object);
- SetOperandAt(2, value);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- Handle<Object> name_;
- StrictModeFlag strict_mode_flag_;
-};
-
-
class HLoadContextSlot V8_FINAL : public HUnaryOperation {
public:
enum Mode {
@@ -5778,23 +5800,11 @@ class HLoadContextSlot V8_FINAL : public HUnaryOperation {
kCheckReturnUndefined
};
- HLoadContextSlot(HValue* context, Variable* var)
- : HUnaryOperation(context), slot_index_(var->index()) {
- ASSERT(var->IsContextSlot());
- switch (var->mode()) {
- case LET:
- case CONST_HARMONY:
- mode_ = kCheckDeoptimize;
- break;
- case CONST:
- mode_ = kCheckReturnUndefined;
- break;
- default:
- mode_ = kNoCheck;
- }
+ HLoadContextSlot(HValue* context, int slot_index, Mode mode)
+ : HUnaryOperation(context), slot_index_(slot_index), mode_(mode) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnContextSlots);
+ SetDependsOnFlag(kContextSlots);
}
int slot_index() const { return slot_index_; }
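The constructor no longer derives the check mode from a Variable; callers pass slot_index and mode explicitly. The old switch presumably survives at the graph-builder call site in roughly this form:

static HLoadContextSlot::Mode ModeFor(Variable* var) {  // assumed helper
  switch (var->mode()) {
    case LET:
    case CONST_HARMONY: return HLoadContextSlot::kCheckDeoptimize;
    case CONST:         return HLoadContextSlot::kCheckReturnUndefined;
    default:            return HLoadContextSlot::kNoCheck;
  }
}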
@@ -5878,7 +5888,7 @@ class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
: slot_index_(slot_index), mode_(mode) {
SetOperandAt(0, context);
SetOperandAt(1, value);
- SetGVNFlag(kChangesContextSlots);
+ SetChangesFlag(kContextSlots);
}
int slot_index_;
@@ -5902,6 +5912,10 @@ class HObjectAccess V8_FINAL {
return portion() == kStringLengths;
}
+ inline bool IsMap() const {
+ return portion() == kMaps;
+ }
+
inline int offset() const {
return OffsetField::decode(value_);
}
@@ -5914,8 +5928,19 @@ class HObjectAccess V8_FINAL {
return name_;
}
+ inline bool immutable() const {
+ return ImmutableField::decode(value_);
+ }
+
+ // Returns true if access is being made to an in-object property that
+ // was already added to the object.
+ inline bool existing_inobject_property() const {
+ return ExistingInobjectPropertyField::decode(value_);
+ }
+
inline HObjectAccess WithRepresentation(Representation representation) {
- return HObjectAccess(portion(), offset(), representation, name());
+ return HObjectAccess(portion(), offset(), representation, name(),
+ immutable(), existing_inobject_property());
}
static HObjectAccess ForHeapNumberValue() {
@@ -5951,25 +5976,22 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(
kArrayLengths,
JSArray::kLengthOffset,
- IsFastElementsKind(elements_kind) &&
- FLAG_track_fields
- ? Representation::Smi() : Representation::Tagged());
+ IsFastElementsKind(elements_kind)
+ ? Representation::Smi() : Representation::Tagged());
}
- static HObjectAccess ForAllocationSiteOffset(int offset) {
- ASSERT(offset >= HeapObject::kHeaderSize && offset < AllocationSite::kSize);
- return HObjectAccess(kInobject, offset);
- }
+ static HObjectAccess ForAllocationSiteOffset(int offset);
static HObjectAccess ForAllocationSiteList() {
- return HObjectAccess(kExternalMemory, 0, Representation::Tagged());
+ return HObjectAccess(kExternalMemory, 0, Representation::Tagged(),
+ Handle<String>::null(), false, false);
}
static HObjectAccess ForFixedArrayLength() {
return HObjectAccess(
kArrayLengths,
FixedArray::kLengthOffset,
- FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
+ Representation::Smi());
}
static HObjectAccess ForStringHashField() {
@@ -5983,7 +6005,7 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(
kStringLengths,
String::kLengthOffset,
- FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
+ Representation::Smi());
}
static HObjectAccess ForConsStringFirst() {
@@ -6014,14 +6036,6 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
}
- static HObjectAccess ForFirstCodeSlot() {
- return HObjectAccess(kInobject, SharedFunctionInfo::kFirstCodeSlot);
- }
-
- static HObjectAccess ForFirstContextSlot() {
- return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
- }
-
static HObjectAccess ForOptimizedCodeMap() {
return HObjectAccess(kInobject,
SharedFunctionInfo::kOptimizedCodeMapOffset);
@@ -6035,9 +6049,14 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kMaps, JSObject::kMapOffset);
}
- static HObjectAccess ForMapInstanceSize() {
+ static HObjectAccess ForMapAsInteger32() {
+ return HObjectAccess(kMaps, JSObject::kMapOffset,
+ Representation::Integer32());
+ }
+
+ static HObjectAccess ForMapInObjectProperties() {
return HObjectAccess(kInobject,
- Map::kInstanceSizeOffset,
+ Map::kInObjectPropertiesOffset,
Representation::UInteger8());
}
@@ -6047,6 +6066,38 @@ class HObjectAccess V8_FINAL {
Representation::UInteger8());
}
+ static HObjectAccess ForMapInstanceSize() {
+ return HObjectAccess(kInobject,
+ Map::kInstanceSizeOffset,
+ Representation::UInteger8());
+ }
+
+ static HObjectAccess ForMapBitField() {
+ return HObjectAccess(kInobject,
+ Map::kBitFieldOffset,
+ Representation::UInteger8());
+ }
+
+ static HObjectAccess ForMapBitField2() {
+ return HObjectAccess(kInobject,
+ Map::kBitField2Offset,
+ Representation::UInteger8());
+ }
+
+ static HObjectAccess ForNameHashField() {
+ return HObjectAccess(kInobject,
+ Name::kHashFieldOffset,
+ Representation::Integer32());
+ }
+
+ static HObjectAccess ForMapInstanceTypeAndBitField() {
+ STATIC_ASSERT((Map::kInstanceTypeOffset & 1) == 0);
+ STATIC_ASSERT(Map::kBitFieldOffset == Map::kInstanceTypeOffset + 1);
+ return HObjectAccess(kInobject,
+ Map::kInstanceTypeOffset,
+ Representation::UInteger16());
+ }
+
static HObjectAccess ForPropertyCellValue() {
return HObjectAccess(kInobject, PropertyCell::kValueOffset);
}
@@ -6060,16 +6111,29 @@ class HObjectAccess V8_FINAL {
}
static HObjectAccess ForCounter() {
- return HObjectAccess(kExternalMemory, 0, Representation::Integer32());
+ return HObjectAccess(kExternalMemory, 0, Representation::Integer32(),
+ Handle<String>::null(), false, false);
}
// Create an access to an offset in a fixed array header.
static HObjectAccess ForFixedArrayHeader(int offset);
// Create an access to an in-object property in a JSObject.
- static HObjectAccess ForJSObjectOffset(int offset,
+ // This kind of access must be used when the object |map| is known and
+ // in-object properties are being accessed. Accesses of the in-object
+ // properties can have different semantics depending on whether the
+ // corresponding property was added to the map or not.
+ static HObjectAccess ForMapAndOffset(Handle<Map> map, int offset,
Representation representation = Representation::Tagged());
+ // Create an access to an in-object property in a JSObject.
+ // This kind of access can be used for accessing object header fields or
+ // in-object properties if the map of the object is not known.
+ static HObjectAccess ForObservableJSObjectOffset(int offset,
+ Representation representation = Representation::Tagged()) {
+ return ForMapAndOffset(Handle<Map>::null(), offset, representation);
+ }
+
// Create an access to an in-object property in a JSArray.
static HObjectAccess ForJSArrayOffset(int offset);
@@ -6087,50 +6151,62 @@ class HObjectAccess V8_FINAL {
static HObjectAccess ForCellPayload(Isolate* isolate);
static HObjectAccess ForJSTypedArrayLength() {
- return HObjectAccess::ForJSObjectOffset(JSTypedArray::kLengthOffset);
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSTypedArray::kLengthOffset);
}
static HObjectAccess ForJSArrayBufferBackingStore() {
- return HObjectAccess::ForJSObjectOffset(
+ return HObjectAccess::ForObservableJSObjectOffset(
JSArrayBuffer::kBackingStoreOffset, Representation::External());
}
+ static HObjectAccess ForJSArrayBufferByteLength() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBuffer::kByteLengthOffset, Representation::Tagged());
+ }
+
static HObjectAccess ForExternalArrayExternalPointer() {
- return HObjectAccess::ForJSObjectOffset(
+ return HObjectAccess::ForObservableJSObjectOffset(
ExternalArray::kExternalPointerOffset, Representation::External());
}
static HObjectAccess ForJSArrayBufferViewWeakNext() {
- return HObjectAccess::ForJSObjectOffset(JSArrayBufferView::kWeakNextOffset);
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBufferView::kWeakNextOffset);
}
static HObjectAccess ForJSArrayBufferWeakFirstView() {
- return HObjectAccess::ForJSObjectOffset(
+ return HObjectAccess::ForObservableJSObjectOffset(
JSArrayBuffer::kWeakFirstViewOffset);
}
static HObjectAccess ForJSArrayBufferViewBuffer() {
- return HObjectAccess::ForJSObjectOffset(JSArrayBufferView::kBufferOffset);
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBufferView::kBufferOffset);
}
static HObjectAccess ForJSArrayBufferViewByteOffset() {
- return HObjectAccess::ForJSObjectOffset(
+ return HObjectAccess::ForObservableJSObjectOffset(
JSArrayBufferView::kByteOffsetOffset);
}
static HObjectAccess ForJSArrayBufferViewByteLength() {
- return HObjectAccess::ForJSObjectOffset(
+ return HObjectAccess::ForObservableJSObjectOffset(
JSArrayBufferView::kByteLengthOffset);
}
- void PrintTo(StringStream* stream);
+ static HObjectAccess ForGlobalObjectNativeContext() {
+ return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset);
+ }
+
+ void PrintTo(StringStream* stream) const;
inline bool Equals(HObjectAccess that) const {
return value_ == that.value_; // portion and offset must match
}
protected:
- void SetGVNFlags(HValue *instr, bool is_store);
+ void SetGVNFlags(HValue *instr, PropertyAccessType access_type);
private:
// internal use only; different parts of an object or array
@@ -6145,28 +6221,41 @@ class HObjectAccess V8_FINAL {
kExternalMemory // some field in external memory
};
+ HObjectAccess() : value_(0) {}
+
HObjectAccess(Portion portion, int offset,
Representation representation = Representation::Tagged(),
- Handle<String> name = Handle<String>::null())
+ Handle<String> name = Handle<String>::null(),
+ bool immutable = false,
+ bool existing_inobject_property = true)
: value_(PortionField::encode(portion) |
RepresentationField::encode(representation.kind()) |
+ ImmutableField::encode(immutable ? 1 : 0) |
+ ExistingInobjectPropertyField::encode(
+ existing_inobject_property ? 1 : 0) |
OffsetField::encode(offset)),
name_(name) {
// assert that the fields decode correctly
ASSERT(this->offset() == offset);
ASSERT(this->portion() == portion);
+ ASSERT(this->immutable() == immutable);
+ ASSERT(this->existing_inobject_property() == existing_inobject_property);
ASSERT(RepresentationField::decode(value_) == representation.kind());
+ ASSERT(!this->existing_inobject_property() || IsInobject());
}
class PortionField : public BitField<Portion, 0, 3> {};
class RepresentationField : public BitField<Representation::Kind, 3, 4> {};
- class OffsetField : public BitField<int, 7, 25> {};
+ class ImmutableField : public BitField<bool, 7, 1> {};
+ class ExistingInobjectPropertyField : public BitField<bool, 8, 1> {};
+ class OffsetField : public BitField<int, 9, 23> {};
- uint32_t value_; // encodes portion, representation, and offset
+ uint32_t value_; // encodes portion, representation, immutable bit, existing-property bit, and offset
Handle<String> name_;
friend class HLoadNamedField;
friend class HStoreNamedField;
+ friend class SideEffectsTracker;
inline Portion portion() const {
return PortionField::decode(value_);
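With the two new one-bit fields, value_ packs portion (3 bits), representation kind (4), immutable (1), existing-in-object-property (1), and offset (23) into a single uint32_t. A self-contained sketch of the BitField packing, mirroring the declarations above in simplified form:

#include <stdint.h>
#include <assert.h>

template <class T, int kShift, int kSize>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static uint32_t encode(T v) { return static_cast<uint32_t>(v) << kShift; }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> kShift);
  }
};

typedef BitFieldSketch<int,  0, 3>  Portion;       // 3 + 4 + 1 + 1 + 23
typedef BitFieldSketch<int,  3, 4>  RepKind;       //   == 32 bits total
typedef BitFieldSketch<bool, 7, 1>  Immutable;
typedef BitFieldSketch<bool, 8, 1>  ExistingProp;
typedef BitFieldSketch<int,  9, 23> Offset;

int main() {
  uint32_t v = Portion::encode(2) | Immutable::encode(true) |
               Offset::encode(0x18);
  assert(Portion::decode(v) == 2 && Immutable::decode(v) &&
         Offset::decode(v) == 0x18);
  return 0;
}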
@@ -6174,17 +6263,26 @@ class HObjectAccess V8_FINAL {
};
-class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
+class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HLoadNamedField, HValue*, HObjectAccess);
+ DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*,
+ HValue*, HObjectAccess);
+ DECLARE_INSTRUCTION_FACTORY_P5(HLoadNamedField, HValue*, HValue*,
+ HObjectAccess, const UniqueSet<Map>*, HType);
HValue* object() { return OperandAt(0); }
- bool HasTypeCheck() { return object()->IsCheckMaps(); }
+ HValue* dependency() {
+ ASSERT(HasDependency());
+ return OperandAt(1);
+ }
+ bool HasDependency() const { return OperandAt(0) != OperandAt(1); }
HObjectAccess access() const { return access_; }
Representation field_representation() const {
return access_.representation();
}
+ const UniqueSet<Map>* maps() const { return maps_; }
+
virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
return !access().IsInobject() || access().offset() >= size;
@@ -6199,18 +6297,37 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ bool CanBeReplacedWith(HValue* other) const {
+ if (!CheckFlag(HValue::kCantBeReplaced)) return false;
+ if (!type().Equals(other->type())) return false;
+ if (!representation().Equals(other->representation())) return false;
+ if (!other->IsLoadNamedField()) return true;
+ HLoadNamedField* that = HLoadNamedField::cast(other);
+ if (this->maps_ == that->maps_) return true;
+ if (this->maps_ == NULL || that->maps_ == NULL) return false;
+ return this->maps_->IsSubset(that->maps_);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- HLoadNamedField* b = HLoadNamedField::cast(other);
- return access_.Equals(b->access_);
+ HLoadNamedField* that = HLoadNamedField::cast(other);
+ if (!this->access_.Equals(that->access_)) return false;
+ if (this->maps_ == that->maps_) return true;
+ return (this->maps_ != NULL &&
+ that->maps_ != NULL &&
+ this->maps_->Equals(that->maps_));
}
private:
- HLoadNamedField(HValue* object, HObjectAccess access) : access_(access) {
- ASSERT(object != NULL);
+ HLoadNamedField(HValue* object,
+ HValue* dependency,
+ HObjectAccess access)
+ : access_(access), maps_(NULL) {
+ ASSERT_NOT_NULL(object);
SetOperandAt(0, object);
+ SetOperandAt(1, dependency ? dependency : object);
Representation representation = access.representation();
if (representation.IsInteger8() ||
@@ -6220,24 +6337,48 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
set_representation(Representation::Integer32());
} else if (representation.IsSmi()) {
set_type(HType::Smi());
- set_representation(representation);
+ if (SmiValuesAre32Bits()) {
+ set_representation(Representation::Integer32());
+ } else {
+ set_representation(representation);
+ }
} else if (representation.IsDouble() ||
representation.IsExternal() ||
representation.IsInteger32()) {
set_representation(representation);
- } else if (FLAG_track_heap_object_fields &&
- representation.IsHeapObject()) {
- set_type(HType::NonPrimitive());
+ } else if (representation.IsHeapObject()) {
+ set_type(HType::HeapObject());
set_representation(Representation::Tagged());
} else {
set_representation(Representation::Tagged());
}
- access.SetGVNFlags(this, false);
+ access.SetGVNFlags(this, LOAD);
+ }
+
+ HLoadNamedField(HValue* object,
+ HValue* dependency,
+ HObjectAccess access,
+ const UniqueSet<Map>* maps,
+ HType type)
+ : HTemplateInstruction<2>(type), access_(access), maps_(maps) {
+ ASSERT_NOT_NULL(maps);
+ ASSERT_NE(0, maps->size());
+
+ ASSERT_NOT_NULL(object);
+ SetOperandAt(0, object);
+ SetOperandAt(1, dependency ? dependency : object);
+
+ ASSERT(access.representation().IsHeapObject());
+ ASSERT(type.IsHeapObject());
+ set_representation(Representation::Tagged());
+
+ access.SetGVNFlags(this, LOAD);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
HObjectAccess access_;
+ const UniqueSet<Map>* maps_;
};
@@ -6291,7 +6432,7 @@ class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
: HUnaryOperation(function) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
};
@@ -6299,11 +6440,12 @@ class ArrayInstructionInterface {
public:
virtual HValue* GetKey() = 0;
virtual void SetKey(HValue* key) = 0;
- virtual void SetIndexOffset(uint32_t index_offset) = 0;
- virtual int MaxIndexOffsetBits() = 0;
+ virtual ElementsKind elements_kind() const = 0;
+ virtual void IncreaseBaseOffset(uint32_t base_offset) = 0;
+ virtual int MaxBaseOffsetBits() = 0;
virtual bool IsDehoisted() = 0;
virtual void SetDehoisted(bool is_dehoisted) = 0;
- virtual ~ArrayInstructionInterface() { };
+ virtual ~ArrayInstructionInterface() { }
static Representation KeyedAccessIndexRequirement(Representation r) {
return r.IsInteger32() || SmiValuesAre32Bits()
@@ -6312,6 +6454,8 @@ class ArrayInstructionInterface {
};
+static const int kDefaultKeyedHeaderOffsetSentinel = -1;
+
enum LoadKeyedHoleMode {
NEVER_RETURN_HOLE,
ALLOW_RETURN_HOLE
@@ -6325,10 +6469,18 @@ class HLoadKeyed V8_FINAL
ElementsKind);
DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*,
ElementsKind, LoadKeyedHoleMode);
+ DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*,
+ ElementsKind, LoadKeyedHoleMode, int);
bool is_external() const {
return IsExternalArrayElementsKind(elements_kind());
}
+ bool is_fixed_typed_array() const {
+ return IsFixedTypedArrayElementsKind(elements_kind());
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* dependency() {
@@ -6336,12 +6488,17 @@ class HLoadKeyed V8_FINAL
return OperandAt(2);
}
bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
- uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
- void SetIndexOffset(uint32_t index_offset) {
- bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
- }
- virtual int MaxIndexOffsetBits() {
- return kBitsForIndexOffset;
+ uint32_t base_offset() { return BaseOffsetField::decode(bit_field_); }
+ void IncreaseBaseOffset(uint32_t base_offset) {
+ // The base offset is usually simply the size of the array header, except
+ // that dehoisting adds an additional offset due to array index key
+ // manipulation, in which case it becomes (array header size +
+ // constant-offset-from-key * kPointerSize).
+ base_offset += BaseOffsetField::decode(bit_field_);
+ bit_field_ = BaseOffsetField::update(bit_field_, base_offset);
+ }
+ virtual int MaxBaseOffsetBits() {
+ return kBitsForBaseOffset;
}
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
@@ -6349,7 +6506,7 @@ class HLoadKeyed V8_FINAL
void SetDehoisted(bool is_dehoisted) {
bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
}
- ElementsKind elements_kind() const {
+ virtual ElementsKind elements_kind() const V8_OVERRIDE {
return ElementsKindField::decode(bit_field_);
}
LoadKeyedHoleMode hole_mode() const {
@@ -6357,9 +6514,10 @@ class HLoadKeyed V8_FINAL
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- // kind_fast: tagged[int32] (none)
- // kind_double: tagged[int32] (none)
- // kind_external: external[int32] (none)
+ // kind_fast: tagged[int32] (none)
+ // kind_double: tagged[int32] (none)
+ // kind_fixed_typed_array: tagged[int32] (none)
+ // kind_external: external[int32] (none)
if (index == 0) {
return is_external() ? Representation::External()
: Representation::Tagged();
@@ -6390,7 +6548,7 @@ class HLoadKeyed V8_FINAL
if (!other->IsLoadKeyed()) return false;
HLoadKeyed* other_load = HLoadKeyed::cast(other);
- if (IsDehoisted() && index_offset() != other_load->index_offset())
+ if (IsDehoisted() && base_offset() != other_load->base_offset())
return false;
return elements_kind() == other_load->elements_kind();
}
@@ -6400,16 +6558,21 @@ class HLoadKeyed V8_FINAL
HValue* key,
HValue* dependency,
ElementsKind elements_kind,
- LoadKeyedHoleMode mode = NEVER_RETURN_HOLE)
+ LoadKeyedHoleMode mode = NEVER_RETURN_HOLE,
+ int offset = kDefaultKeyedHeaderOffsetSentinel)
: bit_field_(0) {
+ offset = offset == kDefaultKeyedHeaderOffsetSentinel
+ ? GetDefaultHeaderSizeForElementsKind(elements_kind)
+ : offset;
bit_field_ = ElementsKindField::encode(elements_kind) |
- HoleModeField::encode(mode);
+ HoleModeField::encode(mode) |
+ BaseOffsetField::encode(offset);
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, dependency != NULL ? dependency : obj);
- if (!is_external()) {
+ if (!is_typed_elements()) {
// We can distinguish between storing doubles (holey and fast) and
// storing smis/objects by looking at elements_kind_.
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
@@ -6420,27 +6583,39 @@ class HLoadKeyed V8_FINAL
(!IsHoleyElementsKind(elements_kind) ||
mode == NEVER_RETURN_HOLE)) {
set_type(HType::Smi());
- set_representation(Representation::Smi());
+ if (SmiValuesAre32Bits() && !RequiresHoleCheck()) {
+ set_representation(Representation::Integer32());
+ } else {
+ set_representation(Representation::Smi());
+ }
} else {
set_representation(Representation::Tagged());
}
- SetGVNFlag(kDependsOnArrayElements);
+ SetDependsOnFlag(kArrayElements);
} else {
set_representation(Representation::Double());
- SetGVNFlag(kDependsOnDoubleArrayElements);
+ SetDependsOnFlag(kDoubleArrayElements);
}
} else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
set_representation(Representation::Double());
} else {
set_representation(Representation::Integer32());
}
- SetGVNFlag(kDependsOnExternalMemory);
+ if (is_external()) {
+ SetDependsOnFlag(kExternalMemory);
+ } else if (is_fixed_typed_array()) {
+ SetDependsOnFlag(kTypedArrayElements);
+ } else {
+ UNREACHABLE();
+ }
// Native code could change the specialized array.
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
SetFlag(kUseGVN);
@@ -6454,16 +6629,16 @@ class HLoadKeyed V8_FINAL
enum LoadKeyedBits {
kBitsForElementsKind = 5,
kBitsForHoleMode = 1,
- kBitsForIndexOffset = 25,
+ kBitsForBaseOffset = 25,
kBitsForIsDehoisted = 1,
kStartElementsKind = 0,
kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
- kStartIndexOffset = kStartHoleMode + kBitsForHoleMode,
- kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset
+ kStartBaseOffset = kStartHoleMode + kBitsForHoleMode,
+ kStartIsDehoisted = kStartBaseOffset + kBitsForBaseOffset
};
- STATIC_ASSERT((kBitsForElementsKind + kBitsForIndexOffset +
+ STATIC_ASSERT((kBitsForElementsKind + kBitsForBaseOffset +
kBitsForIsDehoisted) <= sizeof(uint32_t)*8);
STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
class ElementsKindField:
@@ -6472,8 +6647,8 @@ class HLoadKeyed V8_FINAL
class HoleModeField:
public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
{}; // NOLINT
- class IndexOffsetField:
- public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset>
+ class BaseOffsetField:
+ public BitField<uint32_t, kStartBaseOffset, kBitsForBaseOffset>
{}; // NOLINT
class IsDehoistedField:
public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
@@ -6512,10 +6687,23 @@ class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
};
+// Indicates whether the store is a store to an entry that was previously
+// initialized or not.
+enum StoreFieldOrKeyedMode {
+ // The entry could be either previously initialized or not.
+ INITIALIZING_STORE,
+ // At the time of this store it is guaranteed that the entry is already
+ // initialized.
+ STORE_TO_INITIALIZED_ENTRY
+};
+
+
class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
HObjectAccess, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P4(HStoreNamedField, HValue*,
+ HObjectAccess, HValue*, StoreFieldOrKeyedMode);
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
@@ -6536,8 +6724,12 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
field_representation().IsUInteger16() ||
field_representation().IsInteger32()) {
return Representation::Integer32();
- } else if (field_representation().IsDouble() ||
- field_representation().IsSmi()) {
+ } else if (field_representation().IsDouble()) {
+ return field_representation();
+ } else if (field_representation().IsSmi()) {
+ if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
+ return Representation::Integer32();
+ }
return field_representation();
} else if (field_representation().IsExternal()) {
return Representation::External();
@@ -6545,25 +6737,23 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
}
return Representation::Tagged();
}
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kChangesNewSpacePromotion);
- new_space_dominator_ = dominator;
+ ASSERT(side_effect == kNewSpacePromotion);
+ if (!FLAG_use_write_barrier_elimination) return false;
+ dominator_ = dominator;
+ return false;
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- void SkipWriteBarrier() { write_barrier_mode_ = SKIP_WRITE_BARRIER; }
- bool IsSkipWriteBarrier() const {
- return write_barrier_mode_ == SKIP_WRITE_BARRIER;
- }
-
HValue* object() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
HValue* transition() const { return OperandAt(2); }
HObjectAccess access() const { return access_; }
- HValue* new_space_dominator() const { return new_space_dominator_; }
+ HValue* dominator() const { return dominator_; }
bool has_transition() const { return has_transition_; }
+ StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
Handle<Map> transition_map() const {
if (has_transition()) {
@@ -6574,31 +6764,36 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
}
}
- void SetTransition(HConstant* map_constant, CompilationInfo* info) {
+ void SetTransition(HConstant* transition) {
ASSERT(!has_transition()); // Only set once.
- Handle<Map> map = Handle<Map>::cast(map_constant->handle(info->isolate()));
- if (map->CanBeDeprecated()) {
- map->AddDependentCompilationInfo(DependentCode::kTransitionGroup, info);
- }
- SetOperandAt(2, map_constant);
+ SetOperandAt(2, transition);
has_transition_ = true;
+ SetChangesFlag(kMaps);
}
bool NeedsWriteBarrier() {
- ASSERT(!(FLAG_track_double_fields && field_representation().IsDouble()) ||
- !has_transition());
- if (IsSkipWriteBarrier()) return false;
+ ASSERT(!field_representation().IsDouble() || !has_transition());
if (field_representation().IsDouble()) return false;
if (field_representation().IsSmi()) return false;
if (field_representation().IsInteger32()) return false;
if (field_representation().IsExternal()) return false;
return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
+ ReceiverObjectNeedsWriteBarrier(object(), value(), dominator());
}
bool NeedsWriteBarrierForMap() {
- if (IsSkipWriteBarrier()) return false;
- return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
+ return ReceiverObjectNeedsWriteBarrier(object(), transition(),
+ dominator());
+ }
+
+ SmiCheck SmiCheckForWriteBarrier() const {
+ if (field_representation().IsHeapObject()) return OMIT_SMI_CHECK;
+ if (value()->type().IsHeapObject()) return OMIT_SMI_CHECK;
+ return INLINE_SMI_CHECK;
+ }
+
+ PointersToHereCheck PointersToHereCheckForValue() const {
+ return PointersToHereCheckForObject(value(), dominator());
}
Representation field_representation() const {
@@ -6609,24 +6804,42 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(1, value);
}
+ bool CanBeReplacedWith(HStoreNamedField* that) const {
+ if (!this->access().Equals(that->access())) return false;
+ if (SmiValuesAre32Bits() &&
+ this->field_representation().IsSmi() &&
+ this->store_mode() == INITIALIZING_STORE &&
+ that->store_mode() == STORE_TO_INITIALIZED_ENTRY) {
+ // We cannot replace an initializing store to a smi field with a store to
+ // an initialized entry on 64-bit architectures (with 32-bit smis).
+ return false;
+ }
+ return true;
+ }
+
private:
HStoreNamedField(HValue* obj,
HObjectAccess access,
- HValue* val)
+ HValue* val,
+ StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
: access_(access),
- new_space_dominator_(NULL),
- write_barrier_mode_(UPDATE_WRITE_BARRIER),
- has_transition_(false) {
+ dominator_(NULL),
+ has_transition_(false),
+ store_mode_(store_mode) {
+ // Stores to a non-existing in-object property are allowed only on newly
+ // allocated objects (via HAllocate or HInnerAllocatedObject).
+ ASSERT(!access.IsInobject() || access.existing_inobject_property() ||
+ obj->IsAllocate() || obj->IsInnerAllocatedObject());
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetOperandAt(2, obj);
- access.SetGVNFlags(this, true);
+ access.SetGVNFlags(this, STORE);
}
HObjectAccess access_;
- HValue* new_space_dominator_;
- WriteBarrierMode write_barrier_mode_ : 1;
+ HValue* dominator_;
bool has_transition_ : 1;
+ StoreFieldOrKeyedMode store_mode_ : 1;
};
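
The rewritten HStoreNamedField above replaces the old explicit SkipWriteBarrier
bit with queries that derive the answer from the field representation, the
value's type, and the dominating allocation. As a rough standalone sketch of
the decision order in NeedsWriteBarrier(), using a simplified enum and flags of
our own rather than V8's actual types:

    #include <cstdio>

    // Standalone model, not V8 code: only tagged pointer fields can require
    // a write barrier, and a store into an object whose allocation still
    // dominates the store (so it cannot have been promoted out of new space
    // yet) is exempt.
    enum Rep { kTagged, kSmi, kDouble, kInteger32, kExternal };

    bool NeedsWriteBarrier(Rep field_rep, bool value_may_be_heap_pointer,
                           bool receiver_is_dominating_new_allocation) {
      if (field_rep == kDouble || field_rep == kSmi ||
          field_rep == kInteger32 || field_rep == kExternal) {
        return false;  // Non-pointer payloads never need a barrier.
      }
      if (receiver_is_dominating_new_allocation) return false;
      return value_may_be_heap_pointer;
    }

    int main() {
      std::printf("%d\n", NeedsWriteBarrier(kTagged, true, false));  // 1
      std::printf("%d\n", NeedsWriteBarrier(kSmi, true, false));     // 0
      std::printf("%d\n", NeedsWriteBarrier(kTagged, true, true));   // 0
    }
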
@@ -6634,12 +6847,12 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
Handle<String>, HValue*,
- StrictModeFlag);
+ StrictMode);
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
Handle<String> name() { return name_; }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+ StrictMode strict_mode() { return strict_mode_; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -6654,9 +6867,9 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
HValue* object,
Handle<String> name,
HValue* value,
- StrictModeFlag strict_mode_flag)
+ StrictMode strict_mode)
: name_(name),
- strict_mode_flag_(strict_mode_flag) {
+ strict_mode_(strict_mode) {
SetOperandAt(0, object);
SetOperandAt(1, value);
SetOperandAt(2, context);
@@ -6664,7 +6877,7 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
}
Handle<String> name_;
- StrictModeFlag strict_mode_flag_;
+ StrictMode strict_mode_;
};
@@ -6673,12 +6886,17 @@ class HStoreKeyed V8_FINAL
public:
DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
ElementsKind);
+ DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*,
+ ElementsKind, StoreFieldOrKeyedMode);
+ DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
+ ElementsKind, StoreFieldOrKeyedMode, int);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- // kind_fast: tagged[int32] = tagged
- // kind_double: tagged[int32] = double
- // kind_smi : tagged[int32] = smi
- // kind_external: external[int32] = (double | int32)
+ // kind_fast: tagged[int32] = tagged
+ // kind_double: tagged[int32] = double
+ // kind_smi : tagged[int32] = smi
+ // kind_fixed_typed_array: tagged[int32] = (double | int32)
+ // kind_external: external[int32] = (double | int32)
if (index == 0) {
return is_external() ? Representation::External()
: Representation::Tagged();
@@ -6688,50 +6906,70 @@ class HStoreKeyed V8_FINAL
}
ASSERT_EQ(index, 2);
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
+ return RequiredValueRepresentation(elements_kind_, store_mode_);
+ }
+
+ static Representation RequiredValueRepresentation(
+ ElementsKind kind, StoreFieldOrKeyedMode mode) {
+ if (IsDoubleOrFloatElementsKind(kind)) {
return Representation::Double();
}
- if (IsFastSmiElementsKind(elements_kind())) {
+ if (kind == FAST_SMI_ELEMENTS && SmiValuesAre32Bits() &&
+ mode == STORE_TO_INITIALIZED_ENTRY) {
+ return Representation::Integer32();
+ }
+
+ if (IsFastSmiElementsKind(kind)) {
return Representation::Smi();
}
- return is_external() ? Representation::Integer32()
- : Representation::Tagged();
+ return IsExternalArrayElementsKind(kind) ||
+ IsFixedTypedArrayElementsKind(kind)
+ ? Representation::Integer32()
+ : Representation::Tagged();
}
bool is_external() const {
return IsExternalArrayElementsKind(elements_kind());
}
+ bool is_fixed_typed_array() const {
+ return IsFixedTypedArrayElementsKind(elements_kind());
+ }
+
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+
virtual Representation observed_input_representation(int index) V8_OVERRIDE {
if (index < 2) return RequiredInputRepresentation(index);
if (IsUninitialized()) {
return Representation::None();
}
- if (IsFastSmiElementsKind(elements_kind())) {
- return Representation::Smi();
- }
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
- return Representation::Double();
- }
- if (is_external()) {
- return Representation::Integer32();
- }
+ Representation r = RequiredValueRepresentation(elements_kind_, store_mode_);
// For fast object elements kinds, don't assume anything.
- return Representation::None();
+ if (r.IsTagged()) return Representation::None();
+ return r;
}
- HValue* elements() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
+ HValue* elements() const { return OperandAt(0); }
+ HValue* key() const { return OperandAt(1); }
+ HValue* value() const { return OperandAt(2); }
bool value_is_smi() const {
return IsFastSmiElementsKind(elements_kind_);
}
+ StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- virtual int MaxIndexOffsetBits() {
+ uint32_t base_offset() { return base_offset_; }
+ void IncreaseBaseOffset(uint32_t base_offset) {
+ // The base offset is usually simply the size of the array header, except
+ // that dehoisting adds an additional offset due to array-index-key
+ // manipulation, in which case it becomes (array header size +
+ // constant-offset-from-key * kPointerSize).
+ base_offset_ += base_offset;
+ }
+ virtual int MaxBaseOffsetBits() {
return 31 - ElementsKindToShiftSize(elements_kind_);
}
HValue* GetKey() { return key(); }
@@ -6747,23 +6985,28 @@ class HStoreKeyed V8_FINAL
return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
}
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kChangesNewSpacePromotion);
- new_space_dominator_ = dominator;
+ ASSERT(side_effect == kNewSpacePromotion);
+ dominator_ = dominator;
+ return false;
}
- HValue* new_space_dominator() const { return new_space_dominator_; }
+ HValue* dominator() const { return dominator_; }
bool NeedsWriteBarrier() {
if (value_is_smi()) {
return false;
} else {
return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(elements(), new_space_dominator());
+ ReceiverObjectNeedsWriteBarrier(elements(), value(), dominator());
}
}
+ PointersToHereCheck PointersToHereCheckForValue() const {
+ return PointersToHereCheckForObject(value(), dominator());
+ }
+
bool NeedsCanonicalization();
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -6772,56 +7015,67 @@ class HStoreKeyed V8_FINAL
private:
HStoreKeyed(HValue* obj, HValue* key, HValue* val,
- ElementsKind elements_kind)
+ ElementsKind elements_kind,
+ StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE,
+ int offset = kDefaultKeyedHeaderOffsetSentinel)
: elements_kind_(elements_kind),
- index_offset_(0),
+ base_offset_(offset == kDefaultKeyedHeaderOffsetSentinel
+ ? GetDefaultHeaderSizeForElementsKind(elements_kind)
+ : offset),
is_dehoisted_(false),
is_uninitialized_(false),
- new_space_dominator_(NULL) {
+ store_mode_(store_mode),
+ dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
if (IsFastObjectElementsKind(elements_kind)) {
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnNewSpacePromotion);
+ SetDependsOnFlag(kNewSpacePromotion);
}
if (is_external()) {
- SetGVNFlag(kChangesExternalMemory);
+ SetChangesFlag(kExternalMemory);
SetFlag(kAllowUndefinedAsNaN);
} else if (IsFastDoubleElementsKind(elements_kind)) {
- SetGVNFlag(kChangesDoubleArrayElements);
+ SetChangesFlag(kDoubleArrayElements);
} else if (IsFastSmiElementsKind(elements_kind)) {
- SetGVNFlag(kChangesArrayElements);
+ SetChangesFlag(kArrayElements);
+ } else if (is_fixed_typed_array()) {
+ SetChangesFlag(kTypedArrayElements);
+ SetFlag(kAllowUndefinedAsNaN);
} else {
- SetGVNFlag(kChangesArrayElements);
+ SetChangesFlag(kArrayElements);
}
// The external and fixed typed-array integer elements kinds are truncating.
- if (elements_kind >= EXTERNAL_BYTE_ELEMENTS &&
- elements_kind <= EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ if ((elements_kind >= EXTERNAL_INT8_ELEMENTS &&
+ elements_kind <= EXTERNAL_UINT32_ELEMENTS) ||
+ (elements_kind >= UINT8_ELEMENTS &&
+ elements_kind <= INT32_ELEMENTS)) {
SetFlag(kTruncatingToInt32);
}
}
ElementsKind elements_kind_;
- uint32_t index_offset_;
+ uint32_t base_offset_;
bool is_dehoisted_ : 1;
bool is_uninitialized_ : 1;
- HValue* new_space_dominator_;
+ StoreFieldOrKeyedMode store_mode_ : 1;
+ HValue* dominator_;
};
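
IncreaseBaseOffset() above folds a constant component of the key into the
instruction's byte offset. A minimal standalone illustration of that
arithmetic, where the header size and shift values are examples rather than
V8 constants:

    #include <cstdint>
    #include <cstdio>

    // Standalone sketch, not V8 code: after dehoisting, the byte offset that
    // a constant key component contributed is carried in base_offset_.
    uint32_t DehoistedBaseOffset(uint32_t header_size, uint32_t key_constant,
                                 int element_shift) {
      return header_size + (key_constant << element_shift);
    }

    int main() {
      // E.g. an 8-byte array header, a key folded by 3, 8-byte elements.
      std::printf("%u\n", DehoistedBaseOffset(8, 3, 3));  // 8 + 3*8 = 32
    }
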
class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
- HValue*, HValue*, StrictModeFlag);
+ HValue*, HValue*, StrictMode);
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
HValue* context() { return OperandAt(3); }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+ StrictMode strict_mode() { return strict_mode_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// tagged[tagged] = tagged
@@ -6837,8 +7091,8 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
HValue* object,
HValue* key,
HValue* value,
- StrictModeFlag strict_mode_flag)
- : strict_mode_flag_(strict_mode_flag) {
+ StrictMode strict_mode)
+ : strict_mode_(strict_mode) {
SetOperandAt(0, object);
SetOperandAt(1, key);
SetOperandAt(2, value);
@@ -6846,7 +7100,7 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
SetAllSideEffects();
}
- StrictModeFlag strict_mode_flag_;
+ StrictMode strict_mode_;
};
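
The StrictModeFlag to StrictMode rename above is mechanical, but the
parameter's role is behavioral: in JavaScript, a failed property store throws
a TypeError under strict mode and is silently ignored in sloppy mode. A toy
model of that contract, ours rather than V8's runtime:

    #include <cstdio>
    #include <stdexcept>

    enum StrictMode { SLOPPY, STRICT };

    // Toy model, not V8 code: strict-mode stores report failure by throwing,
    // sloppy-mode stores fail silently.
    bool StoreProperty(bool writable, StrictMode mode) {
      if (writable) return true;
      if (mode == STRICT) throw std::runtime_error("TypeError: read-only");
      return false;
    }

    int main() {
      std::printf("%d\n", StoreProperty(false, SLOPPY));  // 0: ignored
    }
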
@@ -6883,6 +7137,8 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
transitioned_map_ == instr->transitioned_map_;
}
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
HTransitionElementsKind(HValue* context,
HValue* object,
@@ -6895,10 +7151,10 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(0, object);
SetOperandAt(1, context);
SetFlag(kUseGVN);
- SetGVNFlag(kChangesElementsKind);
+ SetChangesFlag(kElementsKind);
if (!IsSimpleMapChangeTransition(from_kind_, to_kind_)) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kElementsPointer);
+ SetChangesFlag(kNewSpacePromotion);
}
set_representation(Representation::Tagged());
}
@@ -6916,45 +7172,55 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
HValue* context,
HValue* left,
HValue* right,
- StringAddFlags flags = STRING_ADD_CHECK_NONE);
+ PretenureFlag pretenure_flag = NOT_TENURED,
+ StringAddFlags flags = STRING_ADD_CHECK_BOTH,
+ Handle<AllocationSite> allocation_site =
+ Handle<AllocationSite>::null());
StringAddFlags flags() const { return flags_; }
+ PretenureFlag pretenure_flag() const { return pretenure_flag_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(StringAdd)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ return flags_ == HStringAdd::cast(other)->flags_ &&
+ pretenure_flag_ == HStringAdd::cast(other)->pretenure_flag_;
+ }
private:
- HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags)
- : HBinaryOperation(context, left, right, HType::String()), flags_(flags) {
+ HStringAdd(HValue* context,
+ HValue* left,
+ HValue* right,
+ PretenureFlag pretenure_flag,
+ StringAddFlags flags,
+ Handle<AllocationSite> allocation_site)
+ : HBinaryOperation(context, left, right, HType::String()),
+ flags_(flags), pretenure_flag_(pretenure_flag) {
set_representation(Representation::Tagged());
- if (MightHaveSideEffects()) {
- SetAllSideEffects();
- } else {
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetFlag(kUseGVN);
+ SetDependsOnFlag(kMaps);
+ SetChangesFlag(kNewSpacePromotion);
+ if (FLAG_trace_pretenuring) {
+ PrintF("HStringAdd with AllocationSite %p %s\n",
+ allocation_site.is_null()
+ ? static_cast<void*>(NULL)
+ : static_cast<void*>(*allocation_site),
+ pretenure_flag == TENURED ? "tenured" : "not tenured");
}
}
- bool MightHaveSideEffects() const {
- return flags_ != STRING_ADD_CHECK_NONE &&
- (left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved());
- }
-
// No side-effects except possible allocation:
- // NOTE: this instruction does not call ToString() on its inputs, when flags_
- // is set to STRING_ADD_CHECK_NONE.
- virtual bool IsDeletable() const V8_OVERRIDE {
- return !MightHaveSideEffects();
- }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
const StringAddFlags flags_;
+ const PretenureFlag pretenure_flag_;
};
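
The new DataEquals() above is what keeps GVN sound after this change: two
HStringAdd instructions may only be commoned when every semantics-affecting
parameter matches, so the comparison must now cover both the check flags and
the pretenuring decision. A minimal standalone analogue:

    #include <cstdio>

    // Standalone analogue, not V8 code: value numbering may merge two
    // operations only if all semantics-affecting fields are equal.
    struct StringAddKey {
      int flags;
      bool tenured;
      bool DataEquals(const StringAddKey& other) const {
        return flags == other.flags && tenured == other.tenured;
      }
    };

    int main() {
      StringAddKey a{0, false}, b{0, true};
      std::printf("%d\n", a.DataEquals(b));  // 0: pretenuring differs
    }
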
@@ -6991,9 +7257,9 @@ class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(2, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnStringChars);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kStringChars);
+ SetChangesFlag(kNewSpacePromotion);
}
// No side effects: runtime function assumes string + number inputs.
@@ -7027,7 +7293,7 @@ class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, char_code);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -7122,7 +7388,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
bool pretenure() const { return pretenure_; }
bool has_no_literals() const { return has_no_literals_; }
bool is_generator() const { return is_generator_; }
- LanguageMode language_mode() const { return language_mode_; }
+ StrictMode strict_mode() const { return strict_mode_; }
private:
HFunctionLiteral(HValue* context,
@@ -7133,10 +7399,10 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
pretenure_(pretenure),
has_no_literals_(shared->num_literals() == 0),
is_generator_(shared->is_generator()),
- language_mode_(shared->language_mode()) {
+ strict_mode_(shared->strict_mode()) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -7145,7 +7411,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
bool pretenure_ : 1;
bool has_no_literals_ : 1;
bool is_generator_ : 1;
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
};
@@ -7207,7 +7473,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
private:
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
// This instruction is not marked as kChangesMaps, but does
// change the map of the input operand. Use it only when creating
@@ -7215,7 +7481,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
ASSERT(value->IsCallRuntime());
#ifdef DEBUG
const Runtime::Function* function = HCallRuntime::cast(value)->function();
- ASSERT(function->function_id == Runtime::kCreateObjectLiteral);
+ ASSERT(function->function_id == Runtime::kHiddenCreateObjectLiteral);
#endif
}
@@ -7223,25 +7489,6 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
};
-class HValueOf V8_FINAL : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HValueOf, HValue*);
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf)
-
- private:
- explicit HValueOf(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
-
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
class HDateField V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
@@ -7305,7 +7552,7 @@ class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnStringChars);
+ SetDependsOnFlag(kStringChars);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -7344,7 +7591,7 @@ class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
SetOperandAt(2, index);
SetOperandAt(3, value);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesStringChars);
+ SetChangesFlag(kStringChars);
}
String::Encoding encoding_;
@@ -7362,11 +7609,14 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual HType CalculateInferredType() V8_OVERRIDE {
- return HType::Tagged();
+ if (value()->type().IsHeapObject()) return value()->type();
+ return HType::HeapObject();
}
- HValue* value() { return OperandAt(0); }
- HValue* map() { return OperandAt(1); }
+ HValue* value() const { return OperandAt(0); }
+ HValue* map() const { return OperandAt(1); }
+
+ virtual HValue* Canonicalize() V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
@@ -7378,14 +7628,14 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
}
private:
- HCheckMapValue(HValue* value,
- HValue* map) {
+ HCheckMapValue(HValue* value, HValue* map)
+ : HTemplateInstruction<2>(HType::HeapObject()) {
SetOperandAt(0, value);
SetOperandAt(1, map);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
}
};
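
CalculateInferredType() above now narrows instead of discarding information:
a map check can only succeed on a heap object, so anything weaker than
HeapObject is tightened while a more specific incoming type is preserved. A
rough standalone model, with a three-value type enum of our own:

    #include <cstdio>

    enum Type { kTagged, kHeapObject, kString };

    // Standalone model, not V8 code: keep a type that is already at least
    // as specific as HeapObject; otherwise narrow to HeapObject.
    Type InferredTypeAfterMapCheck(Type value_type) {
      return value_type == kTagged ? kHeapObject : value_type;
    }

    int main() {
      std::printf("%d %d\n", InferredTypeAfterMapCheck(kTagged),
                  InferredTypeAfterMapCheck(kString));  // 1 2
    }
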
@@ -7464,15 +7714,22 @@ class HForInCacheArray V8_FINAL : public HTemplateInstruction<2> {
class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
public:
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadFieldByIndex, HValue*, HValue*);
+
HLoadFieldByIndex(HValue* object,
HValue* index) {
SetOperandAt(0, object);
SetOperandAt(1, index);
+ SetChangesFlag(kNewSpacePromotion);
set_representation(Representation::Tagged());
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ if (index == 1) {
+ return Representation::Smi();
+ } else {
+ return Representation::Tagged();
+ }
}
HValue* object() { return OperandAt(0); }
@@ -7491,6 +7748,57 @@ class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
};
+class HStoreFrameContext: public HUnaryOperation {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HStoreFrameContext, HValue*);
+
+ HValue* context() { return OperandAt(0); }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext)
+ private:
+ explicit HStoreFrameContext(HValue* context)
+ : HUnaryOperation(context) {
+ set_representation(Representation::Tagged());
+ SetChangesFlag(kContextSlots);
+ }
+};
+
+
+class HAllocateBlockContext: public HTemplateInstruction<2> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P3(HAllocateBlockContext, HValue*,
+ HValue*, Handle<ScopeInfo>);
+ HValue* context() { return OperandAt(0); }
+ HValue* function() { return OperandAt(1); }
+ Handle<ScopeInfo> scope_info() { return scope_info_; }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext)
+
+ private:
+ HAllocateBlockContext(HValue* context,
+ HValue* function,
+ Handle<ScopeInfo> scope_info)
+ : scope_info_(scope_info) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, function);
+ set_representation(Representation::Tagged());
+ }
+
+ Handle<ScopeInfo> scope_info_;
+};
+
+
+
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
diff --git a/chromium/v8/src/hydrogen-load-elimination.cc b/chromium/v8/src/hydrogen-load-elimination.cc
index f3b574847f8..5cefcf75258 100644
--- a/chromium/v8/src/hydrogen-load-elimination.cc
+++ b/chromium/v8/src/hydrogen-load-elimination.cc
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-alias-analysis.h"
-#include "hydrogen-load-elimination.h"
-#include "hydrogen-instructions.h"
-#include "hydrogen-flow-engine.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-alias-analysis.h"
+#include "src/hydrogen-load-elimination.h"
+#include "src/hydrogen-instructions.h"
+#include "src/hydrogen-flow-engine.h"
namespace v8 {
namespace internal {
@@ -43,18 +20,15 @@ static const int kMaxTrackedObjects = 5;
class HFieldApproximation : public ZoneObject {
public: // Just a data blob.
HValue* object_;
- HLoadNamedField* last_load_;
HValue* last_value_;
HFieldApproximation* next_;
// Recursively copy the entire linked list of field approximations.
HFieldApproximation* Copy(Zone* zone) {
- if (this == NULL) return NULL;
HFieldApproximation* copy = new(zone) HFieldApproximation();
copy->object_ = this->object_;
- copy->last_load_ = this->last_load_;
copy->last_value_ = this->last_value_;
- copy->next_ = this->next_->Copy(zone);
+ copy->next_ = this->next_ == NULL ? NULL : this->next_->Copy(zone);
return copy;
}
};
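
The change to HFieldApproximation::Copy() above is worth pausing on: the old
code began with "if (this == NULL) return NULL;" and recursed unconditionally,
which relies on calling a member function through a null pointer, undefined
behavior in C++. The new version checks next_ at the call site instead. A
self-contained sketch of the corrected pattern:

    #include <cstdio>

    // Standalone sketch, not V8 code: copy a singly linked list with the
    // null check at the recursive call site rather than via this == NULL.
    struct Node {
      int value;
      Node* next;
      Node* Copy() const {
        return new Node{value, next == nullptr ? nullptr : next->Copy()};
      }
    };

    int main() {
      Node tail{2, nullptr};
      Node head{1, &tail};
      Node* copy = head.Copy();
      std::printf("%d %d\n", copy->value, copy->next->value);  // 1 2
      delete copy->next;
      delete copy;
    }
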
@@ -78,7 +52,7 @@ class HLoadEliminationTable : public ZoneObject {
FieldOf(l->access()),
l->object()->ActualValue()->id()));
HValue* result = load(l);
- if (result != instr) {
+ if (result != instr && l->CanBeReplacedWith(result)) {
// The load can be replaced with a previous load or a value.
TRACE((" replace L%d -> v%d\n", instr->id(), result->id()));
instr->DeleteAndReplaceWith(result);
@@ -100,26 +74,33 @@ class HLoadEliminationTable : public ZoneObject {
}
break;
}
+ case HValue::kTransitionElementsKind: {
+ HTransitionElementsKind* t = HTransitionElementsKind::cast(instr);
+ HValue* object = t->object()->ActualValue();
+ KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL);
+ KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ break;
+ }
default: {
- if (instr->CheckGVNFlag(kChangesInobjectFields)) {
+ if (instr->CheckChangesFlag(kInobjectFields)) {
TRACE((" kill-all i%d\n", instr->id()));
Kill();
break;
}
- if (instr->CheckGVNFlag(kChangesMaps)) {
+ if (instr->CheckChangesFlag(kMaps)) {
TRACE((" kill-maps i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
}
- if (instr->CheckGVNFlag(kChangesElementsKind)) {
+ if (instr->CheckChangesFlag(kElementsKind)) {
TRACE((" kill-elements-kind i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckGVNFlag(kChangesElementsPointer)) {
+ if (instr->CheckChangesFlag(kElementsPointer)) {
TRACE((" kill-elements i%d\n", instr->id()));
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckGVNFlag(kChangesOsrEntries)) {
+ if (instr->CheckChangesFlag(kOsrEntries)) {
TRACE((" kill-osr i%d\n", instr->id()));
Kill();
}
@@ -134,13 +115,39 @@ class HLoadEliminationTable : public ZoneObject {
return this;
}
- // Support for global analysis with HFlowEngine: Copy state to successor block.
- HLoadEliminationTable* Copy(HBasicBlock* succ, Zone* zone) {
+ // Support for global analysis with HFlowEngine: Merge given state with
+ // the other incoming state.
+ static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state,
+ HBasicBlock* succ_block,
+ HLoadEliminationTable* pred_state,
+ HBasicBlock* pred_block,
+ Zone* zone) {
+ ASSERT(pred_state != NULL);
+ if (succ_state == NULL) {
+ return pred_state->Copy(succ_block, pred_block, zone);
+ } else {
+ return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+ }
+ }
+
+ // Support for global analysis with HFlowEngine: Given state merged with all
+ // the other incoming states, prepare it for use.
+ static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
+ HBasicBlock* block,
+ Zone* zone) {
+ ASSERT(state != NULL);
+ return state;
+ }
+
+ private:
+ // Copy state to successor block.
+ HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block,
+ Zone* zone) {
HLoadEliminationTable* copy =
new(zone) HLoadEliminationTable(zone, aliasing_);
copy->EnsureFields(fields_.length());
for (int i = 0; i < fields_.length(); i++) {
- copy->fields_[i] = fields_[i]->Copy(zone);
+ copy->fields_[i] = fields_[i] == NULL ? NULL : fields_[i]->Copy(zone);
}
if (FLAG_trace_load_elimination) {
TRACE((" copy-to B%d\n", succ->block_id()));
@@ -149,10 +156,9 @@ class HLoadEliminationTable : public ZoneObject {
return copy;
}
- // Support for global analysis with HFlowEngine: Merge this state with
- // the other incoming state.
- HLoadEliminationTable* Merge(HBasicBlock* succ,
- HLoadEliminationTable* that, Zone* zone) {
+ // Merge this state with the other incoming state.
+ HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that,
+ HBasicBlock* that_block, Zone* zone) {
if (that->fields_.length() < fields_.length()) {
// Drop fields not in the other table.
fields_.Rewind(that->fields_.length());
@@ -178,6 +184,10 @@ class HLoadEliminationTable : public ZoneObject {
approx = approx->next_;
}
}
+ if (FLAG_trace_load_elimination) {
+ TRACE((" merge-to B%d\n", succ->block_id()));
+ Print();
+ }
return this;
}
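
Merge() above intersects the incoming tables: an approximation survives the
join only when both predecessors agree on the object and its last known
value; otherwise the entry is dropped. Reduced to a single field, the lattice
operation is simply this (standalone model, not V8 code):

    #include <cstdio>

    // At a control-flow join, a remembered value is kept only if both
    // incoming states agree; otherwise it is forgotten.
    int MergeKnownValue(int a, int b, int unknown) {
      return a == b ? a : unknown;
    }

    int main() {
      std::printf("%d\n", MergeKnownValue(7, 7, -1));  // 7: paths agree
      std::printf("%d\n", MergeKnownValue(7, 9, -1));  // -1: forgotten
    }
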
@@ -189,6 +199,10 @@ class HLoadEliminationTable : public ZoneObject {
// load or store for this object and field exists, return the new value with
// which the load should be replaced. Otherwise, return {instr}.
HValue* load(HLoadNamedField* instr) {
+ // There must be no loads from non-observable in-object properties.
+ ASSERT(!instr->access().IsInobject() ||
+ instr->access().existing_inobject_property());
+
int field = FieldOf(instr->access());
if (field < 0) return instr;
@@ -197,12 +211,14 @@ class HLoadEliminationTable : public ZoneObject {
if (approx->last_value_ == NULL) {
// Load is not redundant. Fill out a new entry.
- approx->last_load_ = instr;
approx->last_value_ = instr;
return instr;
- } else {
+ } else if (approx->last_value_->block()->EqualToOrDominates(
+ instr->block())) {
// Eliminate the load. Reuse previously stored value or load instruction.
return approx->last_value_;
+ } else {
+ return instr;
}
}
@@ -211,18 +227,26 @@ class HLoadEliminationTable : public ZoneObject {
// the stored values are the same), return NULL indicating that this store
// instruction is redundant. Otherwise, return {instr}.
HValue* store(HStoreNamedField* instr) {
+ if (instr->access().IsInobject() &&
+ !instr->access().existing_inobject_property()) {
+ TRACE((" skipping non existing property initialization store\n"));
+ return instr;
+ }
+
int field = FieldOf(instr->access());
if (field < 0) return KillIfMisaligned(instr);
HValue* object = instr->object()->ActualValue();
HValue* value = instr->value();
- // Kill non-equivalent may-alias entries.
- KillFieldInternal(object, field, value);
if (instr->has_transition()) {
- // A transition store alters the map of the object.
- // TODO(titzer): remember the new map (a constant) for the object.
+ // A transition introduces a new field and alters the map of the object.
+ // Since the field in the object is new, it cannot alias existing entries.
+ // TODO(titzer): introduce a constant for the new map and remember it.
KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ } else {
+ // Kill non-equivalent may-alias entries.
+ KillFieldInternal(object, field, value);
}
HFieldApproximation* approx = FindOrCreate(object, field);
@@ -231,7 +255,6 @@ class HLoadEliminationTable : public ZoneObject {
return NULL;
} else {
// The store is not redundant. Update the entry.
- approx->last_load_ = NULL;
approx->last_value_ = value;
return instr;
}
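
The load()/store() pair above implements the core redundancy rule: a load is
replaced when the field's last recorded value dominates it, and a store is
dropped when it writes back the value already recorded. A compact standalone
rendering of the store half, using a simplified state type of our own:

    #include <cstdio>

    // Standalone model, not V8 code.
    struct FieldState {
      const void* last_value = nullptr;
    };

    bool StoreIsRedundant(FieldState& f, const void* v) {
      if (f.last_value == v) return true;  // Same value: drop the store.
      f.last_value = v;                    // Otherwise remember it.
      return false;
    }

    int main() {
      FieldState f;
      int x = 42;
      std::printf("%d\n", StoreIsRedundant(f, &x));  // 0: first store kept
      std::printf("%d\n", StoreIsRedundant(f, &x));  // 1: now redundant
    }
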
@@ -314,7 +337,6 @@ class HLoadEliminationTable : public ZoneObject {
// Insert the entry at the head of the list.
approx->object_ = object;
- approx->last_load_ = NULL;
approx->last_value_ = NULL;
approx->next_ = fields_[field];
fields_[field] = approx;
@@ -397,7 +419,6 @@ class HLoadEliminationTable : public ZoneObject {
PrintF(" field %d: ", i);
for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
PrintF("[o%d =", a->object_->id());
- if (a->last_load_ != NULL) PrintF(" L%d", a->last_load_->id());
if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
PrintF("] ");
}
@@ -415,11 +436,7 @@ class HLoadEliminationTable : public ZoneObject {
class HLoadEliminationEffects : public ZoneObject {
public:
explicit HLoadEliminationEffects(Zone* zone)
- : zone_(zone),
- maps_stored_(false),
- fields_stored_(false),
- elements_stored_(false),
- stores_(5, zone) { }
+ : zone_(zone), stores_(5, zone) { }
inline bool Disabled() {
return false; // Effects are _not_ disabled.
@@ -427,37 +444,25 @@ class HLoadEliminationEffects : public ZoneObject {
// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
- switch (instr->opcode()) {
- case HValue::kStoreNamedField: {
- stores_.Add(HStoreNamedField::cast(instr), zone_);
- break;
- }
- case HValue::kOsrEntry: {
- // Kill everything. Loads must not be hoisted past the OSR entry.
- maps_stored_ = true;
- fields_stored_ = true;
- elements_stored_ = true;
- }
- default: {
- fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
- maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
- maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
- elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
- elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
- }
+ if (instr->IsStoreNamedField()) {
+ stores_.Add(HStoreNamedField::cast(instr), zone_);
+ } else {
+ flags_.Add(instr->ChangesFlags());
}
}
// Apply these effects to the given load elimination table.
void Apply(HLoadEliminationTable* table) {
- if (fields_stored_) {
+ // Loads must not be hoisted past the OSR entry, therefore we kill
+ // everything if we see an OSR entry.
+ if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) {
table->Kill();
return;
}
- if (maps_stored_) {
+ if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
table->KillOffset(JSObject::kMapOffset);
}
- if (elements_stored_) {
+ if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) {
table->KillOffset(JSObject::kElementsOffset);
}
@@ -469,9 +474,7 @@ class HLoadEliminationEffects : public ZoneObject {
// Union these effects with the other effects.
void Union(HLoadEliminationEffects* that, Zone* zone) {
- maps_stored_ |= that->maps_stored_;
- fields_stored_ |= that->fields_stored_;
- elements_stored_ |= that->elements_stored_;
+ flags_.Add(that->flags_);
for (int i = 0; i < that->stores_.length(); i++) {
stores_.Add(that->stores_[i], zone);
}
@@ -479,9 +482,7 @@ class HLoadEliminationEffects : public ZoneObject {
private:
Zone* zone_;
- bool maps_stored_ : 1;
- bool fields_stored_ : 1;
- bool elements_stored_ : 1;
+ GVNFlagSet flags_;
ZoneList<HStoreNamedField*> stores_;
};
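
Collapsing the three booleans into a GVNFlagSet, as the effects class above
now does, lets Apply() phrase each kill in terms of the same flags the
instructions themselves declare. A standalone sketch with std::bitset
standing in for GVNFlagSet and enum values of our own:

    #include <bitset>
    #include <cstdio>

    enum Flag { kMaps, kElementsKind, kElementsPointer, kInobjectFields,
                kCount };
    using FlagSet = std::bitset<kCount>;

    // Standalone sketch, not V8 code: the map offset must be killed if
    // either the maps or the elements kind may have changed, mirroring
    // Apply() above.
    bool MustKillMapOffset(const FlagSet& flags) {
      return flags.test(kMaps) || flags.test(kElementsKind);
    }

    int main() {
      FlagSet flags;
      flags.set(kElementsKind);
      std::printf("%d\n", MustKillMapOffset(flags));  // 1
    }
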
diff --git a/chromium/v8/src/hydrogen-load-elimination.h b/chromium/v8/src/hydrogen-load-elimination.h
index ef6f71fa113..e6b432c6aca 100644
--- a/chromium/v8/src/hydrogen-load-elimination.h
+++ b/chromium/v8/src/hydrogen-load-elimination.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_LOAD_ELIMINATION_H_
#define V8_HYDROGEN_LOAD_ELIMINATION_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-mark-deoptimize.cc b/chromium/v8/src/hydrogen-mark-deoptimize.cc
index c0236e91cbb..998be073e22 100644
--- a/chromium/v8/src/hydrogen-mark-deoptimize.cc
+++ b/chromium/v8/src/hydrogen-mark-deoptimize.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "hydrogen-mark-deoptimize.h"
+#include "src/hydrogen-mark-deoptimize.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-mark-deoptimize.h b/chromium/v8/src/hydrogen-mark-deoptimize.h
index 30f35b3dec5..52a6ef96c9e 100644
--- a/chromium/v8/src/hydrogen-mark-deoptimize.h
+++ b/chromium/v8/src/hydrogen-mark-deoptimize.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_MARK_DEOPTIMIZE_H_
#define V8_HYDROGEN_MARK_DEOPTIMIZE_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-mark-unreachable.cc b/chromium/v8/src/hydrogen-mark-unreachable.cc
index d7c5ed2b180..05779ca524b 100644
--- a/chromium/v8/src/hydrogen-mark-unreachable.cc
+++ b/chromium/v8/src/hydrogen-mark-unreachable.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "hydrogen-mark-unreachable.h"
+#include "src/hydrogen-mark-unreachable.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-mark-unreachable.h b/chromium/v8/src/hydrogen-mark-unreachable.h
index 9ecc6e9f164..d43d22bbba0 100644
--- a/chromium/v8/src/hydrogen-mark-unreachable.h
+++ b/chromium/v8/src/hydrogen-mark-unreachable.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_MARK_UNREACHABLE_H_
#define V8_HYDROGEN_MARK_UNREACHABLE_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-minus-zero.cc b/chromium/v8/src/hydrogen-minus-zero.cc
deleted file mode 100644
index 316e0f5077c..00000000000
--- a/chromium/v8/src/hydrogen-minus-zero.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-minus-zero.h"
-
-namespace v8 {
-namespace internal {
-
-void HComputeMinusZeroChecksPhase::Run() {
- const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- for (int i = 0; i < blocks->length(); ++i) {
- for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- if (current->IsChange()) {
- HChange* change = HChange::cast(current);
- // Propagate flags for negative zero checks upwards from conversions
- // int32-to-tagged and int32-to-double.
- Representation from = change->value()->representation();
- ASSERT(from.Equals(change->from()));
- if (from.IsSmiOrInteger32()) {
- ASSERT(change->to().IsTagged() ||
- change->to().IsDouble() ||
- change->to().IsSmiOrInteger32());
- ASSERT(visited_.IsEmpty());
- PropagateMinusZeroChecks(change->value());
- visited_.Clear();
- }
- } else if (current->IsCompareMinusZeroAndBranch()) {
- HCompareMinusZeroAndBranch* check =
- HCompareMinusZeroAndBranch::cast(current);
- if (check->value()->representation().IsSmiOrInteger32()) {
- ASSERT(visited_.IsEmpty());
- PropagateMinusZeroChecks(check->value());
- visited_.Clear();
- }
- }
- }
- }
-}
-
-
-void HComputeMinusZeroChecksPhase::PropagateMinusZeroChecks(HValue* value) {
- for (HValue* current = value;
- current != NULL && !visited_.Contains(current->id());
- current = current->EnsureAndPropagateNotMinusZero(&visited_)) {
- // For phis, we must propagate the check to all of its inputs.
- if (current->IsPhi()) {
- visited_.Add(current->id());
- HPhi* phi = HPhi::cast(current);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- PropagateMinusZeroChecks(phi->OperandAt(i));
- }
- break;
- }
-
- // For multiplication, division, and Math.min/max(), we must propagate
- // to the left and the right side.
- if (current->IsMul() || current->IsDiv() || current->IsMathMinMax()) {
- HBinaryOperation* operation = HBinaryOperation::cast(current);
- operation->EnsureAndPropagateNotMinusZero(&visited_);
- PropagateMinusZeroChecks(operation->left());
- PropagateMinusZeroChecks(operation->right());
- }
- }
-}
-
-} } // namespace v8::internal
diff --git a/chromium/v8/src/hydrogen-minus-zero.h b/chromium/v8/src/hydrogen-minus-zero.h
deleted file mode 100644
index d23ec1196b3..00000000000
--- a/chromium/v8/src/hydrogen-minus-zero.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HYDROGEN_MINUS_ZERO_H_
-#define V8_HYDROGEN_MINUS_ZERO_H_
-
-#include "hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HComputeMinusZeroChecksPhase : public HPhase {
- public:
- explicit HComputeMinusZeroChecksPhase(HGraph* graph)
- : HPhase("H_Compute minus zero checks", graph),
- visited_(graph->GetMaximumValueID(), zone()) { }
-
- void Run();
-
- private:
- void PropagateMinusZeroChecks(HValue* value);
-
- BitVector visited_;
-
- DISALLOW_COPY_AND_ASSIGN(HComputeMinusZeroChecksPhase);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_HYDROGEN_MINUS_ZERO_H_
diff --git a/chromium/v8/src/hydrogen-osr.cc b/chromium/v8/src/hydrogen-osr.cc
index 6e39df6aa95..b2b15f1b97b 100644
--- a/chromium/v8/src/hydrogen-osr.cc
+++ b/chromium/v8/src/hydrogen-osr.cc
@@ -1,32 +1,9 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen.h"
-#include "hydrogen-osr.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-osr.h b/chromium/v8/src/hydrogen-osr.h
index ae72ce650c5..433548c1a8e 100644
--- a/chromium/v8/src/hydrogen-osr.h
+++ b/chromium/v8/src/hydrogen-osr.h
@@ -1,36 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_OSR_H_
#define V8_HYDROGEN_OSR_H_
-#include "hydrogen.h"
-#include "ast.h"
-#include "zone.h"
+#include "src/hydrogen.h"
+#include "src/ast.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-range-analysis.cc b/chromium/v8/src/hydrogen-range-analysis.cc
index 76fd5f35f28..64d1dc0b1e6 100644
--- a/chromium/v8/src/hydrogen-range-analysis.cc
+++ b/chromium/v8/src/hydrogen-range-analysis.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-range-analysis.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-range-analysis.h"
namespace v8 {
namespace internal {
@@ -78,7 +55,29 @@ void HRangeAnalysisPhase::Run() {
// Go through all instructions of the current block.
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- InferRange(it.Current());
+ HValue* value = it.Current();
+ InferRange(value);
+
+ // Compute the bailout-on-minus-zero flag.
+ if (value->IsChange()) {
+ HChange* instr = HChange::cast(value);
+ // Propagate flags for negative zero checks upwards from conversions
+ // int32-to-tagged and int32-to-double.
+ Representation from = instr->value()->representation();
+ ASSERT(from.Equals(instr->from()));
+ if (from.IsSmiOrInteger32()) {
+ ASSERT(instr->to().IsTagged() ||
+ instr->to().IsDouble() ||
+ instr->to().IsSmiOrInteger32());
+ PropagateMinusZeroChecks(instr->value());
+ }
+ } else if (value->IsCompareMinusZeroAndBranch()) {
+ HCompareMinusZeroAndBranch* instr =
+ HCompareMinusZeroAndBranch::cast(value);
+ if (instr->value()->representation().IsSmiOrInteger32()) {
+ PropagateMinusZeroChecks(instr->value());
+ }
+ }
}
// Continue analysis in all dominated blocks.
@@ -101,6 +100,22 @@ void HRangeAnalysisPhase::Run() {
block = NULL;
}
}
+
+ // The ranges are not valid anymore due to SSI vs. SSA!
+ PoisonRanges();
+}
+
+
+void HRangeAnalysisPhase::PoisonRanges() {
+#ifdef DEBUG
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->HasRange()) instr->PoisonRange();
+ }
+ }
+#endif
}
@@ -197,4 +212,79 @@ void HRangeAnalysisPhase::AddRange(HValue* value, Range* range) {
}
+void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) {
+ ASSERT(worklist_.is_empty());
+ ASSERT(in_worklist_.IsEmpty());
+
+ AddToWorklist(value);
+ while (!worklist_.is_empty()) {
+ value = worklist_.RemoveLast();
+
+ if (value->IsPhi()) {
+ // For phis, we must propagate the check to all of its inputs.
+ HPhi* phi = HPhi::cast(value);
+ for (int i = 0; i < phi->OperandCount(); ++i) {
+ AddToWorklist(phi->OperandAt(i));
+ }
+ } else if (value->IsUnaryMathOperation()) {
+ HUnaryMathOperation* instr = HUnaryMathOperation::cast(value);
+ if (instr->representation().IsSmiOrInteger32() &&
+ !instr->value()->representation().Equals(instr->representation())) {
+ if (instr->value()->range() == NULL ||
+ instr->value()->range()->CanBeMinusZero()) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ }
+ }
+ if (instr->RequiredInputRepresentation(0).IsSmiOrInteger32() &&
+ instr->representation().Equals(
+ instr->RequiredInputRepresentation(0))) {
+ AddToWorklist(instr->value());
+ }
+ } else if (value->IsChange()) {
+ HChange* instr = HChange::cast(value);
+ if (!instr->from().IsSmiOrInteger32() &&
+ !instr->CanTruncateToInt32() &&
+ (instr->value()->range() == NULL ||
+ instr->value()->range()->CanBeMinusZero())) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ }
+ } else if (value->IsForceRepresentation()) {
+ HForceRepresentation* instr = HForceRepresentation::cast(value);
+ AddToWorklist(instr->value());
+ } else if (value->IsMod()) {
+ HMod* instr = HMod::cast(value);
+ if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ AddToWorklist(instr->left());
+ }
+ } else if (value->IsDiv() || value->IsMul()) {
+ HBinaryOperation* instr = HBinaryOperation::cast(value);
+ if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ }
+ AddToWorklist(instr->right());
+ AddToWorklist(instr->left());
+ } else if (value->IsMathFloorOfDiv()) {
+ HMathFloorOfDiv* instr = HMathFloorOfDiv::cast(value);
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ } else if (value->IsAdd() || value->IsSub()) {
+ HBinaryOperation* instr = HBinaryOperation::cast(value);
+ if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+ // Propagate to the left argument. If the left argument cannot be -0,
+ // then the result of the add/sub operation cannot be either.
+ AddToWorklist(instr->left());
+ }
+ } else if (value->IsMathMinMax()) {
+ HMathMinMax* instr = HMathMinMax::cast(value);
+ AddToWorklist(instr->right());
+ AddToWorklist(instr->left());
+ }
+ }
+
+ in_worklist_.Clear();
+ ASSERT(in_worklist_.IsEmpty());
+ ASSERT(worklist_.is_empty());
+}
+
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/hydrogen-range-analysis.h b/chromium/v8/src/hydrogen-range-analysis.h
index a1e9737c5e0..1269ec7529c 100644
--- a/chromium/v8/src/hydrogen-range-analysis.h
+++ b/chromium/v8/src/hydrogen-range-analysis.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_RANGE_ANALYSIS_H_
#define V8_HYDROGEN_RANGE_ANALYSIS_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
@@ -37,7 +14,9 @@ namespace internal {
class HRangeAnalysisPhase : public HPhase {
public:
explicit HRangeAnalysisPhase(HGraph* graph)
- : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()) { }
+ : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()),
+ in_worklist_(graph->GetMaximumValueID(), zone()),
+ worklist_(32, zone()) {}
void Run();
@@ -49,8 +28,20 @@ class HRangeAnalysisPhase : public HPhase {
void InferRange(HValue* value);
void RollBackTo(int index);
void AddRange(HValue* value, Range* range);
+ void AddToWorklist(HValue* value) {
+ if (in_worklist_.Contains(value->id())) return;
+ in_worklist_.Add(value->id());
+ worklist_.Add(value, zone());
+ }
+ void PropagateMinusZeroChecks(HValue* value);
+ void PoisonRanges();
ZoneList<HValue*> changed_ranges_;
+
+ BitVector in_worklist_;
+ ZoneList<HValue*> worklist_;
+
+ DISALLOW_COPY_AND_ASSIGN(HRangeAnalysisPhase);
};
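
A minimal standalone sketch of the dedup-worklist pattern that AddToWorklist implements above, with std::vector standing in for V8's ZoneList and BitVector and a toy Node in place of HValue (the names here are illustrative, not V8 API): each value id enters the worklist at most once, so PropagateMinusZeroChecks visits every reachable input exactly once even when use-def chains share nodes.

#include <cstdio>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> inputs;
};

static void VisitTransitively(Node* root, int max_id) {
  std::vector<bool> in_worklist(max_id, false);  // plays the BitVector role
  std::vector<Node*> worklist;                   // plays the ZoneList role
  auto add = [&](Node* node) {
    if (in_worklist[node->id]) return;  // already queued: skip duplicates
    in_worklist[node->id] = true;
    worklist.push_back(node);
  };
  add(root);
  while (!worklist.empty()) {
    Node* node = worklist.back();
    worklist.pop_back();
    std::printf("visit #%d\n", node->id);  // here: e.g. flag propagation
    for (Node* input : node->inputs) add(input);
  }
}

int main() {
  Node a{0, {}};
  Node b{1, {&a}};
  Node c{2, {&a, &b}};  // 'a' is reachable along two paths, visited once
  VisitTransitively(&c, 3);
  return 0;
}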
diff --git a/chromium/v8/src/hydrogen-redundant-phi.cc b/chromium/v8/src/hydrogen-redundant-phi.cc
index 1263833dac9..67d534eda01 100644
--- a/chromium/v8/src/hydrogen-redundant-phi.cc
+++ b/chromium/v8/src/hydrogen-redundant-phi.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "hydrogen-redundant-phi.h"
+#include "src/hydrogen-redundant-phi.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-redundant-phi.h b/chromium/v8/src/hydrogen-redundant-phi.h
index 960ae69c95f..7f5ec4e52dd 100644
--- a/chromium/v8/src/hydrogen-redundant-phi.h
+++ b/chromium/v8/src/hydrogen-redundant-phi.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_REDUNDANT_PHI_H_
#define V8_HYDROGEN_REDUNDANT_PHI_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-removable-simulates.cc b/chromium/v8/src/hydrogen-removable-simulates.cc
index f952832431c..43e9d18b880 100644
--- a/chromium/v8/src/hydrogen-removable-simulates.cc
+++ b/chromium/v8/src/hydrogen-removable-simulates.cc
@@ -1,94 +1,180 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-removable-simulates.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-flow-engine.h"
+#include "src/hydrogen-instructions.h"
+#include "src/hydrogen-removable-simulates.h"
namespace v8 {
namespace internal {
-void HMergeRemovableSimulatesPhase::Run() {
- ZoneList<HSimulate*> mergelist(2, zone());
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- // Make sure the merge list is empty at the start of a block.
- ASSERT(mergelist.is_empty());
- // Nasty heuristic: Never remove the first simulate in a block. This
- // just so happens to have a beneficial effect on register allocation.
- bool first = true;
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- if (current->IsLeaveInlined()) {
- // Never fold simulates from inlined environments into simulates
- // in the outer environment.
- // (Before each HEnterInlined, there is a non-foldable HSimulate
- // anyway, so we get the barrier in the other direction for free.)
- // Simply remove all accumulated simulates without merging. This
- // is safe because simulates after instructions with side effects
- // are never added to the merge list.
- while (!mergelist.is_empty()) {
- mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
- }
- continue;
- }
- if (current->IsReturn()) {
- // Drop mergeable simulates in the list. This is safe because
- // simulates after instructions with side effects are never added
- // to the merge list.
- while (!mergelist.is_empty()) {
- mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
+class State : public ZoneObject {
+ public:
+ explicit State(Zone* zone)
+ : zone_(zone), mergelist_(2, zone), first_(true), mode_(NORMAL) { }
+
+ State* Process(HInstruction* instr, Zone* zone) {
+ if (FLAG_trace_removable_simulates) {
+ PrintF("[%s with state %p in B%d: #%d %s]\n",
+ mode_ == NORMAL ? "processing" : "collecting",
+ reinterpret_cast<void*>(this), instr->block()->block_id(),
+ instr->id(), instr->Mnemonic());
+ }
+ // Forward-merge "trains" of simulates after an instruction with observable
+ // side effects to keep live ranges short.
+ if (mode_ == COLLECT_CONSECUTIVE_SIMULATES) {
+ if (instr->IsSimulate()) {
+ HSimulate* current_simulate = HSimulate::cast(instr);
+ if (current_simulate->is_candidate_for_removal() &&
+ !current_simulate->ast_id().IsNone()) {
+ Remember(current_simulate);
+ return this;
}
- continue;
- }
- // Skip the non-simulates and the first simulate.
- if (!current->IsSimulate()) continue;
- if (first) {
- first = false;
- continue;
- }
- HSimulate* current_simulate = HSimulate::cast(current);
- if ((current_simulate->previous()->HasObservableSideEffects() &&
- !current_simulate->next()->IsSimulate()) ||
- !current_simulate->is_candidate_for_removal()) {
- // This simulate is not suitable for folding.
- // Fold the ones accumulated so far.
- current_simulate->MergeWith(&mergelist);
- continue;
- } else {
- // Accumulate this simulate for folding later on.
- mergelist.Add(current_simulate, zone());
}
+ FlushSimulates();
+ mode_ = NORMAL;
}
-
- if (!mergelist.is_empty()) {
+ // Ensure there's a non-foldable HSimulate before an HEnterInlined to avoid
+ // folding across HEnterInlined.
+ ASSERT(!(instr->IsEnterInlined() &&
+ HSimulate::cast(instr->previous())->is_candidate_for_removal()));
+ if (instr->IsLeaveInlined() || instr->IsReturn()) {
+ // Never fold simulates from inlined environments into simulates in the
+ // outer environment. Simply remove all accumulated simulates without
+ // merging. This is safe because simulates after instructions with side
+ // effects are never added to the merge list. The same reasoning holds for
+ // return instructions.
+ RemoveSimulates();
+ return this;
+ }
+ if (instr->IsControlInstruction()) {
// Merge the accumulated simulates at the end of the block.
- HSimulate* last = mergelist.RemoveLast();
- last->MergeWith(&mergelist);
+ FlushSimulates();
+ return this;
}
+ // Skip the non-simulates and the first simulate.
+ if (!instr->IsSimulate()) return this;
+ if (first_) {
+ first_ = false;
+ return this;
+ }
+ HSimulate* current_simulate = HSimulate::cast(instr);
+ if (!current_simulate->is_candidate_for_removal()) {
+ Remember(current_simulate);
+ FlushSimulates();
+ } else if (current_simulate->ast_id().IsNone()) {
+ ASSERT(current_simulate->next()->IsEnterInlined());
+ FlushSimulates();
+ } else if (current_simulate->previous()->HasObservableSideEffects()) {
+ Remember(current_simulate);
+ mode_ = COLLECT_CONSECUTIVE_SIMULATES;
+ } else {
+ Remember(current_simulate);
+ }
+
+ return this;
}
+
+ static State* Merge(State* succ_state,
+ HBasicBlock* succ_block,
+ State* pred_state,
+ HBasicBlock* pred_block,
+ Zone* zone) {
+ return (succ_state == NULL)
+ ? pred_state->Copy(succ_block, pred_block, zone)
+ : succ_state->Merge(succ_block, pred_state, pred_block, zone);
+ }
+
+ static State* Finish(State* state, HBasicBlock* block, Zone* zone) {
+ if (FLAG_trace_removable_simulates) {
+ PrintF("[preparing state %p for B%d]\n", reinterpret_cast<void*>(state),
+ block->block_id());
+ }
+ // For our current local analysis, we should not remember simulates across
+ // block boundaries.
+ ASSERT(!state->HasRememberedSimulates());
+ // Nasty heuristic: Never remove the first simulate in a block. This
+ // just so happens to have a beneficial effect on register allocation.
+ state->first_ = true;
+ return state;
+ }
+
+ private:
+ explicit State(const State& other)
+ : zone_(other.zone_),
+ mergelist_(other.mergelist_, other.zone_),
+ first_(other.first_),
+ mode_(other.mode_) { }
+
+ enum Mode { NORMAL, COLLECT_CONSECUTIVE_SIMULATES };
+
+ bool HasRememberedSimulates() const { return !mergelist_.is_empty(); }
+
+ void Remember(HSimulate* sim) {
+ mergelist_.Add(sim, zone_);
+ }
+
+ void FlushSimulates() {
+ if (HasRememberedSimulates()) {
+ mergelist_.RemoveLast()->MergeWith(&mergelist_);
+ }
+ }
+
+ void RemoveSimulates() {
+ while (HasRememberedSimulates()) {
+ mergelist_.RemoveLast()->DeleteAndReplaceWith(NULL);
+ }
+ }
+
+ State* Copy(HBasicBlock* succ_block, HBasicBlock* pred_block, Zone* zone) {
+ State* copy = new(zone) State(*this);
+ if (FLAG_trace_removable_simulates) {
+ PrintF("[copy state %p from B%d to new state %p for B%d]\n",
+ reinterpret_cast<void*>(this), pred_block->block_id(),
+ reinterpret_cast<void*>(copy), succ_block->block_id());
+ }
+ return copy;
+ }
+
+ State* Merge(HBasicBlock* succ_block,
+ State* pred_state,
+ HBasicBlock* pred_block,
+ Zone* zone) {
+ // For our current local analysis, we should not remember simulates across
+ // block boundaries.
+ ASSERT(!pred_state->HasRememberedSimulates());
+ ASSERT(!HasRememberedSimulates());
+ if (FLAG_trace_removable_simulates) {
+ PrintF("[merge state %p from B%d into %p for B%d]\n",
+ reinterpret_cast<void*>(pred_state), pred_block->block_id(),
+ reinterpret_cast<void*>(this), succ_block->block_id());
+ }
+ return this;
+ }
+
+ Zone* zone_;
+ ZoneList<HSimulate*> mergelist_;
+ bool first_;
+ Mode mode_;
+};
+
+
+// We don't use effects here.
+class Effects : public ZoneObject {
+ public:
+ explicit Effects(Zone* zone) { }
+ bool Disabled() { return true; }
+ void Process(HInstruction* instr, Zone* zone) { }
+ void Apply(State* state) { }
+ void Union(Effects* that, Zone* zone) { }
+};
+
+
+void HMergeRemovableSimulatesPhase::Run() {
+ HFlowEngine<State, Effects> engine(graph(), zone());
+ State* state = new(zone()) State(zone());
+ engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), state);
}
} } // namespace v8::internal
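
The rewrite above trades the hand-rolled block loop for HFlowEngine, which threads a State through the graph, calling Process() per instruction, Merge() at join points, and Finish() per block. A toy driver under assumed shapes (Instr, Block, and the Analyze loop are invented for illustration; only the Process/Finish contract mirrors the code above):

#include <cstdio>
#include <vector>

struct Instr { const char* mnemonic; };
struct Block { int id; std::vector<Instr> instrs; };

struct State {
  bool first = true;  // "never remove the first simulate" heuristic
  State* Process(const Instr& instr) {
    std::printf("process %s (first simulate protected: %d)\n",
                instr.mnemonic, static_cast<int>(first));
    first = false;
    return this;
  }
  static State* Finish(State* state, const Block& block) {
    std::printf("prepare state for B%d\n", block.id);
    state->first = true;  // reset the per-block heuristic
    return state;
  }
};

static void Analyze(std::vector<Block>& blocks, State* state) {
  for (Block& block : blocks) {
    state = State::Finish(state, block);
    for (const Instr& instr : block.instrs) state = state->Process(instr);
  }
}

int main() {
  std::vector<Block> blocks = {{0, {{"Simulate"}, {"Add"}}},
                               {1, {{"Simulate"}}}};
  State state;
  Analyze(blocks, &state);
  return 0;
}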
diff --git a/chromium/v8/src/hydrogen-removable-simulates.h b/chromium/v8/src/hydrogen-removable-simulates.h
index f5bcd6ddfa8..9bd25056bdf 100644
--- a/chromium/v8/src/hydrogen-removable-simulates.h
+++ b/chromium/v8/src/hydrogen-removable-simulates.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_REMOVABLE_SIMULATES_H_
#define V8_HYDROGEN_REMOVABLE_SIMULATES_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-representation-changes.cc b/chromium/v8/src/hydrogen-representation-changes.cc
index 07fc8be38c0..6cca53650c8 100644
--- a/chromium/v8/src/hydrogen-representation-changes.cc
+++ b/chromium/v8/src/hydrogen-representation-changes.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-representation-changes.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-representation-changes.h"
namespace v8 {
namespace internal {
@@ -61,10 +38,11 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(
value, to, is_truncating_to_smi, is_truncating_to_int);
- if (use_value->operand_position(use_index) != RelocInfo::kNoPosition) {
+ if (!use_value->operand_position(use_index).IsUnknown()) {
new_value->set_position(use_value->operand_position(use_index));
} else {
- ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !graph()->info()->IsOptimizing());
}
}
@@ -73,28 +51,56 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
}
+static bool IsNonDeoptingIntToSmiChange(HChange* change) {
+ Representation from_rep = change->from();
+ Representation to_rep = change->to();
+ // Flags indicating Uint32 operations are set in a later Hydrogen phase.
+ ASSERT(!change->CheckFlag(HValue::kUint32));
+ return from_rep.IsInteger32() && to_rep.IsSmi() && SmiValuesAre32Bits();
+}
+
+
void HRepresentationChangesPhase::InsertRepresentationChangesForValue(
HValue* value) {
Representation r = value->representation();
if (r.IsNone()) return;
- if (value->HasNoUses()) return;
+ if (value->HasNoUses()) {
+ if (value->IsForceRepresentation()) value->DeleteAndReplaceWith(NULL);
+ return;
+ }
for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use_value = it.value();
int use_index = it.index();
Representation req = use_value->RequiredInputRepresentation(use_index);
if (req.IsNone() || req.Equals(r)) continue;
+
+ // If this is an HForceRepresentation instruction, and an HChange has been
+ // inserted above it, examine the input representation of the HChange. If
+ // that's int32, and this HForceRepresentation use is int32, and int32 to
+ // smi changes can't cause deoptimisation, set the input of the use to the
+ // input of the HChange.
+ if (value->IsForceRepresentation()) {
+ HValue* input = HForceRepresentation::cast(value)->value();
+ if (input->IsChange()) {
+ HChange* change = HChange::cast(input);
+ if (change->from().Equals(req) && IsNonDeoptingIntToSmiChange(change)) {
+ use_value->SetOperandAt(use_index, change->value());
+ continue;
+ }
+ }
+ }
InsertRepresentationChangeForUse(value, use_value, use_index, req);
}
if (value->HasNoUses()) {
- ASSERT(value->IsConstant());
+ ASSERT(value->IsConstant() || value->IsForceRepresentation());
value->DeleteAndReplaceWith(NULL);
- }
-
- // The only purpose of a HForceRepresentation is to represent the value
- // after the (possible) HChange instruction. We make it disappear.
- if (value->IsForceRepresentation()) {
- value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
+ } else {
+ // The only purpose of a HForceRepresentation is to represent the value
+ // after the (possible) HChange instruction. We make it disappear.
+ if (value->IsForceRepresentation()) {
+ value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
+ }
}
}
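
Background for IsNonDeoptingIntToSmiChange above, as a sketch: when SmiValuesAre32Bits() holds (64-bit targets), the smi payload is a full 32 bits, so every int32 value can be tagged without overflow and the int32-to-smi change can never trigger a deopt. A simplified tagging scheme (illustrative, not V8's exact encoding):

#include <cassert>
#include <cstdint>

// Payload in the upper 32 bits, tag bits zero below (simplified).
static uint64_t TagSmi(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
}

static int32_t UntagSmi(uint64_t smi) {
  return static_cast<int32_t>(smi >> 32);
}

int main() {
  // Both extremes of the int32 range round-trip: no value needs a deopt path.
  assert(UntagSmi(TagSmi(INT32_MIN)) == INT32_MIN);
  assert(UntagSmi(TagSmi(INT32_MAX)) == INT32_MAX);
  return 0;
}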
diff --git a/chromium/v8/src/hydrogen-representation-changes.h b/chromium/v8/src/hydrogen-representation-changes.h
index 77e899b60b3..2f5958a70f3 100644
--- a/chromium/v8/src/hydrogen-representation-changes.h
+++ b/chromium/v8/src/hydrogen-representation-changes.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_REPRESENTATION_CHANGES_H_
#define V8_HYDROGEN_REPRESENTATION_CHANGES_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-sce.cc b/chromium/v8/src/hydrogen-sce.cc
index a6995f647af..b7ab9fd7db6 100644
--- a/chromium/v8/src/hydrogen-sce.cc
+++ b/chromium/v8/src/hydrogen-sce.cc
@@ -1,32 +1,9 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "hydrogen-sce.h"
-#include "v8.h"
+#include "src/hydrogen-sce.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -43,7 +20,7 @@ void HStackCheckEliminationPhase::Run() {
HBasicBlock* dominator = back_edge;
while (true) {
for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) {
- if (it.Current()->IsCall()) {
+ if (it.Current()->HasStackCheck()) {
block->loop_information()->stack_check()->Eliminate();
break;
}
diff --git a/chromium/v8/src/hydrogen-sce.h b/chromium/v8/src/hydrogen-sce.h
index 55e153e0ed5..276d3486764 100644
--- a/chromium/v8/src/hydrogen-sce.h
+++ b/chromium/v8/src/hydrogen-sce.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_SCE_H_
#define V8_HYDROGEN_SCE_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-store-elimination.cc b/chromium/v8/src/hydrogen-store-elimination.cc
new file mode 100644
index 00000000000..eb2bcf4a947
--- /dev/null
+++ b/chromium/v8/src/hydrogen-store-elimination.cc
@@ -0,0 +1,121 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-store-elimination.h"
+#include "src/hydrogen-instructions.h"
+
+namespace v8 {
+namespace internal {
+
+#define TRACE(x) if (FLAG_trace_store_elimination) PrintF x
+
+// Performs a block-by-block local analysis for removable stores.
+void HStoreEliminationPhase::Run() {
+ GVNFlagSet flags; // Use GVN flags as an approximation for some instructions.
+ flags.RemoveAll();
+
+ flags.Add(kArrayElements);
+ flags.Add(kArrayLengths);
+ flags.Add(kStringLengths);
+ flags.Add(kBackingStoreFields);
+ flags.Add(kDoubleArrayElements);
+ flags.Add(kDoubleFields);
+ flags.Add(kElementsPointer);
+ flags.Add(kInobjectFields);
+ flags.Add(kExternalMemory);
+ flags.Add(kStringChars);
+ flags.Add(kTypedArrayElements);
+
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ unobserved_.Rewind(0);
+ HBasicBlock* block = graph()->blocks()->at(i);
+ if (!block->IsReachable()) continue;
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->CheckFlag(HValue::kIsDead)) continue;
+
+ // TODO(titzer): eliminate unobserved HStoreKeyed instructions too.
+ switch (instr->opcode()) {
+ case HValue::kStoreNamedField:
+ // Remove any unobserved stores overwritten by this store.
+ ProcessStore(HStoreNamedField::cast(instr));
+ break;
+ case HValue::kLoadNamedField:
+ // Observe any unobserved stores on this object + field.
+ ProcessLoad(HLoadNamedField::cast(instr));
+ break;
+ default:
+ ProcessInstr(instr, flags);
+ break;
+ }
+ }
+ }
+}
+
+
+void HStoreEliminationPhase::ProcessStore(HStoreNamedField* store) {
+ HValue* object = store->object()->ActualValue();
+ int i = 0;
+ while (i < unobserved_.length()) {
+ HStoreNamedField* prev = unobserved_.at(i);
+ if (aliasing_->MustAlias(object, prev->object()->ActualValue()) &&
+ prev->CanBeReplacedWith(store)) {
+ // This store is guaranteed to overwrite the previous store.
+ prev->DeleteAndReplaceWith(NULL);
+ TRACE(("++ Unobserved store S%d overwritten by S%d\n",
+ prev->id(), store->id()));
+ unobserved_.Remove(i);
+ } else {
+ // TODO(titzer): remove map word clearing from folded allocations.
+ i++;
+ }
+ }
+ // Only non-transitioning stores are removable.
+ if (!store->has_transition()) {
+ TRACE(("-- Might remove store S%d\n", store->id()));
+ unobserved_.Add(store, zone());
+ }
+}
+
+
+void HStoreEliminationPhase::ProcessLoad(HLoadNamedField* load) {
+ HValue* object = load->object()->ActualValue();
+ int i = 0;
+ while (i < unobserved_.length()) {
+ HStoreNamedField* prev = unobserved_.at(i);
+ if (aliasing_->MayAlias(object, prev->object()->ActualValue()) &&
+ load->access().Equals(prev->access())) {
+ TRACE(("-- Observed store S%d by load L%d\n", prev->id(), load->id()));
+ unobserved_.Remove(i);
+ } else {
+ i++;
+ }
+ }
+}
+
+
+void HStoreEliminationPhase::ProcessInstr(HInstruction* instr,
+ GVNFlagSet flags) {
+ if (unobserved_.length() == 0) return; // Nothing to do.
+ if (instr->CanDeoptimize()) {
+ TRACE(("-- Observed stores at I%d (%s might deoptimize)\n",
+ instr->id(), instr->Mnemonic()));
+ unobserved_.Rewind(0);
+ return;
+ }
+ if (instr->CheckChangesFlag(kNewSpacePromotion)) {
+ TRACE(("-- Observed stores at I%d (%s might GC)\n",
+ instr->id(), instr->Mnemonic()));
+ unobserved_.Rewind(0);
+ return;
+ }
+ if (instr->DependsOnFlags().ContainsAnyOf(flags)) {
+ TRACE(("-- Observed stores at I%d (GVN flags of %s)\n",
+ instr->id(), instr->Mnemonic()));
+ unobserved_.Rewind(0);
+ return;
+ }
+}
+
+} } // namespace v8::internal
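
A toy model of the unobserved-store bookkeeping in the new phase (plain equality stands in for the MustAlias/MayAlias queries, and the Store struct is invented for illustration): a store stays removable until a later store overwrites the same field, while any load of that field marks it observed and so keeps it.

#include <cstdio>
#include <vector>

struct Store { int id; int object; int field; bool dead; };

struct StoreElim {
  std::vector<Store*> unobserved;

  void ProcessStore(Store* store) {
    for (size_t i = 0; i < unobserved.size();) {
      Store* prev = unobserved[i];
      if (prev->object == store->object && prev->field == store->field) {
        prev->dead = true;  // overwritten before anything could read it
        std::printf("++ store S%d overwritten by S%d\n", prev->id, store->id);
        unobserved.erase(unobserved.begin() + i);
      } else {
        ++i;
      }
    }
    unobserved.push_back(store);  // still a removal candidate
  }

  void ProcessLoad(int object, int field) {
    for (size_t i = 0; i < unobserved.size();) {
      Store* prev = unobserved[i];
      if (prev->object == object && prev->field == field) {
        unobserved.erase(unobserved.begin() + i);  // observed: must be kept
      } else {
        ++i;
      }
    }
  }
};

int main() {
  Store s1{1, 0, 0, false}, s2{2, 0, 0, false};
  StoreElim elim;
  elim.ProcessStore(&s1);
  elim.ProcessStore(&s2);  // overwrites s1, which was never loaded
  std::printf("S1 removable: %d\n", static_cast<int>(s1.dead));
  return 0;
}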
diff --git a/chromium/v8/src/hydrogen-store-elimination.h b/chromium/v8/src/hydrogen-store-elimination.h
new file mode 100644
index 00000000000..35a23a26602
--- /dev/null
+++ b/chromium/v8/src/hydrogen-store-elimination.h
@@ -0,0 +1,34 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HYDROGEN_STORE_ELIMINATION_H_
+#define V8_HYDROGEN_STORE_ELIMINATION_H_
+
+#include "src/hydrogen.h"
+#include "src/hydrogen-alias-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+class HStoreEliminationPhase : public HPhase {
+ public:
+ explicit HStoreEliminationPhase(HGraph* graph)
+ : HPhase("H_Store elimination", graph),
+ unobserved_(10, zone()),
+ aliasing_() { }
+
+ void Run();
+ private:
+ ZoneList<HStoreNamedField*> unobserved_;
+ HAliasAnalyzer* aliasing_;
+
+ void ProcessStore(HStoreNamedField* store);
+ void ProcessLoad(HLoadNamedField* load);
+ void ProcessInstr(HInstruction* instr, GVNFlagSet flags);
+};
+
+
+} } // namespace v8::internal
+
+#endif
diff --git a/chromium/v8/src/hydrogen-types.cc b/chromium/v8/src/hydrogen-types.cc
new file mode 100644
index 00000000000..2da8a594ee7
--- /dev/null
+++ b/chromium/v8/src/hydrogen-types.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-types.h"
+
+#include "src/types-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+// static
+template <class T>
+HType HType::FromType(typename T::TypeHandle type) {
+ if (T::Any()->Is(type)) return HType::Any();
+ if (type->Is(T::None())) return HType::None();
+ if (type->Is(T::SignedSmall())) return HType::Smi();
+ if (type->Is(T::Number())) return HType::TaggedNumber();
+ if (type->Is(T::Null())) return HType::Null();
+ if (type->Is(T::String())) return HType::String();
+ if (type->Is(T::Boolean())) return HType::Boolean();
+ if (type->Is(T::Undefined())) return HType::Undefined();
+ if (type->Is(T::Array())) return HType::JSArray();
+ if (type->Is(T::Object())) return HType::JSObject();
+ return HType::Tagged();
+}
+
+
+// static
+template
+HType HType::FromType<Type>(Type* type);
+
+
+// static
+template
+HType HType::FromType<HeapType>(Handle<HeapType> type);
+
+
+// static
+HType HType::FromValue(Handle<Object> value) {
+ if (value->IsSmi()) return HType::Smi();
+ if (value->IsNull()) return HType::Null();
+ if (value->IsHeapNumber()) return HType::HeapNumber();
+ if (value->IsString()) return HType::String();
+ if (value->IsBoolean()) return HType::Boolean();
+ if (value->IsUndefined()) return HType::Undefined();
+ if (value->IsJSArray()) return HType::JSArray();
+ if (value->IsJSObject()) return HType::JSObject();
+ ASSERT(value->IsHeapObject());
+ return HType::HeapObject();
+}
+
+
+const char* HType::ToString() const {
+ // Note: The c1visualizer syntax for locals allows only a sequence of the
+ // following characters: A-Za-z0-9_-|:
+ switch (kind_) {
+ #define DEFINE_CASE(Name, mask) case k##Name: return #Name;
+ HTYPE_LIST(DEFINE_CASE)
+ #undef DEFINE_CASE
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/hydrogen-types.h b/chromium/v8/src/hydrogen-types.h
new file mode 100644
index 00000000000..e924a6b549e
--- /dev/null
+++ b/chromium/v8/src/hydrogen-types.h
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef HYDROGEN_TYPES_H_
+#define HYDROGEN_TYPES_H_
+
+#include <climits>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+template <typename T> class Handle;
+class Object;
+
+#define HTYPE_LIST(V) \
+ V(Any, 0x0) /* 0000 0000 0000 0000 */ \
+ V(Tagged, 0x1) /* 0000 0000 0000 0001 */ \
+ V(TaggedPrimitive, 0x5) /* 0000 0000 0000 0101 */ \
+ V(TaggedNumber, 0xd) /* 0000 0000 0000 1101 */ \
+ V(Smi, 0x1d) /* 0000 0000 0001 1101 */ \
+ V(HeapObject, 0x21) /* 0000 0000 0010 0001 */ \
+ V(HeapPrimitive, 0x25) /* 0000 0000 0010 0101 */ \
+ V(Null, 0x27) /* 0000 0000 0010 0111 */ \
+ V(HeapNumber, 0x2d) /* 0000 0000 0010 1101 */ \
+ V(String, 0x65) /* 0000 0000 0110 0101 */ \
+ V(Boolean, 0xa5) /* 0000 0000 1010 0101 */ \
+ V(Undefined, 0x125) /* 0000 0001 0010 0101 */ \
+ V(JSObject, 0x221) /* 0000 0010 0010 0001 */ \
+ V(JSArray, 0x621) /* 0000 0110 0010 0001 */ \
+ V(None, 0x7ff) /* 0000 0111 1111 1111 */
+
+class HType V8_FINAL {
+ public:
+ #define DECLARE_CONSTRUCTOR(Name, mask) \
+ static HType Name() V8_WARN_UNUSED_RESULT { return HType(k##Name); }
+ HTYPE_LIST(DECLARE_CONSTRUCTOR)
+ #undef DECLARE_CONSTRUCTOR
+
+ // Return the weakest (least precise) common type.
+ HType Combine(HType other) const V8_WARN_UNUSED_RESULT {
+ return HType(static_cast<Kind>(kind_ & other.kind_));
+ }
+
+ bool Equals(HType other) const V8_WARN_UNUSED_RESULT {
+ return kind_ == other.kind_;
+ }
+
+ bool IsSubtypeOf(HType other) const V8_WARN_UNUSED_RESULT {
+ return Combine(other).Equals(other);
+ }
+
+ #define DECLARE_IS_TYPE(Name, mask) \
+ bool Is##Name() const V8_WARN_UNUSED_RESULT { \
+ return IsSubtypeOf(HType::Name()); \
+ }
+ HTYPE_LIST(DECLARE_IS_TYPE)
+ #undef DECLARE_IS_TYPE
+
+ template <class T>
+ static HType FromType(typename T::TypeHandle type) V8_WARN_UNUSED_RESULT;
+ static HType FromValue(Handle<Object> value) V8_WARN_UNUSED_RESULT;
+
+ const char* ToString() const V8_WARN_UNUSED_RESULT;
+
+ private:
+ enum Kind {
+ #define DECLARE_TYPE(Name, mask) k##Name = mask,
+ HTYPE_LIST(DECLARE_TYPE)
+ #undef DECLARE_TYPE
+ LAST_KIND = kNone
+ };
+
+ // Make sure type fits in int16.
+ STATIC_ASSERT(LAST_KIND < (1 << (CHAR_BIT * sizeof(int16_t))));
+
+ explicit HType(Kind kind) : kind_(kind) { }
+
+ int16_t kind_;
+};
+
+} } // namespace v8::internal
+
+#endif // HYDROGEN_TYPES_H_
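
A quick standalone check of the bitmask encoding above: each kind's mask contains all the bits of every kind above it in the lattice, so Combine (bitwise AND) yields the weakest common type and IsSubtypeOf(T) reduces to (kind & T) == T. Using a subset of the masks from HTYPE_LIST:

#include <cassert>
#include <cstdint>

enum Kind : int16_t {
  kAny = 0x0, kTagged = 0x1, kTaggedNumber = 0xd, kSmi = 0x1d,
  kHeapObject = 0x21, kHeapNumber = 0x2d, kNone = 0x7ff
};

static bool IsSubtypeOf(int16_t a, int16_t b) { return (a & b) == b; }

int main() {
  assert(IsSubtypeOf(kSmi, kTaggedNumber));         // Smi <= TaggedNumber
  assert(IsSubtypeOf(kHeapNumber, kTaggedNumber));  // HeapNumber <= TaggedNumber
  assert(!IsSubtypeOf(kTaggedNumber, kSmi));        // but not the reverse
  assert((kSmi & kHeapNumber) == kTaggedNumber);    // Combine: weakest common type
  assert(IsSubtypeOf(kNone, kHeapNumber));          // None is below everything
  return 0;
}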
diff --git a/chromium/v8/src/hydrogen-uint32-analysis.cc b/chromium/v8/src/hydrogen-uint32-analysis.cc
index 8de887d6f80..7616f3d46b6 100644
--- a/chromium/v8/src/hydrogen-uint32-analysis.cc
+++ b/chromium/v8/src/hydrogen-uint32-analysis.cc
@@ -1,36 +1,37 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-uint32-analysis.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen-uint32-analysis.h"
namespace v8 {
namespace internal {
+static bool IsUnsignedLoad(HLoadKeyed* instr) {
+ switch (instr->elements_kind()) {
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+static bool IsUint32Operation(HValue* instr) {
+ return instr->IsShr() ||
+ (instr->IsLoadKeyed() && IsUnsignedLoad(HLoadKeyed::cast(instr))) ||
+ (instr->IsInteger32Constant() && instr->GetInteger32Constant() >= 0);
+}
+
+
bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
// Operations that operate on bits are safe.
if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) {
@@ -54,12 +55,15 @@ bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
// operation.
if (store->value() == val) {
      // Clamping or a conversion to double should have been inserted.
- ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_UINT8_CLAMPED_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_FLOAT32_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_FLOAT64_ELEMENTS);
return true;
}
}
+ } else if (use->IsCompareNumericAndBranch()) {
+ HCompareNumericAndBranch* c = HCompareNumericAndBranch::cast(use);
+ return IsUint32Operation(c->left()) && IsUint32Operation(c->right());
}
return false;
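
The new CompareNumericAndBranch case above only treats a comparison as uint32-safe when both operands come from uint32 operations. The hazard it guards against, in miniature (a standalone sketch, not V8 code): reinterpreting just one side of a comparison as signed flips the outcome for values with the top bit set.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t shr_result = 0x80000000u;  // e.g. produced by a logical shift right
  int32_t reinterpreted = static_cast<int32_t>(shr_result);  // -2147483648
  std::printf("as uint32: %u < 1 is %d\n", shr_result,
              static_cast<int>(shr_result < 1u));    // 0: not less than 1
  std::printf("as int32:  %d < 1 is %d\n", reinterpreted,
              static_cast<int>(reinterpreted < 1));  // 1: compares as negative
  return 0;
}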
diff --git a/chromium/v8/src/hydrogen-uint32-analysis.h b/chromium/v8/src/hydrogen-uint32-analysis.h
index 59739d1ccf1..4d2797fa3a1 100644
--- a/chromium/v8/src/hydrogen-uint32-analysis.h
+++ b/chromium/v8/src/hydrogen-uint32-analysis.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_UINT32_ANALYSIS_H_
#define V8_HYDROGEN_UINT32_ANALYSIS_H_
-#include "hydrogen.h"
+#include "src/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen.cc b/chromium/v8/src/hydrogen.cc
index c40d2e77ffc..8fff497eea9 100644
--- a/chromium/v8/src/hydrogen.cc
+++ b/chromium/v8/src/hydrogen.cc
@@ -1,77 +1,58 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/hydrogen.h"
#include <algorithm>
-#include "v8.h"
-#include "allocation-site-scopes.h"
-#include "codegen.h"
-#include "full-codegen.h"
-#include "hashmap.h"
-#include "hydrogen-bce.h"
-#include "hydrogen-bch.h"
-#include "hydrogen-canonicalize.h"
-#include "hydrogen-check-elimination.h"
-#include "hydrogen-dce.h"
-#include "hydrogen-dehoist.h"
-#include "hydrogen-environment-liveness.h"
-#include "hydrogen-escape-analysis.h"
-#include "hydrogen-infer-representation.h"
-#include "hydrogen-infer-types.h"
-#include "hydrogen-load-elimination.h"
-#include "hydrogen-gvn.h"
-#include "hydrogen-mark-deoptimize.h"
-#include "hydrogen-mark-unreachable.h"
-#include "hydrogen-minus-zero.h"
-#include "hydrogen-osr.h"
-#include "hydrogen-range-analysis.h"
-#include "hydrogen-redundant-phi.h"
-#include "hydrogen-removable-simulates.h"
-#include "hydrogen-representation-changes.h"
-#include "hydrogen-sce.h"
-#include "hydrogen-uint32-analysis.h"
-#include "lithium-allocator.h"
-#include "parser.h"
-#include "runtime.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "typing.h"
+#include "src/v8.h"
+#include "src/allocation-site-scopes.h"
+#include "src/codegen.h"
+#include "src/full-codegen.h"
+#include "src/hashmap.h"
+#include "src/hydrogen-bce.h"
+#include "src/hydrogen-bch.h"
+#include "src/hydrogen-canonicalize.h"
+#include "src/hydrogen-check-elimination.h"
+#include "src/hydrogen-dce.h"
+#include "src/hydrogen-dehoist.h"
+#include "src/hydrogen-environment-liveness.h"
+#include "src/hydrogen-escape-analysis.h"
+#include "src/hydrogen-infer-representation.h"
+#include "src/hydrogen-infer-types.h"
+#include "src/hydrogen-load-elimination.h"
+#include "src/hydrogen-gvn.h"
+#include "src/hydrogen-mark-deoptimize.h"
+#include "src/hydrogen-mark-unreachable.h"
+#include "src/hydrogen-osr.h"
+#include "src/hydrogen-range-analysis.h"
+#include "src/hydrogen-redundant-phi.h"
+#include "src/hydrogen-removable-simulates.h"
+#include "src/hydrogen-representation-changes.h"
+#include "src/hydrogen-sce.h"
+#include "src/hydrogen-store-elimination.h"
+#include "src/hydrogen-uint32-analysis.h"
+#include "src/lithium-allocator.h"
+#include "src/parser.h"
+#include "src/runtime.h"
+#include "src/scopeinfo.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
+#include "src/typing.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-codegen-x64.h"
+#include "src/x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-codegen-mips.h"
+#include "src/mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/lithium-codegen-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -100,7 +81,8 @@ HBasicBlock::HBasicBlock(HGraph* graph)
is_inline_return_target_(false),
is_reachable_(true),
dominates_loop_successors_(false),
- is_osr_entry_(false) { }
+ is_osr_entry_(false),
+ is_ordered_(false) { }
Isolate* HBasicBlock::isolate() const {
@@ -141,12 +123,13 @@ void HBasicBlock::RemovePhi(HPhi* phi) {
}
-void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
+void HBasicBlock::AddInstruction(HInstruction* instr,
+ HSourcePosition position) {
ASSERT(!IsStartBlock() || !IsFinished());
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
- if (position != RelocInfo::kNoPosition) {
+ if (!position.IsUnknown()) {
instr->set_position(position);
}
if (first_ == NULL) {
@@ -154,10 +137,10 @@ void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
ASSERT(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
- if (position != RelocInfo::kNoPosition) {
+ if (!position.IsUnknown()) {
entry->set_position(position);
} else {
- ASSERT(!FLAG_emit_opt_code_positions ||
+ ASSERT(!FLAG_hydrogen_track_positions ||
!graph()->info()->IsOptimizing());
}
first_ = last_ = entry;
@@ -210,7 +193,7 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
}
-void HBasicBlock::Finish(HControlInstruction* end, int position) {
+void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
ASSERT(!IsFinished());
AddInstruction(end, position);
end_ = end;
@@ -221,11 +204,11 @@ void HBasicBlock::Finish(HControlInstruction* end, int position) {
void HBasicBlock::Goto(HBasicBlock* block,
- int position,
+ HSourcePosition position,
FunctionState* state,
bool add_simulate) {
bool drop_extra = state != NULL &&
- state->inlining_kind() == DROP_EXTRA_ON_RETURN;
+ state->inlining_kind() == NORMAL_RETURN;
if (block->IsInlineReturnTarget()) {
HEnvironment* env = last_environment();
@@ -244,9 +227,9 @@ void HBasicBlock::Goto(HBasicBlock* block,
void HBasicBlock::AddLeaveInlined(HValue* return_value,
FunctionState* state,
- int position) {
+ HSourcePosition position) {
HBasicBlock* target = state->function_return();
- bool drop_extra = state->inlining_kind() == DROP_EXTRA_ON_RETURN;
+ bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
@@ -302,6 +285,12 @@ bool HBasicBlock::Dominates(HBasicBlock* other) const {
}
+bool HBasicBlock::EqualToOrDominates(HBasicBlock* other) const {
+ if (this == other) return true;
+ return Dominates(other);
+}
+
+
int HBasicBlock::LoopNestingDepth() const {
const HBasicBlock* current = this;
int result = (current->IsLoopHeader()) ? 1 : 0;
@@ -331,6 +320,15 @@ void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
}
+void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
+ ASSERT(IsFinished());
+ HBasicBlock* succ_block = end()->SuccessorAt(succ);
+
+ ASSERT(succ_block->predecessors()->length() == 1);
+ succ_block->MarkUnreachable();
+}
+
+
void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
if (HasPredecessor()) {
// Only loop header blocks can have a predecessor added after
@@ -683,17 +681,19 @@ HConstant* HGraph::GetConstantMinus1() {
}
-#define DEFINE_GET_CONSTANT(Name, name, htype, boolean_value) \
+#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value) \
HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
HConstant* constant = new(zone()) HConstant( \
Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
+ Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
+ false, \
Representation::Tagged(), \
htype, \
- false, \
true, \
+ boolean_value, \
false, \
- boolean_value); \
+ ODDBALL_TYPE); \
constant->InsertAfter(entry_block()->first()); \
constant_##name##_.set(constant); \
} \
@@ -701,11 +701,11 @@ HConstant* HGraph::GetConstant##Name() { \
}
-DEFINE_GET_CONSTANT(Undefined, undefined, HType::Tagged(), false)
-DEFINE_GET_CONSTANT(True, true, HType::Boolean(), true)
-DEFINE_GET_CONSTANT(False, false, HType::Boolean(), false)
-DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false)
-DEFINE_GET_CONSTANT(Null, null, HType::Tagged(), false)
+DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false)
+DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true)
+DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false)
+DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false)
+DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false)
#undef DEFINE_GET_CONSTANT
@@ -1026,9 +1026,9 @@ void HGraphBuilder::IfBuilder::End() {
current = merge_at_join_blocks_;
while (current != NULL) {
if (current->deopt_ && current->block_ != NULL) {
- builder_->PadEnvironmentForContinuation(current->block_,
- merge_block);
- builder_->GotoNoSimulate(current->block_, merge_block);
+ current->block_->FinishExit(
+ HAbnormalExit::New(builder_->zone(), NULL),
+ HSourcePosition::Unknown());
}
current = current->next_;
}
@@ -1161,9 +1161,10 @@ HGraph* HGraphBuilder::CreateGraph() {
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
- ASSERT(!FLAG_emit_opt_code_positions ||
- position_ != RelocInfo::kNoPosition || !info_->IsOptimizing());
- current_block()->AddInstruction(instr, position_);
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !position_.IsUnknown() ||
+ !info_->IsOptimizing());
+ current_block()->AddInstruction(instr, source_position());
if (graph()->IsInsideNoSideEffectsScope()) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
@@ -1172,9 +1173,10 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
- ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
- position_ != RelocInfo::kNoPosition);
- current_block()->Finish(last, position_);
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !info_->IsOptimizing() ||
+ !position_.IsUnknown());
+ current_block()->Finish(last, source_position());
if (last->IsReturn() || last->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1182,9 +1184,9 @@ void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
- ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
- position_ != RelocInfo::kNoPosition);
- current_block()->FinishExit(instruction, position_);
+ ASSERT(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
+ !position_.IsUnknown());
+ current_block()->FinishExit(instruction, source_position());
if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1194,12 +1196,12 @@ void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
if (FLAG_native_code_counters && counter->Enabled()) {
HValue* reference = Add<HConstant>(ExternalReference(counter));
- HValue* old_value = Add<HLoadNamedField>(reference,
- HObjectAccess::ForCounter());
+ HValue* old_value = Add<HLoadNamedField>(
+ reference, static_cast<HValue*>(NULL), HObjectAccess::ForCounter());
HValue* new_value = AddUncasted<HAdd>(old_value, graph()->GetConstant1());
new_value->ClearFlag(HValue::kCanOverflow); // Ignore counter overflow
Add<HStoreNamedField>(reference, HObjectAccess::ForCounter(),
- new_value);
+ new_value, STORE_TO_INITIALIZED_ENTRY);
}
}
@@ -1208,7 +1210,7 @@ void HGraphBuilder::AddSimulate(BailoutId id,
RemovableSimulate removable) {
ASSERT(current_block() != NULL);
ASSERT(!graph()->IsInsideNoSideEffectsScope());
- current_block()->AddNewSimulate(id, removable);
+ current_block()->AddNewSimulate(id, source_position(), removable);
}
@@ -1228,49 +1230,25 @@ HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
}
-HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
- if (obj->type().IsHeapObject()) return obj;
- return Add<HCheckHeapObject>(obj);
-}
-
+HValue* HGraphBuilder::BuildGetElementsKind(HValue* object) {
+ HValue* map = Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap());
-void HGraphBuilder::FinishExitWithHardDeoptimization(
- const char* reason, HBasicBlock* continuation) {
- PadEnvironmentForContinuation(current_block(), continuation);
- Add<HDeoptimize>(reason, Deoptimizer::EAGER);
- if (graph()->IsInsideNoSideEffectsScope()) {
- GotoNoSimulate(continuation);
- } else {
- Goto(continuation);
- }
+ HValue* bit_field2 = Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMapBitField2());
+ return BuildDecodeField<Map::ElementsKindBits>(bit_field2);
}
-void HGraphBuilder::PadEnvironmentForContinuation(
- HBasicBlock* from,
- HBasicBlock* continuation) {
- if (continuation->last_environment() != NULL) {
- // When merging from a deopt block to a continuation, resolve differences in
- // environment by pushing constant 0 and popping extra values so that the
- // environments match during the join. Push 0 since it has the most specific
- // representation, and will not influence representation inference of the
- // phi.
- int continuation_env_length = continuation->last_environment()->length();
- while (continuation_env_length != from->last_environment()->length()) {
- if (continuation_env_length > from->last_environment()->length()) {
- from->last_environment()->Push(graph()->GetConstant0());
- } else {
- from->last_environment()->Pop();
- }
- }
- } else {
- ASSERT(continuation->predecessors()->length() == 0);
- }
+HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
+ if (obj->type().IsHeapObject()) return obj;
+ return Add<HCheckHeapObject>(obj);
}
-HValue* HGraphBuilder::BuildCheckMap(HValue* obj, Handle<Map> map) {
- return Add<HCheckMaps>(obj, map, top_info());
+void HGraphBuilder::FinishExitWithHardDeoptimization(const char* reason) {
+ Add<HDeoptimize>(reason, Deoptimizer::EAGER);
+ FinishExitCurrentBlock(New<HAbnormalExit>());
}
@@ -1287,16 +1265,25 @@ HValue* HGraphBuilder::BuildCheckString(HValue* string) {
HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
if (object->type().IsJSObject()) return object;
+ if (function->IsConstant() &&
+ HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ HConstant::cast(function)->handle(isolate()));
+ SharedFunctionInfo* shared = f->shared();
+ if (shared->strict_mode() == STRICT || shared->native()) return object;
+ }
return Add<HWrapReceiver>(object, function);
}
-HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
- HValue* elements,
- ElementsKind kind,
- HValue* length,
- HValue* key,
- bool is_js_array) {
+HValue* HGraphBuilder::BuildCheckForCapacityGrow(
+ HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ HValue* length,
+ HValue* key,
+ bool is_js_array,
+ PropertyAccessType access_type) {
IfBuilder length_checker(this);
Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
@@ -1314,11 +1301,8 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap));
HValue* max_capacity = AddUncasted<HAdd>(current_capacity, max_gap);
- IfBuilder key_checker(this);
- key_checker.If<HCompareNumericAndBranch>(key, max_capacity, Token::LT);
- key_checker.Then();
- key_checker.ElseDeopt("Key out of capacity range");
- key_checker.End();
+
+ Add<HBoundsCheck>(key, max_capacity);
HValue* new_capacity = BuildNewElementsCapacity(key);
HValue* new_elements = BuildGrowElementsCapacity(object, elements,
@@ -1339,6 +1323,13 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
new_length);
}
+ if (access_type == STORE && kind == FAST_SMI_ELEMENTS) {
+ HValue* checked_elements = environment()->Top();
+
+ // Write zero to ensure that the new element is initialized with some smi.
+ Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), kind);
+ }
+
length_checker.Else();
Add<HBoundsCheck>(key, length);
@@ -1404,7 +1395,8 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
HInstruction* elements_length = AddLoadFixedArrayLength(elements);
HInstruction* array_length = is_jsarray
- ? Add<HLoadNamedField>(object, HObjectAccess::ForArrayLength(from_kind))
+ ? Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForArrayLength(from_kind))
: elements_length;
BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
@@ -1417,6 +1409,194 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
}
+void HGraphBuilder::BuildJSObjectCheck(HValue* receiver,
+ int bit_field_mask) {
+ // Check that the object isn't a smi.
+ Add<HCheckHeapObject>(receiver);
+
+ // Get the map of the receiver.
+ HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap());
+
+ // Check the instance type and whether an access check is needed; this can
+ // be done with a single load, since the two bytes are adjacent in the map.
+ HObjectAccess access(HObjectAccess::ForMapInstanceTypeAndBitField());
+ HValue* instance_type_and_bit_field =
+ Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), access);
+
+ HValue* mask = Add<HConstant>(0x00FF | (bit_field_mask << 8));
+ HValue* and_result = AddUncasted<HBitwise>(Token::BIT_AND,
+ instance_type_and_bit_field,
+ mask);
+ HValue* sub_result = AddUncasted<HSub>(and_result,
+ Add<HConstant>(JS_OBJECT_TYPE));
+ Add<HBoundsCheck>(sub_result, Add<HConstant>(0x100 - JS_OBJECT_TYPE));
+}
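
    A minimal scalar sketch of the combined check above, assuming the 8-bit
    instance type and bit field are packed by one 16-bit load (kJSObjectType
    here is an assumed stand-in value, not v8's real constant):

    #include <cstdint>

    static const uint32_t kJSObjectType = 0xA0;  // assumed stand-in value

    // True when instance_type >= kJSObjectType and no bit selected by
    // bit_field_mask is set: a smaller instance type wraps around to a huge
    // unsigned value, and any set bit-field bit lands at or above 0x100.
    bool PassesJSObjectCheck(uint8_t instance_type, uint8_t bit_field,
                             uint8_t bit_field_mask) {
      uint32_t both = instance_type |
                      (static_cast<uint32_t>(bit_field) << 8);
      uint32_t masked = both &
                        (0x00FFu | (static_cast<uint32_t>(bit_field_mask) << 8));
      return masked - kJSObjectType < 0x100u - kJSObjectType;
    }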
+
+
+void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
+ HIfContinuation* join_continuation) {
+ // The ordering of the ifs below may look backward, but it is necessary:
+ // all of the paths must guarantee that the if-true branch of the
+ // continuation yields a smi element index and that the if-false branch
+ // yields either a symbol or a unique string key. All other object types
+ // cause a deopt to fall back to the runtime.
+
+ IfBuilder key_smi_if(this);
+ key_smi_if.If<HIsSmiAndBranch>(key);
+ key_smi_if.Then();
+ {
+ Push(key); // Nothing to do, just continue to true of continuation.
+ }
+ key_smi_if.Else();
+ {
+ HValue* map = Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap());
+ HValue* instance_type =
+ Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMapInstanceType());
+
+ // Non-unique string, check for a string with a hash code that is actually
+ // an index.
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ IfBuilder not_string_or_name_if(this);
+ not_string_or_name_if.If<HCompareNumericAndBranch>(
+ instance_type,
+ Add<HConstant>(LAST_UNIQUE_NAME_TYPE),
+ Token::GT);
+
+ not_string_or_name_if.Then();
+ {
+ // Non-smi, non-Name, non-String: Try to convert to smi in case of
+ // HeapNumber.
+ // TODO(danno): This could call some variant of ToString
+ Push(AddUncasted<HForceRepresentation>(key, Representation::Smi()));
+ }
+ not_string_or_name_if.Else();
+ {
+ // String or Name: check explicitly for Name, they can short-circuit
+ // directly to unique non-index key path.
+ IfBuilder not_symbol_if(this);
+ not_symbol_if.If<HCompareNumericAndBranch>(
+ instance_type,
+ Add<HConstant>(SYMBOL_TYPE),
+ Token::NE);
+
+ not_symbol_if.Then();
+ {
+ // String: check whether the String represents an array index. If it
+ // does, extract the index value from the hash.
+ HValue* hash =
+ Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
+ HObjectAccess::ForNameHashField());
+ HValue* not_index_mask = Add<HConstant>(static_cast<int>(
+ String::kContainsCachedArrayIndexMask));
+
+ HValue* not_index_test = AddUncasted<HBitwise>(
+ Token::BIT_AND, hash, not_index_mask);
+
+ IfBuilder string_index_if(this);
+ string_index_if.If<HCompareNumericAndBranch>(not_index_test,
+ graph()->GetConstant0(),
+ Token::EQ);
+ string_index_if.Then();
+ {
+ // String with index in hash: extract string and merge to index path.
+ Push(BuildDecodeField<String::ArrayIndexValueBits>(hash));
+ }
+ string_index_if.Else();
+ {
+ // Key is a non-index String: check whether it is internalized/unique,
+ // and deopt if it is not.
+ HValue* not_internalized_bit = AddUncasted<HBitwise>(
+ Token::BIT_AND,
+ instance_type,
+ Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
+ DeoptimizeIf<HCompareNumericAndBranch>(
+ not_internalized_bit,
+ graph()->GetConstant0(),
+ Token::NE,
+ "BuildKeyedIndexCheck: string isn't internalized");
+ // Key guaranteed to be a unique string.
+ Push(key);
+ }
+ string_index_if.JoinContinuation(join_continuation);
+ }
+ not_symbol_if.Else();
+ {
+ Push(key); // Key is symbol
+ }
+ not_symbol_if.JoinContinuation(join_continuation);
+ }
+ not_string_or_name_if.JoinContinuation(join_continuation);
+ }
+ key_smi_if.JoinContinuation(join_continuation);
+}
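
    A sketch of the cached-array-index test used in the string branch above;
    the bit-layout constants are illustrative assumptions, not v8's actual
    String hash-field layout:

    #include <cstdint>
    #include <optional>

    static const uint32_t kContainsCachedArrayIndexMask = 1u << 0;  // assumed
    static const uint32_t kArrayIndexShift = 2;                     // assumed
    static const uint32_t kArrayIndexMask = (1u << 24) - 1;         // assumed

    // Returns the array index cached in the hash field, if any: a clear
    // "does not contain" bit means the index bits are valid.
    std::optional<uint32_t> CachedArrayIndex(uint32_t hash_field) {
      if ((hash_field & kContainsCachedArrayIndexMask) != 0) {
        return std::nullopt;
      }
      return (hash_field >> kArrayIndexShift) & kArrayIndexMask;
    }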
+
+
+void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
+ // Get the instance type of the receiver, and make sure that it is
+ // not one of the global object types.
+ HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap());
+ HValue* instance_type =
+ Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMapInstanceType());
+ STATIC_ASSERT(JS_BUILTINS_OBJECT_TYPE == JS_GLOBAL_OBJECT_TYPE + 1);
+ HValue* min_global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
+ HValue* max_global_type = Add<HConstant>(JS_BUILTINS_OBJECT_TYPE);
+
+ IfBuilder if_global_object(this);
+ if_global_object.If<HCompareNumericAndBranch>(instance_type,
+ max_global_type,
+ Token::LTE);
+ if_global_object.And();
+ if_global_object.If<HCompareNumericAndBranch>(instance_type,
+ min_global_type,
+ Token::GTE);
+ if_global_object.ThenDeopt("receiver was a global object");
+ if_global_object.End();
+}
+
+
+void HGraphBuilder::BuildTestForDictionaryProperties(
+ HValue* object,
+ HIfContinuation* continuation) {
+ HValue* properties = Add<HLoadNamedField>(
+ object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForPropertiesPointer());
+ HValue* properties_map =
+ Add<HLoadNamedField>(properties, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap());
+ HValue* hash_map = Add<HLoadRoot>(Heap::kHashTableMapRootIndex);
+ IfBuilder builder(this);
+ builder.If<HCompareObjectEqAndBranch>(properties_map, hash_map);
+ builder.CaptureContinuation(continuation);
+}
+
+
+HValue* HGraphBuilder::BuildKeyedLookupCacheHash(HValue* object,
+ HValue* key) {
+ // Load the map of the receiver and compute the keyed lookup cache hash
+ // from 32 bits of the map pointer and the string hash.
+ HValue* object_map =
+ Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMapAsInteger32());
+ HValue* shifted_map = AddUncasted<HShr>(
+ object_map, Add<HConstant>(KeyedLookupCache::kMapHashShift));
+ HValue* string_hash =
+ Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
+ HObjectAccess::ForStringHashField());
+ HValue* shifted_hash = AddUncasted<HShr>(
+ string_hash, Add<HConstant>(String::kHashShift));
+ HValue* xor_result = AddUncasted<HBitwise>(Token::BIT_XOR, shifted_map,
+ shifted_hash);
+ int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+ return AddUncasted<HBitwise>(Token::BIT_AND, xor_result,
+ Add<HConstant>(mask));
+}
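
    The same hash written out as scalar code; the shift amounts and the
    cache index mask below are assumptions standing in for the
    KeyedLookupCache and String constants:

    #include <cstdint>

    static const int kMapHashShift = 5;            // assumed
    static const int kStringHashShift = 2;         // assumed
    static const uint32_t kCacheIndexMask = 0x3F;  // capacity & hash mask, assumed

    // Mix 32 bits of the map pointer with the string hash, then clamp the
    // result to a cache slot index.
    uint32_t KeyedLookupHash(uint32_t map_bits, uint32_t string_hash_field) {
      return ((map_bits >> kMapHashShift) ^
              (string_hash_field >> kStringHashShift)) & kCacheIndexMask;
    }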
+
+
HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* elements,
HValue* key,
@@ -1442,7 +1622,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
static_cast<HValue*>(NULL),
- FAST_SMI_ELEMENTS);
+ FAST_ELEMENTS);
IfBuilder key_compare(this);
key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key);
@@ -1468,7 +1648,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* details = Add<HLoadKeyed>(elements, details_index,
static_cast<HValue*>(NULL),
- FAST_SMI_ELEMENTS);
+ FAST_ELEMENTS);
IfBuilder details_compare(this);
details_compare.If<HCompareNumericAndBranch>(details,
graph()->GetConstant0(),
@@ -1529,16 +1709,14 @@ HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
- HValue* key) {
- HValue* elements = AddLoadElements(receiver);
-
- HValue* hash = BuildElementIndexHash(key);
-
+ HValue* elements,
+ HValue* key,
+ HValue* hash) {
HValue* capacity = Add<HLoadKeyed>(
elements,
Add<HConstant>(NameDictionary::kCapacityIndex),
static_cast<HValue*>(NULL),
- FAST_SMI_ELEMENTS);
+ FAST_ELEMENTS);
HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
mask->ChangeRepresentation(Representation::Integer32());
@@ -1549,8 +1727,76 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
}
-HValue* HGraphBuilder::BuildNumberToString(HValue* object,
- Handle<Type> type) {
+HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
+ HValue* index,
+ HValue* input) {
+ NoObservableSideEffectsScope scope(this);
+ HConstant* max_length = Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ Add<HBoundsCheck>(length, max_length);
+
+ // Generate size calculation code here in order to make it dominate
+ // the JSRegExpResult allocation.
+ ElementsKind elements_kind = FAST_ELEMENTS;
+ HValue* size = BuildCalculateElementsSize(elements_kind, length);
+
+ // Allocate the JSRegExpResult and the FixedArray in one step.
+ HValue* result = Add<HAllocate>(
+ Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
+ NOT_TENURED, JS_ARRAY_TYPE);
+
+ // Initialize the JSRegExpResult header.
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ HValue* native_context = Add<HLoadNamedField>(
+ global_object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForGlobalObjectNativeContext());
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForMap(),
+ Add<HLoadNamedField>(
+ native_context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::REGEXP_RESULT_MAP_INDEX)));
+ HConstant* empty_fixed_array =
+ Add<HConstant>(isolate()->factory()->empty_fixed_array());
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSArray::kPropertiesOffset),
+ empty_fixed_array);
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
+ empty_fixed_array);
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSArray::kLengthOffset), length);
+
+ // Initialize the additional fields.
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kIndexOffset),
+ index);
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kInputOffset),
+ input);
+
+ // Allocate and initialize the elements header.
+ HAllocate* elements = BuildAllocateElements(elements_kind, size);
+ BuildInitializeElementsHeader(elements, elements_kind, length);
+
+ HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
+ elements_kind, max_length->Integer32Value());
+ elements->set_size_upper_bound(size_in_bytes_upper_bound);
+
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
+ elements);
+
+ // Initialize the elements contents with undefined.
+ BuildFillElementsWithValue(
+ elements, elements_kind, graph()->GetConstant0(), length,
+ graph()->GetConstantUndefined());
+
+ return result;
+}
+
+
+HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
NoObservableSideEffectsScope scope(this);
// Convert constant numbers at compile time.
@@ -1601,20 +1847,22 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
}
if_objectissmi.Else();
{
- if (type->Is(Type::Smi())) {
+ if (type->Is(Type::SignedSmall())) {
if_objectissmi.Deopt("Expected smi");
} else {
// Check if the object is a heap number.
IfBuilder if_objectisnumber(this);
- if_objectisnumber.If<HCompareMap>(
+ HValue* objectisnumber = if_objectisnumber.If<HCompareMap>(
object, isolate()->factory()->heap_number_map());
if_objectisnumber.Then();
{
// Compute hash for heap number similar to double_get_hash().
HValue* low = Add<HLoadNamedField>(
- object, HObjectAccess::ForHeapNumberValueLowestBits());
+ object, objectisnumber,
+ HObjectAccess::ForHeapNumberValueLowestBits());
HValue* high = Add<HLoadNamedField>(
- object, HObjectAccess::ForHeapNumberValueHighestBits());
+ object, objectisnumber,
+ HObjectAccess::ForHeapNumberValueHighestBits());
HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, low, high);
hash = AddUncasted<HBitwise>(Token::BIT_AND, hash, mask);
@@ -1624,24 +1872,32 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
- // Check if key is a heap number (the number string cache contains only
- // SMIs and heap number, so it is sufficient to do a SMI check here).
+ // Check if the key is a heap number and compare it with the object.
IfBuilder if_keyisnotsmi(this);
- if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
+ HValue* keyisnotsmi = if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
if_keyisnotsmi.Then();
{
- // Check if values of key and object match.
- IfBuilder if_keyeqobject(this);
- if_keyeqobject.If<HCompareNumericAndBranch>(
- Add<HLoadNamedField>(key, HObjectAccess::ForHeapNumberValue()),
- Add<HLoadNamedField>(object, HObjectAccess::ForHeapNumberValue()),
- Token::EQ);
- if_keyeqobject.Then();
+ IfBuilder if_keyisheapnumber(this);
+ if_keyisheapnumber.If<HCompareMap>(
+ key, isolate()->factory()->heap_number_map());
+ if_keyisheapnumber.Then();
{
- // Make the key_index available.
- Push(key_index);
+ // Check if values of key and object match.
+ IfBuilder if_keyeqobject(this);
+ if_keyeqobject.If<HCompareNumericAndBranch>(
+ Add<HLoadNamedField>(key, keyisnotsmi,
+ HObjectAccess::ForHeapNumberValue()),
+ Add<HLoadNamedField>(object, objectisnumber,
+ HObjectAccess::ForHeapNumberValue()),
+ Token::EQ);
+ if_keyeqobject.Then();
+ {
+ // Make the key_index available.
+ Push(key_index);
+ }
+ if_keyeqobject.JoinContinuation(&found);
}
- if_keyeqobject.JoinContinuation(&found);
+ if_keyisheapnumber.JoinContinuation(&found);
}
if_keyisnotsmi.JoinContinuation(&found);
}
@@ -1673,10 +1929,10 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
if_found.Else();
{
// Cache miss, fallback to runtime.
- Add<HPushArgument>(object);
+ Add<HPushArguments>(object);
Push(Add<HCallRuntime>(
isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
+ Runtime::FunctionForId(Runtime::kHiddenNumberToStringSkipCache),
1));
}
if_found.End();
@@ -1685,22 +1941,125 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
}
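
    For reference, the number-string-cache probe hash computed above, in
    scalar form: smis hash by value, heap numbers by xor-ing the two 32-bit
    halves of the double. The mask is the cache capacity minus one; the
    function names here are ours, not v8's:

    #include <cstdint>
    #include <cstring>

    // Smi case: hash by the integer value itself.
    uint32_t NumberCacheHash(int32_t smi_value, uint32_t mask) {
      return static_cast<uint32_t>(smi_value) & mask;
    }

    // Heap-number case: xor the low and high 32 bits of the double.
    uint32_t NumberCacheHash(double heap_number, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &heap_number, sizeof bits);
      uint32_t low = static_cast<uint32_t>(bits);
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      return (low ^ high) & mask;
    }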
-HValue* HGraphBuilder::BuildSeqStringSizeFor(HValue* length,
- String::Encoding encoding) {
- STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
- HValue* size = length;
- if (encoding == String::TWO_BYTE_ENCODING) {
- size = AddUncasted<HShl>(length, graph()->GetConstant1());
+HAllocate* HGraphBuilder::BuildAllocate(
+ HValue* object_size,
+ HType type,
+ InstanceType instance_type,
+ HAllocationMode allocation_mode) {
+ // Compute the effective allocation size.
+ HValue* size = object_size;
+ if (allocation_mode.CreateAllocationMementos()) {
+ size = AddUncasted<HAdd>(size, Add<HConstant>(AllocationMemento::kSize));
size->ClearFlag(HValue::kCanOverflow);
- size->SetFlag(HValue::kUint32);
}
- size = AddUncasted<HAdd>(size, Add<HConstant>(static_cast<int32_t>(
- SeqString::kHeaderSize + kObjectAlignmentMask)));
- size->ClearFlag(HValue::kCanOverflow);
- size = AddUncasted<HBitwise>(
- Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
- ~kObjectAlignmentMask)));
- return size;
+
+ // Perform the actual allocation.
+ HAllocate* object = Add<HAllocate>(
+ size, type, allocation_mode.GetPretenureMode(),
+ instance_type, allocation_mode.feedback_site());
+
+ // Set up the allocation memento.
+ if (allocation_mode.CreateAllocationMementos()) {
+ BuildCreateAllocationMemento(
+ object, object_size, allocation_mode.current_site());
+ }
+
+ return object;
+}
+
+
+HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
+ HValue* right_length) {
+ // Compute the combined string length and check against max string length.
+ HValue* length = AddUncasted<HAdd>(left_length, right_length);
+ // Check that length <= kMaxLength <=> length < kMaxLength + 1.
+ HValue* max_length = Add<HConstant>(String::kMaxLength + 1);
+ Add<HBoundsCheck>(length, max_length);
+ return length;
+}
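
    The length guard above relies on a single unsigned bounds check; a
    sketch with an assumed kMaxStringLength (the real limit depends on the
    build configuration):

    #include <cstdint>
    #include <stdexcept>

    static const int32_t kMaxStringLength = (1 << 28) - 16;  // assumed limit

    // length <= kMaxStringLength is checked as length < kMaxStringLength + 1,
    // which one unsigned bounds check expresses exactly.
    int32_t AddStringLengths(int32_t left_length, int32_t right_length) {
      int32_t length = left_length + right_length;
      if (static_cast<uint32_t>(length) >=
          static_cast<uint32_t>(kMaxStringLength) + 1) {
        throw std::length_error("combined string too long");  // deopt stand-in
      }
      return length;
    }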
+
+
+HValue* HGraphBuilder::BuildCreateConsString(
+ HValue* length,
+ HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode) {
+ // Determine the string instance types.
+ HInstruction* left_instance_type = AddLoadStringInstanceType(left);
+ HInstruction* right_instance_type = AddLoadStringInstanceType(right);
+
+ // Allocate the cons string object. HAllocate does not care whether we
+ // pass CONS_STRING_TYPE or CONS_ASCII_STRING_TYPE, so we just use
+ // CONS_STRING_TYPE here. Below we decide whether the cons string is
+ // one-byte or two-byte and set the appropriate map.
+ ASSERT(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE,
+ CONS_ASCII_STRING_TYPE));
+ HAllocate* result = BuildAllocate(Add<HConstant>(ConsString::kSize),
+ HType::String(), CONS_STRING_TYPE,
+ allocation_mode);
+
+ // Compute intersection and difference of instance types.
+ HValue* anded_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_AND, left_instance_type, right_instance_type);
+ HValue* xored_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_XOR, left_instance_type, right_instance_type);
+
+ // We create a one-byte cons string if
+ // 1. both strings are one-byte, or
+ // 2. at least one of the strings is two-byte, but happens to contain only
+ // one-byte characters.
+ // To do this, we check
+ // 1. if both strings are one-byte, or if the one-byte data hint is set in
+ // both strings, or
+ // 2. if one of the strings has the one-byte data hint set and the other
+ // string is one-byte.
+ IfBuilder if_onebyte(this);
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ STATIC_ASSERT(kOneByteDataHintMask != 0);
+ if_onebyte.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, anded_instance_types,
+ Add<HConstant>(static_cast<int32_t>(
+ kStringEncodingMask | kOneByteDataHintMask))),
+ graph()->GetConstant0(), Token::NE);
+ if_onebyte.Or();
+ STATIC_ASSERT(kOneByteStringTag != 0 &&
+ kOneByteDataHintTag != 0 &&
+ kOneByteDataHintTag != kOneByteStringTag);
+ if_onebyte.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, xored_instance_types,
+ Add<HConstant>(static_cast<int32_t>(
+ kOneByteStringTag | kOneByteDataHintTag))),
+ Add<HConstant>(static_cast<int32_t>(
+ kOneByteStringTag | kOneByteDataHintTag)), Token::EQ);
+ if_onebyte.Then();
+ {
+ // We can safely skip the write barrier for storing the map here.
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForMap(),
+ Add<HConstant>(isolate()->factory()->cons_ascii_string_map()));
+ }
+ if_onebyte.Else();
+ {
+ // We can safely skip the write barrier for storing the map here.
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForMap(),
+ Add<HConstant>(isolate()->factory()->cons_string_map()));
+ }
+ if_onebyte.End();
+
+ // Initialize the cons string fields.
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
+ Add<HConstant>(String::kEmptyHashField));
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
+ Add<HStoreNamedField>(result, HObjectAccess::ForConsStringFirst(), left);
+ Add<HStoreNamedField>(result, HObjectAccess::ForConsStringSecond(), right);
+
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ return result;
}
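
    The one-byte decision spelled out as a predicate over the two instance
    type words; the tag and hint bit positions below are illustrative
    assumptions:

    #include <cstdint>

    static const uint32_t kOneByteStringTag = 1u << 3;     // assumed
    static const uint32_t kOneByteDataHintTag = 1u << 5;   // assumed
    static const uint32_t kStringEncodingMask = kOneByteStringTag;
    static const uint32_t kOneByteDataHintMask = kOneByteDataHintTag;

    bool ConsStringIsOneByte(uint32_t left_type, uint32_t right_type) {
      uint32_t anded = left_type & right_type;
      uint32_t xored = left_type ^ right_type;
      // Case 1: both strings are one-byte, or the data hint is set in both.
      if ((anded & (kStringEncodingMask | kOneByteDataHintMask)) != 0) {
        return true;
      }
      // Case 2: one string carries the hint and the other is one-byte, so
      // the two bits end up split across the xor of the instance types.
      return (xored & (kOneByteStringTag | kOneByteDataHintTag)) ==
             (kOneByteStringTag | kOneByteDataHintTag);
    }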
@@ -1726,40 +2085,46 @@ void HGraphBuilder::BuildCopySeqStringChars(HValue* src,
}
-HValue* HGraphBuilder::BuildUncheckedStringAdd(HValue* left,
- HValue* right,
- PretenureFlag pretenure_flag) {
+HValue* HGraphBuilder::BuildObjectSizeAlignment(
+ HValue* unaligned_size, int header_size) {
+ ASSERT((header_size & kObjectAlignmentMask) == 0);
+ HValue* size = AddUncasted<HAdd>(
+ unaligned_size, Add<HConstant>(static_cast<int32_t>(
+ header_size + kObjectAlignmentMask)));
+ size->ClearFlag(HValue::kCanOverflow);
+ return AddUncasted<HBitwise>(
+ Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
+ ~kObjectAlignmentMask)));
+}
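
    In plain arithmetic, the helper above rounds a payload size up to the
    object alignment; 8-byte alignment is assumed here:

    #include <cstdint>

    static const int32_t kObjectAlignmentMask = 8 - 1;  // assumed alignment

    // Add the header and the alignment slack, then clear the low bits.
    int32_t AlignedObjectSize(int32_t unaligned_size, int32_t header_size) {
      int32_t size = unaligned_size + header_size + kObjectAlignmentMask;
      return size & ~kObjectAlignmentMask;
    }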
+
+
+HValue* HGraphBuilder::BuildUncheckedStringAdd(
+ HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode) {
// Determine the string lengths.
- HValue* left_length = Add<HLoadNamedField>(
- left, HObjectAccess::ForStringLength());
- HValue* right_length = Add<HLoadNamedField>(
- right, HObjectAccess::ForStringLength());
-
- // Compute the combined string length. If the result is larger than the
- // max supported string length, we bail out to the runtime. This is done
- // implicitly when converting the result back to a smi in case the max
- // string length equals the max smi value. Otherwise, for platforms with
- // 32-bit smis, we do an explicit range check below.
- HValue* length = AddUncasted<HAdd>(left_length, right_length);
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (String::kMaxLength != Smi::kMaxValue) {
- IfBuilder if_nooverflow(this);
- if_nooverflow.If<HCompareNumericAndBranch>(
- length, Add<HConstant>(String::kMaxLength), Token::LTE);
- if_nooverflow.Then();
- if_nooverflow.ElseDeopt("String length exceeds limit");
- }
+ HValue* left_length = AddLoadStringLength(left);
+ HValue* right_length = AddLoadStringLength(right);
- // Determine the string instance types.
- HLoadNamedField* left_instance_type = Add<HLoadNamedField>(
- Add<HLoadNamedField>(left, HObjectAccess::ForMap()),
- HObjectAccess::ForMapInstanceType());
- HLoadNamedField* right_instance_type = Add<HLoadNamedField>(
- Add<HLoadNamedField>(right, HObjectAccess::ForMap()),
- HObjectAccess::ForMapInstanceType());
-
- // Compute difference of instance types.
- HValue* xored_instance_types = AddUncasted<HBitwise>(
- Token::BIT_XOR, left_instance_type, right_instance_type);
+ // Compute the combined string length.
+ HValue* length = BuildAddStringLengths(left_length, right_length);
+
+ // Do some manual constant folding here.
+ if (left_length->IsConstant()) {
+ HConstant* c_left_length = HConstant::cast(left_length);
+ ASSERT_NE(0, c_left_length->Integer32Value());
+ if (c_left_length->Integer32Value() + 1 >= ConsString::kMinLength) {
+ // The right string contains at least one character.
+ return BuildCreateConsString(length, left, right, allocation_mode);
+ }
+ } else if (right_length->IsConstant()) {
+ HConstant* c_right_length = HConstant::cast(right_length);
+ ASSERT_NE(0, c_right_length->Integer32Value());
+ if (c_right_length->Integer32Value() + 1 >= ConsString::kMinLength) {
+ // The left string contains at least one character.
+ return BuildCreateConsString(length, left, right, allocation_mode);
+ }
+ }
// Check if we should create a cons string.
IfBuilder if_createcons(this);
@@ -1767,80 +2132,20 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(HValue* left,
length, Add<HConstant>(ConsString::kMinLength), Token::GTE);
if_createcons.Then();
{
- // Allocate the cons string object. HAllocate does not care whether we
- // pass CONS_STRING_TYPE or CONS_ASCII_STRING_TYPE here, so we just use
- // CONS_STRING_TYPE here. Below we decide whether the cons string is
- // one-byte or two-byte and set the appropriate map.
- HAllocate* string = Add<HAllocate>(Add<HConstant>(ConsString::kSize),
- HType::String(), pretenure_flag,
- CONS_STRING_TYPE);
-
- // Compute the intersection of instance types.
- HValue* anded_instance_types = AddUncasted<HBitwise>(
- Token::BIT_AND, left_instance_type, right_instance_type);
-
- // We create a one-byte cons string if
- // 1. both strings are one-byte, or
- // 2. at least one of the strings is two-byte, but happens to contain only
- // one-byte characters.
- // To do this, we check
- // 1. if both strings are one-byte, or if the one-byte data hint is set in
- // both strings, or
- // 2. if one of the strings has the one-byte data hint set and the other
- // string is one-byte.
- IfBuilder if_onebyte(this);
- STATIC_ASSERT(kOneByteStringTag != 0);
- STATIC_ASSERT(kOneByteDataHintMask != 0);
- if_onebyte.If<HCompareNumericAndBranch>(
- AddUncasted<HBitwise>(
- Token::BIT_AND, anded_instance_types,
- Add<HConstant>(static_cast<int32_t>(
- kStringEncodingMask | kOneByteDataHintMask))),
- graph()->GetConstant0(), Token::NE);
- if_onebyte.Or();
- STATIC_ASSERT(kOneByteStringTag != 0 &&
- kOneByteDataHintTag != 0 &&
- kOneByteDataHintTag != kOneByteStringTag);
- if_onebyte.If<HCompareNumericAndBranch>(
- AddUncasted<HBitwise>(
- Token::BIT_AND, xored_instance_types,
- Add<HConstant>(static_cast<int32_t>(
- kOneByteStringTag | kOneByteDataHintTag))),
- Add<HConstant>(static_cast<int32_t>(
- kOneByteStringTag | kOneByteDataHintTag)), Token::EQ);
- if_onebyte.Then();
- {
- // We can safely skip the write barrier for storing the map here.
- Handle<Map> map = isolate()->factory()->cons_ascii_string_map();
- AddStoreMapConstantNoWriteBarrier(string, map);
- }
- if_onebyte.Else();
- {
- // We can safely skip the write barrier for storing the map here.
- Handle<Map> map = isolate()->factory()->cons_string_map();
- AddStoreMapConstantNoWriteBarrier(string, map);
- }
- if_onebyte.End();
-
- // Initialize the cons string fields.
- Add<HStoreNamedField>(string, HObjectAccess::ForStringHashField(),
- Add<HConstant>(String::kEmptyHashField));
- Add<HStoreNamedField>(string, HObjectAccess::ForStringLength(), length);
- Add<HStoreNamedField>(string, HObjectAccess::ForConsStringFirst(), left);
- Add<HStoreNamedField>(string, HObjectAccess::ForConsStringSecond(),
- right);
-
- // Count the native string addition.
- AddIncrementCounter(isolate()->counters()->string_add_native());
-
- // Cons string is result.
- Push(string);
+ // Create a cons string.
+ Push(BuildCreateConsString(length, left, right, allocation_mode));
}
if_createcons.Else();
{
- // Compute union of instance types.
+ // Determine the string instance types.
+ HValue* left_instance_type = AddLoadStringInstanceType(left);
+ HValue* right_instance_type = AddLoadStringInstanceType(right);
+
+ // Compute union and difference of instance types.
HValue* ored_instance_types = AddUncasted<HBitwise>(
Token::BIT_OR, left_instance_type, right_instance_type);
+ HValue* xored_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_XOR, left_instance_type, right_instance_type);
// Check if both strings have the same encoding and both are
// sequential.
@@ -1859,7 +2164,12 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(HValue* left,
graph()->GetConstant0(), Token::EQ);
if_sameencodingandsequential.Then();
{
- // Check if the result is a one-byte string.
+ HConstant* string_map =
+ Add<HConstant>(isolate()->factory()->string_map());
+ HConstant* ascii_string_map =
+ Add<HConstant>(isolate()->factory()->ascii_string_map());
+
+ // Determine map and size depending on whether result is one-byte string.
IfBuilder if_onebyte(this);
STATIC_ASSERT(kOneByteStringTag != 0);
if_onebyte.If<HCompareNumericAndBranch>(
@@ -1869,99 +2179,85 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(HValue* left,
graph()->GetConstant0(), Token::NE);
if_onebyte.Then();
{
- // Calculate the number of bytes needed for the characters in the
- // string while observing object alignment.
- HValue* size = BuildSeqStringSizeFor(
- length, String::ONE_BYTE_ENCODING);
-
- // Allocate the ASCII string object.
- Handle<Map> map = isolate()->factory()->ascii_string_map();
- HAllocate* string = Add<HAllocate>(size, HType::String(),
- pretenure_flag, ASCII_STRING_TYPE);
- string->set_known_initial_map(map);
-
- // We can safely skip the write barrier for storing map here.
- AddStoreMapConstantNoWriteBarrier(string, map);
-
- // Length must be stored into the string before we copy characters to
- // make debug verification code happy.
- Add<HStoreNamedField>(string, HObjectAccess::ForStringLength(),
- length);
-
- // Copy bytes from the left string.
- BuildCopySeqStringChars(
- left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
- string, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
- left_length);
+ // Allocate sequential one-byte string object.
+ Push(length);
+ Push(ascii_string_map);
+ }
+ if_onebyte.Else();
+ {
+ // Allocate sequential two-byte string object.
+ HValue* size = AddUncasted<HShl>(length, graph()->GetConstant1());
+ size->ClearFlag(HValue::kCanOverflow);
+ size->SetFlag(HValue::kUint32);
+ Push(size);
+ Push(string_map);
+ }
+ if_onebyte.End();
+ HValue* map = Pop();
- // Copy bytes from the right string.
- BuildCopySeqStringChars(
- right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
- string, left_length, String::ONE_BYTE_ENCODING,
- right_length);
+ // Calculate the number of bytes needed for the characters in the
+ // string while observing object alignment.
+ STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
+ HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
- // Count the native string addition.
- AddIncrementCounter(isolate()->counters()->string_add_native());
+ // Allocate the string object. HAllocate does not care whether we pass
+ // STRING_TYPE or ASCII_STRING_TYPE, so we just use STRING_TYPE here.
+ HAllocate* result = BuildAllocate(
+ size, HType::String(), STRING_TYPE, allocation_mode);
+ Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
- // Return the string.
- Push(string);
- }
- if_onebyte.Else();
+ // Initialize the string fields.
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
+ Add<HConstant>(String::kEmptyHashField));
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
+
+ // Copy characters to the result string.
+ IfBuilder if_twobyte(this);
+ if_twobyte.If<HCompareObjectEqAndBranch>(map, string_map);
+ if_twobyte.Then();
{
- // Calculate the number of bytes needed for the characters in the
- // string while observing object alignment.
- HValue* size = BuildSeqStringSizeFor(
- length, String::TWO_BYTE_ENCODING);
-
- // Allocate the two-byte string object.
- Handle<Map> map = isolate()->factory()->string_map();
- HAllocate* string = Add<HAllocate>(size, HType::String(),
- pretenure_flag, STRING_TYPE);
- string->set_known_initial_map(map);
-
- // We can safely skip the write barrier for storing map here.
- AddStoreMapConstantNoWriteBarrier(string, map);
-
- // Length must be stored into the string before we copy characters to
- // make debug verification code happy.
- Add<HStoreNamedField>(string, HObjectAccess::ForStringLength(),
- length);
-
- // Copy bytes from the left string.
+ // Copy characters from the left string.
BuildCopySeqStringChars(
left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
- string, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
+ result, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
left_length);
- // Copy bytes from the right string.
+ // Copy characters from the right string.
BuildCopySeqStringChars(
right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
- string, left_length, String::TWO_BYTE_ENCODING,
+ result, left_length, String::TWO_BYTE_ENCODING,
right_length);
-
- // Return the string.
- Push(string);
}
- if_onebyte.End();
+ if_twobyte.Else();
+ {
+ // Copy characters from the left string.
+ BuildCopySeqStringChars(
+ left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
+ result, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
+ left_length);
- // Initialize the (common) string fields.
- HValue* string = Pop();
- Add<HStoreNamedField>(string, HObjectAccess::ForStringHashField(),
- Add<HConstant>(String::kEmptyHashField));
+ // Copy characters from the right string.
+ BuildCopySeqStringChars(
+ right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
+ result, left_length, String::ONE_BYTE_ENCODING,
+ right_length);
+ }
+ if_twobyte.End();
// Count the native string addition.
AddIncrementCounter(isolate()->counters()->string_add_native());
- Push(string);
+ // Return the sequential string.
+ Push(result);
}
if_sameencodingandsequential.Else();
{
// Fallback to the runtime to add the two strings.
- Add<HPushArgument>(left);
- Add<HPushArgument>(right);
- Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kStringAdd),
- 2));
+ Add<HPushArguments>(left, right);
+ Push(Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd),
+ 2));
}
if_sameencodingandsequential.End();
}
@@ -1971,20 +2267,21 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(HValue* left,
}
-HValue* HGraphBuilder::BuildStringAdd(HValue* left,
- HValue* right,
- PretenureFlag pretenure_flag) {
- // Determine the string lengths.
- HValue* left_length = Add<HLoadNamedField>(
- left, HObjectAccess::ForStringLength());
- HValue* right_length = Add<HLoadNamedField>(
- right, HObjectAccess::ForStringLength());
+HValue* HGraphBuilder::BuildStringAdd(
+ HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode) {
+ NoObservableSideEffectsScope no_effects(this);
+
+ // Determine string lengths.
+ HValue* left_length = AddLoadStringLength(left);
+ HValue* right_length = AddLoadStringLength(right);
// Check if left string is empty.
- IfBuilder if_leftisempty(this);
- if_leftisempty.If<HCompareNumericAndBranch>(
+ IfBuilder if_leftempty(this);
+ if_leftempty.If<HCompareNumericAndBranch>(
left_length, graph()->GetConstant0(), Token::EQ);
- if_leftisempty.Then();
+ if_leftempty.Then();
{
// Count the native string addition.
AddIncrementCounter(isolate()->counters()->string_add_native());
@@ -1992,13 +2289,13 @@ HValue* HGraphBuilder::BuildStringAdd(HValue* left,
// Just return the right string.
Push(right);
}
- if_leftisempty.Else();
+ if_leftempty.Else();
{
// Check if right string is empty.
- IfBuilder if_rightisempty(this);
- if_rightisempty.If<HCompareNumericAndBranch>(
+ IfBuilder if_rightempty(this);
+ if_rightempty.If<HCompareNumericAndBranch>(
right_length, graph()->GetConstant0(), Token::EQ);
- if_rightisempty.Then();
+ if_rightempty.Then();
{
// Count the native string addition.
AddIncrementCounter(isolate()->counters()->string_add_native());
@@ -2006,14 +2303,14 @@ HValue* HGraphBuilder::BuildStringAdd(HValue* left,
// Just return the left string.
Push(left);
}
- if_rightisempty.Else();
+ if_rightempty.Else();
{
- // Concatenate the two non-empty strings.
- Push(BuildUncheckedStringAdd(left, right, pretenure_flag));
+ // Add the two non-empty strings.
+ Push(BuildUncheckedStringAdd(left, right, allocation_mode));
}
- if_rightisempty.End();
+ if_rightempty.End();
}
- if_leftisempty.End();
+ if_leftempty.End();
return Pop();
}
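
    The control flow of BuildStringAdd reduces to two empty-operand fast
    paths around the unchecked add; a library-level sketch, with std::string
    standing in for the graph-builder machinery:

    #include <string>

    std::string StringAdd(const std::string& left, const std::string& right) {
      if (left.empty()) return right;   // left empty: return right unchanged
      if (right.empty()) return left;   // right empty: return left unchanged
      return left + right;              // both non-empty: the unchecked path
    }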
@@ -2025,10 +2322,12 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode) {
- ASSERT(!IsExternalArrayElementsKind(elements_kind) || !is_js_array);
+ ASSERT((!IsExternalArrayElementsKind(elements_kind) &&
+ !IsFixedTypedArrayElementsKind(elements_kind)) ||
+ !is_js_array);
// No GVNFlag is necessary for ElementsKind if there is an explicit dependency
// on a HElementsTransition instruction. The flag can also be removed if the
// map to check has FAST_HOLEY_ELEMENTS, since there can be no further
@@ -2036,33 +2335,41 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
// generated store code.
if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
- (elements_kind == FAST_ELEMENTS && is_store)) {
- checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ (elements_kind == FAST_ELEMENTS && access_type == STORE)) {
+ checked_object->ClearDependsOnFlag(kElementsKind);
}
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
HValue* elements = AddLoadElements(checked_object);
- if (is_store && (fast_elements || fast_smi_only_elements) &&
+ if (access_type == STORE && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
- elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ elements, isolate()->factory()->fixed_array_map());
+ check_cow_map->ClearDependsOnFlag(kElementsKind);
}
HInstruction* length = NULL;
if (is_js_array) {
length = Add<HLoadNamedField>(
- checked_object, HObjectAccess::ForArrayLength(elements_kind));
+ checked_object->ActualValue(), checked_object,
+ HObjectAccess::ForArrayLength(elements_kind));
} else {
length = AddLoadFixedArrayLength(elements);
}
length->set_type(HType::Smi());
HValue* checked_key = NULL;
- if (IsExternalArrayElementsKind(elements_kind)) {
+ if (IsExternalArrayElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind)) {
+ HValue* backing_store;
+ if (IsExternalArrayElementsKind(elements_kind)) {
+ backing_store = Add<HLoadNamedField>(
+ elements, static_cast<HValue*>(NULL),
+ HObjectAccess::ForExternalArrayExternalPointer());
+ } else {
+ backing_store = elements;
+ }
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
NoObservableSideEffectsScope no_effects(this);
- HLoadExternalArrayPointer* external_elements =
- Add<HLoadExternalArrayPointer>(elements);
IfBuilder length_checker(this);
length_checker.If<HCompareNumericAndBranch>(key, length, Token::LT);
length_checker.Then();
@@ -2071,7 +2378,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
HInstruction* result = AddElementAccess(
- external_elements, key, val, bounds_check, elements_kind, is_store);
+ backing_store, key, val, bounds_check, elements_kind, access_type);
negative_checker.ElseDeopt("Negative key encountered");
negative_checker.End();
length_checker.End();
@@ -2079,11 +2386,9 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
} else {
ASSERT(store_mode == STANDARD_STORE);
checked_key = Add<HBoundsCheck>(key, length);
- HLoadExternalArrayPointer* external_elements =
- Add<HLoadExternalArrayPointer>(elements);
return AddElementAccess(
- external_elements, checked_key, val,
- checked_object, elements_kind, is_store);
+ backing_store, checked_key, val,
+ checked_object, elements_kind, access_type);
}
}
ASSERT(fast_smi_only_elements ||
@@ -2093,48 +2398,53 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// In case val is stored into a fast smi array, ensure that the value is a smi
// before manipulating the backing store. Otherwise the actual store may
// deopt, leaving the backing store in an invalid state.
- if (is_store && IsFastSmiElementsKind(elements_kind) &&
+ if (access_type == STORE && IsFastSmiElementsKind(elements_kind) &&
!val->type().IsSmi()) {
val = AddUncasted<HForceRepresentation>(val, Representation::Smi());
}
if (IsGrowStoreMode(store_mode)) {
NoObservableSideEffectsScope no_effects(this);
+ Representation representation = HStoreKeyed::RequiredValueRepresentation(
+ elements_kind, STORE_TO_INITIALIZED_ENTRY);
+ val = AddUncasted<HForceRepresentation>(val, representation);
elements = BuildCheckForCapacityGrow(checked_object, elements,
elements_kind, length, key,
- is_js_array);
+ is_js_array, access_type);
checked_key = key;
} else {
checked_key = Add<HBoundsCheck>(key, length);
- if (is_store && (fast_elements || fast_smi_only_elements)) {
+ if (access_type == STORE && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCopyElementsOnWrite(checked_object, elements,
elements_kind, length);
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
- elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ elements, isolate()->factory()->fixed_array_map());
+ check_cow_map->ClearDependsOnFlag(kElementsKind);
}
}
}
return AddElementAccess(elements, checked_key, val, checked_object,
- elements_kind, is_store, load_mode);
+ elements_kind, access_type, load_mode);
}
-
HValue* HGraphBuilder::BuildAllocateArrayFromLength(
JSArrayBuilder* array_builder,
HValue* length_argument) {
if (length_argument->IsConstant() &&
HConstant::cast(length_argument)->HasSmiValue()) {
int array_length = HConstant::cast(length_argument)->Integer32Value();
- HValue* new_object = array_length == 0
- ? array_builder->AllocateEmptyArray()
- : array_builder->AllocateArray(length_argument, length_argument);
- return new_object;
+ if (array_length == 0) {
+ return array_builder->AllocateEmptyArray();
+ } else {
+ return array_builder->AllocateArray(length_argument,
+ array_length,
+ length_argument);
+ }
}
HValue* constant_zero = graph()->GetConstant0();
@@ -2164,32 +2474,62 @@ HValue* HGraphBuilder::BuildAllocateArrayFromLength(
// Figure out total size
HValue* length = Pop();
HValue* capacity = Pop();
- return array_builder->AllocateArray(capacity, length);
+ return array_builder->AllocateArray(capacity, max_alloc_length, length);
}
-HValue* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
- HValue* capacity) {
- int elements_size;
- InstanceType instance_type;
- if (IsFastDoubleElementsKind(kind)) {
- elements_size = kDoubleSize;
- instance_type = FIXED_DOUBLE_ARRAY_TYPE;
- } else {
- elements_size = kPointerSize;
- instance_type = FIXED_ARRAY_TYPE;
- }
+HValue* HGraphBuilder::BuildCalculateElementsSize(ElementsKind kind,
+ HValue* capacity) {
+ int elements_size = IsFastDoubleElementsKind(kind)
+ ? kDoubleSize
+ : kPointerSize;
HConstant* elements_size_value = Add<HConstant>(elements_size);
- HValue* mul = AddUncasted<HMul>(capacity, elements_size_value);
+ HInstruction* mul = HMul::NewImul(zone(), context(),
+ capacity->ActualValue(),
+ elements_size_value);
+ AddInstruction(mul);
mul->ClearFlag(HValue::kCanOverflow);
+ STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
+
HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
HValue* total_size = AddUncasted<HAdd>(mul, header_size);
total_size->ClearFlag(HValue::kCanOverflow);
+ return total_size;
+}
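
    The size computation above in scalar form; the header size and element
    widths are typical values assumed here, not taken from v8's headers:

    #include <cstdint>

    static const int32_t kFixedArrayHeaderSize = 16;  // assumed

    // capacity * element_size + header; the multiply cannot overflow because
    // capacity is bounds-checked before this point.
    int32_t ElementsSizeInBytes(int32_t capacity, bool double_elements) {
      int32_t element_size =
          double_elements ? static_cast<int32_t>(sizeof(double))
                          : static_cast<int32_t>(sizeof(void*));
      return capacity * element_size + kFixedArrayHeaderSize;
    }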
+
+
+HAllocate* HGraphBuilder::AllocateJSArrayObject(AllocationSiteMode mode) {
+ int base_size = JSArray::kSize;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ base_size += AllocationMemento::kSize;
+ }
+ HConstant* size_in_bytes = Add<HConstant>(base_size);
+ return Add<HAllocate>(
+ size_in_bytes, HType::JSArray(), NOT_TENURED, JS_OBJECT_TYPE);
+}
+
+
+HConstant* HGraphBuilder::EstablishElementsAllocationSize(
+ ElementsKind kind,
+ int capacity) {
+ int base_size = IsFastDoubleElementsKind(kind)
+ ? FixedDoubleArray::SizeFor(capacity)
+ : FixedArray::SizeFor(capacity);
+
+ return Add<HConstant>(base_size);
+}
+
- return Add<HAllocate>(total_size, HType::JSArray(),
- isolate()->heap()->GetPretenureMode(), instance_type);
+HAllocate* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
+ HValue* size_in_bytes) {
+ InstanceType instance_type = IsFastDoubleElementsKind(kind)
+ ? FIXED_DOUBLE_ARRAY_TYPE
+ : FIXED_ARRAY_TYPE;
+
+ return Add<HAllocate>(size_in_bytes, HType::HeapObject(), NOT_TENURED,
+ instance_type);
}
@@ -2201,7 +2541,7 @@ void HGraphBuilder::BuildInitializeElementsHeader(HValue* elements,
? factory->fixed_double_array_map()
: factory->fixed_array_map();
- AddStoreMapConstant(elements, map);
+ Add<HStoreNamedField>(elements, HObjectAccess::ForMap(), Add<HConstant>(map));
Add<HStoreNamedField>(elements, HObjectAccess::ForFixedArrayLength(),
capacity);
}
@@ -2213,43 +2553,39 @@ HValue* HGraphBuilder::BuildAllocateElementsAndInitializeElementsHeader(
// The HForceRepresentation is to prevent possible deopt on int-smi
// conversion after allocation but before the new object fields are set.
capacity = AddUncasted<HForceRepresentation>(capacity, Representation::Smi());
- HValue* new_elements = BuildAllocateElements(kind, capacity);
+ HValue* size_in_bytes = BuildCalculateElementsSize(kind, capacity);
+ HValue* new_elements = BuildAllocateElements(kind, size_in_bytes);
BuildInitializeElementsHeader(new_elements, kind, capacity);
return new_elements;
}
-HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
- HValue* array_map,
- AllocationSiteMode mode,
- ElementsKind elements_kind,
- HValue* allocation_site_payload,
- HValue* length_field) {
-
+void HGraphBuilder::BuildJSArrayHeader(HValue* array,
+ HValue* array_map,
+ HValue* elements,
+ AllocationSiteMode mode,
+ ElementsKind elements_kind,
+ HValue* allocation_site_payload,
+ HValue* length_field) {
Add<HStoreNamedField>(array, HObjectAccess::ForMap(), array_map);
HConstant* empty_fixed_array =
Add<HConstant>(isolate()->factory()->empty_fixed_array());
- HObjectAccess access = HObjectAccess::ForPropertiesPointer();
- Add<HStoreNamedField>(array, access, empty_fixed_array);
- Add<HStoreNamedField>(array, HObjectAccess::ForArrayLength(elements_kind),
- length_field);
+ Add<HStoreNamedField>(
+ array, HObjectAccess::ForPropertiesPointer(), empty_fixed_array);
+
+ Add<HStoreNamedField>(
+ array, HObjectAccess::ForElementsPointer(),
+ elements != NULL ? elements : empty_fixed_array);
+
+ Add<HStoreNamedField>(
+ array, HObjectAccess::ForArrayLength(elements_kind), length_field);
if (mode == TRACK_ALLOCATION_SITE) {
BuildCreateAllocationMemento(
array, Add<HConstant>(JSArray::kSize), allocation_site_payload);
}
-
- int elements_location = JSArray::kSize;
- if (mode == TRACK_ALLOCATION_SITE) {
- elements_location += AllocationMemento::kSize;
- }
-
- HInnerAllocatedObject* elements = Add<HInnerAllocatedObject>(
- array, Add<HConstant>(elements_location));
- Add<HStoreNamedField>(array, HObjectAccess::ForElementsPointer(), elements);
- return elements;
}
@@ -2259,36 +2595,57 @@ HInstruction* HGraphBuilder::AddElementAccess(
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode) {
- if (is_store) {
+ if (access_type == STORE) {
ASSERT(val != NULL);
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
+ if (elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS) {
val = Add<HClampToUint8>(val);
}
- return Add<HStoreKeyed>(elements, checked_key, val, elements_kind);
+ return Add<HStoreKeyed>(elements, checked_key, val, elements_kind,
+ STORE_TO_INITIALIZED_ENTRY);
}
- ASSERT(!is_store);
+ ASSERT(access_type == LOAD);
ASSERT(val == NULL);
HLoadKeyed* load = Add<HLoadKeyed>(
elements, checked_key, dependency, elements_kind, load_mode);
if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS)) {
graph()->RecordUint32Instruction(load);
}
return load;
}
-HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object) {
- return Add<HLoadNamedField>(object, HObjectAccess::ForElementsPointer());
+HLoadNamedField* HGraphBuilder::AddLoadMap(HValue* object,
+ HValue* dependency) {
+ return Add<HLoadNamedField>(object, dependency, HObjectAccess::ForMap());
}
-HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(HValue* object) {
- return Add<HLoadNamedField>(object,
- HObjectAccess::ForFixedArrayLength());
+HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
+ HValue* dependency) {
+ return Add<HLoadNamedField>(
+ object, dependency, HObjectAccess::ForElementsPointer());
+}
+
+
+HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(
+ HValue* array,
+ HValue* dependency) {
+ return Add<HLoadNamedField>(
+ array, dependency, HObjectAccess::ForFixedArrayLength());
+}
+
+
+HLoadNamedField* HGraphBuilder::AddLoadArrayLength(HValue* array,
+ ElementsKind kind,
+ HValue* dependency) {
+ return Add<HLoadNamedField>(
+ array, dependency, HObjectAccess::ForArrayLength(kind));
}
@@ -2308,31 +2665,21 @@ HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
}
-void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) {
- Heap* heap = isolate()->heap();
- int element_size = IsFastDoubleElementsKind(kind) ? kDoubleSize
- : kPointerSize;
- int max_size = heap->MaxRegularSpaceAllocationSize() / element_size;
- max_size -= JSArray::kSize / element_size;
- HConstant* max_size_constant = Add<HConstant>(max_size);
- Add<HBoundsCheck>(length, max_size_constant);
-}
-
-
HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
HValue* elements,
ElementsKind kind,
ElementsKind new_kind,
HValue* length,
HValue* new_capacity) {
- BuildNewSpaceArrayCheck(new_capacity, new_kind);
+ Add<HBoundsCheck>(new_capacity, Add<HConstant>(
+ (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
+ ElementsKindToShiftSize(kind)));
HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
new_kind, new_capacity);
- BuildCopyElements(elements, kind,
- new_elements, new_kind,
- length, new_capacity);
+ BuildCopyElements(elements, kind, new_elements,
+ new_kind, length, new_capacity);
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
new_elements);
@@ -2341,28 +2688,24 @@ HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
}
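// A minimal standalone sketch of the bound installed by the HBoundsCheck
// above: the largest capacity whose backing store still fits in a regular
// heap object. The constants are illustrative assumptions, not V8's values.
#include <cstdio>

int main() {
  const int kMaxRegularHeapObjectSize = 512 * 1024;  // assumed budget
  const int kFixedArrayHeaderSize = 16;              // assumed map + length
  const int kShiftSize = 3;  // assumed 8-byte elements (tagged or double)

  int max_capacity =
      (kMaxRegularHeapObjectSize - kFixedArrayHeaderSize) >> kShiftSize;
  printf("max new_capacity that passes the bounds check: %d\n", max_capacity);
  return 0;
}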
-void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
- ElementsKind elements_kind,
- HValue* from,
- HValue* to) {
- // Fast elements kinds need to be initialized in case statements below cause
- // a garbage collection.
- Factory* factory = isolate()->factory();
-
- double nan_double = FixedDoubleArray::hole_nan_as_double();
- HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
- ? Add<HConstant>(factory->the_hole_value())
- : Add<HConstant>(nan_double);
+void HGraphBuilder::BuildFillElementsWithValue(HValue* elements,
+ ElementsKind elements_kind,
+ HValue* from,
+ HValue* to,
+ HValue* value) {
+ if (to == NULL) {
+ to = AddLoadFixedArrayLength(elements);
+ }
// Special loop unfolding case
- static const int kLoopUnfoldLimit = 8;
- STATIC_ASSERT(JSArray::kPreallocatedArrayElements <= kLoopUnfoldLimit);
+ STATIC_ASSERT(JSArray::kPreallocatedArrayElements <=
+ kElementLoopUnrollThreshold);
int initial_capacity = -1;
if (from->IsInteger32Constant() && to->IsInteger32Constant()) {
int constant_from = from->GetInteger32Constant();
int constant_to = to->GetInteger32Constant();
- if (constant_from == 0 && constant_to <= kLoopUnfoldLimit) {
+ if (constant_from == 0 && constant_to <= kElementLoopUnrollThreshold) {
initial_capacity = constant_to;
}
}
@@ -2376,154 +2719,231 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
if (initial_capacity >= 0) {
for (int i = 0; i < initial_capacity; i++) {
HInstruction* key = Add<HConstant>(i);
- Add<HStoreKeyed>(elements, key, hole, elements_kind);
+ Add<HStoreKeyed>(elements, key, value, elements_kind);
}
} else {
- LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+ // Carefully loop backwards so that "from" remains live through the loop
+ // rather than "to". This often corresponds to keeping length live rather
+ // than capacity, which helps register allocation, since length is used more
+ // often than capacity after filling with holes.
+ LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
+
+ HValue* key = builder.BeginBody(to, from, Token::GT);
- HValue* key = builder.BeginBody(from, to, Token::LT);
+ HValue* adjusted_key = AddUncasted<HSub>(key, graph()->GetConstant1());
+ adjusted_key->ClearFlag(HValue::kCanOverflow);
- Add<HStoreKeyed>(elements, key, hole, elements_kind);
+ Add<HStoreKeyed>(elements, adjusted_key, value, elements_kind);
builder.EndBody();
}
}
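// A minimal standalone sketch of the post-decrement fill loop above: iterate
// the key from "to" down to "from", storing at key - 1, so the live range of
// "from" (usually the length) outlasts "to" (usually the capacity).
#include <cassert>

static void FillBackwards(int* elements, int from, int to, int value) {
  for (int key = to; key > from; key--) {
    int adjusted_key = key - 1;  // mirrors the HSub of the constant 1
    elements[adjusted_key] = value;
  }
}

int main() {
  int a[8] = {0};
  FillBackwards(a, 2, 8, -1);  // fill indices [2, 8) with a stand-in hole
  assert(a[1] == 0 && a[2] == -1 && a[7] == -1);
  return 0;
}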
+void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
+ ElementsKind elements_kind,
+ HValue* from,
+ HValue* to) {
+ // Fast elements kinds need to be initialized in case the statements below
+ // trigger a garbage collection.
+ Factory* factory = isolate()->factory();
+
+ double nan_double = FixedDoubleArray::hole_nan_as_double();
+ HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
+ ? Add<HConstant>(factory->the_hole_value())
+ : Add<HConstant>(nan_double);
+
+ BuildFillElementsWithValue(elements, elements_kind, from, to, hole);
+}
+
+
void HGraphBuilder::BuildCopyElements(HValue* from_elements,
ElementsKind from_elements_kind,
HValue* to_elements,
ElementsKind to_elements_kind,
HValue* length,
HValue* capacity) {
- bool pre_fill_with_holes =
- IsFastDoubleElementsKind(from_elements_kind) &&
- IsFastObjectElementsKind(to_elements_kind);
+ int constant_capacity = -1;
+ if (capacity != NULL &&
+ capacity->IsConstant() &&
+ HConstant::cast(capacity)->HasInteger32Value()) {
+ int constant_candidate = HConstant::cast(capacity)->Integer32Value();
+ if (constant_candidate <= kElementLoopUnrollThreshold) {
+ constant_capacity = constant_candidate;
+ }
+ }
+ bool pre_fill_with_holes =
+ IsFastDoubleElementsKind(from_elements_kind) &&
+ IsFastObjectElementsKind(to_elements_kind);
if (pre_fill_with_holes) {
// If the copy might trigger a GC, make sure that the FixedArray is
- // pre-initialized with holes to make sure that it's always in a consistent
- // state.
+ // pre-initialized with holes to make sure that it's always in a
+ // consistent state.
BuildFillElementsWithHole(to_elements, to_elements_kind,
- graph()->GetConstant0(), capacity);
+ graph()->GetConstant0(), NULL);
}
- LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+ if (constant_capacity != -1) {
+ // Unroll the loop for small elements kinds.
+ for (int i = 0; i < constant_capacity; i++) {
+ HValue* key_constant = Add<HConstant>(i);
+ HInstruction* value = Add<HLoadKeyed>(from_elements, key_constant,
+ static_cast<HValue*>(NULL),
+ from_elements_kind);
+ Add<HStoreKeyed>(to_elements, key_constant, value, to_elements_kind);
+ }
+ } else {
+ if (!pre_fill_with_holes &&
+ (capacity == NULL || !length->Equals(capacity))) {
+ BuildFillElementsWithHole(to_elements, to_elements_kind,
+ length, NULL);
+ }
- HValue* key = builder.BeginBody(graph()->GetConstant0(), length, Token::LT);
+ if (capacity == NULL) {
+ capacity = AddLoadFixedArrayLength(to_elements);
+ }
- HValue* element = Add<HLoadKeyed>(from_elements, key,
- static_cast<HValue*>(NULL),
- from_elements_kind,
- ALLOW_RETURN_HOLE);
+ LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
- ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
- IsFastSmiElementsKind(to_elements_kind))
+ HValue* key = builder.BeginBody(length, graph()->GetConstant0(),
+ Token::GT);
+
+ key = AddUncasted<HSub>(key, graph()->GetConstant1());
+ key->ClearFlag(HValue::kCanOverflow);
+
+ HValue* element = Add<HLoadKeyed>(from_elements, key,
+ static_cast<HValue*>(NULL),
+ from_elements_kind,
+ ALLOW_RETURN_HOLE);
+
+ ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
+ IsFastSmiElementsKind(to_elements_kind))
? FAST_HOLEY_ELEMENTS : to_elements_kind;
- if (IsHoleyElementsKind(from_elements_kind) &&
- from_elements_kind != to_elements_kind) {
- IfBuilder if_hole(this);
- if_hole.If<HCompareHoleAndBranch>(element);
- if_hole.Then();
- HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
+ if (IsHoleyElementsKind(from_elements_kind) &&
+ from_elements_kind != to_elements_kind) {
+ IfBuilder if_hole(this);
+ if_hole.If<HCompareHoleAndBranch>(element);
+ if_hole.Then();
+ HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
? Add<HConstant>(FixedDoubleArray::hole_nan_as_double())
: graph()->GetConstantHole();
- Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
- if_hole.Else();
- HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
- store->SetFlag(HValue::kAllowUndefinedAsNaN);
- if_hole.End();
- } else {
- HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
- store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
+ if_hole.Else();
+ HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ if_hole.End();
+ } else {
+ HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ }
+
+ builder.EndBody();
}
- builder.EndBody();
+ Counters* counters = isolate()->counters();
+ AddIncrementCounter(counters->inlined_copied_elements());
+}
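// A minimal standalone sketch of the two copy strategies above: straight-line
// stores when the capacity is a small compile-time constant, otherwise a
// backwards loop. The threshold stands in for kElementLoopUnrollThreshold,
// whose actual value is not shown in this hunk.
#include <cassert>

static const int kUnrollThreshold = 8;

// Constant-capacity path: the copy is emitted as straight-line stores.
static void CopySmallConstant(const int* from, int* to) {
  to[0] = from[0]; to[1] = from[1]; to[2] = from[2]; to[3] = from[3];
}

// Dynamic-capacity path: a backwards loop, matching the fill loop's shape.
static void CopyLoop(const int* from, int* to, int length) {
  for (int key = length; key > 0; key--) to[key - 1] = from[key - 1];
}

int main() {
  int src[4] = {1, 2, 3, 4}, dst[4] = {0};
  assert(4 <= kUnrollThreshold);  // small constant capacity: unroll
  CopySmallConstant(src, dst);
  CopyLoop(src, dst, 4);          // dynamic capacity: loop
  assert(dst[3] == 4);
  return 0;
}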
- if (!pre_fill_with_holes && length != capacity) {
- // Fill unused capacity with the hole.
- BuildFillElementsWithHole(to_elements, to_elements_kind,
- key, capacity);
- }
+
+HValue* HGraphBuilder::BuildCloneShallowArrayCow(HValue* boilerplate,
+ HValue* allocation_site,
+ AllocationSiteMode mode,
+ ElementsKind kind) {
+ HAllocate* array = AllocateJSArrayObject(mode);
+
+ HValue* map = AddLoadMap(boilerplate);
+ HValue* elements = AddLoadElements(boilerplate);
+ HValue* length = AddLoadArrayLength(boilerplate, kind);
+
+ BuildJSArrayHeader(array,
+ map,
+ elements,
+ mode,
+ FAST_ELEMENTS,
+ allocation_site,
+ length);
+ return array;
}
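// A minimal standalone sketch of the COW clone above: the new array reuses the
// boilerplate's map, elements pointer and length, so no element data is copied
// until a write forces it. The struct layout is an illustrative assumption.
struct FixedArray {};
struct JSArray {
  const void* map;
  FixedArray* elements;
  int length;
};

static JSArray CloneShallowCow(const JSArray& boilerplate) {
  JSArray clone = {boilerplate.map, boilerplate.elements, boilerplate.length};
  return clone;
}

int main() {
  FixedArray backing;
  JSArray boilerplate = {0, &backing, 3};
  JSArray clone = CloneShallowCow(boilerplate);
  return clone.elements == boilerplate.elements ? 0 : 1;  // shared backing
}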
-HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
- HValue* allocation_site,
- AllocationSiteMode mode,
- ElementsKind kind,
- int length) {
- NoObservableSideEffectsScope no_effects(this);
+HValue* HGraphBuilder::BuildCloneShallowArrayEmpty(HValue* boilerplate,
+ HValue* allocation_site,
+ AllocationSiteMode mode) {
+ HAllocate* array = AllocateJSArrayObject(mode);
- // All sizes here are multiples of kPointerSize.
- int size = JSArray::kSize;
- if (mode == TRACK_ALLOCATION_SITE) {
- size += AllocationMemento::kSize;
- }
+ HValue* map = AddLoadMap(boilerplate);
- HValue* size_in_bytes = Add<HConstant>(size);
- HInstruction* object = Add<HAllocate>(size_in_bytes,
- HType::JSObject(),
- NOT_TENURED,
- JS_OBJECT_TYPE);
+ BuildJSArrayHeader(array,
+ map,
+ NULL, // set elements to empty fixed array
+ mode,
+ FAST_ELEMENTS,
+ allocation_site,
+ graph()->GetConstant0());
+ return array;
+}
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- HObjectAccess access = HObjectAccess::ForJSArrayOffset(i);
- Add<HStoreNamedField>(object, access,
- Add<HLoadNamedField>(boilerplate, access));
- }
- }
- // Create an allocation site info if requested.
- if (mode == TRACK_ALLOCATION_SITE) {
- BuildCreateAllocationMemento(
- object, Add<HConstant>(JSArray::kSize), allocation_site);
- }
+HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
+ HValue* allocation_site,
+ AllocationSiteMode mode,
+ ElementsKind kind) {
+ HValue* boilerplate_elements = AddLoadElements(boilerplate);
+ HValue* capacity = AddLoadFixedArrayLength(boilerplate_elements);
- if (length > 0) {
- HValue* boilerplate_elements = AddLoadElements(boilerplate);
- HValue* object_elements;
- if (IsFastDoubleElementsKind(kind)) {
- HValue* elems_size = Add<HConstant>(FixedDoubleArray::SizeFor(length));
- object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
- NOT_TENURED, FIXED_DOUBLE_ARRAY_TYPE);
- } else {
- HValue* elems_size = Add<HConstant>(FixedArray::SizeFor(length));
- object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
- NOT_TENURED, FIXED_ARRAY_TYPE);
- }
- Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
- object_elements);
+ // Generate size calculation code here in order to make it dominate
+ // the JSArray allocation.
+ HValue* elements_size = BuildCalculateElementsSize(kind, capacity);
- // Copy the elements array header.
- for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
- HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
- Add<HStoreNamedField>(object_elements, access,
- Add<HLoadNamedField>(boilerplate_elements, access));
- }
+ // Create an empty JSArray object for now; store elimination should remove
+ // redundant initialization of the elements and length fields, and at the
+ // same time the object will be fully prepared for GC if one happens during
+ // the elements allocation.
+ HValue* result = BuildCloneShallowArrayEmpty(
+ boilerplate, allocation_site, mode);
- // Copy the elements array contents.
- // TODO(mstarzinger): Teach HGraphBuilder::BuildCopyElements to unfold
- // copying loops with constant length up to a given boundary and use this
- // helper here instead.
- for (int i = 0; i < length; i++) {
- HValue* key_constant = Add<HConstant>(i);
- HInstruction* value = Add<HLoadKeyed>(boilerplate_elements, key_constant,
- static_cast<HValue*>(NULL), kind);
- Add<HStoreKeyed>(object_elements, key_constant, value, kind);
- }
+ HAllocate* elements = BuildAllocateElements(kind, elements_size);
+
+ // This function implicitly relies on the fact that the
+ // FastCloneShallowArrayStub is called only for literals shorter than
+ // JSObject::kInitialMaxFastElementArray.
+ // An HBoundsCheck can't be added here because otherwise the stub would
+ // eagerly allocate a frame.
+ HConstant* size_upper_bound = EstablishElementsAllocationSize(
+ kind, JSObject::kInitialMaxFastElementArray);
+ elements->set_size_upper_bound(size_upper_bound);
+
+ Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
+
+ // The allocation for the cloned array above causes register pressure on
+ // machines with low register counts. Force a reload of the boilerplate
+ // elements here to free up a register for the allocation to avoid unnecessary
+ // spillage.
+ boilerplate_elements = AddLoadElements(boilerplate);
+ boilerplate_elements->SetFlag(HValue::kCantBeReplaced);
+
+ // Copy the elements array header.
+ for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
+ HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
+ Add<HStoreNamedField>(elements, access,
+ Add<HLoadNamedField>(boilerplate_elements,
+ static_cast<HValue*>(NULL), access));
}
- return object;
+ // Also copy the boilerplate's length onto the result.
+ HValue* length = AddLoadArrayLength(boilerplate, kind);
+ Add<HStoreNamedField>(result, HObjectAccess::ForArrayLength(kind), length);
+
+ BuildCopyElements(boilerplate_elements, kind, elements,
+ kind, length, NULL);
+ return result;
}
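// A minimal standalone sketch of the ordering used above: first build a fully
// initialized "empty" array, then allocate the backing store and patch the
// elements pointer and length, so the object is valid at every point where a
// GC could run. The types are illustrative assumptions.
#include <vector>

struct Array {
  std::vector<int>* elements;  // stands in for the elements pointer
  int length;
};

static Array CloneNonEmpty(const Array& boilerplate) {
  Array result = {0, 0};  // step 1: a valid, fully initialized empty array
  // step 2: allocate the backing store (a GC could safely happen here)
  result.elements = new std::vector<int>(*boilerplate.elements);
  result.length = boilerplate.length;  // step 3: patch length afterwards
  return result;
}

int main() {
  std::vector<int> backing;
  backing.push_back(1); backing.push_back(2); backing.push_back(3);
  Array boilerplate = {&backing, 3};
  Array clone = CloneNonEmpty(boilerplate);
  bool ok = clone.elements->size() == 3 && clone.length == 3;
  delete clone.elements;
  return ok ? 0 : 1;
}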
void HGraphBuilder::BuildCompareNil(
HValue* value,
- Handle<Type> type,
+ Type* type,
HIfContinuation* continuation) {
IfBuilder if_nil(this);
bool some_case_handled = false;
@@ -2563,7 +2983,7 @@ void HGraphBuilder::BuildCompareNil(
// the monomorphic map when the code is used as a template to generate a
// new IC. For optimized functions, there is no sentinel map, the map
// emitted below is the actual monomorphic map.
- BuildCheckMap(value, type->Classes().Current());
+ Add<HCheckMaps>(value, type->Classes().Current());
} else {
if_nil.Deopt("Too many undetectable types");
}
@@ -2579,7 +2999,7 @@ void HGraphBuilder::BuildCreateAllocationMemento(
HValue* allocation_site) {
ASSERT(allocation_site != NULL);
HInnerAllocatedObject* allocation_memento = Add<HInnerAllocatedObject>(
- previous_object, previous_object_size);
+ previous_object, previous_object_size, HType::HeapObject());
AddStoreMapConstant(
allocation_memento, isolate()->factory()->allocation_memento_map());
Add<HStoreNamedField>(
@@ -2588,25 +3008,45 @@ void HGraphBuilder::BuildCreateAllocationMemento(
allocation_site);
if (FLAG_allocation_site_pretenuring) {
HValue* memento_create_count = Add<HLoadNamedField>(
- allocation_site, HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kMementoCreateCountOffset));
+ allocation_site, static_cast<HValue*>(NULL),
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kPretenureCreateCountOffset));
memento_create_count = AddUncasted<HAdd>(
memento_create_count, graph()->GetConstant1());
- HStoreNamedField* store = Add<HStoreNamedField>(
+ // This smi value is reset to zero after every gc; overflow isn't a problem,
+ // since the counter is bounded by the new space size.
+ memento_create_count->ClearFlag(HValue::kCanOverflow);
+ Add<HStoreNamedField>(
allocation_site, HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kMementoCreateCountOffset), memento_create_count);
- // No write barrier needed to store a smi.
- store->SkipWriteBarrier();
+ AllocationSite::kPretenureCreateCountOffset), memento_create_count);
}
}
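// A minimal standalone sketch of why kCanOverflow can be cleared above: the
// counter is zeroed at every gc, and each increment corresponds to at least
// one new-space allocation, so it stays far below the smi maximum. The sizes
// below are illustrative assumptions.
#include <cstdio>

int main() {
  const long kNewSpaceBytes = 8L * 1024 * 1024;  // assumed semispace budget
  const long kMinAllocationBytes = 16;           // assumed smallest object
  long bound = kNewSpaceBytes / kMinAllocationBytes;
  printf("max increments between gcs: %ld\n", bound);
  return 0;
}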
-HInstruction* HGraphBuilder::BuildGetNativeContext() {
+HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
// Get the global context, then the native context
- HInstruction* global_object = Add<HGlobalObject>();
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(
+ HInstruction* context =
+ Add<HLoadNamedField>(closure, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFunctionContextPointer());
+ HInstruction* global_object = Add<HLoadNamedField>(
+ context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
GlobalObject::kNativeContextOffset);
- return Add<HLoadNamedField>(global_object, access);
+ return Add<HLoadNamedField>(
+ global_object, static_cast<HValue*>(NULL), access);
+}
+
+
+HInstruction* HGraphBuilder::BuildGetNativeContext() {
+ // Get the global context, then the native context
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ return Add<HLoadNamedField>(
+ global_object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForObservableJSObjectOffset(
+ GlobalObject::kNativeContextOffset));
}
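// A minimal standalone sketch of the pointer chain the loads above follow:
// closure -> context -> global object -> native context. The struct layout is
// an illustrative assumption, not V8's object model.
struct NativeContext {};
struct GlobalObject { NativeContext* native_context; };
struct Context { GlobalObject* global_object; };
struct Closure { Context* context; };

static NativeContext* GetNativeContext(Closure* closure) {
  return closure->context->global_object->native_context;
}

int main() {
  NativeContext native;
  GlobalObject global = {&native};
  Context context = {&global};
  Closure closure = {&context};
  return GetNativeContext(&closure) == &native ? 0 : 1;
}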
@@ -2628,6 +3068,9 @@ HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
kind_(kind),
allocation_site_payload_(allocation_site_payload),
constructor_function_(constructor_function) {
+ ASSERT(!allocation_site_payload->IsConstant() ||
+ HConstant::cast(allocation_site_payload)->handle(
+ builder_->isolate())->IsAllocationSite());
mode_ = override_mode == DISABLE_ALLOCATION_SITES
? DONT_TRACK_ALLOCATION_SITE
: AllocationSite::GetMode(kind);
@@ -2657,10 +3100,16 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
// No need for a context lookup if the kind_ matches the initial
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddLoadNamedField(constructor_function_, access);
+ return builder()->Add<HLoadNamedField>(
+ constructor_function_, static_cast<HValue*>(NULL), access);
}
- HInstruction* native_context = builder()->BuildGetNativeContext();
+ // TODO(mvstanton): we should always have a constructor function if we
+ // are creating a stub.
+ HInstruction* native_context = constructor_function_ != NULL
+ ? builder()->BuildGetNativeContext(constructor_function_)
+ : builder()->BuildGetNativeContext();
+
HInstruction* index = builder()->Add<HConstant>(
static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
@@ -2677,71 +3126,52 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddLoadNamedField(constructor_function_, access);
+ return builder()->Add<HLoadNamedField>(
+ constructor_function_, static_cast<HValue*>(NULL), access);
}
-HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
- HValue* length_node) {
- ASSERT(length_node != NULL);
-
- int base_size = JSArray::kSize;
- if (mode_ == TRACK_ALLOCATION_SITE) {
- base_size += AllocationMemento::kSize;
- }
-
- STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
- base_size += FixedArray::kHeaderSize;
-
- HInstruction* elements_size_value =
- builder()->Add<HConstant>(elements_size());
- HInstruction* mul = HMul::NewImul(builder()->zone(), builder()->context(),
- length_node, elements_size_value);
- builder()->AddInstruction(mul);
- HInstruction* base = builder()->Add<HConstant>(base_size);
- HInstruction* total_size = HAdd::New(builder()->zone(), builder()->context(),
- base, mul);
- total_size->ClearFlag(HValue::kCanOverflow);
- builder()->AddInstruction(total_size);
- return total_size;
+HAllocate* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
+ HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
+ return AllocateArray(capacity,
+ capacity,
+ builder()->graph()->GetConstant0());
}
-HValue* HGraphBuilder::JSArrayBuilder::EstablishEmptyArrayAllocationSize() {
- int base_size = JSArray::kSize;
- if (mode_ == TRACK_ALLOCATION_SITE) {
- base_size += AllocationMemento::kSize;
- }
-
- base_size += IsFastDoubleElementsKind(kind_)
- ? FixedDoubleArray::SizeFor(initial_capacity())
- : FixedArray::SizeFor(initial_capacity());
-
- return builder()->Add<HConstant>(base_size);
+HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
+ HValue* capacity,
+ HConstant* capacity_upper_bound,
+ HValue* length_field,
+ FillMode fill_mode) {
+ return AllocateArray(capacity,
+ capacity_upper_bound->GetInteger32Constant(),
+ length_field,
+ fill_mode);
}
-HValue* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
- HValue* size_in_bytes = EstablishEmptyArrayAllocationSize();
- HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
- return AllocateArray(size_in_bytes,
- capacity,
- builder()->graph()->GetConstant0());
-}
-
+HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
+ HValue* capacity,
+ int capacity_upper_bound,
+ HValue* length_field,
+ FillMode fill_mode) {
+ HConstant* elements_size_upper_bound = capacity->IsInteger32Constant()
+ ? HConstant::cast(capacity)
+ : builder()->EstablishElementsAllocationSize(kind_, capacity_upper_bound);
-HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* capacity,
- HValue* length_field,
- FillMode fill_mode) {
- HValue* size_in_bytes = EstablishAllocationSize(capacity);
- return AllocateArray(size_in_bytes, capacity, length_field, fill_mode);
+ HAllocate* array = AllocateArray(capacity, length_field, fill_mode);
+ if (!elements_location_->has_size_upper_bound()) {
+ elements_location_->set_size_upper_bound(elements_size_upper_bound);
+ }
+ return array;
}
-HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
- HValue* capacity,
- HValue* length_field,
- FillMode fill_mode) {
+HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
+ HValue* capacity,
+ HValue* length_field,
+ FillMode fill_mode) {
// These HForceRepresentations are needed because we store these values as
// fields in the objects we construct, and an int32-to-smi HChange could
// deopt. Accept the deopt possibility now, before allocation occurs.
@@ -2751,14 +3181,14 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
length_field =
builder()->AddUncasted<HForceRepresentation>(length_field,
Representation::Smi());
- // Allocate (dealing with failure appropriately)
- HAllocate* new_object = builder()->Add<HAllocate>(size_in_bytes,
- HType::JSArray(), NOT_TENURED, JS_ARRAY_TYPE);
- // Folded array allocation should be aligned if it has fast double elements.
- if (IsFastDoubleElementsKind(kind_)) {
- new_object->MakeDoubleAligned();
- }
+ // Generate size calculation code here in order to make it dominate
+ // the JSArray allocation.
+ HValue* elements_size =
+ builder()->BuildCalculateElementsSize(kind_, capacity);
+
+ // Allocate (dealing with failure appropriately)
+ HAllocate* array_object = builder()->AllocateJSArrayObject(mode_);
// Fill in the fields: map, properties, length
HValue* map;
@@ -2767,47 +3197,52 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
} else {
map = EmitMapCode();
}
- elements_location_ = builder()->BuildJSArrayHeader(new_object,
- map,
- mode_,
- kind_,
- allocation_site_payload_,
- length_field);
- // Initialize the elements
+ builder()->BuildJSArrayHeader(array_object,
+ map,
+ NULL, // set elements to empty fixed array
+ mode_,
+ kind_,
+ allocation_site_payload_,
+ length_field);
+
+ // Allocate and initialize the elements
+ elements_location_ = builder()->BuildAllocateElements(kind_, elements_size);
+
builder()->BuildInitializeElementsHeader(elements_location_, kind_, capacity);
+ // Set the elements
+ builder()->Add<HStoreNamedField>(
+ array_object, HObjectAccess::ForElementsPointer(), elements_location_);
+
if (fill_mode == FILL_WITH_HOLE) {
builder()->BuildFillElementsWithHole(elements_location_, kind_,
graph()->GetConstant0(), capacity);
}
- return new_object;
-}
-
-
-HStoreNamedField* HGraphBuilder::AddStoreMapConstant(HValue *object,
- Handle<Map> map) {
- return Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
- Add<HConstant>(map));
+ return array_object;
}
HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin) {
- HGlobalObject* global_object = Add<HGlobalObject>();
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
GlobalObject::kBuiltinsOffset);
- HValue* builtins = Add<HLoadNamedField>(global_object, access);
- HObjectAccess function_access = HObjectAccess::ForJSObjectOffset(
- JSBuiltinsObject::OffsetOfFunctionWithId(builtin));
- return Add<HLoadNamedField>(builtins, function_access);
+ HValue* builtins = Add<HLoadNamedField>(
+ global_object, static_cast<HValue*>(NULL), access);
+ HObjectAccess function_access = HObjectAccess::ForObservableJSObjectOffset(
+ JSBuiltinsObject::OffsetOfFunctionWithId(builtin));
+ return Add<HLoadNamedField>(
+ builtins, static_cast<HValue*>(NULL), function_access);
}
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info),
function_state_(NULL),
- initial_function_state_(this, info, NORMAL_RETURN),
+ initial_function_state_(this, info, NORMAL_RETURN, 0),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
@@ -2818,8 +3253,8 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_= &initial_function_state_;
- InitializeAstVisitor(info->isolate());
- if (FLAG_emit_opt_code_positions) {
+ InitializeAstVisitor(info->zone());
+ if (FLAG_hydrogen_track_positions) {
SetSourcePosition(info->shared_info()->start_position());
}
}
@@ -2888,7 +3323,8 @@ HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
}
-void HBasicBlock::FinishExit(HControlInstruction* instruction, int position) {
+void HBasicBlock::FinishExit(HControlInstruction* instruction,
+ HSourcePosition position) {
Finish(instruction, position);
ClearEnvironment();
}
@@ -2911,14 +3347,16 @@ HGraph::HGraph(CompilationInfo* info)
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
- disallow_adding_new_values_(false) {
+ disallow_adding_new_values_(false),
+ next_inline_id_(0),
+ inlined_functions_(5, info->zone()) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
- CodeStubInterfaceDescriptor* descriptor =
- stub->GetInterfaceDescriptor(isolate_);
+ CodeStubInterfaceDescriptor* descriptor = stub->GetInterfaceDescriptor();
start_environment_ =
new(zone_) HEnvironment(zone_, descriptor->environment_length());
} else {
+ TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown());
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
@@ -2946,6 +3384,81 @@ void HGraph::FinalizeUniqueness() {
}
+int HGraph::TraceInlinedFunction(
+ Handle<SharedFunctionInfo> shared,
+ HSourcePosition position) {
+ if (!FLAG_hydrogen_track_positions) {
+ return 0;
+ }
+
+ int id = 0;
+ for (; id < inlined_functions_.length(); id++) {
+ if (inlined_functions_[id].shared().is_identical_to(shared)) {
+ break;
+ }
+ }
+
+ if (id == inlined_functions_.length()) {
+ inlined_functions_.Add(InlinedFunctionInfo(shared), zone());
+
+ if (!shared->script()->IsUndefined()) {
+ Handle<Script> script(Script::cast(shared->script()));
+ if (!script->source()->IsUndefined()) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ PrintF(tracing_scope.file(),
+ "--- FUNCTION SOURCE (%s) id{%d,%d} ---\n",
+ shared->DebugName()->ToCString().get(),
+ info()->optimization_id(),
+ id);
+
+ {
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(String::cast(script->source()),
+ &op,
+ shared->start_position());
+ // shared->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len =
+ shared->end_position() - shared->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.HasMore()) {
+ PrintF(tracing_scope.file(), "%c", stream.GetNext());
+ }
+ }
+ }
+
+ PrintF(tracing_scope.file(), "\n--- END ---\n");
+ }
+ }
+ }
+
+ int inline_id = next_inline_id_++;
+
+ if (inline_id != 0) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ PrintF(tracing_scope.file(), "INLINE (%s) id{%d,%d} AS %d AT ",
+ shared->DebugName()->ToCString().get(),
+ info()->optimization_id(),
+ id,
+ inline_id);
+ position.PrintTo(tracing_scope.file());
+ PrintF(tracing_scope.file(), "\n");
+ }
+
+ return inline_id;
+}
+
+
+int HGraph::SourcePositionToScriptPosition(HSourcePosition pos) {
+ if (!FLAG_hydrogen_track_positions || pos.IsUnknown()) {
+ return pos.raw();
+ }
+
+ return inlined_functions_[pos.inlining_id()].start_position() +
+ pos.position();
+}
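// A minimal standalone sketch of the decoding above: a tracked position is an
// (inlining_id, offset) pair, and the script position is the inlined
// function's start position plus that offset.
#include <cassert>
#include <vector>

struct InlinedFunctionInfo { int start_position; };

static int ToScriptPosition(const std::vector<InlinedFunctionInfo>& inlined,
                            int inlining_id, int offset) {
  return inlined[inlining_id].start_position + offset;
}

int main() {
  std::vector<InlinedFunctionInfo> inlined;
  inlined.push_back(InlinedFunctionInfo());  // id 0: the outer function
  inlined.push_back(InlinedFunctionInfo());  // id 1: an inlinee
  inlined[0].start_position = 0;
  inlined[1].start_position = 120;
  assert(ToScriptPosition(inlined, 1, 7) == 127);
  return 0;
}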
+
+
// Block ordering was implemented with two mutually recursive methods,
// HGraph::Postorder and HGraph::PostorderLoopBlocks.
// The recursion could lead to stack overflow so the algorithm has been
@@ -3002,21 +3515,19 @@ class PostorderProcessor : public ZoneObject {
HBasicBlock* loop_header() { return loop_header_; }
static PostorderProcessor* CreateEntryProcessor(Zone* zone,
- HBasicBlock* block,
- BitVector* visited) {
+ HBasicBlock* block) {
PostorderProcessor* result = new(zone) PostorderProcessor(NULL);
- return result->SetupSuccessors(zone, block, NULL, visited);
+ return result->SetupSuccessors(zone, block, NULL);
}
PostorderProcessor* PerformStep(Zone* zone,
- BitVector* visited,
ZoneList<HBasicBlock*>* order) {
PostorderProcessor* next =
- PerformNonBacktrackingStep(zone, visited, order);
+ PerformNonBacktrackingStep(zone, order);
if (next != NULL) {
return next;
} else {
- return Backtrack(zone, visited, order);
+ return Backtrack(zone, order);
}
}
@@ -3036,9 +3547,8 @@ class PostorderProcessor : public ZoneObject {
// Each "Setup..." method is like a constructor for a cycle state.
PostorderProcessor* SetupSuccessors(Zone* zone,
HBasicBlock* block,
- HBasicBlock* loop_header,
- BitVector* visited) {
- if (block == NULL || visited->Contains(block->block_id()) ||
+ HBasicBlock* loop_header) {
+ if (block == NULL || block->IsOrdered() ||
block->parent_loop_header() != loop_header) {
kind_ = NONE;
block_ = NULL;
@@ -3048,7 +3558,7 @@ class PostorderProcessor : public ZoneObject {
} else {
block_ = block;
loop_ = NULL;
- visited->Add(block->block_id());
+ block->MarkAsOrdered();
if (block->IsLoopHeader()) {
kind_ = SUCCESSORS_OF_LOOP_HEADER;
@@ -3111,7 +3621,6 @@ class PostorderProcessor : public ZoneObject {
// This method is the basic block to walk up the stack.
PostorderProcessor* Pop(Zone* zone,
- BitVector* visited,
ZoneList<HBasicBlock*>* order) {
switch (kind_) {
case SUCCESSORS:
@@ -3138,16 +3647,15 @@ class PostorderProcessor : public ZoneObject {
// Walks up the stack.
PostorderProcessor* Backtrack(Zone* zone,
- BitVector* visited,
ZoneList<HBasicBlock*>* order) {
- PostorderProcessor* parent = Pop(zone, visited, order);
+ PostorderProcessor* parent = Pop(zone, order);
while (parent != NULL) {
PostorderProcessor* next =
- parent->PerformNonBacktrackingStep(zone, visited, order);
+ parent->PerformNonBacktrackingStep(zone, order);
if (next != NULL) {
return next;
} else {
- parent = parent->Pop(zone, visited, order);
+ parent = parent->Pop(zone, order);
}
}
return NULL;
@@ -3155,7 +3663,6 @@ class PostorderProcessor : public ZoneObject {
PostorderProcessor* PerformNonBacktrackingStep(
Zone* zone,
- BitVector* visited,
ZoneList<HBasicBlock*>* order) {
HBasicBlock* next_block;
switch (kind_) {
@@ -3163,16 +3670,14 @@ class PostorderProcessor : public ZoneObject {
next_block = AdvanceSuccessors();
if (next_block != NULL) {
PostorderProcessor* result = Push(zone);
- return result->SetupSuccessors(zone, next_block,
- loop_header_, visited);
+ return result->SetupSuccessors(zone, next_block, loop_header_);
}
break;
case SUCCESSORS_OF_LOOP_HEADER:
next_block = AdvanceSuccessors();
if (next_block != NULL) {
PostorderProcessor* result = Push(zone);
- return result->SetupSuccessors(zone, next_block,
- block(), visited);
+ return result->SetupSuccessors(zone, next_block, block());
}
break;
case LOOP_MEMBERS:
@@ -3187,8 +3692,7 @@ class PostorderProcessor : public ZoneObject {
next_block = AdvanceSuccessors();
if (next_block != NULL) {
PostorderProcessor* result = Push(zone);
- return result->SetupSuccessors(zone, next_block,
- loop_header_, visited);
+ return result->SetupSuccessors(zone, next_block, loop_header_);
}
break;
case NONE:
@@ -3243,21 +3747,36 @@ class PostorderProcessor : public ZoneObject {
void HGraph::OrderBlocks() {
CompilationPhase phase("H_Block ordering", info());
- BitVector visited(blocks_.length(), zone());
- ZoneList<HBasicBlock*> reverse_result(8, zone());
- HBasicBlock* start = blocks_[0];
- PostorderProcessor* postorder =
- PostorderProcessor::CreateEntryProcessor(zone(), start, &visited);
- while (postorder != NULL) {
- postorder = postorder->PerformStep(zone(), &visited, &reverse_result);
+#ifdef DEBUG
+ // Initially the blocks must not be ordered.
+ for (int i = 0; i < blocks_.length(); ++i) {
+ ASSERT(!blocks_[i]->IsOrdered());
}
+#endif
+
+ PostorderProcessor* postorder =
+ PostorderProcessor::CreateEntryProcessor(zone(), blocks_[0]);
blocks_.Rewind(0);
- int index = 0;
- for (int i = reverse_result.length() - 1; i >= 0; --i) {
- HBasicBlock* b = reverse_result[i];
- blocks_.Add(b, zone());
- b->set_block_id(index++);
+ while (postorder) {
+ postorder = postorder->PerformStep(zone(), &blocks_);
+ }
+
+#ifdef DEBUG
+ // Now all blocks must be marked as ordered.
+ for (int i = 0; i < blocks_.length(); ++i) {
+ ASSERT(blocks_[i]->IsOrdered());
+ }
+#endif
+
+ // Reverse block list and assign block IDs.
+ for (int i = 0, j = blocks_.length(); --j >= i; ++i) {
+ HBasicBlock* bi = blocks_[i];
+ HBasicBlock* bj = blocks_[j];
+ bi->set_block_id(j);
+ bj->set_block_id(i);
+ blocks_[i] = bj;
+ blocks_[j] = bi;
}
}
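// A minimal standalone sketch of the single-pass reversal above: swap blocks
// from both ends while handing each one the id of the slot it lands in; the
// "--j >= i" condition also numbers the odd middle element exactly once.
#include <cassert>
#include <vector>

struct Block { int id; };

static void ReverseAndNumber(std::vector<Block*>* blocks) {
  for (int i = 0, j = static_cast<int>(blocks->size()); --j >= i; ++i) {
    Block* bi = (*blocks)[i];
    Block* bj = (*blocks)[j];
    bi->id = j;
    bj->id = i;
    (*blocks)[i] = bj;
    (*blocks)[j] = bi;
  }
}

int main() {
  Block a = {-1}, b = {-1}, c = {-1};
  std::vector<Block*> blocks;
  blocks.push_back(&a); blocks.push_back(&b); blocks.push_back(&c);
  ReverseAndNumber(&blocks);
  assert(blocks[0] == &c && blocks[0]->id == 0);
  assert(blocks[1] == &b && blocks[1]->id == 1);
  assert(blocks[2] == &a && blocks[2]->id == 2);
  return 0;
}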
@@ -3324,7 +3843,8 @@ void HGraph::CollectPhis() {
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind)
+ InliningKind inlining_kind,
+ int inlining_id)
: owner_(owner),
compilation_info_(info),
call_context_(NULL),
@@ -3334,6 +3854,8 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
entry_(NULL),
arguments_object_(NULL),
arguments_elements_(NULL),
+ inlining_id_(inlining_id),
+ outer_source_position_(HSourcePosition::Unknown()),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
@@ -3357,12 +3879,27 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
// Push on the state stack.
owner->set_function_state(this);
+
+ if (FLAG_hydrogen_track_positions) {
+ outer_source_position_ = owner->source_position();
+ owner->EnterInlinedSource(
+ info->shared_info()->start_position(),
+ inlining_id);
+ owner->SetSourcePosition(info->shared_info()->start_position());
+ }
}
FunctionState::~FunctionState() {
delete test_context_;
owner_->set_function_state(outer_);
+
+ if (FLAG_hydrogen_track_positions) {
+ owner_->set_source_position(outer_source_position_);
+ owner_->EnterInlinedSource(
+ outer_->compilation_info()->shared_info()->start_position(),
+ outer_->inlining_id());
+ }
}
@@ -3621,7 +4158,6 @@ void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
}
-
void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
@@ -3630,20 +4166,6 @@ void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
}
-void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
- CHECK_ALIVE(VisitForValue(expr));
- Push(Add<HPushArgument>(Pop()));
-}
-
-
-void HOptimizedGraphBuilder::VisitArgumentList(
- ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- CHECK_ALIVE(VisitArgument(arguments->at(i)));
- }
-}
-
-
void HOptimizedGraphBuilder::VisitExpressions(
ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
@@ -3793,10 +4315,11 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
- if (FLAG_use_range) Run<HRangeAnalysisPhase>();
+ if (FLAG_store_elimination) Run<HStoreEliminationPhase>();
+
+ Run<HRangeAnalysisPhase>();
Run<HComputeChangeUndefinedToNaN>();
- Run<HComputeMinusZeroChecksPhase>();
// Eliminate redundant stack checks on backwards branches.
Run<HStackCheckEliminationPhase>();
@@ -3831,7 +4354,13 @@ void HGraph::RestoreActualValues() {
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
- if (instruction->ActualValue() != instruction) {
+ if (instruction->ActualValue() == instruction) continue;
+ if (instruction->CheckFlag(HValue::kIsDead)) {
+ // The instruction was marked as deleted but left in the graph
+ // as a control flow dependency point for subsequent
+ // instructions.
+ instruction->DeleteAndReplaceWith(instruction->ActualValue());
+ } else {
ASSERT(instruction->IsInformativeDefinition());
if (instruction->IsPurelyInformativeDefinition()) {
instruction->DeleteAndReplaceWith(instruction->RedefinedOperand());
@@ -3844,17 +4373,23 @@ void HGraph::RestoreActualValues() {
}
-template <class Instruction>
-HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
- int count = call->argument_count();
+void HOptimizedGraphBuilder::PushArgumentsFromEnvironment(int count) {
ZoneList<HValue*> arguments(count, zone());
for (int i = 0; i < count; ++i) {
arguments.Add(Pop(), zone());
}
+ HPushArguments* push_args = New<HPushArguments>();
while (!arguments.is_empty()) {
- Add<HPushArgument>(arguments.RemoveLast());
+ push_args->AddInput(arguments.RemoveLast());
}
+ AddInstruction(push_args);
+}
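// A minimal standalone sketch of the two reversals above: arguments come off
// the environment newest-first via Pop(), then RemoveLast() drains them in
// reverse, restoring the original left-to-right order for HPushArguments.
#include <cassert>
#include <vector>

int main() {
  std::vector<int> environment;
  environment.push_back(1); environment.push_back(2); environment.push_back(3);

  std::vector<int> popped;                 // Pop(): yields 3, 2, 1
  while (!environment.empty()) {
    popped.push_back(environment.back());
    environment.pop_back();
  }

  std::vector<int> push_args;              // RemoveLast(): yields 1, 2, 3
  while (!popped.empty()) {
    push_args.push_back(popped.back());
    popped.pop_back();
  }

  assert(push_args[0] == 1 && push_args[1] == 2 && push_args[2] == 3);
  return 0;
}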
+
+
+template <class Instruction>
+HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
+ PushArgumentsFromEnvironment(call->argument_count());
return call;
}
@@ -3911,13 +4446,52 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (stmt->scope() != NULL) {
- return Bailout(kScopedBlock);
- }
- BreakAndContinueInfo break_info(stmt);
+
+ Scope* outer_scope = scope();
+ Scope* scope = stmt->scope();
+ BreakAndContinueInfo break_info(stmt, outer_scope);
+
{ BreakAndContinueScope push(&break_info, this);
+ if (scope != NULL) {
+ // Load the function object.
+ Scope* declaration_scope = scope->DeclarationScope();
+ HInstruction* function;
+ HValue* outer_context = environment()->context();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_eval_scope()) {
+ function = new(zone()) HLoadContextSlot(
+ outer_context, Context::CLOSURE_INDEX, HLoadContextSlot::kNoCheck);
+ } else {
+ function = New<HThisFunction>();
+ }
+ AddInstruction(function);
+ // Allocate a block context and store it to the stack frame.
+ HInstruction* inner_context = Add<HAllocateBlockContext>(
+ outer_context, function, scope->GetScopeInfo());
+ HInstruction* instr = Add<HStoreFrameContext>(inner_context);
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(stmt->EntryId(), REMOVABLE_SIMULATE);
+ }
+ set_scope(scope);
+ environment()->BindContext(inner_context);
+ VisitDeclarations(scope->declarations());
+ AddSimulate(stmt->DeclsId(), REMOVABLE_SIMULATE);
+ }
CHECK_BAILOUT(VisitStatements(stmt->statements()));
}
+ set_scope(outer_scope);
+ if (scope != NULL && current_block() != NULL) {
+ HValue* inner_context = environment()->context();
+ HValue* outer_context = Add<HLoadNamedField>(
+ inner_context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+
+ HInstruction* instr = Add<HStoreFrameContext>(outer_context);
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(stmt->ExitId(), REMOVABLE_SIMULATE);
+ }
+ environment()->BindContext(outer_context);
+ }
HBasicBlock* break_block = break_info.break_block();
if (break_block != NULL) {
if (current_block() != NULL) Goto(break_block);
@@ -3985,6 +4559,7 @@ void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
BreakableStatement* stmt,
BreakType type,
+ Scope** scope,
int* drop_extra) {
*drop_extra = 0;
BreakAndContinueScope* current = this;
@@ -3993,6 +4568,7 @@ HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
current = current->next();
}
ASSERT(current != NULL); // Always found (unless stack is malformed).
+ *scope = current->info()->scope();
if (type == BREAK) {
*drop_extra += current->info()->drop_extra();
@@ -4026,10 +4602,29 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ Scope* outer_scope = NULL;
+ Scope* inner_scope = scope();
int drop_extra = 0;
HBasicBlock* continue_block = break_scope()->Get(
- stmt->target(), BreakAndContinueScope::CONTINUE, &drop_extra);
+ stmt->target(), BreakAndContinueScope::CONTINUE,
+ &outer_scope, &drop_extra);
+ HValue* context = environment()->context();
Drop(drop_extra);
+ int context_pop_count = inner_scope->ContextChainLength(outer_scope);
+ if (context_pop_count > 0) {
+ while (context_pop_count-- > 0) {
+ HInstruction* context_instruction = Add<HLoadNamedField>(
+ context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+ context = context_instruction;
+ }
+ HInstruction* instr = Add<HStoreFrameContext>(context);
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(stmt->target()->EntryId(), REMOVABLE_SIMULATE);
+ }
+ environment()->BindContext(context);
+ }
+
Goto(continue_block);
set_current_block(NULL);
}
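// A minimal standalone sketch of the context unwinding above: follow
// ContextChainLength "previous" links from the inner block scope back to the
// scope that owns the jump target before taking the branch.
struct Context { Context* previous; };

static Context* PopContexts(Context* context, int context_pop_count) {
  while (context_pop_count-- > 0) {
    context = context->previous;  // mirrors the PREVIOUS_INDEX loads
  }
  return context;
}

int main() {
  Context outer = {0};
  Context middle = {&outer};
  Context inner = {&middle};
  return PopContexts(&inner, 2) == &outer ? 0 : 1;
}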
@@ -4039,10 +4634,28 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ Scope* outer_scope = NULL;
+ Scope* inner_scope = scope();
int drop_extra = 0;
HBasicBlock* break_block = break_scope()->Get(
- stmt->target(), BreakAndContinueScope::BREAK, &drop_extra);
+ stmt->target(), BreakAndContinueScope::BREAK,
+ &outer_scope, &drop_extra);
+ HValue* context = environment()->context();
Drop(drop_extra);
+ int context_pop_count = inner_scope->ContextChainLength(outer_scope);
+ if (context_pop_count > 0) {
+ while (context_pop_count-- > 0) {
+ HInstruction* context_instruction = Add<HLoadNamedField>(
+ context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+ context = context_instruction;
+ }
+ HInstruction* instr = Add<HStoreFrameContext>(context);
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(stmt->target()->ExitId(), REMOVABLE_SIMULATE);
+ }
+ environment()->BindContext(context);
+ }
Goto(break_block);
set_current_block(NULL);
}
@@ -4108,7 +4721,12 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
TestContext* test = TestContext::cast(context);
VisitForControl(stmt->expression(), test->if_true(), test->if_false());
} else if (context->IsEffect()) {
- CHECK_ALIVE(VisitForEffect(stmt->expression()));
+ // Visit in value context and ignore the result. This is needed to keep
+ // environment in sync with full-codegen since some visitors (e.g.
+ // VisitCountOperation) use the operand stack differently depending on
+ // context.
+ CHECK_ALIVE(VisitForValue(stmt->expression()));
+ Pop();
Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
@@ -4133,45 +4751,27 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- // We only optimize switch statements with smi-literal smi comparisons,
- // with a bounded number of clauses.
+ // We only optimize switch statements with a bounded number of clauses.
const int kCaseClauseLimit = 128;
ZoneList<CaseClause*>* clauses = stmt->cases();
int clause_count = clauses->length();
+ ZoneList<HBasicBlock*> body_blocks(clause_count, zone());
if (clause_count > kCaseClauseLimit) {
return Bailout(kSwitchStatementTooManyClauses);
}
- ASSERT(stmt->switch_type() != SwitchStatement::UNKNOWN_SWITCH);
- if (stmt->switch_type() == SwitchStatement::GENERIC_SWITCH) {
- return Bailout(kSwitchStatementMixedOrNonLiteralSwitchLabels);
- }
-
CHECK_ALIVE(VisitForValue(stmt->tag()));
Add<HSimulate>(stmt->EntryId());
- HValue* tag_value = Pop();
- HBasicBlock* first_test_block = current_block();
-
- HUnaryControlInstruction* string_check = NULL;
- HBasicBlock* not_string_block = NULL;
-
- // Test switch's tag value if all clauses are string literals
- if (stmt->switch_type() == SwitchStatement::STRING_SWITCH) {
- first_test_block = graph()->CreateBasicBlock();
- not_string_block = graph()->CreateBasicBlock();
- string_check = New<HIsStringAndBranch>(
- tag_value, first_test_block, not_string_block);
- FinishCurrentBlock(string_check);
-
- set_current_block(first_test_block);
- }
+ HValue* tag_value = Top();
+ Type* tag_type = stmt->tag()->bounds().lower;
// 1. Build all the tests, with dangling true branches
BailoutId default_id = BailoutId::None();
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) {
- default_id = clause->EntryId();
+ body_blocks.Add(NULL, zone());
+ if (default_id.IsNone()) default_id = clause->EntryId();
continue;
}
@@ -4179,51 +4779,38 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
CHECK_ALIVE(VisitForValue(clause->label()));
HValue* label_value = Pop();
+ Type* label_type = clause->label()->bounds().lower;
+ Type* combined_type = clause->compare_type();
+ HControlInstruction* compare = BuildCompareInstruction(
+ Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
+ combined_type,
+ ScriptPositionToSourcePosition(stmt->tag()->position()),
+ ScriptPositionToSourcePosition(clause->label()->position()),
+ PUSH_BEFORE_SIMULATE, clause->id());
+
HBasicBlock* next_test_block = graph()->CreateBasicBlock();
HBasicBlock* body_block = graph()->CreateBasicBlock();
-
- HControlInstruction* compare;
-
- if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
- if (!clause->compare_type()->Is(Type::Smi())) {
- Add<HDeoptimize>("Non-smi switch type", Deoptimizer::SOFT);
- }
-
- HCompareNumericAndBranch* compare_ =
- New<HCompareNumericAndBranch>(tag_value,
- label_value,
- Token::EQ_STRICT);
- compare_->set_observed_input_representation(
- Representation::Smi(), Representation::Smi());
- compare = compare_;
- } else {
- compare = New<HStringCompareAndBranch>(tag_value,
- label_value,
- Token::EQ_STRICT);
- }
-
+ body_blocks.Add(body_block, zone());
compare->SetSuccessorAt(0, body_block);
compare->SetSuccessorAt(1, next_test_block);
FinishCurrentBlock(compare);
+ set_current_block(body_block);
+ Drop(1); // tag_value
+
set_current_block(next_test_block);
}
// Save the current block to use for the default or to join with the
// exit.
HBasicBlock* last_block = current_block();
-
- if (not_string_block != NULL) {
- BailoutId join_id = !default_id.IsNone() ? default_id : stmt->ExitId();
- last_block = CreateJoin(last_block, not_string_block, join_id);
- }
+ Drop(1); // tag_value
// 2. Loop over the clauses and the linked list of tests in lockstep,
// translating the clause bodies.
- HBasicBlock* curr_test_block = first_test_block;
HBasicBlock* fall_through_block = NULL;
- BreakAndContinueInfo break_info(stmt);
+ BreakAndContinueInfo break_info(stmt, scope());
{ BreakAndContinueScope push(&break_info, this);
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
@@ -4232,40 +4819,16 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// goes to.
HBasicBlock* normal_block = NULL;
if (clause->is_default()) {
- if (last_block != NULL) {
- normal_block = last_block;
- last_block = NULL; // Cleared to indicate we've handled it.
- }
+ if (last_block == NULL) continue;
+ normal_block = last_block;
+ last_block = NULL; // Cleared to indicate we've handled it.
} else {
- // If the current test block is deoptimizing due to an unhandled clause
- // of the switch, the test instruction is in the next block since the
- // deopt must end the current block.
- if (curr_test_block->IsDeoptimizing()) {
- ASSERT(curr_test_block->end()->SecondSuccessor() == NULL);
- curr_test_block = curr_test_block->end()->FirstSuccessor();
- }
- normal_block = curr_test_block->end()->FirstSuccessor();
- curr_test_block = curr_test_block->end()->SecondSuccessor();
+ normal_block = body_blocks[i];
}
- // Identify a block to emit the body into.
- if (normal_block == NULL) {
- if (fall_through_block == NULL) {
- // (a) Unreachable.
- if (clause->is_default()) {
- continue; // Might still be reachable clause bodies.
- } else {
- break;
- }
- } else {
- // (b) Reachable only as fall through.
- set_current_block(fall_through_block);
- }
- } else if (fall_through_block == NULL) {
- // (c) Reachable only normally.
+ if (fall_through_block == NULL) {
set_current_block(normal_block);
} else {
- // (d) Reachable both ways.
HBasicBlock* join = CreateJoin(fall_through_block,
normal_block,
clause->EntryId());
@@ -4294,9 +4857,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
- HBasicBlock* loop_entry,
- BreakAndContinueInfo* break_info) {
- BreakAndContinueScope push(break_info, this);
+ HBasicBlock* loop_entry) {
Add<HSimulate>(stmt->StackCheckId());
HStackCheck* stack_check =
HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
@@ -4313,19 +4874,28 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(current_block() != NULL);
HBasicBlock* loop_entry = BuildLoopEntry(stmt);
- BreakAndContinueInfo break_info(stmt);
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
+ BreakAndContinueInfo break_info(stmt, scope());
+ {
+ BreakAndContinueScope push(&break_info, this);
+ CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+ }
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
HBasicBlock* loop_successor = NULL;
if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
set_current_block(body_exit);
- // The block for a true condition, the actual predecessor block of the
- // back edge.
- body_exit = graph()->CreateBasicBlock();
loop_successor = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
- if (body_exit->HasPredecessor()) {
+ if (stmt->cond()->ToBooleanIsFalse()) {
+ loop_entry->loop_information()->stack_check()->Eliminate();
+ Goto(loop_successor);
+ body_exit = NULL;
+ } else {
+ // The block for a true condition, the actual predecessor block of the
+ // back edge.
+ body_exit = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
+ }
+ if (body_exit != NULL && body_exit->HasPredecessor()) {
body_exit->SetJoinId(stmt->BackEdgeId());
} else {
body_exit = NULL;
@@ -4369,9 +4939,10 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
}
}
- BreakAndContinueInfo break_info(stmt);
+ BreakAndContinueInfo break_info(stmt, scope());
if (current_block() != NULL) {
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
+ BreakAndContinueScope push(&break_info, this);
+ CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
@@ -4410,9 +4981,10 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
}
}
- BreakAndContinueInfo break_info(stmt);
+ BreakAndContinueInfo break_info(stmt, scope());
if (current_block() != NULL) {
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
+ BreakAndContinueScope push(&break_info, this);
+ CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
@@ -4511,8 +5083,11 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
Bind(each_var, key);
- BreakAndContinueInfo break_info(stmt, 5);
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
+ BreakAndContinueInfo break_info(stmt, scope(), 5);
+ {
+ BreakAndContinueScope push(&break_info, this);
+ CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+ }
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
@@ -4573,31 +5148,11 @@ void HOptimizedGraphBuilder::VisitCaseClause(CaseClause* clause) {
}
-static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
- Code* unoptimized_code, FunctionLiteral* expr) {
- int start_position = expr->start_position();
- for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
- Object* obj = rinfo->target_object();
- if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (shared->start_position() == start_position) {
- return Handle<SharedFunctionInfo>(shared);
- }
- }
- }
-
- return Handle<SharedFunctionInfo>();
-}
-
-
void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- Handle<SharedFunctionInfo> shared_info =
- SearchSharedFunctionInfo(current_info()->shared_info()->code(), expr);
+ Handle<SharedFunctionInfo> shared_info = expr->shared_info();
if (shared_info.is_null()) {
shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script());
}
@@ -4658,14 +5213,14 @@ void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
HOptimizedGraphBuilder::GlobalPropertyAccess
HOptimizedGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
+ Variable* var, LookupResult* lookup, PropertyAccessType access_type) {
if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
Handle<GlobalObject> global(current_info()->global_object());
- global->Lookup(*var->name(), lookup);
+ global->Lookup(var->name(), lookup);
if (!lookup->IsNormal() ||
- (is_store && lookup->IsReadOnly()) ||
+ (access_type == STORE && lookup->IsReadOnly()) ||
lookup->holder() != *global) {
return kUseGeneric;
}
@@ -4677,15 +5232,21 @@ HOptimizedGraphBuilder::GlobalPropertyAccess
HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
HValue* context = environment()->context();
- int length = current_info()->scope()->ContextChainLength(var->scope());
+ int length = scope()->ContextChainLength(var->scope());
while (length-- > 0) {
- context = Add<HOuterContext>(context);
+ context = Add<HLoadNamedField>(
+ context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
}
return context;
}
void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+ if (expr->is_this()) {
+ current_info()->set_this_has_uses(true);
+ }
+
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4706,8 +5267,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
LookupResult lookup(isolate());
- GlobalPropertyAccess type =
- LookupGlobalProperty(variable, &lookup, false);
+ GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, LOAD);
if (type == kUseCell &&
current_info()->global_object()->IsAccessCheckNeeded()) {
@@ -4718,11 +5278,11 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
if (cell->type()->IsConstant()) {
- cell->AddDependentCompilationInfo(top_info());
- Handle<Object> constant_object = cell->type()->AsConstant();
+ PropertyCell::AddDependentCompilationInfo(cell, top_info());
+ Handle<Object> constant_object = cell->type()->AsConstant()->Value();
if (constant_object->IsConsString()) {
constant_object =
- FlattenGetString(Handle<String>::cast(constant_object));
+ String::Flatten(Handle<String>::cast(constant_object));
}
HConstant* constant = New<HConstant>(constant_object);
return ast_context()->ReturnInstruction(constant, expr->id());
@@ -4732,7 +5292,9 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
- HGlobalObject* global_object = Add<HGlobalObject>();
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HLoadGlobalGeneric* instr =
New<HLoadGlobalGeneric>(global_object,
variable->name(),
@@ -4754,7 +5316,21 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::CONTEXT: {
HValue* context = BuildContextChainWalk(variable);
- HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, variable);
+ HLoadContextSlot::Mode mode;
+ switch (variable->mode()) {
+ case LET:
+ case CONST:
+ mode = HLoadContextSlot::kCheckDeoptimize;
+ break;
+ case CONST_LEGACY:
+ mode = HLoadContextSlot::kCheckReturnUndefined;
+ break;
+ default:
+ mode = HLoadContextSlot::kNoCheck;
+ break;
+ }
+ HLoadContextSlot* instr =
+ new(zone()) HLoadContextSlot(context, variable->index(), mode);
return ast_context()->ReturnInstruction(instr, expr->id());
}
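
The mode switch above determines what the generated load does when the context
slot still holds the hole. A sketch of the three behaviors, with Deoptimize()
and Undefined() as hypothetical stand-ins for the real bailout and constant:

    Object* LoadContextSlotChecked(Context* context, int index,
                                   HLoadContextSlot::Mode mode) {
      Object* value = context->get(index);
      if (value->IsTheHole()) {
        // let/const: a read before initialization must deoptimize (TDZ).
        if (mode == HLoadContextSlot::kCheckDeoptimize) Deoptimize();
        // legacy const: an uninitialized slot reads as undefined.
        if (mode == HLoadContextSlot::kCheckReturnUndefined) return Undefined();
      }
      return value;  // kNoCheck: no hole test at all.
    }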
@@ -4787,81 +5363,13 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-static bool CanInlinePropertyAccess(Map* type) {
- return type->IsJSObjectMap() &&
- !type->is_dictionary_map() &&
- !type->has_named_interceptor();
-}
-
-
-static void LookupInPrototypes(Handle<Map> map,
- Handle<String> name,
- LookupResult* lookup) {
- while (map->prototype()->IsJSObject()) {
- Handle<JSObject> holder(JSObject::cast(map->prototype()));
- map = Handle<Map>(holder->map());
- if (!CanInlinePropertyAccess(*map)) break;
- map->LookupDescriptor(*holder, *name, lookup);
- if (lookup->IsFound()) return;
- }
- lookup->NotFound();
-}
-
-
-// Tries to find a JavaScript accessor of the given name in the prototype chain
-// starting at the given map. Return true iff there is one, including the
-// corresponding AccessorPair plus its holder (which could be null when the
-// accessor is found directly in the given map).
-static bool LookupAccessorPair(Handle<Map> map,
- Handle<String> name,
- Handle<AccessorPair>* accessors,
- Handle<JSObject>* holder) {
- Isolate* isolate = map->GetIsolate();
- LookupResult lookup(isolate);
-
- // Check for a JavaScript accessor directly in the map.
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValueFromMap(*map), isolate);
- if (!callback->IsAccessorPair()) return false;
- *accessors = Handle<AccessorPair>::cast(callback);
- *holder = Handle<JSObject>();
- return true;
- }
-
- // Everything else, e.g. a field, can't be an accessor call.
- if (lookup.IsFound()) return false;
-
- // Check for a JavaScript accessor somewhere in the proto chain.
- LookupInPrototypes(map, name, &lookup);
- if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValue(), isolate);
- if (!callback->IsAccessorPair()) return false;
- *accessors = Handle<AccessorPair>::cast(callback);
- *holder = Handle<JSObject>(lookup.holder());
- return true;
- }
-
- // We haven't found a JavaScript accessor anywhere.
- return false;
-}
-
-
-static bool LookupSetter(Handle<Map> map,
- Handle<String> name,
- Handle<JSFunction>* setter,
- Handle<JSObject>* holder) {
- Handle<AccessorPair> accessors;
- if (LookupAccessorPair(map, name, &accessors, holder) &&
- accessors->setter()->IsJSFunction()) {
- Handle<JSFunction> func(JSFunction::cast(accessors->setter()));
- CallOptimization call_optimization(func);
- // TODO(dcarney): temporary hack unless crankshaft can handle api calls.
- if (call_optimization.is_simple_api_call()) return false;
- *setter = func;
- return true;
- }
- return false;
+static bool CanInlinePropertyAccess(Type* type) {
+ if (type->Is(Type::NumberOrString())) return true;
+ if (!type->IsClass()) return false;
+ Handle<Map> map = type->AsClass()->Map();
+ return map->IsJSObjectMap() &&
+ !map->is_dictionary_map() &&
+ !map->has_named_interceptor();
}
@@ -4871,9 +5379,9 @@ static bool LookupSetter(Handle<Map> map,
static bool IsFastLiteral(Handle<JSObject> boilerplate,
int max_depth,
int* max_properties) {
- if (boilerplate->map()->is_deprecated()) {
- Handle<Object> result = JSObject::TryMigrateInstance(boilerplate);
- if (result.is_null()) return false;
+ if (boilerplate->map()->is_deprecated() &&
+ !JSObject::TryMigrateInstance(boilerplate)) {
+ return false;
}
ASSERT(max_depth >= 0 && *max_properties >= 0);
@@ -4967,15 +5475,15 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
flags |= expr->has_function()
? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags;
- Add<HPushArgument>(Add<HConstant>(closure_literals));
- Add<HPushArgument>(Add<HConstant>(literal_index));
- Add<HPushArgument>(Add<HConstant>(constant_properties));
- Add<HPushArgument>(Add<HConstant>(flags));
+ Add<HPushArguments>(Add<HConstant>(closure_literals),
+ Add<HConstant>(literal_index),
+ Add<HConstant>(constant_properties),
+ Add<HConstant>(flags));
// TODO(mvstanton): Add a flag to turn off creation of any
// AllocationMementos for this call: we are in crankshaft and should have
// learned enough about transition behavior to stop emitting mementos.
- Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
+ Runtime::FunctionId function_id = Runtime::kHiddenCreateObjectLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
@@ -5008,17 +5516,20 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
HInstruction* store;
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(literal, name, value));
+ CHECK_ALIVE(store = BuildNamedGeneric(
+ STORE, literal, name, value));
} else {
-#if DEBUG
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- ASSERT(!LookupSetter(map, name, &setter, &holder));
-#endif
- CHECK_ALIVE(store = BuildStoreNamedMonomorphic(literal,
- name,
- value,
- map));
+ PropertyAccessInfo info(this, STORE, ToType(map), name);
+ if (info.CanAccessMonomorphic()) {
+ HValue* checked_literal = Add<HCheckMaps>(literal, map);
+ ASSERT(!info.lookup()->IsPropertyCallbacks());
+ store = BuildMonomorphicAccess(
+ &info, literal, checked_literal, value,
+ BailoutId::None(), BailoutId::None());
+ } else {
+ CHECK_ALIVE(store = BuildNamedGeneric(
+ STORE, literal, name, value));
+ }
}
AddInstruction(store);
if (store->HasObservableSideEffects()) {
@@ -5069,11 +5580,12 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<JSObject> boilerplate_object;
if (literals_cell->IsUndefined()) {
uninitialized = true;
- Handle<Object> raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
- isolate(), literals, expr->constant_elements());
- if (raw_boilerplate.is_null()) {
- return Bailout(kArrayBoilerplateCreationFailed);
- }
+ Handle<Object> raw_boilerplate;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate(), raw_boilerplate,
+ Runtime::CreateArrayLiteralBoilerplate(
+ isolate(), literals, expr->constant_elements()),
+ Bailout(kArrayBoilerplateCreationFailed));
boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
AllocationSiteCreationContext creation_context(isolate());
@@ -5121,22 +5633,22 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
: ArrayLiteral::kNoFlags;
flags |= ArrayLiteral::kDisableMementos;
- Add<HPushArgument>(Add<HConstant>(literals));
- Add<HPushArgument>(Add<HConstant>(literal_index));
- Add<HPushArgument>(Add<HConstant>(constants));
- Add<HPushArgument>(Add<HConstant>(flags));
+ Add<HPushArguments>(Add<HConstant>(literals),
+ Add<HConstant>(literal_index),
+ Add<HConstant>(constants),
+ Add<HConstant>(flags));
// TODO(mvstanton): Consider a flag to turn off creation of any
// AllocationMementos for this call: we are in crankshaft and should have
// learned enough about transition behavior to stop emitting mementos.
- Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
+ Runtime::FunctionId function_id = Runtime::kHiddenCreateArrayLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
// De-opt if elements kind changed from boilerplate_elements_kind.
Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate());
- literal = Add<HCheckMaps>(literal, map, top_info());
+ literal = Add<HCheckMaps>(literal, map);
}
// The array is expected in the bailout environment during computation
@@ -5189,59 +5701,76 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
Handle<Map> map) {
BuildCheckHeapObject(object);
- return Add<HCheckMaps>(object, map, top_info());
+ return Add<HCheckMaps>(object, map);
}
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
- HValue* checked_object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup) {
- ASSERT(lookup->IsFound());
- // If the property does not exist yet, we have to check that it wasn't made
- // readonly or turned into a setter by some meanwhile modifications on the
- // prototype chain.
- if (!lookup->IsProperty() && map->prototype()->IsJSReceiver()) {
- Object* proto = map->prototype();
- // First check that the prototype chain isn't affected already.
- LookupResult proto_result(isolate());
- proto->Lookup(*name, &proto_result);
- if (proto_result.IsProperty()) {
- // If the inherited property could induce readonly-ness, bail out.
- if (proto_result.IsReadOnly() || !proto_result.IsCacheable()) {
- Bailout(kImproperObjectOnPrototypeChainForStore);
- return NULL;
- }
- // We only need to check up to the preexisting property.
- proto = proto_result.holder();
- } else {
- // Otherwise, find the top prototype.
- while (proto->GetPrototype(isolate())->IsJSObject()) {
- proto = proto->GetPrototype(isolate());
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
+ PropertyAccessInfo* info,
+ HValue* checked_object) {
+  // See if this is a load of an immutable property.
+ if (checked_object->ActualValue()->IsConstant() &&
+ info->lookup()->IsCacheable() &&
+ info->lookup()->IsReadOnly() && info->lookup()->IsDontDelete()) {
+ Handle<Object> object(
+ HConstant::cast(checked_object->ActualValue())->handle(isolate()));
+
+ if (object->IsJSObject()) {
+ LookupResult lookup(isolate());
+ Handle<JSObject>::cast(object)->Lookup(info->name(), &lookup);
+ Handle<Object> value(lookup.GetLazyValue(), isolate());
+
+ if (!value->IsTheHole()) {
+ return New<HConstant>(value);
}
- ASSERT(proto->GetPrototype(isolate())->IsNull());
}
- ASSERT(proto->IsJSObject());
- BuildCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(map->prototype())),
- Handle<JSObject>(JSObject::cast(proto)));
}
- HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
- bool transition_to_field = lookup->IsTransitionToField();
+ HObjectAccess access = info->access();
+ if (access.representation().IsDouble()) {
+ // Load the heap number.
+ checked_object = Add<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL),
+ access.WithRepresentation(Representation::Tagged()));
+ // Load the double value from it.
+ access = HObjectAccess::ForHeapNumberValue();
+ }
+
+ SmallMapList* map_list = info->field_maps();
+ if (map_list->length() == 0) {
+ return New<HLoadNamedField>(checked_object, checked_object, access);
+ }
+
+ UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>(map_list->length(), zone());
+ for (int i = 0; i < map_list->length(); ++i) {
+ maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone());
+ }
+ return New<HLoadNamedField>(
+ checked_object, checked_object, access, maps, info->field_type());
+}
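
Double fields are stored boxed, so the load above splits in two: a tagged load
of the HeapNumber box at the field's offset, then an unboxed read of its value
slot. A sketch of that pair; ReadTaggedField is a hypothetical stand-in for
the first HLoadNamedField:

    double LoadDoubleField(JSObject* object, HObjectAccess access) {
      // Step 1: load the field with Tagged representation -> the box.
      HeapNumber* box = HeapNumber::cast(ReadTaggedField(object, access));
      // Step 2: read the raw double out of the box, i.e. the load that
      // HObjectAccess::ForHeapNumberValue() describes.
      return box->value();
    }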
+
+
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
+ PropertyAccessInfo* info,
+ HValue* checked_object,
+ HValue* value) {
+ bool transition_to_field = info->lookup()->IsTransition();
+ // TODO(verwaest): Move this logic into PropertyAccessInfo.
+ HObjectAccess field_access = info->access();
HStoreNamedField *instr;
- if (FLAG_track_double_fields && field_access.representation().IsDouble()) {
+ if (field_access.representation().IsDouble()) {
HObjectAccess heap_number_access =
field_access.WithRepresentation(Representation::Tagged());
if (transition_to_field) {
// The store requires a mutable HeapNumber to be allocated.
NoObservableSideEffectsScope no_side_effects(this);
HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
+
+ // TODO(hpayer): Allocation site pretenuring support.
HInstruction* heap_number = Add<HAllocate>(heap_number_size,
- HType::HeapNumber(), isolate()->heap()->GetPretenureMode(),
+ HType::HeapObject(),
+ NOT_TENURED,
HEAP_NUMBER_TYPE);
AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
@@ -5251,92 +5780,59 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
heap_number);
} else {
// Already holds a HeapNumber; load the box and write its value field.
- HInstruction* heap_number = Add<HLoadNamedField>(checked_object,
- heap_number_access);
- heap_number->set_type(HType::HeapNumber());
+ HInstruction* heap_number = Add<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL), heap_number_access);
instr = New<HStoreNamedField>(heap_number,
HObjectAccess::ForHeapNumberValue(),
- value);
+ value, STORE_TO_INITIALIZED_ENTRY);
}
} else {
+ if (field_access.representation().IsHeapObject()) {
+ BuildCheckHeapObject(value);
+ }
+
+ if (!info->field_maps()->is_empty()) {
+ ASSERT(field_access.representation().IsHeapObject());
+ value = Add<HCheckMaps>(value, info->field_maps());
+ }
+
// This is a normal store.
- instr = New<HStoreNamedField>(checked_object->ActualValue(),
- field_access,
- value);
+ instr = New<HStoreNamedField>(
+ checked_object->ActualValue(), field_access, value,
+ transition_to_field ? INITIALIZING_STORE : STORE_TO_INITIALIZED_ENTRY);
}
if (transition_to_field) {
- Handle<Map> transition(lookup->GetTransitionTarget());
- HConstant* transition_constant = Add<HConstant>(transition);
- instr->SetTransition(transition_constant, top_info());
- // TODO(fschneider): Record the new map type of the object in the IR to
- // enable elimination of redundant checks after the transition store.
- instr->SetGVNFlag(kChangesMaps);
+ Handle<Map> transition(info->transition());
+ ASSERT(!transition->is_deprecated());
+ instr->SetTransition(Add<HConstant>(transition));
}
return instr;
}
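
For a transition to a new double field, the store above must first materialize
the box: allocate a mutable HeapNumber, fill it, install it in the field, and
only then attach the transition map. A sketch of that order; AllocateHeapNumber
and WriteField are hypothetical stand-ins for the emitted HAllocate and
HStoreNamedField instructions:

    void StoreNewDoubleField(JSObject* object, HObjectAccess field,
                             double value, Map* transition_map) {
      HeapNumber* box = AllocateHeapNumber();   // HAllocate, NOT_TENURED
      box->set_value(value);                    // fill the fresh box
      WriteField(object, field, box);           // INITIALIZING_STORE of the box
      object->set_map(transition_map);          // what SetTransition records
    }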
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
- HValue* object,
- Handle<String> name,
- HValue* value) {
- return New<HStoreNamedGeneric>(
- object,
- name,
- value,
- function_strict_mode_flag());
-}
-
-
-// Sets the lookup result and returns true if the load/store can be inlined.
-static bool ComputeStoreField(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup,
- bool lookup_transition = true) {
- ASSERT(!type->is_observed());
- if (!CanInlinePropertyAccess(*type)) {
- lookup->NotFound();
- return false;
- }
- // If we directly find a field, the access can be inlined.
- type->LookupDescriptor(NULL, *name, lookup);
- if (lookup->IsField()) return true;
-
- if (!lookup_transition) return false;
-
- type->LookupTransition(NULL, *name, lookup);
- return lookup->IsTransitionToField() &&
- (type->unused_property_fields() > 0);
-}
+bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
+ PropertyAccessInfo* info) {
+ if (!CanInlinePropertyAccess(type_)) return false;
+ // Currently only handle Type::Number as a polymorphic case.
+  // TODO(verwaest): Support monomorphic handling of numbers with an
+  // HCheckNumber instruction.
+ if (type_->Is(Type::Number())) return false;
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
- HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map) {
- // Handle a store to a known field.
- LookupResult lookup(isolate());
- if (ComputeStoreField(map, name, &lookup)) {
- HCheckMaps* checked_object = AddCheckMap(object, map);
- return BuildStoreNamedField(checked_object, name, value, map, &lookup);
+ // Values are only compatible for monomorphic load if they all behave the same
+ // regarding value wrappers.
+ if (type_->Is(Type::NumberOrString())) {
+ if (!info->type_->Is(Type::NumberOrString())) return false;
+ } else {
+ if (info->type_->Is(Type::NumberOrString())) return false;
}
- // No luck, do a generic store.
- return BuildStoreNamedGeneric(object, name, value);
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
- PropertyAccessInfo* info) {
- if (!CanInlinePropertyAccess(*map_)) return false;
-
if (!LookupDescriptor()) return false;
if (!lookup_.IsFound()) {
return (!info->lookup_.IsFound() || info->has_holder()) &&
- map_->prototype() == info->map_->prototype();
+ map()->prototype() == info->map()->prototype();
}
// Mismatch if the other access info found the property in the prototype
@@ -5344,7 +5840,8 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
if (info->has_holder()) return false;
if (lookup_.IsPropertyCallbacks()) {
- return accessor_.is_identical_to(info->accessor_);
+ return accessor_.is_identical_to(info->accessor_) &&
+ api_holder_.is_identical_to(info->api_holder_);
}
if (lookup_.IsConstant()) {
@@ -5355,32 +5852,74 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
if (!info->lookup_.IsField()) return false;
Representation r = access_.representation();
- if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
+ if (IsLoad()) {
+ if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
+ } else {
+ if (!info->access_.representation().IsCompatibleForStore(r)) return false;
+ }
if (info->access_.offset() != access_.offset()) return false;
if (info->access_.IsInobject() != access_.IsInobject()) return false;
+ if (IsLoad()) {
+ if (field_maps_.is_empty()) {
+ info->field_maps_.Clear();
+ } else if (!info->field_maps_.is_empty()) {
+ for (int i = 0; i < field_maps_.length(); ++i) {
+ info->field_maps_.AddMapIfMissing(field_maps_.at(i), info->zone());
+ }
+ info->field_maps_.Sort();
+ }
+ } else {
+ // We can only merge stores that agree on their field maps. The comparison
+ // below is safe, since we keep the field maps sorted.
+ if (field_maps_.length() != info->field_maps_.length()) return false;
+ for (int i = 0; i < field_maps_.length(); ++i) {
+ if (!field_maps_.at(i).is_identical_to(info->field_maps_.at(i))) {
+ return false;
+ }
+ }
+ }
info->GeneralizeRepresentation(r);
+ info->field_type_ = info->field_type_.Combine(field_type_);
return true;
}
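
The asymmetry above deserves a spelled-out restatement: when merging load
infos the field-map sets are unioned, with an empty set meaning
"unconstrained" and absorbing the other; store infos must agree exactly,
which the sorted order reduces to an element-wise walk. A sketch over a plain
vector, with Map* standing in for the handle type and AddIfMissingSorted a
hypothetical helper:

    #include <vector>

    bool MergeFieldMaps(bool is_load, const std::vector<Map*>& mine,
                        std::vector<Map*>* theirs) {
      if (is_load) {
        if (mine.empty()) {
          theirs->clear();  // unconstrained absorbs any constraint
        } else if (!theirs->empty()) {
          for (Map* map : mine) AddIfMissingSorted(theirs, map);
        }
        return true;  // loads always merge
      }
      // Stores only merge when the sets are identical, element for element.
      return *theirs == mine;
    }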
bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
- map_->LookupDescriptor(NULL, *name_, &lookup_);
- return LoadResult(map_);
+ if (!type_->IsClass()) return true;
+ map()->LookupDescriptor(NULL, *name_, &lookup_);
+ return LoadResult(map());
}
bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
+ if (!IsLoad() && lookup_.IsProperty() &&
+ (lookup_.IsReadOnly() || !lookup_.IsCacheable())) {
+ return false;
+ }
+
if (lookup_.IsField()) {
+ // Construct the object field access.
access_ = HObjectAccess::ForField(map, &lookup_, name_);
+
+ // Load field map for heap objects.
+ LoadFieldMaps(map);
} else if (lookup_.IsPropertyCallbacks()) {
Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate());
if (!callback->IsAccessorPair()) return false;
- Object* getter = Handle<AccessorPair>::cast(callback)->getter();
- if (!getter->IsJSFunction()) return false;
- Handle<JSFunction> accessor = handle(JSFunction::cast(getter));
- CallOptimization call_optimization(accessor);
- // TODO(dcarney): temporary hack unless crankshaft can handle api calls.
- if (call_optimization.is_simple_api_call()) return false;
+ Object* raw_accessor = IsLoad()
+ ? Handle<AccessorPair>::cast(callback)->getter()
+ : Handle<AccessorPair>::cast(callback)->setter();
+ if (!raw_accessor->IsJSFunction()) return false;
+ Handle<JSFunction> accessor = handle(JSFunction::cast(raw_accessor));
+ if (accessor->shared()->IsApiFunction()) {
+ CallOptimization call_optimization(accessor);
+ if (call_optimization.is_simple_api_call()) {
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<Map> receiver_map = this->map();
+ api_holder_ = call_optimization.LookupHolderOfExpectedType(
+ receiver_map, &holder_lookup);
+ }
+ }
accessor_ = accessor;
} else if (lookup_.IsConstant()) {
constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
@@ -5390,15 +5929,54 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
}
+void HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
+ Handle<Map> map) {
+ // Clear any previously collected field maps/type.
+ field_maps_.Clear();
+ field_type_ = HType::Tagged();
+
+ // Figure out the field type from the accessor map.
+ Handle<HeapType> field_type(lookup_.GetFieldTypeFromMap(*map), isolate());
+
+ // Collect the (stable) maps from the field type.
+ int num_field_maps = field_type->NumClasses();
+ if (num_field_maps == 0) return;
+ ASSERT(access_.representation().IsHeapObject());
+ field_maps_.Reserve(num_field_maps, zone());
+ HeapType::Iterator<Map> it = field_type->Classes();
+ while (!it.Done()) {
+ Handle<Map> field_map = it.Current();
+ if (!field_map->is_stable()) {
+ field_maps_.Clear();
+ return;
+ }
+ field_maps_.Add(field_map, zone());
+ it.Advance();
+ }
+ field_maps_.Sort();
+ ASSERT_EQ(num_field_maps, field_maps_.length());
+
+ // Determine field HType from field HeapType.
+ field_type_ = HType::FromType<HeapType>(field_type);
+ ASSERT(field_type_.IsHeapObject());
+
+ // Add dependency on the map that introduced the field.
+ Map::AddDependentCompilationInfo(
+ handle(lookup_.GetFieldOwnerFromMap(*map), isolate()),
+ DependentCode::kFieldTypeGroup, top_info());
+}
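
The rule implemented above, in isolation: a collected field-map set is only
usable if every class in the field's HeapType is stable, because only stable
maps can back a kFieldTypeGroup dependency; a single unstable map drops the
whole set and leaves the field as a generic tagged value. The filter alone:

    bool AllFieldMapsStable(HeapType* field_type) {
      for (HeapType::Iterator<Map> it = field_type->Classes();
           !it.Done(); it.Advance()) {
        if (!it.Current()->is_stable()) return false;  // drop the whole set
      }
      return true;
    }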
+
+
bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
- Handle<Map> map = map_;
+ Handle<Map> map = this->map();
+
while (map->prototype()->IsJSObject()) {
holder_ = handle(JSObject::cast(map->prototype()));
if (holder_->map()->is_deprecated()) {
JSObject::TryMigrateInstance(holder_);
}
map = Handle<Map>(holder_->map());
- if (!CanInlinePropertyAccess(*map)) {
+ if (!CanInlinePropertyAccess(ToType(map))) {
lookup_.NotFound();
return false;
}
@@ -5410,68 +5988,90 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
}
-bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadMonomorphic() {
- if (!CanInlinePropertyAccess(*map_)) return IsStringLength();
- if (IsJSObjectFieldAccessor()) return true;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
+ if (!CanInlinePropertyAccess(type_)) return false;
+ if (IsJSObjectFieldAccessor()) return IsLoad();
if (!LookupDescriptor()) return false;
- if (lookup_.IsFound()) return true;
- return LookupInPrototypes();
+ if (lookup_.IsFound()) {
+ if (IsLoad()) return true;
+ return !lookup_.IsReadOnly() && lookup_.IsCacheable();
+ }
+ if (!LookupInPrototypes()) return false;
+ if (IsLoad()) return true;
+
+ if (lookup_.IsPropertyCallbacks()) return true;
+ Handle<Map> map = this->map();
+ map->LookupTransition(NULL, *name_, &lookup_);
+ if (lookup_.IsTransitionToField() && map->unused_property_fields() > 0) {
+ // Construct the object field access.
+ access_ = HObjectAccess::ForField(map, &lookup_, name_);
+
+ // Load field map for heap objects.
+ LoadFieldMaps(transition());
+ return true;
+ }
+ return false;
}
-bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadAsMonomorphic(
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
SmallMapList* types) {
- ASSERT(map_.is_identical_to(types->first()));
- if (!CanLoadMonomorphic()) return false;
+ ASSERT(type_->Is(ToType(types->first())));
+ if (!CanAccessMonomorphic()) return false;
+ STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
if (types->length() > kMaxLoadPolymorphism) return false;
- if (IsStringLength()) {
+ HObjectAccess access = HObjectAccess::ForMap(); // bogus default
+ if (GetJSObjectFieldAccess(&access)) {
for (int i = 1; i < types->length(); ++i) {
- if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ PropertyAccessInfo test_info(
+ builder_, access_type_, ToType(types->at(i)), name_);
+ HObjectAccess test_access = HObjectAccess::ForMap(); // bogus default
+ if (!test_info.GetJSObjectFieldAccess(&test_access)) return false;
+ if (!access.Equals(test_access)) return false;
}
return true;
}
- if (IsArrayLength()) {
- bool is_fast = IsFastElementsKind(map_->elements_kind());
- for (int i = 1; i < types->length(); ++i) {
- Handle<Map> test_map = types->at(i);
- if (test_map->instance_type() != JS_ARRAY_TYPE) return false;
- if (IsFastElementsKind(test_map->elements_kind()) != is_fast) {
- return false;
- }
- }
- return true;
- }
+ // Currently only handle Type::Number as a polymorphic case.
+  // TODO(verwaest): Support monomorphic handling of numbers with an
+  // HCheckNumber instruction.
+ if (type_->Is(Type::Number())) return false;
- if (IsJSObjectFieldAccessor()) {
- InstanceType instance_type = map_->instance_type();
- for (int i = 1; i < types->length(); ++i) {
- if (types->at(i)->instance_type() != instance_type) return false;
- }
- return true;
- }
+ // Multiple maps cannot transition to the same target map.
+ ASSERT(!IsLoad() || !lookup_.IsTransition());
+ if (lookup_.IsTransition() && types->length() > 1) return false;
for (int i = 1; i < types->length(); ++i) {
- PropertyAccessInfo test_info(isolate(), types->at(i), name_);
- if (!test_info.IsCompatibleForLoad(this)) return false;
+ PropertyAccessInfo test_info(
+ builder_, access_type_, ToType(types->at(i)), name_);
+ if (!test_info.IsCompatible(this)) return false;
}
return true;
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadMonomorphic(
+static bool NeedsWrappingFor(Type* type, Handle<JSFunction> target) {
+ return type->Is(Type::NumberOrString()) &&
+ target->shared()->strict_mode() == SLOPPY &&
+ !target->shared()->native();
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
PropertyAccessInfo* info,
HValue* object,
- HInstruction* checked_object,
+ HValue* checked_object,
+ HValue* value,
BailoutId ast_id,
BailoutId return_id,
bool can_inline_accessor) {
HObjectAccess access = HObjectAccess::ForMap(); // bogus default
if (info->GetJSObjectFieldAccess(&access)) {
- return New<HLoadNamedField>(checked_object, access);
+ ASSERT(info->IsLoad());
+ return New<HLoadNamedField>(object, checked_object, access);
}
HValue* checked_holder = checked_object;
@@ -5480,242 +6080,200 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadMonomorphic(
checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
}
- if (!info->lookup()->IsFound()) return graph()->GetConstantUndefined();
+ if (!info->lookup()->IsFound()) {
+ ASSERT(info->IsLoad());
+ return graph()->GetConstantUndefined();
+ }
if (info->lookup()->IsField()) {
- return BuildLoadNamedField(checked_holder, info->access());
+ if (info->IsLoad()) {
+ return BuildLoadNamedField(info, checked_holder);
+ } else {
+ return BuildStoreNamedField(info, checked_object, value);
+ }
+ }
+
+ if (info->lookup()->IsTransition()) {
+ ASSERT(!info->IsLoad());
+ return BuildStoreNamedField(info, checked_object, value);
}
if (info->lookup()->IsPropertyCallbacks()) {
Push(checked_object);
- if (FLAG_inline_accessors &&
- can_inline_accessor &&
- TryInlineGetter(info->accessor(), ast_id, return_id)) {
- return NULL;
+ int argument_count = 1;
+ if (!info->IsLoad()) {
+ argument_count = 2;
+ Push(value);
}
- Add<HPushArgument>(Pop());
- return New<HCallConstantFunction>(info->accessor(), 1);
+
+ if (NeedsWrappingFor(info->type(), info->accessor())) {
+ HValue* function = Add<HConstant>(info->accessor());
+ PushArgumentsFromEnvironment(argument_count);
+ return New<HCallFunction>(function, argument_count, WRAP_AND_CALL);
+ } else if (FLAG_inline_accessors && can_inline_accessor) {
+ bool success = info->IsLoad()
+ ? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
+ : TryInlineSetter(
+ info->accessor(), info->map(), ast_id, return_id, value);
+ if (success || HasStackOverflow()) return NULL;
+ }
+
+ PushArgumentsFromEnvironment(argument_count);
+ return BuildCallConstantFunction(info->accessor(), argument_count);
}
ASSERT(info->lookup()->IsConstant());
- return New<HConstant>(info->constant());
+ if (info->IsLoad()) {
+ return New<HConstant>(info->constant());
+ } else {
+ return New<HCheckValue>(value, Handle<JSFunction>::cast(info->constant()));
+ }
}
-void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
+void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
+ PropertyAccessType access_type,
BailoutId ast_id,
BailoutId return_id,
HValue* object,
+ HValue* value,
SmallMapList* types,
Handle<String> name) {
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
+ HBasicBlock* number_block = NULL;
+ bool handled_string = false;
+
+ bool handle_smi = false;
+ STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(isolate(), types->at(i), name);
- if (info.CanLoadMonomorphic()) {
- if (count == 0) {
- BuildCheckHeapObject(object);
- join = graph()->CreateBasicBlock();
- }
- ++count;
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare = New<HCompareMap>(
- object, info.map(), if_true, if_false);
- FinishCurrentBlock(compare);
-
- set_current_block(if_true);
-
- HInstruction* load = BuildLoadMonomorphic(
- &info, object, compare, ast_id, return_id, FLAG_polymorphic_inlining);
- if (load == NULL) {
- if (HasStackOverflow()) return;
- } else {
- if (!load->IsLinked()) {
- AddInstruction(load);
- }
- if (!ast_context()->IsEffect()) Push(load);
+ PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
+ if (info.type()->Is(Type::String())) {
+ if (handled_string) continue;
+ handled_string = true;
+ }
+ if (info.CanAccessMonomorphic()) {
+ count++;
+ if (info.type()->Is(Type::Number())) {
+ handle_smi = true;
+ break;
}
-
- if (current_block() != NULL) Goto(join);
- set_current_block(if_false);
}
}
- // Finish up. Unconditionally deoptimize if we've handled all the maps we
- // know about and do not want to handle ones we've never seen. Otherwise
- // use a generic IC.
- if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- // Because the deopt may be the only path in the polymorphic load, make sure
- // that the environment stack matches the depth on deopt that it otherwise
- // would have had after a successful load.
- if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
- FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join);
- } else {
- HInstruction* load = Add<HLoadNamedGeneric>(object, name);
- if (!ast_context()->IsEffect()) Push(load);
+ count = 0;
+ HControlInstruction* smi_check = NULL;
+ handled_string = false;
- if (join != NULL) {
- Goto(join);
- } else {
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
- return;
+ for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
+ PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
+ if (info.type()->Is(Type::String())) {
+ if (handled_string) continue;
+ handled_string = true;
}
- }
-
- ASSERT(join != NULL);
- join->SetJoinId(ast_id);
- set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
-}
-
-
-bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
- BailoutId assignment_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name) {
- // Use monomorphic store if property lookup results in the same field index
- // for all maps. Requires special map check on the set of all handled maps.
- if (types->length() > kMaxStorePolymorphism) return false;
-
- LookupResult lookup(isolate());
- int count;
- Representation representation = Representation::None();
- HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
- for (count = 0; count < types->length(); ++count) {
- Handle<Map> map = types->at(count);
- // Pass false to ignore transitions.
- if (!ComputeStoreField(map, name, &lookup, false)) break;
- ASSERT(!map->is_observed());
-
- HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
- Representation new_representation = new_access.representation();
+ if (!info.CanAccessMonomorphic()) continue;
if (count == 0) {
- // First time through the loop; set access and representation.
- access = new_access;
- representation = new_representation;
- } else if (!representation.IsCompatibleForStore(new_representation)) {
- // Representations did not match.
- break;
- } else if (access.offset() != new_access.offset()) {
- // Offsets did not match.
- break;
- } else if (access.IsInobject() != new_access.IsInobject()) {
- // In-objectness did not match.
- break;
+ join = graph()->CreateBasicBlock();
+ if (handle_smi) {
+ HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
+ HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
+ number_block = graph()->CreateBasicBlock();
+ smi_check = New<HIsSmiAndBranch>(
+ object, empty_smi_block, not_smi_block);
+ FinishCurrentBlock(smi_check);
+ GotoNoSimulate(empty_smi_block, number_block);
+ set_current_block(not_smi_block);
+ } else {
+ BuildCheckHeapObject(object);
+ }
}
- }
+ ++count;
+ HBasicBlock* if_true = graph()->CreateBasicBlock();
+ HBasicBlock* if_false = graph()->CreateBasicBlock();
+ HUnaryControlInstruction* compare;
- if (count != types->length()) return false;
+ HValue* dependency;
+ if (info.type()->Is(Type::Number())) {
+ Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
+ compare = New<HCompareMap>(object, heap_number_map, if_true, if_false);
+ dependency = smi_check;
+ } else if (info.type()->Is(Type::String())) {
+ compare = New<HIsStringAndBranch>(object, if_true, if_false);
+ dependency = compare;
+ } else {
+ compare = New<HCompareMap>(object, info.map(), if_true, if_false);
+ dependency = compare;
+ }
+ FinishCurrentBlock(compare);
- // Everything matched; can use monomorphic store.
- BuildCheckHeapObject(object);
- HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
- HInstruction* store;
- CHECK_ALIVE_OR_RETURN(
- store = BuildStoreNamedField(
- checked_object, name, value, types->at(count - 1), &lookup),
- true);
- if (!ast_context()->IsEffect()) Push(value);
- AddInstruction(store);
- Add<HSimulate>(assignment_id);
- if (!ast_context()->IsEffect()) Drop(1);
- ast_context()->ReturnValue(value);
- return true;
-}
+ if (info.type()->Is(Type::Number())) {
+ GotoNoSimulate(if_true, number_block);
+ if_true = number_block;
+ }
+ set_current_block(if_true);
-void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
- BailoutId assignment_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name) {
- if (TryStorePolymorphicAsMonomorphic(
- assignment_id, object, value, types, name)) {
- return;
- }
+ HInstruction* access = BuildMonomorphicAccess(
+ &info, object, dependency, value, ast_id,
+ return_id, FLAG_polymorphic_inlining);
- // TODO(ager): We should recognize when the prototype chains for different
- // maps are identical. In that case we can avoid repeatedly generating the
- // same prototype map checks.
- int count = 0;
- HBasicBlock* join = NULL;
- for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup(isolate());
- if (ComputeStoreField(map, name, &lookup)) {
- if (count == 0) {
- BuildCheckHeapObject(object);
- join = graph()->CreateBasicBlock();
- }
- ++count;
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare = New<HCompareMap>(object, map, if_true, if_false);
- FinishCurrentBlock(compare);
-
- set_current_block(if_true);
- HInstruction* instr;
- CHECK_ALIVE(instr = BuildStoreNamedField(
- compare, name, value, map, &lookup));
- // Goto will add the HSimulate for the store.
- AddInstruction(instr);
- if (!ast_context()->IsEffect()) Push(value);
- Goto(join);
+ HValue* result = NULL;
+ switch (access_type) {
+ case LOAD:
+ result = access;
+ break;
+ case STORE:
+ result = value;
+ break;
+ }
- set_current_block(if_false);
+ if (access == NULL) {
+ if (HasStackOverflow()) return;
+ } else {
+ if (!access->IsLinked()) AddInstruction(access);
+ if (!ast_context()->IsEffect()) Push(result);
}
+
+ if (current_block() != NULL) Goto(join);
+ set_current_block(if_false);
}
// Finish up. Unconditionally deoptimize if we've handled all the maps we
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization("Unknown map in polymorphic store", join);
+    FinishExitWithHardDeoptimization("Unknown map in polymorphic access");
} else {
- HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
+ HInstruction* instr = BuildNamedGeneric(access_type, object, name, value);
AddInstruction(instr);
+ if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
if (join != NULL) {
- if (!ast_context()->IsEffect()) {
- Push(value);
- }
Goto(join);
} else {
- // The HSimulate for the store should not see the stored value in
- // effect contexts (it is not materialized at expr->id() in the
- // unoptimized code).
- if (instr->HasObservableSideEffects()) {
- if (ast_context()->IsEffect()) {
- Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
- } else {
- Push(value);
- Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
- Drop(1);
- }
- }
- return ast_context()->ReturnValue(value);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ return;
}
}
ASSERT(join != NULL);
- join->SetJoinId(assignment_id);
- set_current_block(join);
- if (!ast_context()->IsEffect()) {
- ast_context()->ReturnValue(Pop());
+ if (join->HasPredecessor()) {
+ join->SetJoinId(ast_id);
+ set_current_block(join);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ } else {
+ set_current_block(NULL);
}
}
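
Unrolled, the control flow built above is a chain of map compares with an
optional smi/heap-number split in front and a shared join at the end. A
straight-line sketch for two maps; AccessForMap1, AccessForMap2 and
GenericAccess are hypothetical stand-ins for the per-map monomorphic accesses
and the generic IC fallback:

    Object* PolymorphicAccess(JSObject* object, Map* map_1, Map* map_2) {
      if (object->map() == map_1) return AccessForMap1(object);
      if (object->map() == map_2) return AccessForMap2(object);
      // With every feedback map handled and FLAG_deoptimize_uncommon_cases
      // set, this tail is a hard deoptimization instead of a generic access.
      return GenericAccess(object);
    }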
static bool ComputeReceiverTypes(Expression* expr,
HValue* receiver,
- SmallMapList** t) {
+ SmallMapList** t,
+ Zone* zone) {
SmallMapList* types = expr->GetReceiverTypes();
*t = types;
bool monomorphic = expr->IsMonomorphic();
@@ -5724,7 +6282,16 @@ static bool ComputeReceiverTypes(Expression* expr,
types->FilterForPossibleTransitions(root_map);
monomorphic = types->length() == 1;
}
- return monomorphic && CanInlinePropertyAccess(*types->first());
+ return monomorphic && CanInlinePropertyAccess(
+ IC::MapToType<Type>(types->first(), zone));
+}
+
+
+static bool AreStringTypes(SmallMapList* types) {
+ for (int i = 0; i < types->length(); i++) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
}
@@ -5733,16 +6300,14 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
BailoutId ast_id,
BailoutId return_id,
bool is_uninitialized) {
- HValue* value = environment()->ExpressionStackAt(0);
-
if (!prop->key()->IsPropertyName()) {
// Keyed store.
+ HValue* value = environment()->ExpressionStackAt(0);
HValue* key = environment()->ExpressionStackAt(1);
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
HandleKeyedElementAccess(object, key, value, expr,
- true, // is_store
- &has_side_effects);
+ STORE, &has_side_effects);
Drop(3);
Push(value);
Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
@@ -5750,50 +6315,16 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
}
// Named store.
- HValue* object = environment()->ExpressionStackAt(1);
-
- if (is_uninitialized) {
- Add<HDeoptimize>("Insufficient type feedback for property assignment",
- Deoptimizer::SOFT);
- }
+ HValue* value = Pop();
+ HValue* object = Pop();
Literal* key = prop->key()->AsLiteral();
Handle<String> name = Handle<String>::cast(key->value());
ASSERT(!name.is_null());
- HInstruction* instr = NULL;
-
- SmallMapList* types;
- bool monomorphic = ComputeReceiverTypes(expr, object, &types);
-
- if (monomorphic) {
- Handle<Map> map = types->first();
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- AddCheckConstantFunction(holder, object, map);
- if (FLAG_inline_accessors &&
- TryInlineSetter(setter, ast_id, return_id, value)) {
- return;
- }
- Drop(2);
- Add<HPushArgument>(object);
- Add<HPushArgument>(value);
- instr = New<HCallConstantFunction>(setter, 2);
- } else {
- Drop(2);
- CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
- name,
- value,
- map));
- }
- } else if (types != NULL && types->length() > 1) {
- Drop(2);
- return HandlePolymorphicStoreNamedField(ast_id, object, value, types, name);
- } else {
- Drop(2);
- instr = BuildStoreNamedGeneric(object, name, value);
- }
+ HInstruction* instr = BuildNamedAccess(STORE, ast_id, return_id, expr,
+ object, name, value, is_uninitialized);
+ if (instr == NULL) return;
if (!ast_context()->IsEffect()) Push(value);
AddInstruction(instr);
@@ -5826,23 +6357,32 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HValue* value,
BailoutId ast_id) {
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, STORE);
if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
if (cell->type()->IsConstant()) {
- IfBuilder builder(this);
- HValue* constant = Add<HConstant>(cell->type()->AsConstant());
- if (cell->type()->AsConstant()->IsNumber()) {
- builder.If<HCompareNumericAndBranch>(value, constant, Token::EQ);
+ Handle<Object> constant = cell->type()->AsConstant()->Value();
+ if (value->IsConstant()) {
+ HConstant* c_value = HConstant::cast(value);
+ if (!constant.is_identical_to(c_value->handle(isolate()))) {
+ Add<HDeoptimize>("Constant global variable assignment",
+ Deoptimizer::EAGER);
+ }
} else {
- builder.If<HCompareObjectEqAndBranch>(value, constant);
+ HValue* c_constant = Add<HConstant>(constant);
+ IfBuilder builder(this);
+ if (constant->IsNumber()) {
+ builder.If<HCompareNumericAndBranch>(value, c_constant, Token::EQ);
+ } else {
+ builder.If<HCompareObjectEqAndBranch>(value, c_constant);
+ }
+ builder.Then();
+ builder.Else();
+ Add<HDeoptimize>("Constant global variable assignment",
+ Deoptimizer::EAGER);
+ builder.End();
}
- builder.Then();
- builder.Else();
- Add<HDeoptimize>("Constant global variable assignment",
- Deoptimizer::EAGER);
- builder.End();
}
HInstruction* instr =
Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
@@ -5850,10 +6390,12 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
} else {
- HGlobalObject* global_object = Add<HGlobalObject>();
- HStoreGlobalGeneric* instr =
- Add<HStoreGlobalGeneric>(global_object, var->name(),
- value, function_strict_mode_flag());
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ HStoreNamedGeneric* instr =
+ Add<HStoreNamedGeneric>(global_object, var->name(),
+ value, function_strict_mode());
USE(instr);
ASSERT(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -5888,7 +6430,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case Variable::PARAMETER:
case Variable::LOCAL:
- if (var->mode() == CONST) {
+ if (var->mode() == CONST_LEGACY) {
return Bailout(kUnsupportedConstCompoundAssignment);
}
BindIfLive(var, Top());
@@ -5917,11 +6459,11 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
- return ast_context()->ReturnValue(Pop());
- case CONST_HARMONY:
          // This case is checked statically so no need to
          // perform checks here.
UNREACHABLE();
+ case CONST_LEGACY:
+ return ast_context()->ReturnValue(Pop());
default:
mode = HStoreContextSlot::kNoCheck;
}
@@ -5956,7 +6498,8 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HValue* right = Pop();
HValue* left = Pop();
- Push(BuildBinaryOperation(operation, left, right));
+ Push(BuildBinaryOperation(operation, left, right, PUSH_BEFORE_SIMULATE));
+
BuildStore(expr, prop, expr->id(),
expr->AssignmentId(), expr->IsUninitialized());
} else {
@@ -5985,6 +6528,10 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
if (var->mode() == CONST) {
if (expr->op() != Token::INIT_CONST) {
+ return Bailout(kNonInitializerAssignmentToConst);
+ }
+ } else if (var->mode() == CONST_LEGACY) {
+ if (expr->op() != Token::INIT_CONST_LEGACY) {
CHECK_ALIVE(VisitForValue(expr->value()));
return ast_context()->ReturnValue(Pop());
}
@@ -5995,10 +6542,6 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
HValue* old_value = environment()->Lookup(var);
Add<HUseConst>(old_value);
}
- } else if (var->mode() == CONST_HARMONY) {
- if (expr->op() != Token::INIT_CONST_HARMONY) {
- return Bailout(kNonInitializerAssignmentToConst);
- }
}
if (proxy->IsArguments()) return Bailout(kAssignmentToArguments);
@@ -6054,20 +6597,20 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
- return ast_context()->ReturnValue(Pop());
- case CONST_HARMONY:
          // This case is checked statically so no need to
          // perform checks here.
UNREACHABLE();
+ case CONST_LEGACY:
+ return ast_context()->ReturnValue(Pop());
default:
mode = HStoreContextSlot::kNoCheck;
}
} else if (expr->op() == Token::INIT_VAR ||
expr->op() == Token::INIT_LET ||
- expr->op() == Token::INIT_CONST_HARMONY) {
+ expr->op() == Token::INIT_CONST) {
mode = HStoreContextSlot::kNoCheck;
} else {
- ASSERT(expr->op() == Token::INIT_CONST);
+ ASSERT(expr->op() == Token::INIT_CONST_LEGACY);
mode = HStoreContextSlot::kCheckIgnoreAssignment;
}
@@ -6107,8 +6650,10 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
- Add<HThrow>(value);
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ Add<HPushArguments>(value);
+ Add<HCallRuntime>(isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kHiddenThrow), 1);
Add<HSimulate>(expr->id());
// If the throw definitely exits the function, we can finish with a dummy
@@ -6120,55 +6665,61 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
}
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- HObjectAccess access) {
- if (FLAG_track_double_fields && access.representation().IsDouble()) {
- // load the heap number
- HLoadNamedField* heap_number = Add<HLoadNamedField>(
- object, access.WithRepresentation(Representation::Tagged()));
- heap_number->set_type(HType::HeapNumber());
- // load the double value from it
- return New<HLoadNamedField>(
- heap_number, HObjectAccess::ForHeapNumberValue());
+HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
+ if (string->IsConstant()) {
+ HConstant* c_string = HConstant::cast(string);
+ if (c_string->HasStringValue()) {
+ return Add<HConstant>(c_string->StringValue()->map()->instance_type());
+ }
}
- return New<HLoadNamedField>(object, access);
-}
-
-
-HInstruction* HGraphBuilder::AddLoadNamedField(HValue* object,
- HObjectAccess access) {
- return AddInstruction(BuildLoadNamedField(object, access));
+ return Add<HLoadNamedField>(
+ Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap()),
+ static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
}
-HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* object,
- HValue* checked_string) {
- if (FLAG_fold_constants && object->IsConstant()) {
- HConstant* constant = HConstant::cast(object);
- if (constant->HasStringValue()) {
- return New<HConstant>(constant->StringValue()->length());
+HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
+ if (string->IsConstant()) {
+ HConstant* c_string = HConstant::cast(string);
+ if (c_string->HasStringValue()) {
+ return Add<HConstant>(c_string->StringValue()->length());
}
}
- return BuildLoadNamedField(checked_string, HObjectAccess::ForStringLength());
+ return Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
+ HObjectAccess::ForStringLength());
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
+ PropertyAccessType access_type,
HValue* object,
Handle<String> name,
- Property* expr) {
- if (expr->IsUninitialized()) {
- Add<HDeoptimize>("Insufficient type feedback for generic named load",
+ HValue* value,
+ bool is_uninitialized) {
+ if (is_uninitialized) {
+ Add<HDeoptimize>("Insufficient type feedback for generic named access",
Deoptimizer::SOFT);
}
- return New<HLoadNamedGeneric>(object, name);
+ if (access_type == LOAD) {
+ return New<HLoadNamedGeneric>(object, name);
+ } else {
+ return New<HStoreNamedGeneric>(object, name, value, function_strict_mode());
+ }
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
- return New<HLoadKeyedGeneric>(object, key);
+HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
+ PropertyAccessType access_type,
+ HValue* object,
+ HValue* key,
+ HValue* value) {
+ if (access_type == LOAD) {
+ return New<HLoadKeyedGeneric>(object, key);
+ } else {
+ return New<HStoreKeyedGeneric>(object, key, value, function_strict_mode());
+ }
}
@@ -6194,24 +6745,22 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
HValue* val,
HValue* dependency,
Handle<Map> map,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode) {
- HCheckMaps* checked_object = Add<HCheckMaps>(object, map, top_info(),
- dependency);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, map, dependency);
if (dependency) {
- checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ checked_object->ClearDependsOnFlag(kElementsKind);
}
- if (is_store && map->prototype()->IsJSObject()) {
+ if (access_type == STORE && map->prototype()->IsJSObject()) {
    // Monomorphic stores need a prototype chain check because shape
// changes could allow callbacks on elements in the chain that
// aren't compatible with monomorphic keyed stores.
Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Object* holder = map->prototype();
- while (holder->GetPrototype(isolate())->IsJSObject()) {
- holder = holder->GetPrototype(isolate());
+ JSObject* holder = JSObject::cast(map->prototype());
+ while (!holder->GetPrototype()->IsNull()) {
+ holder = JSObject::cast(holder->GetPrototype());
}
- ASSERT(holder->GetPrototype(isolate())->IsNull());
BuildCheckPrototypeMaps(prototype,
Handle<JSObject>(JSObject::cast(holder)));
@@ -6221,7 +6770,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
return BuildUncheckedMonomorphicElementAccess(
checked_object, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), is_store,
+ map->elements_kind(), access_type,
load_mode, store_mode);
}
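
The prototype walk in the store path above stops at the last JSObject on the
chain, so that BuildCheckPrototypeMaps can guard every map from the receiver's
prototype up to that holder; any shape change in between (say, an accessor
added to an element) invalidates the fast store. The walk, isolated as a
sketch:

    JSObject* TopOfPrototypeChain(Map* receiver_map) {
      JSObject* holder = JSObject::cast(receiver_map->prototype());
      while (!holder->GetPrototype()->IsNull()) {
        holder = JSObject::cast(holder->GetPrototype());
      }
      return holder;
    }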
@@ -6287,7 +6836,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
checked_object, key, val,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
consolidated_elements_kind,
- false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
return instr;
}
@@ -6297,13 +6846,13 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* key,
HValue* val,
SmallMapList* maps,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
*has_side_effects = false;
BuildCheckHeapObject(object);
- if (!is_store) {
+ if (access_type == LOAD) {
HInstruction* consolidated_load =
TryBuildConsolidatedElementLoad(object, key, val, maps);
if (consolidated_load != NULL) {
@@ -6323,6 +6872,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
elements_kind != GetInitialFastElementsKind()) {
possible_transitioned_maps.Add(map);
}
+ if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
+ HInstruction* result = BuildKeyedGeneric(access_type, object, key, val);
+ *has_side_effects = result->HasObservableSideEffects();
+ return AddInstruction(result);
+ }
}
// Get transition target for each map (NULL == no transition).
for (int i = 0; i < maps->length(); ++i) {
@@ -6356,15 +6910,14 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HInstruction* instr = NULL;
if (untransitionable_map->has_slow_elements_kind() ||
!untransitionable_map->IsJSObjectMap()) {
- instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
- : BuildLoadKeyedGeneric(object, key));
+ instr = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
instr = BuildMonomorphicElementAccess(
- object, key, val, transition, untransitionable_map, is_store,
+ object, key, val, transition, untransitionable_map, access_type,
store_mode);
}
*has_side_effects |= instr->HasObservableSideEffects();
- return is_store ? NULL : instr;
+ return access_type == STORE ? NULL : instr;
}
HBasicBlock* join = graph()->CreateBasicBlock();
@@ -6382,25 +6935,24 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(this_map);
HInstruction* access = NULL;
if (IsDictionaryElementsKind(elements_kind)) {
- access = is_store
- ? AddInstruction(BuildStoreKeyedGeneric(object, key, val))
- : AddInstruction(BuildLoadKeyedGeneric(object, key));
+ access = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
ASSERT(IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind));
+ IsExternalArrayElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind));
LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
// Happily, mapcompare is a checked object.
access = BuildUncheckedMonomorphicElementAccess(
mapcompare, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- elements_kind, is_store,
+ elements_kind, access_type,
load_mode,
store_mode);
}
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (!is_store) {
+ if (access_type == LOAD) {
Push(access);
}
NoObservableSideEffectsScope scope(this);
@@ -6408,12 +6960,16 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(other_map);
}
+ // Ensure that we visited at least one map above that goes to join. This is
+ // necessary because FinishExitWithHardDeoptimization does an AbnormalExit
+ // rather than joining the join block. If this becomes an issue, insert a
+ // generic access in the case length() == 0.
+ ASSERT(join->predecessors()->length() > 0);
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization("Unknown map in polymorphic element access",
- join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic element access");
set_current_block(join);
- return is_store ? NULL : Pop();
+ return access_type == STORE ? NULL : Pop();
}
@@ -6422,16 +6978,17 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* key,
HValue* val,
Expression* expr,
- bool is_store,
+ PropertyAccessType access_type,
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
SmallMapList* types;
- bool monomorphic = ComputeReceiverTypes(expr, obj, &types);
+ bool monomorphic = ComputeReceiverTypes(expr, obj, &types, zone());
bool force_generic = false;
- if (is_store && (monomorphic || (types != NULL && !types->is_empty()))) {
+ if (access_type == STORE &&
+ (monomorphic || (types != NULL && !types->is_empty()))) {
// Stores can't be mono/polymorphic if their prototype chain has dictionary
// elements. However a receiver map that has dictionary elements itself
// should be left to normal mono/poly behavior (the other maps may benefit
@@ -6448,53 +7005,37 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
if (monomorphic) {
Handle<Map> map = types->first();
- if (map->has_slow_elements_kind()) {
- instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
- : BuildLoadKeyedGeneric(obj, key);
- AddInstruction(instr);
+ if (map->has_slow_elements_kind() || !map->IsJSObjectMap()) {
+ instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
} else {
BuildCheckHeapObject(obj);
instr = BuildMonomorphicElementAccess(
- obj, key, val, NULL, map, is_store, expr->GetStoreMode());
+ obj, key, val, NULL, map, access_type, expr->GetStoreMode());
}
} else if (!force_generic && (types != NULL && !types->is_empty())) {
return HandlePolymorphicElementAccess(
- obj, key, val, types, is_store,
+ obj, key, val, types, access_type,
expr->GetStoreMode(), has_side_effects);
} else {
- if (is_store) {
+ if (access_type == STORE) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
- instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
- instr = BuildLoadKeyedGeneric(obj, key);
}
- AddInstruction(instr);
+ instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
}
*has_side_effects = instr->HasObservableSideEffects();
return instr;
}
-HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
- HValue* object,
- HValue* key,
- HValue* value) {
- return New<HStoreKeyedGeneric>(
- object,
- key,
- value,
- function_strict_mode_flag());
-}
-
-
void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
@@ -6511,7 +7052,7 @@ void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
HInstruction* insert_after = entry;
for (int i = 0; i < arguments_values->length(); i++) {
HValue* argument = arguments_values->at(i);
- HInstruction* push_argument = New<HPushArgument>(argument);
+ HInstruction* push_argument = New<HPushArguments>(argument);
push_argument->InsertAfter(insert_after);
insert_after = push_argument;
}
@@ -6572,6 +7113,45 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
}
+HInstruction* HOptimizedGraphBuilder::BuildNamedAccess(
+ PropertyAccessType access,
+ BailoutId ast_id,
+ BailoutId return_id,
+ Expression* expr,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized) {
+ SmallMapList* types;
+ ComputeReceiverTypes(expr, object, &types, zone());
+ ASSERT(types != NULL);
+
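+ // With type feedback, try a monomorphic or polymorphic access; without
+ // feedback, fall through to the generic named access at the bottom.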
+ if (types->length() > 0) {
+ PropertyAccessInfo info(this, access, ToType(types->first()), name);
+ if (!info.CanAccessAsMonomorphic(types)) {
+ HandlePolymorphicNamedFieldAccess(
+ access, ast_id, return_id, object, value, types, name);
+ return NULL;
+ }
+
+ HValue* checked_object;
+ // Type::Number() is only supported by polymorphic load/call handling.
+ ASSERT(!info.type()->Is(Type::Number()));
+ BuildCheckHeapObject(object);
+ if (AreStringTypes(types)) {
+ checked_object =
+ Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
+ } else {
+ checked_object = Add<HCheckMaps>(object, types);
+ }
+ return BuildMonomorphicAccess(
+ &info, object, checked_object, value, ast_id, return_id);
+ }
+
+ return BuildNamedGeneric(access, object, name, value, is_uninitialized);
+}
+
+
void HOptimizedGraphBuilder::PushLoad(Property* expr,
HValue* object,
HValue* key) {
@@ -6582,14 +7162,6 @@ void HOptimizedGraphBuilder::PushLoad(Property* expr,
}
-static bool AreStringTypes(SmallMapList* types) {
- for (int i = 0; i < types->length(); i++) {
- if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
- }
- return true;
-}
-
-
void HOptimizedGraphBuilder::BuildLoad(Property* expr,
BailoutId ast_id) {
HInstruction* instr = NULL;
@@ -6609,32 +7181,10 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
HValue* object = Pop();
- SmallMapList* types;
- ComputeReceiverTypes(expr, object, &types);
- ASSERT(types != NULL);
-
- if (types->length() > 0) {
- PropertyAccessInfo info(isolate(), types->first(), name);
- if (!info.CanLoadAsMonomorphic(types)) {
- return HandlePolymorphicLoadNamedField(
- ast_id, expr->LoadId(), object, types, name);
- }
-
- BuildCheckHeapObject(object);
- HInstruction* checked_object;
- if (AreStringTypes(types)) {
- checked_object =
- Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
- } else {
- checked_object = Add<HCheckMaps>(object, types);
- }
- instr = BuildLoadMonomorphic(
- &info, object, checked_object, ast_id, expr->LoadId());
- if (instr == NULL) return;
- if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
- } else {
- instr = BuildLoadNamedGeneric(object, name, expr);
- }
+ instr = BuildNamedAccess(LOAD, ast_id, expr->LoadId(), expr,
+ object, name, NULL, expr->IsUninitialized());
+ if (instr == NULL) return;
+ if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
} else {
HValue* key = Pop();
@@ -6642,9 +7192,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr,
- false, // is_store
- &has_side_effects);
+ obj, key, NULL, expr, LOAD, &has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6677,34 +7225,24 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
}
-HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
- CompilationInfo* info) {
- HConstant* constant_value = New<HConstant>(constant);
-
- if (constant->map()->CanOmitMapChecks()) {
- constant->map()->AddDependentCompilationInfo(
- DependentCode::kPrototypeCheckGroup, info);
- return constant_value;
- }
-
- AddInstruction(constant_value);
- HCheckMaps* check =
- Add<HCheckMaps>(constant_value, handle(constant->map()), info);
- check->ClearGVNFlag(kDependsOnElementsKind);
+HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant) {
+ HCheckMaps* check = Add<HCheckMaps>(
+ Add<HConstant>(constant), handle(constant->map()));
+ check->ClearDependsOnFlag(kElementsKind);
return check;
}
HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder) {
- while (!prototype.is_identical_to(holder)) {
- BuildConstantMapCheck(prototype, top_info());
- prototype = handle(JSObject::cast(prototype->GetPrototype()));
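+ // A null holder means the whole chain is checked: the loop walks until
+ // GetPrototype() yields null, in which case NULL is returned.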
+ while (holder.is_null() || !prototype.is_identical_to(holder)) {
+ BuildConstantMapCheck(prototype);
+ Object* next_prototype = prototype->GetPrototype();
+ if (next_prototype->IsNull()) return NULL;
+ CHECK(next_prototype->IsJSObject());
+ prototype = handle(JSObject::cast(next_prototype));
}
-
- HInstruction* checked_object = BuildConstantMapCheck(prototype, top_info());
- if (!checked_object->IsLinked()) AddInstruction(checked_object);
- return checked_object;
+ return BuildConstantMapCheck(prototype);
}
@@ -6717,83 +7255,83 @@ void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
}
-void HOptimizedGraphBuilder::AddCheckConstantFunction(
- Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map) {
- // Constant functions have the nice property that the map will change if they
- // are overwritten. Therefore it is enough to check the map of the holder and
- // its prototypes.
- AddCheckMap(receiver, receiver_map);
- AddCheckPrototypeMaps(holder, receiver_map);
+HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(
+ HValue* fun, int argument_count, bool pass_argument_count) {
+ return New<HCallJSFunction>(
+ fun, argument_count, pass_argument_count);
+}
+
+
+HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
+ HValue* fun, HValue* context,
+ int argument_count, HValue* expected_param_count) {
+ CallInterfaceDescriptor* descriptor =
+ isolate()->call_descriptor(Isolate::ArgumentAdaptorCall);
+
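+ // The adaptor expects the actual argument count without the receiver.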
+ HValue* arity = Add<HConstant>(argument_count - 1);
+
+ HValue* op_vals[] = { fun, context, arity, expected_param_count };
+
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ HConstant* adaptor_value = Add<HConstant>(adaptor);
+
+ return New<HCallWithDescriptor>(
+ adaptor_value, argument_count, descriptor,
+ Vector<HValue*>(op_vals, descriptor->environment_length()));
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
+ Handle<JSFunction> jsfun, int argument_count) {
+ HValue* target = Add<HConstant>(jsfun);
+ // For constant functions, we try to avoid calling the
+ // argument adaptor and instead call the function directly.
+ int formal_parameter_count = jsfun->shared()->formal_parameter_count();
+ bool dont_adapt_arguments =
+ (formal_parameter_count ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel);
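+ // argument_count includes the receiver; the declared arity does not.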
+ int arity = argument_count - 1;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+ if (can_invoke_directly) {
+ if (jsfun.is_identical_to(current_info()->closure())) {
+ graph()->MarkRecursive();
+ }
+ return NewPlainFunctionCall(target, argument_count, dont_adapt_arguments);
+ } else {
+ HValue* param_count_value = Add<HConstant>(formal_parameter_count);
+ HValue* context = Add<HLoadNamedField>(
+ target, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFunctionContextPointer());
+ return NewArgumentAdaptorCall(target, context,
+ argument_count, param_count_value);
+ }
+ UNREACHABLE();
+ return NULL;
}
class FunctionSorter {
public:
- FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
- FunctionSorter(int index, int ticks, int ast_length, int src_length)
- : index_(index),
- ticks_(ticks),
- ast_length_(ast_length),
- src_length_(src_length) { }
+ FunctionSorter(int index = 0, int ticks = 0, int size = 0)
+ : index_(index), ticks_(ticks), size_(size) { }
int index() const { return index_; }
int ticks() const { return ticks_; }
- int ast_length() const { return ast_length_; }
- int src_length() const { return src_length_; }
+ int size() const { return size_; }
private:
int index_;
int ticks_;
- int ast_length_;
- int src_length_;
+ int size_;
};
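// Hotter candidates (more profiler ticks) sort first; ties are broken by
// preferring the smaller function body.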
inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
int diff = lhs.ticks() - rhs.ticks();
if (diff != 0) return diff > 0;
- diff = lhs.ast_length() - rhs.ast_length();
- if (diff != 0) return diff < 0;
- return lhs.src_length() < rhs.src_length();
-}
-
-
-bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic(
- Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name) {
- if (types->length() > kMaxCallPolymorphism) return false;
-
- PropertyAccessInfo info(isolate(), types->at(0), name);
- if (!info.CanLoadAsMonomorphic(types)) return false;
- if (!expr->ComputeTarget(info.map(), name)) return false;
-
- BuildCheckHeapObject(receiver);
- Add<HCheckMaps>(receiver, types);
- AddCheckPrototypeMaps(expr->holder(), info.map());
- if (FLAG_trace_inlining) {
- Handle<JSFunction> caller = current_info()->closure();
- SmartArrayPointer<char> caller_name =
- caller->shared()->DebugName()->ToCString();
- PrintF("Trying to inline the polymorphic call to %s from %s\n",
- *name->ToCString(), *caller_name);
- }
-
- if (!TryInlineCall(expr)) {
- int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- HCallConstantFunction* call =
- New<HCallConstantFunction>(expr->target(), argument_count);
- PreProcessCall(call);
- AddInstruction(call);
- if (!ast_context()->IsEffect()) Push(call);
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
- }
-
- return true;
+ return lhs.size() < rhs.size();
}
@@ -6802,47 +7340,54 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
HValue* receiver,
SmallMapList* types,
Handle<String> name) {
- if (TryCallPolymorphicAsMonomorphic(expr, receiver, types, name)) return;
-
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- HBasicBlock* join = NULL;
FunctionSorter order[kMaxCallPolymorphism];
- int ordered_functions = 0;
-
- Handle<Map> initial_string_map(
- isolate()->native_context()->string_function()->initial_map());
- Handle<Map> string_marker_map(
- JSObject::cast(initial_string_map->prototype())->map());
- Handle<Map> initial_number_map(
- isolate()->native_context()->number_function()->initial_map());
- Handle<Map> number_marker_map(
- JSObject::cast(initial_number_map->prototype())->map());
- Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
bool handle_smi = false;
+ bool handled_string = false;
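+ // All string receiver maps share a single HIsStringAndBranch check, so
+ // only the first string type is kept.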
+ int ordered_functions = 0;
for (int i = 0;
i < types->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
- Handle<Map> map = types->at(i);
- if (expr->ComputeTarget(map, name)) {
- if (map.is_identical_to(number_marker_map)) handle_smi = true;
- order[ordered_functions++] =
- FunctionSorter(i,
- expr->target()->shared()->profiler_ticks(),
- InliningAstSize(expr->target()),
- expr->target()->shared()->SourceSize());
+ PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
+ if (info.CanAccessMonomorphic() &&
+ info.lookup()->IsConstant() &&
+ info.constant()->IsJSFunction()) {
+ if (info.type()->Is(Type::String())) {
+ if (handled_string) continue;
+ handled_string = true;
+ }
+ Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
+ if (info.type()->Is(Type::Number())) {
+ handle_smi = true;
+ }
+ expr->set_target(target);
+ order[ordered_functions++] = FunctionSorter(
+ i, target->shared()->profiler_ticks(), InliningAstSize(target));
}
}
std::sort(order, order + ordered_functions);
HBasicBlock* number_block = NULL;
+ HBasicBlock* join = NULL;
+ handled_string = false;
+ int count = 0;
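+ // Tracks processed candidates separately: string types may be skipped,
+ // so the loop index cannot tell whether this is the first handled map.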
for (int fn = 0; fn < ordered_functions; ++fn) {
int i = order[fn].index();
- Handle<Map> map = types->at(i);
- if (fn == 0) {
+ PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
+ if (info.type()->Is(Type::String())) {
+ if (handled_string) continue;
+ handled_string = true;
+ }
+ // Call CanAccessMonomorphic() again for its side effect of redoing the
+ // lookup, so that info.constant() below yields the target function.
+ info.CanAccessMonomorphic();
+ Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
+
+ expr->set_target(target);
+ if (count == 0) {
// Only needed once.
join = graph()->CreateBasicBlock();
if (handle_smi) {
@@ -6851,59 +7396,67 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
number_block = graph()->CreateBasicBlock();
FinishCurrentBlock(New<HIsSmiAndBranch>(
receiver, empty_smi_block, not_smi_block));
- Goto(empty_smi_block, number_block);
+ GotoNoSimulate(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(receiver);
}
}
+ ++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
HUnaryControlInstruction* compare;
- if (handle_smi && map.is_identical_to(number_marker_map)) {
+ Handle<Map> map = info.map();
+ if (info.type()->Is(Type::Number())) {
+ Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
- map = initial_number_map;
- expr->set_number_check(
- Handle<JSObject>(JSObject::cast(map->prototype())));
- } else if (map.is_identical_to(string_marker_map)) {
+ } else if (info.type()->Is(Type::String())) {
compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
- map = initial_string_map;
- expr->set_string_check(
- Handle<JSObject>(JSObject::cast(map->prototype())));
} else {
compare = New<HCompareMap>(receiver, map, if_true, if_false);
- expr->set_map_check();
}
-
FinishCurrentBlock(compare);
- if (expr->check_type() == NUMBER_CHECK) {
- Goto(if_true, number_block);
+ if (info.type()->Is(Type::Number())) {
+ GotoNoSimulate(if_true, number_block);
if_true = number_block;
- number_block->SetJoinId(expr->id());
}
+
set_current_block(if_true);
- expr->ComputeTarget(map, name);
- AddCheckPrototypeMaps(expr->holder(), map);
- if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
+ AddCheckPrototypeMaps(info.holder(), map);
+
+ HValue* function = Add<HConstant>(expr->target());
+ environment()->SetExpressionStackAt(0, function);
+ Push(receiver);
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ bool needs_wrapping = NeedsWrappingFor(info.type(), target);
+ bool try_inline = FLAG_polymorphic_inlining && !needs_wrapping;
+ if (FLAG_trace_inlining && try_inline) {
Handle<JSFunction> caller = current_info()->closure();
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
- *name->ToCString(),
- *caller_name);
+ name->ToCString().get(),
+ caller_name.get());
}
- if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
+ if (try_inline && TryInlineCall(expr)) {
// Trying to inline will signal that we should bailout from the
// entire compilation by setting stack overflow on the visitor.
if (HasStackOverflow()) return;
} else {
- HCallConstantFunction* call =
- New<HCallConstantFunction>(expr->target(), argument_count);
- PreProcessCall(call);
+ // Since HWrapReceiver currently cannot actually wrap numbers and strings,
+ // use the regular CallFunctionStub for method calls to wrap the receiver.
+ // TODO(verwaest): Support creation of value wrappers directly in
+ // HWrapReceiver.
+ HInstruction* call = needs_wrapping
+ ? NewUncasted<HCallFunction>(
+ function, argument_count, WRAP_AND_CALL)
+ : BuildCallConstantFunction(target, argument_count);
+ PushArgumentsFromEnvironment(argument_count);
AddInstruction(call);
+ Drop(1); // Drop the function.
if (!ast_context()->IsEffect()) Push(call);
}
@@ -6915,15 +7468,27 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- // Because the deopt may be the only path in the polymorphic call, make sure
- // that the environment stack matches the depth on deopt that it otherwise
- // would have had after a successful call.
- Drop(argument_count);
- if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
- FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic call");
} else {
- HCallNamed* call = New<HCallNamed>(name, argument_count);
- PreProcessCall(call);
+ Property* prop = expr->expression()->AsProperty();
+ HInstruction* function = BuildNamedGeneric(
+ LOAD, receiver, name, NULL, prop->IsUninitialized());
+ AddInstruction(function);
+ Push(function);
+ AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+
+ environment()->SetExpressionStackAt(1, function);
+ environment()->SetExpressionStackAt(0, receiver);
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+ CallFunctionFlags flags = receiver->type().IsJSObject()
+ ? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
+ HInstruction* call = New<HCallFunction>(
+ function, argument_count, flags);
+
+ PushArgumentsFromEnvironment(argument_count);
+
+ Drop(1); // Function.
if (join != NULL) {
AddInstruction(call);
@@ -6957,10 +7522,11 @@ void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
if (reason == NULL) {
- PrintF("Inlined %s called from %s.\n", *target_name, *caller_name);
+ PrintF("Inlined %s called from %s.\n", target_name.get(),
+ caller_name.get());
} else {
PrintF("Did not inline %s called from %s (%s).\n",
- *target_name, *caller_name, reason);
+ target_name.get(), caller_name.get(), reason);
}
}
}
@@ -6982,6 +7548,11 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
return target_shared->inline_builtin() ? 0 : kNotInlinable;
}
+ if (target_shared->IsApiFunction()) {
+ TraceInline(target, caller, "target is api function");
+ return kNotInlinable;
+ }
+
// Do a quick check on source code length to avoid parsing large
// inlining candidates.
if (target_shared->SourceSize() >
@@ -7005,13 +7576,13 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
}
-bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
- Handle<JSFunction> target,
+bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
int arguments_count,
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind) {
+ InliningKind inlining_kind,
+ HSourcePosition position) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -7130,6 +7701,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
target_shared->set_scope_info(*target_scope_info);
}
target_shared->EnableDeoptimizationSupport(*target_info.code());
+ target_shared->set_feedback_vector(*target_info.feedback_vector());
Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
&target_info,
target_shared);
@@ -7143,28 +7715,26 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
ASSERT(target_shared->has_deoptimization_support());
AstTyper::Run(&target_info);
+ int function_id = graph()->TraceInlinedFunction(target_shared, position);
+
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
- this, &target_info, inlining_kind);
+ this, &target_info, inlining_kind, function_id);
HConstant* undefined = graph()->GetConstantUndefined();
- bool undefined_receiver = HEnvironment::UseUndefinedReceiver(
- target, function, call_kind, inlining_kind);
+
HEnvironment* inner_env =
environment()->CopyForInlining(target,
arguments_count,
function,
undefined,
- function_state()->inlining_kind(),
- undefined_receiver);
+ function_state()->inlining_kind());
HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
inner_env->BindContext(context);
- Add<HSimulate>(return_id);
- current_block()->UpdateEnvironment(inner_env);
HArgumentsObject* arguments_object = NULL;
// If the function uses arguments object create and bind one, also copy
@@ -7180,15 +7750,26 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
}
}
+ // Capture the state before invoking the inlined function, for use by
+ // deopts inside the inlined function. This simulate has no bailout-id
+ // since it is not directly reachable for deopt, and is only used to
+ // capture the state. If the simulate becomes reachable by merging, the
+ // ast id of the simulate merged into it is adopted.
+ Add<HSimulate>(BailoutId::None());
+
+ current_block()->UpdateEnvironment(inner_env);
+ Scope* saved_scope = scope();
+ set_scope(target_info.scope());
HEnterInlined* enter_inlined =
- Add<HEnterInlined>(target, arguments_count, function,
+ Add<HEnterInlined>(return_id, target, arguments_count, function,
function_state()->inlining_kind(),
function->scope()->arguments(),
- arguments_object, undefined_receiver);
+ arguments_object);
function_state()->set_entry(enter_inlined);
VisitDeclarations(target_info.scope()->declarations());
VisitStatements(function->body());
+ set_scope(saved_scope);
if (HasStackOverflow()) {
// Bail out if the inline function did, as we cannot residualize a call
// instead.
@@ -7290,76 +7871,73 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
}
-bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
- // The function call we are inlining is a method call if the call
- // is a property call.
- CallKind call_kind = (expr->expression()->AsProperty() == NULL)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
-
- return TryInline(call_kind,
- expr->target(),
+bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
+ return TryInline(expr->target(),
expr->arguments()->length(),
NULL,
expr->id(),
expr->ReturnId(),
- drop_extra ? DROP_EXTRA_ON_RETURN : NORMAL_RETURN);
+ NORMAL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
HValue* implicit_return_value) {
- return TryInline(CALL_AS_FUNCTION,
- expr->target(),
+ return TryInline(expr->target(),
expr->arguments()->length(),
implicit_return_value,
expr->id(),
expr->ReturnId(),
- CONSTRUCT_CALL_RETURN);
+ CONSTRUCT_CALL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+ Handle<Map> receiver_map,
BailoutId ast_id,
BailoutId return_id) {
- return TryInline(CALL_AS_METHOD,
- getter,
+ if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
+ return TryInline(getter,
0,
NULL,
ast_id,
return_id,
- GETTER_CALL_RETURN);
+ GETTER_CALL_RETURN,
+ source_position());
}
bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+ Handle<Map> receiver_map,
BailoutId id,
BailoutId assignment_id,
HValue* implicit_return_value) {
- return TryInline(CALL_AS_METHOD,
- setter,
+ if (TryInlineApiSetter(setter, receiver_map, id)) return true;
+ return TryInline(setter,
1,
implicit_return_value,
id, assignment_id,
- SETTER_CALL_RETURN);
+ SETTER_CALL_RETURN,
+ source_position());
}
bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function,
Call* expr,
int arguments_count) {
- return TryInline(CALL_AS_METHOD,
- function,
+ return TryInline(function,
arguments_count,
NULL,
expr->id(),
expr->ReturnId(),
- NORMAL_RETURN);
+ NORMAL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
-bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
- bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
switch (id) {
@@ -7371,11 +7949,11 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
case kMathAbs:
case kMathSqrt:
case kMathLog:
+ case kMathClz32:
if (expr->arguments()->length() == 1) {
HValue* argument = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
- if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
@@ -7384,9 +7962,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
if (expr->arguments()->length() == 2) {
HValue* right = Pop();
HValue* left = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* op = HMul::NewImul(zone(), context(), left, right);
- if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
@@ -7402,9 +7979,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
Call* expr,
HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type) {
- ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
+ Handle<Map> receiver_map) {
// Try to inline calls like Math.* as operations in the calling function.
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
@@ -7412,13 +7987,10 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
switch (id) {
case kStringCharCodeAt:
case kStringCharAt:
- if (argument_count == 2 && check_type == STRING_CHECK) {
+ if (argument_count == 2) {
HValue* index = Pop();
HValue* string = Pop();
- ASSERT(!expr->holder().is_null());
- BuildCheckPrototypeMaps(Call::GetPrototypeForPrimitiveCheck(
- STRING_CHECK, expr->holder()->GetIsolate()),
- expr->holder());
+ Drop(1); // Function.
HInstruction* char_code =
BuildStringCharCodeAt(string, index);
if (id == kStringCharCodeAt) {
@@ -7432,10 +8004,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
break;
case kStringFromCharCode:
- if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ if (argument_count == 2) {
HValue* argument = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
ast_context()->ReturnInstruction(result, expr->id());
return true;
@@ -7449,21 +8020,20 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
case kMathAbs:
case kMathSqrt:
case kMathLog:
- if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ case kMathClz32:
+ if (argument_count == 2) {
HValue* argument = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
break;
case kMathPow:
- if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ if (argument_count == 3) {
HValue* right = Pop();
HValue* left = Pop();
- Pop(); // Pop receiver.
+ Drop(2); // Receiver and function.
HInstruction* result = NULL;
// Use sqrt() if exponent is 0.5 or -0.5.
if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
@@ -7492,11 +8062,10 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
break;
case kMathMax:
case kMathMin:
- if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ if (argument_count == 3) {
HValue* right = Pop();
HValue* left = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
: HMathMinMax::kMathMax;
HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
@@ -7505,16 +8074,282 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
break;
case kMathImul:
- if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ if (argument_count == 3) {
HValue* right = Pop();
HValue* left = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* result = HMul::NewImul(zone(), context(), left, right);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
+ case kArrayPop: {
+ if (receiver_map.is_null()) return false;
+ if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (!IsFastElementsKind(elements_kind)) return false;
+ if (receiver_map->is_observed()) return false;
+ ASSERT(receiver_map->is_extensible());
+
+ Drop(expr->arguments()->length());
+ HValue* result;
+ HValue* reduced_length;
+ HValue* receiver = Pop();
+
+ HValue* checked_object = AddCheckMap(receiver, receiver_map);
+ HValue* length = Add<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForArrayLength(elements_kind));
+
+ Drop(1); // Function.
+
+ { NoObservableSideEffectsScope scope(this);
+ IfBuilder length_checker(this);
+
+ HValue* bounds_check = length_checker.If<HCompareNumericAndBranch>(
+ length, graph()->GetConstant0(), Token::EQ);
+ length_checker.Then();
+
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
+
+ length_checker.Else();
+ HValue* elements = AddLoadElements(checked_object);
+ // Ensure that we aren't popping from a copy-on-write array.
+ if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+ elements = BuildCopyElementsOnWrite(checked_object, elements,
+ elements_kind, length);
+ }
+ reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
+ result = AddElementAccess(elements, reduced_length, NULL,
+ bounds_check, elements_kind, LOAD);
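+ // Clear the popped slot by storing the hole (a hole NaN for double
+ // arrays); the kind is switched to a holey one below so the store is
+ // valid, and the old element can be collected.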
+ Factory* factory = isolate()->factory();
+ double nan_double = FixedDoubleArray::hole_nan_as_double();
+ HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
+ ? Add<HConstant>(factory->the_hole_value())
+ : Add<HConstant>(nan_double);
+ if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+ elements_kind = FAST_HOLEY_ELEMENTS;
+ }
+ AddElementAccess(
+ elements, reduced_length, hole, bounds_check, elements_kind, STORE);
+ Add<HStoreNamedField>(
+ checked_object, HObjectAccess::ForArrayLength(elements_kind),
+ reduced_length, STORE_TO_INITIALIZED_ENTRY);
+
+ if (!ast_context()->IsEffect()) Push(result);
+
+ length_checker.End();
+ }
+ result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) Drop(1);
+
+ ast_context()->ReturnValue(result);
+ return true;
+ }
+ case kArrayPush: {
+ if (receiver_map.is_null()) return false;
+ if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (!IsFastElementsKind(elements_kind)) return false;
+ if (receiver_map->is_observed()) return false;
+ if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false;
+ ASSERT(receiver_map->is_extensible());
+
+ // If the prototype chain may contain elements accessors, the fast
+ // inlined version can't be used.
+ if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
+ // Even if there are no elements accessors on the prototype chain now,
+ // some may be installed later. Install a full prototype chain check to
+ // trap element accessors being added to the prototype chain, which would
+ // cause elements to go to dictionary mode and result in a map change.
+ Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
+ BuildCheckPrototypeMaps(prototype, Handle<JSObject>());
+
+ const int argc = expr->arguments()->length();
+ if (argc != 1) return false;
+
+ HValue* value_to_push = Pop();
+ HValue* array = Pop();
+ Drop(1); // Drop function.
+
+ HInstruction* new_size = NULL;
+ HValue* length = NULL;
+
+ {
+ NoObservableSideEffectsScope scope(this);
+
+ length = Add<HLoadNamedField>(array, static_cast<HValue*>(NULL),
+ HObjectAccess::ForArrayLength(elements_kind));
+
+ new_size = AddUncasted<HAdd>(length, graph()->GetConstant1());
+
+ bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ BuildUncheckedMonomorphicElementAccess(array, length,
+ value_to_push, is_array,
+ elements_kind, STORE,
+ NEVER_RETURN_HOLE,
+ STORE_AND_GROW_NO_TRANSITION);
+
+ if (!ast_context()->IsEffect()) Push(new_size);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) Drop(1);
+ }
+
+ ast_context()->ReturnValue(new_size);
+ return true;
+ }
+ case kArrayShift: {
+ if (receiver_map.is_null()) return false;
+ if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ ElementsKind kind = receiver_map->elements_kind();
+ if (!IsFastElementsKind(kind)) return false;
+ if (receiver_map->is_observed()) return false;
+ ASSERT(receiver_map->is_extensible());
+
+ // If the prototype chain may contain elements accessors, the fast
+ // inlined version can't be used.
+ if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
+
+ // Even if there are no elements accessors on the prototype chain now,
+ // some may be installed later. Install a full prototype chain check to
+ // trap element accessors being added to the prototype chain, which would
+ // cause elements to go to dictionary mode and result in a map change.
+ BuildCheckPrototypeMaps(
+ handle(JSObject::cast(receiver_map->prototype()), isolate()),
+ Handle<JSObject>::null());
+
+ // Threshold for fast inlined Array.shift().
+ HConstant* inline_threshold = Add<HConstant>(static_cast<int32_t>(16));
+
+ Drop(expr->arguments()->length());
+ HValue* receiver = Pop();
+ HValue* function = Pop();
+ HValue* result;
+
+ {
+ NoObservableSideEffectsScope scope(this);
+
+ HValue* length = Add<HLoadNamedField>(
+ receiver, static_cast<HValue*>(NULL),
+ HObjectAccess::ForArrayLength(kind));
+
+ IfBuilder if_lengthiszero(this);
+ HValue* lengthiszero = if_lengthiszero.If<HCompareNumericAndBranch>(
+ length, graph()->GetConstant0(), Token::EQ);
+ if_lengthiszero.Then();
+ {
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
+ }
+ if_lengthiszero.Else();
+ {
+ HValue* elements = AddLoadElements(receiver);
+
+ // Check if we can use the fast inlined Array.shift().
+ IfBuilder if_inline(this);
+ if_inline.If<HCompareNumericAndBranch>(
+ length, inline_threshold, Token::LTE);
+ if (IsFastSmiOrObjectElementsKind(kind)) {
+ // We cannot handle copy-on-write backing stores here.
+ if_inline.AndIf<HCompareMap>(
+ elements, isolate()->factory()->fixed_array_map());
+ }
+ if_inline.Then();
+ {
+ // Remember the result.
+ if (!ast_context()->IsEffect()) {
+ Push(AddElementAccess(elements, graph()->GetConstant0(), NULL,
+ lengthiszero, kind, LOAD));
+ }
+
+ // Compute the new length.
+ HValue* new_length = AddUncasted<HSub>(
+ length, graph()->GetConstant1());
+ new_length->ClearFlag(HValue::kCanOverflow);
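+ // length >= 1 in this branch (the zero case was handled above), so the
+ // subtraction cannot overflow.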
+
+ // Copy the remaining elements.
+ LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
+ {
+ HValue* new_key = loop.BeginBody(
+ graph()->GetConstant0(), new_length, Token::LT);
+ HValue* key = AddUncasted<HAdd>(new_key, graph()->GetConstant1());
+ key->ClearFlag(HValue::kCanOverflow);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, key, lengthiszero, kind, ALLOW_RETURN_HOLE);
+ HStoreKeyed* store = Add<HStoreKeyed>(
+ elements, new_key, element, kind);
+ store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ }
+ loop.EndBody();
+
+ // Put a hole at the end.
+ HValue* hole = IsFastSmiOrObjectElementsKind(kind)
+ ? Add<HConstant>(isolate()->factory()->the_hole_value())
+ : Add<HConstant>(FixedDoubleArray::hole_nan_as_double());
+ if (IsFastSmiOrObjectElementsKind(kind)) kind = FAST_HOLEY_ELEMENTS;
+ Add<HStoreKeyed>(
+ elements, new_length, hole, kind, INITIALIZING_STORE);
+
+ // Remember new length.
+ Add<HStoreNamedField>(
+ receiver, HObjectAccess::ForArrayLength(kind),
+ new_length, STORE_TO_INITIALIZED_ENTRY);
+ }
+ if_inline.Else();
+ {
+ Add<HPushArguments>(receiver);
+ result = Add<HCallJSFunction>(function, 1, true);
+ if (!ast_context()->IsEffect()) Push(result);
+ }
+ if_inline.End();
+ }
+ if_lengthiszero.End();
+ }
+ result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) Drop(1);
+ ast_context()->ReturnValue(result);
+ return true;
+ }
+ case kArrayIndexOf:
+ case kArrayLastIndexOf: {
+ if (receiver_map.is_null()) return false;
+ if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ ElementsKind kind = receiver_map->elements_kind();
+ if (!IsFastElementsKind(kind)) return false;
+ if (receiver_map->is_observed()) return false;
+ if (argument_count != 2) return false;
+ ASSERT(receiver_map->is_extensible());
+
+ // If the prototype chain may contain elements accessors, the fast
+ // inlined version can't be used.
+ if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
+
+ // Even if there are no elements accessors on the prototype chain now,
+ // some may be installed later. Install a full prototype chain check to
+ // trap element accessors being added to the prototype chain, which would
+ // cause elements to go to dictionary mode and result in a map change.
+ BuildCheckPrototypeMaps(
+ handle(JSObject::cast(receiver_map->prototype()), isolate()),
+ Handle<JSObject>::null());
+
+ HValue* search_element = Pop();
+ HValue* receiver = Pop();
+ Drop(1); // Drop function.
+
+ ArrayIndexOfMode mode = (id == kArrayIndexOf)
+ ? kFirstIndexOf : kLastIndexOf;
+ HValue* index = BuildArrayIndexOf(receiver, search_element, kind, mode);
+
+ if (!ast_context()->IsEffect()) Push(index);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) Drop(1);
+ ast_context()->ReturnValue(index);
+ return true;
+ }
default:
// Not yet supported for inlining.
break;
@@ -7523,12 +8358,187 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
+bool HOptimizedGraphBuilder::TryInlineApiFunctionCall(Call* expr,
+ HValue* receiver) {
+ Handle<JSFunction> function = expr->target();
+ int argc = expr->arguments()->length();
+ SmallMapList receiver_maps;
+ return TryInlineApiCall(function,
+ receiver,
+ &receiver_maps,
+ argc,
+ expr->id(),
+ kCallApiFunction);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
+ Call* expr,
+ HValue* receiver,
+ SmallMapList* receiver_maps) {
+ Handle<JSFunction> function = expr->target();
+ int argc = expr->arguments()->length();
+ return TryInlineApiCall(function,
+ receiver,
+ receiver_maps,
+ argc,
+ expr->id(),
+ kCallApiMethod);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<JSFunction> function,
+ Handle<Map> receiver_map,
+ BailoutId ast_id) {
+ SmallMapList receiver_maps(1, zone());
+ receiver_maps.Add(receiver_map, zone());
+ return TryInlineApiCall(function,
+ NULL, // Receiver is on expression stack.
+ &receiver_maps,
+ 0,
+ ast_id,
+ kCallApiGetter);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<JSFunction> function,
+ Handle<Map> receiver_map,
+ BailoutId ast_id) {
+ SmallMapList receiver_maps(1, zone());
+ receiver_maps.Add(receiver_map, zone());
+ return TryInlineApiCall(function,
+ NULL, // Receiver is on expression stack.
+ &receiver_maps,
+ 1,
+ ast_id,
+ kCallApiSetter);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
+ HValue* receiver,
+ SmallMapList* receiver_maps,
+ int argc,
+ BailoutId ast_id,
+ ApiCallType call_type) {
+ CallOptimization optimization(function);
+ if (!optimization.is_simple_api_call()) return false;
+ Handle<Map> holder_map;
+ if (call_type == kCallApiFunction) {
+ // Cannot embed a direct reference to the global proxy map
+ // as it may be dropped on deserialization.
+ CHECK(!isolate()->serializer_enabled());
+ ASSERT_EQ(0, receiver_maps->length());
+ receiver_maps->Add(handle(
+ function->context()->global_object()->global_receiver()->map()),
+ zone());
+ }
+ CallOptimization::HolderLookup holder_lookup =
+ CallOptimization::kHolderNotFound;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_maps->first(), &holder_lookup);
+ if (holder_lookup == CallOptimization::kHolderNotFound) return false;
+
+ if (FLAG_trace_inlining) {
+ PrintF("Inlining api function ");
+ function->ShortPrint();
+ PrintF("\n");
+ }
+
+ bool drop_extra = false;
+ bool is_store = false;
+ switch (call_type) {
+ case kCallApiFunction:
+ case kCallApiMethod:
+ // Need to check that none of the receiver maps could have changed.
+ Add<HCheckMaps>(receiver, receiver_maps);
+ // Need to ensure the chain between receiver and api_holder is intact.
+ if (holder_lookup == CallOptimization::kHolderFound) {
+ AddCheckPrototypeMaps(api_holder, receiver_maps->first());
+ } else {
+ ASSERT_EQ(holder_lookup, CallOptimization::kHolderIsReceiver);
+ }
+ // Includes receiver.
+ PushArgumentsFromEnvironment(argc + 1);
+ // Drop function after call.
+ drop_extra = true;
+ break;
+ case kCallApiGetter:
+ // Receiver and prototype chain cannot have changed.
+ ASSERT_EQ(0, argc);
+ ASSERT_EQ(NULL, receiver);
+ // Receiver is on expression stack.
+ receiver = Pop();
+ Add<HPushArguments>(receiver);
+ break;
+ case kCallApiSetter:
+ {
+ is_store = true;
+ // Receiver and prototype chain cannot have changed.
+ ASSERT_EQ(1, argc);
+ ASSERT_EQ(NULL, receiver);
+ // Receiver and value are on expression stack.
+ HValue* value = Pop();
+ receiver = Pop();
+ Add<HPushArguments>(receiver, value);
+ break;
+ }
+ }
+
+ HValue* holder = NULL;
+ switch (holder_lookup) {
+ case CallOptimization::kHolderFound:
+ holder = Add<HConstant>(api_holder);
+ break;
+ case CallOptimization::kHolderIsReceiver:
+ holder = receiver;
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate());
+ bool call_data_is_undefined = call_data_obj->IsUndefined();
+ HValue* call_data = Add<HConstant>(call_data_obj);
+ ApiFunction fun(v8::ToCData<Address>(api_call_info->callback()));
+ ExternalReference ref = ExternalReference(&fun,
+ ExternalReference::DIRECT_API_CALL,
+ isolate());
+ HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
+
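+ // Operand order must match the ApiFunctionCall interface descriptor;
+ // the ASSERT below checks the count against its environment length.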
+ HValue* op_vals[] = {
+ Add<HConstant>(function),
+ call_data,
+ holder,
+ api_function_address,
+ context()
+ };
+
+ CallInterfaceDescriptor* descriptor =
+ isolate()->call_descriptor(Isolate::ApiFunctionCall);
+
+ CallApiFunctionStub stub(isolate(), is_store, call_data_is_undefined, argc);
+ Handle<Code> code = stub.GetCode();
+ HConstant* code_value = Add<HConstant>(code);
+
+ ASSERT((sizeof(op_vals) / kPointerSize) ==
+ descriptor->environment_length());
+
+ HInstruction* call = New<HCallWithDescriptor>(
+ code_value, argc + 1, descriptor,
+ Vector<HValue*>(op_vals, descriptor->environment_length()));
+
+ if (drop_extra) Drop(1); // Drop function.
+ ast_context()->ReturnInstruction(call, ast_id);
+ return true;
+}
+
+
bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
- Expression* callee = expr->expression();
- Property* prop = callee->AsProperty();
- ASSERT(prop != NULL);
+ ASSERT(expr->expression()->IsProperty());
- if (!expr->IsMonomorphic() || expr->check_type() != RECEIVER_MAP_CHECK) {
+ if (!expr->IsMonomorphic()) {
return false;
}
Handle<Map> function_map = expr->GetReceiverTypes()->first();
@@ -7549,20 +8559,17 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
// Found pattern f.apply(receiver, arguments).
- CHECK_ALIVE_OR_RETURN(VisitForValue(prop->obj()), true);
- HValue* function = Top();
-
- AddCheckConstantFunction(expr->holder(), function, function_map);
-
CHECK_ALIVE_OR_RETURN(VisitForValue(args->at(0)), true);
- HValue* receiver = Pop();
+ HValue* receiver = Pop(); // receiver
+ HValue* function = Pop(); // f
+ Drop(1); // apply
- Drop(1); // Pop the function.
+ HValue* checked_function = AddCheckMap(function, function_map);
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
- HValue* wrapped_receiver = BuildWrapReceiver(receiver, function);
+ HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function);
HInstruction* result = New<HApplyArguments>(function,
wrapped_receiver,
length,
@@ -7577,7 +8584,8 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HArgumentsObject* args = function_state()->entry()->arguments_object();
const ZoneList<HValue*>* arguments_values = args->arguments_values();
int arguments_count = arguments_values->length();
- Push(BuildWrapReceiver(receiver, function));
+ Push(function);
+ Push(BuildWrapReceiver(receiver, checked_function));
for (int i = 1; i < arguments_count; i++) {
Push(arguments_values->at(i));
}
@@ -7591,22 +8599,230 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
if (TryInlineApply(known_function, expr, args_count)) return true;
}
- Drop(arguments_count - 1);
- Push(Add<HPushArgument>(Pop()));
- for (int i = 1; i < arguments_count; i++) {
- Push(Add<HPushArgument>(arguments_values->at(i)));
- }
-
- HInvokeFunction* call = New<HInvokeFunction>(function,
- known_function,
- arguments_count);
- Drop(arguments_count);
+ PushArgumentsFromEnvironment(arguments_count);
+ HInvokeFunction* call = New<HInvokeFunction>(
+ function, known_function, arguments_count);
+ Drop(1); // Function.
ast_context()->ReturnInstruction(call, expr->id());
return true;
}
}
+HValue* HOptimizedGraphBuilder::ImplicitReceiverFor(HValue* function,
+ Handle<JSFunction> target) {
+ SharedFunctionInfo* shared = target->shared();
+ if (shared->strict_mode() == SLOPPY && !shared->native()) {
+ // Cannot embed a direct reference to the global proxy
+ // as it is dropped on deserialization.
+ CHECK(!isolate()->serializer_enabled());
+ Handle<JSObject> global_receiver(
+ target->context()->global_object()->global_receiver());
+ return Add<HConstant>(global_receiver);
+ }
+ return graph()->GetConstantUndefined();
+}
+
+
+void HOptimizedGraphBuilder::BuildArrayCall(Expression* expression,
+ int arguments_count,
+ HValue* function,
+ Handle<AllocationSite> site) {
+ Add<HCheckValue>(function, array_function());
+
+ if (IsCallArrayInlineable(arguments_count, site)) {
+ BuildInlinedCallArray(expression, arguments_count, site);
+ return;
+ }
+
+ HInstruction* call = PreProcessCall(New<HCallNewArray>(
+ function, arguments_count + 1, site->GetElementsKind()));
+ if (expression->IsCall()) {
+ Drop(1);
+ }
+ ast_context()->ReturnInstruction(call, expression->id());
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
+ HValue* search_element,
+ ElementsKind kind,
+ ArrayIndexOfMode mode) {
+ ASSERT(IsFastElementsKind(kind));
+
+ NoObservableSideEffectsScope no_effects(this);
+
+ HValue* elements = AddLoadElements(receiver);
+ HValue* length = AddLoadArrayLength(receiver, kind);
+
+ HValue* initial;
+ HValue* terminating;
+ Token::Value token;
+ LoopBuilder::Direction direction;
+ if (mode == kFirstIndexOf) {
+ initial = graph()->GetConstant0();
+ terminating = length;
+ token = Token::LT;
+ direction = LoopBuilder::kPostIncrement;
+ } else {
+ ASSERT_EQ(kLastIndexOf, mode);
+ initial = length;
+ terminating = graph()->GetConstant0();
+ token = Token::GT;
+ direction = LoopBuilder::kPreDecrement;
+ }
+
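+ // Seed the result slot with -1 (not found); a matching loop iteration
+ // replaces it with the index and breaks out of the loop.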
+ Push(graph()->GetConstantMinus1());
+ if (IsFastDoubleElementsKind(kind) || IsFastSmiElementsKind(kind)) {
+ LoopBuilder loop(this, context(), direction);
+ {
+ HValue* index = loop.BeginBody(initial, terminating, token);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, static_cast<HValue*>(NULL),
+ kind, ALLOW_RETURN_HOLE);
+ IfBuilder if_issame(this);
+ if (IsFastDoubleElementsKind(kind)) {
+ if_issame.If<HCompareNumericAndBranch>(
+ element, search_element, Token::EQ_STRICT);
+ } else {
+ if_issame.If<HCompareObjectEqAndBranch>(element, search_element);
+ }
+ if_issame.Then();
+ {
+ Drop(1);
+ Push(index);
+ loop.Break();
+ }
+ if_issame.End();
+ }
+ loop.EndBody();
+ } else {
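+ // Generic object elements: dispatch on the search element's type so
+ // strings and numbers are compared by value and everything else by
+ // identity.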
+ IfBuilder if_isstring(this);
+ if_isstring.If<HIsStringAndBranch>(search_element);
+ if_isstring.Then();
+ {
+ LoopBuilder loop(this, context(), direction);
+ {
+ HValue* index = loop.BeginBody(initial, terminating, token);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, static_cast<HValue*>(NULL),
+ kind, ALLOW_RETURN_HOLE);
+ IfBuilder if_issame(this);
+ if_issame.If<HIsStringAndBranch>(element);
+ if_issame.AndIf<HStringCompareAndBranch>(
+ element, search_element, Token::EQ_STRICT);
+ if_issame.Then();
+ {
+ Drop(1);
+ Push(index);
+ loop.Break();
+ }
+ if_issame.End();
+ }
+ loop.EndBody();
+ }
+ if_isstring.Else();
+ {
+ IfBuilder if_isnumber(this);
+ if_isnumber.If<HIsSmiAndBranch>(search_element);
+ if_isnumber.OrIf<HCompareMap>(
+ search_element, isolate()->factory()->heap_number_map());
+ if_isnumber.Then();
+ {
+ HValue* search_number =
+ AddUncasted<HForceRepresentation>(search_element,
+ Representation::Double());
+ LoopBuilder loop(this, context(), direction);
+ {
+ HValue* index = loop.BeginBody(initial, terminating, token);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, static_cast<HValue*>(NULL),
+ kind, ALLOW_RETURN_HOLE);
+
+ IfBuilder if_element_isnumber(this);
+ if_element_isnumber.If<HIsSmiAndBranch>(element);
+ if_element_isnumber.OrIf<HCompareMap>(
+ element, isolate()->factory()->heap_number_map());
+ if_element_isnumber.Then();
+ {
+ HValue* number =
+ AddUncasted<HForceRepresentation>(element,
+ Representation::Double());
+ IfBuilder if_issame(this);
+ if_issame.If<HCompareNumericAndBranch>(
+ number, search_number, Token::EQ_STRICT);
+ if_issame.Then();
+ {
+ Drop(1);
+ Push(index);
+ loop.Break();
+ }
+ if_issame.End();
+ }
+ if_element_isnumber.End();
+ }
+ loop.EndBody();
+ }
+ if_isnumber.Else();
+ {
+ LoopBuilder loop(this, context(), direction);
+ {
+ HValue* index = loop.BeginBody(initial, terminating, token);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, static_cast<HValue*>(NULL),
+ kind, ALLOW_RETURN_HOLE);
+ IfBuilder if_issame(this);
+ if_issame.If<HCompareObjectEqAndBranch>(
+ element, search_element);
+ if_issame.Then();
+ {
+ Drop(1);
+ Push(index);
+ loop.Break();
+ }
+ if_issame.End();
+ }
+ loop.EndBody();
+ }
+ if_isnumber.End();
+ }
+ if_isstring.End();
+ }
+
+ return Pop();
+}
+
+
+bool HOptimizedGraphBuilder::TryHandleArrayCall(Call* expr, HValue* function) {
+ if (!array_function().is_identical_to(expr->target())) {
+ return false;
+ }
+
+ Handle<AllocationSite> site = expr->allocation_site();
+ if (site.is_null()) return false;
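+ // The allocation site supplies the elements-kind feedback needed to
+ // specialize the Array call.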
+
+ BuildArrayCall(expr,
+ expr->arguments()->length(),
+ function,
+ site);
+ return true;
+}
+
+
+bool HOptimizedGraphBuilder::TryHandleArrayCallNew(CallNew* expr,
+ HValue* function) {
+ if (!array_function().is_identical_to(expr->target())) {
+ return false;
+ }
+
+ BuildArrayCall(expr,
+ expr->arguments()->length(),
+ function,
+ expr->allocation_site());
+ return true;
+}
+
+
void HOptimizedGraphBuilder::VisitCall(Call* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -7617,125 +8833,116 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
Property* prop = callee->AsProperty();
if (prop != NULL) {
- if (!prop->key()->IsPropertyName()) {
- // Keyed function call.
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitForValue(prop->key()));
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ HValue* receiver = Top();
- // Push receiver and key like the non-optimized code generator expects it.
- HValue* key = Pop();
- HValue* receiver = Pop();
- Push(key);
- Push(Add<HPushArgument>(receiver));
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ SmallMapList* types;
+ ComputeReceiverTypes(expr, receiver, &types, zone());
- if (expr->IsMonomorphic()) {
- BuildCheckHeapObject(receiver);
- ElementsKind kind = expr->KeyedArrayCallIsHoley()
- ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ if (prop->key()->IsPropertyName() && types->length() > 0) {
+ Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+ PropertyAccessInfo info(this, LOAD, ToType(types->first()), name);
+ if (!info.CanAccessAsMonomorphic(types)) {
+ HandlePolymorphicCallNamed(expr, receiver, types, name);
+ return;
+ }
+ }
- Handle<Map> map(isolate()->get_initial_js_array_map(kind));
+ HValue* key = NULL;
+ if (!prop->key()->IsPropertyName()) {
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ key = Pop();
+ }
- HValue* function = BuildMonomorphicElementAccess(
- receiver, key, NULL, NULL, map, false, STANDARD_STORE);
+ CHECK_ALIVE(PushLoad(prop, receiver, key));
+ HValue* function = Pop();
- call = New<HCallFunction>(function, argument_count);
- } else {
- call = New<HCallKeyed>(key, argument_count);
- }
- Drop(argument_count + 1); // 1 is the key.
- return ast_context()->ReturnInstruction(call, expr->id());
- }
+ if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
- // Named function call.
- if (TryCallApply(expr)) return;
+ // Push the function under the receiver.
+ environment()->SetExpressionStackAt(0, function);
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ Push(receiver);
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- HValue* receiver =
- environment()->ExpressionStackAt(expr->arguments()->length());
+ if (function->IsConstant() &&
+ HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+ Handle<JSFunction> known_function = Handle<JSFunction>::cast(
+ HConstant::cast(function)->handle(isolate()));
+ expr->set_target(known_function);
- SmallMapList* types;
- bool was_monomorphic = expr->IsMonomorphic();
- bool monomorphic = ComputeReceiverTypes(expr, receiver, &types);
- if (!was_monomorphic && monomorphic) {
- monomorphic = expr->ComputeTarget(types->first(), name);
- }
+ if (TryCallApply(expr)) return;
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
- if (monomorphic) {
- Handle<Map> map = types->first();
- if (TryInlineBuiltinMethodCall(expr, receiver, map, expr->check_type())) {
+ Handle<Map> map = types->length() == 1 ? types->first() : Handle<Map>();
+ if (TryInlineBuiltinMethodCall(expr, receiver, map)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
- expr->target()->ShortPrint();
+ known_function->ShortPrint();
PrintF("\n");
}
return;
}
-
- if (CallStubCompiler::HasCustomCallGenerator(expr->target()) ||
- expr->check_type() != RECEIVER_MAP_CHECK) {
- // When the target has a custom call IC generator, use the IC,
- // because it is likely to generate better code. Also use the IC
- // when a primitive receiver check is required.
- call = PreProcessCall(New<HCallNamed>(name, argument_count));
+ if (TryInlineApiMethodCall(expr, receiver, types)) return;
+
+ // Wrap the receiver if necessary.
+ if (NeedsWrappingFor(ToType(types->first()), known_function)) {
+ // Since HWrapReceiver currently cannot actually wrap numbers and
+ // strings, use the regular CallFunctionStub for method calls to wrap
+ // the receiver.
+ // TODO(verwaest): Support creation of value wrappers directly in
+ // HWrapReceiver.
+ call = New<HCallFunction>(
+ function, argument_count, WRAP_AND_CALL);
+ } else if (TryInlineCall(expr)) {
+ return;
} else {
- AddCheckConstantFunction(expr->holder(), receiver, map);
-
- if (TryInlineCall(expr)) return;
- call = PreProcessCall(
- New<HCallConstantFunction>(expr->target(), argument_count));
+ call = BuildCallConstantFunction(known_function, argument_count);
}
- } else if (types != NULL && types->length() > 1) {
- ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
- HandlePolymorphicCallNamed(expr, receiver, types, name);
- return;
} else {
- call = PreProcessCall(New<HCallNamed>(name, argument_count));
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ CallFunctionFlags flags = receiver->type().IsJSObject()
+ ? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
+ call = New<HCallFunction>(function, argument_count, flags);
}
+ PushArgumentsFromEnvironment(argument_count);
+
} else {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
return Bailout(kPossibleDirectCallToEval);
}
- bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
- if (global_call) {
+ // The function is on the stack in the unoptimized code during
+ // evaluation of the arguments.
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* function = Top();
+ if (expr->global_call()) {
Variable* var = proxy->var();
bool known_global_function = false;
// If there is a global property cell for the name at compile time and
 // access check is not enabled, we assume that the function will not change
// and generate optimized code for calling the function.
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, LOAD);
if (type == kUseCell &&
!current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<GlobalObject> global(current_info()->global_object());
known_global_function = expr->ComputeGlobalTarget(global, &lookup);
}
if (known_global_function) {
- // Push the global object instead of the global receiver because
- // code generated by the full code generator expects it.
- HGlobalObject* global_object = Add<HGlobalObject>();
- Push(global_object);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Pop();
Add<HCheckValue>(function, expr->target());
- // Replace the global object with the global receiver.
- HGlobalReceiver* global_receiver = Add<HGlobalReceiver>(global_object);
- // Index of the receiver from the top of the expression stack.
+ // Placeholder for the receiver.
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+ // Patch the global object on the stack by the expected receiver.
+ HValue* receiver = ImplicitReceiverFor(function, expr->target());
const int receiver_index = argument_count - 1;
- ASSERT(environment()->ExpressionStackAt(receiver_index)->
- IsGlobalObject());
- environment()->SetExpressionStackAt(receiver_index, global_receiver);
+ environment()->SetExpressionStackAt(receiver_index, receiver);
- if (TryInlineBuiltinFunctionCall(expr, false)) { // Nothing to drop.
+ if (TryInlineBuiltinFunctionCall(expr)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
expr->target()->ShortPrint();
@@ -7743,41 +8950,30 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
return;
}
+ if (TryInlineApiFunctionCall(expr, receiver)) return;
+ if (TryHandleArrayCall(expr, function)) return;
if (TryInlineCall(expr)) return;
- if (expr->target().is_identical_to(current_info()->closure())) {
- graph()->MarkRecursive();
- }
-
- if (CallStubCompiler::HasCustomCallGenerator(expr->target())) {
- // When the target has a custom call IC generator, use the IC,
- // because it is likely to generate better code.
- call = PreProcessCall(New<HCallNamed>(var->name(), argument_count));
- } else {
- call = PreProcessCall(New<HCallKnownGlobal>(
- expr->target(), argument_count));
- }
+ PushArgumentsFromEnvironment(argument_count);
+ call = BuildCallConstantFunction(expr->target(), argument_count);
} else {
- HGlobalObject* receiver = Add<HGlobalObject>();
- Push(Add<HPushArgument>(receiver));
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
- call = New<HCallGlobal>(var->name(), argument_count);
- Drop(argument_count);
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
+ call = New<HCallFunction>(function, argument_count);
}
} else if (expr->IsMonomorphic()) {
- // The function is on the stack in the unoptimized code during
- // evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- HGlobalObject* global = Add<HGlobalObject>();
- HGlobalReceiver* receiver = Add<HGlobalReceiver>(global);
- Push(receiver);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
Add<HCheckValue>(function, expr->target());
- if (TryInlineBuiltinFunctionCall(expr, true)) { // Drop the function.
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+ HValue* receiver = ImplicitReceiverFor(function, expr->target());
+ const int receiver_index = argument_count - 1;
+ environment()->SetExpressionStackAt(receiver_index, receiver);
+
+ if (TryInlineBuiltinFunctionCall(expr)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
expr->target()->ShortPrint();
@@ -7785,46 +8981,42 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
return;
}
+ if (TryInlineApiFunctionCall(expr, receiver)) return;
- if (TryInlineCall(expr, true)) { // Drop function from environment.
- return;
- } else {
- call = PreProcessCall(New<HInvokeFunction>(function, expr->target(),
- argument_count));
- Drop(1); // The function.
- }
+ if (TryInlineCall(expr)) return;
- } else {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- HGlobalObject* global_object = Add<HGlobalObject>();
- HGlobalReceiver* receiver = Add<HGlobalReceiver>(global_object);
- Push(Add<HPushArgument>(receiver));
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ call = PreProcessCall(New<HInvokeFunction>(
+ function, expr->target(), argument_count));
+ } else {
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
call = New<HCallFunction>(function, argument_count);
- Drop(argument_count + 1);
}
}
+ Drop(1); // Drop the function.
return ast_context()->ReturnInstruction(call, expr->id());
}
-void HOptimizedGraphBuilder::BuildInlinedCallNewArray(CallNew* expr) {
+void HOptimizedGraphBuilder::BuildInlinedCallArray(
+ Expression* expression,
+ int argument_count,
+ Handle<AllocationSite> site) {
+ ASSERT(!site.is_null());
+ ASSERT(argument_count >= 0 && argument_count <= 1);
NoObservableSideEffectsScope no_effects(this);
- int argument_count = expr->arguments()->length();
// We should at least have the constructor on the expression stack.
HValue* constructor = environment()->ExpressionStackAt(argument_count);
- ElementsKind kind = expr->elements_kind();
- Handle<Cell> cell = expr->allocation_info_cell();
- AllocationSite* site = AllocationSite::cast(cell->value());
-
- // Register on the site for deoptimization if the cell value changes.
- site->AddDependentCompilationInfo(AllocationSite::TRANSITIONS, top_info());
- HInstruction* cell_instruction = Add<HConstant>(cell);
+ // Register on the site for deoptimization if the transition feedback changes.
+ AllocationSite::AddDependentCompilationInfo(
+ site, AllocationSite::TRANSITIONS, top_info());
+ ElementsKind kind = site->GetElementsKind();
+ HInstruction* site_instruction = Add<HConstant>(site);
// In the single constant argument case, we may have to adjust elements kind
// to avoid creating a packed non-empty array.
@@ -7843,35 +9035,15 @@ void HOptimizedGraphBuilder::BuildInlinedCallNewArray(CallNew* expr) {
// Build the array.
JSArrayBuilder array_builder(this,
kind,
- cell_instruction,
+ site_instruction,
constructor,
DISABLE_ALLOCATION_SITES);
- HValue* new_object;
- if (argument_count == 0) {
- new_object = array_builder.AllocateEmptyArray();
- } else if (argument_count == 1) {
- HValue* argument = environment()->Top();
- new_object = BuildAllocateArrayFromLength(&array_builder, argument);
- } else {
- HValue* length = Add<HConstant>(argument_count);
- // Smi arrays need to initialize array elements with the hole because
- // bailout could occur if the arguments don't fit in a smi.
- //
- // TODO(mvstanton): If all the arguments are constants in smi range, then
- // we could set fill_with_hole to false and save a few instructions.
- JSArrayBuilder::FillMode fill_mode = IsFastSmiElementsKind(kind)
- ? JSArrayBuilder::FILL_WITH_HOLE
- : JSArrayBuilder::DONT_FILL_WITH_HOLE;
- new_object = array_builder.AllocateArray(length, length, fill_mode);
- HValue* elements = array_builder.GetElementsLocation();
- for (int i = 0; i < argument_count; i++) {
- HValue* value = environment()->ExpressionStackAt(argument_count - i - 1);
- HValue* constant_i = Add<HConstant>(i);
- Add<HStoreKeyed>(elements, constant_i, value, kind);
- }
- }
-
- Drop(argument_count + 1); // drop constructor and args.
+ HValue* new_object = argument_count == 0
+ ? array_builder.AllocateEmptyArray()
+ : BuildAllocateArrayFromLength(&array_builder, Top());
+
+ int args_to_drop = argument_count + (expression->IsCall() ? 2 : 1);
+ Drop(args_to_drop);
ast_context()->ReturnValue(new_object);
}
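BuildInlinedCallArray is shared between Array(...) calls and new Array(...) constructs, which leave different amounts of state on the expression stack; hence the args_to_drop arithmetic above. A one-line sketch of that accounting (assuming the usual layout, where a plain call also has a receiver slot under the arguments):

    // Stack slots to discard once the inlined array replaces the call:
    // the arguments, plus function + receiver for a plain call, or just
    // the constructor for a construct call.
    int ArgsToDrop(int argument_count, bool is_plain_call) {
      return argument_count + (is_plain_call ? 2 : 1);
    }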
@@ -7885,38 +9057,40 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
}
-bool HOptimizedGraphBuilder::IsCallNewArrayInlineable(CallNew* expr) {
- bool inline_ok = false;
+bool HOptimizedGraphBuilder::IsCallArrayInlineable(
+ int argument_count,
+ Handle<AllocationSite> site) {
Handle<JSFunction> caller = current_info()->closure();
- Handle<JSFunction> target(isolate()->global_context()->array_function(),
- isolate());
- int argument_count = expr->arguments()->length();
+ Handle<JSFunction> target = array_function();
// We should have the function plus array arguments on the environment stack.
ASSERT(environment()->length() >= (argument_count + 1));
- Handle<Cell> cell = expr->allocation_info_cell();
- AllocationSite* site = AllocationSite::cast(cell->value());
+ ASSERT(!site.is_null());
+
+ bool inline_ok = false;
if (site->CanInlineCall()) {
// We also want to avoid inlining in certain 1 argument scenarios.
if (argument_count == 1) {
HValue* argument = Top();
if (argument->IsConstant()) {
// Do not inline if the constant length argument is not a smi or
- // outside the valid range for a fast array.
+ // outside the valid range for unrolled loop initialization.
HConstant* constant_argument = HConstant::cast(argument);
if (constant_argument->HasSmiValue()) {
int value = constant_argument->Integer32Value();
- inline_ok = value >= 0 &&
- value < JSObject::kInitialMaxFastElementArray;
+ inline_ok = value >= 0 && value <= kElementLoopUnrollThreshold;
if (!inline_ok) {
TraceInline(target, caller,
- "Length outside of valid array range");
+ "Constant length outside of valid inlining range.");
}
}
} else {
- inline_ok = true;
+ TraceInline(target, caller,
+ "Dont inline [new] Array(n) where n isn't constant.");
}
- } else {
+ } else if (argument_count == 0) {
inline_ok = true;
+ } else {
+ TraceInline(target, caller, "Too many arguments to inline.");
}
} else {
TraceInline(target, caller, "AllocationSite requested no inlining.");
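Collected into one predicate, the inlining policy above reads as a small decision table; this standalone sketch assumes kElementLoopUnrollThreshold is the constant bounding how many element initializations the builder is willing to emit inline:

    // Sketch of IsCallArrayInlineable's policy, with the checks flattened.
    bool CanInlineArrayCall(bool site_allows_inlining, int argc,
                            bool arg_is_constant_smi, int constant_length,
                            int unroll_threshold) {
      if (!site_allows_inlining) return false;  // AllocationSite opted out
      if (argc == 0) return true;               // empty array: always inline
      if (argc != 1) return false;              // too many arguments
      // Single argument: only a constant smi length in the unrollable range.
      return arg_is_constant_smi &&
             constant_length >= 0 && constant_length <= unroll_threshold;
    }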
@@ -7933,7 +9107,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
Factory* factory = isolate()->factory();
@@ -7951,8 +9125,8 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
- if (constructor->shared()->IsInobjectSlackTrackingInProgress()) {
- constructor->shared()->CompleteInobjectSlackTracking();
+ if (constructor->IsInobjectSlackTrackingInProgress()) {
+ constructor->CompleteInobjectSlackTracking();
}
// Calculate instance size from initial map of constructor.
@@ -7963,40 +9137,44 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// Allocate an instance of the implicit receiver object.
HValue* size_in_bytes = Add<HConstant>(instance_size);
- PretenureFlag pretenure_flag =
- (FLAG_pretenuring_call_new &&
- isolate()->heap()->GetPretenureMode() == TENURED)
- ? TENURED : NOT_TENURED;
- HAllocate* receiver =
- Add<HAllocate>(size_in_bytes, HType::JSObject(), pretenure_flag,
- JS_OBJECT_TYPE);
- receiver->set_known_initial_map(initial_map);
+ HAllocationMode allocation_mode;
+ if (FLAG_pretenuring_call_new) {
+ if (FLAG_allocation_site_pretenuring) {
+ // Try to use pretenuring feedback.
+ Handle<AllocationSite> allocation_site = expr->allocation_site();
+ allocation_mode = HAllocationMode(allocation_site);
+ // Take a dependency on allocation site.
+ AllocationSite::AddDependentCompilationInfo(allocation_site,
+ AllocationSite::TENURING,
+ top_info());
+ }
+ }
- // Load the initial map from the constructor.
- HValue* constructor_value = Add<HConstant>(constructor);
- HValue* initial_map_value =
- Add<HLoadNamedField>(constructor_value, HObjectAccess::ForJSObjectOffset(
- JSFunction::kPrototypeOrInitialMapOffset));
+ HAllocate* receiver = BuildAllocate(
+ size_in_bytes, HType::JSObject(), JS_OBJECT_TYPE, allocation_mode);
+ receiver->set_known_initial_map(initial_map);
// Initialize map and fields of the newly allocated object.
{ NoObservableSideEffectsScope no_effects(this);
ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
Add<HStoreNamedField>(receiver,
- HObjectAccess::ForJSObjectOffset(JSObject::kMapOffset),
- initial_map_value);
+ HObjectAccess::ForMapAndOffset(initial_map, JSObject::kMapOffset),
+ Add<HConstant>(initial_map));
HValue* empty_fixed_array = Add<HConstant>(factory->empty_fixed_array());
Add<HStoreNamedField>(receiver,
- HObjectAccess::ForJSObjectOffset(JSObject::kPropertiesOffset),
+ HObjectAccess::ForMapAndOffset(initial_map,
+ JSObject::kPropertiesOffset),
empty_fixed_array);
Add<HStoreNamedField>(receiver,
- HObjectAccess::ForJSObjectOffset(JSObject::kElementsOffset),
+ HObjectAccess::ForMapAndOffset(initial_map,
+ JSObject::kElementsOffset),
empty_fixed_array);
if (initial_map->inobject_properties() != 0) {
HConstant* undefined = graph()->GetConstantUndefined();
for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ int property_offset = initial_map->GetInObjectPropertyOffset(i);
Add<HStoreNamedField>(receiver,
- HObjectAccess::ForJSObjectOffset(property_offset),
+ HObjectAccess::ForMapAndOffset(initial_map, property_offset),
undefined);
}
}
@@ -8008,21 +9186,25 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
environment()->SetExpressionStackAt(receiver_index, receiver);
- if (TryInlineConstruct(expr, receiver)) return;
+ if (TryInlineConstruct(expr, receiver)) {
+ // Inlining worked, add a dependency on the initial map to make sure that
+ // this code is deoptimized whenever the initial map of the constructor
+ // changes.
+ Map::AddDependentCompilationInfo(
+ initial_map, DependentCode::kInitialMapChangedGroup, top_info());
+ return;
+ }
// TODO(mstarzinger): For now we remove the previous HAllocate and all
- // corresponding instructions and instead add HPushArgument for the
+ // corresponding instructions and instead add HPushArguments for the
// arguments in case inlining failed. What we actually should do is for
// inlining to try to build a subgraph without mutating the parent graph.
HInstruction* instr = current_block()->last();
- while (instr != initial_map_value) {
+ do {
HInstruction* prev_instr = instr->previous();
instr->DeleteAndReplaceWith(NULL);
instr = prev_instr;
- }
- initial_map_value->DeleteAndReplaceWith(NULL);
- receiver->DeleteAndReplaceWith(NULL);
- check->DeleteAndReplaceWith(NULL);
+ } while (instr != check);
environment()->SetExpressionStackAt(receiver_index, function);
HInstruction* call =
PreProcessCall(New<HCallNew>(function, argument_count));
@@ -8030,26 +9212,10 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
- Handle<JSFunction> array_function(
- isolate()->global_context()->array_function(), isolate());
- bool use_call_new_array = expr->target().is_identical_to(array_function);
- Handle<Cell> cell = expr->allocation_info_cell();
- if (use_call_new_array && IsCallNewArrayInlineable(expr)) {
- // Verify we are still calling the array function for our native context.
- Add<HCheckValue>(function, array_function);
- BuildInlinedCallNewArray(expr);
- return;
- }
+ if (TryHandleArrayCallNew(expr, function)) return;
- HBinaryCall* call;
- if (use_call_new_array) {
- Add<HCheckValue>(function, array_function);
- call = New<HCallNewArray>(function, argument_count, cell,
- expr->elements_kind());
- } else {
- call = New<HCallNew>(function, argument_count);
- }
- PreProcessCall(call);
+ HInstruction* call =
+ PreProcessCall(New<HCallNew>(function, argument_count));
return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -8066,7 +9232,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
const HOptimizedGraphBuilder::InlineFunctionGenerator
HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+ INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
@@ -8082,15 +9248,12 @@ void HGraphBuilder::BuildArrayBufferViewInitialization(
offset < ViewClass::kSizeWithInternalFields;
offset += kPointerSize) {
Add<HStoreNamedField>(obj,
- HObjectAccess::ForJSObjectOffset(offset),
- Add<HConstant>(static_cast<int32_t>(0)));
+ HObjectAccess::ForObservableJSObjectOffset(offset),
+ graph()->GetConstant0());
}
Add<HStoreNamedField>(
obj,
- HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
- Add<HStoreNamedField>(
- obj,
HObjectAccess::ForJSArrayBufferViewByteOffset(),
byte_offset);
Add<HStoreNamedField>(
@@ -8098,20 +9261,34 @@ void HGraphBuilder::BuildArrayBufferViewInitialization(
HObjectAccess::ForJSArrayBufferViewByteLength(),
byte_length);
- HObjectAccess weak_first_view_access =
- HObjectAccess::ForJSArrayBufferWeakFirstView();
- Add<HStoreNamedField>(obj,
- HObjectAccess::ForJSArrayBufferViewWeakNext(),
- Add<HLoadNamedField>(buffer, weak_first_view_access));
- Add<HStoreNamedField>(buffer, weak_first_view_access, obj);
+ if (buffer != NULL) {
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
+ HObjectAccess weak_first_view_access =
+ HObjectAccess::ForJSArrayBufferWeakFirstView();
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSArrayBufferViewWeakNext(),
+ Add<HLoadNamedField>(buffer,
+ static_cast<HValue*>(NULL),
+ weak_first_view_access));
+ Add<HStoreNamedField>(buffer, weak_first_view_access, obj);
+ } else {
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewBuffer(),
+ Add<HConstant>(static_cast<int32_t>(0)));
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSArrayBufferViewWeakNext(),
+ graph()->GetConstantUndefined());
+ }
}
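The buffer != NULL split above distinguishes views over a real ArrayBuffer, which must be linked into the buffer's weak view list, from buffer-less on-heap typed arrays, which get placeholder fields. A plain data-structure sketch of the linking (types are illustrative, not V8's):

    struct ArrayBufferView;

    struct ArrayBuffer {
      ArrayBufferView* weak_first_view = nullptr;  // head of weak view list
    };

    struct ArrayBufferView {
      ArrayBuffer* buffer = nullptr;
      ArrayBufferView* weak_next = nullptr;
      int byte_offset = 0;
      int byte_length = 0;
    };

    void InitializeView(ArrayBufferView* view, ArrayBuffer* buffer,
                        int byte_offset, int byte_length) {
      view->byte_offset = byte_offset;
      view->byte_length = byte_length;
      if (buffer != nullptr) {
        view->buffer = buffer;
        view->weak_next = buffer->weak_first_view;  // push onto weak list
        buffer->weak_first_view = view;
      }
      // Otherwise the placeholders stand in for "no backing buffer".
    }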
-void HOptimizedGraphBuilder::VisitDataViewInitialize(
+void HOptimizedGraphBuilder::GenerateDataViewInitialize(
CallRuntime* expr) {
ZoneList<Expression*>* arguments = expr->arguments();
- NoObservableSideEffectsScope scope(this);
ASSERT(arguments->length()== 4);
CHECK_ALIVE(VisitForValue(arguments->at(0)));
HValue* obj = Pop();
@@ -8125,16 +9302,131 @@ void HOptimizedGraphBuilder::VisitDataViewInitialize(
CHECK_ALIVE(VisitForValue(arguments->at(3)));
HValue* byte_length = Pop();
- BuildArrayBufferViewInitialization<JSDataView>(
- obj, buffer, byte_offset, byte_length);
+ {
+ NoObservableSideEffectsScope scope(this);
+ BuildArrayBufferViewInitialization<JSDataView>(
+ obj, buffer, byte_offset, byte_length);
+ }
}
-void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
+static Handle<Map> TypedArrayMap(Isolate* isolate,
+ ExternalArrayType array_type,
+ ElementsKind target_kind) {
+ Handle<Context> native_context = isolate->native_context();
+ Handle<JSFunction> fun;
+ switch (array_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ fun = Handle<JSFunction>(native_context->type##_array_fun()); \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ Handle<Map> map(fun->initial_map());
+ return Map::AsElementsKind(map, target_kind);
+}
+
+
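TypedArrayMap relies on V8's TYPED_ARRAYS X-macro: one list macro enumerates the typed-array variants and TYPED_ARRAY_CASE stamps out a switch case per variant. A self-contained sketch of the same idiom (the names and sizes here are demo values, not V8's actual list):

    #include <cstddef>

    #define DEMO_TYPED_ARRAYS(V) \
      V(Int8, 1)                 \
      V(Uint8, 1)                \
      V(Float64, 8)

    enum DemoArrayId { kDemoInt8, kDemoUint8, kDemoFloat64 };

    std::size_t DemoElementSize(DemoArrayId id) {
      switch (id) {
    // Each V(...) entry in the list expands to one "case ...: return ...;".
    #define DEMO_CASE(Type, size) \
        case kDemo##Type:         \
          return size;
        DEMO_TYPED_ARRAYS(DEMO_CASE)
    #undef DEMO_CASE
      }
      return 0;
    }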
+HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
+ ExternalArrayType array_type,
+ bool is_zero_byte_offset,
+ HValue* buffer, HValue* byte_offset, HValue* length) {
+ Handle<Map> external_array_map(
+ isolate()->heap()->MapForExternalArrayType(array_type));
+
+ // The HForceRepresentation is to prevent possible deopt on int-smi
+ // conversion after allocation but before the new object fields are set.
+ length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
+ HValue* elements =
+ Add<HAllocate>(
+ Add<HConstant>(ExternalArray::kAlignedSize),
+ HType::HeapObject(),
+ NOT_TENURED,
+ external_array_map->instance_type());
+
+ AddStoreMapConstant(elements, external_array_map);
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForFixedArrayLength(), length);
+
+ HValue* backing_store = Add<HLoadNamedField>(
+ buffer, static_cast<HValue*>(NULL),
+ HObjectAccess::ForJSArrayBufferBackingStore());
+
+ HValue* typed_array_start;
+ if (is_zero_byte_offset) {
+ typed_array_start = backing_store;
+ } else {
+ HInstruction* external_pointer =
+ AddUncasted<HAdd>(backing_store, byte_offset);
+ // Arguments are checked prior to call to TypedArrayInitialize,
+ // including byte_offset.
+ external_pointer->ClearFlag(HValue::kCanOverflow);
+ typed_array_start = external_pointer;
+ }
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForExternalArrayExternalPointer(),
+ typed_array_start);
+
+ return elements;
+}
+
+
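BuildAllocateExternalElements stores a raw pointer to the view's first element: the buffer's backing store plus the already validated byteOffset, which is why the overflow flag can be cleared on the add. The same computation in plain C++:

    #include <cstdint>

    // byte_offset was range-checked before %TypedArrayInitialize ran, so
    // the addition cannot run past the backing store.
    uint8_t* TypedArrayStart(uint8_t* backing_store, uint32_t byte_offset,
                             bool is_zero_byte_offset) {
      return is_zero_byte_offset ? backing_store
                                 : backing_store + byte_offset;
    }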
+HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
+ ExternalArrayType array_type, size_t element_size,
+ ElementsKind fixed_elements_kind,
+ HValue* byte_length, HValue* length) {
+ STATIC_ASSERT(
+ (FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask) == 0);
+ HValue* total_size;
+
+ // if fixed array's elements are not aligned to object's alignment,
+ // we need to align the whole array to object alignment.
+ if (element_size % kObjectAlignment != 0) {
+ total_size = BuildObjectSizeAlignment(
+ byte_length, FixedTypedArrayBase::kHeaderSize);
+ } else {
+ total_size = AddUncasted<HAdd>(byte_length,
+ Add<HConstant>(FixedTypedArrayBase::kHeaderSize));
+ total_size->ClearFlag(HValue::kCanOverflow);
+ }
+
+ // The HForceRepresentation is to prevent possible deopt on int-smi
+ // conversion after allocation but before the new object fields are set.
+ length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
+ Handle<Map> fixed_typed_array_map(
+ isolate()->heap()->MapForFixedTypedArray(array_type));
+ HValue* elements =
+ Add<HAllocate>(total_size, HType::HeapObject(),
+ NOT_TENURED, fixed_typed_array_map->instance_type());
+ AddStoreMapConstant(elements, fixed_typed_array_map);
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForFixedArrayLength(),
+ length);
+
+ HValue* filler = Add<HConstant>(static_cast<int32_t>(0));
+
+ {
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+
+ HValue* key = builder.BeginBody(
+ Add<HConstant>(static_cast<int32_t>(0)),
+ length, Token::LT);
+ Add<HStoreKeyed>(elements, key, filler, fixed_elements_kind);
+
+ builder.EndBody();
+ }
+ return elements;
+}
+
+
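The size computed above is header plus payload, rounded up to object alignment whenever the element size alone does not guarantee it. A standalone sketch of the round-up (assuming kObjectAlignment is a power of two, as in V8):

    #include <cstddef>

    std::size_t AlignedArraySize(std::size_t header_size,
                                 std::size_t byte_length,
                                 std::size_t element_size,
                                 std::size_t object_alignment) {
      std::size_t total = header_size + byte_length;
      if (element_size % object_alignment != 0) {
        // Round up to the next multiple of the (power-of-two) alignment.
        total = (total + object_alignment - 1) & ~(object_alignment - 1);
      }
      return total;
    }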
+void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
CallRuntime* expr) {
ZoneList<Expression*>* arguments = expr->arguments();
- NoObservableSideEffectsScope scope(this);
static const int kObjectArg = 0;
static const int kArrayIdArg = 1;
static const int kBufferArg = 2;
@@ -8147,19 +9439,34 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
CHECK_ALIVE(VisitForValue(arguments->at(kObjectArg)));
HValue* obj = Pop();
- ASSERT(arguments->at(kArrayIdArg)->node_type() == AstNode::kLiteral);
+ if (arguments->at(kArrayIdArg)->IsLiteral()) {
+ // This should never happen in real use, but can happen when fuzzing.
+ // Just bail out.
+ Bailout(kNeedSmiLiteral);
+ return;
+ }
Handle<Object> value =
static_cast<Literal*>(arguments->at(kArrayIdArg))->value();
- ASSERT(value->IsSmi());
+ if (!value->IsSmi()) {
+ // This should never happen in real use, but can happen when fuzzing.
+ // Just bail out.
+ Bailout(kNeedSmiLiteral);
+ return;
+ }
int array_id = Smi::cast(*value)->value();
- CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
- HValue* buffer = Pop();
+ HValue* buffer;
+ if (!arguments->at(kBufferArg)->IsNullLiteral()) {
+ CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
+ buffer = Pop();
+ } else {
+ buffer = NULL;
+ }
HValue* byte_offset;
bool is_zero_byte_offset;
- if (arguments->at(kByteOffsetArg)->node_type() == AstNode::kLiteral
+ if (arguments->at(kByteOffsetArg)->IsLiteral()
&& Smi::FromInt(0) ==
*static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) {
byte_offset = Add<HConstant>(static_cast<int32_t>(0));
@@ -8168,11 +9475,13 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
CHECK_ALIVE(VisitForValue(arguments->at(kByteOffsetArg)));
byte_offset = Pop();
is_zero_byte_offset = false;
+ ASSERT(buffer != NULL);
}
CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg)));
HValue* byte_length = Pop();
+ NoObservableSideEffectsScope scope(this);
IfBuilder byte_offset_smi(this);
if (!is_zero_byte_offset) {
@@ -8180,13 +9489,24 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
byte_offset_smi.Then();
}
+ ExternalArrayType array_type =
+ kExternalInt8Array; // Bogus initialization.
+ size_t element_size = 1; // Bogus initialization.
+ ElementsKind external_elements_kind = // Bogus initialization.
+ EXTERNAL_INT8_ELEMENTS;
+ ElementsKind fixed_elements_kind = // Bogus initialization.
+ INT8_ELEMENTS;
+ Runtime::ArrayIdToTypeAndSize(array_id,
+ &array_type,
+ &external_elements_kind,
+ &fixed_elements_kind,
+ &element_size);
+
+
{ // byte_offset is Smi.
BuildArrayBufferViewInitialization<JSTypedArray>(
obj, buffer, byte_offset, byte_length);
- ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
- size_t element_size = 1; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(array_id, &array_type, &element_size);
HInstruction* length = AddUncasted<HDiv>(byte_length,
Add<HConstant>(static_cast<int32_t>(element_size)));
@@ -8195,40 +9515,19 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
HObjectAccess::ForJSTypedArrayLength(),
length);
- HValue* elements =
- Add<HAllocate>(
- Add<HConstant>(ExternalArray::kAlignedSize),
- HType::JSArray(),
- NOT_TENURED,
- static_cast<InstanceType>(FIRST_EXTERNAL_ARRAY_TYPE + array_type));
-
- Handle<Map> external_array_map(
- isolate()->heap()->MapForExternalArrayType(array_type));
- Add<HStoreNamedField>(elements,
- HObjectAccess::ForMap(),
- Add<HConstant>(external_array_map));
-
- HValue* backing_store = Add<HLoadNamedField>(
- buffer, HObjectAccess::ForJSArrayBufferBackingStore());
-
- HValue* typed_array_start;
- if (is_zero_byte_offset) {
- typed_array_start = backing_store;
+ HValue* elements;
+ if (buffer != NULL) {
+ elements = BuildAllocateExternalElements(
+ array_type, is_zero_byte_offset, buffer, byte_offset, length);
+ Handle<Map> obj_map = TypedArrayMap(
+ isolate(), array_type, external_elements_kind);
+ AddStoreMapConstant(obj, obj_map);
} else {
- HInstruction* external_pointer =
- AddUncasted<HAdd>(backing_store, byte_offset);
- // Arguments are checked prior to call to TypedArrayInitialize,
- // including byte_offset.
- external_pointer->ClearFlag(HValue::kCanOverflow);
- typed_array_start = external_pointer;
- }
-
- Add<HStoreNamedField>(elements,
- HObjectAccess::ForExternalArrayExternalPointer(),
- typed_array_start);
- Add<HStoreNamedField>(elements,
- HObjectAccess::ForFixedArrayLength(),
- length);
+ ASSERT(is_zero_byte_offset);
+ elements = BuildAllocateFixedTypedArray(
+ array_type, element_size, fixed_elements_kind,
+ byte_length, length);
+ }
Add<HStoreNamedField>(
obj, HObjectAccess::ForElementsPointer(), elements);
}
@@ -8236,19 +9535,87 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
if (!is_zero_byte_offset) {
byte_offset_smi.Else();
{ // byte_offset is not Smi.
- Push(Add<HPushArgument>(obj));
- VisitArgument(arguments->at(kArrayIdArg));
- Push(Add<HPushArgument>(buffer));
- Push(Add<HPushArgument>(byte_offset));
- Push(Add<HPushArgument>(byte_length));
+ Push(obj);
+ CHECK_ALIVE(VisitForValue(arguments->at(kArrayIdArg)));
+ Push(buffer);
+ Push(byte_offset);
+ Push(byte_length);
+ PushArgumentsFromEnvironment(kArgsLength);
Add<HCallRuntime>(expr->name(), expr->function(), kArgsLength);
- Drop(kArgsLength);
}
}
byte_offset_smi.End();
}
+void HOptimizedGraphBuilder::GenerateMaxSmi(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
+ return ast_context()->ReturnInstruction(max_smi, expr->id());
+}
+
+
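Smi::kMaxValue is a compile-time constant because smis are pointer-tagged integers: on 32-bit V8 one bit is the tag, leaving a signed 31-bit payload. A sketch of the bound under that assumption:

    #include <cassert>
    #include <cstdint>

    const int kSmiPayloadBits = 31;  // assumption: 32-bit tagged pointers
    const int32_t kMaxSmi = (INT32_C(1) << (kSmiPayloadBits - 1)) - 1;

    int main() {
      assert(kMaxSmi == 1073741823);  // 2^30 - 1
      return 0;
    }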
+void HOptimizedGraphBuilder::GenerateTypedArrayMaxSizeInHeap(
+ CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ HConstant* result = New<HConstant>(static_cast<int32_t>(
+ FLAG_typed_array_max_size_in_heap));
+ return ast_context()->ReturnInstruction(result, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateArrayBufferGetByteLength(
+ CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
+ HValue* buffer = Pop();
+ HInstruction* result = New<HLoadNamedField>(
+ buffer,
+ static_cast<HValue*>(NULL),
+ HObjectAccess::ForJSArrayBufferByteLength());
+ return ast_context()->ReturnInstruction(result, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteLength(
+ CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
+ HValue* buffer = Pop();
+ HInstruction* result = New<HLoadNamedField>(
+ buffer,
+ static_cast<HValue*>(NULL),
+ HObjectAccess::ForJSArrayBufferViewByteLength());
+ return ast_context()->ReturnInstruction(result, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteOffset(
+ CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
+ HValue* buffer = Pop();
+ HInstruction* result = New<HLoadNamedField>(
+ buffer,
+ static_cast<HValue*>(NULL),
+ HObjectAccess::ForJSArrayBufferViewByteOffset());
+ return ast_context()->ReturnInstruction(result, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateTypedArrayGetLength(
+ CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
+ HValue* buffer = Pop();
+ HInstruction* result = New<HLoadNamedField>(
+ buffer,
+ static_cast<HValue*>(NULL),
+ HObjectAccess::ForJSTypedArrayLength());
+ return ast_context()->ReturnInstruction(result, expr->id());
+}
+
+
void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -8260,21 +9627,8 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
- if (function->function_id == Runtime::kDataViewInitialize) {
- return VisitDataViewInitialize(expr);
- }
-
- if (function->function_id == Runtime::kTypedArrayInitialize) {
- return VisitTypedArrayInitialize(expr);
- }
-
- if (function->function_id == Runtime::kMaxSmi) {
- ASSERT(expr->arguments()->length() == 0);
- HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
- return ast_context()->ReturnInstruction(max_smi, expr->id());
- }
-
- if (function->intrinsic_type == Runtime::INLINE) {
+ if (function->intrinsic_type == Runtime::INLINE ||
+ function->intrinsic_type == Runtime::INLINE_OPTIMIZED) {
ASSERT(expr->name()->length() > 0);
ASSERT(expr->name()->Get(0) == '_');
// Call to an inline function.
@@ -8289,13 +9643,12 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
(this->*generator)(expr);
} else {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
Handle<String> name = expr->name();
int argument_count = expr->arguments()->length();
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
HCallRuntime* call = New<HCallRuntime>(name, function,
argument_count);
- Drop(argument_count);
return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -8324,9 +9677,7 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
HValue* key = Pop();
HValue* obj = Pop();
HValue* function = AddLoadJSBuiltin(Builtins::DELETE);
- Add<HPushArgument>(obj);
- Add<HPushArgument>(key);
- Add<HPushArgument>(Add<HConstant>(function_strict_mode_flag()));
+ Add<HPushArguments>(obj, key, Add<HConstant>(function_strict_mode()));
// TODO(olivf) InvokeFunction produces a check for the parameter count,
// even though we are certain to pass the correct number of arguments here.
HInstruction* instr = New<HInvokeFunction>(function, 3);
@@ -8417,8 +9768,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
bool returns_original_input,
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
- Handle<Type> info = expr->type();
- Representation rep = Representation::FromType(info);
+ Representation rep = Representation::FromType(expr->type());
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
}
@@ -8473,7 +9823,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -8491,7 +9841,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == CONST) {
+ if (var->mode() == CONST_LEGACY) {
return Bailout(kUnsupportedCountOperationWithConst);
}
// Argument of the count operation is a variable, not a property.
@@ -8597,13 +9947,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
return New<HConstant>(s->Get(i));
}
}
- BuildCheckHeapObject(string);
- HValue* checkstring =
- Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
- HInstruction* length = BuildLoadStringLength(string, checkstring);
- AddInstruction(length);
- HInstruction* checked_index = Add<HBoundsCheck>(index, length);
- return New<HStringCharCodeAt>(string, checked_index);
+ string = BuildCheckString(string);
+ index = Add<HBoundsCheck>(index, AddLoadStringLength(string));
+ return New<HStringCharCodeAt>(string, index);
}
@@ -8619,13 +9965,7 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
}
if (!const32_minus_sa->IsSub()) return false;
HSub* sub = HSub::cast(const32_minus_sa);
- if (sa != sub->right()) return false;
- HValue* const32 = sub->left();
- if (!const32->IsConstant() ||
- HConstant::cast(const32)->Integer32Value() != 32) {
- return false;
- }
- return (sub->right() == sa);
+ return sub->left()->EqualsInteger32Constant(32) && sub->right() == sa;
}
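The simplified predicate recognizes the shift pair (x << sa) | (x >> (32 - sa)), which is exactly a 32-bit left rotate, so the backend can emit a single rotate instruction. A standalone check of the identity:

    #include <cassert>
    #include <cstdint>

    uint32_t RotateLeft(uint32_t x, uint32_t sa) {
      sa &= 31;
      if (sa == 0) return x;  // avoid the undefined shift by 32
      return (x << sa) | (x >> (32 - sa));
    }

    int main() {
      assert(RotateLeft(0x80000001u, 1) == 0x00000003u);
      assert(RotateLeft(0x12345678u, 8) == 0x34567812u);
      return 0;
    }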
@@ -8673,8 +10013,8 @@ bool CanBeZero(HValue* right) {
HValue* HGraphBuilder::EnforceNumberType(HValue* number,
- Handle<Type> expected) {
- if (expected->Is(Type::Smi())) {
+ Type* expected) {
+ if (expected->Is(Type::SignedSmall())) {
return AddUncasted<HForceRepresentation>(number, Representation::Smi());
}
if (expected->Is(Type::Signed32())) {
@@ -8685,12 +10025,12 @@ HValue* HGraphBuilder::EnforceNumberType(HValue* number,
}
-HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
+HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
Maybe<HConstant*> number = constant->CopyToTruncatedNumber(zone());
if (number.has_value) {
- *expected = handle(Type::Number(), isolate());
+ *expected = Type::Number(zone());
return AddInstruction(number.value);
}
}
@@ -8700,25 +10040,24 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
// pushes with a NoObservableSideEffectsScope.
NoObservableSideEffectsScope no_effects(this);
- Handle<Type> expected_type = *expected;
+ Type* expected_type = *expected;
// Separate the number type from the rest.
- Handle<Type> expected_obj = handle(Type::Intersect(
- expected_type, handle(Type::NonNumber(), isolate())), isolate());
- Handle<Type> expected_number = handle(Type::Intersect(
- expected_type, handle(Type::Number(), isolate())), isolate());
+ Type* expected_obj =
+ Type::Intersect(expected_type, Type::NonNumber(zone()), zone());
+ Type* expected_number =
+ Type::Intersect(expected_type, Type::Number(zone()), zone());
// We expect to get a number.
 // (We need to check first, since Type::None->Is(Type::Any()) == true.)
if (expected_obj->Is(Type::None())) {
- ASSERT(!expected_number->Is(Type::None()));
+ ASSERT(!expected_number->Is(Type::None(zone())));
return value;
}
- if (expected_obj->Is(Type::Undefined())) {
+ if (expected_obj->Is(Type::Undefined(zone()))) {
// This is already done by HChange.
- *expected = handle(Type::Union(
- expected_number, handle(Type::Double(), isolate())), isolate());
+ *expected = Type::Union(expected_number, Type::Number(zone()), zone());
return value;
}
@@ -8729,22 +10068,33 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
- HValue* right) {
- Handle<Type> left_type = expr->left()->bounds().lower;
- Handle<Type> right_type = expr->right()->bounds().lower;
- Handle<Type> result_type = expr->bounds().lower;
+ HValue* right,
+ PushBeforeSimulateBehavior push_sim_result) {
+ Type* left_type = expr->left()->bounds().lower;
+ Type* right_type = expr->right()->bounds().lower;
+ Type* result_type = expr->bounds().lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
+ Handle<AllocationSite> allocation_site = expr->allocation_site();
+
+ HAllocationMode allocation_mode;
+ if (FLAG_allocation_site_pretenuring && !allocation_site.is_null()) {
+ allocation_mode = HAllocationMode(allocation_site);
+ }
HValue* result = HGraphBuilder::BuildBinaryOperation(
- expr->op(), left, right, left_type, right_type,
- result_type, fixed_right_arg);
+ expr->op(), left, right, left_type, right_type, result_type,
+ fixed_right_arg, allocation_mode);
// Add a simulate after instructions with observable side effects, and
// after phis, which are the result of BuildBinaryOperation when we
// inlined some complex subgraph.
if (result->HasObservableSideEffects() || result->IsPhi()) {
- Push(result);
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
- Drop(1);
+ if (push_sim_result == PUSH_BEFORE_SIMULATE) {
+ Push(result);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Drop(1);
+ } else {
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ }
}
return result;
}
@@ -8754,10 +10104,11 @@ HValue* HGraphBuilder::BuildBinaryOperation(
Token::Value op,
HValue* left,
HValue* right,
- Handle<Type> left_type,
- Handle<Type> right_type,
- Handle<Type> result_type,
- Maybe<int> fixed_right_arg) {
+ Type* left_type,
+ Type* right_type,
+ Type* result_type,
+ Maybe<int> fixed_right_arg,
+ HAllocationMode allocation_mode) {
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
@@ -8771,7 +10122,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous
// defaults.
- left_type = handle(Type::Any(), isolate());
+ left_type = Type::Any(zone());
} else {
if (!maybe_string_add) left = TruncateToNumber(left, &left_type);
left_rep = Representation::FromType(left_type);
@@ -8780,7 +10131,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
if (right_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
Deoptimizer::SOFT);
- right_type = handle(Type::Any(), isolate());
+ right_type = Type::Any(zone());
} else {
if (!maybe_string_add) right = TruncateToNumber(right, &right_type);
right_rep = Representation::FromType(right_type);
@@ -8806,8 +10157,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
} else if (!left_type->Is(Type::String())) {
ASSERT(right_type->Is(Type::String()));
HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
- Add<HPushArgument>(left);
- Add<HPushArgument>(right);
+ Add<HPushArguments>(left, right);
return AddUncasted<HInvokeFunction>(function, 2);
}
@@ -8818,12 +10168,50 @@ HValue* HGraphBuilder::BuildBinaryOperation(
} else if (!right_type->Is(Type::String())) {
ASSERT(left_type->Is(Type::String()));
HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
- Add<HPushArgument>(left);
- Add<HPushArgument>(right);
+ Add<HPushArguments>(left, right);
return AddUncasted<HInvokeFunction>(function, 2);
}
- return AddUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_NONE);
+ // Fast path for empty constant strings.
+ if (left->IsConstant() &&
+ HConstant::cast(left)->HasStringValue() &&
+ HConstant::cast(left)->StringValue()->length() == 0) {
+ return right;
+ }
+ if (right->IsConstant() &&
+ HConstant::cast(right)->HasStringValue() &&
+ HConstant::cast(right)->StringValue()->length() == 0) {
+ return left;
+ }
+
+ // Register the dependent code with the allocation site.
+ if (!allocation_mode.feedback_site().is_null()) {
+ ASSERT(!graph()->info()->IsStub());
+ Handle<AllocationSite> site(allocation_mode.feedback_site());
+ AllocationSite::AddDependentCompilationInfo(
+ site, AllocationSite::TENURING, top_info());
+ }
+
+ // Inline the string addition into the stub when creating allocation
+ // mementos to gather allocation site feedback, or if we can statically
+ // infer that we're going to create a cons string.
+ if ((graph()->info()->IsStub() &&
+ allocation_mode.CreateAllocationMementos()) ||
+ (left->IsConstant() &&
+ HConstant::cast(left)->HasStringValue() &&
+ HConstant::cast(left)->StringValue()->length() + 1 >=
+ ConsString::kMinLength) ||
+ (right->IsConstant() &&
+ HConstant::cast(right)->HasStringValue() &&
+ HConstant::cast(right)->StringValue()->length() + 1 >=
+ ConsString::kMinLength)) {
+ return BuildStringAdd(left, right, allocation_mode);
+ }
+
+ // Fallback to using the string add stub.
+ return AddUncasted<HStringAdd>(
+ left, right, allocation_mode.GetPretenureMode(),
+ STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
}
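The heuristic above inlines the string concatenation either when a stub needs allocation mementos for feedback, or when a constant operand makes the result provably long enough to be a cons string. Flattened into a predicate (sketch; cons_min_length corresponds to ConsString::kMinLength and is taken as a parameter here):

    // -1 encodes "operand is not a constant string".
    bool ShouldInlineStringAdd(bool is_stub_creating_mementos,
                               int left_const_len, int right_const_len,
                               int cons_min_length) {
      if (is_stub_creating_mementos) return true;
      if (left_const_len >= 0 &&
          left_const_len + 1 >= cons_min_length) return true;
      if (right_const_len >= 0 &&
          right_const_len + 1 >= cons_min_length) return true;
      return false;  // fall back to the string add stub
    }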
if (graph()->info()->IsStub()) {
@@ -8842,8 +10230,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
// operation in optimized code, which is more expensive, than a stub call.
if (graph()->info()->IsStub() && is_non_primitive) {
HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
- Add<HPushArgument>(left);
- Add<HPushArgument>(right);
+ Add<HPushArguments>(left, right);
instr = AddUncasted<HInvokeFunction>(function, 2);
} else {
switch (op) {
@@ -8857,21 +10244,15 @@ HValue* HGraphBuilder::BuildBinaryOperation(
instr = AddUncasted<HMul>(left, right);
break;
case Token::MOD: {
- if (fixed_right_arg.has_value) {
- if (right->IsConstant()) {
- HConstant* c_right = HConstant::cast(right);
- if (c_right->HasInteger32Value()) {
- ASSERT_EQ(fixed_right_arg.value, c_right->Integer32Value());
- }
- } else {
- HConstant* fixed_right = Add<HConstant>(
- static_cast<int>(fixed_right_arg.value));
- IfBuilder if_same(this);
- if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
- if_same.Then();
- if_same.ElseDeopt("Unexpected RHS of binary operation");
- right = fixed_right;
- }
+ if (fixed_right_arg.has_value &&
+ !right->EqualsInteger32Constant(fixed_right_arg.value)) {
+ HConstant* fixed_right = Add<HConstant>(
+ static_cast<int>(fixed_right_arg.value));
+ IfBuilder if_same(this);
+ if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
+ if_same.Then();
+ if_same.ElseDeopt("Unexpected RHS of binary operation");
+ right = fixed_right;
}
instr = AddUncasted<HMod>(left, right);
break;
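The fixed_right_arg path specializes x % K for the single divisor K seen by type feedback, guarding with an equality check that deoptimizes on any other right-hand side. Behavioral sketch (a thrown exception stands in for the deopt):

    #include <stdexcept>

    int ModWithFixedRhs(int left, int right, int fixed_right) {
      if (right != fixed_right) {
        // Hydrogen emits ElseDeopt here: bail back to unoptimized code.
        throw std::runtime_error("Unexpected RHS of binary operation");
      }
      return left % fixed_right;  // divisor is now a compile-time constant
    }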
@@ -9084,10 +10465,15 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
- HValue* result = BuildBinaryOperation(expr, left, right);
- if (FLAG_emit_opt_code_positions && result->IsBinaryOperation()) {
+ HValue* result =
+ BuildBinaryOperation(expr, left, right,
+ ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
+ : PUSH_BEFORE_SIMULATE);
+ if (FLAG_hydrogen_track_positions && result->IsBinaryOperation()) {
HBinaryOperation::cast(result)->SetOperandPositions(
- zone(), expr->left()->position(), expr->right()->position());
+ zone(),
+ ScriptPositionToSourcePosition(expr->left()->position()),
+ ScriptPositionToSourcePosition(expr->right()->position()));
}
return ast_context()->ReturnValue(result);
}
@@ -9121,7 +10507,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
@@ -9149,17 +10535,14 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
- Handle<Type> left_type = expr->left()->bounds().lower;
- Handle<Type> right_type = expr->right()->bounds().lower;
- Handle<Type> combined_type = expr->combined_type();
- Representation combined_rep = Representation::FromType(combined_type);
- Representation left_rep = Representation::FromType(left_type);
- Representation right_rep = Representation::FromType(right_type);
+ Type* left_type = expr->left()->bounds().lower;
+ Type* right_type = expr->right()->bounds().lower;
+ Type* combined_type = expr->combined_type();
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
- if (FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
@@ -9184,7 +10567,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Handle<String> name = proxy->name();
Handle<GlobalObject> global(current_info()->global_object());
LookupResult lookup(isolate());
- global->Lookup(*name, &lookup);
+ global->Lookup(name, &lookup);
if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
// If the function is in new space we assume it's more likely to
@@ -9211,61 +10594,108 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
} else if (op == Token::IN) {
HValue* function = AddLoadJSBuiltin(Builtins::IN);
- Add<HPushArgument>(left);
- Add<HPushArgument>(right);
+ Add<HPushArguments>(left, right);
// TODO(olivf) InvokeFunction produces a check for the parameter count,
// even though we are certain to pass the correct number of arguments here.
HInstruction* result = New<HInvokeFunction>(function, 2);
return ast_context()->ReturnInstruction(result, expr->id());
}
+ PushBeforeSimulateBehavior push_behavior =
+ ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
+ : PUSH_BEFORE_SIMULATE;
+ HControlInstruction* compare = BuildCompareInstruction(
+ op, left, right, left_type, right_type, combined_type,
+ ScriptPositionToSourcePosition(expr->left()->position()),
+ ScriptPositionToSourcePosition(expr->right()->position()),
+ push_behavior, expr->id());
+ if (compare == NULL) return; // Bailed out.
+ return ast_context()->ReturnControl(compare, expr->id());
+}
+
+
+HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Type* left_type,
+ Type* right_type,
+ Type* combined_type,
+ HSourcePosition left_position,
+ HSourcePosition right_position,
+ PushBeforeSimulateBehavior push_sim_result,
+ BailoutId bailout_id) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
if (combined_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for combined type "
"of binary operation",
Deoptimizer::SOFT);
- combined_type = left_type = right_type = handle(Type::Any(), isolate());
+ combined_type = left_type = right_type = Type::Any(zone());
}
+ Representation left_rep = Representation::FromType(left_type);
+ Representation right_rep = Representation::FromType(right_type);
+ Representation combined_rep = Representation::FromType(combined_type);
+
if (combined_type->Is(Type::Receiver())) {
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT: {
- // Can we get away with map check and not instance type check?
- if (combined_type->IsClass()) {
- Handle<Map> map = combined_type->AsClass();
- AddCheckMap(left, map);
- AddCheckMap(right, map);
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- if (FLAG_emit_opt_code_positions) {
- result->set_operand_position(zone(), 0, expr->left()->position());
- result->set_operand_position(zone(), 1, expr->right()->position());
- }
- return ast_context()->ReturnControl(result, expr->id());
- } else {
- BuildCheckHeapObject(left);
- Add<HCheckInstanceType>(left, HCheckInstanceType::IS_SPEC_OBJECT);
- BuildCheckHeapObject(right);
- Add<HCheckInstanceType>(right, HCheckInstanceType::IS_SPEC_OBJECT);
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- return ast_context()->ReturnControl(result, expr->id());
+ if (Token::IsEqualityOp(op)) {
+ // HCompareObjectEqAndBranch can only deal with object, so
+ // exclude numbers.
+ if ((left->IsConstant() &&
+ HConstant::cast(left)->HasNumberValue()) ||
+ (right->IsConstant() &&
+ HConstant::cast(right)->HasNumberValue())) {
+ Add<HDeoptimize>("Type mismatch between feedback and constant",
+ Deoptimizer::SOFT);
+ // The caller expects a branch instruction, so make it happy.
+ return New<HBranch>(graph()->GetConstantTrue());
+ }
+ // Can we get away with map check and not instance type check?
+ HValue* operand_to_check =
+ left->block()->block_id() < right->block()->block_id() ? left : right;
+ if (combined_type->IsClass()) {
+ Handle<Map> map = combined_type->AsClass()->Map();
+ AddCheckMap(operand_to_check, map);
+ HCompareObjectEqAndBranch* result =
+ New<HCompareObjectEqAndBranch>(left, right);
+ if (FLAG_hydrogen_track_positions) {
+ result->set_operand_position(zone(), 0, left_position);
+ result->set_operand_position(zone(), 1, right_position);
}
+ return result;
+ } else {
+ BuildCheckHeapObject(operand_to_check);
+ Add<HCheckInstanceType>(operand_to_check,
+ HCheckInstanceType::IS_SPEC_OBJECT);
+ HCompareObjectEqAndBranch* result =
+ New<HCompareObjectEqAndBranch>(left, right);
+ return result;
}
- default:
- return Bailout(kUnsupportedNonPrimitiveCompare);
+ } else {
+ Bailout(kUnsupportedNonPrimitiveCompare);
+ return NULL;
}
} else if (combined_type->Is(Type::InternalizedString()) &&
Token::IsEqualityOp(op)) {
+ // If we have a constant argument, it should be consistent with the type
+ // feedback (otherwise we fail assertions in HCompareObjectEqAndBranch).
+ if ((left->IsConstant() &&
+ !HConstant::cast(left)->HasInternalizedStringValue()) ||
+ (right->IsConstant() &&
+ !HConstant::cast(right)->HasInternalizedStringValue())) {
+ Add<HDeoptimize>("Type mismatch between feedback and constant",
+ Deoptimizer::SOFT);
+ // The caller expects a branch instruction, so make it happy.
+ return New<HBranch>(graph()->GetConstantTrue());
+ }
BuildCheckHeapObject(left);
Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING);
BuildCheckHeapObject(right);
Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- return ast_context()->ReturnControl(result, expr->id());
+ return result;
} else if (combined_type->Is(Type::String())) {
BuildCheckHeapObject(left);
Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
@@ -9273,23 +10703,32 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING);
HStringCompareAndBranch* result =
New<HStringCompareAndBranch>(left, right, op);
- return ast_context()->ReturnControl(result, expr->id());
+ return result;
} else {
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
- HCompareGeneric* result = New<HCompareGeneric>(left, right, op);
+ HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
- return ast_context()->ReturnInstruction(result, expr->id());
+ if (result->HasObservableSideEffects()) {
+ if (push_sim_result == PUSH_BEFORE_SIMULATE) {
+ Push(result);
+ AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+ Drop(1);
+ } else {
+ AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+ }
+ }
+ // TODO(jkummerow): Can we make this more efficient?
+ HBranch* branch = New<HBranch>(result);
+ return branch;
} else {
HCompareNumericAndBranch* result =
New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- if (FLAG_emit_opt_code_positions) {
- result->SetOperandPositions(zone(),
- expr->left()->position(),
- expr->right()->position());
+ if (FLAG_hydrogen_track_positions) {
+ result->SetOperandPositions(zone(), left_position, right_position);
}
- return ast_context()->ReturnControl(result, expr->id());
+ return result;
}
}
}
@@ -9302,7 +10741,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
if (expr->op() == Token::EQ_STRICT) {
@@ -9314,9 +10753,8 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
return ast_context()->ReturnControl(instr, expr->id());
} else {
ASSERT_EQ(Token::EQ, expr->op());
- Handle<Type> type = expr->combined_type()->Is(Type::None())
- ? handle(Type::Any(), isolate_)
- : expr->combined_type();
+ Type* type = expr->combined_type()->Is(Type::None())
+ ? Type::Any(zone()) : expr->combined_type();
HIfContinuation continuation;
BuildCompareNil(value, type, &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
@@ -9348,20 +10786,26 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
HValue* object_size_constant = Add<HConstant>(
boilerplate_object->map()->instance_size());
- // We should pull pre-tenure mode from the allocation site.
- // For now, just see what it says, and remark on it if it sez
- // we should pretenure. That means the rudimentary counting in the garbage
- // collector is having an effect.
- PretenureFlag pretenure_flag = isolate()->heap()->GetPretenureMode();
+ PretenureFlag pretenure_flag = NOT_TENURED;
if (FLAG_allocation_site_pretenuring) {
- pretenure_flag = site_context->current()->GetPretenureMode()
- ? TENURED
- : NOT_TENURED;
+ pretenure_flag = site_context->current()->GetPretenureMode();
+ Handle<AllocationSite> site(site_context->current());
+ AllocationSite::AddDependentCompilationInfo(
+ site, AllocationSite::TENURING, top_info());
}
HInstruction* object = Add<HAllocate>(object_size_constant, type,
pretenure_flag, instance_type, site_context->current());
+ // If allocation folding reaches Page::kMaxRegularHeapObjectSize, the
+ // elements array may not get folded into the object. Hence, we set the
+ // elements pointer to the empty fixed array and let store elimination
+ // remove this store in the folding case.
+ HConstant* empty_fixed_array = Add<HConstant>(
+ isolate()->factory()->empty_fixed_array());
+ Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+ empty_fixed_array);
+
BuildEmitObjectHeader(boilerplate_object, object);
Handle<FixedArrayBase> elements(boilerplate_object->elements());
@@ -9369,16 +10813,26 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
+ if (pretenure_flag == TENURED &&
+ elements->map() == isolate()->heap()->fixed_cow_array_map() &&
+ isolate()->heap()->InNewSpace(*elements)) {
+ // If we would like to pretenure a fixed cow array, we must ensure that the
+ // array is already in old space; otherwise we'll create too many old-to-
+ // new-space pointers (overflowing the store buffer).
+ elements = Handle<FixedArrayBase>(
+ isolate()->factory()->CopyAndTenureFixedCOWArray(
+ Handle<FixedArray>::cast(elements)));
+ boilerplate_object->set_elements(*elements);
+ }
+
HInstruction* object_elements = NULL;
if (elements_size > 0) {
HValue* object_elements_size = Add<HConstant>(elements_size);
- if (boilerplate_object->HasFastDoubleElements()) {
- object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
- pretenure_flag, FIXED_DOUBLE_ARRAY_TYPE, site_context->current());
- } else {
- object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
- pretenure_flag, FIXED_ARRAY_TYPE, site_context->current());
- }
+ InstanceType instance_type = boilerplate_object->HasFastDoubleElements()
+ ? FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE;
+ object_elements = Add<HAllocate>(
+ object_elements_size, HType::HeapObject(),
+ pretenure_flag, instance_type, site_context->current());
}
BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
@@ -9446,9 +10900,9 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
HInstruction* object,
AllocationSiteUsageContext* site_context,
PretenureFlag pretenure_flag) {
- Handle<DescriptorArray> descriptors(
- boilerplate_object->map()->instance_descriptors());
- int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
+ Handle<Map> boilerplate_map(boilerplate_object->map());
+ Handle<DescriptorArray> descriptors(boilerplate_map->instance_descriptors());
+ int limit = boilerplate_map->NumberOfOwnDescriptors();
int copied_fields = 0;
for (int i = 0; i < limit; i++) {
@@ -9465,7 +10919,7 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
// The access for the store depends on the type of the boilerplate.
HObjectAccess access = boilerplate_object->IsJSArray() ?
HObjectAccess::ForJSArrayOffset(property_offset) :
- HObjectAccess::ForJSObjectOffset(property_offset);
+ HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
@@ -9486,7 +10940,7 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
// 1) it's a child object of another object with a valid allocation site
// 2) we can just use the mode of the parent object for pretenuring
HInstruction* double_box =
- Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
+ Add<HAllocate>(heap_number_constant, HType::HeapObject(),
pretenure_flag, HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
@@ -9513,7 +10967,8 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
for (int i = copied_fields; i < inobject_properties; i++) {
ASSERT(boilerplate_object->IsJSObject());
int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(property_offset);
+ HObjectAccess access =
+ HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
Add<HStoreNamedField>(object, access, value_instruction);
}
}
@@ -9608,9 +11063,9 @@ void HOptimizedGraphBuilder::VisitDeclarations(
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
+ DeclareGlobalsStrictMode::encode(current_info()->strict_mode());
Add<HDeclareGlobals>(array, flags);
- globals_.Clear();
+ globals_.Rewind(0);
}
}
@@ -9620,7 +11075,7 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_.Add(variable->name(), zone());
@@ -9891,9 +11346,28 @@ void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HValueOf* result = New<HValueOf>(value);
- return ast_context()->ReturnInstruction(result, call->id());
+ HValue* object = Pop();
+
+ IfBuilder if_objectisvalue(this);
+ HValue* objectisvalue = if_objectisvalue.If<HHasInstanceTypeAndBranch>(
+ object, JS_VALUE_TYPE);
+ if_objectisvalue.Then();
+ {
+ // Return the actual value.
+ Push(Add<HLoadNamedField>(
+ object, objectisvalue,
+ HObjectAccess::ForObservableJSObjectOffset(
+ JSValue::kValueOffset)));
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_objectisvalue.Else();
+ {
+ // If the object is not a JSValue, return the object itself.
+ Push(object);
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_objectisvalue.End();
+ return ast_context()->ReturnValue(Pop());
}
@@ -9911,12 +11385,13 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ // We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
- HValue* string = Pop();
Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
@@ -9927,12 +11402,13 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ // We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
- HValue* string = Pop();
Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
@@ -9946,31 +11422,33 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* value = Pop();
HValue* object = Pop();
- // Check if object is a not a smi.
- HBasicBlock* if_smi = graph()->CreateBasicBlock();
- HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
- HBasicBlock* join = graph()->CreateBasicBlock();
- FinishCurrentBlock(New<HIsSmiAndBranch>(object, if_smi, if_heap_object));
- Goto(if_smi, join);
// Check if object is a JSValue.
- set_current_block(if_heap_object);
- HHasInstanceTypeAndBranch* typecheck =
- New<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
- HBasicBlock* if_js_value = graph()->CreateBasicBlock();
- HBasicBlock* not_js_value = graph()->CreateBasicBlock();
- typecheck->SetSuccessorAt(0, if_js_value);
- typecheck->SetSuccessorAt(1, not_js_value);
- FinishCurrentBlock(typecheck);
- Goto(not_js_value, join);
-
- // Create in-object property store to kValueOffset.
- set_current_block(if_js_value);
- Add<HStoreNamedField>(object,
- HObjectAccess::ForJSObjectOffset(JSValue::kValueOffset), value);
- Goto(if_js_value, join);
- join->SetJoinId(call->id());
- set_current_block(join);
+ IfBuilder if_objectisvalue(this);
+ if_objectisvalue.If<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
+ if_objectisvalue.Then();
+ {
+ // Create in-object property store to kValueOffset.
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset),
+ value);
+ if (!ast_context()->IsEffect()) {
+ Push(value);
+ }
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_objectisvalue.Else();
+ {
+ // Nothing to do in this case.
+ if (!ast_context()->IsEffect()) {
+ Push(value);
+ }
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_objectisvalue.End();
+ if (!ast_context()->IsEffect()) {
+ Drop(1);
+ }
return ast_context()->ReturnValue(value);
}
@@ -10024,12 +11502,6 @@ void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
- // %_Log is ignored in optimized code.
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
// Fast support for StringAdd.
void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
@@ -10037,8 +11509,7 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* result =
- NewUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_BOTH);
+ HInstruction* result = NewUncasted<HStringAdd>(left, right);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10046,9 +11517,9 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
// Fast support for SubString.
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
- Drop(3);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10056,9 +11527,9 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
// Fast support for StringCompare.
void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
- Drop(2);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10066,9 +11537,38 @@ void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
- Drop(4);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::LOW);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateDoubleHi(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::HIGH);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateConstructDouble(CallRuntime* call) {
+ ASSERT_EQ(2, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ HValue* lo = Pop();
+ HValue* hi = Pop();
+ HInstruction* result = NewUncasted<HConstructDouble>(hi, lo);
return ast_context()->ReturnInstruction(result, call->id());
}
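The three intrinsics above expose the two 32-bit halves of a double's IEEE-754 representation via HDoubleBits and HConstructDouble. A portable standalone C++ sketch of the same operations (names are illustrative, not V8 helpers):

    #include <cstdint>
    #include <cstring>

    uint32_t DoubleLoSketch(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      return static_cast<uint32_t>(bits);        // low mantissa word
    }

    uint32_t DoubleHiSketch(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      return static_cast<uint32_t>(bits >> 32);  // sign, exponent, mantissa top
    }

    double ConstructDoubleSketch(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double v;
      std::memcpy(&v, &bits, sizeof v);
      return v;
    }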
@@ -10076,10 +11576,14 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
// Construct a RegExp exec result with two in-object properties.
void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HCallStub* result = New<HCallStub>(CodeStub::RegExpConstructResult, 3);
- Drop(3);
- return ast_context()->ReturnInstruction(result, call->id());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ HValue* input = Pop();
+ HValue* index = Pop();
+ HValue* length = Pop();
+ HValue* result = BuildRegExpConstructResult(length, index, input);
+ return ast_context()->ReturnValue(result);
}
@@ -10094,8 +11598,7 @@ void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* number = Pop();
- HValue* result = BuildNumberToString(
- number, handle(Type::Number(), isolate()));
+ HValue* result = BuildNumberToString(number, Type::Any(zone()));
return ast_context()->ReturnValue(result);
}
@@ -10106,38 +11609,43 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
int arg_count = call->arguments()->length() - 1;
ASSERT(arg_count >= 1); // There's always at least a receiver.
- for (int i = 0; i < arg_count; ++i) {
- CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
- }
- CHECK_ALIVE(VisitForValue(call->arguments()->last()));
-
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ // The function is the last argument.
HValue* function = Pop();
+ // Push the arguments to the stack.
+ PushArgumentsFromEnvironment(arg_count);
- // Branch for function proxies, or other non-functions.
- HHasInstanceTypeAndBranch* typecheck =
- New<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
- HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
- HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
- HBasicBlock* join = graph()->CreateBasicBlock();
- typecheck->SetSuccessorAt(0, if_jsfunction);
- typecheck->SetSuccessorAt(1, if_nonfunction);
- FinishCurrentBlock(typecheck);
-
- set_current_block(if_jsfunction);
- HInstruction* invoke_result = Add<HInvokeFunction>(function, arg_count);
- Drop(arg_count);
- Push(invoke_result);
- Goto(if_jsfunction, join);
-
- set_current_block(if_nonfunction);
- HInstruction* call_result = Add<HCallFunction>(function, arg_count);
- Drop(arg_count);
- Push(call_result);
- Goto(if_nonfunction, join);
+ IfBuilder if_is_jsfunction(this);
+ if_is_jsfunction.If<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
- set_current_block(join);
- join->SetJoinId(call->id());
- return ast_context()->ReturnValue(Pop());
+ if_is_jsfunction.Then();
+ {
+ HInstruction* invoke_result =
+ Add<HInvokeFunction>(function, arg_count);
+ if (!ast_context()->IsEffect()) {
+ Push(invoke_result);
+ }
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+
+ if_is_jsfunction.Else();
+ {
+ HInstruction* call_result =
+ Add<HCallFunction>(function, arg_count);
+ if (!ast_context()->IsEffect()) {
+ Push(call_result);
+ }
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_is_jsfunction.End();
+
+ if (ast_context()->IsEffect()) {
+ // EffectContext::ReturnValue ignores the value, so we can just pass
+ // 'undefined' (as we do not have the call result anymore).
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+ } else {
+ return ast_context()->ReturnValue(Pop());
+ }
}
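Both IfBuilder arms above follow the same discipline: compute a result, Push() it only when the surrounding context actually wants a value, then simulate, so the join point always sees a consistent environment. A standalone sketch of that shape (plain C++; the string vector stands in for the Hydrogen environment, all names are illustrative):

    #include <string>
    #include <vector>

    std::string CallFunctionSketch(bool is_js_function, bool for_effect,
                                   std::vector<std::string>* env) {
      if (is_js_function) {
        std::string invoke_result = "invoke";   // Add<HInvokeFunction>(...)
        if (!for_effect) env->push_back(invoke_result);
      } else {
        std::string call_result = "call";       // Add<HCallFunction>(...)
        if (!for_effect) env->push_back(call_result);
      }
      if (for_effect) return "undefined";       // the value is ignored anyway
      std::string result = env->back();         // Pop() after the join
      env->pop_back();
      return result;
    }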
@@ -10153,17 +11661,16 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::LOG);
- Drop(1);
+void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathLog);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSqrtRT(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -10172,12 +11679,6 @@ void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
}
-// Check whether two RegExps are equivalent
-void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
- return Bailout(kInlinedRuntimeFunctionIsRegExpEquivalent);
-}
-
-
void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -10210,6 +11711,14 @@ void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
}
+void HOptimizedGraphBuilder::GenerateDebugCallbackSupportsStepping(
+ CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ // Debugging is not supported in optimized code.
+ return ast_context()->ReturnValue(graph()->GetConstantFalse());
+}
+
+
#undef CHECK_BAILOUT
#undef CHECK_ALIVE
@@ -10230,7 +11739,9 @@ HEnvironment::HEnvironment(HEnvironment* outer,
push_count_(0),
ast_id_(BailoutId::None()),
zone_(zone) {
- Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
+ Scope* declaration_scope = scope->DeclarationScope();
+ Initialize(declaration_scope->num_parameters() + 1,
+ declaration_scope->num_stack_slots(), 0);
}
@@ -10428,8 +11939,7 @@ HEnvironment* HEnvironment::CopyForInlining(
int arguments,
FunctionLiteral* function,
HConstant* undefined,
- InliningKind inlining_kind,
- bool undefined_receiver) const {
+ InliningKind inlining_kind) const {
ASSERT(frame_type() == JS_FUNCTION);
// Outer environment is a copy of this one without the arguments.
@@ -10467,12 +11977,6 @@ HEnvironment* HEnvironment::CopyForInlining(
ExpressionStackAt(arguments - i) : undefined;
inner->SetValueAt(i, push);
}
- // If the function we are inlining is a strict mode function or a
- // builtin function, pass undefined as the receiver for function
- // calls (instead of the global receiver).
- if (undefined_receiver) {
- inner->SetValueAt(0, undefined);
- }
inner->SetValueAt(arity + 1, context());
for (int i = arity + 2; i < inner->length(); ++i) {
inner->SetValueAt(i, undefined);
@@ -10508,7 +12012,7 @@ void HEnvironment::PrintToStd() {
HeapStringAllocator string_allocator;
StringStream trace(&string_allocator);
PrintTo(&trace);
- PrintF("%s", *trace.ToCString());
+ PrintF("%s", trace.ToCString().get());
}
@@ -10516,8 +12020,11 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
if (info->IsOptimizing()) {
Handle<String> name = info->function()->debug_name();
- PrintStringProperty("name", *name->ToCString());
- PrintStringProperty("method", *name->ToCString());
+ PrintStringProperty("name", name->ToCString().get());
+ PrintIndent();
+ trace_.Add("method \"%s:%d\"\n",
+ name->ToCString().get(),
+ info->optimization_id());
} else {
CodeStub::Major major_key = info->code_stub()->MajorKey();
PrintStringProperty("name", CodeStub::MajorName(major_key, false));
@@ -10577,10 +12084,21 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
}
PrintEmptyProperty("xhandlers");
- const char* flags = current->IsLoopSuccessorDominator()
- ? "dom-loop-succ"
- : "";
- PrintStringProperty("flags", flags);
+
+ {
+ PrintIndent();
+ trace_.Add("flags");
+ if (current->IsLoopSuccessorDominator()) {
+ trace_.Add(" \"dom-loop-succ\"");
+ }
+ if (current->IsUnreachable()) {
+ trace_.Add(" \"dead\"");
+ }
+ if (current->is_osr_entry()) {
+ trace_.Add(" \"osr\"");
+ }
+ trace_.Add("\n");
+ }
if (current->dominator() != NULL) {
PrintBlockProperty("dominator", current->dominator()->block_id());
@@ -10620,14 +12138,22 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
Tag HIR_tag(this, "HIR");
for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
- int bci = FLAG_emit_opt_code_positions && instruction->has_position() ?
- instruction->position() : 0;
int uses = instruction->UseCount();
PrintIndent();
- trace_.Add("%d %d ", bci, uses);
+ trace_.Add("0 %d ", uses);
instruction->PrintNameTo(&trace_);
trace_.Add(" ");
instruction->PrintTo(&trace_);
+ if (FLAG_hydrogen_track_positions &&
+ instruction->has_position() &&
+ instruction->position().raw() != 0) {
+ const HSourcePosition pos = instruction->position();
+ trace_.Add(" pos:");
+ if (pos.inlining_id() != 0) {
+ trace_.Add("%d_", pos.inlining_id());
+ }
+ trace_.Add("%d", pos.position());
+ }
trace_.Add(" <|@\n");
}
}
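With FLAG_hydrogen_track_positions enabled, each instruction line in the trace therefore ends in pos:&lt;inlining-id&gt;_&lt;offset&gt;, the id part being omitted for the top-level function (inlining id 0). A hypothetical trace line (illustrative value ids and operands) might read:

    0 2 v17 Add v3 v4 pos:1_42 <|@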
@@ -10737,7 +12263,8 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type,
void HTracer::FlushToFile() {
- AppendChars(filename_.start(), *trace_.ToCString(), trace_.length(), false);
+ AppendChars(filename_.start(), trace_.ToCString().get(), trace_.length(),
+ false);
trace_.Reset();
}
diff --git a/chromium/v8/src/hydrogen.h b/chromium/v8/src/hydrogen.h
index 61e98b2b0ce..5df1d65debe 100644
--- a/chromium/v8/src/hydrogen.h
+++ b/chromium/v8/src/hydrogen.h
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_H_
#define V8_HYDROGEN_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "accessors.h"
-#include "allocation.h"
-#include "ast.h"
-#include "compiler.h"
-#include "hydrogen-instructions.h"
-#include "zone.h"
-#include "scopes.h"
+#include "src/accessors.h"
+#include "src/allocation.h"
+#include "src/ast.h"
+#include "src/compiler.h"
+#include "src/hydrogen-instructions.h"
+#include "src/zone.h"
+#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -110,8 +87,9 @@ class HBasicBlock V8_FINAL : public ZoneObject {
bool IsFinished() const { return end_ != NULL; }
void AddPhi(HPhi* phi);
void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr, int position);
+ void AddInstruction(HInstruction* instr, HSourcePosition position);
bool Dominates(HBasicBlock* other) const;
+ bool EqualToOrDominates(HBasicBlock* other) const;
int LoopNestingDepth() const;
void SetInitialEnvironment(HEnvironment* env);
@@ -136,7 +114,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
int PredecessorIndexOf(HBasicBlock* predecessor) const;
HPhi* AddNewPhi(int merged_index);
HSimulate* AddNewSimulate(BailoutId ast_id,
- int position,
+ HSourcePosition position,
RemovableSimulate removable = FIXED_SIMULATE) {
HSimulate* instr = CreateSimulate(ast_id, removable);
AddInstruction(instr, position);
@@ -173,6 +151,11 @@ class HBasicBlock V8_FINAL : public ZoneObject {
dominates_loop_successors_ = true;
}
+ bool IsOrdered() const { return is_ordered_; }
+ void MarkAsOrdered() { is_ordered_ = true; }
+
+ void MarkSuccEdgeUnreachable(int succ);
+
inline Zone* zone() const;
#ifdef DEBUG
@@ -183,13 +166,13 @@ class HBasicBlock V8_FINAL : public ZoneObject {
friend class HGraphBuilder;
HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
- void Finish(HControlInstruction* last, int position);
- void FinishExit(HControlInstruction* instruction, int position);
+ void Finish(HControlInstruction* last, HSourcePosition position);
+ void FinishExit(HControlInstruction* instruction, HSourcePosition position);
void Goto(HBasicBlock* block,
- int position,
+ HSourcePosition position,
FunctionState* state = NULL,
bool add_simulate = true);
- void GotoNoSimulate(HBasicBlock* block, int position) {
+ void GotoNoSimulate(HBasicBlock* block, HSourcePosition position) {
Goto(block, position, NULL, false);
}
@@ -197,7 +180,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value,
FunctionState* state,
- int position);
+ HSourcePosition position);
private:
void RegisterPredecessor(HBasicBlock* pred);
@@ -227,6 +210,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
bool is_reachable_ : 1;
bool dominates_loop_successors_ : 1;
bool is_osr_entry_ : 1;
+ bool is_ordered_ : 1;
};
@@ -435,9 +419,11 @@ class HGraph V8_FINAL : public ZoneObject {
void MarkDependsOnEmptyArrayProtoElements() {
// Add map dependency if not already added.
if (depends_on_empty_array_proto_elements_) return;
- isolate()->initial_object_prototype()->map()->AddDependentCompilationInfo(
+ Map::AddDependentCompilationInfo(
+ handle(isolate()->initial_object_prototype()->map()),
DependentCode::kElementsCantBeAddedGroup, info());
- isolate()->initial_array_prototype()->map()->AddDependentCompilationInfo(
+ Map::AddDependentCompilationInfo(
+ handle(isolate()->initial_array_prototype()->map()),
DependentCode::kElementsCantBeAddedGroup, info());
depends_on_empty_array_proto_elements_ = true;
}
@@ -468,6 +454,16 @@ class HGraph V8_FINAL : public ZoneObject {
void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
+ // If we are tracking source positions, then this function assigns a unique
+ // identifier to each inlining and dumps the function's source if it was
+ // inlined for the first time during the current optimization.
+ int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ HSourcePosition position);
+
+ // Converts the given HSourcePosition to the absolute offset from the start
+ // of the corresponding script.
+ int SourcePositionToScriptPosition(HSourcePosition position);
+
private:
HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -513,6 +509,23 @@ class HGraph V8_FINAL : public ZoneObject {
int no_side_effects_scope_count_;
bool disallow_adding_new_values_;
+ class InlinedFunctionInfo {
+ public:
+ explicit InlinedFunctionInfo(Handle<SharedFunctionInfo> shared)
+ : shared_(shared), start_position_(shared->start_position()) {
+ }
+
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ int start_position() const { return start_position_; }
+
+ private:
+ Handle<SharedFunctionInfo> shared_;
+ int start_position_;
+ };
+
+ int next_inline_id_;
+ ZoneList<InlinedFunctionInfo> inlined_functions_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -640,16 +653,7 @@ class HEnvironment V8_FINAL : public ZoneObject {
int arguments,
FunctionLiteral* function,
HConstant* undefined,
- InliningKind inlining_kind,
- bool undefined_receiver) const;
-
- static bool UseUndefinedReceiver(Handle<JSFunction> closure,
- FunctionLiteral* function,
- CallKind call_kind,
- InliningKind inlining_kind) {
- return (closure->shared()->native() || !function->is_classic_mode()) &&
- call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN;
- }
+ InliningKind inlining_kind) const;
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_;
@@ -888,7 +892,8 @@ class FunctionState V8_FINAL {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind);
+ InliningKind inlining_kind,
+ int inlining_id);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
@@ -918,6 +923,8 @@ class FunctionState V8_FINAL {
bool arguments_pushed() { return arguments_elements() != NULL; }
+ int inlining_id() const { return inlining_id_; }
+
private:
HOptimizedGraphBuilder* owner_;
@@ -947,6 +954,9 @@ class FunctionState V8_FINAL {
HArgumentsObject* arguments_object_;
HArgumentsElements* arguments_elements_;
+ int inlining_id_;
+ HSourcePosition outer_source_position_;
+
FunctionState* outer_;
};
@@ -995,15 +1005,51 @@ class HIfContinuation V8_FINAL {
};
+class HAllocationMode V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit HAllocationMode(Handle<AllocationSite> feedback_site)
+ : current_site_(NULL), feedback_site_(feedback_site),
+ pretenure_flag_(NOT_TENURED) {}
+ explicit HAllocationMode(HValue* current_site)
+ : current_site_(current_site), pretenure_flag_(NOT_TENURED) {}
+ explicit HAllocationMode(PretenureFlag pretenure_flag)
+ : current_site_(NULL), pretenure_flag_(pretenure_flag) {}
+ HAllocationMode()
+ : current_site_(NULL), pretenure_flag_(NOT_TENURED) {}
+
+ HValue* current_site() const { return current_site_; }
+ Handle<AllocationSite> feedback_site() const { return feedback_site_; }
+
+ bool CreateAllocationMementos() const V8_WARN_UNUSED_RESULT {
+ return current_site() != NULL;
+ }
+
+ PretenureFlag GetPretenureMode() const V8_WARN_UNUSED_RESULT {
+ if (!feedback_site().is_null()) return feedback_site()->GetPretenureMode();
+ return pretenure_flag_;
+ }
+
+ private:
+ HValue* current_site_;
+ Handle<AllocationSite> feedback_site_;
+ PretenureFlag pretenure_flag_;
+};
+
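GetPretenureMode() above gives AllocationSite feedback precedence over any statically requested flag. A condensed standalone sketch of that precedence (plain C++; the types are illustrative stand-ins, not the V8 classes):

    enum PretenureFlagSketch { NOT_TENURED_SKETCH, TENURED_SKETCH };

    struct SiteSketch { PretenureFlagSketch mode; };  // stands in for AllocationSite

    struct AllocationModeSketch {
      const SiteSketch* feedback_site;   // may be null
      PretenureFlagSketch pretenure_flag;

      PretenureFlagSketch GetPretenureMode() const {
        // Feedback, when present, overrides the statically requested flag.
        if (feedback_site != nullptr) return feedback_site->mode;
        return pretenure_flag;
      }
    };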
+
class HGraphBuilder {
public:
explicit HGraphBuilder(CompilationInfo* info)
: info_(info),
graph_(NULL),
current_block_(NULL),
- position_(RelocInfo::kNoPosition) {}
+ scope_(info->scope()),
+ position_(HSourcePosition::Unknown()),
+ start_position_(0) {}
virtual ~HGraphBuilder() {}
+ Scope* scope() const { return scope_; }
+ void set_scope(Scope* scope) { scope_ = scope; }
+
HBasicBlock* current_block() const { return current_block_; }
void set_current_block(HBasicBlock* block) { current_block_ = block; }
HEnvironment* environment() const {
@@ -1031,7 +1077,7 @@ class HGraphBuilder {
HBasicBlock* target,
FunctionState* state = NULL,
bool add_simulate = true) {
- from->Goto(target, position_, state, add_simulate);
+ from->Goto(target, source_position(), state, add_simulate);
}
void Goto(HBasicBlock* target,
FunctionState* state = NULL,
@@ -1047,7 +1093,7 @@ class HGraphBuilder {
void AddLeaveInlined(HBasicBlock* block,
HValue* return_value,
FunctionState* state) {
- block->AddLeaveInlined(return_value, state, position_);
+ block->AddLeaveInlined(return_value, state, source_position());
}
void AddLeaveInlined(HValue* return_value, FunctionState* state) {
return AddLeaveInlined(current_block(), return_value, state);
@@ -1253,7 +1299,9 @@ class HGraphBuilder {
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
- int position() const { return position_; }
+ // When initializing arrays, we'll unroll the loop if the number of elements
+ // is known at compile time and is <= kElementLoopUnrollThreshold.
+ static const int kElementLoopUnrollThreshold = 8;
protected:
virtual bool BuildGraph() = 0;
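kElementLoopUnrollThreshold above bounds how many per-element initializing stores the builder will emit in place of a loop. A sketch of that decision (plain C++, with printf standing in for instruction emission; the names are illustrative):

    #include <cstdio>

    void EmitElementInitSketch(int constant_length) {
      const int kThresholdSketch = 8;  // mirrors kElementLoopUnrollThreshold
      if (constant_length >= 0 && constant_length <= kThresholdSketch) {
        // Small, known length: emit one store per element (unrolled).
        for (int i = 0; i < constant_length; i++)
          std::printf("store elements[%d] <- hole\n", i);
      } else {
        // Unknown or large length: emit a single loop over the elements.
        std::printf("loop i in [0, length): store elements[i] <- hole\n");
      }
    }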
@@ -1261,8 +1309,17 @@ class HGraphBuilder {
HBasicBlock* CreateBasicBlock(HEnvironment* env);
HBasicBlock* CreateLoopHeaderBlock();
+ template <class BitFieldClass>
+ HValue* BuildDecodeField(HValue* encoded_field) {
+ HValue* shifted_field = AddUncasted<HShr>(encoded_field,
+ Add<HConstant>(static_cast<int>(BitFieldClass::kShift)));
+ HValue* mask_value = Add<HConstant>(static_cast<int>(BitFieldClass::kMask));
+ return AddUncasted<HBitwise>(Token::BIT_AND, shifted_field, mask_value);
+ }
+
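BuildDecodeField emits the usual shift-then-mask decode as Hydrogen instructions. The same decode in plain C++, under the assumption (matching the order above) that kMask is the post-shift mask; the bit-field layout below is hypothetical:

    #include <cstdint>

    struct ElementsKindBitsSketch {         // hypothetical bit-field layout
      static const int kShift = 3;
      static const uint32_t kMask = 0x1f;   // 5 bits, already shifted down
    };

    template <class BitFieldClass>
    uint32_t DecodeFieldSketch(uint32_t encoded_field) {
      // Shift the field down to bit 0, then mask off neighbouring fields.
      return (encoded_field >> BitFieldClass::kShift) & BitFieldClass::kMask;
    }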
+ HValue* BuildGetElementsKind(HValue* object);
+
HValue* BuildCheckHeapObject(HValue* object);
- HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
HValue* BuildCheckString(HValue* string);
HValue* BuildWrapReceiver(HValue* object, HValue* function);
@@ -1272,7 +1329,8 @@ class HGraphBuilder {
ElementsKind kind,
HValue* length,
HValue* key,
- bool is_js_array);
+ bool is_js_array,
+ PropertyAccessType access_type);
HValue* BuildCopyElementsOnWrite(HValue* object,
HValue* elements,
@@ -1285,14 +1343,51 @@ class HGraphBuilder {
ElementsKind to_kind,
bool is_jsarray);
- HValue* BuildNumberToString(HValue* object, Handle<Type> type);
+ HValue* BuildNumberToString(HValue* object, Type* type);
- HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
- HValue* key);
+ void BuildJSObjectCheck(HValue* receiver,
+ int bit_field_mask);
+
+ // Checks a key value that is being used in a keyed element access context.
+ // If the key is an index, i.e. a smi or a number in a unique string with a
+ // cached numeric value, the "true" branch of the continuation is joined.
+ // Otherwise, if the key is a name or a unique string, the "false" branch of
+ // the continuation is joined. Otherwise, a deoptimization is triggered. In
+ // both paths of the continuation, the key is pushed on top of the
+ // environment.
+ void BuildKeyedIndexCheck(HValue* key,
+ HIfContinuation* join_continuation);
+
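A standalone sketch of the three-way classification described above (plain C++; the boolean inputs stand in for the smi, unique-string, and cached-index checks that are really emitted as Hydrogen instructions):

    enum KeyKindSketch { KEY_IS_INDEX, KEY_IS_NAME, KEY_NEEDS_DEOPT };

    KeyKindSketch ClassifyKeySketch(bool is_smi, bool is_unique_name,
                                    bool has_cached_array_index) {
      if (is_smi || has_cached_array_index) return KEY_IS_INDEX;
      if (is_unique_name) return KEY_IS_NAME;
      return KEY_NEEDS_DEOPT;   // triggers a deoptimization in the builder
    }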
+ // Checks whether the properties of an object are in dictionary mode, in
+ // which case the "true" branch of the continuation is taken; otherwise the
+ // "false" branch is taken.
+ void BuildTestForDictionaryProperties(HValue* object,
+ HIfContinuation* continuation);
- // Computes the size for a sequential string of the given length and encoding.
- HValue* BuildSeqStringSizeFor(HValue* length,
- String::Encoding encoding);
+ void BuildNonGlobalObjectCheck(HValue* receiver);
+
+ HValue* BuildKeyedLookupCacheHash(HValue* object,
+ HValue* key);
+
+ HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
+ HValue* elements,
+ HValue* key,
+ HValue* hash);
+
+ HValue* BuildRegExpConstructResult(HValue* length,
+ HValue* index,
+ HValue* input);
+
+ // Allocates a new object according to the given allocation properties.
+ HAllocate* BuildAllocate(HValue* object_size,
+ HType type,
+ InstanceType instance_type,
+ HAllocationMode allocation_mode);
+ // Computes the sum of two string lengths, taking care of overflow.
+ HValue* BuildAddStringLengths(HValue* left_length, HValue* right_length);
+ // Creates a cons string using the two input strings.
+ HValue* BuildCreateConsString(HValue* length,
+ HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode);
// Copies characters from one sequential string to another.
void BuildCopySeqStringChars(HValue* src,
HValue* src_offset,
@@ -1301,14 +1396,18 @@ class HGraphBuilder {
HValue* dst_offset,
String::Encoding dst_encoding,
HValue* length);
+
+ // Aligns an object size to the object alignment boundary.
+ HValue* BuildObjectSizeAlignment(HValue* unaligned_size, int header_size);
+
// Both operands are non-empty strings.
HValue* BuildUncheckedStringAdd(HValue* left,
HValue* right,
- PretenureFlag pretenure_flag);
- // Both operands are strings.
+ HAllocationMode allocation_mode);
+ // Adds two strings using the given allocation mode, validating type feedback.
HValue* BuildStringAdd(HValue* left,
HValue* right,
- PretenureFlag pretenure_flag);
+ HAllocationMode allocation_mode);
HInstruction* BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
@@ -1316,7 +1415,7 @@ class HGraphBuilder {
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
@@ -1326,20 +1425,19 @@ class HGraphBuilder {
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
- HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
- HInstruction* AddLoadNamedField(HValue* object, HObjectAccess access);
- HInstruction* BuildLoadStringLength(HValue* object, HValue* checked_value);
- HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map> map);
- HStoreNamedField* AddStoreMapConstantNoWriteBarrier(HValue* object,
- Handle<Map> map) {
- HStoreNamedField* store_map = AddStoreMapConstant(object, map);
- store_map->SkipWriteBarrier();
- return store_map;
+ HInstruction* AddLoadStringInstanceType(HValue* string);
+ HInstruction* AddLoadStringLength(HValue* string);
+ HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map> map) {
+ return Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
+ Add<HConstant>(map));
}
- HLoadNamedField* AddLoadElements(HValue* object);
+ HLoadNamedField* AddLoadMap(HValue* object,
+ HValue* dependency = NULL);
+ HLoadNamedField* AddLoadElements(HValue* object,
+ HValue* dependency = NULL);
bool MatchRotateRight(HValue* left,
HValue* right,
@@ -1349,20 +1447,25 @@ class HGraphBuilder {
HValue* BuildBinaryOperation(Token::Value op,
HValue* left,
HValue* right,
- Handle<Type> left_type,
- Handle<Type> right_type,
- Handle<Type> result_type,
- Maybe<int> fixed_right_arg);
+ Type* left_type,
+ Type* right_type,
+ Type* result_type,
+ Maybe<int> fixed_right_arg,
+ HAllocationMode allocation_mode);
- HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
+ HLoadNamedField* AddLoadFixedArrayLength(HValue *object,
+ HValue *dependency = NULL);
+
+ HLoadNamedField* AddLoadArrayLength(HValue *object,
+ ElementsKind kind,
+ HValue *dependency = NULL);
HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
- HValue* EnforceNumberType(HValue* number, Handle<Type> expected);
- HValue* TruncateToNumber(HValue* value, Handle<Type>* expected);
+ HValue* EnforceNumberType(HValue* number, Type* expected);
+ HValue* TruncateToNumber(HValue* value, Type** expected);
- void FinishExitWithHardDeoptimization(const char* reason,
- HBasicBlock* continuation);
+ void FinishExitWithHardDeoptimization(const char* reason);
void AddIncrementCounter(StatsCounter* counter);
@@ -1605,10 +1708,28 @@ class HGraphBuilder {
bool finished_;
};
- HValue* BuildNewElementsCapacity(HValue* old_capacity);
+ template <class A, class P1>
+ void DeoptimizeIf(P1 p1, const char* reason) {
+ IfBuilder builder(this);
+ builder.If<A>(p1);
+ builder.ThenDeopt(reason);
+ }
- void BuildNewSpaceArrayCheck(HValue* length,
- ElementsKind kind);
+ template <class A, class P1, class P2>
+ void DeoptimizeIf(P1 p1, P2 p2, const char* reason) {
+ IfBuilder builder(this);
+ builder.If<A>(p1, p2);
+ builder.ThenDeopt(reason);
+ }
+
+ template <class A, class P1, class P2, class P3>
+ void DeoptimizeIf(P1 p1, P2 p2, P3 p3, const char* reason) {
+ IfBuilder builder(this);
+ builder.If<A>(p1, p2, p3);
+ builder.ThenDeopt(reason);
+ }
+
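Each DeoptimizeIf overload above is shorthand for the same three-line IfBuilder idiom; a hypothetical call site (the condition class and reason string are examples only) expands as:

    // DeoptimizeIf<HCompareObjectEqAndBranch>(left, right, "bad feedback");
    // is equivalent to:
    //   IfBuilder builder(this);
    //   builder.If<HCompareObjectEqAndBranch>(left, right);
    //   builder.ThenDeopt("bad feedback");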
+ HValue* BuildNewElementsCapacity(HValue* old_capacity);
class JSArrayBuilder V8_FINAL {
public:
@@ -1628,10 +1749,24 @@ class HGraphBuilder {
};
ElementsKind kind() { return kind_; }
-
- HValue* AllocateEmptyArray();
- HValue* AllocateArray(HValue* capacity, HValue* length_field,
- FillMode fill_mode = FILL_WITH_HOLE);
+ HAllocate* elements_location() { return elements_location_; }
+
+ HAllocate* AllocateEmptyArray();
+ HAllocate* AllocateArray(HValue* capacity,
+ HValue* length_field,
+ FillMode fill_mode = FILL_WITH_HOLE);
+ // Use these allocators when the capacity may be unknown at compile time
+ // but its limit is known. For a constant |capacity| the value of
+ // |capacity_upper_bound| is ignored and the actual |capacity|
+ // value is used as the upper bound.
+ HAllocate* AllocateArray(HValue* capacity,
+ int capacity_upper_bound,
+ HValue* length_field,
+ FillMode fill_mode = FILL_WITH_HOLE);
+ HAllocate* AllocateArray(HValue* capacity,
+ HConstant* capacity_upper_bound,
+ HValue* length_field,
+ FillMode fill_mode = FILL_WITH_HOLE);
HValue* GetElementsLocation() { return elements_location_; }
HValue* EmitMapCode();
@@ -1648,25 +1783,23 @@ class HGraphBuilder {
}
HValue* EmitInternalMapCode();
- HValue* EstablishEmptyArrayAllocationSize();
- HValue* EstablishAllocationSize(HValue* length_node);
- HValue* AllocateArray(HValue* size_in_bytes, HValue* capacity,
- HValue* length_field,
- FillMode fill_mode = FILL_WITH_HOLE);
HGraphBuilder* builder_;
ElementsKind kind_;
AllocationSiteMode mode_;
HValue* allocation_site_payload_;
HValue* constructor_function_;
- HInnerAllocatedObject* elements_location_;
+ HAllocate* elements_location_;
};
HValue* BuildAllocateArrayFromLength(JSArrayBuilder* array_builder,
HValue* length_argument);
+ HValue* BuildCalculateElementsSize(ElementsKind kind,
+ HValue* capacity);
+ HAllocate* AllocateJSArrayObject(AllocationSiteMode mode);
+ HConstant* EstablishElementsAllocationSize(ElementsKind kind, int capacity);
- HValue* BuildAllocateElements(ElementsKind kind,
- HValue* capacity);
+ HAllocate* BuildAllocateElements(ElementsKind kind, HValue* size_in_bytes);
void BuildInitializeElementsHeader(HValue* elements,
ElementsKind kind,
@@ -1675,16 +1808,17 @@ class HGraphBuilder {
HValue* BuildAllocateElementsAndInitializeElementsHeader(ElementsKind kind,
HValue* capacity);
- // array must have been allocated with enough room for
- // 1) the JSArray, 2) a AllocationMemento if mode requires it,
- // 3) a FixedArray or FixedDoubleArray.
- // A pointer to the Fixed(Double)Array is returned.
- HInnerAllocatedObject* BuildJSArrayHeader(HValue* array,
- HValue* array_map,
- AllocationSiteMode mode,
- ElementsKind elements_kind,
- HValue* allocation_site_payload,
- HValue* length_field);
+ // |array| must have been allocated with enough room for
+ // 1) the JSArray and 2) an AllocationMemento if mode requires it.
+ // If the |elements| value provided is NULL, then the array's elements
+ // storage is initialized with the empty array.
+ void BuildJSArrayHeader(HValue* array,
+ HValue* array_map,
+ HValue* elements,
+ AllocationSiteMode mode,
+ ElementsKind elements_kind,
+ HValue* allocation_site_payload,
+ HValue* length_field);
HValue* BuildGrowElementsCapacity(HValue* object,
HValue* elements,
@@ -1693,6 +1827,12 @@ class HGraphBuilder {
HValue* length,
HValue* new_capacity);
+ void BuildFillElementsWithValue(HValue* elements,
+ ElementsKind elements_kind,
+ HValue* from,
+ HValue* to,
+ HValue* value);
+
void BuildFillElementsWithHole(HValue* elements,
ElementsKind elements_kind,
HValue* from,
@@ -1705,34 +1845,63 @@ class HGraphBuilder {
HValue* length,
HValue* capacity);
- HValue* BuildCloneShallowArray(HValue* boilerplate,
- HValue* allocation_site,
- AllocationSiteMode mode,
- ElementsKind kind,
- int length);
+ HValue* BuildCloneShallowArrayCow(HValue* boilerplate,
+ HValue* allocation_site,
+ AllocationSiteMode mode,
+ ElementsKind kind);
+
+ HValue* BuildCloneShallowArrayEmpty(HValue* boilerplate,
+ HValue* allocation_site,
+ AllocationSiteMode mode);
+
+ HValue* BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
+ HValue* allocation_site,
+ AllocationSiteMode mode,
+ ElementsKind kind);
HValue* BuildElementIndexHash(HValue* index);
void BuildCompareNil(
HValue* value,
- Handle<Type> type,
+ Type* type,
HIfContinuation* continuation);
void BuildCreateAllocationMemento(HValue* previous_object,
HValue* previous_object_size,
HValue* payload);
- HInstruction* BuildConstantMapCheck(Handle<JSObject> constant,
- CompilationInfo* info);
+ HInstruction* BuildConstantMapCheck(Handle<JSObject> constant);
HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder);
+ HInstruction* BuildGetNativeContext(HValue* closure);
HInstruction* BuildGetNativeContext();
HInstruction* BuildGetArrayFunction();
protected:
void SetSourcePosition(int position) {
ASSERT(position != RelocInfo::kNoPosition);
+ position_.set_position(position - start_position_);
+ }
+
+ void EnterInlinedSource(int start_position, int id) {
+ if (FLAG_hydrogen_track_positions) {
+ start_position_ = start_position;
+ position_.set_inlining_id(id);
+ }
+ }
+
+ // Converts the given absolute offset from the start of the script to an
+ // HSourcePosition, assuming that this position corresponds to the same
+ // function as the current position_.
+ HSourcePosition ScriptPositionToSourcePosition(int position) {
+ HSourcePosition pos = position_;
+ pos.set_position(position - start_position_);
+ return pos;
+ }
+
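Positions are kept relative to the enclosing function's start, so the conversion above is a subtraction tagged with the current inlining id. A self-contained sketch with hypothetical numbers (illustrative names, not the V8 types):

    struct SourcePositionSketch {
      int inlining_id;  // which (inlined) function the position belongs to
      int position;     // offset relative to that function's start_position
    };

    SourcePositionSketch ToSourcePositionSketch(int script_offset,
                                                int start_position,
                                                int inlining_id) {
      // e.g. script offset 1234 in a function starting at 1200 -> offset 34
      SourcePositionSketch pos = { inlining_id, script_offset - start_position };
      return pos;
    }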
+ HSourcePosition source_position() { return position_; }
+ void set_source_position(HSourcePosition position) {
position_ = position;
}
@@ -1752,9 +1921,6 @@ class HGraphBuilder {
HValue* mask,
int current_probe);
- void PadEnvironmentForContinuation(HBasicBlock* from,
- HBasicBlock* continuation);
-
template <class I>
I* AddInstructionTyped(I* instr) {
return I::cast(AddInstruction(instr));
@@ -1763,7 +1929,9 @@ class HGraphBuilder {
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
- int position_;
+ Scope* scope_;
+ HSourcePosition position_;
+ int start_position_;
};
@@ -1889,10 +2057,12 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
class BreakAndContinueInfo V8_FINAL BASE_EMBEDDED {
public:
explicit BreakAndContinueInfo(BreakableStatement* target,
+ Scope* scope,
int drop_extra = 0)
: target_(target),
break_block_(NULL),
continue_block_(NULL),
+ scope_(scope),
drop_extra_(drop_extra) {
}
@@ -1901,12 +2071,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void set_break_block(HBasicBlock* block) { break_block_ = block; }
HBasicBlock* continue_block() { return continue_block_; }
void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
+ Scope* scope() { return scope_; }
int drop_extra() { return drop_extra_; }
private:
BreakableStatement* target_;
HBasicBlock* break_block_;
HBasicBlock* continue_block_;
+ Scope* scope_;
int drop_extra_;
};
@@ -1928,7 +2100,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
// Search the break stack for a break or continue target.
enum BreakType { BREAK, CONTINUE };
- HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra);
+ HBasicBlock* Get(BreakableStatement* stmt, BreakType type,
+ Scope** scope, int* drop_extra);
private:
BreakAndContinueInfo* info_;
@@ -2015,9 +2188,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void ClearInlinedTestContext() {
function_state()->ClearInlinedTestContext();
}
- StrictModeFlag function_strict_mode_flag() {
- return function_state()->compilation_info()->is_classic_mode()
- ? kNonStrictMode : kStrictMode;
+ StrictMode function_strict_mode() {
+ return function_state()->compilation_info()->strict_mode();
}
// Generators for inline runtime functions.
@@ -2025,7 +2197,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void Generate##Name(CallRuntime* call);
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
+ INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
#undef INLINE_FUNCTION_GENERATOR_DECLARATION
void VisitDelete(UnaryOperation* expr);
@@ -2039,8 +2211,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
bool PreProcessOsrEntry(IterationStatement* statement);
void VisitLoopBody(IterationStatement* stmt,
- HBasicBlock* loop_entry,
- BreakAndContinueInfo* break_info);
+ HBasicBlock* loop_entry);
// Create a back edge in the flow graph. body_exit is the predecessor
// block and loop_entry is the successor block. loop_successor is the
@@ -2120,17 +2291,13 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HBasicBlock* true_block,
HBasicBlock* false_block);
- // Visit an argument subexpression and emit a push to the outgoing arguments.
- void VisitArgument(Expression* expr);
-
- void VisitArgumentList(ZoneList<Expression*>* arguments);
-
// Visit a list of expressions from left to right, each in a value context.
void VisitExpressions(ZoneList<Expression*>* exprs);
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
+ void PushArgumentsFromEnvironment(int count);
void SetUpScope(Scope* scope);
virtual void VisitStatements(ZoneList<Statement*>* statements) V8_OVERRIDE;
@@ -2139,6 +2306,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+ Type* ToType(Handle<Map> map) { return IC::MapToType<Type>(map, zone()); }
+
private:
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
@@ -2147,7 +2316,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
};
GlobalPropertyAccess LookupGlobalProperty(Variable* var,
LookupResult* lookup,
- bool is_store);
+ PropertyAccessType access_type);
void EnsureArgumentsArePushedForAccess();
bool TryArgumentsAccess(Property* expr);
@@ -2155,21 +2324,37 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
// Try to optimize fun.apply(receiver, arguments) pattern.
bool TryCallApply(Call* expr);
+ bool TryHandleArrayCall(Call* expr, HValue* function);
+ bool TryHandleArrayCallNew(CallNew* expr, HValue* function);
+ void BuildArrayCall(Expression* expr, int arguments_count, HValue* function,
+ Handle<AllocationSite> cell);
+
+ enum ArrayIndexOfMode { kFirstIndexOf, kLastIndexOf };
+ HValue* BuildArrayIndexOf(HValue* receiver,
+ HValue* search_element,
+ ElementsKind kind,
+ ArrayIndexOfMode mode);
+
+ HValue* ImplicitReceiverFor(HValue* function,
+ Handle<JSFunction> target);
+
int InliningAstSize(Handle<JSFunction> target);
- bool TryInline(CallKind call_kind,
- Handle<JSFunction> target,
+ bool TryInline(Handle<JSFunction> target,
int arguments_count,
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind);
+ InliningKind inlining_kind,
+ HSourcePosition position);
- bool TryInlineCall(Call* expr, bool drop_extra = false);
+ bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
bool TryInlineGetter(Handle<JSFunction> getter,
+ Handle<Map> receiver_map,
BailoutId ast_id,
BailoutId return_id);
bool TryInlineSetter(Handle<JSFunction> setter,
+ Handle<Map> receiver_map,
BailoutId id,
BailoutId assignment_id,
HValue* implicit_return_value);
@@ -2178,9 +2363,30 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
int arguments_count);
bool TryInlineBuiltinMethodCall(Call* expr,
HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type);
- bool TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra);
+ Handle<Map> receiver_map);
+ bool TryInlineBuiltinFunctionCall(Call* expr);
+ enum ApiCallType {
+ kCallApiFunction,
+ kCallApiMethod,
+ kCallApiGetter,
+ kCallApiSetter
+ };
+ bool TryInlineApiMethodCall(Call* expr,
+ HValue* receiver,
+ SmallMapList* receiver_types);
+ bool TryInlineApiFunctionCall(Call* expr, HValue* receiver);
+ bool TryInlineApiGetter(Handle<JSFunction> function,
+ Handle<Map> receiver_map,
+ BailoutId ast_id);
+ bool TryInlineApiSetter(Handle<JSFunction> function,
+ Handle<Map> receiver_map,
+ BailoutId ast_id);
+ bool TryInlineApiCall(Handle<JSFunction> function,
+ HValue* receiver,
+ SmallMapList* receiver_maps,
+ int argc,
+ BailoutId ast_id,
+ ApiCallType call_type);
// If --trace-inlining, print a line of the inlining trace. Inlining
// succeeded if the reason string is NULL and failed if there is a
@@ -2195,31 +2401,49 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicLoadNamedField(BailoutId ast_id,
- BailoutId return_id,
- HValue* object,
- SmallMapList* types,
- Handle<String> name);
-
- void VisitTypedArrayInitialize(CallRuntime* expr);
-
- bool IsCallNewArrayInlineable(CallNew* expr);
- void BuildInlinedCallNewArray(CallNew* expr);
-
- void VisitDataViewInitialize(CallRuntime* expr);
+ void HandlePolymorphicNamedFieldAccess(PropertyAccessType access_type,
+ BailoutId ast_id,
+ BailoutId return_id,
+ HValue* object,
+ HValue* value,
+ SmallMapList* types,
+ Handle<String> name);
+
+ HValue* BuildAllocateExternalElements(
+ ExternalArrayType array_type,
+ bool is_zero_byte_offset,
+ HValue* buffer, HValue* byte_offset, HValue* length);
+ HValue* BuildAllocateFixedTypedArray(
+ ExternalArrayType array_type, size_t element_size,
+ ElementsKind fixed_elements_kind,
+ HValue* byte_length, HValue* length);
+
+ Handle<JSFunction> array_function() {
+ return handle(isolate()->native_context()->array_function());
+ }
+
+ bool IsCallArrayInlineable(int argument_count, Handle<AllocationSite> site);
+ void BuildInlinedCallArray(Expression* expression, int argument_count,
+ Handle<AllocationSite> site);
class PropertyAccessInfo {
public:
- PropertyAccessInfo(Isolate* isolate, Handle<Map> map, Handle<String> name)
- : lookup_(isolate),
- map_(map),
+ PropertyAccessInfo(HOptimizedGraphBuilder* builder,
+ PropertyAccessType access_type,
+ Type* type,
+ Handle<String> name)
+ : lookup_(builder->isolate()),
+ builder_(builder),
+ access_type_(access_type),
+ type_(type),
name_(name),
+ field_type_(HType::Tagged()),
access_(HObjectAccess::ForMap()) { }
// Checks whether this PropertyAccessInfo can be handled as a monomorphic
// named access. It additionally fills in the fields necessary to generate
// the lookup code.
- bool CanLoadMonomorphic();
+ bool CanAccessMonomorphic();
  // Checks whether all types behave uniformly when loading name. If all maps
// behave the same, a single monomorphic load instruction can be emitted,
@@ -2227,56 +2451,74 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
// an instance of any of the types.
// This method skips the first type in types, assuming that this
// PropertyAccessInfo is built for types->first().
- bool CanLoadAsMonomorphic(SmallMapList* types);
+ bool CanAccessAsMonomorphic(SmallMapList* types);
+
+ Handle<Map> map() {
+ if (type_->Is(Type::Number())) {
+ Context* context = current_info()->closure()->context();
+ context = context->native_context();
+ return handle(context->number_function()->initial_map());
+ } else if (type_->Is(Type::Boolean())) {
+ Context* context = current_info()->closure()->context();
+ context = context->native_context();
+ return handle(context->boolean_function()->initial_map());
+ } else if (type_->Is(Type::String())) {
+ Context* context = current_info()->closure()->context();
+ context = context->native_context();
+ return handle(context->string_function()->initial_map());
+ } else {
+ return type_->AsClass()->Map();
+ }
+ }
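The branches above correspond to named property access on primitive receivers, which Crankshaft resolves through the matching wrapper function's initial map. A JS-side sketch of the receivers involved (illustrative only, not part of this patch):

    (3.14).toFixed(1);   // Number receiver  -> number_function initial map
    'hello'.length;      // String receiver  -> string_function initial map
    true.valueOf();      // Boolean receiver -> boolean_function initial map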
+ Type* type() const { return type_; }
+ Handle<String> name() const { return name_; }
bool IsJSObjectFieldAccessor() {
int offset; // unused
- return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset);
+ return Accessors::IsJSObjectFieldAccessor<Type>(type_, name_, &offset);
}
bool GetJSObjectFieldAccess(HObjectAccess* access) {
- if (IsStringLength()) {
- *access = HObjectAccess::ForStringLength();
- return true;
- } else if (IsArrayLength()) {
- *access = HObjectAccess::ForArrayLength(map_->elements_kind());
- return true;
- } else {
- int offset;
- if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
- *access = HObjectAccess::ForJSObjectOffset(offset);
- return true;
+ int offset;
+ if (Accessors::IsJSObjectFieldAccessor<Type>(type_, name_, &offset)) {
+ if (type_->Is(Type::String())) {
+ ASSERT(String::Equals(isolate()->factory()->length_string(), name_));
+ *access = HObjectAccess::ForStringLength();
+ } else if (type_->Is(Type::Array())) {
+ ASSERT(String::Equals(isolate()->factory()->length_string(), name_));
+ *access = HObjectAccess::ForArrayLength(map()->elements_kind());
+ } else {
+ *access = HObjectAccess::ForMapAndOffset(map(), offset);
}
- return false;
+ return true;
}
+ return false;
}
bool has_holder() { return !holder_.is_null(); }
+ bool IsLoad() const { return access_type_ == LOAD; }
LookupResult* lookup() { return &lookup_; }
- Handle<Map> map() { return map_; }
Handle<JSObject> holder() { return holder_; }
Handle<JSFunction> accessor() { return accessor_; }
Handle<Object> constant() { return constant_; }
+ Handle<Map> transition() { return handle(lookup_.GetTransitionTarget()); }
+ SmallMapList* field_maps() { return &field_maps_; }
+ HType field_type() const { return field_type_; }
HObjectAccess access() { return access_; }
private:
+ Type* ToType(Handle<Map> map) { return builder_->ToType(map); }
+ Zone* zone() { return builder_->zone(); }
Isolate* isolate() { return lookup_.isolate(); }
-
- bool IsStringLength() {
- return map_->instance_type() < FIRST_NONSTRING_TYPE &&
- name_->Equals(isolate()->heap()->length_string());
- }
-
- bool IsArrayLength() {
- return map_->instance_type() == JS_ARRAY_TYPE &&
- name_->Equals(isolate()->heap()->length_string());
- }
+ CompilationInfo* top_info() { return builder_->top_info(); }
+ CompilationInfo* current_info() { return builder_->current_info(); }
bool LoadResult(Handle<Map> map);
+ void LoadFieldMaps(Handle<Map> map);
bool LookupDescriptor();
bool LookupInPrototypes();
- bool IsCompatibleForLoad(PropertyAccessInfo* other);
+ bool IsCompatible(PropertyAccessInfo* other);
void GeneralizeRepresentation(Representation r) {
access_ = access_.WithRepresentation(
@@ -2284,39 +2526,40 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
}
LookupResult lookup_;
- Handle<Map> map_;
+ HOptimizedGraphBuilder* builder_;
+ PropertyAccessType access_type_;
+ Type* type_;
Handle<String> name_;
Handle<JSObject> holder_;
Handle<JSFunction> accessor_;
+ Handle<JSObject> api_holder_;
Handle<Object> constant_;
+ SmallMapList field_maps_;
+ HType field_type_;
HObjectAccess access_;
};
- HInstruction* BuildLoadMonomorphic(PropertyAccessInfo* info,
- HValue* object,
- HInstruction* checked_object,
- BailoutId ast_id,
- BailoutId return_id,
- bool can_inline_accessor = true);
-
- void HandlePolymorphicStoreNamedField(BailoutId assignment_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name);
- bool TryStorePolymorphicAsMonomorphic(BailoutId assignment_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name);
+ HInstruction* BuildMonomorphicAccess(PropertyAccessInfo* info,
+ HValue* object,
+ HValue* checked_object,
+ HValue* value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor = true);
+
+ HInstruction* BuildNamedAccess(PropertyAccessType access,
+ BailoutId ast_id,
+                                 BailoutId return_id,
+ Expression* expr,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized = false);
+
void HandlePolymorphicCallNamed(Call* expr,
HValue* receiver,
SmallMapList* types,
Handle<String> name);
- bool TryCallPolymorphicAsMonomorphic(Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name);
void HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
Handle<String> check);
@@ -2324,15 +2567,37 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Expression* sub_expr,
NilValue nil);
+ enum PushBeforeSimulateBehavior {
+ PUSH_BEFORE_SIMULATE,
+ NO_PUSH_BEFORE_SIMULATE
+ };
+
+ HControlInstruction* BuildCompareInstruction(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Type* left_type,
+ Type* right_type,
+ Type* combined_type,
+ HSourcePosition left_position,
+ HSourcePosition right_position,
+ PushBeforeSimulateBehavior push_sim_result,
+ BailoutId bailout_id);
+
HInstruction* BuildStringCharCodeAt(HValue* string,
HValue* index);
- HValue* BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right);
+
+ HValue* BuildBinaryOperation(
+ BinaryOperation* expr,
+ HValue* left,
+ HValue* right,
+ PushBeforeSimulateBehavior push_sim_result);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HInstruction* BuildLoadKeyedGeneric(HValue* object,
- HValue* key);
+ HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
+ HValue* object,
+ HValue* key,
+ HValue* value);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
@@ -2346,14 +2611,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* val,
HValue* dependency,
Handle<Map> map,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode);
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode,
bool* has_side_effects);
@@ -2361,12 +2626,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* key,
HValue* val,
Expression* expr,
- bool is_store,
+ PropertyAccessType access_type,
bool* has_side_effects);
- HInstruction* BuildLoadNamedGeneric(HValue* object,
- Handle<String> name,
- Property* expr);
+ HInstruction* BuildNamedGeneric(PropertyAccessType access,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized = false);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
@@ -2390,21 +2657,11 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
BailoutId return_id,
bool is_uninitialized = false);
- HInstruction* BuildStoreNamedField(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup);
- HInstruction* BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value);
- HInstruction* BuildStoreNamedMonomorphic(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map);
- HInstruction* BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value);
+ HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
+ HValue* checked_object);
+ HInstruction* BuildStoreNamedField(PropertyAccessInfo* info,
+ HValue* checked_object,
+ HValue* value);
HValue* BuildContextChainWalk(Variable* var);
@@ -2442,9 +2699,16 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
- void AddCheckConstantFunction(Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map);
+ HInstruction* NewPlainFunctionCall(HValue* fun,
+ int argument_count,
+ bool pass_argument_count);
+
+ HInstruction* NewArgumentAdaptorCall(HValue* fun, HValue* context,
+ int argument_count,
+ HValue* expected_param_count);
+
+ HInstruction* BuildCallConstantFunction(Handle<JSFunction> target,
+ int argument_count);
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
@@ -2538,12 +2802,12 @@ class HTracer V8_FINAL : public Malloced {
explicit HTracer(int isolate_id)
: trace_(&string_allocator_), indent_(0) {
if (FLAG_trace_hydrogen_file == NULL) {
- OS::SNPrintF(filename_,
- "hydrogen-%d-%d.cfg",
- OS::GetCurrentProcessId(),
- isolate_id);
+ SNPrintF(filename_,
+ "hydrogen-%d-%d.cfg",
+ OS::GetCurrentProcessId(),
+ isolate_id);
} else {
- OS::StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length());
+ StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length());
}
WriteChars(filename_.start(), "", 0, false);
}
diff --git a/chromium/v8/src/i18n.cc b/chromium/v8/src/i18n.cc
index 80a739c285e..900da188c4e 100644
--- a/chromium/v8/src/i18n.cc
+++ b/chromium/v8/src/i18n.cc
@@ -1,32 +1,9 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-// limitations under the License.
-#include "i18n.h"
+#include "src/i18n.h"
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
@@ -57,12 +34,11 @@ bool ExtractStringSetting(Isolate* isolate,
Handle<JSObject> options,
const char* key,
icu::UnicodeString* setting) {
- Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
- MaybeObject* maybe_object = options->GetProperty(*str);
- Object* object;
- if (maybe_object->ToObject(&object) && object->IsString()) {
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
+ Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked();
+ if (object->IsString()) {
v8::String::Utf8Value utf8_string(
- v8::Utils::ToLocal(Handle<String>(String::cast(object))));
+ v8::Utils::ToLocal(Handle<String>::cast(object)));
*setting = icu::UnicodeString::fromUTF8(*utf8_string);
return true;
}
@@ -74,10 +50,9 @@ bool ExtractIntegerSetting(Isolate* isolate,
Handle<JSObject> options,
const char* key,
int32_t* value) {
- Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
- MaybeObject* maybe_object = options->GetProperty(*str);
- Object* object;
- if (maybe_object->ToObject(&object) && object->IsNumber()) {
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
+ Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked();
+ if (object->IsNumber()) {
object->ToInt32(value);
return true;
}
@@ -89,10 +64,9 @@ bool ExtractBooleanSetting(Isolate* isolate,
Handle<JSObject> options,
const char* key,
bool* value) {
- Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
- MaybeObject* maybe_object = options->GetProperty(*str);
- Object* object;
- if (maybe_object->ToObject(&object) && object->IsBoolean()) {
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
+ Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked();
+ if (object->IsBoolean()) {
*value = object->BooleanValue();
return true;
}
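The three Extract*Setting helpers above read plain data properties off the options object handed down from the JavaScript layer. A sketch of such a bag (key names are illustrative, not an exhaustive list):

    var options = {
      timeZone: 'Europe/Paris',   // read by ExtractStringSetting
      minimumFractionDigits: 2,   // read by ExtractIntegerSetting
      useGrouping: true           // read by ExtractBooleanSetting
    };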
@@ -152,28 +126,29 @@ void SetResolvedDateSettings(Isolate* isolate,
const icu::Locale& icu_locale,
icu::SimpleDateFormat* date_format,
Handle<JSObject> resolved) {
+ Factory* factory = isolate->factory();
UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString pattern;
date_format->toPattern(pattern);
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("pattern")),
- isolate->factory()->NewStringFromTwoByte(
+ factory->NewStringFromStaticAscii("pattern"),
+ factory->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
- pattern.length())),
+ pattern.length())).ToHandleChecked(),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
// Set time zone and calendar.
const icu::Calendar* calendar = date_format->getCalendar();
const char* calendar_name = calendar->getType();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("calendar")),
- isolate->factory()->NewStringFromAscii(CStrVector(calendar_name)),
+ factory->NewStringFromStaticAscii("calendar"),
+ factory->NewStringFromAsciiChecked(calendar_name),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
const icu::TimeZone& tz = calendar->getTimeZone();
icu::UnicodeString time_zone;
@@ -185,21 +160,21 @@ void SetResolvedDateSettings(Isolate* isolate,
if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("timeZone")),
- isolate->factory()->NewStringFromAscii(CStrVector("UTC")),
+ factory->NewStringFromStaticAscii("timeZone"),
+ factory->NewStringFromStaticAscii("UTC"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
} else {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("timeZone")),
- isolate->factory()->NewStringFromTwoByte(
+ factory->NewStringFromStaticAscii("timeZone"),
+ factory->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(
canonical_time_zone.getBuffer()),
- canonical_time_zone.length())),
+ canonical_time_zone.length())).ToHandleChecked(),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
}
@@ -213,17 +188,17 @@ void SetResolvedDateSettings(Isolate* isolate,
const char* ns = numbering_system->getName();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
- isolate->factory()->NewStringFromAscii(CStrVector(ns)),
+ factory->NewStringFromStaticAscii("numberingSystem"),
+ factory->NewStringFromAsciiChecked(ns),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
} else {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
- isolate->factory()->undefined_value(),
+ factory->NewStringFromStaticAscii("numberingSystem"),
+ factory->undefined_value(),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
delete numbering_system;
@@ -235,18 +210,18 @@ void SetResolvedDateSettings(Isolate* isolate,
if (U_SUCCESS(status)) {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("locale")),
- isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ factory->NewStringFromStaticAscii("locale"),
+ factory->NewStringFromAsciiChecked(result),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("locale")),
- isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ factory->NewStringFromStaticAscii("locale"),
+ factory->NewStringFromStaticAscii("und"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
}
@@ -257,7 +232,8 @@ Handle<ObjectTemplateInfo> GetEternal(Isolate* isolate) {
return Handle<ObjectTemplateInfo>::cast(
isolate->eternal_handles()->GetSingleton(field));
}
- v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
+ v8::Local<v8::ObjectTemplate> raw_template =
+ v8::ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate));
raw_template->SetInternalFieldCount(internal_fields);
return Handle<ObjectTemplateInfo>::cast(
isolate->eternal_handles()->CreateSingleton(
@@ -378,30 +354,31 @@ void SetResolvedNumberSettings(Isolate* isolate,
const icu::Locale& icu_locale,
icu::DecimalFormat* number_format,
Handle<JSObject> resolved) {
+ Factory* factory = isolate->factory();
icu::UnicodeString pattern;
number_format->toPattern(pattern);
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("pattern")),
- isolate->factory()->NewStringFromTwoByte(
+ factory->NewStringFromStaticAscii("pattern"),
+ factory->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
- pattern.length())),
+ pattern.length())).ToHandleChecked(),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
// Set resolved currency code in options.currency if not empty.
icu::UnicodeString currency(number_format->getCurrency());
if (!currency.isEmpty()) {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("currency")),
- isolate->factory()->NewStringFromTwoByte(
+ factory->NewStringFromStaticAscii("currency"),
+ factory->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(currency.getBuffer()),
- currency.length())),
+ currency.length())).ToHandleChecked(),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
// Ugly hack. ICU doesn't expose numbering system in any way, so we have
@@ -414,78 +391,67 @@ void SetResolvedNumberSettings(Isolate* isolate,
const char* ns = numbering_system->getName();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
- isolate->factory()->NewStringFromAscii(CStrVector(ns)),
+ factory->NewStringFromStaticAscii("numberingSystem"),
+ factory->NewStringFromAsciiChecked(ns),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
} else {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
- isolate->factory()->undefined_value(),
+ factory->NewStringFromStaticAscii("numberingSystem"),
+ factory->undefined_value(),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
delete numbering_system;
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("useGrouping")),
- isolate->factory()->ToBoolean(number_format->isGroupingUsed()),
+ factory->NewStringFromStaticAscii("useGrouping"),
+ factory->ToBoolean(number_format->isGroupingUsed()),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(
- CStrVector("minimumIntegerDigits")),
- isolate->factory()->NewNumberFromInt(
- number_format->getMinimumIntegerDigits()),
+ factory->NewStringFromStaticAscii("minimumIntegerDigits"),
+ factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(
- CStrVector("minimumFractionDigits")),
- isolate->factory()->NewNumberFromInt(
- number_format->getMinimumFractionDigits()),
+ factory->NewStringFromStaticAscii("minimumFractionDigits"),
+ factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(
- CStrVector("maximumFractionDigits")),
- isolate->factory()->NewNumberFromInt(
- number_format->getMaximumFractionDigits()),
+ factory->NewStringFromStaticAscii("maximumFractionDigits"),
+ factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
- Handle<String> key = isolate->factory()->NewStringFromAscii(
- CStrVector("minimumSignificantDigits"));
- if (JSReceiver::HasLocalProperty(resolved, key)) {
+ Handle<String> key =
+ factory->NewStringFromStaticAscii("minimumSignificantDigits");
+ if (JSReceiver::HasOwnProperty(resolved, key)) {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(
- CStrVector("minimumSignificantDigits")),
- isolate->factory()->NewNumberFromInt(
- number_format->getMinimumSignificantDigits()),
+ factory->NewStringFromStaticAscii("minimumSignificantDigits"),
+ factory->NewNumberFromInt(number_format->getMinimumSignificantDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
- key = isolate->factory()->NewStringFromAscii(
- CStrVector("maximumSignificantDigits"));
- if (JSReceiver::HasLocalProperty(resolved, key)) {
+ key = factory->NewStringFromStaticAscii("maximumSignificantDigits");
+ if (JSReceiver::HasOwnProperty(resolved, key)) {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(
- CStrVector("maximumSignificantDigits")),
- isolate->factory()->NewNumberFromInt(
- number_format->getMaximumSignificantDigits()),
+ factory->NewStringFromStaticAscii("maximumSignificantDigits"),
+ factory->NewNumberFromInt(number_format->getMaximumSignificantDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
// Set the locale
@@ -496,18 +462,18 @@ void SetResolvedNumberSettings(Isolate* isolate,
if (U_SUCCESS(status)) {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("locale")),
- isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ factory->NewStringFromStaticAscii("locale"),
+ factory->NewStringFromAsciiChecked(result),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("locale")),
- isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ factory->NewStringFromStaticAscii("locale"),
+ factory->NewStringFromStaticAscii("und"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
}
@@ -580,135 +546,136 @@ void SetResolvedCollatorSettings(Isolate* isolate,
const icu::Locale& icu_locale,
icu::Collator* collator,
Handle<JSObject> resolved) {
+ Factory* factory = isolate->factory();
UErrorCode status = U_ZERO_ERROR;
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("numeric")),
- isolate->factory()->ToBoolean(
+ factory->NewStringFromStaticAscii("numeric"),
+ factory->ToBoolean(
collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
case UCOL_LOWER_FIRST:
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
- isolate->factory()->NewStringFromAscii(CStrVector("lower")),
+ factory->NewStringFromStaticAscii("caseFirst"),
+ factory->NewStringFromStaticAscii("lower"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
break;
case UCOL_UPPER_FIRST:
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
- isolate->factory()->NewStringFromAscii(CStrVector("upper")),
+ factory->NewStringFromStaticAscii("caseFirst"),
+ factory->NewStringFromStaticAscii("upper"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
break;
default:
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
- isolate->factory()->NewStringFromAscii(CStrVector("false")),
+ factory->NewStringFromStaticAscii("caseFirst"),
+ factory->NewStringFromStaticAscii("false"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
switch (collator->getAttribute(UCOL_STRENGTH, status)) {
case UCOL_PRIMARY: {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("strength")),
- isolate->factory()->NewStringFromAscii(CStrVector("primary")),
+ factory->NewStringFromStaticAscii("strength"),
+ factory->NewStringFromStaticAscii("primary"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
// case level: true + s1 -> case, s1 -> base.
if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
- isolate->factory()->NewStringFromAscii(CStrVector("case")),
+ factory->NewStringFromStaticAscii("sensitivity"),
+ factory->NewStringFromStaticAscii("case"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
} else {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
- isolate->factory()->NewStringFromAscii(CStrVector("base")),
+ factory->NewStringFromStaticAscii("sensitivity"),
+ factory->NewStringFromStaticAscii("base"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
break;
}
case UCOL_SECONDARY:
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("strength")),
- isolate->factory()->NewStringFromAscii(CStrVector("secondary")),
+ factory->NewStringFromStaticAscii("strength"),
+ factory->NewStringFromStaticAscii("secondary"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
- isolate->factory()->NewStringFromAscii(CStrVector("accent")),
+ factory->NewStringFromStaticAscii("sensitivity"),
+ factory->NewStringFromStaticAscii("accent"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
break;
case UCOL_TERTIARY:
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("strength")),
- isolate->factory()->NewStringFromAscii(CStrVector("tertiary")),
+ factory->NewStringFromStaticAscii("strength"),
+ factory->NewStringFromStaticAscii("tertiary"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
- isolate->factory()->NewStringFromAscii(CStrVector("variant")),
+ factory->NewStringFromStaticAscii("sensitivity"),
+ factory->NewStringFromStaticAscii("variant"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
break;
case UCOL_QUATERNARY:
      // We shouldn't get quaternary or identical from ICU, but if we do,
// put them into variant.
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("strength")),
- isolate->factory()->NewStringFromAscii(CStrVector("quaternary")),
+ factory->NewStringFromStaticAscii("strength"),
+ factory->NewStringFromStaticAscii("quaternary"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
- isolate->factory()->NewStringFromAscii(CStrVector("variant")),
+ factory->NewStringFromStaticAscii("sensitivity"),
+ factory->NewStringFromStaticAscii("variant"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
break;
default:
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("strength")),
- isolate->factory()->NewStringFromAscii(CStrVector("identical")),
+ factory->NewStringFromStaticAscii("strength"),
+ factory->NewStringFromStaticAscii("identical"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
- isolate->factory()->NewStringFromAscii(CStrVector("variant")),
+ factory->NewStringFromStaticAscii("sensitivity"),
+ factory->NewStringFromStaticAscii("variant"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("ignorePunctuation")),
- isolate->factory()->ToBoolean(collator->getAttribute(
+ factory->NewStringFromStaticAscii("ignorePunctuation"),
+ factory->ToBoolean(collator->getAttribute(
UCOL_ALTERNATE_HANDLING, status) == UCOL_SHIFTED),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
@@ -718,18 +685,18 @@ void SetResolvedCollatorSettings(Isolate* isolate,
if (U_SUCCESS(status)) {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("locale")),
- isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ factory->NewStringFromStaticAscii("locale"),
+ factory->NewStringFromAsciiChecked(result),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("locale")),
- isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ factory->NewStringFromStaticAscii("locale"),
+ factory->NewStringFromStaticAscii("und"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
}
@@ -771,6 +738,7 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
const icu::Locale& icu_locale,
icu::BreakIterator* break_iterator,
Handle<JSObject> resolved) {
+ Factory* factory = isolate->factory();
UErrorCode status = U_ZERO_ERROR;
// Set the locale
@@ -781,18 +749,18 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
if (U_SUCCESS(status)) {
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("locale")),
- isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ factory->NewStringFromStaticAscii("locale"),
+ factory->NewStringFromAsciiChecked(result),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
resolved,
- isolate->factory()->NewStringFromAscii(CStrVector("locale")),
- isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ factory->NewStringFromStaticAscii("locale"),
+ factory->NewStringFromStaticAscii("und"),
NONE,
- kNonStrictMode);
+ SLOPPY).Assert();
}
}
@@ -854,8 +822,8 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
- isolate->factory()->NewStringFromAscii(CStrVector("dateFormat"));
- if (JSReceiver::HasLocalProperty(obj, key)) {
+ isolate->factory()->NewStringFromStaticAscii("dateFormat");
+ if (JSReceiver::HasOwnProperty(obj, key)) {
return reinterpret_cast<icu::SimpleDateFormat*>(
obj->GetInternalField(0));
}
@@ -864,15 +832,24 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
}
-void DateFormat::DeleteDateFormat(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param) {
- // First delete the hidden C++ object.
- delete reinterpret_cast<icu::SimpleDateFormat*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(0));
+template<class T>
+void DeleteNativeObjectAt(const v8::WeakCallbackData<v8::Value, void>& data,
+ int index) {
+ v8::Local<v8::Object> obj = v8::Handle<v8::Object>::Cast(data.GetValue());
+ delete reinterpret_cast<T*>(obj->GetAlignedPointerFromInternalField(index));
+}
+
+
+static void DestroyGlobalHandle(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
+}
+
- // Then dispose of the persistent handle to JS object.
- object->Reset();
+void DateFormat::DeleteDateFormat(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ DeleteNativeObjectAt<icu::SimpleDateFormat>(data, 0);
+ DestroyGlobalHandle(data);
}
@@ -919,8 +896,8 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
- isolate->factory()->NewStringFromAscii(CStrVector("numberFormat"));
- if (JSReceiver::HasLocalProperty(obj, key)) {
+ isolate->factory()->NewStringFromStaticAscii("numberFormat");
+ if (JSReceiver::HasOwnProperty(obj, key)) {
return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
}
@@ -928,15 +905,10 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
}
-void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param) {
- // First delete the hidden C++ object.
- delete reinterpret_cast<icu::DecimalFormat*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(0));
-
- // Then dispose of the persistent handle to JS object.
- object->Reset();
+void NumberFormat::DeleteNumberFormat(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ DeleteNativeObjectAt<icu::DecimalFormat>(data, 0);
+ DestroyGlobalHandle(data);
}
@@ -979,9 +951,8 @@ icu::Collator* Collator::InitializeCollator(
icu::Collator* Collator::UnpackCollator(Isolate* isolate,
Handle<JSObject> obj) {
- Handle<String> key =
- isolate->factory()->NewStringFromAscii(CStrVector("collator"));
- if (JSReceiver::HasLocalProperty(obj, key)) {
+ Handle<String> key = isolate->factory()->NewStringFromStaticAscii("collator");
+ if (JSReceiver::HasOwnProperty(obj, key)) {
return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
}
@@ -989,15 +960,10 @@ icu::Collator* Collator::UnpackCollator(Isolate* isolate,
}
-void Collator::DeleteCollator(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param) {
- // First delete the hidden C++ object.
- delete reinterpret_cast<icu::Collator*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(0));
-
- // Then dispose of the persistent handle to JS object.
- object->Reset();
+void Collator::DeleteCollator(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ DeleteNativeObjectAt<icu::Collator>(data, 0);
+ DestroyGlobalHandle(data);
}
@@ -1044,8 +1010,8 @@ icu::BreakIterator* BreakIterator::InitializeBreakIterator(
icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
- isolate->factory()->NewStringFromAscii(CStrVector("breakIterator"));
- if (JSReceiver::HasLocalProperty(obj, key)) {
+ isolate->factory()->NewStringFromStaticAscii("breakIterator");
+ if (JSReceiver::HasOwnProperty(obj, key)) {
return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
}
@@ -1053,18 +1019,11 @@ icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
}
-void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param) {
- // First delete the hidden C++ object.
- delete reinterpret_cast<icu::BreakIterator*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(0));
-
- delete reinterpret_cast<icu::UnicodeString*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(1));
-
- // Then dispose of the persistent handle to JS object.
- object->Reset();
+void BreakIterator::DeleteBreakIterator(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ DeleteNativeObjectAt<icu::BreakIterator>(data, 0);
+ DeleteNativeObjectAt<icu::UnicodeString>(data, 1);
+ DestroyGlobalHandle(data);
}
} } // namespace v8::internal
diff --git a/chromium/v8/src/i18n.h b/chromium/v8/src/i18n.h
index 08e7f2b7137..5a195eb49fe 100644
--- a/chromium/v8/src/i18n.h
+++ b/chromium/v8/src/i18n.h
@@ -1,36 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-// limitations under the License.
#ifndef V8_I18N_H_
#define V8_I18N_H_
#include "unicode/uversion.h"
-#include "v8.h"
+#include "src/v8.h"
namespace U_ICU_NAMESPACE {
class BreakIterator;
@@ -71,9 +48,9 @@ class DateFormat {
// Release memory we allocated for the DateFormat once the JS object that
// holds the pointer gets garbage collected.
- static void DeleteDateFormat(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param);
+ static void DeleteDateFormat(
+ const v8::WeakCallbackData<v8::Value, void>& data);
+
private:
DateFormat();
};
@@ -95,9 +72,9 @@ class NumberFormat {
// Release memory we allocated for the NumberFormat once the JS object that
// holds the pointer gets garbage collected.
- static void DeleteNumberFormat(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param);
+ static void DeleteNumberFormat(
+ const v8::WeakCallbackData<v8::Value, void>& data);
+
private:
NumberFormat();
};
@@ -118,9 +95,9 @@ class Collator {
// Release memory we allocated for the Collator once the JS object that holds
// the pointer gets garbage collected.
- static void DeleteCollator(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param);
+ static void DeleteCollator(
+ const v8::WeakCallbackData<v8::Value, void>& data);
+
private:
Collator();
};
@@ -141,9 +118,8 @@ class BreakIterator {
// Release memory we allocated for the BreakIterator once the JS object that
// holds the pointer gets garbage collected.
- static void DeleteBreakIterator(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param);
+ static void DeleteBreakIterator(
+ const v8::WeakCallbackData<v8::Value, void>& data);
private:
BreakIterator();
diff --git a/chromium/v8/src/i18n.js b/chromium/v8/src/i18n.js
index 6b563a00f91..076845bb111 100644
--- a/chromium/v8/src/i18n.js
+++ b/chromium/v8/src/i18n.js
@@ -1,30 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// limitations under the License.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
// ECMAScript 402 API implementation.
@@ -34,8 +12,6 @@
*/
$Object.defineProperty(global, "Intl", { enumerable: false, value: (function() {
-'use strict';
-
var Intl = {};
var undefined = global.undefined;
@@ -45,6 +21,11 @@ var AVAILABLE_SERVICES = ['collator',
'dateformat',
'breakiterator'];
+var NORMALIZATION_FORMS = ['NFC',
+ 'NFD',
+ 'NFKC',
+ 'NFKD'];
+
/**
* Caches available locales for each service.
*/
@@ -227,8 +208,7 @@ var ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR =
*/
function addBoundMethod(obj, methodName, implementation, length) {
function getter() {
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject === undefined) {
+ if (!%IsInitializedIntlObject(this)) {
throw new $TypeError('Method ' + methodName + ' called on a ' +
'non-object or on a wrong type of object.');
}
@@ -277,7 +257,7 @@ function addBoundMethod(obj, methodName, implementation, length) {
%FunctionRemovePrototype(getter);
%SetNativeFlag(getter);
- $Object.defineProperty(obj.prototype, methodName, {
+ ObjectDefineProperty(obj.prototype, methodName, {
get: getter,
enumerable: false,
configurable: true
@@ -298,7 +278,7 @@ function supportedLocalesOf(service, locales, options) {
if (options === undefined) {
options = {};
} else {
- options = toObject(options);
+ options = ToObject(options);
}
var matcher = options.localeMatcher;
@@ -532,18 +512,6 @@ function parseExtension(extension) {
/**
- * Converts parameter to an Object if possible.
- */
-function toObject(value) {
- if (IS_NULL_OR_UNDEFINED(value)) {
- throw new $TypeError('Value cannot be converted to an Object.');
- }
-
- return $Object(value);
-}
-
-
-/**
* Populates internalOptions object with boolean key-value pairs
* from extensionMap and options.
* Returns filtered extension (number and date format constructors use
@@ -612,15 +580,14 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
*/
function freezeArray(array) {
array.forEach(function(element, index) {
- $Object.defineProperty(array, index, {value: element,
+ ObjectDefineProperty(array, index, {value: element,
configurable: false,
writable: false,
enumerable: true});
});
- $Object.defineProperty(array, 'length', {value: array.length,
- writable: false});
-
+ ObjectDefineProperty(array, 'length', {value: array.length,
+ writable: false});
return array;
}
@@ -681,8 +648,8 @@ function getAvailableLocalesOf(service) {
* Configurable is false by default.
*/
function defineWEProperty(object, property, value) {
- $Object.defineProperty(object, property,
- {value: value, writable: true, enumerable: true});
+ ObjectDefineProperty(object, property,
+ {value: value, writable: true, enumerable: true});
}
@@ -701,11 +668,11 @@ function addWEPropertyIfDefined(object, property, value) {
* Defines a property and sets writable, enumerable and configurable to true.
*/
function defineWECProperty(object, property, value) {
- $Object.defineProperty(object, property,
- {value: value,
- writable: true,
- enumerable: true,
- configurable: true});
+ ObjectDefineProperty(object, property,
+ {value: value,
+ writable: true,
+ enumerable: true,
+ configurable: true});
}
@@ -772,7 +739,7 @@ function initializeLocaleList(locales) {
return freezeArray(seen);
}
- var o = toObject(locales);
+ var o = ToObject(locales);
// Converts it to UInt32 (>>> is shr on 32bit integers).
var len = o.length >>> 0;
@@ -891,7 +858,7 @@ function BuildLanguageTagREs() {
* Useful for subclassing.
*/
function initializeCollator(collator, locales, options) {
- if (collator.hasOwnProperty('__initializedIntlObject')) {
+ if (%IsInitializedIntlObject(collator)) {
throw new $TypeError('Trying to re-initialize Collator object.');
}
@@ -944,8 +911,8 @@ function initializeCollator(collator, locales, options) {
// We define all properties C++ code may produce, to prevent security
  // problems. If a malicious user decides to redefine Object.prototype.locale
// we can't just use plain x.locale = 'us' or in C++ Set("locale", "us").
- // Object.defineProperties will either succeed defining or throw an error.
- var resolved = $Object.defineProperties({}, {
+ // ObjectDefineProperties will either succeed defining or throw an error.
+ var resolved = ObjectDefineProperties({}, {
caseFirst: {writable: true},
collation: {value: internalOptions.collation, writable: true},
ignorePunctuation: {writable: true},
@@ -962,10 +929,8 @@ function initializeCollator(collator, locales, options) {
resolved);
// Writable, configurable and enumerable are set to false by default.
- $Object.defineProperty(collator, 'collator', {value: internalCollator});
- $Object.defineProperty(collator, '__initializedIntlObject',
- {value: 'collator'});
- $Object.defineProperty(collator, 'resolved', {value: resolved});
+ %MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
+ ObjectDefineProperty(collator, 'resolved', {value: resolved});
return collator;
}
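The defineProperties pattern referenced above matters because a plain assignment would run any setter a hostile script installed on Object.prototype, while defining an own property bypasses it. A minimal illustration using standard JS semantics (not part of the patch):

    Object.defineProperty(Object.prototype, 'locale', {
      set: function(v) { throw new TypeError('hijacked'); }
    });
    var resolved = {};
    resolved.locale = 'us';            // throws: the inherited setter runs
    Object.defineProperty(resolved, 'locale',
        {value: 'us', writable: true});  // safe: own property, setter skipped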
@@ -986,7 +951,7 @@ function initializeCollator(collator, locales, options) {
return new Intl.Collator(locales, options);
}
- return initializeCollator(toObject(this), locales, options);
+ return initializeCollator(ToObject(this), locales, options);
},
DONT_ENUM
);
@@ -1000,8 +965,7 @@ function initializeCollator(collator, locales, options) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'collator') {
+ if (!%IsInitializedIntlObjectOfType(this, 'collator')) {
throw new $TypeError('resolvedOptions method called on a non-object ' +
        'or on an object that is not Intl.Collator.');
}
@@ -1058,7 +1022,8 @@ function initializeCollator(collator, locales, options) {
* the sort order, or x comes after y in the sort order, respectively.
*/
function compare(collator, x, y) {
- return %InternalCompare(collator.collator, $String(x), $String(y));
+ return %InternalCompare(%GetImplFromInitializedIntlObject(collator),
+ $String(x), $String(y));
};
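A usage sketch for the compare path above through the public API (the ordering assumes standard German collation data):

    var collator = new Intl.Collator('de');
    ['ä', 'z', 'a'].sort(collator.compare);  // -> ['a', 'ä', 'z']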
@@ -1099,7 +1064,7 @@ function getNumberOption(options, property, min, max, fallback) {
* Useful for subclassing.
*/
function initializeNumberFormat(numberFormat, locales, options) {
- if (numberFormat.hasOwnProperty('__initializedIntlObject')) {
+ if (%IsInitializedIntlObject(numberFormat)) {
throw new $TypeError('Trying to re-initialize NumberFormat object.');
}
@@ -1162,7 +1127,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
- var resolved = $Object.defineProperties({}, {
+ var resolved = ObjectDefineProperties({}, {
currency: {writable: true},
currencyDisplay: {writable: true},
locale: {writable: true},
@@ -1187,14 +1152,12 @@ function initializeNumberFormat(numberFormat, locales, options) {
// We can't get information about number or currency style from ICU, so we
  // assume the user request was fulfilled.
if (internalOptions.style === 'currency') {
- $Object.defineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
- writable: true});
+ ObjectDefineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
+ writable: true});
}
- $Object.defineProperty(numberFormat, 'formatter', {value: formatter});
- $Object.defineProperty(numberFormat, 'resolved', {value: resolved});
- $Object.defineProperty(numberFormat, '__initializedIntlObject',
- {value: 'numberformat'});
+ %MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
+ ObjectDefineProperty(numberFormat, 'resolved', {value: resolved});
return numberFormat;
}
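The currencyDisplay handling above only applies when style is 'currency'. A usage sketch (exact output depends on the bundled ICU/CLDR data):

    new Intl.NumberFormat('en', {
      style: 'currency', currency: 'USD', currencyDisplay: 'code'
    }).format(1);  // e.g. "USD1.00"; 'code' selects the ISO currency code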
@@ -1215,7 +1178,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
return new Intl.NumberFormat(locales, options);
}
- return initializeNumberFormat(toObject(this), locales, options);
+ return initializeNumberFormat(ToObject(this), locales, options);
},
DONT_ENUM
);
@@ -1229,8 +1192,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'numberformat') {
+ if (!%IsInitializedIntlObjectOfType(this, 'numberformat')) {
throw new $TypeError('resolvedOptions method called on a non-object' +
        ' or on an object that is not Intl.NumberFormat.');
}
@@ -1304,7 +1266,8 @@ function formatNumber(formatter, value) {
// Spec treats -0 and +0 as 0.
var number = $Number(value) + 0;
- return %InternalNumberFormat(formatter.formatter, number);
+ return %InternalNumberFormat(%GetImplFromInitializedIntlObject(formatter),
+ number);
}
@@ -1312,7 +1275,8 @@ function formatNumber(formatter, value) {
* Returns a Number that represents string value that was passed in.
*/
function parseNumber(formatter, value) {
- return %InternalNumberParse(formatter.formatter, $String(value));
+ return %InternalNumberParse(%GetImplFromInitializedIntlObject(formatter),
+ $String(value));
}
@@ -1465,13 +1429,11 @@ function appendToDateTimeObject(options, option, match, pairs) {
*/
function toDateTimeOptions(options, required, defaults) {
if (options === undefined) {
- options = null;
+ options = {};
} else {
- options = toObject(options);
+ options = TO_OBJECT_INLINE(options);
}
- options = $Object.apply(this, [options]);
-
var needsDefault = true;
if ((required === 'date' || required === 'any') &&
(options.weekday !== undefined || options.year !== undefined ||
@@ -1486,30 +1448,30 @@ function toDateTimeOptions(options, required, defaults) {
}
if (needsDefault && (defaults === 'date' || defaults === 'all')) {
- $Object.defineProperty(options, 'year', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- $Object.defineProperty(options, 'month', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- $Object.defineProperty(options, 'day', {value: 'numeric',
+ ObjectDefineProperty(options, 'year', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ ObjectDefineProperty(options, 'month', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
+ ObjectDefineProperty(options, 'day', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
}
if (needsDefault && (defaults === 'time' || defaults === 'all')) {
- $Object.defineProperty(options, 'hour', {value: 'numeric',
+ ObjectDefineProperty(options, 'hour', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
- $Object.defineProperty(options, 'minute', {value: 'numeric',
+ ObjectDefineProperty(options, 'minute', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
- $Object.defineProperty(options, 'second', {value: 'numeric',
+ ObjectDefineProperty(options, 'second', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
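These defaults are what make a bare toLocaleDateString() render numeric year, month and day. A sketch (the rendered string depends on locale data):

    new Date(2014, 6, 4).toLocaleDateString('en-US');
    // typically "7/4/2014", equivalent to passing
    // { year: 'numeric', month: 'numeric', day: 'numeric' }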
@@ -1525,7 +1487,7 @@ function toDateTimeOptions(options, required, defaults) {
*/
function initializeDateTimeFormat(dateFormat, locales, options) {
- if (dateFormat.hasOwnProperty('__initializedIntlObject')) {
+ if (%IsInitializedIntlObject(dateFormat)) {
throw new $TypeError('Trying to re-initialize DateTimeFormat object.');
}
@@ -1560,7 +1522,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
- var resolved = $Object.defineProperties({}, {
+ var resolved = ObjectDefineProperties({}, {
calendar: {writable: true},
day: {writable: true},
era: {writable: true},
@@ -1587,10 +1549,8 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
throw new $RangeError('Unsupported time zone specified ' + tz);
}
- $Object.defineProperty(dateFormat, 'formatter', {value: formatter});
- $Object.defineProperty(dateFormat, 'resolved', {value: resolved});
- $Object.defineProperty(dateFormat, '__initializedIntlObject',
- {value: 'dateformat'});
+ %MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
+ ObjectDefineProperty(dateFormat, 'resolved', {value: resolved});
return dateFormat;
}
@@ -1611,7 +1571,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
return new Intl.DateTimeFormat(locales, options);
}
- return initializeDateTimeFormat(toObject(this), locales, options);
+ return initializeDateTimeFormat(ToObject(this), locales, options);
},
DONT_ENUM
);
@@ -1625,8 +1585,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'dateformat') {
+ if (!%IsInitializedIntlObjectOfType(this, 'dateformat')) {
throw new $TypeError('resolvedOptions method called on a non-object or ' +
        'on an object that is not Intl.DateTimeFormat.');
}
@@ -1708,7 +1667,8 @@ function formatDate(formatter, dateValue) {
throw new $RangeError('Provided date is not in valid range.');
}
- return %InternalDateFormat(formatter.formatter, new $Date(dateMs));
+ return %InternalDateFormat(%GetImplFromInitializedIntlObject(formatter),
+ new $Date(dateMs));
}
@@ -1719,7 +1679,8 @@ function formatDate(formatter, dateValue) {
* Returns undefined if date string cannot be parsed.
*/
function parseDate(formatter, value) {
- return %InternalDateParse(formatter.formatter, $String(value));
+ return %InternalDateParse(%GetImplFromInitializedIntlObject(formatter),
+ $String(value));
}
@@ -1767,7 +1728,7 @@ function canonicalizeTimeZoneID(tzID) {
* Useful for subclassing.
*/
function initializeBreakIterator(iterator, locales, options) {
- if (iterator.hasOwnProperty('__initializedIntlObject')) {
+ if (%IsInitializedIntlObject(iterator)) {
throw new $TypeError('Trying to re-initialize v8BreakIterator object.');
}
@@ -1783,7 +1744,7 @@ function initializeBreakIterator(iterator, locales, options) {
'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
var locale = resolveLocale('breakiterator', locales, options);
- var resolved = $Object.defineProperties({}, {
+ var resolved = ObjectDefineProperties({}, {
requestedLocale: {value: locale.locale, writable: true},
type: {value: internalOptions.type, writable: true},
locale: {writable: true}
@@ -1793,10 +1754,9 @@ function initializeBreakIterator(iterator, locales, options) {
internalOptions,
resolved);
- $Object.defineProperty(iterator, 'iterator', {value: internalIterator});
- $Object.defineProperty(iterator, 'resolved', {value: resolved});
- $Object.defineProperty(iterator, '__initializedIntlObject',
- {value: 'breakiterator'});
+ %MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
+ internalIterator);
+ ObjectDefineProperty(iterator, 'resolved', {value: resolved});
return iterator;
}
@@ -1817,7 +1777,7 @@ function initializeBreakIterator(iterator, locales, options) {
return new Intl.v8BreakIterator(locales, options);
}
- return initializeBreakIterator(toObject(this), locales, options);
+ return initializeBreakIterator(ToObject(this), locales, options);
},
DONT_ENUM
);
@@ -1831,8 +1791,7 @@ function initializeBreakIterator(iterator, locales, options) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'breakiterator') {
+ if (!%IsInitializedIntlObjectOfType(this, 'breakiterator')) {
throw new $TypeError('resolvedOptions method called on a non-object or ' +
        'on an object that is not Intl.v8BreakIterator.');
}
@@ -1879,7 +1838,8 @@ function initializeBreakIterator(iterator, locales, options) {
* gets discarded.
*/
function adoptText(iterator, text) {
- %BreakIteratorAdoptText(iterator.iterator, $String(text));
+ %BreakIteratorAdoptText(%GetImplFromInitializedIntlObject(iterator),
+ $String(text));
}
@@ -1887,7 +1847,7 @@ function adoptText(iterator, text) {
* Returns index of the first break in the string and moves current pointer.
*/
function first(iterator) {
- return %BreakIteratorFirst(iterator.iterator);
+ return %BreakIteratorFirst(%GetImplFromInitializedIntlObject(iterator));
}
@@ -1895,7 +1855,7 @@ function first(iterator) {
* Returns the index of the next break and moves the pointer.
*/
function next(iterator) {
- return %BreakIteratorNext(iterator.iterator);
+ return %BreakIteratorNext(%GetImplFromInitializedIntlObject(iterator));
}
@@ -1903,7 +1863,7 @@ function next(iterator) {
* Returns index of the current break.
*/
function current(iterator) {
- return %BreakIteratorCurrent(iterator.iterator);
+ return %BreakIteratorCurrent(%GetImplFromInitializedIntlObject(iterator));
}
@@ -1911,7 +1871,7 @@ function current(iterator) {
* Returns type of the current break.
*/
function breakType(iterator) {
- return %BreakIteratorBreakType(iterator.iterator);
+ return %BreakIteratorBreakType(%GetImplFromInitializedIntlObject(iterator));
}
@@ -1962,7 +1922,7 @@ function cachedOrNewService(service, locales, options, defaults) {
* Compares this and that, and returns a value less than 0, equal to 0, or
* greater than 0.
* Overrides the built-in method.
*/
-$Object.defineProperty($String.prototype, 'localeCompare', {
+ObjectDefineProperty($String.prototype, 'localeCompare', {
value: function(that) {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
@@ -1987,10 +1947,44 @@ $Object.defineProperty($String.prototype, 'localeCompare', {
/**
+ * Unicode normalization. This method is called with one argument that
+ * specifies the normalization form.
+ * If none is specified, "NFC" is assumed.
+ * If the form is not one of "NFC", "NFD", "NFKC", or "NFKD", a RangeError
+ * exception is thrown.
+ */
+ObjectDefineProperty($String.prototype, 'normalize', {
+ value: function(that) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
+
+ var form = $String(%_Arguments(0) || 'NFC');
+
+ var normalizationForm = NORMALIZATION_FORMS.indexOf(form);
+ if (normalizationForm === -1) {
+ throw new $RangeError('The normalization form should be one of '
+ + NORMALIZATION_FORMS.join(', ') + '.');
+ }
+
+ return %StringNormalize(this, normalizationForm);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($String.prototype.normalize, 'normalize');
+%FunctionRemovePrototype($String.prototype.normalize);
+%SetNativeFlag($String.prototype.normalize);
+
+
+/**
* Formats a Number object (this) using locale and options values.
* If locale or options are omitted, defaults are used.
*/
-$Object.defineProperty($Number.prototype, 'toLocaleString', {
+ObjectDefineProperty($Number.prototype, 'toLocaleString', {
value: function() {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
@@ -2040,7 +2034,7 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
* If locale or options are omitted, defaults are used - both date and time are
* present in the output.
*/
-$Object.defineProperty($Date.prototype, 'toLocaleString', {
+ObjectDefineProperty($Date.prototype, 'toLocaleString', {
value: function() {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
@@ -2065,7 +2059,7 @@ $Object.defineProperty($Date.prototype, 'toLocaleString', {
* If locale or options are omitted, defaults are used - only date is present
* in the output.
*/
-$Object.defineProperty($Date.prototype, 'toLocaleDateString', {
+ObjectDefineProperty($Date.prototype, 'toLocaleDateString', {
value: function() {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
@@ -2090,7 +2084,7 @@ $Object.defineProperty($Date.prototype, 'toLocaleDateString', {
* If locale or options are omitted, defaults are used - only time is present
* in the output.
*/
-$Object.defineProperty($Date.prototype, 'toLocaleTimeString', {
+ObjectDefineProperty($Date.prototype, 'toLocaleTimeString', {
value: function() {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
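
The i18n.js hunks above replace the ad-hoc '__initializedIntlObject' marker
property with three runtime calls (%MarkAsInitializedIntlObjectOfType,
%IsInitializedIntlObjectOfType and %GetImplFromInitializedIntlObject) that tag
the wrapper object and record its ICU implementation in one step. A minimal
C++ sketch of that bookkeeping, with illustrative types and names rather than
the actual V8 runtime:

    #include <map>
    #include <string>
    #include <utility>

    struct IntlWrapper {};  // stand-in for the JS-visible wrapper object
    struct IcuImpl {};      // stand-in for the ICU formatter/iterator

    class IntlRegistry {
     public:
      // %MarkAsInitializedIntlObjectOfType(obj, type, impl)
      void Mark(const IntlWrapper* obj, std::string type, IcuImpl* impl) {
        tagged_[obj] = std::make_pair(std::move(type), impl);
      }
      // %IsInitializedIntlObject(obj)
      bool IsInitialized(const IntlWrapper* obj) const {
        return tagged_.count(obj) != 0;
      }
      // %IsInitializedIntlObjectOfType(obj, type)
      bool IsOfType(const IntlWrapper* obj, const std::string& type) const {
        auto it = tagged_.find(obj);
        return it != tagged_.end() && it->second.first == type;
      }
      // %GetImplFromInitializedIntlObject(obj)
      IcuImpl* Impl(const IntlWrapper* obj) const {
        auto it = tagged_.find(obj);
        return it == tagged_.end() ? nullptr : it->second.second;
      }

     private:
      std::map<const IntlWrapper*, std::pair<std::string, IcuImpl*>> tagged_;
    };
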
diff --git a/chromium/v8/src/ia32/assembler-ia32-inl.h b/chromium/v8/src/ia32/assembler-ia32-inl.h
index ee5d991e38a..422e1fdd734 100644
--- a/chromium/v8/src/ia32/assembler-ia32-inl.h
+++ b/chromium/v8/src/ia32/assembler-ia32-inl.h
@@ -37,55 +37,58 @@
#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
#define V8_IA32_ASSEMBLER_IA32_INL_H_
-#include "ia32/assembler-ia32.h"
+#include "src/ia32/assembler-ia32.h"
-#include "cpu.h"
-#include "debug.h"
+#include "src/cpu.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
+bool CpuFeatures::SupportsCrankshaft() { return true; }
+
static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 5;
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+ bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == CODE_AGE_SEQUENCE) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
}
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
// Special handling of a debug break slot when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
}
}
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -97,15 +100,24 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- Assembler::set_target_address_at(pc_, target);
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@@ -125,12 +137,16 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
}
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -152,9 +168,12 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
void RelocInfo::set_target_runtime_entry(Address target,
- WriteBarrierMode mode) {
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsRuntimeEntry(rmode_));
- if (target_address() != target) set_target_address(target, mode);
+ if (target_address() != target) {
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+ }
}
@@ -171,12 +190,16 @@ Cell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// an evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -196,28 +219,30 @@ Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
+ Assembler::target_address_at(pc_ + 1, host_));
}
-void RelocInfo::set_code_age_stub(Code* stub) {
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(*pc_ == kCallOpcode);
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
+ icache_flush_mode);
}
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Assembler::target_address_at(pc_ + 1);
+ return Assembler::target_address_at(pc_ + 1, host_);
}
void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Assembler::set_target_address_at(pc_ + 1, target);
+ Assembler::set_target_address_at(pc_ + 1, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -248,7 +273,7 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -279,14 +304,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
- #ifdef ENABLE_DEBUGGER_SUPPORT
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
-#endif
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
@@ -308,14 +331,12 @@ void RelocInfo::Visit(Heap* heap) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
-#endif
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
@@ -439,15 +460,21 @@ void Assembler::emit_w(const Immediate& x) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
- CPU::FlushICache(p, sizeof(int32_t));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(p, sizeof(int32_t));
+ }
}
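
For reference, target_address_at/set_target_address_at above implement the
ia32 pc-relative call encoding: the int32 stored at pc is a displacement
measured from the end of that word, so target = pc + 4 + disp and
disp = target - (pc + 4). A self-contained sketch in plain C++, not the
assembler itself:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    using Address = uint8_t*;

    Address TargetAddressAt(Address pc) {
      int32_t disp;
      std::memcpy(&disp, pc, sizeof(disp));  // *reinterpret_cast<int32_t*>(pc)
      return pc + sizeof(int32_t) + disp;
    }

    void SetTargetAddressAt(Address pc, Address target) {
      int32_t disp = static_cast<int32_t>(target - (pc + sizeof(int32_t)));
      std::memcpy(pc, &disp, sizeof(disp));
      // A real assembler would flush the instruction cache here unless
      // SKIP_ICACHE_FLUSH was requested.
    }

    int main() {
      uint8_t buf[16] = {0};
      SetTargetAddressAt(buf, buf + 12);
      assert(TargetAddressAt(buf) == buf + 12);
      return 0;
    }
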
diff --git a/chromium/v8/src/ia32/assembler-ia32.cc b/chromium/v8/src/ia32/assembler-ia32.cc
index 733432028af..75cb6ed7c4c 100644
--- a/chromium/v8/src/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/ia32/assembler-ia32.cc
@@ -34,13 +34,13 @@
// significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "disassembler.h"
-#include "macro-assembler.h"
-#include "serialize.h"
+#include "src/disassembler.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -48,81 +48,21 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-uint64_t CpuFeatures::supported_ = 0;
-uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
-uint64_t CpuFeatures::cross_compile_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
-int IntelDoubleRegister::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(SSE2)) {
- return XMMRegister::kNumAllocatableRegisters;
- } else {
- return X87Register::kNumAllocatableRegisters;
- }
-}
-
-
-int IntelDoubleRegister::NumRegisters() {
- if (CpuFeatures::IsSupported(SSE2)) {
- return XMMRegister::kNumRegisters;
- } else {
- return X87Register::kNumRegisters;
- }
-}
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ CPU cpu;
+ CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
+ CHECK(cpu.has_cmov()); // CMOV support is mandatory.
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
-const char* IntelDoubleRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(SSE2)) {
- return XMMRegister::AllocationIndexToString(index);
- } else {
- return X87Register::AllocationIndexToString(index);
- }
+ if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
+ if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
}
-void CpuFeatures::Probe() {
- ASSERT(!initialized_);
- ASSERT(supported_ == 0);
-#ifdef DEBUG
- initialized_ = true;
-#endif
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- uint64_t probed_features = 0;
- CPU cpu;
- if (cpu.has_sse41()) {
- probed_features |= static_cast<uint64_t>(1) << SSE4_1;
- }
- if (cpu.has_sse3()) {
- probed_features |= static_cast<uint64_t>(1) << SSE3;
- }
- if (cpu.has_sse2()) {
- probed_features |= static_cast<uint64_t>(1) << SSE2;
- }
- if (cpu.has_cmov()) {
- probed_features |= static_cast<uint64_t>(1) << CMOV;
- }
-
- // SAHF must be available in compat/legacy mode.
- ASSERT(cpu.has_sahf());
- probed_features |= static_cast<uint64_t>(1) << SAHF;
-
- uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
- supported_ = probed_features | platform_features;
- found_by_runtime_probing_only_ = probed_features & ~platform_features;
-}
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
// -----------------------------------------------------------------------------
@@ -160,6 +100,11 @@ bool RelocInfo::IsCodedSpecially() {
}
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
for (int i = 0; i < instruction_count; i++) {
@@ -344,15 +289,6 @@ bool Assembler::IsNop(Address addr) {
void Assembler::Nop(int bytes) {
EnsureSpace ensure_space(this);
- if (!CpuFeatures::IsSupported(SSE2)) {
- // Older CPUs that do not support SSE2 may not support multibyte NOP
- // instructions.
- for (; bytes > 0; bytes--) {
- EMIT(0x90);
- }
- return;
- }
-
// Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf
while (bytes > 0) {
switch (bytes) {
@@ -652,7 +588,6 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(IsEnabled(CMOV));
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
EMIT(0x0F);
@@ -1259,6 +1194,14 @@ void Assembler::bts(const Operand& dst, Register src) {
}
+void Assembler::bsr(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBD);
+ emit_operand(dst, src);
+}
+
+
void Assembler::hlt() {
EnsureSpace ensure_space(this);
EMIT(0xF4);
@@ -1938,7 +1881,6 @@ void Assembler::setcc(Condition cc, Register reg) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -1948,7 +1890,6 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1958,7 +1899,6 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1968,7 +1908,6 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1978,7 +1917,6 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -1988,7 +1926,6 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -1998,7 +1935,6 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2008,7 +1944,6 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::addsd(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2018,7 +1953,6 @@ void Assembler::addsd(XMMRegister dst, const Operand& src) {
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2028,7 +1962,6 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulsd(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2038,7 +1971,6 @@ void Assembler::mulsd(XMMRegister dst, const Operand& src) {
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2048,7 +1980,6 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2058,7 +1989,6 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2068,7 +1998,6 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
void Assembler::andps(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x54);
@@ -2077,7 +2006,6 @@ void Assembler::andps(XMMRegister dst, const Operand& src) {
void Assembler::orps(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x56);
@@ -2086,7 +2014,6 @@ void Assembler::orps(XMMRegister dst, const Operand& src) {
void Assembler::xorps(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x57);
@@ -2095,7 +2022,6 @@ void Assembler::xorps(XMMRegister dst, const Operand& src) {
void Assembler::addps(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x58);
@@ -2104,7 +2030,6 @@ void Assembler::addps(XMMRegister dst, const Operand& src) {
void Assembler::subps(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5C);
@@ -2113,7 +2038,6 @@ void Assembler::subps(XMMRegister dst, const Operand& src) {
void Assembler::mulps(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x59);
@@ -2122,7 +2046,6 @@ void Assembler::mulps(XMMRegister dst, const Operand& src) {
void Assembler::divps(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5E);
@@ -2131,7 +2054,15 @@ void Assembler::divps(XMMRegister dst, const Operand& src) {
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2141,7 +2072,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2151,7 +2081,6 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2161,7 +2090,6 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2184,7 +2112,6 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
void Assembler::movmskpd(Register dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2194,7 +2121,6 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
void Assembler::movmskps(Register dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x50);
@@ -2203,7 +2129,6 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2213,7 +2138,6 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2224,7 +2148,6 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x28);
@@ -2233,7 +2156,6 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
- ASSERT(IsEnabled(SSE2));
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2244,7 +2166,6 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2254,7 +2175,6 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2264,7 +2184,6 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2274,7 +2193,6 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
void Assembler::movdqu(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2295,7 +2213,6 @@ void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
void Assembler::movntdq(const Operand& dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2316,7 +2233,6 @@ void Assembler::prefetch(const Operand& src, int level) {
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2); // double
EMIT(0x0F);
@@ -2326,7 +2242,6 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2); // double
EMIT(0x0F);
@@ -2336,7 +2251,6 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
void Assembler::movss(const Operand& dst, XMMRegister src ) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3); // float
EMIT(0x0F);
@@ -2346,7 +2260,6 @@ void Assembler::movss(const Operand& dst, XMMRegister src ) {
void Assembler::movss(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF3); // float
EMIT(0x0F);
@@ -2356,7 +2269,6 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
void Assembler::movd(XMMRegister dst, const Operand& src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2366,7 +2278,6 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
void Assembler::movd(const Operand& dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2389,7 +2300,6 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
void Assembler::pand(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2399,7 +2309,6 @@ void Assembler::pand(XMMRegister dst, XMMRegister src) {
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2409,7 +2318,6 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) {
void Assembler::por(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2430,7 +2338,6 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
void Assembler::psllq(XMMRegister reg, int8_t shift) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2441,7 +2348,6 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) {
void Assembler::psllq(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2451,7 +2357,6 @@ void Assembler::psllq(XMMRegister dst, XMMRegister src) {
void Assembler::psrlq(XMMRegister reg, int8_t shift) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2462,7 +2367,6 @@ void Assembler::psrlq(XMMRegister reg, int8_t shift) {
void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2472,7 +2376,6 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
- ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2555,7 +2458,7 @@ void Assembler::RecordComment(const char* msg, bool force) {
void Assembler::GrowBuffer() {
- ASSERT(overflow());
+ ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
@@ -2586,9 +2489,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- OS::MemMove(desc.buffer, buffer_, desc.instr_size);
- OS::MemMove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
+ MemMove(desc.buffer, buffer_, desc.instr_size);
+ MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(),
+ desc.reloc_size);
// Switch buffers.
if (isolate()->assembler_spare_buffer() == NULL &&
@@ -2614,7 +2517,7 @@ void Assembler::GrowBuffer() {
}
}
- ASSERT(!overflow());
+ ASSERT(!buffer_overflow());
}
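
The overflow() to buffer_overflow() rename above guards the same invariant as
before: code bytes grow upward from buffer_ while relocation records grow
downward from the end of the same allocation, and GrowBuffer() must run once
the two cursors come within kGap bytes of each other. A toy model of that
layout, illustrative rather than V8's actual Assembler:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class TwoEndedBuffer {
     public:
      explicit TwoEndedBuffer(size_t size) : buf_(size), pc_(0), reloc_(size) {}

      // Equivalent of Assembler::buffer_overflow() with kGap == gap.
      bool buffer_overflow(size_t gap) const { return pc_ + gap >= reloc_; }
      size_t available_space() const { return reloc_ - pc_; }

      void EmitCode(uint8_t byte) { buf_[pc_++] = byte; }      // grows upward
      void EmitReloc(uint8_t byte) { buf_[--reloc_] = byte; }  // grows downward

     private:
      std::vector<uint8_t> buf_;
      size_t pc_;     // next code byte to write
      size_t reloc_;  // position of the last written reloc byte
    };
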
@@ -2689,21 +2592,29 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(!RelocInfo::IsNone(rmode));
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
}
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
+}
+
+
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
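
The new CpuFeatures::ProbeImpl above collapses the old probing machinery into
a single bitmask, with SSE2 and CMOV now taken for granted and only static
features used when cross-compiling a snapshot. Roughly, in isolation (the
probe class below is a stand-in, not V8's CPU):

    #include <cstdint>

    enum CpuFeature { SSE3 = 0, SSE4_1 = 1 };

    struct CpuProbe {  // stand-in for the real cpuid-based CPU class
      bool has_sse3() const { return true; }
      bool has_sse41() const { return true; }
    };

    class CpuFeatureMask {
     public:
      void Probe(bool cross_compile) {
        if (cross_compile) return;  // snapshots get static features only
        CpuProbe cpu;
        if (cpu.has_sse41()) supported_ |= 1u << SSE4_1;
        if (cpu.has_sse3()) supported_ |= 1u << SSE3;
      }
      bool IsSupported(CpuFeature f) const { return (supported_ >> f) & 1u; }

     private:
      uint32_t supported_ = 0;  // bit f set => feature f usable
    };
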
diff --git a/chromium/v8/src/ia32/assembler-ia32.h b/chromium/v8/src/ia32/assembler-ia32.h
index 6ed0bc6d662..de8b04f84e3 100644
--- a/chromium/v8/src/ia32/assembler-ia32.h
+++ b/chromium/v8/src/ia32/assembler-ia32.h
@@ -37,8 +37,8 @@
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
-#include "isolate.h"
-#include "serialize.h"
+#include "src/isolate.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -141,71 +141,41 @@ inline Register Register::FromAllocationIndex(int index) {
}
-struct IntelDoubleRegister {
- static const int kMaxNumRegisters = 8;
+struct XMMRegister {
static const int kMaxNumAllocatableRegisters = 7;
- static int NumAllocatableRegisters();
- static int NumRegisters();
- static const char* AllocationIndexToString(int index);
+ static const int kMaxNumRegisters = 8;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
- static int ToAllocationIndex(IntelDoubleRegister reg) {
+ static int ToAllocationIndex(XMMRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
- static IntelDoubleRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ static XMMRegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index + 1);
}
- static IntelDoubleRegister from_code(int code) {
- IntelDoubleRegister result = { code };
+ static XMMRegister from_code(int code) {
+ XMMRegister result = { code };
return result;
}
bool is_valid() const {
- return 0 <= code_ && code_ < NumRegisters();
+ return 0 <= code_ && code_ < kMaxNumRegisters;
}
+
int code() const {
ASSERT(is_valid());
return code_;
}
- int code_;
-};
-
-
-const IntelDoubleRegister double_register_0 = { 0 };
-const IntelDoubleRegister double_register_1 = { 1 };
-const IntelDoubleRegister double_register_2 = { 2 };
-const IntelDoubleRegister double_register_3 = { 3 };
-const IntelDoubleRegister double_register_4 = { 4 };
-const IntelDoubleRegister double_register_5 = { 5 };
-const IntelDoubleRegister double_register_6 = { 6 };
-const IntelDoubleRegister double_register_7 = { 7 };
-const IntelDoubleRegister no_double_reg = { -1 };
-
-
-struct XMMRegister : IntelDoubleRegister {
- static const int kNumAllocatableRegisters = 7;
- static const int kNumRegisters = 8;
-
- static XMMRegister from_code(int code) {
- STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
- XMMRegister result;
- result.code_ = code;
- return result;
- }
-
bool is(XMMRegister reg) const { return code_ == reg.code_; }
- static XMMRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- return from_code(index + 1);
- }
-
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"xmm1",
"xmm2",
@@ -217,57 +187,23 @@ struct XMMRegister : IntelDoubleRegister {
};
return names[index];
}
-};
-
-
-#define xmm0 (static_cast<const XMMRegister&>(double_register_0))
-#define xmm1 (static_cast<const XMMRegister&>(double_register_1))
-#define xmm2 (static_cast<const XMMRegister&>(double_register_2))
-#define xmm3 (static_cast<const XMMRegister&>(double_register_3))
-#define xmm4 (static_cast<const XMMRegister&>(double_register_4))
-#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
-#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
-#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
-#define no_xmm_reg (static_cast<const XMMRegister&>(no_double_reg))
-
-
-struct X87Register : IntelDoubleRegister {
- static const int kNumAllocatableRegisters = 5;
- static const int kNumRegisters = 5;
-
- bool is(X87Register reg) const {
- return code_ == reg.code_;
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "stX_0", "stX_1", "stX_2", "stX_3", "stX_4"
- };
- return names[index];
- }
- static X87Register FromAllocationIndex(int index) {
- STATIC_ASSERT(sizeof(X87Register) == sizeof(IntelDoubleRegister));
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- X87Register result;
- result.code_ = index;
- return result;
- }
-
- static int ToAllocationIndex(X87Register reg) {
- return reg.code_;
- }
+ int code_;
};
-#define stX_0 static_cast<const X87Register&>(double_register_0)
-#define stX_1 static_cast<const X87Register&>(double_register_1)
-#define stX_2 static_cast<const X87Register&>(double_register_2)
-#define stX_3 static_cast<const X87Register&>(double_register_3)
-#define stX_4 static_cast<const X87Register&>(double_register_4)
+typedef XMMRegister DoubleRegister;
-typedef IntelDoubleRegister DoubleRegister;
+
+const XMMRegister xmm0 = { 0 };
+const XMMRegister xmm1 = { 1 };
+const XMMRegister xmm2 = { 2 };
+const XMMRegister xmm3 = { 3 };
+const XMMRegister xmm4 = { 4 };
+const XMMRegister xmm5 = { 5 };
+const XMMRegister xmm6 = { 6 };
+const XMMRegister xmm7 = { 7 };
+const XMMRegister no_xmm_reg = { -1 };
enum Condition {
@@ -310,8 +246,8 @@ inline Condition NegateCondition(Condition cc) {
}
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cc) {
switch (cc) {
case below:
return above;
@@ -331,7 +267,7 @@ inline Condition ReverseCondition(Condition cc) {
return greater_equal;
default:
return cc;
- };
+ }
}
@@ -516,77 +452,6 @@ class Displacement BASE_EMBEDDED {
};
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a CpuFeatureScope before use.
-// Example:
-// if (assembler->IsSupported(SSE2)) {
-// CpuFeatureScope fscope(assembler, SSE2);
-// // Generate SSE2 floating point code.
-// } else {
-// // Generate standard x87 floating point code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (Check(f, cross_compile_)) return true;
- if (f == SSE2 && !FLAG_enable_sse2) return false;
- if (f == SSE3 && !FLAG_enable_sse3) return false;
- if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
- return Check(f, supported_);
- }
-
- static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, found_by_runtime_probing_only_);
- }
-
- static bool IsSafeForSnapshot(CpuFeature f) {
- return Check(f, cross_compile_) ||
- (IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
- }
-
- static bool VerifyCrossCompiling() {
- return cross_compile_ == 0;
- }
-
- static bool VerifyCrossCompiling(CpuFeature f) {
- uint64_t mask = flag2set(f);
- return cross_compile_ == 0 ||
- (cross_compile_ & mask) == mask;
- }
-
- private:
- static bool Check(CpuFeature f, uint64_t set) {
- return (set & flag2set(f)) != 0;
- }
-
- static uint64_t flag2set(CpuFeature f) {
- return static_cast<uint64_t>(1) << f;
- }
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static uint64_t supported_;
- static uint64_t found_by_runtime_probing_only_;
-
- static uint64_t cross_compile_;
-
- friend class ExternalReference;
- friend class PlatformFeatureScope;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
class Assembler : public AssemblerBase {
private:
// We check before assembling an instruction that there is sufficient
@@ -624,8 +489,25 @@ class Assembler : public AssemblerBase {
void GetCode(CodeDesc* desc);
// Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc);
- inline static void set_target_address_at(Address pc, Address target);
+ inline static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ inline static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -634,8 +516,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
}
static const int kSpecialTargetSize = kPointerSize;
@@ -882,6 +764,8 @@ class Assembler : public AssemblerBase {
void bt(const Operand& dst, Register src);
void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(const Operand& dst, Register src);
+ void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
+ void bsr(Register dst, const Operand& src);
// Miscellaneous
void hlt();
@@ -1052,6 +936,7 @@ class Assembler : public AssemblerBase {
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
+ void sqrtsd(XMMRegister dst, const Operand& src);
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
@@ -1155,7 +1040,9 @@ class Assembler : public AssemblerBase {
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
- inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+ inline bool buffer_overflow() const {
+ return pc_ >= reloc_info_writer.pos() - kGap;
+ }
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1174,6 +1061,12 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
protected:
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
@@ -1251,7 +1144,7 @@ class Assembler : public AssemblerBase {
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->overflow()) assembler_->GrowBuffer();
+ if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
#endif
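
CommuteCondition's contract, {a cond b == b cond' a}, can be sanity-checked
exhaustively. A small self-contained check mirroring the switch above, trimmed
to the four unsigned conditions:

    #include <cassert>

    enum Condition { below, above, below_equal, above_equal };

    Condition Commute(Condition cc) {
      switch (cc) {
        case below:       return above;
        case above:       return below;
        case below_equal: return above_equal;
        case above_equal: return below_equal;
      }
      return cc;
    }

    bool Holds(Condition cc, unsigned a, unsigned b) {
      switch (cc) {
        case below:       return a < b;
        case above:       return a > b;
        case below_equal: return a <= b;
        case above_equal: return a >= b;
      }
      return false;
    }

    int main() {
      const Condition all[] = {below, above, below_equal, above_equal};
      for (unsigned a = 0; a < 4; ++a)
        for (unsigned b = 0; b < 4; ++b)
          for (Condition cc : all)
            assert(Holds(cc, a, b) == Holds(Commute(cc), b, a));
      return 0;
    }
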
diff --git a/chromium/v8/src/ia32/builtins-ia32.cc b/chromium/v8/src/ia32/builtins-ia32.cc
index 5a3fa78e339..c62afb01fab 100644
--- a/chromium/v8/src/ia32/builtins-ia32.cc
+++ b/chromium/v8/src/ia32/builtins-ia32.cc
@@ -1,37 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -74,19 +52,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function.
__ push(edi);
- // Push call kind information.
- __ push(ecx);
// Function is also the parameter to the runtime call.
__ push(edi);
__ CallRuntime(function_id, 1);
- // Restore call kind information.
- __ pop(ecx);
// Restore receiver.
__ pop(edi);
}
@@ -100,7 +74,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -112,37 +92,35 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool create_memento) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
+ // -- ebx: allocation site or undefined
// -----------------------------------
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(ebx);
+ __ push(ebx);
+ }
+
// Store a smi-tagged arguments count on the stack.
__ SmiTag(eax);
__ push(eax);
@@ -155,12 +133,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
__ j(not_equal, &rt_call);
-#endif
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
@@ -181,23 +157,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpInstanceType(eax, JS_FUNCTION_TYPE);
__ j(equal, &rt_call);
- if (count_constructions) {
+ if (!is_api_function) {
Label allocate;
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCount::kShift +
+ Map::ConstructionCount::kSize == 32);
+ // Check if slack tracking is enabled.
+ __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
+ __ shr(esi, Map::ConstructionCount::kShift);
+ __ j(zero, &allocate); // JSFunction::kNoSlackTracking
// Decrease generous allocation count.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ dec_b(FieldOperand(ecx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
+ __ sub(FieldOperand(eax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCount::kShift));
+
+ __ cmp(esi, JSFunction::kFinishSlackTracking);
+ __ j(not_equal, &allocate);
__ push(eax);
__ push(edi);
__ push(edi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ pop(edi);
__ pop(eax);
+ __ xor_(esi, esi); // JSFunction::kNoSlackTracking
__ bind(&allocate);
}
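
The slack-tracking hunk above moves the construction counter from
SharedFunctionInfo into the top bits of the map's bit_field3 word, which is
why a single sub of (1 << ConstructionCount::kShift) decrements it in place
and a shr reads it out. A toy model of that encoding; the field constants are
illustrative, only shift + size == 32 is guaranteed by the STATIC_ASSERTs:

    #include <cassert>
    #include <cstdint>

    constexpr int kShift = 24;                // ConstructionCount::kShift
    constexpr uint32_t kNoSlackTracking = 0;  // counter exhausted

    uint32_t ReadCount(uint32_t bit_field3) { return bit_field3 >> kShift; }

    uint32_t DecrementCount(uint32_t bit_field3) {
      assert(ReadCount(bit_field3) != kNoSlackTracking);
      // Counter occupies the topmost bits, so the borrow never reaches the
      // unrelated low bits of bit_field3.
      return bit_field3 - (1u << kShift);
    }

    int main() {
      uint32_t bf3 = (7u << kShift) | 0x00ABCDEFu;  // count=7, low bits intact
      bf3 = DecrementCount(bf3);
      assert(ReadCount(bf3) == 6 && (bf3 & 0x00FFFFFFu) == 0x00ABCDEFu);
      return 0;
    }
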
@@ -207,23 +192,37 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
+ if (create_memento) {
+ __ add(edi, Immediate(AllocationMemento::kSize));
+ }
+
__ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+ Factory* factory = masm->isolate()->factory();
+
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
- // edi: start of next object
+ // edi: start of next object (including memento if create_memento)
__ mov(Operand(ebx, JSObject::kMapOffset), eax);
- Factory* factory = masm->isolate()->factory();
__ mov(ecx, factory->empty_fixed_array());
__ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
__ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
// Set extra fields in the newly allocated object.
// eax: initial map
// ebx: JSObject
- // edi: start of next object
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+ // edi: start of next object (including memento if create_memento)
+ // esi: slack tracking counter (non-API function case)
__ mov(edx, factory->undefined_value());
- if (count_constructions) {
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ cmp(esi, JSFunction::kNoSlackTracking);
+ __ j(equal, &no_inobject_slack_tracking);
+
+ // Allocate object with a slack.
__ movzx_b(esi,
FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
__ lea(esi,
@@ -236,8 +235,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
__ InitializeFieldsWithFiller(ecx, esi, edx);
__ mov(edx, factory->one_pointer_filler_map());
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+
+ if (create_memento) {
+ __ lea(esi, Operand(edi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+ // Fill in memento fields if necessary.
+ // esi: points to the allocated but uninitialized memento.
+ __ mov(Operand(esi, AllocationMemento::kMapOffset),
+ factory->allocation_memento_map());
+ // Get the cell or undefined.
+ __ mov(edx, Operand(esp, kPointerSize*2));
+ __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
+ edx);
+ } else {
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
}
- __ InitializeFieldsWithFiller(ecx, edi, edx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -328,16 +345,49 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
__ bind(&rt_call);
- // Must restore edi (constructor) before calling runtime.
- __ mov(edi, Operand(esp, 0));
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ mov(edi, Operand(esp, kPointerSize * 2));
+ __ push(edi);
+ offset = kPointerSize;
+ }
+
+ // Must restore esi (context) and edi (constructor) before calling runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
// edi: function (constructor)
__ push(edi);
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(ebx, eax); // store result in ebx
+ // If we ended up using the runtime and we want a memento, then the
+ // runtime call made it for us, and we shouldn't increment the create
+ // count ourselves.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// New object allocated.
// ebx: newly allocated object
__ bind(&allocated);
+
+ if (create_memento) {
+ __ mov(ecx, Operand(esp, kPointerSize * 2));
+ __ cmp(ecx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // ecx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ add(FieldOperand(ecx, AllocationSite::kPretenureCreateCountOffset),
+ Immediate(Smi::FromInt(1)));
+ __ bind(&count_incremented);
+ }
+
// Retrieve the function from the stack.
__ pop(edi);
@@ -369,17 +419,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ __ call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ NullCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -421,13 +469,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
@@ -441,7 +484,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
- __ Set(esi, Immediate(0));
+ __ Move(esi, Immediate(0));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -463,7 +506,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
Label loop, entry;
- __ Set(ecx, Immediate(0));
+ __ Move(ecx, Immediate(0));
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
@@ -480,15 +523,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ mov(ebx, Immediate(undefined_sentinel));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
__ CallStub(&stub);
} else {
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ NullCallWrapper());
}
// Exit the internal frame. Notice that this also removes the empty.
@@ -509,19 +550,37 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function.
+ __ push(edi);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+ // Restore receiver.
+ __ pop(edi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
}
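
Ignoring frame setup, both builtins reduce to "ask the runtime to compile
(function, concurrent) and tail-jump into the returned code object". A
hedged sketch of that control flow, with stand-in types for the V8
internals:

    #include <cstdio>

    // Stand-ins for V8 internals; this mirrors control flow only.
    struct Code { void (*entry)(); };

    static void OptimizedEntry() { std::puts("optimized code running"); }

    // Assumption: models Runtime::kHiddenCompileOptimized returning a Code
    // object whose header-adjusted entry point the builtin then jumps to
    // (GenerateTailCallToReturnedCode).
    static Code CompileOptimized(bool concurrent) {
      (void)concurrent;  // whether compilation happens on a background thread
      return Code{&OptimizedEntry};
    }

    int main() {
      Code code = CompileOptimized(/*concurrent=*/false);
      code.entry();  // the "tail call" into freshly compiled code
    }
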
@@ -611,7 +670,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -627,12 +686,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- if (Serializer::enabled()) {
- PlatformFeatureScope sse2(SSE2);
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
- } else {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
- }
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
@@ -643,7 +697,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
// Tear down internal frame.
}
@@ -710,7 +764,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3a. Patch the first argument if necessary when calling a function.
Label shift_arguments;
- __ Set(edx, Immediate(0)); // indicate regular JS_FUNCTION
+ __ Move(edx, Immediate(0)); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -726,7 +780,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
// Call ToObject on the receiver if it is not an object, or use the
@@ -750,7 +804,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ push(ebx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(ebx, eax);
- __ Set(edx, Immediate(0)); // restore
+ __ Move(edx, Immediate(0)); // restore
__ pop(eax);
__ SmiUntag(eax);
@@ -760,14 +814,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
__ jmp(&patch_receiver);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
+ __ mov(ebx,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
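
The global-receiver paths in this file now go through Context::SlotOffset
instead of three chained loads. That helper is essentially the
header-relative byte offset of a context slot; a sketch under stated
assumptions (ia32 pointer size, a map-plus-length object header, heap
object tag of 1; the real constants live in V8's contexts.h and globals.h):

    // Assumed constants, illustrative only.
    const int kPointerSize = 4;                       // ia32
    const int kContextHeaderSize = 2 * kPointerSize;  // map + length words
    const int kHeapObjectTag = 1;                     // tagged-pointer bias

    // Byte offset of context slot `index` relative to a tagged Context*.
    int SlotOffset(int index) {
      return kContextHeaderSize + index * kPointerSize - kHeapObjectTag;
    }
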
__ bind(&patch_receiver);
@@ -778,11 +827,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3b. Check for function proxy.
__ bind(&slow);
- __ Set(edx, Immediate(1)); // indicate function proxy
+ __ Move(edx, Immediate(1)); // indicate function proxy
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
__ j(equal, &shift_arguments);
__ bind(&non_function);
- __ Set(edx, Immediate(2)); // indicate non-function
+ __ Move(edx, Immediate(2)); // indicate non-function
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -810,7 +859,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label function, non_proxy;
__ test(edx, edx);
__ j(zero, &function);
- __ Set(ebx, Immediate(0));
+ __ Move(ebx, Immediate(0));
__ cmp(edx, Immediate(1));
__ j(not_equal, &non_proxy);
@@ -818,13 +867,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ push(edi); // re-add proxy object as additional argument
__ push(edx);
__ inc(eax);
- __ SetCallKind(ecx, CALL_AS_FUNCTION);
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
- __ SetCallKind(ecx, CALL_AS_METHOD);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -839,14 +886,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
- __ SetCallKind(ecx, CALL_AS_METHOD);
__ cmp(eax, ebx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
ParameterCount expected(0);
- __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
@@ -883,7 +928,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ push(Operand(ebp, 4 * kPointerSize)); // push this
__ push(eax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
// End of stack check.
@@ -898,7 +943,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(ebx, Operand(ebp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
+ Label push_receiver, use_global_receiver;
__ mov(edi, Operand(ebp, kFunctionOffset));
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &push_receiver);
@@ -908,7 +953,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute the receiver.
// Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
+ Label call_to_object;
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
@@ -921,7 +966,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
// Call ToObject on the receiver if it is not an object, or use the
// global object if it is null or undefined.
__ JumpIfSmi(ebx, &call_to_object);
@@ -939,13 +984,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(ebx, eax);
__ jmp(&push_receiver);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
+ __ mov(ebx,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -979,7 +1020,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ cmp(ecx, Operand(ebp, kLimitOffset));
__ j(not_equal, &loop);
- // Invoke the function.
+ // Call the function.
Label call_proxy;
__ mov(eax, ecx);
ParameterCount actual(eax);
@@ -987,18 +1028,16 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(edi, Operand(ebp, kFunctionOffset));
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &call_proxy);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
__ push(edi); // add function proxy as last argument
__ inc(eax);
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
+ __ Move(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1061,10 +1100,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// Tail-call a stub.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ mov(ebx, Immediate(undefined_sentinel));
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1135,7 +1171,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Set properties and elements.
Factory* factory = masm->isolate()->factory();
- __ Set(ecx, Immediate(factory->empty_fixed_array()));
+ __ Move(ecx, Immediate(factory->empty_fixed_array()));
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
@@ -1176,7 +1212,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into ebx, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ Set(ebx, Immediate(factory->empty_string()));
+ __ Move(ebx, Immediate(factory->empty_string()));
__ pop(ecx);
__ lea(esp, Operand(esp, kPointerSize));
__ push(ecx);
@@ -1195,6 +1231,33 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- eax : actual number of arguments
+ // -- ebx : expected number of arguments
+ // -- edi : function (passed through to callee)
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(edx, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the amount of space we have left. The stack may already have
+ // overflowed here, which will make ecx negative.
+ __ mov(ecx, esp);
+ __ sub(ecx, edx);
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, ebx);
+ __ shl(edx, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, edx);
+ __ j(less_equal, stack_overflow); // Signed comparison.
+}
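
The check is pure pointer arithmetic; the same computation in plain C++
(register names kept as variables, signedness matching the `less_equal`
jump) would read:

    #include <cstdint>

    // Sketch of ArgumentsAdaptorStackCheck; kPointerSizeLog2 is 2 on ia32.
    bool ArgumentsWouldOverflowStack(uintptr_t esp, uintptr_t real_stack_limit,
                                     uint32_t expected_args) {
      uintptr_t space_left = esp - real_stack_limit;  // wraps if already past
      uintptr_t space_needed = uintptr_t{expected_args} << 2;
      // Signed comparison, as in the assembly: a wrapped ("negative")
      // space_left also takes the overflow path.
      return static_cast<intptr_t>(space_left) <=
             static_cast<intptr_t>(space_needed);
    }
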
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, esp);
@@ -1233,14 +1296,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
- // -- ecx : call kind information
- // -- edx : code entry to call
+ // -- edi : function (passed through to callee)
// -----------------------------------
Label invoke, dont_adapt_arguments;
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
+ Label stack_overflow;
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
Label enough, too_few;
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ cmp(eax, ebx);
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -1313,6 +1379,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ jmp(edx);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ int3();
+ }
}
@@ -1321,17 +1395,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- __ mov(ebx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ sub(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ sub(edx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ SmiTag(edx);
-
- // Pass both function and pc offset as arguments.
+ // Pass function as argument.
__ push(eax);
- __ push(edx);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
Label skip;
@@ -1370,7 +1436,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.cc b/chromium/v8/src/ia32/code-stubs-ia32.cc
index 04818149202..e61d22138f1 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.cc
+++ b/chromium/v8/src/ia32/code-stubs-ia32.cc
@@ -1,61 +1,45 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "isolate.h"
-#include "jsregexp.h"
-#include "regexp-macro-assembler.h"
-#include "runtime.h"
-#include "stub-cache.h"
-#include "codegen.h"
-#include "runtime.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
+#include "src/codegen.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { ebx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edi };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax };
descriptor->register_param_count_ = 1;
@@ -65,50 +49,51 @@ void ToNumberStub::InitializeInterfaceDescriptor(
void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax, ebx, ecx };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->register_param_representations_ = representations;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax, ebx, ecx, edx };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { ebx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { ebx, edx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx, ecx };
descriptor->register_param_count_ = 2;
@@ -119,7 +104,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx, ecx };
descriptor->register_param_count_ = 2;
@@ -129,8 +113,27 @@ void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
}
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx, ebx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx };
descriptor->register_param_count_ = 1;
@@ -140,7 +143,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx };
descriptor->register_param_count_ = 1;
@@ -149,21 +151,25 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
-void KeyedArrayCallStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void StringLengthStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { ecx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
- descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx, ecx, eax };
descriptor->register_param_count_ = 3;
@@ -174,7 +180,6 @@ void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax, ebx };
descriptor->register_param_count_ = 2;
@@ -191,7 +196,7 @@ static void InitializeArrayConstructorDescriptor(
// register state
// eax -- number of arguments
// edi -- function
- // ebx -- type info cell with elements kind
+ // ebx -- allocation site with elements kind
static Register registers_variable_args[] = { edi, ebx, eax };
static Register registers_no_args[] = { edi, ebx };
@@ -204,17 +209,21 @@ static void InitializeArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = eax;
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
@@ -232,59 +241,56 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = eax;
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
}
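
The pattern repeated throughout this hunk: InitializeInterfaceDescriptor
loses its Isolate* parameter because the stub base class now carries the
isolate itself. In miniature, with illustrative types:

    struct Isolate {};

    // The base class now owns the isolate and exposes an accessor, so the
    // per-stub initializers no longer need it passed in.
    class CodeStubSketch {
     public:
      explicit CodeStubSketch(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }
     private:
      Isolate* isolate_;
    };

    class SomeStub : public CodeStubSketch {
     public:
      using CodeStubSketch::CodeStubSketch;
      void InitializeInterfaceDescriptor(/* descriptor */) {
        Isolate* iso = isolate();  // was: a function parameter
        (void)iso;
      }
    };

    int main() {
      Isolate isolate;
      SomeStub stub(&isolate);
      stub.InitializeInterfaceDescriptor();
    }
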
void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax };
descriptor->register_param_count_ = 1;
@@ -292,11 +298,10 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}
void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax };
descriptor->register_param_count_ = 1;
@@ -304,12 +309,11 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}
void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx, ecx, eax };
descriptor->register_param_count_ = 3;
@@ -320,7 +324,6 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax, ebx, ecx, edx };
descriptor->register_param_count_ = 4;
@@ -331,25 +334,117 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
void BinaryOpICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx, eax };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
}
-void NewStringAddStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx, edx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx, eax };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { edi, // JSFunction
+ esi, // context
+ eax, // actual number of arguments
+ ebx, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { esi, // context
+ ecx, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { esi, // context
+ ecx, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { esi, // context
+ edx, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { eax, // callee
+ ebx, // call_data
+ ecx, // holder
+ edx, // api_function_address
+ esi, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
}
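
All five blocks above share one shape: a static register list, a parallel
list of value representations, and a count wired into the descriptor. A
condensed illustration (the enums and struct are stand-ins, not V8 types):

    #include <cstddef>

    enum class Reg { eax, ebx, ecx, edx, esi, edi };
    enum class Rep { Tagged, Integer32, External };

    // Illustrative shape of a call-interface descriptor: parallel arrays
    // plus their shared length.
    struct CallDescriptorSketch {
      const Reg* registers;
      const Rep* representations;
      size_t register_param_count;
    };

    // Mirrors the ArgumentAdaptorCall block: function, context, actual and
    // expected argument counts.
    const Reg kAdaptorRegs[] = {Reg::edi, Reg::esi, Reg::eax, Reg::ebx};
    const Rep kAdaptorReps[] = {Rep::Tagged, Rep::Tagged, Rep::Integer32,
                                Rep::Integer32};
    const CallDescriptorSketch kAdaptorCall = {kAdaptorRegs, kAdaptorReps, 4};
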
@@ -358,10 +453,9 @@ void NewStringAddStub::InitializeInterfaceDescriptor(
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
@@ -380,127 +474,14 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Set up the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- factory->function_context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // Set up the fixed slots.
- __ Set(ebx, Immediate(0)); // Set to NULL.
- __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
- __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
-
- // Copy the global object from the previous context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);
-
- // Initialize the rest of the slots to undefined.
- __ mov(ebx, factory->undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
- }
-
- // Return and remove the on-stack parameter.
- __ mov(esi, eax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + (1 * kPointerSize)]: function
- // [esp + (2 * kPointerSize)]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length), eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function or sentinel from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Get the serialized scope info from the stack.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
-
- // Set up the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- factory->block_context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- __ cmp(ecx, 0);
- __ Assert(equal, kExpected0AsASmiSentinel);
- }
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
- __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots.
- __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
- __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
- __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
-
- // Copy the global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- __ mov(ContextOperand(eax, Context::GLOBAL_OBJECT_INDEX), ebx);
-
- // Initialize the rest of the slots to the hole value.
- if (slots_ == 1) {
- __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
- factory->the_hole_value());
- } else {
- __ mov(ebx, factory->the_hole_value());
- for (int i = 0; i < slots_; i++) {
- __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ mov(esi, eax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
__ pushad();
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, SSE2);
- __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ __ sub(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
__ movsd(Operand(esp, i * kDoubleSize), reg);
}
@@ -510,17 +491,16 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, ecx);
__ mov(Operand(esp, 0 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
+ Immediate(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, SSE2);
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
__ movsd(reg, Operand(esp, i * kDoubleSize));
}
- __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ __ add(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
__ popad();
__ ret(0);
@@ -649,15 +629,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ shrd(result_reg, scratch1);
__ shr_cl(result_reg);
__ test(ecx, Immediate(32));
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatureScope use_cmov(masm, CMOV);
- __ cmov(not_equal, scratch1, result_reg);
- } else {
- Label skip_mov;
- __ j(equal, &skip_mov, Label::kNear);
- __ mov(scratch1, result_reg);
- __ bind(&skip_mov);
- }
+ __ cmov(not_equal, scratch1, result_reg);
}
// If the double was negative, negate the integer result.
@@ -669,15 +641,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
} else {
__ cmp(exponent_operand, Immediate(0));
}
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatureScope use_cmov(masm, CMOV);
__ cmov(greater, result_reg, scratch1);
- } else {
- Label skip_mov;
- __ j(less_equal, &skip_mov, Label::kNear);
- __ mov(result_reg, scratch1);
- __ bind(&skip_mov);
- }
// Restore registers
__ bind(&done);
@@ -695,316 +659,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // esp[4]: tagged number input argument (should be number).
- // esp[0]: return address.
- // Output:
- // eax: tagged double result.
- // UNTAGGED case:
- // Input::
- // esp[0]: return address.
- // xmm1: untagged double input argument
- // Output:
- // xmm1: untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- // Test that eax is a number.
- Label input_not_smi;
- Label loaded;
- __ mov(eax, Operand(esp, kPointerSize));
- __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the low and high words of the double into ebx, edx.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sar(eax, 1);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ mov(Operand(esp, 0), eax);
- __ fild_s(Operand(esp, 0));
- __ fst_d(Operand(esp, 0));
- __ pop(edx);
- __ pop(ebx);
- __ jmp(&loaded, Label::kNear);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // low and high words into ebx, edx.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse4_scope(masm, SSE4_1);
- __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
- } else {
- __ pshufd(xmm0, xmm1, 0x1);
- __ movd(edx, xmm0);
- }
- __ movd(ebx, xmm1);
- }
-
- // ST[0] or xmm1 == double value
- // ebx = low 32 bits of double value
- // edx = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ mov(ecx, ebx);
- __ xor_(ecx, edx);
- __ mov(eax, ecx);
- __ sar(eax, 16);
- __ xor_(ecx, eax);
- __ mov(eax, ecx);
- __ sar(eax, 8);
- __ xor_(ecx, eax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ and_(ecx,
- Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] or xmm1 == double value.
- // ebx = low 32 bits of double value.
- // edx = high 32 bits of double value.
- // ecx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ mov(eax, Immediate(cache_array));
- int cache_array_index =
- type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
- __ mov(eax, Operand(eax, cache_array_index));
- // Eax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, eax);
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
- // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
- __ lea(ecx, Operand(ecx, ecx, times_2, 0));
- __ lea(ecx, Operand(eax, ecx, times_4, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmp(ebx, Operand(ecx, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- __ cmp(edx, Operand(ecx, kIntSize));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Cache hit!
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->transcendental_cache_hit(), 1);
- __ mov(eax, Operand(ecx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0);
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- __ IncrementCounter(counters->transcendental_cache_miss(), 1);
- // Update cache with new value.
- // We are short on registers, so use no_reg as scratch.
- // This gives slightly larger code.
- if (tagged) {
- __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
- __ movsd(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- }
- GenerateOperation(masm, type_);
- __ mov(Operand(ecx, 0), ebx);
- __ mov(Operand(ecx, kIntSize), edx);
- __ mov(Operand(ecx, 2 * kIntSize), eax);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
- __ movsd(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- GenerateOperation(masm, type_);
- __ fstp_d(Operand(esp, 0));
- __ movsd(xmm1, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- ExternalReference runtime =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime, 1, 1);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(eax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
- MacroAssembler* masm, TranscendentalCache::Type type) {
- // Only free register is edi.
- // Input value is on FP stack, and also in ebx/edx.
- // Input value is possibly in xmm1.
- // Address of result (a newly allocated HeapNumber) may be in eax.
- if (type == TranscendentalCache::SIN ||
- type == TranscendentalCache::COS ||
- type == TranscendentalCache::TAN) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range, done;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ mov(edi, edx);
- __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
- int supported_exponent_limit =
- (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(edi, Immediate(supported_exponent_limit));
- __ j(below, &in_range, Label::kNear);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmp(edi, Immediate(0x7ff00000));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, Label::kNear);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ push(Immediate(0x7ff80000));
- __ push(Immediate(0));
- __ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(2 * kPointerSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ mov(edi, eax); // Save eax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(eax, Immediate(5));
- __ j(zero, &no_exceptions, Label::kNear);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ test(eax, Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- __ fstp(0);
- __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
-
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- case TranscendentalCache::TAN:
- // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
- // FP register stack.
- __ fptan();
- __ fstp(0); // Pop FP register stack.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -1078,8 +732,7 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatureScope use_sse2(masm, SSE2);
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
const Register exponent = eax;
const Register base = edx;
const Register scratch = ecx;
@@ -1144,8 +797,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&try_arithmetic_simplification);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cvttsd2si(exponent, Operand(double_exponent));
- __ cmp(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
+ __ cmp(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
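
The replaced comparison is a flag trick worth spelling out: cvttsd2si
yields the "indefinite integer" 0x80000000 for NaN and out-of-range inputs,
and `cmp exponent, 1` sets the overflow flag for exactly that value, since
INT_MIN - 1 is the only signed underflow here. Equivalently, in C++:

    #include <cstdint>
    #include <limits>

    // Equivalent of `cmp exponent, 1; j overflow`: x - 1 underflows only
    // for INT_MIN, the indefinite-integer pattern cvttsd2si produces for
    // NaN or out-of-range doubles.
    bool IsIndefiniteInteger(int32_t x) {
      return x == std::numeric_limits<int32_t>::min();  // 0x80000000
    }
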
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -1308,11 +961,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1329,7 +982,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movsd(Operand(esp, 0 * kDoubleSize), double_base);
__ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 4);
+ ExternalReference::power_double_double_function(isolate()), 4);
}
// Return value is in st(0) on ia32.
// Store it into the (fixed) result register.
@@ -1354,7 +1007,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->prototype_string()));
+ __ cmp(ecx, Immediate(isolate()->factory()->prototype_string()));
__ j(not_equal, &miss);
}
@@ -1365,91 +1018,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = edx;
- Register value = eax;
- Register scratch = ebx;
-
- if (kind() == Code::KEYED_STORE_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
@@ -1509,7 +1077,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
@@ -1530,13 +1098,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ mov(Operand(esp, 2 * kPointerSize), edx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
- Isolate* isolate = masm->isolate();
-
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// esp[0] : return address
// esp[4] : number of parameters (tagged)
// esp[8] : receiver displacement
@@ -1594,7 +1160,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
+ __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
@@ -1612,7 +1178,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
__ mov(edi, Operand(edi,
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX)));
__ jmp(&copy, Label::kNear);
__ bind(&has_mapped_parameters);
@@ -1649,7 +1215,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
// eax = address of new object (tagged)
@@ -1668,7 +1234,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ j(zero, &skip_parameter_map);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate->factory()->non_strict_arguments_elements_map()));
+ Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
__ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
__ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
@@ -1689,7 +1255,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ add(ebx, Operand(esp, 4 * kPointerSize));
__ sub(ebx, eax);
- __ mov(ecx, isolate->factory()->the_hole_value());
+ __ mov(ecx, isolate()->factory()->the_hole_value());
__ mov(edx, edi);
__ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
// eax = loop variable (tagged)
@@ -1724,7 +1290,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// esp[16] = address of receiver argument
// Copy arguments header and remaining slots (if there are any).
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate->factory()->fixed_array_map()));
+ Immediate(isolate()->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
Label arguments_loop, arguments_test;
@@ -1755,13 +1321,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ pop(eax); // Remove saved parameter count.
__ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
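
The mapped-arguments layout this fast path builds is easiest to see in miniature. Below is a hedged C++ sketch of the two-level lookup behind sloppy (aliased) arguments; the struct, the hole value, and the field names are illustrative stand-ins, not V8's encodings:

    #include <cstdio>
    #include <vector>

    // Two-level layout sketched by GenerateNewSloppyFast: a parameter map
    // whose entries either alias a context slot (mapped parameters) or are
    // holes, backed by a plain elements array for everything unmapped.
    struct SloppyArguments {
      static const int kHole = -1;
      std::vector<int> parameter_map;  // context slot index, or kHole
      std::vector<int> context;        // storage for mapped parameters
      std::vector<int> backing;        // storage for unmapped arguments

      int Get(size_t i) const {
        if (i < parameter_map.size() && parameter_map[i] != kHole)
          return context[parameter_map[i]];  // aliased with the context
        return backing[i];                   // plain element
      }
    };

    int main() {
      // f(a, b) called as f(1, 2, 3): a and b are mapped, 3 is unmapped.
      SloppyArguments args;
      args.parameter_map = {0, 1, SloppyArguments::kHole};
      args.context = {1, 2};
      args.backing = {0, 0, 3};
      for (size_t i = 0; i < 3; ++i)
        printf("arguments[%zu] = %d\n", i, args.Get(i));
      return 0;
    }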
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- Isolate* isolate = masm->isolate();
-
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
@@ -1794,7 +1358,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ add(ecx, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -1803,7 +1367,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
__ mov(edi, Operand(edi, offset));
// Copy the JS object part.
@@ -1829,10 +1393,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
+ __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate->factory()->fixed_array_map()));
+ Immediate(isolate()->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
@@ -1854,7 +1418,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
}
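
For orientation, the single Allocate() call above covers the arguments object plus, when argc is non-zero, one FixedArray. A minimal sketch of that size computation, using assumed 32-bit constants rather than V8's actual values:

    #include <cstdio>

    const int kPointerSize = 4;                               // assumed ia32
    const int kFixedArrayHeaderSize = 2 * kPointerSize;       // map + length
    const int kStrictArgumentsObjectSize = 5 * kPointerSize;  // assumed

    // Mirrors the stub: add FixedArray header + argc slots only when there
    // are arguments, then the strict arguments object itself.
    int StrictArgumentsAllocationSize(int argc) {
      int size = kStrictArgumentsObjectSize;
      if (argc > 0) size += kFixedArrayHeaderSize + argc * kPointerSize;
      return size;
    }

    int main() {
      printf("argc=0 -> %d bytes\n", StrictArgumentsAllocationSize(0));
      printf("argc=3 -> %d bytes\n", StrictArgumentsAllocationSize(3));
      return 0;
    }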
@@ -1863,7 +1427,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if the regexp entry in generated code is turned off via a runtime
// switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1879,14 +1443,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static const int kJSRegExpOffset = 4 * kPointerSize;
Label runtime;
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(
- masm->isolate());
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ test(ebx, ebx);
__ j(zero, &runtime);
@@ -1926,7 +1489,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(above, &runtime);
// Reset offset for possibly sliced string.
- __ Set(edi, Immediate(0));
+ __ Move(edi, Immediate(0));
__ mov(eax, Operand(esp, kSubjectOffset));
__ JumpIfSmi(eax, &runtime);
__ mov(edx, eax); // Make a copy of the original subject string.
@@ -2003,8 +1566,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (5b) Is subject external? If yes, go to (8).
__ test_b(ebx, kStringRepresentationMask);
// The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ j(not_zero, &external_string); // Go to (8).
// eax: sequential subject string (or look-alike, external string)
@@ -2020,7 +1583,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
__ j(above_equal, &runtime);
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is one byte.
+ __ Move(ecx, Immediate(1)); // Type is one byte.
// (E) Carry on. String handling is done.
__ bind(&check_code);
@@ -2035,7 +1598,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: code
// ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
@@ -2044,7 +1607,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
__ mov(Operand(esp, 8 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
+ Immediate(ExternalReference::isolate_address(isolate())));
// Argument 8: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
@@ -2061,7 +1624,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 5: static offsets vector buffer.
__ mov(Operand(esp, 4 * kPointerSize),
Immediate(ExternalReference::address_of_static_offsets_vector(
- masm->isolate())));
+ isolate())));
// Argument 2: Previous index.
__ SmiUntag(ebx);
@@ -2135,8 +1698,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- masm->isolate());
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ isolate());
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
__ mov(eax, Operand::StaticVariable(pending_exception));
__ cmp(edx, eax);
__ j(equal, &runtime);
@@ -2217,7 +1780,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(masm->isolate());
+ ExternalReference::address_of_static_offsets_vector(isolate());
__ mov(ecx, Immediate(address_of_static_offsets_vector));
// ebx: last_match_info backing store (FixedArray)
@@ -2247,7 +1810,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -2288,7 +1851,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
__ j(above_equal, &runtime);
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(ecx, Immediate(0)); // Type is two byte.
+ __ Move(ecx, Immediate(0)); // Type is two byte.
__ jmp(&check_code); // Go to (E).
// (10) Not a string or a short external string? If yes, bail out to runtime.
@@ -2307,88 +1870,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ mov(ebx, Operand(esp, kPointerSize * 3));
- __ JumpIfNotSmi(ebx, &slowcase);
- __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- ebx, // In: Number of elements as a smi
- REGISTER_VALUE_IS_SMI,
- eax, // Out: Start of allocation (tagged).
- ecx, // Out: End of allocation.
- edx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // eax: Start of allocated area, object-tagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ mov(edx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, Immediate(factory->empty_fixed_array()));
- __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
- __ mov(edx, FieldOperand(edx, GlobalObject::kNativeContextOffset));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
-
- // Set input, index and length fields from arguments.
- __ mov(ecx, Operand(esp, kPointerSize * 1));
- __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 2));
- __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 3));
- __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
-
- // Fill out the elements FixedArray.
- // eax: JSArray.
- // ebx: FixedArray.
- // ecx: Number of elements in array, as smi.
-
- // Set map.
- __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory->fixed_array_map()));
- // Set length.
- __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
- // Fill contents of fixed-array with undefined.
- __ SmiUntag(ecx);
- __ mov(edx, Immediate(factory->undefined_value()));
- __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
- // Fill fixed array elements with undefined.
- // eax: JSArray.
- // ecx: Number of elements to fill.
- // ebx: Start of elements in FixedArray.
- // edx: undefined.
- Label loop;
- __ test(ecx, ecx);
- __ bind(&loop);
- __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
- __ sub(ecx, Immediate(1));
- __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
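
The fill loop in the stub removed above counts the untagged length down to zero and stores through the decremented index each pass, so no separate zero check is needed inside the loop body. A direct C++ rendering of that loop shape, modeling undefined as a plain sentinel:

    #include <cstdio>

    // Equivalent of: test(ecx, ecx); loop: j(less_equal, done); sub(ecx, 1);
    // mov([ebx + ecx * ptr], edx); jmp(loop).
    void FillWithSentinel(int* base, int count, int sentinel) {
      while (count > 0) {
        --count;
        base[count] = sentinel;
      }
    }

    int main() {
      int elems[4] = {1, 2, 3, 4};
      FillWithSentinel(elems, 4, -1);
      for (int v : elems) printf("%d ", v);
      printf("\n");
      return 0;
    }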
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -2465,9 +1946,9 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
- __ cmp(edx, masm->isolate()->factory()->undefined_value());
+ __ cmp(edx, isolate()->factory()->undefined_value());
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
__ ret(0);
__ bind(&check_for_nan);
}
@@ -2475,14 +1956,14 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Test for NaN. Compare heap numbers in a general way,
// to handle NaNs correctly.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
+ Immediate(isolate()->factory()->heap_number_map()));
__ j(equal, &generic_heap_number_comparison, Label::kNear);
if (cc != equal) {
// Call runtime on identical JSObjects. Otherwise return equal.
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &not_identical);
}
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -2520,7 +2001,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Check if the non-smi operand is a heap number.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
+ Immediate(isolate()->factory()->heap_number_map()));
// If heap number, handle it in the slow case.
__ j(equal, &slow, Label::kNear);
// Return non-equal (ebx is not zero)
@@ -2565,48 +2046,18 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label non_number_comparison;
Label unordered;
__ bind(&generic_heap_number_comparison);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- CpuFeatureScope use_cmov(masm, CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
- __ Set(eax, Immediate(0));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, ecx);
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, ecx);
+ __ ret(0);
// If one of the numbers was NaN, then the result is always false.
// The cc is never not-equal.
@@ -2688,7 +2139,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ j(zero, &return_unequal, Label::kNear);
// The objects are both undetectable, so they both compare as the value
// undefined, and are equal.
- __ Set(eax, Immediate(EQUAL));
+ __ Move(eax, Immediate(EQUAL));
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
@@ -2723,165 +2174,136 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// eax : number of arguments to the construct function
- // ebx : cache cell for call target
+ // ebx : Feedback vector
+ // edx : slot in feedback vector (Smi)
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(ecx, edi);
- __ j(equal, &done);
- __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done);
-
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the
- // megamorphic sentinel, then we have in the cell either some other
- // function or an AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
- __ j(not_equal, &miss);
+ __ j(equal, &done, Label::kFar);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ j(equal, &done, Label::kFar);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the
+ // megamorphic sentinel, then we have in the slot either some other
+ // function or an AllocationSite. Do a map check on the object in ecx.
+ Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &miss);
- // Load the global or builtins object from the current context
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done, Label::kFar);
+ }
__ bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kNear);
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kFar);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(eax);
- __ push(eax);
- __ push(edi);
- __ push(ebx);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor; create an AllocationSite
+ // if we don't already have it, and store it in the slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ push(edi);
+ __ push(edx);
+ __ push(ebx);
+
+ CreateAllocationSiteStub create_stub(isolate);
+ __ CallStub(&create_stub);
+
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ jmp(&done);
- __ pop(ebx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
+ __ bind(&not_array_function);
}
- __ jmp(&done);
- __ bind(&not_array_function);
- __ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ // We won't need edx or ebx anymore; just save edi.
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
+ __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(edx);
+ __ pop(ebx);
+ __ pop(edi);
__ bind(&done);
}
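
The slot transitions this helper encodes are worth stating compactly. A hedged C++ model of the state machine on one feedback-vector slot, following the non-pretenuring path; the enum names are ours, not V8 identifiers:

    #include <cstdio>

    enum Slot { kUninitialized, kMegamorphic, kAllocationSite,
                kFunctionA, kFunctionB };

    // uninitialized -> monomorphic (a function) or an AllocationSite for the
    // Array constructor; any monomorphic miss -> megamorphic, permanently.
    Slot Record(Slot slot, Slot callee, bool callee_is_array_ctor) {
      if (slot == callee || slot == kMegamorphic) return slot;  // cache hit
      if (slot == kAllocationSite && callee_is_array_ctor) return slot;
      if (slot != kUninitialized) return kMegamorphic;  // monomorphic miss
      return callee_is_array_ctor ? kAllocationSite : callee;
    }

    int main() {
      Slot s = kUninitialized;
      s = Record(s, kFunctionA, false);  // -> monomorphic on A
      s = Record(s, kFunctionA, false);  // hit, unchanged
      s = Record(s, kFunctionB, false);  // -> megamorphic
      printf("final state: %d\n", (int)s);
      return 0;
    }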
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // ebx : cache cell for call target
- // edi : the function to call
- Isolate* isolate = masm->isolate();
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label receiver_ok;
- // Get the receiver from the stack.
- // +1 ~ return address
- __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
- // Call as function is indicated with the hole.
- __ cmp(eax, isolate->factory()->the_hole_value());
- __ j(not_equal, &receiver_ok, Label::kNear);
- // Patch the receiver on the stack with the global receiver object.
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
- __ bind(&receiver_ok);
- }
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &non_function);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, cont);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- }
+ // Do not transform the receiver for natives (shared already in ecx).
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, cont);
+}
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ cmp(eax, isolate->factory()->the_hole_value());
- __ j(equal, &call_as_function);
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- }
+static void EmitSlowCase(Isolate* isolate,
+ MacroAssembler* masm,
+ int argc,
+ Label* non_function) {
// Check for function proxy.
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
+ __ j(not_equal, non_function);
__ pop(ecx);
__ push(edi); // put proxy as additional argument under return address
__ push(ecx);
- __ Set(eax, Immediate(argc_ + 1));
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_FUNCTION);
+ __ Move(eax, Immediate(argc + 1));
+ __ Move(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
@@ -2890,20 +2312,93 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
- __ bind(&non_function);
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
+ __ bind(non_function);
+ __ mov(Operand(esp, (argc + 1) * kPointerSize), edi);
+ __ Move(eax, Immediate(argc));
+ __ Move(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(edi);
+ }
+ __ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
+ __ jmp(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // edi : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(edi, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+ }
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Load the receiver from the stack.
+ __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
+
+ if (needs_checks) {
+ __ JumpIfSmi(eax, &wrap);
+
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // (non_function is bound in EmitSlowCase)
+ EmitSlowCase(masm->isolate(), masm, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
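
Between EmitContinueIfStrictOrNative and EmitWrapCase, the receiver handling reduces to one predicate: sloppy, non-native callees get a primitive receiver boxed via TO_OBJECT before the invoke. A behavioral sketch with stand-in types, not V8's:

    #include <cstdio>

    struct Callee { bool strict_mode; bool native; };

    // True when the stub must route through the wrap path (TO_OBJECT).
    bool NeedsReceiverWrap(const Callee& f, bool receiver_is_spec_object) {
      if (f.strict_mode || f.native) return false;  // receiver passed as-is
      return !receiver_is_spec_object;              // smi/primitive -> wrap
    }

    int main() {
      Callee sloppy = {false, false}, strict = {true, false};
      printf("sloppy + smi receiver: wrap=%d\n",
             (int)NeedsReceiverWrap(sloppy, false));
      printf("strict + smi receiver: wrap=%d\n",
             (int)NeedsReceiverWrap(strict, false));
      return 0;
    }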
+
+
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
- // ebx : cache cell for call target
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// edi : constructor function
Label slow, non_function_call;
@@ -2915,6 +2410,27 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into ebx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by edx + 1.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map =
+ isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(ebx);
}
// Jump to the function-specific construct stub.
@@ -2939,14 +2455,160 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing eax).
- __ Set(ebx, Immediate(0));
+ __ Move(ebx, Immediate(0));
Handle<Code> arguments_adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ SetCallKind(ecx, CALL_AS_METHOD);
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
}
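
The pretenuring branch above relies on a paired layout: the AllocationSite occupies the feedback slot immediately after the call's own slot, hence the extra kPointerSize in the load. A minimal model of that pairing, with invented names:

    #include <cstdio>

    struct FeedbackVector {
      static const int kEntries = 8;
      const void* slots[kEntries];
      const void* CallSlot(int i) const { return slots[i]; }
      // Pretenuring convention: the AllocationSite lives at slot i + 1.
      const void* AllocationSiteSlot(int i) const { return slots[i + 1]; }
    };

    int main() {
      int target, site;
      FeedbackVector v = {};
      v.slots[2] = &target;  // recorded construct target at slot 2
      v.slots[3] = &site;    // paired AllocationSite at slot 3
      printf("site found: %d\n", (int)(v.AllocationSiteSlot(2) == &site));
      return 0;
    }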
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(vector, FieldOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
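
Callers index the vector with FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize). That works because edx holds a smi, i.e. the slot index shifted left by one, so scaling by half a pointer recovers index * kPointerSize. A sketch of the arithmetic with assumed ia32 constants:

    #include <cstdio>

    // An smi-encoded slot (index << 1) times half a pointer equals the plain
    // byte offset index * kPointerSize; the header offset is added on top.
    int SlotByteOffset(int smi_encoded_slot, int pointer_size, int header) {
      return smi_encoded_slot * (pointer_size / 2) + header;
    }

    int main() {
      const int kPointerSize = 4, kFixedArrayHeaderSize = 8;  // assumed
      int smi_slot = 3 << 1;  // slot 3, smi-tagged
      // Expect 3 * 4 + 8 = 20.
      printf("offset = %d\n",
             SlotByteOffset(smi_slot, kPointerSize, kFixedArrayHeaderSize));
      return 0;
    }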
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // edi - function
+ // edx - slot id
+ Label miss;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, ebx);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &miss);
+
+ __ mov(eax, arg_count());
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ // Verify that ebx contains an AllocationSite or undefined.
+ __ AssertUndefinedOrAllocationSite(ebx);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+ // The slow case; we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // edi - function
+ // edx - slot id
+ Isolate* isolate = masm->isolate();
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, ebx);
+
+ // The checks. First, does edi match the recorded monomorphic target?
+ __ cmp(edi, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+
+ // Load the receiver from the stack.
+ __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
+
+ __ JumpIfSmi(eax, &wrap);
+
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &wrap);
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(isolate, masm, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ j(equal, &slow_start);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ j(equal, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ jmp(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Miss);
+
+ // The slow case.
+ __ bind(&slow_start);
+
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(edi, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+ __ jmp(&have_js_function);
+
+ // Unreachable
+ __ int3();
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(ecx, Operand(esp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ push(ecx);
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to edi and exit the internal frame.
+ __ mov(edi, eax);
+ }
+}
+
+
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@@ -2959,55 +2621,42 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the store buffer overflow stubs are generated first.
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- if (Serializer::enabled()) {
- PlatformFeatureScope sse2(SSE2);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
- } else {
- BinaryOpICStub::GenerateAheadOfTime(isolate);
- }
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
void CodeStub::GenerateFPStubs(Isolate* isolate) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CEntryStub save_doubles(1, kSaveFPRegs);
- // Stubs might already be in the snapshot, detect that and don't regenerate,
- // which would lead to code stub initialization state being messed up.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
- save_doubles_code = *(save_doubles.GetCode(isolate));
- }
- isolate->set_fp_stubs_generated(true);
+ CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+ // Stubs might already be in the snapshot, detect that and don't regenerate,
+ // which would lead to code stub initialization state being messed up.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+ save_doubles_code = *(save_doubles.GetCode());
}
+ isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ mov(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, 0xf);
- __ cmp(scratch, 0xf);
- __ j(equal, oom_label);
-}
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // eax: number of arguments including receiver
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // esi: current context (C callee-saved)
+ // edi: JS function of the caller (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // eax: result parameter for PerformGC, if any
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
// esp: stack pointer (restored after C call)
@@ -3021,62 +2670,37 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ CheckStackAlignment();
}
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack alignment is known to be correct. This function takes one argument
- // which is passed on the stack, and we know that the stack has been
- // prepared to pass at least one argument.
- __ mov(Operand(esp, 1 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
- __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- __ inc(Operand::StaticVariable(scope_depth));
- }
-
// Call C function.
__ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
__ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
+ Immediate(ExternalReference::isolate_address(isolate())));
__ call(ebx);
// Result is in eax or edx:eax - do not destroy these registers!
- if (always_allocate_scope) {
- __ dec(Operand::StaticVariable(scope_depth));
- }
-
// Runtime functions should not return 'the hole'. Allowing it to escape may
// lead to crashes in the IC code later.
if (FLAG_debug_code) {
Label okay;
- __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+ __ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &okay, Label::kNear);
__ int3();
__ bind(&okay);
}
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ lea(ecx, Operand(eax, 1));
- // Lower 2 bits of ecx are 0 iff eax has failure tag.
- __ test(ecx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ cmp(eax, isolate()->factory()->exception());
+ __ j(equal, &exception_returned);
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, masm->isolate());
+ Isolate::kPendingExceptionAddress, isolate());
// Check that there is no pending exception, otherwise we
- // should have returned some failure value.
+ // should have returned the exception sentinel.
if (FLAG_debug_code) {
__ push(edx);
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
Label okay;
__ cmp(edx, Operand::StaticVariable(pending_exception_address));
// Cannot use check here as it attempts to generate call into runtime.
@@ -3090,117 +2714,27 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
__ ret(0);
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC continue at retry label
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, Label::kNear);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
+ // Handling of exception.
+ __ bind(&exception_returned);
// Retrieve the pending exception.
__ mov(eax, Operand::StaticVariable(pending_exception_address));
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
-
// Clear the pending exception.
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
__ mov(Operand::StaticVariable(pending_exception_address), edx);
// Special handling of termination exceptions which are uncatchable
// by JavaScript code.
- __ cmp(eax, masm->isolate()->factory()->termination_exception());
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // eax: number of arguments including receiver
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // esi: current context (C callee-saved)
- // edi: JS function of the caller (C callee-saved)
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // NOTE: Invocations of builtins may return failure objects instead
- // of a proper result. The builtin entry handles this by performing
- // a garbage collection and retrying the builtin (twice).
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
-
- // eax: result parameter for PerformGC, if any (setup below)
- // ebx: pointer to builtin function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: argv pointer (C callee-saved)
-
- Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(Operand::StaticVariable(external_caught), Immediate(false));
+ __ cmp(eax, isolate()->factory()->termination_exception());
+ __ j(equal, &throw_termination_exception);
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, eax, ecx, &already_have_failure);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1)));
- __ bind(&already_have_failure);
- __ mov(Operand::StaticVariable(pending_exception), eax);
- // Fall through to the next label.
+ // Handle normal exception.
+ __ Throw(eax);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(eax);
-
- __ bind(&throw_normal_exception);
- __ Throw(eax);
}
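
The move from failure objects to an exception sentinel simplifies the result check: the removed code classified a return value by its low tag bits, while the new code compares against a single object. A sketch contrasting the two, with tag values mirroring the removed STATIC_ASSERTs:

    #include <cstdint>
    #include <cstdio>

    const uintptr_t kFailureTagMask = 3;  // kFailureTag == 3 in the old scheme

    // Old check: lea(ecx, [eax + 1]); test(ecx, mask); failure iff zero.
    bool IsFailureByTag(uintptr_t result) {
      return ((result + 1) & kFailureTagMask) == 0;
    }

    // New check: one compare against the exception sentinel object.
    bool IsExceptionBySentinel(uintptr_t result, uintptr_t sentinel) {
      return result == sentinel;
    }

    int main() {
      const uintptr_t heap_object = 0x1001;  // tagged pointers end in 01
      const uintptr_t failure = 0x2003;      // old failure encoding: bits 11
      printf("tag check: object=%d failure=%d\n",
             (int)IsFailureByTag(heap_object), (int)IsFailureByTag(failure));
      printf("sentinel check: %d\n",
             (int)IsExceptionBySentinel(failure, failure));
      return 0;
    }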
@@ -3224,12 +2758,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ push(ebx);
// Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
+ ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
__ push(Operand::StaticVariable(c_entry_fp));
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
- masm->isolate());
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
__ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ j(not_equal, &not_outermost_js, Label::kNear);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
@@ -3246,9 +2779,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- masm->isolate());
+ isolate());
__ mov(Operand::StaticVariable(pending_exception), eax);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
+ __ mov(eax, Immediate(isolate()->factory()->exception()));
__ jmp(&exit);
// Invoke: Link this frame into the handler chain. There's only one
@@ -3257,7 +2790,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ PushTryHandler(StackHandler::JS_ENTRY, 0);
// Clear any pending exceptions.
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
__ mov(Operand::StaticVariable(pending_exception), edx);
// Fake a receiver (NULL).
@@ -3269,11 +2802,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// builtin stubs may not have been generated yet.
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- masm->isolate());
+ isolate());
__ mov(edx, Immediate(construct_entry));
} else {
- ExternalReference entry(Builtins::kJSEntryTrampoline,
- masm->isolate());
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
__ mov(edx, Immediate(entry));
}
__ mov(edx, Operand(edx, 0)); // deref address
@@ -3293,8 +2825,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore the top frame descriptor from the stack.
__ pop(Operand::StaticVariable(ExternalReference(
- Isolate::kCEntryFPAddress,
- masm->isolate())));
+ Isolate::kCEntryFPAddress, isolate())));
// Restore callee-saved registers (C calling conventions).
__ pop(ebx);
@@ -3407,7 +2938,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ cmp(scratch, prototype);
__ j(equal, &is_instance, Label::kNear);
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
__ cmp(scratch, Immediate(factory->null_value()));
__ j(equal, &is_not_instance, Label::kNear);
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
@@ -3429,7 +2960,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(0));
+ __ Move(eax, Immediate(0));
}
}
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -3449,7 +2980,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
}
}
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -3465,20 +2996,20 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Null is not an instance of anything.
__ cmp(object, factory->null_value());
__ j(not_equal, &object_not_null, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
__ j(NegateCondition(is_string), &slow, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
@@ -3575,7 +3106,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
@@ -3601,7 +3132,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ SmiTag(index_);
__ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -3626,7 +3157,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ j(not_zero, &slow_case_);
Factory* factory = masm->isolate()->factory();
- __ Set(result_, Immediate(factory->single_character_string_cache()));
+ __ Move(result_, Immediate(factory->single_character_string_cache()));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiShiftSize == 0);
@@ -3659,408 +3190,12 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfSmi(eax, &call_runtime);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &call_runtime);
-
- // First argument is a string, test second.
- __ JumpIfSmi(edx, &call_runtime);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &call_runtime);
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
-
- // Both arguments are strings.
- // eax: first string
- // edx: second string
- // Check if either of the strings are empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, ecx);
- __ j(not_zero, &second_not_zero_length, Label::kNear);
- // Second string is empty, result is first string which is already in eax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, ebx);
- __ j(not_zero, &both_not_zero_length, Label::kNear);
- // First string is empty, result is second string which is in edx.
- __ mov(eax, edx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // eax: first string
- // ebx: length of first string as a smi
- // ecx: length of second string as a smi
- // edx: second string
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
- __ add(ebx, ecx);
- STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
- // Handle exceptionally long strings in the runtime system.
- __ j(overflow, &call_runtime);
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return an internalized string here.
- __ cmp(ebx, Immediate(Smi::FromInt(2)));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
-
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_two_character_string_no_reload;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, ebx, ecx, eax, edx, edi,
- &make_two_character_string_no_reload, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Allocate a two character string.
- __ bind(&make_two_character_string);
- // Reload the arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
- __ bind(&make_two_character_string_no_reload);
- __ IncrementCounter(counters->string_add_make_two_char(), 1);
- __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
- // Pack both characters in ebx.
- __ shl(ecx, kBitsPerByte);
- __ or_(ebx, ecx);
- // Set the characters in the new string.
- __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
- __ j(below, &string_add_flat_result);
-
- // If result is not supposed to be flat allocate a cons string object. If both
- // strings are ASCII the result is an ASCII cons string.
- Label non_ascii, allocated, ascii_data;
- __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, edi);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(ecx, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ASCII cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ AssertSmi(ebx);
- __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
- __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
- __ j(zero, &skip_write_barrier);
-
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ RecordWriteField(ecx,
- ConsString::kFirstOffset,
- eax,
- ebx,
- kDontSaveFPRegs);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
- __ RecordWriteField(ecx,
- ConsString::kSecondOffset,
- edx,
- ebx,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
-
- __ bind(&skip_write_barrier);
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
-
- __ bind(&after_writing);
-
- __ mov(eax, ecx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one byte characters.
- // ecx: first instance type AND second instance type.
- // edi: second instance type.
- __ test(ecx, Immediate(kOneByteDataHintMask));
- __ j(not_zero, &ascii_data);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, ecx);
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ and_(edi, kOneByteStringTag | kOneByteDataHintTag);
- __ cmp(edi, kOneByteStringTag | kOneByteDataHintTag);
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label first_prepared, second_prepared;
- Label first_is_sequential, second_is_sequential;
- __ bind(&string_add_flat_result);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- // ecx: instance type of first string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ecx, kStringRepresentationMask);
- __ j(zero, &first_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(ecx, kShortExternalStringMask);
- __ j(not_zero, &call_runtime);
- __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ jmp(&first_prepared, Label::kNear);
- __ bind(&first_is_sequential);
- __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ bind(&first_prepared);
-
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- // Check whether both strings have same encoding.
- // edi: instance type of second string
- __ xor_(ecx, edi);
- __ test_b(ecx, kStringEncodingMask);
- __ j(not_zero, &call_runtime);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(edi, kStringRepresentationMask);
- __ j(zero, &second_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(edi, kShortExternalStringMask);
- __ j(not_zero, &call_runtime);
- __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ jmp(&second_prepared, Label::kNear);
- __ bind(&second_is_sequential);
- __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ bind(&second_prepared);
-
- // Push the addresses of both strings' first characters onto the stack.
- __ push(edx);
- __ push(eax);
-
- Label non_ascii_string_add_flat_result, call_runtime_drop_two;
- // edi: instance type of second string
- // First string and second string have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(edi, kStringEncodingMask);
- __ j(zero, &non_ascii_string_add_flat_result);
-
- // Both strings are ASCII strings.
- // ebx: length of resulting flat string as a smi
- __ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // eax: first string - known to be two byte
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are two byte strings.
- __ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Recover stack pointer before jumping to runtime.
- __ bind(&call_runtime_drop_two);
- __ Drop(2);
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(eax);
- __ push(edx);
-}
-
-
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
- Register temp) {
- __ pop(temp);
- __ pop(edx);
- __ pop(eax);
- __ push(temp);
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
- __ mov(arg, scratch1);
- __ mov(Operand(esp, stack_offset), arg);
- __ bind(&done);
-}
-
-
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(src, Immediate(1));
- __ add(dest, Immediate(1));
- } else {
- __ mov_w(scratch, Operand(src, 0));
- __ mov_w(Operand(dest, 0), scratch);
- __ add(src, Immediate(2));
- __ add(dest, Immediate(2));
- }
- __ sub(count, Immediate(1));
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- // Copy characters using rep movs of doublewords.
- // The destination is aligned on a 4 byte boundary because we are
- // copying to the beginning of a newly allocated string.
- ASSERT(dest.is(edi)); // rep movs destination
- ASSERT(src.is(esi)); // rep movs source
- ASSERT(count.is(ecx)); // rep movs count
+ String::Encoding encoding) {
ASSERT(!scratch.is(dest));
ASSERT(!scratch.is(src));
ASSERT(!scratch.is(count));
@@ -4071,172 +3206,29 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ j(zero, &done);
// Make count the number of bytes to copy.
- if (!ascii) {
+ if (encoding == String::TWO_BYTE_ENCODING) {
__ shl(count, 1);
}
- // Don't enter the rep movs if there are less than 4 bytes to copy.
- Label last_bytes;
- __ test(count, Immediate(~3));
- __ j(zero, &last_bytes, Label::kNear);
-
- // Copy from edi to esi using rep movs instruction.
- __ mov(scratch, count);
- __ sar(count, 2); // Number of doublewords to copy.
- __ cld();
- __ rep_movs();
-
- // Find number of bytes left.
- __ mov(count, scratch);
- __ and_(count, 3);
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ test(count, count);
- __ j(zero, &done);
-
- // Copy remaining characters.
Label loop;
__ bind(&loop);
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
- __ add(src, Immediate(1));
- __ add(dest, Immediate(1));
- __ sub(count, Immediate(1));
+ __ inc(src);
+ __ inc(dest);
+ __ dec(count);
__ j(not_zero, &loop);
__ bind(&done);
}
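// [Editorial sketch; not part of the original patch.] The rewritten
// GenerateCopyCharacters above emits the equivalent of this byte-at-a-time
// loop (names illustrative; overlapping regions are not supported):
static void CopyChars(unsigned char* dest, const unsigned char* src,
                      unsigned count, bool two_byte) {
  if (count == 0) return;     // j(zero, &done)
  if (two_byte) count <<= 1;  // shl(count, 1): count becomes a byte count
  do {
    *dest++ = *src++;         // mov_b via scratch, then inc(src)/inc(dest)
  } while (--count != 0);     // dec(count); j(not_zero, &loop)
}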
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ mov(scratch, c1);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index, Label::kNear);
- __ mov(scratch, c2);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_probed);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, kBitsPerByte);
- __ or_(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ mov(mask, FieldOperand(string_table, StringTable::kCapacityOffset));
- __ SmiUntag(mask);
- __ sub(mask, Immediate(1));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // string_table: string table
- // mask: capacity mask
- // scratch: -
-
- // Perform a number of probes in the string table.
- static const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes], next_probe_pop_mask[kProbes];
- Register candidate = scratch; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- __ mov(scratch, hash);
- if (i > 0) {
- __ add(scratch, Immediate(StringTable::GetProbeOffset(i)));
- }
- __ and_(scratch, mask);
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ mov(candidate,
- FieldOperand(string_table,
- scratch,
- times_pointer_size,
- StringTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- Factory* factory = masm->isolate()->factory();
- __ cmp(candidate, factory->undefined_value());
- __ j(equal, not_found);
- __ cmp(candidate, factory->the_hole_value());
- __ j(equal, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ cmp(FieldOperand(candidate, String::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &next_probe[i]);
-
- // As we are out of registers, save the mask on the stack and use that
- // register as a temporary.
- __ push(mask);
- Register temp = mask;
-
- // Check that the candidate is a non-external ASCII string.
- __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe_pop_mask[i]);
-
- // Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
- __ and_(temp, 0x0000ffff);
- __ cmp(chars, temp);
- __ j(equal, &found_in_string_table);
- __ bind(&next_probe_pop_mask[i]);
- __ pop(mask);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ pop(mask); // Pop saved mask from the stack.
- if (!result.is(eax)) {
- __ mov(eax, result);
- }
-}
-
-
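// [Editorial sketch; not part of the original patch.] The removed
// GenerateTwoCharacterStringTableProbe above implemented the probe loop
// below; every helper name here is hypothetical, shown only to summarize
// the control flow:
static String* ProbeTwoCharString(unsigned hash, unsigned chars,
                                  StringTable* table, unsigned mask) {
  for (int i = 0; i < 4; i++) {                       // kProbes == 4
    unsigned entry = (hash + ProbeOffset(i)) & mask;  // hypothetical helper
    Object* candidate = EntryAt(table, entry);        // hypothetical helper
    if (IsUndefined(candidate)) return NULL;  // no string with this hash
    if (IsTheHole(candidate)) continue;       // deleted slot; next probe
    if (IsSequentialAsciiOfLength2(candidate) &&      // hypothetical helper
        FirstTwoChars(candidate) == chars) {          // hypothetical helper
      return String::cast(candidate);
    }
  }
  return NULL;  // not found by probing; the string may still exist
}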
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character,
Register scratch) {
// hash = (seed + character) + ((seed + character) << 10);
- if (Serializer::enabled()) {
+ if (masm->serializer_enabled()) {
__ LoadRoot(scratch, Heap::kHashSeedRootIndex);
__ SmiUntag(scratch);
__ add(scratch, character);
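// [Editorial sketch; not part of the original patch.] The comment above pins
// down the first mixing step of the string hash; in plain C++:
static unsigned HashInit(unsigned seed, unsigned character) {
  unsigned hash = seed + character;
  hash += hash << 10;  // (seed + character) + ((seed + character) << 10)
  return hash;         // later mixing happens in GenerateHashAddCharacter and
                       // GenerateHashGetHash, which are outside this hunk
}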
@@ -4331,7 +3323,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Longer than original string's length or negative: unsafe arguments.
__ j(above, &runtime);
// Return original string.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&not_original_string);
@@ -4353,7 +3345,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ test(ebx, Immediate(kIsIndirectStringMask));
__ j(zero, &seq_or_external_string, Label::kNear);
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
__ test(ebx, Immediate(kSlicedNotConsMask));
__ j(not_zero, &sliced_string, Label::kNear);
// Cons string. Check whether it is flat, then fetch first part.
@@ -4431,7 +3423,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Handle external string.
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ test_b(ebx, kShortExternalStringMask);
__ j(not_zero, &runtime);
__ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
@@ -4453,23 +3445,21 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// eax: result string
// ecx: result string length
- __ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
__ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
- __ pop(esi);
+ __ pop(edx);
__ pop(ebx);
__ SmiUntag(ebx);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(edx, FieldOperand(edx, ebx, times_1, SeqOneByteString::kHeaderSize));
// eax: result string
// ecx: result length
- // edx: original value of esi
// edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
- __ mov(esi, edx); // Restore esi.
+ // edx: character of sub string start
+ StringHelper::GenerateCopyCharacters(
+ masm, edi, edx, ecx, ebx, String::ONE_BYTE_ENCODING);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
@@ -4479,27 +3469,25 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// eax: result string
// ecx: result string length
- __ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
__ add(edi,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
- __ pop(esi);
+ __ pop(edx);
__ pop(ebx);
  // As from is a smi it is 2 times the value, which matches the size of a
  // two-byte character.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
+ __ lea(edx, FieldOperand(edx, ebx, times_1, SeqTwoByteString::kHeaderSize));
// eax: result string
// ecx: result length
- // edx: original value of esi
// edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
- __ mov(esi, edx); // Restore esi.
+ // edx: character of sub string start
+ StringHelper::GenerateCopyCharacters(
+ masm, edi, edx, ecx, ebx, String::TWO_BYTE_ENCODING);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
@@ -4509,7 +3497,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// eax: string
@@ -4537,7 +3525,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
__ cmp(length, FieldOperand(right, String::kLengthOffset));
__ j(equal, &check_zero_length, Label::kNear);
__ bind(&strings_not_equal);
- __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
__ ret(0);
// Check if the length is zero.
@@ -4546,7 +3534,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
STATIC_ASSERT(kSmiTag == 0);
__ test(length, length);
__ j(not_zero, &compare_chars, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
// Compare characters.
@@ -4555,7 +3543,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
&strings_not_equal, Label::kNear);
// Characters are equal.
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
}
@@ -4603,7 +3591,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Result is EQUAL.
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
Label result_greater;
@@ -4616,12 +3604,12 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ bind(&result_less);
// Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
+ __ Move(eax, Immediate(Smi::FromInt(LESS)));
__ ret(0);
// Result is GREATER.
__ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
+ __ Move(eax, Immediate(Smi::FromInt(GREATER)));
__ ret(0);
}
@@ -4672,8 +3660,8 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&not_same);
@@ -4691,7 +3679,35 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : left
+ // -- eax : right
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Load ecx with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ mov(ecx, handle(isolate()->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_equal, kExpectedAllocationSite);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ isolate()->factory()->allocation_site_map());
+ __ Assert(equal, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
}
@@ -4735,74 +3751,56 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ JumpIfNotSmi(eax, &miss);
}
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SSE2 or CMOV is unsupported.
- if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
- CpuFeatureScope scope1(masm, SSE2);
- CpuFeatureScope scope2(masm, CMOV);
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(eax, &right_smi, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&left, Label::kNear);
- __ bind(&right_smi);
- __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
- __ SmiUntag(ecx);
- __ Cvtsi2sd(xmm1, ecx);
-
- __ bind(&left);
- __ JumpIfSmi(edx, &left_smi, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&left_smi);
- __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
- __ SmiUntag(ecx);
- __ Cvtsi2sd(xmm0, ecx);
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(eax, &right_smi, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
+ __ SmiUntag(ecx);
+ __ Cvtsi2sd(xmm1, ecx);
- __ bind(&done);
- // Compare operands.
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- // Return a result of -1, 0, or 1, based on EFLAGS.
- // Performing mov, because xor would destroy the flag register.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+ __ bind(&left);
+ __ JumpIfSmi(edx, &left_smi, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
+ __ SmiUntag(ecx);
+ __ Cvtsi2sd(xmm0, ecx);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- }
+ __ bind(&done);
+ // Compare operands.
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ // Performing mov, because xor would destroy the flag register.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, ecx);
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, ecx);
+ __ ret(0);
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
CompareIC::GENERIC);
- __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+ __ cmp(eax, Immediate(isolate()->factory()->undefined_value()));
__ j(not_equal, &miss);
__ JumpIfSmi(edx, &unordered);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
@@ -4812,7 +3810,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&maybe_undefined2);
if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
+ __ cmp(edx, Immediate(isolate()->factory()->undefined_value()));
__ j(equal, &unordered);
}
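// [Editorial sketch; not part of the original patch.] The ucomisd/cmov
// sequence above (xmm0 = left from edx, xmm1 = right from eax) computes a
// NaN-aware three-way comparison; the stub returns the result as a Smi:
static int CompareDoubles(double left, double right) {
  if (left != left || right != right) return 2;  // NaN: the stub jumps to
                                                 // &unordered instead
  if (left > right) return 1;                    // cmov(above)
  if (left < right) return -1;                   // cmov(below)
  return 0;                                      // mov(eax, 0): equal
}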
@@ -4857,7 +3855,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ j(not_equal, &done, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ bind(&done);
__ ret(0);
@@ -4902,7 +3900,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ j(not_equal, &done, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ bind(&done);
__ ret(0);
@@ -4948,7 +3946,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
// Handle not identical strings.
@@ -4993,7 +3991,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -5047,7 +4045,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- masm->isolate());
+ isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(edx); // Preserve edx and eax.
__ push(eax);
@@ -5121,7 +4119,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ bind(&good);
}
- NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
+ NEGATIVE_LOOKUP);
__ push(Immediate(Handle<Object>(name)));
__ push(Immediate(name->Hash()));
__ CallStub(&stub);
@@ -5177,7 +4176,8 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ j(equal, done);
}
- NameDictionaryLookupStub stub(elements, r1, r0, POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0,
+ POSITIVE_LOOKUP);
__ push(name);
__ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
__ shr(r0, Name::kHashShift);
@@ -5237,7 +4237,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
index_,
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
- __ cmp(scratch, masm->isolate()->factory()->undefined_value());
+ __ cmp(scratch, isolate()->factory()->undefined_value());
__ j(equal, &not_in_dictionary);
// Stop if found the property.
@@ -5280,17 +4280,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub(kDontSaveFPRegs);
- stub.GetCode(isolate);
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
- }
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(SSE2);
+ StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
+ stub.GetCode();
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
@@ -5355,7 +4348,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -5370,34 +4363,26 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm,
kReturnOnNoNeedToInformIncrementalMarker,
mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ ret(0);
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ mov(Operand(esp, 0 * kPointerSize), regs_.object());
__ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
__ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
+ Immediate(ExternalReference::isolate_address(isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
+
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5574,16 +4559,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
ecx,
edi,
xmm0,
- &slow_elements_from_double,
- false);
+ &slow_elements_from_double);
__ pop(edx);
__ ret(0);
}
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ mov(ebx, MemOperand(ebp, parameter_count_offset));
@@ -5597,27 +4581,9 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
-void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ mov(edi, eax);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ mov(eax, MemOperand(ebp, parameter_count_offset));
- // The parameter count above includes the receiver for the arguments passed to
- // the deoptimization handler. Subtract the receiver for the parameter count
- // for the call.
- __ sub(eax, Immediate(1));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ParameterCount argument_count(eax);
- __ InvokeFunction(
- edi, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- ProfileEntryHookStub stub;
+ ProfileEntryHookStub stub(masm->isolate());
masm->CallStub(&stub);
}
}
@@ -5641,8 +4607,8 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ push(eax);
// Call the entry hook.
- ASSERT(masm->isolate()->function_entry_hook() != NULL);
- __ call(FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+ ASSERT(isolate()->function_entry_hook() != NULL);
+ __ call(FUNCTION_ADDR(isolate()->function_entry_hook()),
RelocInfo::RUNTIME_ENTRY);
__ add(esp, Immediate(2 * kPointerSize));
@@ -5659,8 +4625,8 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
+ T stub(masm->isolate(),
+ GetInitialFastElementsKind(),
mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
@@ -5671,7 +4637,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(edx, kind);
__ j(not_equal, &next);
- T stub(kind);
+ T stub(masm->isolate(), kind);
__ TailCallStub(&stub);
__ bind(&next);
}
@@ -5686,7 +4652,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // ebx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// edx - kind (if mode != DISABLE_ALLOCATION_SITES)
// eax - number of arguments
// edi - constructor?
@@ -5715,33 +4681,33 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry.
__ inc(edx);
- __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+
if (FLAG_debug_code) {
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
- __ Assert(equal, kExpectedAllocationSiteInCell);
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ Assert(equal, kExpectedAllocationSite);
}
    // Save the resulting elements kind in type info. We can't just store the
    // kind in the AllocationSite::transition_info field because elements kind
    // is restricted to a portion of the field; upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ add(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset),
+ __ add(FieldOperand(ebx, AllocationSite::kTransitionInfoOffset),
Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
__ bind(&normal_sequence);
@@ -5752,7 +4718,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(edx, kind);
__ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
__ TailCallStub(&stub);
__ bind(&next);
}
@@ -5767,20 +4733,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
}
}
}
@@ -5801,12 +4762,12 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate);
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
}
}
@@ -5842,15 +4803,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count_ == ANY)
- // -- ebx : type info cell
+ // -- ebx : AllocationSite or undefined
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
@@ -5863,28 +4820,18 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ cmp(ebx, Immediate(undefined_sentinel));
- __ j(equal, &okay_here);
- __ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
- __ Assert(equal, kExpectedPropertyCellInRegisterEbx);
- __ bind(&okay_here);
+ // We should either have undefined in ebx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(ebx);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ cmp(ebx, Immediate(undefined_sentinel));
+  // If ebx holds the undefined value rather than an AllocationSite, call an
+  // array constructor that doesn't use AllocationSites.
+ __ cmp(ebx, isolate()->factory()->undefined_value());
__ j(equal, &no_info);
- __ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(FieldOperand(edx, 0), Immediate(
- masm->isolate()->factory()->allocation_site_map()));
- __ j(not_equal, &no_info);
// Only look at the lower 16 bits of the transition info.
- __ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
+ __ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(edx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
@@ -5902,7 +4849,7 @@ void InternalArrayConstructorStub::GenerateCase(
__ test(eax, eax);
__ j(not_zero, &not_zero_case);
- InternalArrayNoArgumentConstructorStub stub0(kind);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0);
__ bind(&not_zero_case);
@@ -5917,16 +4864,16 @@ void InternalArrayConstructorStub::GenerateCase(
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey);
}
__ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(kind);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
__ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
__ TailCallStub(&stubN);
}
@@ -5934,7 +4881,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
- // -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -5960,8 +4906,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// but the following masking takes care of that anyway.
__ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ and_(ecx, Map::kElementsKindMask);
- __ shr(ecx, Map::kElementsKindShift);
+ __ DecodeField<Map::ElementsKindBits>(ecx);
if (FLAG_debug_code) {
Label done;
@@ -5983,6 +4928,165 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
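// [Editorial sketch; not part of the original patch.] The new
// DecodeField<Map::ElementsKindBits> call above replaces the explicit
// and_/shr pair; with V8's usual BitField interface it computes:
template <class BitField>
static unsigned DecodeField(unsigned bit_field) {
  return (bit_field & BitField::kMask) >> BitField::kShift;
}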
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : callee
+ // -- ebx : call_data
+ // -- ecx : holder
+ // -- edx : api_function_address
+ // -- esi : context
+ // --
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[argc * 4] : first argument
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ Register callee = eax;
+ Register call_data = ebx;
+ Register holder = ecx;
+ Register api_function_address = edx;
+ Register return_address = edi;
+ Register context = esi;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ __ pop(return_address);
+
+ // context save
+ __ push(context);
+ // load context from callee
+ __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ // return value
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // return value default
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ } else {
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ }
+ // isolate
+ __ push(Immediate(reinterpret_cast<int>(isolate())));
+ // holder
+ __ push(holder);
+
+ __ mov(scratch, esp);
+
+ // return address
+ __ push(return_address);
+
+  // The API function gets a reference to the v8::Arguments. If the CPU
+  // profiler is enabled, a wrapper function is called instead and needs the
+  // address of the callback as an additional parameter, so always allocate
+  // space for it.
+ const int kApiArgc = 1 + 1;
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_.
+ __ mov(ApiParameterOperand(2), scratch);
+ __ add(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Move(ApiParameterOperand(4), Immediate(argc));
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Move(ApiParameterOperand(5), Immediate(0));
+
+ // v8::InvocationCallback's argument.
+ __ lea(scratch, ApiParameterOperand(2));
+ __ mov(ApiParameterOperand(0), scratch);
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ Operand context_restore_operand(ebp,
+ (2 + FCA::kContextSaveIndex) * kPointerSize);
+  // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ Operand return_value_operand(ebp, return_value_offset * kPointerSize);
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ ApiParameterOperand(1),
+ argc + FCA::kArgsLength + 1,
+ return_value_operand,
+ &context_restore_operand);
+}
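// [Editorial sketch; not part of the original patch.] The STATIC_ASSERTs
// above fix the implicit-args layout that this stub pushes; index 0 ends up
// at the lowest stack address (holder is pushed last):
enum FcaLayoutSketch {  // mirrors the FunctionCallbackArguments indices
  kFcaHolder = 0,
  kFcaIsolate = 1,
  kFcaReturnValueDefaultValue = 2,
  kFcaReturnValue = 3,
  kFcaData = 4,    // call_data
  kFcaCallee = 5,
  kFcaContextSave = 6,
  kFcaArgsLength = 7
};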
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : name
+ // -- esp[8 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- edx : api_function_address
+ // -----------------------------------
+
+  // Array for v8::Arguments::values_, a handle for the name, and a pointer
+  // to the values (the pointer is treated as a smi by the GC).
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
+  // Allocate space for an optional callback address parameter in case the
+  // CPU profiler is active.
+ const int kApiArgc = 2 + 1;
+
+ Register api_function_address = edx;
+ Register scratch = ebx;
+
+ // load address of name
+ __ lea(scratch, Operand(esp, 1 * kPointerSize));
+
+ __ PrepareCallApiFunction(kApiArgc);
+ __ mov(ApiParameterOperand(0), scratch); // name.
+ __ add(scratch, Immediate(kPointerSize));
+ __ mov(ApiParameterOperand(1), scratch); // arguments pointer.
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ ApiParameterOperand(2),
+ kStackSpace,
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.h b/chromium/v8/src/ia32/code-stubs-ia32.h
index 14259241c85..8700181a86f 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.h
+++ b/chromium/v8/src/ia32/code-stubs-ia32.h
@@ -1,36 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IA32_CODE_STUBS_IA32_H_
#define V8_IA32_CODE_STUBS_IA32_H_
-#include "macro-assembler.h"
-#include "code-stubs.h"
-#include "ic-inl.h"
+#include "src/macro-assembler.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
@@ -40,37 +16,11 @@ void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- static void GenerateOperation(MacroAssembler* masm,
- TranscendentalCache::Type type);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) {
- ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || save_fp == kDontSaveFPRegs);
- }
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
@@ -87,43 +37,15 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
+  // Generate code for copying characters using a simple byte-at-a-time loop.
+  // Copies count characters from src to dest in the given encoding. Copying
+  // of overlapping regions is not supported.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies ecx characters from esi to edi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be edi.
- Register src, // Must be esi.
- Register count, // Must be ecx.
- Register scratch, // Neither of above.
- bool ascii);
-
- // Probe the string table for a two character string. If the string
- // requires non-standard hashing a jump to the label not_probed is
- // performed and registers c1 and c2 are preserved. In all other
- // cases they are clobbered. If the string is not found by probing a
- // jump to the label not_found is performed. This jump does not
- // guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in
- // register eax.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found);
+ String::Encoding encoding);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
@@ -143,34 +65,9 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm, Register temp);
-
- const StringAddFlags flags_;
-};
-
-
class SubStringStub: public PlatformCodeStub {
public:
- SubStringStub() {}
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() { return SubString; }
@@ -182,7 +79,7 @@ class SubStringStub: public PlatformCodeStub {
class StringCompareStub: public PlatformCodeStub {
public:
- StringCompareStub() { }
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
// Compares two flat ASCII strings and returns result in eax.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -220,11 +117,13 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- NameDictionaryLookupStub(Register dictionary,
+ NameDictionaryLookupStub(Isolate* isolate,
+ Register dictionary,
Register result,
Register index,
LookupMode mode)
- : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+ : PlatformCodeStub(isolate),
+ dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -280,12 +179,14 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
class RecordWriteStub: public PlatformCodeStub {
public:
- RecordWriteStub(Register object,
+ RecordWriteStub(Isolate* isolate,
+ Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
- : object_(object),
+ : PlatformCodeStub(isolate),
+ object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
@@ -293,7 +194,6 @@ class RecordWriteStub: public PlatformCodeStub {
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
- ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || fp_mode == kDontSaveFPRegs);
}
enum Mode {
@@ -435,11 +335,10 @@ class RecordWriteStub: public PlatformCodeStub {
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
if (mode == kSaveFPRegs) {
- CpuFeatureScope scope(masm, SSE2);
masm->sub(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+ Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
// Save all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+ for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
@@ -449,14 +348,13 @@ class RecordWriteStub: public PlatformCodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatureScope scope(masm, SSE2);
// Restore all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+ for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+ Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
}
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
@@ -505,7 +403,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/chromium/v8/src/ia32/codegen-ia32.cc b/chromium/v8/src/ia32/codegen-ia32.cc
index ab4029da119..fbd5b89060c 100644
--- a/chromium/v8/src/ia32/codegen-ia32.cc
+++ b/chromium/v8/src/ia32/codegen-ia32.cc
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "codegen.h"
-#include "heap.h"
-#include "macro-assembler.h"
+#include "src/codegen.h"
+#include "src/heap.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -57,64 +34,17 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) {
- // Fallback to library function if function cannot be created.
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- }
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- // Move double input into registers.
-
- __ push(ebx);
- __ push(edx);
- __ push(edi);
- __ fld_d(Operand(esp, 4 * kPointerSize));
- __ mov(ebx, Operand(esp, 4 * kPointerSize));
- __ mov(edx, Operand(esp, 5 * kPointerSize));
- TranscendentalCacheStub::GenerateOperation(&masm, type);
- // The return value is expected to be on ST(0) of the FPU stack.
- __ pop(edi);
- __ pop(edx);
- __ pop(ebx);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(SSE2)) return &exp;
- if (!FLAG_fast_math) return &exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
{
- CpuFeatureScope use_sse2(&masm, SSE2);
XMMRegister input = xmm1;
XMMRegister result = xmm2;
__ movsd(input, Operand(esp, 1 * kPointerSize));
@@ -146,15 +76,12 @@ UnaryMathFunction CreateSqrtFunction() {
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
- // If SSE2 is not available, we can use libc's implementation to ensure
- // consistency, since code generated by fullcodegen calls into the runtime in that case.
- if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
+ if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
// Move double input into registers.
{
- CpuFeatureScope use_sse2(&masm, SSE2);
__ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ sqrtsd(xmm0, xmm0);
__ movsd(Operand(esp, 1 * kPointerSize), xmm0);
@@ -259,7 +186,7 @@ class LabelConverter {
};
-OS::MemMoveFunction CreateMemMoveFunction() {
+MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
@@ -311,325 +238,264 @@ OS::MemMoveFunction CreateMemMoveFunction() {
__ cmp(dst, src);
__ j(equal, &pop_and_return);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope sse2_scope(&masm, SSE2);
- __ prefetch(Operand(src, 0), 1);
+ __ prefetch(Operand(src, 0), 1);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ cmp(count, kMediumCopySize);
+ __ j(below_equal, &medium_size);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+
+ {
+ // |dst| is a lower address than |src|. Copy front-to-back.
+ Label unaligned_source, move_last_15, skip_last_move;
+ __ mov(eax, src);
+ __ sub(eax, dst);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &forward_much_overlap);
+ // Copy first 16 bytes.
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ // Determine distance to alignment: 16 - (dst & 0xF).
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ neg(edx);
+ __ add(edx, Immediate(16));
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
+ // Copy loop for aligned source and destination.
+ MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
+ // At most 15 bytes to copy. Copy 16 bytes at end of string.
+ __ bind(&move_last_15);
+ __ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
+ __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(&masm);
+
+ // Copy loop for unaligned source and aligned destination.
+ __ bind(&unaligned_source);
+ MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
+ __ jmp(&move_last_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, last_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ mov_b(eax, Operand(src, 0));
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ dec(count);
+ __ bind(&forward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
+ FORWARD, MOVE_UNALIGNED);
+ __ bind(&last_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
- __ cmp(count, kMediumCopySize);
- __ j(below_equal, &medium_size);
- __ cmp(dst, src);
- __ j(above, &backward);
-
- {
- // |dst| is a lower address than |src|. Copy front-to-back.
- Label unaligned_source, move_last_15, skip_last_move;
- __ mov(eax, src);
- __ sub(eax, dst);
- __ cmp(eax, kMinMoveDistance);
- __ j(below, &forward_much_overlap);
- // Copy first 16 bytes.
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- // Determine distance to alignment: 16 - (dst & 0xF).
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ neg(edx);
- __ add(edx, Immediate(16));
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
- // dst is now aligned. Main copy loop.
- __ mov(loop_count, count);
- __ shr(loop_count, 6);
- // Check if src is also aligned.
- __ test(src, Immediate(0xF));
- __ j(not_zero, &unaligned_source);
- // Copy loop for aligned source and destination.
- MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ bind(&move_last_15);
- __ and_(count, 0xF);
- __ j(zero, &skip_last_move, Label::kNear);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
- __ bind(&skip_last_move);
- MemMoveEmitPopAndReturn(&masm);
-
- // Copy loop for unaligned source and aligned destination.
- __ bind(&unaligned_source);
- MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
- __ jmp(&move_last_15);
-
- // Less than kMinMoveDistance offset between dst and src.
- Label loop_until_aligned, last_15_much_overlap;
- __ bind(&loop_until_aligned);
- __ mov_b(eax, Operand(src, 0));
- __ inc(src);
- __ mov_b(Operand(dst, 0), eax);
- __ inc(dst);
- __ dec(count);
- __ bind(&forward_much_overlap); // Entry point into this block.
- __ test(dst, Immediate(0xF));
- __ j(not_zero, &loop_until_aligned);
- // dst is now aligned, src can't be. Main copy loop.
- __ mov(loop_count, count);
- __ shr(loop_count, 6);
- MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
- FORWARD, MOVE_UNALIGNED);
- __ bind(&last_15_much_overlap);
- __ and_(count, 0xF);
- __ j(zero, &pop_and_return);
- __ cmp(count, kSmallCopySize);
- __ j(below_equal, &small_size);
- __ jmp(&medium_size);
- }
+ __ jmp(&medium_size);
+ }
- {
- // |dst| is a higher address than |src|. Copy backwards.
- Label unaligned_source, move_first_15, skip_last_move;
- __ bind(&backward);
- // |dst| and |src| always point to the end of what's left to copy.
- __ add(dst, count);
- __ add(src, count);
- __ mov(eax, dst);
- __ sub(eax, src);
- __ cmp(eax, kMinMoveDistance);
- __ j(below, &backward_much_overlap);
- // Copy last 16 bytes.
- __ movdqu(xmm0, Operand(src, -0x10));
- __ movdqu(Operand(dst, -0x10), xmm0);
- // Find distance to alignment: dst & 0xF
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ sub(dst, edx);
- __ sub(src, edx);
- __ sub(count, edx);
- // dst is now aligned. Main copy loop.
- __ mov(loop_count, count);
- __ shr(loop_count, 6);
- // Check if src is also aligned.
- __ test(src, Immediate(0xF));
- __ j(not_zero, &unaligned_source);
- // Copy loop for aligned source and destination.
- MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
- // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
- __ bind(&move_first_15);
- __ and_(count, 0xF);
- __ j(zero, &skip_last_move, Label::kNear);
- __ sub(src, count);
- __ sub(dst, count);
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- __ bind(&skip_last_move);
- MemMoveEmitPopAndReturn(&masm);
-
- // Copy loop for unaligned source and aligned destination.
- __ bind(&unaligned_source);
- MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
- __ jmp(&move_first_15);
-
- // Less than kMinMoveDistance offset between dst and src.
- Label loop_until_aligned, first_15_much_overlap;
- __ bind(&loop_until_aligned);
- __ dec(src);
- __ dec(dst);
- __ mov_b(eax, Operand(src, 0));
- __ mov_b(Operand(dst, 0), eax);
- __ dec(count);
- __ bind(&backward_much_overlap); // Entry point into this block.
- __ test(dst, Immediate(0xF));
- __ j(not_zero, &loop_until_aligned);
- // dst is now aligned, src can't be. Main copy loop.
- __ mov(loop_count, count);
- __ shr(loop_count, 6);
- MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
- BACKWARD, MOVE_UNALIGNED);
- __ bind(&first_15_much_overlap);
- __ and_(count, 0xF);
- __ j(zero, &pop_and_return);
- // Small/medium handlers expect dst/src to point to the beginning.
- __ sub(dst, count);
- __ sub(src, count);
- __ cmp(count, kSmallCopySize);
- __ j(below_equal, &small_size);
- __ jmp(&medium_size);
- }
- {
- // Special handlers for 9 <= copy_size < 64. No assumptions about
- // alignment or move distance, so all reads must be unaligned and
- // must happen before any writes.
- Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
-
- __ bind(&f9_16);
- __ movsd(xmm0, Operand(src, 0));
- __ movsd(xmm1, Operand(src, count, times_1, -8));
- __ movsd(Operand(dst, 0), xmm0);
- __ movsd(Operand(dst, count, times_1, -8), xmm1);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f17_32);
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, 0x00), xmm0);
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f33_48);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, 0x00), xmm0);
- __ movdqu(Operand(dst, 0x10), xmm1);
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f49_63);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ movdqu(xmm2, Operand(src, 0x20));
- __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, 0x00), xmm0);
- __ movdqu(Operand(dst, 0x10), xmm1);
- __ movdqu(Operand(dst, 0x20), xmm2);
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&medium_handlers);
- __ dd(conv.address(&f9_16));
- __ dd(conv.address(&f17_32));
- __ dd(conv.address(&f33_48));
- __ dd(conv.address(&f49_63));
-
- __ bind(&medium_size); // Entry point into this block.
- __ mov(eax, count);
- __ dec(eax);
- __ shr(eax, 4);
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(eax, 3);
- __ j(below_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
- __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
- __ jmp(eax);
- }
- {
- // Specialized copiers for copy_size <= 8 bytes.
- Label small_handlers, f0, f1, f2, f3, f4, f5_8;
- __ bind(&f0);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f1);
- __ mov_b(eax, Operand(src, 0));
- __ mov_b(Operand(dst, 0), eax);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f2);
- __ mov_w(eax, Operand(src, 0));
- __ mov_w(Operand(dst, 0), eax);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f3);
- __ mov_w(eax, Operand(src, 0));
- __ mov_b(edx, Operand(src, 2));
- __ mov_w(Operand(dst, 0), eax);
- __ mov_b(Operand(dst, 2), edx);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f4);
- __ mov(eax, Operand(src, 0));
- __ mov(Operand(dst, 0), eax);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f5_8);
- __ mov(eax, Operand(src, 0));
- __ mov(edx, Operand(src, count, times_1, -4));
- __ mov(Operand(dst, 0), eax);
- __ mov(Operand(dst, count, times_1, -4), edx);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&small_handlers);
- __ dd(conv.address(&f0));
- __ dd(conv.address(&f1));
- __ dd(conv.address(&f2));
- __ dd(conv.address(&f3));
- __ dd(conv.address(&f4));
- __ dd(conv.address(&f5_8));
- __ dd(conv.address(&f5_8));
- __ dd(conv.address(&f5_8));
- __ dd(conv.address(&f5_8));
-
- __ bind(&small_size); // Entry point into this block.
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(count, 8);
- __ j(below_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
- __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
- __ jmp(eax);
- }
- } else {
- // No SSE2.
- Label forward;
- __ cmp(count, 0);
- __ j(equal, &pop_and_return);
- __ cmp(dst, src);
- __ j(above, &backward);
- __ jmp(&forward);
- {
- // Simple forward copier.
- Label forward_loop_1byte, forward_loop_4byte;
- __ bind(&forward_loop_4byte);
- __ mov(eax, Operand(src, 0));
- __ sub(count, Immediate(4));
- __ add(src, Immediate(4));
- __ mov(Operand(dst, 0), eax);
- __ add(dst, Immediate(4));
- __ bind(&forward); // Entry point.
- __ cmp(count, 3);
- __ j(above, &forward_loop_4byte);
- __ bind(&forward_loop_1byte);
- __ cmp(count, 0);
- __ j(below_equal, &pop_and_return);
- __ mov_b(eax, Operand(src, 0));
- __ dec(count);
- __ inc(src);
- __ mov_b(Operand(dst, 0), eax);
- __ inc(dst);
- __ jmp(&forward_loop_1byte);
+ {
+ // |dst| is a higher address than |src|. Copy backwards.
+ Label unaligned_source, move_first_15, skip_last_move;
+ __ bind(&backward);
+ // |dst| and |src| always point to the end of what's left to copy.
+ __ add(dst, count);
+ __ add(src, count);
+ __ mov(eax, dst);
+ __ sub(eax, src);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &backward_much_overlap);
+ // Copy last 16 bytes.
+ __ movdqu(xmm0, Operand(src, -0x10));
+ __ movdqu(Operand(dst, -0x10), xmm0);
+ // Find distance to alignment: dst & 0xF
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ sub(dst, edx);
+ __ sub(src, edx);
+ __ sub(count, edx);
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
+ // Copy loop for aligned source and destination.
+ MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
+ // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
+ __ bind(&move_first_15);
+ __ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
+ __ sub(src, count);
+ __ sub(dst, count);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(&masm);
+
+ // Copy loop for unaligned source and aligned destination.
+ __ bind(&unaligned_source);
+ MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
+ __ jmp(&move_first_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, first_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ dec(src);
+ __ dec(dst);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ __ dec(count);
+ __ bind(&backward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
+ BACKWARD, MOVE_UNALIGNED);
+ __ bind(&first_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
+ // Small/medium handlers expect dst/src to point to the beginning.
+ __ sub(dst, count);
+ __ sub(src, count);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ jmp(&medium_size);
+ }
+ {
+ // Special handlers for 9 <= copy_size < 64. No assumptions about
+ // alignment or move distance, so all reads must be unaligned and
+ // must happen before any writes.
+ Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
+
+ __ bind(&f9_16);
+ __ movsd(xmm0, Operand(src, 0));
+ __ movsd(xmm1, Operand(src, count, times_1, -8));
+ __ movsd(Operand(dst, 0), xmm0);
+ __ movsd(Operand(dst, count, times_1, -8), xmm1);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f17_32);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f33_48);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f49_63);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, 0x20));
+ __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, 0x20), xmm2);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&medium_handlers);
+ __ dd(conv.address(&f9_16));
+ __ dd(conv.address(&f17_32));
+ __ dd(conv.address(&f33_48));
+ __ dd(conv.address(&f49_63));
+
+ __ bind(&medium_size); // Entry point into this block.
+ __ mov(eax, count);
+ __ dec(eax);
+ __ shr(eax, 4);
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(eax, 3);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
}
- {
- // Simple backward copier.
- Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
- __ bind(&backward);
- __ add(src, count);
- __ add(dst, count);
- __ cmp(count, 3);
- __ j(below_equal, &entry_shortcut);
-
- __ bind(&backward_loop_4byte);
- __ sub(src, Immediate(4));
- __ sub(count, Immediate(4));
- __ mov(eax, Operand(src, 0));
- __ sub(dst, Immediate(4));
- __ mov(Operand(dst, 0), eax);
- __ cmp(count, 3);
- __ j(above, &backward_loop_4byte);
- __ bind(&backward_loop_1byte);
- __ cmp(count, 0);
- __ j(below_equal, &pop_and_return);
- __ bind(&entry_shortcut);
- __ dec(src);
- __ dec(count);
- __ mov_b(eax, Operand(src, 0));
- __ dec(dst);
- __ mov_b(Operand(dst, 0), eax);
- __ jmp(&backward_loop_1byte);
+ __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
+ __ jmp(eax);
+ }
+ {
+ // Specialized copiers for copy_size <= 8 bytes.
+ Label small_handlers, f0, f1, f2, f3, f4, f5_8;
+ __ bind(&f0);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f1);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f2);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_w(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f3);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_b(edx, Operand(src, 2));
+ __ mov_w(Operand(dst, 0), eax);
+ __ mov_b(Operand(dst, 2), edx);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f4);
+ __ mov(eax, Operand(src, 0));
+ __ mov(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f5_8);
+ __ mov(eax, Operand(src, 0));
+ __ mov(edx, Operand(src, count, times_1, -4));
+ __ mov(Operand(dst, 0), eax);
+ __ mov(Operand(dst, count, times_1, -4), edx);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&small_handlers);
+ __ dd(conv.address(&f0));
+ __ dd(conv.address(&f1));
+ __ dd(conv.address(&f2));
+ __ dd(conv.address(&f3));
+ __ dd(conv.address(&f4));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+
+ __ bind(&small_size); // Entry point into this block.
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(count, 8);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
}
+ __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
+ __ jmp(eax);
}
__ bind(&pop_and_return);
@@ -642,7 +508,7 @@ OS::MemMoveFunction CreateMemMoveFunction() {
OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
- return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
+ return FUNCTION_CAST<MemMoveFunction>(buffer);
}
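
The rewritten CreateMemMoveFunction above dispatches medium copies (9..63 bytes) through a four-entry jump table indexed by (count - 1) >> 4, mirroring the mov/dec/shr sequence at the medium_size entry point. A standalone check of that index computation (the helper name is illustrative):

#include <cassert>

static int MediumHandlerIndex(int count) {
  return (count - 1) >> 4;  // mirrors: mov eax, count; dec eax; shr eax, 4
}

int main() {
  // 9..16 -> f9_16, 17..32 -> f17_32, 33..48 -> f33_48, 49..63 -> f49_63.
  assert(MediumHandlerIndex(9) == 0 && MediumHandlerIndex(16) == 0);
  assert(MediumHandlerIndex(17) == 1 && MediumHandlerIndex(32) == 1);
  assert(MediumHandlerIndex(33) == 2 && MediumHandlerIndex(48) == 2);
  assert(MediumHandlerIndex(49) == 3 && MediumHandlerIndex(63) == 3);
  return 0;
}
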
@@ -739,11 +605,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
ExternalReference canonical_the_hole_nan_reference =
ExternalReference::address_of_the_hole_nan();
XMMRegister the_hole_nan = xmm1;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ movsd(the_hole_nan,
- Operand::StaticVariable(canonical_the_hole_nan_reference));
- }
+ __ movsd(the_hole_nan,
+ Operand::StaticVariable(canonical_the_hole_nan_reference));
__ jmp(&entry);
// Call into runtime if GC is required.
@@ -764,17 +627,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Normal smi, convert it to double and store.
__ SmiUntag(ebx);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm, SSE2);
- __ Cvtsi2sd(xmm0, ebx);
- __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
- xmm0);
- } else {
- __ push(ebx);
- __ fild_s(Operand(esp, 0));
- __ pop(ebx);
- __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
- }
+ __ Cvtsi2sd(xmm0, ebx);
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ xmm0);
__ jmp(&entry);
// Found hole, store hole_nan_as_double instead.
@@ -785,14 +640,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Assert(equal, kObjectFoundInSmiOnlyArray);
}
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
- the_hole_nan);
- } else {
- __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
- __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
- }
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ the_hole_nan);
__ bind(&entry);
__ sub(edi, Immediate(Smi::FromInt(1)));
@@ -894,17 +743,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
// edx: new heap number
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm, SSE2);
- __ movsd(xmm0,
- FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
- __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
- __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
- }
+ __ movsd(xmm0,
+ FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+ __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
__ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
__ mov(esi, ebx);
__ RecordWriteArray(eax,
@@ -1016,7 +857,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Assert(zero, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ test_b(result, kShortExternalStringMask);
__ j(not_zero, call_runtime);
// Check encoding.
@@ -1118,37 +959,33 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(ebp);
- patcher.masm()->mov(ebp, esp);
- patcher.masm()->push(esi);
- patcher.masm()->push(edi);
- initialized = true;
- }
- return sequence;
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ patcher.masm()->push(ebp);
+ patcher.masm()->mov(ebp, esp);
+ patcher.masm()->push(esi);
+ patcher.masm()->push(edi);
+}
+
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return *candidate == kCallOpcode;
}
+#endif
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
+ if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
@@ -1165,10 +1002,9 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
- CopyBytes(sequence, young_sequence, young_length);
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
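
The GetNoCodeAgeSequence replacement above moves the young prologue into CodeAgingHelper, and IsOld reduces to checking whether the sequence begins with a call opcode (the age-stub call patched over the prologue). A hedged sketch of that byte-level test, assuming ia32's 0xE8 near-call encoding:

#include <cassert>
#include <cstdint>

constexpr uint8_t kCallOpcode = 0xE8;  // ia32 near call with rel32 operand

// Aged code starts with a call to the age stub; young code starts with the
// boilerplate prologue (push ebp, opcode 0x55).
inline bool LooksAged(const uint8_t* sequence) {
  return sequence[0] == kCallOpcode;
}

int main() {
  const uint8_t young[] = {0x55, 0x89, 0xE5};       // push ebp; mov ebp, esp
  const uint8_t aged[]  = {0xE8, 0x00, 0x00, 0x00, 0x00};  // call rel32
  assert(!LooksAged(young) && LooksAged(aged));
  return 0;
}
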
diff --git a/chromium/v8/src/ia32/codegen-ia32.h b/chromium/v8/src/ia32/codegen-ia32.h
index 6a207ca9b51..3f59c2cb2fe 100644
--- a/chromium/v8/src/ia32/codegen-ia32.h
+++ b/chromium/v8/src/ia32/codegen-ia32.h
@@ -1,76 +1,16 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
-#include "ast.h"
-#include "ic-inl.h"
+#include "src/ast.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator {
- public:
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
-
- static Operand FixedArrayElementOperand(Register array,
- Register index_as_smi,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
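
The deleted FixedArrayElementOperand helper leaned on ia32 smi tagging: a smi stores value << 1, so scaling the smi index by times_half_pointer_size already yields a byte offset of value * kPointerSize. A standalone check of that identity, with the constants assumed for ia32:

#include <cassert>

constexpr int kPointerSize = 4;  // ia32
constexpr int kSmiTagSize = 1;   // smi = value << 1

int main() {
  for (int index = 0; index < 16; index++) {
    int as_smi = index << kSmiTagSize;
    // times_half_pointer_size scales by kPointerSize / 2 == 2.
    assert(as_smi * (kPointerSize / 2) == index * kPointerSize);
  }
  return 0;
}
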
diff --git a/chromium/v8/src/ia32/cpu-ia32.cc b/chromium/v8/src/ia32/cpu-ia32.cc
index 5fb04fc7272..7094f468eab 100644
--- a/chromium/v8/src/ia32/cpu-ia32.cc
+++ b/chromium/v8/src/ia32/cpu-ia32.cc
@@ -1,56 +1,23 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// CPU-specific code for ia32 that is independent of the OS goes here.
#ifdef __GNUC__
-#include "third_party/valgrind/valgrind.h"
+#include "src/third_party/valgrind/valgrind.h"
#endif
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "cpu.h"
-#include "macro-assembler.h"
+#include "src/cpu.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(SSE2);
-}
-
-
void CPU::FlushICache(void* start, size_t size) {
// No need to flush the instruction cache on Intel. On Intel instruction
// cache flushing is only necessary when multiple cores are running the same
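
For context on why the remaining FlushICache body is trivial: x86 keeps instruction caches coherent with data writes, so outside of Valgrind there is nothing to flush. A minimal sketch under that assumption (USE_VALGRIND is an assumed build flag and the wrapper name is illustrative; VALGRIND_DISCARD_TRANSLATIONS is the real Valgrind macro):

#include <cstddef>

void FlushInstructionCache(void* start, size_t size) {
#if defined(__GNUC__) && defined(USE_VALGRIND)
  // Tell Valgrind to drop its cached translations of the patched range.
  VALGRIND_DISCARD_TRANSLATIONS(start, size);  // from valgrind/valgrind.h
#else
  (void)start;
  (void)size;  // nothing to do: the IA-32 icache snoops data writes
#endif
}

int main() {
  char buffer[16] = {0};
  FlushInstructionCache(buffer, sizeof(buffer));
  return 0;
}
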
diff --git a/chromium/v8/src/ia32/debug-ia32.cc b/chromium/v8/src/ia32/debug-ia32.cc
index 76a7003bfe0..7572d3e0dd9 100644
--- a/chromium/v8/src/ia32/debug-ia32.cc
+++ b/chromium/v8/src/ia32/debug-ia32.cc
@@ -1,43 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "codegen.h"
-#include "debug.h"
+#include "src/codegen.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
@@ -50,7 +25,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >=
Assembler::kCallInstructionLength);
rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
@@ -81,7 +56,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
Isolate* isolate = debug_info_->GetIsolate();
rinfo()->PatchCodeWithCall(
- isolate->debug()->debug_break_slot()->entry(),
+ isolate->builtins()->Slot_DebugBreak()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
}
@@ -92,10 +67,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
}
-// All debug break stubs support padding for LiveEdit.
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
-
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
@@ -107,11 +78,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
- for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
- __ push(Immediate(Smi::FromInt(
- Debug::FramePaddingLayout::kPaddingValue)));
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
}
- __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
+ __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@@ -138,10 +108,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ Set(eax, Immediate(0)); // No arguments.
+ __ Move(eax, Immediate(0)); // No arguments.
__ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
- CEntryStub ceb(1);
+ CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Automatically find register that could be used after register restore.
@@ -154,7 +124,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
int r = JSCallerSavedCode(i);
Register reg = { r };
if (FLAG_debug_code) {
- __ Set(reg, Immediate(kDebugZapValue));
+ __ Move(reg, Immediate(kDebugZapValue));
}
bool taken = reg.code() == esi.code();
if ((object_regs & (1 << r)) != 0) {
@@ -192,12 +162,23 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
+ ExternalReference::debug_after_break_target_address(masm->isolate());
__ jmp(Operand::StaticVariable(after_break_target));
}
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- edx : type feedback slot (smi)
+ // -- edi : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, edx.bit() | edi.bit(),
+ 0, false);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- ecx : name
@@ -207,7 +188,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Register state for IC store call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- eax : value
@@ -219,7 +200,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- ecx : key
@@ -229,7 +210,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC store call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- eax : value
@@ -241,7 +222,7 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- eax : value
@@ -250,16 +231,7 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- ecx: name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-ia32.cc).
// ----------- S t a t e -------------
// -- eax: return value
@@ -268,7 +240,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-ia32.cc).
// ----------- S t a t e -------------
// -- edi: function
@@ -277,17 +249,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-ia32.cc).
- // ----------- S t a t e -------------
- // -- ebx: cache cell for call target
- // -- edi: function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Register state for CallConstructStub (from code-stubs-ia32.cc).
// eax is the actual number of arguments, not encoded as a smi; see comment
// above IC call.
@@ -300,21 +262,24 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
// Register state for CallConstructStub (from code-stubs-ia32.cc).
// eax is the actual number of arguments, not encoded as a smi; see comment
// above IC call.
// ----------- S t a t e -------------
// -- eax: number of arguments (not smi)
- // -- ebx: cache cell for call target
+ // -- ebx: feedback array
+ // -- edx: feedback slot (smi)
// -- edi: constructor function
// -----------------------------------
// The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
+ eax.bit(), false);
}
-void Debug::GenerateSlot(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nop's to make space for a call instruction.
Label check_codesize;
__ bind(&check_codesize);
@@ -325,22 +290,22 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
}
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted, no registers can
// contain object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->ret(0);
}
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
- masm->isolate());
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
__ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
// We do not know our frame height, but set esp based on ebp.
@@ -361,11 +326,10 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ jmp(edx);
}
-const bool Debug::kFrameDropperSupported = true;
-#undef __
+const bool LiveEdit::kFrameDropperSupported = true;
-#endif // ENABLE_DEBUGGER_SUPPORT
+#undef __
} } // namespace v8::internal
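
The DebugCodegen generators above describe live registers to Generate_DebugBreakCallHelper as bitmasks built from reg.bit(). A standalone sketch of that encoding, assuming the register codes match ia32's hardware numbering:

#include <cassert>
#include <cstdint>

enum IA32Register { eax, ecx, edx, ebx, esp, ebp, esi, edi };  // codes 0..7

constexpr uint32_t Bit(IA32Register r) { return 1u << r; }

int main() {
  // Live-register description passed for the CallICStub break point above.
  uint32_t object_regs = Bit(edx) | Bit(edi);
  assert(object_regs == ((1u << 2) | (1u << 7)));
  // The helper walks the caller-saved registers and tests each bit.
  for (int r = 0; r < 8; r++) {
    bool is_live = (object_regs & (1u << r)) != 0;
    (void)is_live;
  }
  return 0;
}
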
diff --git a/chromium/v8/src/ia32/deoptimizer-ia32.cc b/chromium/v8/src/ia32/deoptimizer-ia32.cc
index 5300dde9a21..266899eb659 100644
--- a/chromium/v8/src/ia32/deoptimizer-ia32.cc
+++ b/chromium/v8/src/ia32/deoptimizer-ia32.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
@@ -90,9 +67,8 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
Factory* factory = isolate->factory();
Handle<ByteArray> new_reloc =
factory->NewByteArray(reloc_length + padding, TENURED);
- OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
- code->relocation_info()->GetDataStartAddress(),
- reloc_length);
+ MemCopy(new_reloc->GetDataStartAddress() + padding,
+ code->relocation_info()->GetDataStartAddress(), reloc_length);
// Create a relocation writer to write the comments in the padding
// space. Use position 0 for everything to ensure short encoding.
RelocInfoWriter reloc_info_writer(
@@ -116,6 +92,27 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
+
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
// We will overwrite the code's relocation info in-place. Relocation info
// is written backward. The relocation info is the payload of a byte
// array. Later on we will slide this to the start of the byte array and
@@ -124,9 +121,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address reloc_end_address = reloc_info->address() + reloc_info->Size();
RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
-
// Since the call is a relative encoding, write new
// reloc info. We do not need any of the existing reloc info because the
// existing code will not be used again (we zap it in debug builds).
@@ -134,9 +128,14 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Emit call to lazy deoptimization at all lazy deopt points.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Patch lazy deoptimization entry.
@@ -162,8 +161,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Move the relocation info to the beginning of the byte array.
int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- OS::MemMove(
- code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
+ MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
// The relocation info is in place, update the size.
reloc_info->set_length(new_reloc_size);
@@ -187,7 +185,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -209,8 +207,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- if (!CpuFeatures::IsSupported(SSE2)) return;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -231,13 +228,6 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
-Code* Deoptimizer::NotifyStubFailureBuiltin() {
- Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
- Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
- return isolate_->builtins()->builtin(name);
-}
-
-
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
@@ -247,15 +237,12 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
+ XMMRegister::kMaxNumAllocatableRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(esp, offset), xmm_reg);
- }
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ movsd(Operand(esp, offset), xmm_reg);
}
__ pushad();
@@ -300,15 +287,12 @@ void Deoptimizer::EntryGenerator::Generate() {
}
int double_regs_offset = FrameDescription::double_registers_offset();
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- // Fill in the double input registers.
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movsd(xmm0, Operand(esp, src_offset));
- __ movsd(Operand(ebx, dst_offset), xmm0);
- }
+ // Fill in the double input registers.
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize;
+ __ movsd(xmm0, Operand(esp, src_offset));
+ __ movsd(Operand(ebx, dst_offset), xmm0);
}
// Clear FPU all exceptions.
@@ -387,13 +371,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ j(below, &outer_push_loop);
// In case of a failed STUB, we have to restore the XMM registers.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(ebx, src_offset));
- }
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ movsd(xmm_reg, Operand(ebx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
@@ -440,6 +421,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
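
The FLAG_zap_code_space block added to PatchCodeForDeoptimization writes an int3 just past the code-age sequence (or at instruction start when there is none) and another at the OSR entry. A simplified sketch of that patching logic; the buffer, offsets, and kNoCodeAgeSequenceLength value are assumptions for illustration, not V8's actual Code layout:

#include <cstddef>
#include <cstdint>

constexpr uint8_t kInt3 = 0xCC;                 // ia32 breakpoint opcode
constexpr size_t kNoCodeAgeSequenceLength = 5;  // assumed ia32 value

// age_sequence_offset < 0 stands in for FindCodeAgeSequence() returning
// NULL; osr_offset == 0 means no OSR entry, matching the hunk's checks.
void ZapCode(uint8_t* instruction_start, long age_sequence_offset,
             int osr_offset) {
  uint8_t* pointer = instruction_start;
  if (age_sequence_offset >= 0) {
    pointer += age_sequence_offset + kNoCodeAgeSequenceLength;
  }
  *pointer = kInt3;  // fail hard and early on re-entry
  if (osr_offset > 0) {
    instruction_start[osr_offset] = kInt3;
  }
}

int main() {
  uint8_t code[32] = {0};
  ZapCode(code, /*age_sequence_offset=*/0, /*osr_offset=*/16);
  return (code[kNoCodeAgeSequenceLength] == kInt3 && code[16] == kInt3) ? 0 : 1;
}
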
diff --git a/chromium/v8/src/ia32/disasm-ia32.cc b/chromium/v8/src/ia32/disasm-ia32.cc
index 057a558e28f..3b3ff09921a 100644
--- a/chromium/v8/src/ia32/disasm-ia32.cc
+++ b/chromium/v8/src/ia32/disasm-ia32.cc
@@ -1,39 +1,16 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "disasm.h"
+#include "src/disasm.h"
namespace disasm {
@@ -380,7 +357,7 @@ void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
va_list args;
va_start(args, format);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
+ int result = v8::internal::VSNPrintF(buf, format, args);
va_end(args);
tmp_buffer_pos_ += result;
}
@@ -407,10 +384,11 @@ int DisassemblerIA32::PrintRightOperandHelper(
return 2;
} else if (base == ebp) {
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
+ AppendToBuffer("[%s*%d%s0x%x]",
(this->*register_name)(index),
1 << scale,
- disp);
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return 6;
} else if (index != esp && base != ebp) {
// [base+index*scale]
@@ -434,23 +412,30 @@ int DisassemblerIA32::PrintRightOperandHelper(
byte sib = *(modrmp + 1);
int scale, index, base;
get_sib(sib, &scale, &index, &base);
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2)
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
} else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
(this->*register_name)(base),
(this->*register_name)(index),
1 << scale,
- disp);
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1)
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return mod == 2 ? 5 : 2;
}
break;
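The displacement hunks above make two coordinated changes: the 8-bit displacement is now read through a signed int8_t cast instead of an unsigned byte, and negative displacements print with an explicit sign instead of as large unsigned hex. A self-contained sketch of the resulting formatting, with placeholder names:

    #include <cstdint>
    #include <cstdio>

    // For disp == -8 this prints "[eax-0x8]"; the old unsigned read would
    // have produced "[eax+0xfffffff8]".
    void PrintBaseDisp(const char* reg, int32_t disp) {
      printf("[%s%s0x%x]", reg, disp < 0 ? "-" : "+",
             disp < 0 ? -disp : disp);
    }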
@@ -881,6 +866,7 @@ static const char* F0Mnem(byte f0byte) {
case 0xAD: return "shrd";
case 0xAC: return "shrd"; // 3-operand version.
case 0xAB: return "bts";
+ case 0xBD: return "bsr";
default: return NULL;
}
}
@@ -1007,6 +993,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
{ byte f0byte = data[1];
const char* f0mnem = F0Mnem(f0byte);
if (f0byte == 0x18) {
+ data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
const char* suffix[] = {"nta", "1", "2", "3"};
@@ -1095,22 +1082,26 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += SetCC(data);
} else if ((f0byte & 0xF0) == 0x40) {
data += CMov(data);
- } else {
+ } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+ // shrd, shld, bts
data += 2;
- if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd, shld, bts
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- if (f0byte == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ if (f0byte == 0xAB) {
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
} else {
- UnimplementedInstruction();
+ AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (f0byte == 0xBD) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
}
}
break;
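The new 0xBD branch above decodes bsr (bit scan reverse), which, unlike shrd/shld/bts, prints the register operand first and the r/m operand second. A small sketch of the register-form decode under that convention; the register table and function name are placeholders for the disassembler's helpers:

    #include <cstdint>
    #include <cstdio>

    void DecodeBsrRegForm(uint8_t modrm) {
      static const char* regs[8] =
          {"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"};
      int mod = (modrm >> 6) & 3;
      int regop = (modrm >> 3) & 7;
      int rm = modrm & 7;
      if (mod == 3) {  // register form, e.g. 0F BD C8 -> bsr ecx,eax
        printf("bsr %s,%s\n", regs[regop], regs[rm]);
      }
    }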
@@ -1605,13 +1596,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- } else if (b2 == 0x6F) {
+ } else if (b2 == 0x6F) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- } else if (b2 == 0x7F) {
+ } else if (b2 == 0x7F) {
AppendToBuffer("movdqu ");
data += 3;
int mod, regop, rm;
@@ -1654,18 +1645,17 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int outp = 0;
// Instruction bytes.
for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- "%02x",
- *bp);
+ outp += v8::internal::SNPrintF(out_buffer + outp,
+ "%02x",
+ *bp);
}
for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- " ");
+ outp += v8::internal::SNPrintF(out_buffer + outp, " ");
}
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- " %s",
- tmp_buffer_.start());
+ outp += v8::internal::SNPrintF(out_buffer + outp,
+ " %s",
+ tmp_buffer_.start());
return instr_len;
} // NOLINT (function is too long)
@@ -1689,7 +1679,7 @@ static const char* xmm_regs[8] = {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
diff --git a/chromium/v8/src/ia32/frames-ia32.cc b/chromium/v8/src/ia32/frames-ia32.cc
index 55708117689..1acc873d715 100644
--- a/chromium/v8/src/ia32/frames-ia32.cc
+++ b/chromium/v8/src/ia32/frames-ia32.cc
@@ -1,38 +1,15 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "assembler.h"
-#include "assembler-ia32.h"
-#include "assembler-ia32-inl.h"
-#include "frames.h"
+#include "src/assembler.h"
+#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/assembler-ia32-inl.h"
+#include "src/frames.h"
namespace v8 {
namespace internal {
@@ -40,10 +17,24 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return ebp; }
Register JavaScriptFrame::context_register() { return esi; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
Register StubFailureTrampolineFrame::fp_register() { return ebp; }
Register StubFailureTrampolineFrame::context_register() { return esi; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
} } // namespace v8::internal
diff --git a/chromium/v8/src/ia32/frames-ia32.h b/chromium/v8/src/ia32/frames-ia32.h
index 86061251018..1290ad6e09d 100644
--- a/chromium/v8/src/ia32/frames-ia32.h
+++ b/chromium/v8/src/ia32/frames-ia32.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IA32_FRAMES_IA32_H_
#define V8_IA32_FRAMES_IA32_H_
@@ -47,8 +24,6 @@ const RegList kJSCallerSaved =
const int kNumJSCallerSaved = 5;
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;
@@ -73,6 +48,8 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
+ static const int kFrameSize = 2 * kPointerSize;
+
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
@@ -82,6 +59,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used.
};
diff --git a/chromium/v8/src/ia32/full-codegen-ia32.cc b/chromium/v8/src/ia32/full-codegen-ia32.cc
index 3c5d4aa2788..0ea77f09145 100644
--- a/chromium/v8/src/ia32/full-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/full-codegen-ia32.cc
@@ -1,43 +1,20 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -74,7 +51,7 @@ class JumpPatchSite BASE_EMBEDDED {
void EmitPatchInfo() {
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
+ ASSERT(is_uint8(delta_to_patch_site));
__ test(eax, Immediate(delta_to_patch_site));
#ifdef DEBUG
info_emitted_ = true;
@@ -107,7 +84,6 @@ class JumpPatchSite BASE_EMBEDDED {
// formal parameter count expected by the function.
//
// The live registers are:
-// o ecx: CallKind
// o edi: the JS function object being called (i.e. ourselves)
// o esi: our context
// o ebp: our caller's frame pointer
@@ -119,6 +95,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -133,22 +110,23 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
- __ test(ecx, ecx);
- __ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
- __ JumpIfSmi(ecx, &ok);
- __ CmpObjectType(ecx, JS_GLOBAL_PROXY_TYPE, ecx);
+
+ __ cmp(ecx, isolate()->factory()->undefined_value());
__ j(not_equal, &ok, Label::kNear);
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
+
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+
+ __ mov(Operand(esp, receiver_offset), ecx);
+
__ bind(&ok);
}
@@ -158,7 +136,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(BUILD_FUNCTION_FRAME);
+ __ Prologue(info->IsCodePreAgingActive());
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -168,8 +146,34 @@ void FullCodeGenerator::Generate() {
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ Label ok;
+ __ mov(ecx, esp);
+ __ sub(ecx, Immediate(locals_count * kPointerSize));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_real_stack_limit(isolate());
+ __ cmp(ecx, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
__ mov(eax, Immediate(isolate()->factory()->undefined_value()));
- for (int i = 0; i < locals_count; i++) {
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(ecx, loop_iterations);
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ push(eax);
+ }
+ __ dec(ecx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
__ push(eax);
}
}
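The rewritten prologue above first probes the real stack limit when 128 or more locals are needed, then splits the undefined-value pushes into full batches of kMaxPushes plus a straight-line remainder. A sketch of the split arithmetic, assuming the same kMaxPushes = 32:

    #include <utility>

    // Returns {loop iterations, remaining straight-line pushes}; e.g. 70
    // locals become 2 loop trips of 32 pushes plus 6 unrolled pushes.
    std::pair<int, int> SplitPushes(int locals_count) {
      const int kMaxPushes = 32;
      return {locals_count / kMaxPushes, locals_count % kMaxPushes};
    }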
@@ -181,21 +185,26 @@ void FullCodeGenerator::Generate() {
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
- __ push(edi);
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(edi);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ push(edi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ // Context is returned in eax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in esi.
+ __ mov(esi, eax);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
// Copy parameters into context if necessary.
int num_parameters = info->scope()->num_parameters();
@@ -210,11 +219,18 @@ void FullCodeGenerator::Generate() {
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers eax and ebx.
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx,
- kDontSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(esi,
+ context_offset,
+ eax,
+ ebx,
+ kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
}
@@ -240,14 +256,14 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(type);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, eax, ebx, edx);
@@ -271,7 +287,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -281,8 +297,8 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
+ ExternalReference stack_limit
+ = ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
__ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
@@ -306,7 +322,7 @@ void FullCodeGenerator::Generate() {
void FullCodeGenerator::ClearAccumulator() {
- __ Set(eax, Immediate(Smi::FromInt(0)));
+ __ Move(eax, Immediate(Smi::FromInt(0)));
}
@@ -319,10 +335,6 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
@@ -334,13 +346,10 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
__ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
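Back-edge weighting is now unconditional: the interrupt budget is decremented by an amount proportional to the code distance from the back-edge target, clamped to the range [1, kMaxBackEdgeWeight]. A sketch of that formula, with the constants passed in as assumed parameters rather than V8's real values:

    #include <algorithm>

    int BackEdgeWeight(int distance, int max_weight, int code_size_multiplier) {
      return std::min(max_weight,
                      std::max(1, distance / code_size_multiplier));
    }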
@@ -372,31 +381,24 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(eax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(eax);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(eax);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ __ push(eax);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(eax);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
@@ -412,12 +414,10 @@ void FullCodeGenerator::EmitReturnSequence() {
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, ecx);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -479,9 +479,9 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
if (lit->IsSmi()) {
- __ SafeSet(result_register(), Immediate(lit));
+ __ SafeMove(result_register(), Immediate(lit));
} else {
- __ Set(result_register(), Immediate(lit));
+ __ Move(result_register(), Immediate(lit));
}
}
@@ -638,7 +638,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
+ CallIC(ic, condition->test_id());
__ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -755,7 +755,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -802,7 +802,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -855,7 +855,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ push(Immediate(variable->name()));
__ push(Immediate(Smi::FromInt(NONE)));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -925,7 +925,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ push(esi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -933,7 +933,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -989,8 +989,18 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ jmp(&skip, Label::kNear);
+ PrepareForBailout(clause, TOS_REG);
+ __ cmp(eax, isolate()->factory()->true_value());
+ __ j(not_equal, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
__ test(eax, eax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -1023,6 +1033,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+
SetStatementPosition(stmt);
Label loop, exit;
@@ -1101,20 +1113,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(ebx, cell);
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+ // No need for a write barrier; we are storing a Smi in the feedback vector.
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
__ j(above, &non_proxy);
- __ Set(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
+ __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
__ bind(&non_proxy);
__ push(ebx); // Smi
__ push(eax); // Array
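In the hunk above, the per-statement feedback cell is replaced by a slot in the shared feedback vector, written with a plain store because the sentinel is a Smi rather than a heap pointer. A sketch of the idea, modeling the vector as a plain array and treating the sentinel value as an assumed parameter:

    #include <cstdint>
    #include <vector>

    // Marking a for-in site as slow case: a raw store suffices; no write
    // barrier is needed because the stored value is an immediate (Smi).
    void MarkForInSlow(std::vector<intptr_t>& feedback_vector, int slot,
                       intptr_t megamorphic_sentinel) {
      feedback_vector[slot] = megamorphic_sentinel;
    }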
@@ -1198,8 +1207,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Iteration loop_statement(this, stmt);
increment_loop_depth();
- // var iterator = iterable[@@iterator]()
- VisitForAccumulatorValue(stmt->assign_iterator());
+ // var iterable = subject
+ VisitForAccumulatorValue(stmt->assign_iterable());
// As with for-in, skip the loop if the iterator is null or undefined.
__ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
@@ -1207,15 +1216,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
__ CompareRoot(eax, Heap::kNullValueRootIndex);
__ j(equal, loop_statement.break_label());
- // Convert the iterator to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(eax, &convert);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &done_convert);
- __ bind(&convert);
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
// Loop entry.
__ bind(loop_statement.continue_label());
@@ -1262,7 +1264,9 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
@@ -1271,7 +1275,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ push(Immediate(pretenure
? isolate()->factory()->true_value()
: isolate()->factory()->false_value()));
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(eax);
}
@@ -1292,7 +1296,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1306,7 +1310,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1335,11 +1339,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// load IC call.
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+
+ CallLoadIC(mode);
}
@@ -1351,7 +1355,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1388,16 +1392,15 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ mov(eax, isolate()->factory()->undefined_value());
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1414,13 +1417,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallLoadIC(CONTEXTUAL);
context()->Plug(eax);
break;
}
@@ -1428,9 +1430,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1462,7 +1463,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1472,14 +1473,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(eax, var);
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ mov(eax, isolate()->factory()->undefined_value());
}
__ bind(&done);
@@ -1492,15 +1493,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ push(esi); // Context.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1531,7 +1532,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->pattern()));
__ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -1543,7 +1544,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(ebx);
__ bind(&allocated);
@@ -1584,8 +1585,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1593,14 +1594,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
- FastCloneShallowObjectStub stub(properties_count);
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
@@ -1636,10 +1637,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->value()));
__ mov(edx, Operand(esp, 0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1720,54 +1718,26 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
// If the only customer of allocation sites is transitioning, then
// we can turn it off if we don't have anywhere else to transition to.
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
- Heap* heap = isolate()->heap();
- if (has_constant_fast_elements &&
- constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
- __ mov(ecx, Immediate(constant_elements));
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode,
- length);
- __ CallStub(&stub);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- if (has_constant_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- }
-
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
@@ -1804,7 +1774,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
// Store the subexpression value in the array's elements.
__ mov(ecx, Immediate(Smi::FromInt(i)));
- StoreArrayLiteralElementStub stub;
+ StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1821,13 +1791,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1967,7 +1933,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ cmp(esp, ebx);
__ j(equal, &post_runtime);
__ push(eax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
@@ -2035,7 +2001,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(ecx, esi);
__ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ pop(eax); // result
@@ -2052,29 +2018,33 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
+ __ mov(edx, Operand(esp, kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(edi, eax);
+ __ mov(Operand(esp, 2 * kPointerSize), edi);
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The key is still on the stack; drop it.
+ __ Drop(1); // The function is still on the stack; drop it.
// if (!result.done) goto l_try;
__ bind(&l_loop);
__ push(eax); // save result
__ mov(edx, eax); // result
__ mov(ecx, isolate()->factory()->done_string()); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in eax
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ test(eax, eax);
__ j(zero, &l_try);
// result.value
- __ pop(edx); // result
+ __ pop(edx); // result
__ mov(ecx, isolate()->factory()->value_string()); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in eax
- context()->DropAndPlug(2, eax); // drop iter and g
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
+ context()->DropAndPlug(2, eax); // drop iter and g
break;
}
}
@@ -2085,19 +2055,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in eax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. ebx
- // will hold the generator object until the activation has been resumed.
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // ebx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ pop(ebx);
// Check generator state.
- Label wrong_state, done;
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ Label wrong_state, closed_state, done;
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
__ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(0)));
- __ j(less_equal, &wrong_state);
+ __ j(equal, &closed_state);
+ __ j(less, &wrong_state);
// Load suspended function and context.
__ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
@@ -2163,14 +2135,28 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(ebx);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
+ // Reach here when the generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(eax);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(ebx);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
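The resume path above now distinguishes three continuation states instead of two. A minimal sketch of the dispatch, assuming the encoding the STATIC_ASSERTs pin down (executing < 0, closed == 0, suspended > 0); the enum and function are illustrative names only:

    enum class ResumeAction {
      kResume,           // suspended generator: jump back in
      kCompletedResult,  // closed + NEXT: return {value: undefined, done: true}
      kThrowValue,       // closed + THROW: rethrow the provided value
      kWrongState        // executing: throw a generator state error
    };

    ResumeAction Classify(int continuation, bool resume_mode_is_next) {
      if (continuation > 0) return ResumeAction::kResume;
      if (continuation == 0) {
        return resume_mode_is_next ? ResumeAction::kCompletedResult
                                   : ResumeAction::kThrowValue;
      }
      return ResumeAction::kWrongState;
    }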
@@ -2181,14 +2167,14 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
- Handle<Map> map(isolate()->native_context()->generator_result_map());
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
__ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2217,15 +2203,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
ASSERT(!key->value()->IsSmi());
__ mov(ecx, Immediate(key->value()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2245,9 +2230,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2257,10 +2241,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
switch (op) {
case Token::SAR:
- __ SmiUntag(eax);
__ SmiUntag(ecx);
__ sar_cl(eax); // No checks of result necessary
- __ SmiTag(eax);
+ __ and_(eax, Immediate(~kSmiTagMask));
break;
case Token::SHL: {
Label result_ok;
@@ -2330,22 +2313,16 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(edx);
- BinaryOpICStub stub(op, mode);
+ BinaryOpICStub stub(isolate(), op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2371,10 +2348,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(edx, eax);
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->value());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2384,7 +2358,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(ecx, eax);
__ pop(edx); // Receiver.
__ pop(eax); // Restore value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2395,48 +2369,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ mov(location, eax);
+ if (var->IsContextSlot()) {
+ __ mov(edx, eax);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ push(eax); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(name));
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallStoreIC();
- } else if (op == Token::INIT_CONST) {
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ mov(edx, StackOperand(var));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(StackOperand(var), eax);
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(eax);
__ push(esi);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &skip, Label::kNear);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2445,20 +2429,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &assign, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&assign);
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2466,20 +2448,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
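The assignment paths above are refactored around two helpers: EmitCallStoreContextSlot for lookup slots, which goes through the runtime, and EmitStoreToStackLocalOrContextSlot for everything else. The policy the second helper encodes, a write barrier only for context slots, can be sketched as follows (SlotInfo is a stand-in, not a V8 type):

    struct SlotInfo { bool is_context_slot; };

    // Stack locals live in the frame, so a plain store suffices; context
    // slots live in a heap object, so the GC must see the new pointer.
    bool NeedsWriteBarrier(const SlotInfo& slot) {
      return slot.is_context_slot;
    }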
@@ -2493,17 +2462,13 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->IsLiteral());
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2519,10 +2484,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(edx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -2551,73 +2516,69 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- __ call(code, rmode, ast_id);
+ __ call(code, RelocInfo::CODE_TARGET, ast_id);
}
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
}
- __ Set(ecx, Immediate(name));
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ mov(edx, Operand(esp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
}
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
+
+ EmitCall(expr, call_type);
}
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(ecx);
- __ push(eax);
- __ push(ecx);
+ Expression* callee = expr->expression();
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax); // Drop the key still on the stack.
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ mov(edx, Operand(esp, 0));
+ // Move the key into the right register for the keyed load IC.
+ __ mov(ecx, eax);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ EmitCall(expr, CallIC::METHOD);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2625,24 +2586,22 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
VisitForStackValue(args->at(i));
}
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(ebx, cell);
-
- CallFunctionStub stub(arg_count, flags);
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ Move(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
RecordJSReturnSite(expr);
+
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
context()->DropAndPlug(1, eax);
}
@@ -2658,13 +2617,13 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the language mode.
- __ push(Immediate(Smi::FromInt(language_mode())));
+ __ push(Immediate(Smi::FromInt(strict_mode())));
  // Push the start position of the scope the call resides in.
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2677,12 +2636,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2707,7 +2665,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
@@ -2715,13 +2673,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, eax);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ push(GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
// Generate code for loading from variables potentially shadowed by
@@ -2733,7 +2690,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// the object holding it (returned in edx).
__ push(context_register());
__ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ push(eax); // Function.
__ push(edx); // Receiver.
@@ -2747,37 +2704,34 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(eax);
  // The receiver is implicitly the global receiver. Indicate this by
  // passing undefined to the call function stub.
- __ push(Immediate(isolate()->factory()->the_hole_value()));
+ __ push(Immediate(isolate()->factory()->undefined_value()));
__ bind(&call);
}
// The receiver is either the global receiver or an object found by
- // LoadContextSlot. That object could be the hole if the receiver is
- // implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
+ // LoadContextSlot.
+ EmitCall(expr);
- } else if (property != NULL) {
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithIC(expr, property->key());
+ EmitKeyedCallWithLoadIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ mov(ebx, GlobalObjectOperand());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ __ push(Immediate(isolate()->factory()->undefined_value()));
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCall(expr);
}
#ifdef DEBUG
@@ -2810,18 +2764,21 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
SetSourcePosition(expr->position());
// Load function and argument count into edi and eax.
- __ Set(eax, Immediate(arg_count));
+ __ Move(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(ebx, cell);
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(eax);
}
@@ -2994,7 +2951,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kPointerSize == 4);
__ imul(ecx, ecx, DescriptorArray::kDescriptorSize);
- __ lea(ecx, Operand(ebx, ecx, times_2, DescriptorArray::kFirstOffset));
+ __ lea(ecx, Operand(ebx, ecx, times_4, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
__ add(ebx, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
@@ -3075,9 +3032,11 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(eax, map, if_false, DO_SMI_CHECK);
- __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x80000000));
- __ j(not_equal, if_false);
- __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x00000000));
+ // Check if the exponent half is 0x80000000. Comparing against 1 and
+ // checking for overflow is the shortest possible encoding.
+ __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
+ __ j(no_overflow, if_false);
+ __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
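
A brief aside on the encoding trick in the hunk above: for `cmp x, 1` the CPU sets the overflow flag only when the signed subtraction x - 1 overflows, and that happens for exactly one input, INT32_MIN (bit pattern 0x80000000), which is also the upper word of IEEE-754 -0.0. A standalone sketch, not part of the patch; the helper name is hypothetical:

// Mirrors the OF flag of `cmp x, 1` in portable C++ (illustrative only).
#include <cassert>
#include <cstdint>

static bool CmpAgainstOneOverflows(int32_t x) {
  // Subtracting a positive constant overflows exactly when a negative
  // operand produces a non-negative result.
  int32_t result = static_cast<int32_t>(static_cast<uint32_t>(x) - 1u);
  return x < 0 && result >= 0;
}

int main() {
  // Only the exponent half of -0.0 trips the overflow path.
  assert(CmpAgainstOneOverflows(static_cast<int32_t>(0x80000000u)));
  assert(!CmpAgainstOneOverflows(0));           // exponent half of +0.0
  assert(!CmpAgainstOneOverflows(0x7fffffff));  // INT32_MAX
  return 0;
}
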
@@ -3194,8 +3153,8 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
}
@@ -3206,7 +3165,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
Label exit;
// Get the number of formal parameters.
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -3285,30 +3244,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
- // Finally, we're expected to leave a value on the top of the stack.
- __ mov(eax, isolate()->factory()->undefined_value());
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- SubStringStub stub;
+ SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -3321,7 +3259,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
+ RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
@@ -3391,7 +3329,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(result);
}
@@ -3414,9 +3352,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
if (FLAG_debug_code) {
__ test(value, Immediate(kSmiTagMask));
- __ ThrowIf(not_zero, kNonSmiValue);
+ __ Check(zero, kNonSmiValue);
__ test(index, Immediate(kSmiTagMask));
- __ ThrowIf(not_zero, kNonSmiValue);
+ __ Check(zero, kNonSmiValue);
}
__ SmiUntag(value);
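
The debug checks in this hunk rely on the ia32 smi representation: bit 0 is the tag, clear for smis and set for heap-object pointers, so `test value, kSmiTagMask` sets the zero flag exactly for smis. A minimal sketch, assuming kSmiTagMask == 1 as on ia32 (the pointer value below is made up):

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kSmiTagMask = 1;
  intptr_t smi = 42 << 1;            // smi encoding of 42: low bit clear
  intptr_t heap_object = 0x1234567;  // hypothetical tagged pointer: low bit set
  assert((smi & kSmiTagMask) == 0);          // Check(zero, kNonSmiValue) passes
  assert((heap_object & kSmiTagMask) != 0);  // this would trip the check
  return 0;
}
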
@@ -3449,9 +3387,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
if (FLAG_debug_code) {
__ test(value, Immediate(kSmiTagMask));
- __ ThrowIf(not_zero, kNonSmiValue);
+ __ Check(zero, kNonSmiValue);
__ test(index, Immediate(kSmiTagMask));
- __ ThrowIf(not_zero, kNonSmiValue);
+ __ Check(zero, kNonSmiValue);
__ SmiUntag(index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
@@ -3473,12 +3411,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(SSE2)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
+ __ CallStub(&stub);
context()->Plug(eax);
}
@@ -3519,7 +3453,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
// Load the argument into eax and call the stub.
VisitForAccumulatorValue(args->at(0));
- NumberToStringStub stub;
+ NumberToStringStub stub(isolate());
__ CallStub(&stub);
context()->Plug(eax);
}
@@ -3573,13 +3507,13 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// NaN.
- __ Set(result, Immediate(isolate()->factory()->nan_value()));
+ __ Move(result, Immediate(isolate()->factory()->nan_value()));
__ jmp(&done);
__ bind(&need_conversion);
// Move the undefined value into the result register, which will
// trigger conversion.
- __ Set(result, Immediate(isolate()->factory()->undefined_value()));
+ __ Move(result, Immediate(isolate()->factory()->undefined_value()));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
@@ -3621,13 +3555,13 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ Set(result, Immediate(isolate()->factory()->empty_string()));
+ __ Move(result, Immediate(isolate()->factory()->empty_string()));
__ jmp(&done);
__ bind(&need_conversion);
// Move smi zero into the result register, which will trigger
// conversion.
- __ Set(result, Immediate(Smi::FromInt(0)));
+ __ Move(result, Immediate(Smi::FromInt(0)));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
@@ -3641,21 +3575,12 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- if (FLAG_new_string_add) {
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(edx);
- NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- } else {
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
- }
+ __ pop(edx);
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
context()->Plug(eax);
}
@@ -3667,34 +3592,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringCompareStub stub;
+ StringCompareStub stub(isolate());
__ CallStub(&stub);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
@@ -3714,8 +3617,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
// InvokeFunction requires the function in edi. Move it in there.
__ mov(edi, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper());
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
@@ -3730,12 +3632,14 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- RegExpConstructResultStub stub;
+ RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ pop(ebx);
+ __ pop(ecx);
__ CallStub(&stub);
context()->Plug(eax);
}
@@ -3770,60 +3674,22 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
Label done, not_found;
- // tmp now holds finger offset as a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- __ cmp(key, CodeGenerator::FixedArrayElementOperand(cache, tmp));
+ // tmp now holds finger offset as a smi.
+ __ cmp(key, FixedArrayElementOperand(cache, tmp));
__ j(not_equal, &not_found);
- __ mov(eax, CodeGenerator::FixedArrayElementOperand(cache, tmp, 1));
+ __ mov(eax, FixedArrayElementOperand(cache, tmp, 1));
__ jmp(&done);
__ bind(&not_found);
// Call runtime to perform the lookup.
__ push(cache);
__ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = eax;
- Register left = ebx;
- Register tmp = ecx;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmp(left, right);
- __ j(equal, &ok);
- // Fail if either is a non-HeapObject.
- __ mov(tmp, left);
- __ and_(tmp, right);
- __ JumpIfSmi(tmp, &fail);
- __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
- __ j(not_equal, &fail);
- __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail);
- __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok);
- __ bind(&fail);
- __ mov(eax, Immediate(isolate()->factory()->false_value()));
- __ jmp(&done);
- __ bind(&ok);
- __ mov(eax, Immediate(isolate()->factory()->true_value()));
__ bind(&done);
-
context()->Plug(eax);
}
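
The finger offset in the hunk above is deliberately left as a smi: with ia32 tagging a smi is the value shifted left by one, so scaling it with the times_2 addressing mode yields index * kPointerSize directly, which is exactly the byte offset FixedArrayElementOperand needs. A small sketch of that arithmetic, assuming the ia32 constants:

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;   // smi == value << 1 on ia32
  const int kPointerSize = 4;  // ia32 pointer width
  for (int index = 0; index < 8; index++) {
    intptr_t smi = static_cast<intptr_t>(index) << kSmiTagSize;
    // times_2 scaling of the smi equals the element's byte offset.
    assert(smi * 2 == static_cast<intptr_t>(index) * kPointerSize);
  }
  return 0;
}
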
@@ -3926,8 +3792,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that all array elements are sequential ASCII strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, Immediate(0));
- __ Set(string_length, Immediate(0));
+ __ Move(index, Immediate(0));
+ __ Move(string_length, Immediate(0));
// Loop condition: while (index < length).
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
@@ -4043,7 +3909,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ mov_b(separator_operand, scratch);
- __ Set(index, Immediate(0));
+ __ Move(index, Immediate(0));
  // Jump into the loop after the code that copies the separator, so the first
  // element is not preceded by a separator.
__ jmp(&loop_2_entry);
@@ -4080,7 +3946,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Long separator case (separator is more than one character).
__ bind(&long_separator);
- __ Set(index, Immediate(0));
+ __ Move(index, Immediate(0));
  // Jump into the loop after the code that copies the separator, so the first
  // element is not preceded by a separator.
__ jmp(&loop_3_entry);
@@ -4131,8 +3997,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4142,31 +4008,47 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
+ // Push the builtins object as receiver.
__ mov(eax, GlobalObjectOperand());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
- }
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Load the function from the receiver.
+ __ mov(edx, Operand(esp, 0));
+ __ mov(ecx, Immediate(expr->name()));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- if (expr->is_jsruntime()) {
- // Call the JS runtime function via a call IC.
- __ Set(ecx, Immediate(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
+
} else {
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+
+ context()->Plug(eax);
}
- context()->Plug(eax);
}
@@ -4180,20 +4062,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ push(Immediate(Smi::FromInt(strict_mode_flag)));
+ __ push(Immediate(Smi::FromInt(strict_mode())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(kNonStrictMode)));
+ __ push(Immediate(Smi::FromInt(SLOPPY)));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
@@ -4206,7 +4086,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(eax);
}
} else {
@@ -4287,16 +4167,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4384,7 +4259,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call, Label::kNear);
__ bind(&slow);
}
- ToNumberStub convert_stub;
+ ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
// Save result for postfix expressions.
@@ -4414,10 +4289,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4448,10 +4321,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4465,10 +4335,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4490,16 +4360,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4509,7 +4379,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ push(esi);
__ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4536,12 +4406,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(eax, if_true);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
+ } else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, if_false);
@@ -4549,20 +4420,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ } else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, SYMBOL_TYPE, edx);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ } else if (String::Equals(check, factory->boolean_string())) {
__ cmp(eax, isolate()->factory()->true_value());
__ j(equal, if_true);
__ cmp(eax, isolate()->factory()->false_value());
Split(equal, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
+ String::Equals(check, factory->null_string())) {
__ cmp(eax, isolate()->factory()->null_value());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ } else if (String::Equals(check, factory->undefined_string())) {
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
__ JumpIfSmi(eax, if_false);
@@ -4571,14 +4442,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
__ test(ecx, Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
+ } else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(eax, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
__ j(equal, if_true);
__ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
+ } else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(eax, if_false);
if (!FLAG_harmony_typeof) {
__ cmp(eax, isolate()->factory()->null_value());
@@ -4629,7 +4500,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ test(eax, eax);
@@ -4658,7 +4529,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4694,7 +4565,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ test(eax, eax);
Split(not_zero, if_true, if_false, fall_through);
}
@@ -4847,9 +4718,11 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
+#ifdef DEBUG
+static const byte kCallInstruction = 0xe8;
+#endif
void BackEdgeTable::PatchAt(Code* unoptimized_code,
@@ -4882,6 +4755,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
@@ -4899,20 +4773,22 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
if (*jns_instr_address == kJnsInstruction) {
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return INTERRUPT;
}
ASSERT_EQ(kNopByteOne, *jns_instr_address);
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address) ==
+ if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return OSR_AFTER_STACK_CHECK;
}
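
For orientation, these are the byte patterns GetBackEdgeState dispatches on, reconstructed from the constants above (0x79 jns, 0x11 its offset, 0x66 0x90 a two-byte nop, 0xe8 call); the call-target comparison that separates the two patched states is elided. A hedged sketch, not V8 API:

#include <cassert>
#include <cstdint>

enum SketchState { INTERRUPT_SITE, PATCHED_SITE };

static SketchState Classify(const uint8_t* jns_instr_address) {
  if (jns_instr_address[0] == 0x79) {      // jns still in place
    assert(jns_instr_address[1] == 0x11);  // kJnsOffset
    return INTERRUPT_SITE;
  }
  assert(jns_instr_address[0] == 0x66 && jns_instr_address[1] == 0x90);
  return PATCHED_SITE;  // OSR vs. osr-after-stack-check needs the call target
}

int main() {
  const uint8_t unpatched[] = {0x79, 0x11, 0xe8};  // jns +0x11; call ...
  const uint8_t patched[]   = {0x66, 0x90, 0xe8};  // 2-byte nop; call ...
  assert(Classify(unpatched) == INTERRUPT_SITE);
  assert(Classify(patched) == PATCHED_SITE);
  return 0;
}
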
diff --git a/chromium/v8/src/ia32/ic-ia32.cc b/chromium/v8/src/ia32/ic-ia32.cc
index 2973beb3e46..b0e4ca0c60c 100644
--- a/chromium/v8/src/ia32/ic-ia32.cc
+++ b/chromium/v8/src/ia32/ic-ia32.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "codegen.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
+#include "src/codegen.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -351,7 +328,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ j(not_zero, slow_case);
// Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
@@ -657,7 +634,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
@@ -682,7 +659,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -738,7 +715,7 @@ static void KeyedStoreGenerateGenericHelper(
// We have to go to the runtime if the current value is the hole because
// there may be a callback on the element
Label holecheck_passed1;
- __ cmp(CodeGenerator::FixedArrayElementOperand(ebx, ecx),
+ __ cmp(FixedArrayElementOperand(ebx, ecx),
masm->isolate()->factory()->the_hole_value());
__ j(not_equal, &holecheck_passed1);
__ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
@@ -755,7 +732,7 @@ static void KeyedStoreGenerateGenericHelper(
Immediate(Smi::FromInt(1)));
}
// It's irrelevant whether array is smi-only or not when writing a smi.
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+ __ mov(FixedArrayElementOperand(ebx, ecx), eax);
__ ret(0);
__ bind(&non_smi_value);
@@ -770,7 +747,7 @@ static void KeyedStoreGenerateGenericHelper(
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
}
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+ __ mov(FixedArrayElementOperand(ebx, ecx), eax);
// Update write barrier for the elements array address.
__ mov(edx, eax); // Preserve the value which is returned.
__ RecordWriteArray(
@@ -798,7 +775,7 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
- &transition_double_elements, false);
+ &transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
@@ -859,7 +836,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -947,377 +924,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- Isolate* isolate = masm->isolate();
- isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(edx, &number);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ cmp(edx, isolate->factory()->true_value());
- __ j(equal, &boolean);
- __ cmp(edx, isolate->factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edi : function
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check that the result is not a smi.
- __ JumpIfSmi(edi, miss);
-
- // Check that the value is a JavaScript function, fetching its map into eax.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-// The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
-
- // eax: elements
- // Search the dictionary placing the result in edi.
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, edi);
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ push(edx);
- __ push(ecx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
- __ CallStub(&stub);
-
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
- __ JumpIfSmi(edx, &invoke, Label::kNear);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, &global, Label::kNear);
- __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke, Label::kNear);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC,
- extra_state);
-
- GenerateMiss(masm, argc, extra_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &check_name);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in edx is not used after this point.
- // ecx: key
- // edi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // eax: elements
- // ecx: smi key
- // Check whether the elements is a number dictionary.
- __ CheckMap(eax,
- isolate->factory()->hash_table_map(),
- &slow_load,
- DONT_DO_SMI_CHECK);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- // ebx: untagged index
- // Receiver in edx will be clobbered, need to reload it on miss.
- __ LoadFromNumberDictionary(
- &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_reload_receiver);
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ecx); // save the key
- __ push(edx); // pass the receiver
- __ push(ecx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(ecx); // restore the key
- // Leave the internal frame.
- }
-
- __ mov(edi, eax);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ CheckMap(ebx,
- isolate->factory()->hash_table_map(),
- &lookup_monomorphic_cache,
- DONT_DO_SMI_CHECK);
-
- GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
- kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(ebx, ecx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label slow, notin;
- Factory* factory = masm->isolate()->factory();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
- __ mov(edi, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
- __ cmp(unmapped_location, factory->the_hole_value());
- __ j(equal, &slow);
- __ mov(edi, unmapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(ecx, &miss);
- Condition cond = masm->IsObjectNameType(ecx, eax, eax);
- __ j(NegateCondition(cond), &miss);
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -1326,9 +932,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
@@ -1343,15 +947,19 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss;
+ Label miss, slow;
GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
// eax: elements
// Search the dictionary placing the result in eax.
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, eax);
+ GenerateDictionaryLoad(masm, &slow, eax, ecx, edi, ebx, eax);
__ ret(0);
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
@@ -1434,17 +1042,14 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, no_reg);
@@ -1506,7 +1111,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -1527,7 +1132,7 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -1658,7 +1263,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address delta_address = test_instruction_address + 1;
  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+ uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, test=%p, delta=%d\n",
address, test_instruction_address, delta);
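
The int8_t -> uint8_t change above matters, presumably, because the patched delta is a byte-sized forward offset: read through int8_t, encodings of 0x80 and above would sign-extend to a negative value. A small C++ illustration (example values only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      unsigned char encoded = 0x90;                     // example offset byte >= 0x80
      int8_t as_signed = static_cast<int8_t>(encoded);  // -112: a bogus offset
      uint8_t as_unsigned = encoded;                    // 144: the intended offset
      std::printf("%d vs %d\n", as_signed, as_unsigned);
      return 0;
    }
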
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.cc b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
index 80369516dc8..1e4f7561be9 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
@@ -1,52 +1,22 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-codegen-ia32.h"
-#include "ic.h"
-#include "code-stubs.h"
-#include "deoptimizer.h"
-#include "stub-cache.h"
-#include "codegen.h"
-#include "hydrogen-osr.h"
+#include "src/ia32/lithium-codegen-ia32.h"
+#include "src/ic.h"
+#include "src/code-stubs.h"
+#include "src/deoptimizer.h"
+#include "src/stub-cache.h"
+#include "src/codegen.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
-
-static SaveFPRegsMode GetSaveFPRegsMode() {
- // We don't need to save floating point regs when generating the snapshot
- return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
-}
-
-
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
@@ -103,20 +73,11 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
- info()->CommitDependencies(code);
-}
-
-
-void LCodeGen::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
}
@@ -134,7 +95,6 @@ void LCodeGen::SaveCallerDoubles() {
ASSERT(info()->saves_caller_doubles());
ASSERT(NeedsEagerFrame());
Comment(";;; Save clobbered callee double registers");
- CpuFeatureScope scope(masm(), SSE2);
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
@@ -151,7 +111,6 @@ void LCodeGen::RestoreCallerDoubles() {
ASSERT(info()->saves_caller_doubles());
ASSERT(NeedsEagerFrame());
Comment(";;; Restore clobbered callee double registers");
- CpuFeatureScope scope(masm(), SSE2);
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
int count = 0;
@@ -177,24 +136,31 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
Label ok;
- __ test(ecx, Operand(ecx));
- __ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
+ __ mov(ecx, Operand(esp, receiver_offset));
+
+ __ cmp(ecx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+
+ __ mov(Operand(esp, receiver_offset), ecx);
+
__ bind(&ok);
}
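
What the prologue fragment above does, as a plain C++ sketch (not V8 API): in sloppy mode an undefined receiver on the stack is rewritten to the global proxy before the function body runs.

    #include <cstddef>

    void FixSloppyReceiver(void** stack, std::size_t num_parameters,
                           void* undefined_value, void* global_proxy) {
      std::size_t receiver_index = num_parameters + 1;  // +1 skips the return address
      if (stack[receiver_index] == undefined_value) {
        stack[receiver_index] = global_proxy;
      }
    }
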
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
- __ Set(edx, Immediate(kNoAlignmentPadding));
+ __ Move(edx, Immediate(kNoAlignmentPadding));
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
@@ -222,7 +188,11 @@ bool LCodeGen::GeneratePrologue() {
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
- __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
info()->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -277,27 +247,29 @@ bool LCodeGen::GeneratePrologue() {
}
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- SaveCallerDoubles();
- }
+ if (info()->saves_caller_doubles()) SaveCallerDoubles();
}
// Possibly allocate a local context.
int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
- __ push(edi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ push(edi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ // Context is returned in eax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in esi.
+ __ mov(esi, eax);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
// Copy parameters into context if necessary.
int num_parameters = scope()->num_parameters();
@@ -312,11 +284,18 @@ bool LCodeGen::GeneratePrologue() {
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers eax and ebx.
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx,
- kDontSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(esi,
+ context_offset,
+ eax,
+ ebx,
+ kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
Comment(";;; End allocate local context");
@@ -340,7 +319,7 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
// Move state of dynamic frame alignment into edx.
- __ Set(edx, Immediate(kNoAlignmentPadding));
+ __ Move(edx, Immediate(kNoAlignmentPadding));
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
Label do_not_pad, align_loop;
@@ -384,30 +363,16 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
- if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
}
-void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
- if (!CpuFeatures::IsSupported(SSE2)) {
- if (instr->IsGoto()) {
- x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
- } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
- !instr->IsGap() && !instr->IsReturn()) {
- if (instr->ClobbersDoubleRegisters()) {
- if (instr->HasDoubleRegisterResult()) {
- ASSERT_EQ(1, x87_stack_.depth());
- } else {
- ASSERT_EQ(0, x87_stack_.depth());
- }
- }
- __ VerifyX87StackDepth(x87_stack_.depth());
- }
- }
-}
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
bool LCodeGen::GenerateJumpTable() {
@@ -453,9 +418,7 @@ bool LCodeGen::GenerateJumpTable() {
__ ret(0); // Call the continuation without clobbering registers.
}
} else {
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- RestoreCallerDoubles();
- }
+ if (info()->saves_caller_doubles()) RestoreCallerDoubles();
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
@@ -468,12 +431,11 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- X87Stack copy(code->x87_stack());
- x87_stack_ = copy;
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -534,232 +496,17 @@ Register LCodeGen::ToRegister(int index) const {
}
-X87Register LCodeGen::ToX87Register(int index) const {
- return X87Register::FromAllocationIndex(index);
-}
-
-
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
return XMMRegister::FromAllocationIndex(index);
}
-void LCodeGen::X87LoadForUsage(X87Register reg) {
- ASSERT(x87_stack_.Contains(reg));
- x87_stack_.Fxch(reg);
- x87_stack_.pop();
-}
-
-
-void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
- ASSERT(x87_stack_.Contains(reg1));
- ASSERT(x87_stack_.Contains(reg2));
- x87_stack_.Fxch(reg1, 1);
- x87_stack_.Fxch(reg2);
- x87_stack_.pop();
- x87_stack_.pop();
-}
-
-
-void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
- ASSERT(is_mutable_);
- ASSERT(Contains(reg) && stack_depth_ > other_slot);
- int i = ArrayIndex(reg);
- int st = st2idx(i);
- if (st != other_slot) {
- int other_i = st2idx(other_slot);
- X87Register other = stack_[other_i];
- stack_[other_i] = reg;
- stack_[i] = other;
- if (st == 0) {
- __ fxch(other_slot);
- } else if (other_slot == 0) {
- __ fxch(st);
- } else {
- __ fxch(st);
- __ fxch(other_slot);
- __ fxch(st);
- }
- }
-}
-
-
-int LCodeGen::X87Stack::st2idx(int pos) {
- return stack_depth_ - pos - 1;
-}
-
-
-int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
- for (int i = 0; i < stack_depth_; i++) {
- if (stack_[i].is(reg)) return i;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-bool LCodeGen::X87Stack::Contains(X87Register reg) {
- for (int i = 0; i < stack_depth_; i++) {
- if (stack_[i].is(reg)) return true;
- }
- return false;
-}
-
-
-void LCodeGen::X87Stack::Free(X87Register reg) {
- ASSERT(is_mutable_);
- ASSERT(Contains(reg));
- int i = ArrayIndex(reg);
- int st = st2idx(i);
- if (st > 0) {
-    // Keep track of how fstp(i) changes the order of elements.
- int tos_i = st2idx(0);
- stack_[i] = stack_[tos_i];
- }
- pop();
- __ fstp(st);
-}
-
-
-void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
- if (x87_stack_.Contains(dst)) {
- x87_stack_.Fxch(dst);
- __ fstp(0);
- } else {
- x87_stack_.push(dst);
- }
- X87Fld(src, opts);
-}
-
-
-void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
- ASSERT(!src.is_reg_only());
- switch (opts) {
- case kX87DoubleOperand:
- __ fld_d(src);
- break;
- case kX87FloatOperand:
- __ fld_s(src);
- break;
- case kX87IntOperand:
- __ fild_s(src);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
- ASSERT(!dst.is_reg_only());
- x87_stack_.Fxch(src);
- switch (opts) {
- case kX87DoubleOperand:
- __ fst_d(dst);
- break;
- case kX87IntOperand:
- __ fist_s(dst);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
- ASSERT(is_mutable_);
- if (Contains(reg)) {
- Free(reg);
- }
- // Mark this register as the next register to write to
- stack_[stack_depth_] = reg;
-}
-
-
-void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
- ASSERT(is_mutable_);
- // Assert the reg is prepared to write, but not on the virtual stack yet
- ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
- stack_depth_ < X87Register::kNumAllocatableRegisters);
- stack_depth_++;
-}
-
-
-void LCodeGen::X87PrepareBinaryOp(
- X87Register left, X87Register right, X87Register result) {
- // You need to use DefineSameAsFirst for x87 instructions
- ASSERT(result.is(left));
- x87_stack_.Fxch(right, 1);
- x87_stack_.Fxch(left);
-}
-
-
-void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
- if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
- bool double_inputs = instr->HasDoubleRegisterInput();
-
- // Flush stack from tos down, since FreeX87() will mess with tos
- for (int i = stack_depth_-1; i >= 0; i--) {
- X87Register reg = stack_[i];
- // Skip registers which contain the inputs for the next instruction
- // when flushing the stack
- if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
- continue;
- }
- Free(reg);
- if (i < stack_depth_-1) i++;
- }
- }
- if (instr->IsReturn()) {
- while (stack_depth_ > 0) {
- __ fstp(0);
- stack_depth_--;
- }
- if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
- }
-}
-
-
-void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
- ASSERT(stack_depth_ <= 1);
-  // If this is ever used for new stubs that produce two pairs of doubles
-  // joined into two phis, this assert fires. That situation is not handled,
-  // since the two stacks might have st0 and st1 swapped.
- if (current_block_id + 1 != goto_instr->block_id()) {
- // If we have a value on the x87 stack on leaving a block, it must be a
- // phi input. If the next block we compile is not the join block, we have
- // to discard the stack state.
- stack_depth_ = 0;
- }
-}
-
-
-void LCodeGen::EmitFlushX87ForDeopt() {
-  // The deoptimizer does not support X87 registers. But as long as we
-  // deopt from a stub it's not a problem, since we will re-materialize the
- // original stub inputs, which can't be double registers.
- ASSERT(info()->IsStub());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ pushfd();
- __ VerifyX87StackDepth(x87_stack_.depth());
- __ popfd();
- }
- for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
-}
-
-
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
}
-X87Register LCodeGen::ToX87Register(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToX87Register(op->index());
-}
-
-
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
@@ -948,10 +695,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -1045,6 +788,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
void LCodeGen::RegisterEnvironmentForDeoptimization(
LEnvironment* environment, Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -1114,17 +858,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ popfd();
}
- // Before Instructions which can deopt, we normally flush the x87 stack. But
- // we can have inputs or outputs of the current instruction on the stack,
- // thus we need to flush them here from the physical stack to leave it in a
- // consistent state.
- if (x87_stack_.depth() > 0) {
- Label done;
- if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
- EmitFlushX87ForDeopt();
- __ bind(&done);
- }
-
if (info()->ShouldTrapOnDeopt()) {
Label done;
if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
@@ -1165,46 +898,24 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1364,30 +1075,19 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -1401,301 +1101,377 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
-
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ neg(left_reg);
- __ and_(left_reg, divisor - 1);
- __ neg(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ test(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ neg(dividend);
+ __ and_(dividend, mask);
+ __ neg(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
}
+ __ jmp(&done, Label::kNear);
+ }
- __ bind(&left_is_not_negative);
- __ and_(left_reg, divisor - 1);
- __ bind(&done);
- } else {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(eax));
- Register right_reg = ToRegister(instr->right());
- ASSERT(!right_reg.is(eax));
- ASSERT(!right_reg.is(edx));
- Register result_reg = ToRegister(instr->result());
- ASSERT(result_reg.is(edx));
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, mask);
+ __ bind(&done);
+}
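
A minimal C++ sketch of what the branching sequence above computes (illustrative, not V8 API); the kBailoutOnMinusZero deopt corresponds to the case where a negative dividend yields remainder 0:

    #include <cstdint>

    int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
      // Same mask the code computes; also correct for divisor == kMinInt.
      uint32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
      if (dividend < 0) {
        // The neg/and/neg trick, done in unsigned arithmetic so INT32_MIN
        // wraps the way the ia32 `neg` instruction does.
        uint32_t n = 0u - static_cast<uint32_t>(dividend);
        return -static_cast<int32_t>(n & mask);
      }
      return static_cast<int32_t>(dividend & mask);
    }
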
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (right->CanBeZero()) {
- __ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label no_overflow_possible;
- __ cmp(left_reg, kMinInt);
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ cmp(right_reg, -1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Set(result_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(eax));
- // Sign extend dividend in eax into edx:eax.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &positive_left, Label::kNear);
- __ idiv(right_reg);
- __ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr->environment());
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imul(edx, edx, Abs(divisor));
+ __ mov(eax, dividend);
+ __ sub(eax, edx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmp(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
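
The constant-divisor remainder above is just n - |d| * trunc(n / |d|); a sketch with C++'s truncating operator/ standing in for the magic-number TruncatingDiv (assumes d != 0 and d != INT32_MIN):

    #include <cstdint>

    int32_t ModByConst(int32_t n, int32_t d) {
      int32_t ad = d < 0 ? -d : d;  // Abs(divisor)
      int32_t q = n / ad;           // __ TruncatingDiv(dividend, Abs(divisor))
      return n - ad * q;            // the imul/sub pair above
    }
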
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(eax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(eax));
+ ASSERT(!right_reg.is(edx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(edx));
+
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(right_reg, Operand(right_reg));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmp(left_reg, kMinInt);
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ cmp(right_reg, -1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Move(result_reg, Immediate(0));
__ jmp(&done, Label::kNear);
- __ bind(&positive_left);
}
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
- __ bind(&done);
+ __ test(result_reg, Operand(result_reg));
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
}
+ __ idiv(right_reg);
+ __ bind(&done);
}
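
The two deopt checks above mirror the two inputs for which ia32 `idiv` would fault rather than produce a value (when -0 matters, the kMinInt % -1 case deopts instead of returning 0). A sketch, not V8 API:

    #include <cstdint>

    int32_t GuardedMod(int32_t l, int32_t r, bool* deopt) {
      if (r == 0) { *deopt = true; return 0; }   // x % 0: no NaN for ints, so deopt
      if (l == INT32_MIN && r == -1) return 0;   // idiv would fault; the result is 0
      return l % r;                              // cdq + idiv, remainder in edx
    }
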
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, Operand(dividend));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, kMinInt);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ test(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift > 0) {
+    // The arithmetic shift is always OK; the 'if' is an optimization only.
+ if (shift > 1) __ sar(result, 31);
+ __ shr(result, 32 - shift);
+ __ add(result, dividend);
+ __ sar(result, shift);
+ }
+ if (divisor < 0) __ neg(result);
+}
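
The sar/shr/add/sar core above is the standard round-toward-zero correction: negative dividends get a (2^shift - 1) bias before the final arithmetic shift (for shift == 1 the emitted code skips the sar, since shr by 31 already isolates the sign bit). A sketch for a positive power-of-two divisor; the neg for negative divisors and the deopt checks sit around it:

    #include <cstdint>

    int32_t TruncDivByPowerOf2(int32_t dividend, int shift) {  // divisor = 1 << shift
      uint32_t sign = static_cast<uint32_t>(dividend >> 31);   // sar result, 31
      uint32_t bias = sign >> (32 - shift);                    // shr result, 32 - shift
      // add + sar; assumes an arithmetic right shift, as on ia32.
      return (dividend + static_cast<int32_t>(bias)) >> shift;
    }
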
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmp(dividend, 0);
- __ j(less, &negative, Label::kNear);
- __ sar(dividend, power);
- if (divisor < 0) __ neg(dividend);
- __ jmp(&done, Label::kNear);
-
- __ bind(&negative);
- __ neg(dividend);
- __ sar(dividend, power);
- if (divisor > 0) __ neg(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ test(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sar(dividend, power);
- }
- }
- if (divisor < 0) __ neg(dividend);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(edx));
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(eax, edx);
+ __ imul(eax, eax, divisor);
+ __ sub(eax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
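
Shape of the constant-divisor division above, sketched in C++ (assumes a nontrivial compile-time divisor, |d| >= 2 and d != INT32_MIN; operator/ stands in for TruncatingDiv):

    #include <cstdint>

    int32_t DivByConst(int32_t n, int32_t d, bool all_uses_truncate, bool* deopt) {
      int32_t q = n / (d < 0 ? -d : d);  // __ TruncatingDiv(dividend, Abs(divisor))
      if (d < 0) q = -q;                 // __ neg(edx)
      // The optional imul/sub exactness check: deopt when division was inexact.
      if (!all_uses_truncate && static_cast<int64_t>(q) * d != n) *deopt = true;
      return q;
    }
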
+
- Register left_reg = eax;
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ ASSERT(dividend.is(eax));
+ ASSERT(remainder.is(edx));
+ ASSERT(ToRegister(instr->result()).is(eax));
+ ASSERT(!divisor.is(eax));
+ ASSERT(!divisor.is(edx));
// Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, ToOperand(right));
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(divisor, divisor);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ test(left_reg, Operand(left_reg));
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ test(right_reg, ToOperand(right));
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ test(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ test(divisor, divisor);
DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
+ __ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmp(right_reg, -1);
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmp(dividend, kMinInt);
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmp(divisor, -1);
DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ __ bind(&dividend_not_min_int);
}
- // Sign extend to edx.
+ // Sign extend to edx (= remainder).
__ cdq();
- __ idiv(right_reg);
+ __ idiv(divisor);
- if (instr->is_flooring()) {
- Label done;
- __ test(edx, edx);
- __ j(zero, &done, Label::kNear);
- __ xor_(edx, right_reg);
- __ sar(edx, 31);
- __ add(eax, edx);
- __ bind(&done);
- } else if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
- __ test(edx, Operand(edx));
+ __ test(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
}
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
-
- Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- Register result = ToRegister(instr->result());
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sar(dividend, shift);
return;
+ }
- case 1:
- __ Move(result, dividend);
- return;
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ neg(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
- case -1:
- __ Move(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(overflow, instr->environment());
}
return;
}
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- // Input[dividend] is clobbered.
- // The sequence is tedious because neg(dividend) might overflow.
- __ mov(result, dividend);
- __ sar(dividend, 31);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ shl(dividend, 32 - power);
- __ sar(result, power);
- __ not_(dividend);
- // Clear result.sign if dividend.sign is set.
- __ and_(result, dividend);
- } else {
- __ Move(result, dividend);
- __ sar(result, power);
- }
- } else {
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(ToRegister(instr->result()).is(edx));
- Register scratch = ToRegister(instr->temp());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- __ mov(scratch, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- __ mov(edx, static_cast<int32_t>(multiplier));
- __ imul(edx);
- if (static_cast<int32_t>(multiplier) < 0) {
- __ add(edx, scratch);
- }
- Register reg_lo = eax;
- Register reg_byte_scratch = scratch;
- if (!reg_byte_scratch.is_byte_register()) {
- __ xchg(reg_lo, reg_byte_scratch);
- reg_lo = scratch;
- reg_byte_scratch = eax;
- }
- if (divisor < 0) {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0x40000000);
- __ setcc(above, reg_byte_scratch);
- __ neg(edx);
- __ sub(edx, reg_byte_scratch);
- } else {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0xC0000000);
- __ setcc(above_equal, reg_byte_scratch);
- __ add(edx, reg_byte_scratch);
- }
- __ sar(edx, shift - 32);
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ sar(dividend, shift);
+ return;
}
+
+ Label not_kmin_int, done;
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ __ mov(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
+ __ bind(&not_kmin_int);
+ __ sar(dividend, shift);
+ __ bind(&done);
+}
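
For a negative power-of-two divisor the code above negates and then arithmetic-shifts (which floors); kMinInt is the one dividend whose negation overflows, hence the special case. A sketch, assuming 1 <= shift <= 30 and an arithmetic >>:

    #include <cstdint>

    int32_t FlooringDivByNegPowerOf2(int32_t dividend, int shift) {
      int32_t divisor = -(1 << shift);
      if (dividend == INT32_MIN) return INT32_MIN / divisor;  // the overflow case
      return (-dividend) >> shift;  // neg + sar: floor(dividend / divisor)
    }
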
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ dec(edx);
+ __ bind(&done);
+}
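
The adjustment in the general case above turns truncating division into flooring division: when dividend and divisor have opposite signs, nudge the dividend one step toward the divisor's sign, divide, and subtract one. A sketch, assuming d != 0:

    #include <cstdint>

    int32_t FlooringDivByConst(int32_t n, int32_t d) {
      if ((d > 0 && n >= 0) || (d < 0 && n <= 0)) return n / d;  // the easy case
      int32_t t = n + (d > 0 ? 1 : -1);  // __ lea(temp, Operand(dividend, +/-1))
      return t / d - 1;                  // TruncatingDiv + neg ... __ dec(edx)
    }
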
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(eax));
+ ASSERT(remainder.is(edx));
+ ASSERT(result.is(eax));
+ ASSERT(!divisor.is(eax));
+ ASSERT(!divisor.is(edx));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(divisor, divisor);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ test(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ test(divisor, divisor);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmp(dividend, kMinInt);
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmp(divisor, -1);
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&dividend_not_min_int);
+ }
+
+ // Sign extend to edx (= remainder).
+ __ cdq();
+ __ idiv(divisor);
+
+ Label done;
+ __ test(remainder, remainder);
+ __ j(zero, &done, Label::kNear);
+ __ xor_(remainder, divisor);
+ __ sar(remainder, 31);
+ __ add(result, remainder);
+ __ bind(&done);
}
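
The branch-free tail above fixes up the truncated quotient: if the remainder is nonzero and its sign differs from the divisor's, flooring needs one less than truncation. As a sketch (assumes an arithmetic >>):

    #include <cstdint>

    int32_t FloorFixup(int32_t quotient, int32_t remainder, int32_t divisor) {
      if (remainder == 0) return quotient;  // test / j(zero, &done)
      int32_t signs = remainder ^ divisor;  // sign bit set iff signs differ
      return quotient + (signs >> 31);      // sar 31 gives 0 or -1; then add
    }
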
@@ -1882,11 +1658,11 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
}
break;
case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
+ if (shift_count != 0) {
+ __ shr(ToRegister(left), shift_count);
+ } else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
DeoptimizeIf(sign, instr->environment());
- } else {
- __ shr(ToRegister(left), shift_count);
}
break;
case Token::SHL:
@@ -1929,12 +1705,12 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}
void LCodeGen::DoConstantS(LConstantS* instr) {
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}
@@ -1945,41 +1721,32 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
ASSERT(instr->result()->IsDoubleRegister());
- if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
- __ push(Immediate(upper));
- __ push(Immediate(lower));
- X87Register reg = ToX87Register(instr->result());
- X87Mov(reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ XMMRegister res = ToDoubleRegister(instr->result());
+ if (int_val == 0) {
+ __ xorps(res, res);
} else {
- CpuFeatureScope scope1(masm(), SSE2);
- XMMRegister res = ToDoubleRegister(instr->result());
- if (int_val == 0) {
- __ xorps(res, res);
- } else {
- Register temp = ToRegister(instr->temp());
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope2(masm(), SSE4_1);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(res, Operand(temp));
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- } else {
- __ xorps(res, res);
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- }
- } else {
- __ Set(temp, Immediate(upper));
+ Register temp = ToRegister(instr->temp());
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ if (lower != 0) {
+ __ Move(temp, Immediate(lower));
__ movd(res, Operand(temp));
- __ psllq(res, 32);
- if (lower != 0) {
- XMMRegister xmm_scratch = double_scratch0();
- __ Set(temp, Immediate(lower));
- __ movd(xmm_scratch, Operand(temp));
- __ orps(res, xmm_scratch);
- }
+ __ Move(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ } else {
+ __ xorps(res, res);
+ __ Move(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ }
+ } else {
+ __ Move(temp, Immediate(upper));
+ __ movd(res, Operand(temp));
+ __ psllq(res, 32);
+ if (lower != 0) {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ Move(temp, Immediate(lower));
+ __ movd(xmm_scratch, Operand(temp));
+ __ orps(res, xmm_scratch);
}
}
}
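
What the constant-double materialization above computes, sketched with SSE intrinsics: the two 32-bit halves of the IEEE-754 bit pattern are combined in one XMM register (the SSE4.1 path uses pinsrd instead of the psllq/orps pair):

    #include <cstdint>
    #include <emmintrin.h>  // SSE2

    __m128d MaterializeDouble(int32_t lower, int32_t upper) {
      __m128i lo = _mm_cvtsi32_si128(lower);          // movd
      __m128i hi = _mm_cvtsi32_si128(upper);          // movd
      hi = _mm_slli_epi64(hi, 32);                    // psllq: move to the high half
      return _mm_castsi128_pd(_mm_or_si128(lo, hi));  // orps equivalent
    }
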
@@ -1993,9 +1760,9 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
- Handle<Object> handle = instr->value(isolate());
+ Handle<Object> object = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
- __ LoadObject(reg, handle);
+ __ LoadObject(reg, object);
}
@@ -2006,43 +1773,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(result, FieldOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(result, Map::kElementsKindMask);
- __ shr(result, Map::kElementsKindShift);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- ASSERT(input.is(result));
-
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
- }
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, map);
- __ j(not_equal, &done, Label::kNear);
- __ mov(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -2164,18 +1894,6 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToOperand(instr->value()));
- ASSERT(ToRegister(instr->context()).is(esi));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
- }
-}
-
-
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
@@ -2204,7 +1922,6 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- CpuFeatureScope scope(masm(), SSE2);
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
@@ -2267,88 +1984,45 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a mulsd depending on the result
- __ movaps(left, left);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- __ movsd(Operand(esp, 0 * kDoubleSize), left);
- __ movsd(Operand(esp, 1 * kDoubleSize), right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- // Store it into the result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(result, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- X87Register left = ToX87Register(instr->left());
- X87Register right = ToX87Register(instr->right());
- X87Register result = ToX87Register(instr->result());
- if (instr->op() != Token::MOD) {
- X87PrepareBinaryOp(left, right, result);
- }
- switch (instr->op()) {
- case Token::ADD:
- __ fadd_i(1);
- break;
- case Token::SUB:
- __ fsub_i(1);
- break;
- case Token::MUL:
- __ fmul_i(1);
- break;
- case Token::DIV:
- __ fdiv_i(1);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- X87Mov(Operand(esp, 1 * kDoubleSize), right);
- X87Mov(Operand(esp, 0), left);
- X87Free(right);
- ASSERT(left.is(result));
- X87PrepareToWrite(result);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- X87CommitWrite(result);
- break;
- }
- default:
- UNREACHABLE();
- break;
+ XMMRegister left = ToDoubleRegister(instr->left());
+ XMMRegister right = ToDoubleRegister(instr->right());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ switch (instr->op()) {
+ case Token::ADD:
+ __ addsd(left, right);
+ break;
+ case Token::SUB:
+ __ subsd(left, right);
+ break;
+ case Token::MUL:
+ __ mulsd(left, right);
+ break;
+ case Token::DIV:
+ __ divsd(left, right);
+      // Don't delete this mov. It may improve performance on some CPUs
+      // when there is a mulsd depending on the result.
+ __ movaps(left, left);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ __ movsd(Operand(esp, 0 * kDoubleSize), left);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ // Store it into the result register.
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ break;
}
+ default:
+ UNREACHABLE();
+ break;
}
}
@@ -2359,8 +2033,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2403,7 +2077,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
@@ -2426,7 +2099,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, no_condition);
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
@@ -2512,16 +2184,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(not_equal, &not_heap_number, Label::kNear);
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
- } else {
- __ fldz();
- __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
- __ FCmp();
- }
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
@@ -2544,10 +2209,6 @@ void LCodeGen::EmitGoto(int block) {
}
-void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
-}
-
-
void LCodeGen::DoGoto(LGoto* instr) {
EmitGoto(instr->block_id());
}
@@ -2588,7 +2249,11 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ bool is_unsigned =
+ instr->is_double() ||
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cc = TokenToCondition(instr->op(), is_unsigned);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
@@ -2599,13 +2264,7 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- } else {
- X87LoadForUsage(ToX87Register(right), ToX87Register(left));
- __ FCmp();
- }
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ j(parity_even, instr->FalseLabel(chunk_));
@@ -2616,8 +2275,8 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
} else if (left->IsConstantOperand()) {
__ cmp(ToOperand(right),
ToImmediate(left, instr->hydrogen()->representation()));
- // We transposed the operands. Reverse the condition.
- cc = ReverseCondition(cc);
+ // We commuted the operands, so commute the condition.
+ cc = CommuteCondition(cc);
} else {
__ cmp(ToRegister(left), ToOperand(right));
}
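
The rename from ReverseCondition to CommuteCondition above is deliberate: swapping cmp operands mirrors the ordering conditions rather than negating them. An illustrative stand-in, not the V8 helper:

    enum Condition { kEqual, kNotEqual, kLess, kGreater, kLessEqual, kGreaterEqual };

    Condition Commute(Condition cc) {
      switch (cc) {
        case kLess:         return kGreater;       // a < b  <=>  b > a
        case kGreater:      return kLess;
        case kLessEqual:    return kGreaterEqual;
        case kGreaterEqual: return kLessEqual;
        default:            return cc;             // (in)equality is symmetric
      }
    }
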
@@ -2649,35 +2308,12 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
return;
}
- bool use_sse2 = CpuFeatures::IsSupported(SSE2);
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ ucomisd(input_reg, input_reg);
- EmitFalseBranch(instr, parity_odd);
- } else {
- // Put the value to the top of stack
- X87Register src = ToX87Register(instr->object());
- X87LoadForUsage(src);
- __ fld(0);
- __ fld(0);
- __ FCmp();
- Label ok;
- __ j(parity_even, &ok, Label::kNear);
- __ fstp(0);
- EmitFalseBranch(instr, no_condition);
- __ bind(&ok);
- }
-
+ XMMRegister input_reg = ToDoubleRegister(instr->object());
+ __ ucomisd(input_reg, input_reg);
+ EmitFalseBranch(instr, parity_odd);
__ sub(esp, Immediate(kDoubleSize));
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ movsd(MemOperand(esp, 0), input_reg);
- } else {
- __ fstp_d(MemOperand(esp, 0));
- }
+ __ movsd(MemOperand(esp, 0), input_reg);
__ add(esp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
@@ -2692,7 +2328,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Register scratch = ToRegister(instr->temp());
if (rep.IsDouble()) {
- CpuFeatureScope use_sse2(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
@@ -2706,8 +2341,8 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- EmitFalseBranch(instr, not_equal);
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
__ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
EmitBranch(instr, equal);
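
The new upper-word test above leans on a flags trick: `cmp hi_word, 1` overflows exactly when hi_word == 0x80000000 (INT32_MIN minus one is the only overflowing case), so continuing on overflow plus a zero mantissa word identifies -0. In plain C++ the whole predicate is just a bit-pattern check:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // upper word 0x80000000, lower 0
      return bits == 0x8000000000000000ull;
    }
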
@@ -2768,7 +2403,7 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register temp = ToRegister(instr->temp());
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond = EmitIsString(
@@ -2790,7 +2425,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
@@ -2858,7 +2493,7 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
@@ -2977,8 +2612,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
// Object and function are in fixed registers defined by the stub.
ASSERT(ToRegister(instr->context()).is(esi));
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ test(eax, Operand(eax));
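
A second theme of the patch: every code stub is now constructed with the Isolate, and GetCode() drops its argument, so the isolate travels with the stub instead of being supplied at each code-handle lookup. A rough sketch of the changed base-class shape, with stand-in types (the real CodeStub carries far more state):

    struct Isolate;
    template <typename T> struct Handle {};
    struct Code;
    class CodeStub {
     public:
      explicit CodeStub(Isolate* isolate) : isolate_(isolate) {}
      Handle<Code> GetCode();   // was GetCode(Isolate*)
     private:
      Isolate* isolate_;        // owned by the stub from construction on
    };
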
@@ -2995,9 +2630,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
@@ -3009,7 +2643,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->value());
@@ -3066,7 +2700,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
flags | InstanceofStub::kCallSiteInlineCheck);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
+ InstanceofStub stub(isolate(), flags);
// Get the temp register reserved by the instruction. This needs to be a
// register which is pushed last by PushSafepointRegisters as top of the
@@ -3079,7 +2713,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(isolate()),
+ CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -3158,9 +2792,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- RestoreCallerDoubles();
- }
+ if (info()->saves_caller_doubles()) RestoreCallerDoubles();
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
__ mov(edx, Operand(ebp,
@@ -3204,10 +2836,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3230,19 +2861,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->global_object()).is(edx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3281,7 +2899,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ mov(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register temp = ToRegister(instr->temp());
int offset = Context::SlotOffset(instr->slot_index());
@@ -3289,7 +2907,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
offset,
value,
temp,
- GetSaveFPRegsMode(),
+ kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
@@ -3313,15 +2931,9 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register object = ToRegister(instr->object());
- if (FLAG_track_double_fields &&
- instr->hydrogen()->representation().IsDouble()) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister result = ToDoubleRegister(instr->result());
- __ movsd(result, FieldOperand(object, offset));
- } else {
- X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
- }
+ if (instr->hydrogen()->representation().IsDouble()) {
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movsd(result, FieldOperand(object, offset));
return;
}
@@ -3358,7 +2970,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3411,15 +3023,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ mov(result, FieldOperand(input,
- ExternalArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
@@ -3453,52 +3056,52 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
key,
instr->hydrogen()->key()->representation(),
elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else {
- X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
- }
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- __ movsd(ToDoubleRegister(instr->result()), operand);
- } else {
- X87Mov(ToX87Register(instr->result()), operand);
- }
+ instr->base_offset()));
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ __ movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
__ movsx_b(result, operand);
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ movzx_b(result, operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ movsx_w(result, operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ movzx_w(result, operand);
break;
- case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ mov(result, operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
DeoptimizeIf(negative, instr->environment());
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -3506,7 +3109,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
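
The elements-kind vocabulary moves to ES6 typed-array names (BYTE becomes INT8, PIXEL becomes UINT8_CLAMPED, NON_STRICT becomes SLOPPY), and each EXTERNAL_* kind gains an on-heap FIXED counterpart served by the same load path, which is why is_external() gives way to is_typed_elements() below. The movsx/movzx split in the switch is just integer widening in assembler form; the UINT32 case additionally deoptimizes on a set sign bit unless the value is known uint32, since the result would not fit a signed 32-bit representation. In C terms:

    #include <cstdint>
    // Why INT8/INT16 use movsx and UINT8/UINT16 use movzx (sketch):
    int32_t  LoadInt8(const void* p)   { return *static_cast<const int8_t*>(p);   }  // movsx_b
    uint32_t LoadUint8(const void* p)  { return *static_cast<const uint8_t*>(p);  }  // movzx_b
    int32_t  LoadInt16(const void* p)  { return *static_cast<const int16_t*>(p);  }  // movsx_w
    uint32_t LoadUint16(const void* p) { return *static_cast<const uint16_t*>(p); }  // movzx_w
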
@@ -3516,14 +3119,11 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
+ instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
@@ -3533,15 +3133,9 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister result = ToDoubleRegister(instr->result());
- __ movsd(result, double_load_operand);
- } else {
- X87Mov(ToX87Register(instr->result()), double_load_operand);
- }
+ instr->base_offset());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movsd(result, double_load_operand);
}
@@ -3554,8 +3148,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
+ instr->base_offset()));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -3571,7 +3164,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3586,8 +3179,7 @@ Operand LCodeGen::BuildFastArrayOperand(
LOperand* key,
Representation key_representation,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
+ uint32_t base_offset) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int element_shift_size = ElementsKindToShiftSize(elements_kind);
int shift_size = element_shift_size;
@@ -3597,8 +3189,8 @@ Operand LCodeGen::BuildFastArrayOperand(
Abort(kArrayIndexConstantValueTooBig);
}
return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
+ ((constant_value) << shift_size)
+ + base_offset);
} else {
// Take the tag bit into account while computing the shift size.
if (key_representation.IsSmi() && (shift_size >= 1)) {
@@ -3608,7 +3200,7 @@ Operand LCodeGen::BuildFastArrayOperand(
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
- offset + (additional_index << element_shift_size));
+ base_offset);
}
}
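
BuildFastArrayOperand's two offset inputs, the fixed header offset and additional_index (the statically known component of the key), are folded by the hydrogen layer into a single base_offset, shrinking every call site above by one argument. The arithmetic being consolidated, with illustrative names:

    #include <cstdint>
    // Sketch of the folding now done before codegen:
    //   old operand: elements + (key << shift) + header + (additional_index << shift)
    //   new operand: elements + (key << shift) + base_offset
    uint32_t FoldBaseOffset(uint32_t header, uint32_t additional_index, int shift) {
      return header + (additional_index << shift);
    }
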
@@ -3677,26 +3269,28 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register scratch = ToRegister(instr->temp());
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
+ Label receiver_ok, global_object;
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ Register scratch = ToRegister(instr->temp());
- // Do not transform the receiver to object for strict mode
- // functions.
- __ mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok, dist);
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &receiver_ok, dist);
- // Do not transform the receiver to object for builtins.
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok, dist);
+ // Do not transform the receiver to object for builtins.
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &receiver_ok, dist);
+ }
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
@@ -3709,16 +3303,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
DeoptimizeIf(equal, instr->environment());
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok, Label::kNear);
+ __ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
+ const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ mov(receiver, Operand(receiver, global_offset));
+ const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
+ __ mov(receiver, FieldOperand(receiver, receiver_offset));
__ bind(&receiver_ok);
}
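
DoWrapReceiver implements sloppy-mode receiver coercion: null and undefined become the global receiver, while strict-mode and native callees see the value untouched. The new known_function() guard skips the SharedFunctionInfo flag probes when the callee is statically known, so the strict/native question was already settled during lowering; and the global receiver is now loaded through the callee's own context rather than the frame's, resolving the removed TODO. The semantics in sketch form, with stand-in types rather than V8's object model:

    struct Object;
    bool IsNullOrUndefined(Object* o);  // assumed predicate
    Object* WrapReceiver(Object* receiver, bool strict_or_native,
                         Object* global_receiver) {
      if (strict_or_native) return receiver;            // pass through unchanged
      if (IsNullOrUndefined(receiver)) return global_receiver;
      return receiver;  // spec objects continue; primitives deopt above
    }
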
@@ -3759,8 +3351,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3797,35 +3388,12 @@ void LCodeGen::DoContext(LContext* instr) {
}
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result,
- Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
__ push(esi); // The context is the first argument.
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result,
- Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3833,7 +3401,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
EDIState edi_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3855,7 +3422,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function directly.
- __ SetCallKind(ecx, call_kind);
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
@@ -3869,20 +3435,59 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- EDI_UNINITIALIZED);
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(Operand(target)));
+ __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ mov(eax, instr->arity());
+ }
+
+ // Change context.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ bool is_self_call = false;
+ if (instr->hydrogen()->function()->IsConstant()) {
+ HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+ Handle<JSFunction> jsfun =
+ Handle<JSFunction>::cast(fun_const->handle(isolate()));
+ is_self_call = jsfun.is_identical_to(info()->closure());
+ }
+
+ if (is_self_call) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
@@ -3912,7 +3517,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -3947,9 +3552,8 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LMathAbs* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LMathAbs* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
@@ -3961,7 +3565,6 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
- CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3972,7 +3575,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -3983,7 +3586,6 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathFloor(LMathFloor* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -4004,8 +3606,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
+ __ cmp(output_reg, 0x1);
+ DeoptimizeIf(overflow, instr->environment());
} else {
Label negative_sign, done;
// Deoptimize on unordered.
@@ -4021,7 +3623,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
- __ Set(output_reg, Immediate(0));
+ __ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
}
@@ -4029,8 +3631,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
+ __ cmp(output_reg, 0x1);
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -4049,7 +3651,6 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
void LCodeGen::DoMathRound(LMathRound* instr) {
- CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
@@ -4069,9 +3670,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
+ __ cmp(output_reg, 0x1);
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -4085,9 +3686,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmp(output_reg, 0x80000000u);
+ __ cmp(output_reg, 0x1);
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -4106,21 +3707,19 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ RecordComment("Minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
- __ Set(output_reg, Immediate(0));
+ __ Move(output_reg, Immediate(0));
__ bind(&done);
}
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ sqrtsd(input_reg, input_reg);
+ Operand input = ToOperand(instr->value());
+ XMMRegister output = ToDoubleRegister(instr->result());
+ __ sqrtsd(output, input);
}
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
@@ -4166,7 +3765,7 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
@@ -4174,21 +3773,20 @@ void LCodeGen::DoPower(LPower* instr) {
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
void LCodeGen::DoMathLog(LMathLog* instr) {
- CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
@@ -4196,7 +3794,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
- __ j(equal, &zero, Label::kNear);
+ __ j(not_carry, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ movsd(input_reg, Operand::StaticVariable(nan));
@@ -4219,8 +3817,21 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label not_zero_input;
+ __ bsr(result, input);
+
+ __ j(not_zero, &not_zero_input);
+ __ Move(result, Immediate(63)); // 63^31 == 32
+
+ __ bind(&not_zero_input);
+ __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+}
+
+
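
DoMathClz32 is new here; Math.clz32 arrived with ES6. bsr returns the index of the most significant set bit and leaves its destination undefined for a zero input, hence the explicit 63 fallback; XOR with 31 then converts a bit index into a leading-zero count. A C model, assuming the GCC/Clang builtin:

    #include <cstdint>
    uint32_t Clz32(uint32_t input) {
      if (input == 0) return 32;                  // 63 ^ 31, the fallback path
      uint32_t bsr = 31 - __builtin_clz(input);   // index of highest set bit
      return bsr ^ 31;                            // 31 - bsr for bsr in [0, 31]
    }
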
void LCodeGen::DoMathExp(LMathExp* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
XMMRegister temp0 = double_scratch0();
@@ -4231,39 +3842,6 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
}
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(esi, Immediate(0));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(esi, Immediate(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(esi, Immediate(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->function()).is(edi));
@@ -4275,80 +3853,25 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
EDI_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->key()).is(ecx));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(ecx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->function()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ leave();
- __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
- } else {
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(ecx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- EDI_UNINITIALIZED);
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4358,11 +3881,10 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
// No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ mov(ebx, Immediate(undefined_value));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ Set(eax, Immediate(instr->arity()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ __ Move(eax, Immediate(instr->arity()));
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4371,18 +3893,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
- __ Set(eax, Immediate(instr->arity()));
- __ mov(ebx, instr->hydrogen()->property_cell());
+ __ Move(eax, Immediate(instr->arity()));
+ __ mov(ebx, isolate()->factory()->undefined_value());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4394,19 +3915,20 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
}
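
For single-argument Array construction the right stub depends on a runtime value: new Array(n) with n > 0 starts life with n holes, so a packed allocation site must fall back to the holey-kind stub, while n == 0 keeps the packed kind, hence the branch on a zero argument above. (ebx now carries undefined instead of a feedback cell, since optimized code collects no construct-site feedback.) The selection, sketched with stand-in predicates:

    // Stand-ins for the real kind helpers; selection logic only.
    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
                        FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
    bool IsFastPackedElementsKind(ElementsKind k) {
      return k == FAST_SMI_ELEMENTS || k == FAST_ELEMENTS;
    }
    ElementsKind GetHoleyElementsKind(ElementsKind k) {
      return k == FAST_SMI_ELEMENTS ? FAST_HOLEY_SMI_ELEMENTS
                                    : FAST_HOLEY_ELEMENTS;
    }
    ElementsKind KindForSingleArgument(ElementsKind site_kind, int length) {
      if (IsFastPackedElementsKind(site_kind) && length != 0) {
        return GetHoleyElementsKind(site_kind);  // new Array(n > 0): all holes
      }
      return site_kind;                          // new Array(0) stays packed
    }
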
@@ -4439,7 +3961,7 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Representation representation = instr->representation();
+ Representation representation = instr->hydrogen()->field_representation();
HObjectAccess access = instr->hydrogen()->access();
int offset = access.offset();
@@ -4461,67 +3983,33 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Register object = ToRegister(instr->object());
- Handle<Map> transition = instr->transition();
+ __ AssertNotSmi(object);
- if (FLAG_track_fields && representation.IsSmi()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsSmi(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- } else {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- Register value = ToRegister(instr->value());
- __ test(value, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
- }
- }
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- ASSERT(transition.is_null());
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister value = ToDoubleRegister(instr->value());
- __ movsd(FieldOperand(object, offset), value);
- } else {
- X87Register value = ToX87Register(instr->value());
- X87Mov(FieldOperand(object, offset), value);
- }
+ XMMRegister value = ToDoubleRegister(instr->value());
+ __ movsd(FieldOperand(object, offset), value);
return;
}
- if (!transition.is_null()) {
- if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
- } else {
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
+ AddDeprecationDependency(transition);
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
Register temp_map = ToRegister(instr->temp_map());
- __ mov(temp_map, transition);
- __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
// Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- temp_map,
- temp,
- GetSaveFPRegsMode(),
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
}
}
// Do the store.
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
@@ -4556,9 +4044,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
offset,
value,
temp,
- GetSaveFPRegsMode(),
+ kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
@@ -4569,41 +4058,32 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
+ if (instr->index()->IsConstantOperand()) {
+ __ cmp(ToOperand(instr->length()),
+ ToImmediate(LConstantOperand::cast(instr->index()),
+ instr->hydrogen()->length()->representation()));
+ cc = CommuteCondition(cc);
+ } else if (instr->length()->IsConstantOperand()) {
+ __ cmp(ToOperand(instr->index()),
+ ToImmediate(LConstantOperand::cast(instr->length()),
+ instr->hydrogen()->index()->representation()));
+ } else {
+ __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
Label done;
__ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, check->environment());
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;
-
- if (instr->index()->IsConstantOperand()) {
- Immediate immediate =
- ToImmediate(LConstantOperand::cast(instr->index()),
- instr->hydrogen()->length()->representation());
- __ cmp(ToOperand(instr->length()), immediate);
- Condition condition =
- instr->hydrogen()->allow_equality() ? below : below_equal;
- ApplyCheckIf(condition, instr);
- } else {
- __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
- Condition condition =
- instr->hydrogen()->allow_equality() ? above : above_equal;
- ApplyCheckIf(condition, instr);
+ DeoptimizeIf(cc, instr->environment());
}
}
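
ApplyCheckIf is folded into DoBoundsCheck, and a constant index is handled by comparing the length against the immediate and commuting the condition, instead of maintaining hand-reversed condition pairs; under --debug-code a statically skippable check still executes and traps via int3 rather than deoptimizing. The deopt predicate, as plain C:

    #include <cstdint>
    // above / above_equal on the unsigned index-vs-length compare (sketch):
    bool BoundsCheckFails(uint32_t index, uint32_t length, bool allow_equality) {
      return allow_equality ? index > length : index >= length;
    }
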
@@ -4621,43 +4101,42 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
key,
instr->hydrogen()->key()->representation(),
elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = double_scratch0();
- __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm_scratch);
- } else {
- __ fld(0);
- __ fstp_s(operand);
- }
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- __ movsd(operand, ToDoubleRegister(instr->value()));
- } else {
- X87Mov(operand, ToX87Register(instr->value()));
- }
+ instr->base_offset()));
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm_scratch);
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ __ movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ mov_b(operand, value);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ mov_w(operand, value);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ mov(operand, value);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -4665,7 +4144,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4681,71 +4160,21 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister value = ToDoubleRegister(instr->value());
+ instr->base_offset());
- if (instr->NeedsCanonicalization()) {
- Label have_value;
+ XMMRegister value = ToDoubleRegister(instr->value());
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value, Label::kNear); // NaN.
+ if (instr->NeedsCanonicalization()) {
+ Label have_value;
- __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
- }
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value, Label::kNear); // NaN.
- __ movsd(double_store_operand, value);
- } else {
- // Can't use SSE2 in the serializer
- if (instr->hydrogen()->IsConstantHoleStore()) {
- // This means we should store the (double) hole. No floating point
- // registers required.
- double nan_double = FixedDoubleArray::hole_nan_as_double();
- uint64_t int_val = BitCast<uint64_t, double>(nan_double);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
-
- __ mov(double_store_operand, Immediate(lower));
- Operand double_store_operand2 = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
- instr->additional_index());
- __ mov(double_store_operand2, Immediate(upper));
- } else {
- Label no_special_nan_handling;
- X87Register value = ToX87Register(instr->value());
- X87Fxch(value);
-
- if (instr->NeedsCanonicalization()) {
- __ fld(0);
- __ fld(0);
- __ FCmp();
-
- __ j(parity_odd, &no_special_nan_handling, Label::kNear);
- __ sub(esp, Immediate(kDoubleSize));
- __ fst_d(MemOperand(esp, 0));
- __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
- Immediate(kHoleNanUpper32));
- __ add(esp, Immediate(kDoubleSize));
- Label canonicalize;
- __ j(not_equal, &canonicalize, Label::kNear);
- __ jmp(&no_special_nan_handling, Label::kNear);
- __ bind(&canonicalize);
- __ fstp(0);
- __ fld_d(Operand::StaticVariable(canonical_nan_reference));
- }
-
- __ bind(&no_special_nan_handling);
- __ fst_d(double_store_operand);
- }
+ __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
+ __ bind(&have_value);
}
+
+ __ movsd(double_store_operand, value);
}
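
Stores into fast double arrays canonicalize NaNs because the hole is itself a NaN bit pattern: writing an arbitrary NaN verbatim could read back as a hole later. ucomisd of the value against itself is unordered exactly for NaN, so ordered values jump past the canonical reload. The contract in sketch form; the actual canonical NaN is loaded from canonical_nan_reference above:

    #include <cmath>
    #include <limits>
    double CanonicalizeNanForStore(double value) {
      return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
                               : value;
    }
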
@@ -4758,8 +4187,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
if (instr->value()->IsRegister()) {
__ mov(operand, ToRegister(instr->value()));
} else {
@@ -4779,23 +4207,24 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
Register value = ToRegister(instr->value());
ASSERT(!instr->key()->IsConstantOperand());
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key, operand);
__ RecordWrite(elements,
key,
value,
- GetSaveFPRegsMode(),
+ kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ check_needed,
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases...external, fast-double, fast
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4811,7 +4240,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4854,15 +4283,14 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
kDontSaveFPRegs);
} else {
ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(object_reg.is(eax));
PushSafepointRegistersScope scope(this);
- if (!object_reg.is(eax)) {
- __ mov(eax, object_reg);
- }
__ mov(ebx, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ RecordSafepointWithLazyDeopt(instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
}
__ bind(&not_applicable);
}
@@ -4872,9 +4300,8 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen,
- LStringCharCodeAt* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
@@ -4884,7 +4311,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
};
DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
factory(),
@@ -4903,7 +4330,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, Immediate(0));
+ __ Move(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ push(string);
@@ -4919,7 +4346,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
instr, instr->context());
__ AssertSmi(eax);
__ SmiUntag(eax);
@@ -4931,9 +4358,8 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen,
- LStringCharFromCode* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
@@ -4943,7 +4369,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
};
DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -4952,7 +4378,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
__ cmp(char_code, String::kMaxOneByteCharCode);
__ j(above, deferred->entry());
- __ Set(result, Immediate(factory()->single_character_string_cache()));
+ __ Move(result, Immediate(factory()->single_character_string_cache()));
__ mov(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
@@ -4969,7 +4395,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, Immediate(0));
+ __ Move(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
@@ -4981,18 +4407,12 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
- if (FLAG_new_string_add) {
- ASSERT(ToRegister(instr->left()).is(edx));
- ASSERT(ToRegister(instr->right()).is(eax));
- NewStringAddStub stub(instr->hydrogen()->flags(),
- isolate()->heap()->GetPretenureMode());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -5001,57 +4421,14 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* output = instr->result();
ASSERT(input->IsRegister() || input->IsStackSlot());
ASSERT(output->IsDoubleRegister());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
- } else if (input->IsRegister()) {
- Register input_reg = ToRegister(input);
- __ push(input_reg);
- X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
- __ pop(input_reg);
- } else {
- X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
- }
-}
-
-
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- Register input = ToRegister(instr->value());
- __ SmiTag(input);
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(overflow, instr->environment());
- }
+ __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- LOperand* temp = instr->temp();
-
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
- } else {
- X87Register res = ToX87Register(output);
- X87PrepareToWrite(res);
- __ LoadUint32NoSSE2(ToRegister(input));
- X87CommitWrite(res);
- }
-}
-
-
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- Register input = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ SmiTag(input);
+ __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
}
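
Integer-to-double conversion is now unconditionally SSE2 (the fild fallbacks are gone), and the separate LInteger32ToSmi and LUint32ToSmi instructions disappear from this file. cvtsi2sd interprets its source as signed, so LoadUint32 has to compensate when the top bit is set; one standard scheme, sketched (not necessarily LoadUint32's exact sequence):

    #include <cstdint>
    double Uint32ToDouble(uint32_t u) {
      double d = static_cast<double>(static_cast<int32_t>(u));  // cvtsi2sd
      if (static_cast<int32_t>(u) < 0) d += 4294967296.0;       // add 2^32
      return d;
    }
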
@@ -5059,11 +4436,11 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen,
- LNumberTagI* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LNumberTagI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(
+ instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -5075,7 +4452,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
Register reg = ToRegister(input);
DeferredNumberTagI* deferred =
- new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
+ new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(reg);
__ j(overflow, deferred->entry());
__ bind(deferred->exit());
@@ -5085,12 +4462,11 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagU(LCodeGen* codegen,
- LNumberTagU* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(
+ instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -5102,7 +4478,7 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
Register reg = ToRegister(input);
DeferredNumberTagU* deferred =
- new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
+ new(zone()) DeferredNumberTagU(this, instr);
__ cmp(reg, Immediate(Smi::kMaxValue));
__ j(above, deferred->entry());
__ SmiTag(reg);
@@ -5110,47 +4486,24 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register reg = ToRegister(value);
- Register tmp = reg.is(eax) ? ecx : eax;
+ Register tmp = ToRegister(temp);
XMMRegister xmm_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label done;
-
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
// the value in there. If that fails, call the runtime system.
__ SmiUntag(reg);
__ xor_(reg, 0x80000000);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope feature_scope(masm(), SSE2);
- __ Cvtsi2sd(xmm_scratch, Operand(reg));
- } else {
- __ push(reg);
- __ fild_s(Operand(esp, 0));
- __ pop(reg);
- }
+ __ Cvtsi2sd(xmm_scratch, Operand(reg));
} else {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm_scratch, reg,
- ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
- } else {
- // There's no fild variant for unsigned values, so zero-extend to a 64-bit
- // int manually.
- __ push(Immediate(0));
- __ push(reg);
- __ fild_d(Operand(esp, 0));
- __ pop(reg);
- __ pop(reg);
- }
+ __ LoadUint32(xmm_scratch, reg);
}
if (FLAG_inline_new) {
@@ -5160,42 +4513,39 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ Move(reg, Immediate(0));
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- if (!reg.is(eax)) __ mov(reg, eax);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, eax);
+ }
// Done. Put the value in xmm_scratch into the value of the allocated heap
// number.
__ bind(&done);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope feature_scope(masm(), SSE2);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
- } else {
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
- }
- __ StoreToSafepointRegisterSlot(reg, reg);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
}
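// [Editor's sketch, not part of the original diff] Why `SmiUntag` followed by
// `xor 0x80000000` in the SIGNED_INT32 path above recovers the original
// value: an ia32 smi is the value shifted left by one, so a tag overflow
// means bits 30 and 31 of the original disagreed. The arithmetic right shift
// restores bits 0..30 and copies bit 30 into bit 31; flipping bit 31 then
// yields the original. Hypothetical helper, assuming int32_t from <cstdint>
// and x86-style arithmetic right shift of signed ints:
static int32_t RecoverAfterSmiTagOverflow(int32_t v) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
  int32_t untagged = tagged >> 1;  // like sar: bit 31 now equals old bit 30
  return untagged ^ static_cast<int32_t>(0x80000000u);  // equals v on overflow
}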
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagD(LCodeGen* codegen,
- LNumberTagD* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
@@ -5206,15 +4556,8 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
- bool use_sse2 = CpuFeatures::IsSupported(SSE2);
- if (!use_sse2) {
- // Put the value to the top of stack
- X87Register src = ToX87Register(instr->value());
- X87LoadForUsage(src);
- }
-
DeferredNumberTagD* deferred =
- new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
+ new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
Register tmp = ToRegister(instr->temp());
__ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
@@ -5222,13 +4565,8 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
- } else {
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
- }
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
@@ -5237,16 +4575,16 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
- __ Set(reg, Immediate(0));
+ __ Move(reg, Immediate(0));
PushSafepointRegistersScope scope(this);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(reg, eax);
@@ -5254,10 +4592,18 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ test(input, Immediate(0xc0000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ SmiTag(input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
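// [Editor's sketch, not part of the original diff] The uint32 guard in
// DoSmiTag above: a value read as unsigned fits the ia32 smi payload range
// [0, 2^30 - 1] exactly when its top two bits are clear, which is what
// `test input, 0xc0000000` checks before tagging.
static bool Uint32FitsInSmiPayload(uint32_t value) {
  return (value & 0xC0000000u) == 0;  // i.e. value <= 2^30 - 1
}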
@@ -5275,76 +4621,6 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
}
-void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
- Register temp_reg,
- X87Register res_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
- Label load_smi, done;
-
- X87PrepareToWrite(res_reg);
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
-
- // Convert undefined (or hole) to NaN.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ fld_d(Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- }
- // Heap number to x87 conversion.
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ fldz();
- __ FCmp();
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ j(not_zero, &done, Label::kNear);
-
- // Use general purpose registers to check if we have -0.0
- __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ test(temp_reg, Immediate(HeapNumber::kSignMask));
- __ j(zero, &done, Label::kNear);
-
- // Pop FPU stack before deoptimizing.
- __ fstp(0);
- DeoptimizeIf(not_zero, env);
- }
- __ jmp(&done, Label::kNear);
- } else {
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- __ bind(&load_smi);
- // Clobbering a temp is faster than re-tagging the
- // input register since we avoid dependencies.
- __ mov(temp_reg, input_reg);
- __ SmiUntag(temp_reg); // Untag smi before converting to float.
- __ push(temp_reg);
- __ fild_s(Operand(esp, 0));
- __ add(esp, Immediate(kPointerSize));
- __ bind(&done);
- X87CommitWrite(res_reg);
-}
-
-
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
@@ -5410,6 +4686,10 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
+ // The input was optimistically untagged; revert it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
+
if (instr->truncating()) {
Label no_heap_number, check_bools, check_false;
@@ -5425,21 +4705,20 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
// for truncating conversions.
__ cmp(input_reg, factory()->undefined_value());
__ j(not_equal, &check_bools, Label::kNear);
- __ Set(input_reg, Immediate(0));
+ __ Move(input_reg, Immediate(0));
__ jmp(done);
__ bind(&check_bools);
__ cmp(input_reg, factory()->true_value());
__ j(not_equal, &check_false, Label::kNear);
- __ Set(input_reg, Immediate(1));
+ __ Move(input_reg, Immediate(1));
__ jmp(done);
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
- __ Set(input_reg, Immediate(0));
- __ jmp(done);
+ __ Move(input_reg, Immediate(0));
} else {
Label bailout;
XMMRegister scratch = (instr->temp() != NULL)
@@ -5457,10 +4736,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
- DeferredTaggedToI(LCodeGen* codegen,
- LTaggedToI* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_, done());
}
@@ -5478,10 +4755,14 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
__ SmiUntag(input_reg);
} else {
DeferredTaggedToI* deferred =
- new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
-
- __ JumpIfNotSmi(input_reg, deferred->entry());
+ new(zone()) DeferredTaggedToI(this, instr);
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ SmiUntag(input_reg);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ j(carry, deferred->entry());
__ bind(deferred->exit());
}
}
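// [Editor's sketch, not part of the original diff] The optimistic untag in
// DoTaggedToI above leans on `sar` moving the shifted-out tag bit into the
// carry flag: smis have low bit 0, heap object pointers low bit 1, so carry
// set means "not a smi" and the deferred path re-tags the pointer with
// `lea input, [input*2 + kHeapObjectTag]`. A rough scalar model:
static bool OptimisticSmiUntag(uint32_t* reg) {
  bool carry = (*reg & 1u) != 0;  // the tag bit that sar shifts into CF
  *reg = static_cast<uint32_t>(static_cast<int32_t>(*reg) >> 1);  // sar reg, 1
  return carry;  // true: caller must restore the tag (reg = reg * 2 + 1)
}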
@@ -5504,25 +4785,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- deoptimize_on_minus_zero,
- instr->environment(),
- mode);
- } else {
- EmitNumberUntagDNoSSE2(input_reg,
- temp_reg,
- ToX87Register(instr->result()),
- instr->hydrogen()->can_convert_undefined_to_nan(),
- deoptimize_on_minus_zero,
- instr->environment(),
- mode);
- }
+ XMMRegister result_reg = ToDoubleRegister(result);
+ EmitNumberUntagD(input_reg,
+ temp_reg,
+ result_reg,
+ instr->hydrogen()->can_convert_undefined_to_nan(),
+ deoptimize_on_minus_zero,
+ instr->environment(),
+ mode);
}
@@ -5534,29 +4804,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(result);
if (instr->truncating()) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(input);
- __ TruncateDoubleToI(result_reg, input_reg);
- } else {
- X87Register input_reg = ToX87Register(input);
- X87Fxch(input_reg);
- __ TruncateX87TOSToI(result_reg);
- }
+ XMMRegister input_reg = ToDoubleRegister(input);
+ __ TruncateDoubleToI(result_reg, input_reg);
} else {
Label bailout, done;
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(input);
- XMMRegister xmm_scratch = double_scratch0();
- __ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
- } else {
- X87Register input_reg = ToX87Register(input);
- X87Fxch(input_reg);
- __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
- &bailout, Label::kNear);
- }
+ XMMRegister input_reg = ToDoubleRegister(input);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
__ bind(&bailout);
DeoptimizeIf(no_condition, instr->environment());
@@ -5573,18 +4828,10 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register result_reg = ToRegister(result);
Label bailout, done;
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(input);
- XMMRegister xmm_scratch = double_scratch0();
- __ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
- } else {
- X87Register input_reg = ToX87Register(input);
- X87Fxch(input_reg);
- __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
- &bailout, Label::kNear);
- }
+ XMMRegister input_reg = ToDoubleRegister(input);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
__ bind(&bailout);
DeoptimizeIf(no_condition, instr->environment());
@@ -5603,7 +4850,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
@@ -5675,7 +4922,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
PushSafepointRegistersScope scope(this);
__ push(object);
__ xor_(esi, esi);
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
@@ -5688,11 +4935,8 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
class DeferredCheckMaps V8_FINAL : public LDeferredCode {
public:
- DeferredCheckMaps(LCodeGen* codegen,
- LCheckMaps* instr,
- Register object,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
virtual void Generate() V8_OVERRIDE {
@@ -5706,29 +4950,35 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register object_;
};
- if (instr->hydrogen()->CanOmitMapChecks()) return;
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- for (int i = 0; i < map_set.size() - 1; i++) {
- Handle<Map> map = map_set.at(i).handle();
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
__ CompareMap(reg, map);
__ j(equal, &success, Label::kNear);
}
- Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
__ CompareMap(reg, map);
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr->environment());
@@ -5739,7 +4989,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
@@ -5755,8 +5004,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatureScope scope(masm(), SSE2);
-
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
@@ -5791,135 +5038,48 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
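// [Editor's sketch, not part of the original diff] The uint8 clamp semantics
// that the removed x87 routine below implemented by hand, and that the SSE2
// path keeps: NaN and negative values map to 0, values above 255 map to 255,
// and ties round to even ("banker's rounding"), so 254.5 -> 254 but
// 255.5 -> 256 -> 255. Assuming <cmath>'s nearbyint under the default
// round-to-nearest-even mode:
static uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;     // NaN, +/-0 and negatives clamp to 0
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(value));  // ties round to even
}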
-void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
- Register input_reg = ToRegister(instr->unclamped());
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ XMMRegister value_reg = ToDoubleRegister(instr->value());
Register result_reg = ToRegister(instr->result());
- Register scratch = ToRegister(instr->scratch());
- Register scratch2 = ToRegister(instr->scratch2());
- Register scratch3 = ToRegister(instr->scratch3());
- Label is_smi, done, heap_number, valid_exponent,
- largest_value, zero_result, maybe_nan_or_infinity;
-
- __ JumpIfSmi(input_reg, &is_smi);
-
- // Check for heap number
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
- __ jmp(&zero_result, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
-
- // Surprisingly, all of the hand-crafted bit manipulations below are much
- // faster than the built-in x86 FPU instruction, especially since "banker's
- // rounding" would additionally be very expensive there.
-
- // Get exponent word.
- __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
-
- // Test for negative values --> clamp to zero
- __ test(scratch, scratch);
- __ j(negative, &zero_result, Label::kNear);
-
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ j(zero, &zero_result, Label::kNear);
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
- __ j(negative, &zero_result, Label::kNear);
-
- const uint32_t non_int8_exponent = 7;
- __ cmp(scratch2, Immediate(non_int8_exponent + 1));
- // If the exponent is too big, check for special values.
- __ j(greater, &maybe_nan_or_infinity, Label::kNear);
-
- __ bind(&valid_exponent);
- // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
- // < 7. The shift bias is the number of bits to shift the mantissa such that
- // with an exponent of 7 the top-most one is in bit 30, allowing detection
- // of the rounding overflow of 255.5 to 256 (bit 31 goes from 0 to 1).
- int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
- __ lea(result_reg, MemOperand(scratch2, shift_bias));
- // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
- // top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1 of the mantissa
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up to round
- __ shl_cl(scratch);
- // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
- // use the bit in the "ones" place and add it to the "halves" place, which has
- // the effect of rounding to even.
- __ mov(scratch2, scratch);
- const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
- const uint32_t one_bit_shift = one_half_bit_shift + 1;
- __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
- __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
- Label no_round;
- __ j(less, &no_round, Label::kNear);
- Label round_up;
- __ mov(scratch2, Immediate(1 << one_half_bit_shift));
- __ j(greater, &round_up, Label::kNear);
- __ test(scratch3, scratch3);
- __ j(not_zero, &round_up, Label::kNear);
- __ mov(scratch2, scratch);
- __ and_(scratch2, Immediate(1 << one_bit_shift));
- __ shr(scratch2, 1);
- __ bind(&round_up);
- __ add(scratch, scratch2);
- __ j(overflow, &largest_value, Label::kNear);
- __ bind(&no_round);
- __ shr(scratch, 23);
- __ mov(result_reg, scratch);
- __ jmp(&done, Label::kNear);
-
- __ bind(&maybe_nan_or_infinity);
- // Check for NaN/Infinity, all other values map to 255
- __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
- __ j(not_equal, &largest_value, Label::kNear);
-
- // Check for NaN, which differs from Infinity in that at least one mantissa
- // bit is set.
- __ and_(scratch, HeapNumber::kMantissaMask);
- __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
- // Infinity -> Fall through to map to 255.
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ __ pextrd(result_reg, value_reg, 1);
+ } else {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ pshufd(xmm_scratch, value_reg, 1);
+ __ movd(result_reg, xmm_scratch);
+ }
+ } else {
+ __ movd(result_reg, value_reg);
+ }
+}
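// [Editor's sketch, not part of the original diff] What DoDoubleBits computes
// in scalar form: the HIGH lane carries the sign, exponent and upper mantissa
// bits of the IEEE 754 double; pextrd (SSE4.1) or pshufd+movd merely select a
// 32-bit lane of the xmm register. Hypothetical helper using memcpy:
static uint32_t DoubleHighBits(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));  // bit-cast without aliasing issues
  return static_cast<uint32_t>(bits >> 32);  // lane 1, as pextrd dst, xmm, 1
}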
- __ bind(&largest_value);
- __ mov(result_reg, Immediate(255));
- __ jmp(&done, Label::kNear);
- __ bind(&zero_result);
- __ xor_(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
- // smi
- __ bind(&is_smi);
- if (!input_reg.is(result_reg)) {
- __ mov(result_reg, input_reg);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ __ movd(result_reg, lo_reg);
+ __ pinsrd(result_reg, hi_reg, 1);
+ } else {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ movd(result_reg, hi_reg);
+ __ psllq(result_reg, 32);
+ __ movd(xmm_scratch, lo_reg);
+ __ orps(result_reg, xmm_scratch);
}
- __ SmiUntag(result_reg);
- __ ClampUint8(result_reg);
- __ bind(&done);
}
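// [Editor's sketch, not part of the original diff] DoConstructDouble is the
// inverse: `lo` goes into lane 0 and `hi` into lane 1. The pre-SSE4.1
// fallback shifts hi into the upper lane with psllq and ORs the lanes
// together, equivalent to:
static double ConstructDoubleSketch(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  memcpy(&result, &bits, sizeof(result));
  return result;
}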
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
- DeferredAllocate(LCodeGen* codegen,
- LAllocate* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
@@ -5928,8 +5088,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
LAllocate* instr_;
};
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr, x87_stack_);
+ DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
@@ -5987,7 +5146,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, Immediate(Smi::FromInt(0)));
+ __ Move(result, Immediate(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5997,7 +5156,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ push(Immediate(Smi::FromInt(size)));
+ if (size >= 0 && size <= Smi::kMaxValue) {
+ __ push(Immediate(Smi::FromInt(size)));
+ } else {
+ // We should never get here at runtime => abort
+ __ int3();
+ return;
+ }
}
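// [Editor's note, not part of the original diff] Smi::kMaxValue is 2^30 - 1
// on ia32, so only sizes in [0, 2^30 - 1] can be handed to the runtime as a
// smi; a constant outside that range indicates a compiler bug, hence the
// int3 trap above rather than a deoptimization. Sketch of the guard:
static bool AllocationSizeFitsInSmi(int32_t size) {
  return size >= 0 && size <= (1 << 30) - 1;  // Smi::kMaxValue on ia32
}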
int flags = AllocateDoubleAlignFlag::encode(
@@ -6015,7 +5180,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(Immediate(Smi::FromInt(flags)));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -6048,7 +5213,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -6060,7 +5225,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(ebx);
__ bind(&allocated);
@@ -6085,16 +5250,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ push(esi);
__ push(Immediate(instr->hydrogen()->shared_info()));
__ push(Immediate(pretenure ? factory()->true_value()
: factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -6129,13 +5295,13 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Label::Distance false_distance = right_block == next_block ? Label::kNear
: Label::kFar;
Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_string())) {
+ if (String::Equals(type_name, factory()->number_string())) {
__ JumpIfSmi(input, true_label, true_distance);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
factory()->heap_number_map());
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->string_string())) {
+ } else if (String::Equals(type_name, factory()->string_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label, false_distance);
@@ -6143,22 +5309,23 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
1 << Map::kIsUndetectable);
final_branch_condition = zero;
- } else if (type_name->Equals(heap()->symbol_string())) {
+ } else if (String::Equals(type_name, factory()->symbol_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->boolean_string())) {
+ } else if (String::Equals(type_name, factory()->boolean_string())) {
__ cmp(input, factory()->true_value());
__ j(equal, true_label, true_distance);
__ cmp(input, factory()->false_value());
final_branch_condition = equal;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory()->null_string())) {
__ cmp(input, factory()->null_value());
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->undefined_string())) {
+ } else if (String::Equals(type_name, factory()->undefined_string())) {
__ cmp(input, factory()->undefined_value());
__ j(equal, true_label, true_distance);
__ JumpIfSmi(input, false_label, false_distance);
@@ -6168,7 +5335,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
1 << Map::kIsUndetectable);
final_branch_condition = not_zero;
- } else if (type_name->Equals(heap()->function_string())) {
+ } else if (String::Equals(type_name, factory()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
@@ -6176,7 +5343,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->object_string())) {
+ } else if (String::Equals(type_name, factory()->object_string())) {
__ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ cmp(input, factory()->null_value());
@@ -6239,7 +5406,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6274,7 +5441,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -6286,10 +5453,8 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
- DeferredStackCheck(LCodeGen* codegen,
- LStackCheck* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
@@ -6315,15 +5480,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr, x87_stack_);
+ new(zone()) DeferredStackCheck(this, instr);
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
@@ -6417,11 +5579,55 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ push(index);
+ __ xor_(esi, esi);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(object, eax);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, object, index);
+
Label out_of_object, done;
+ __ test(index, Immediate(Smi::FromInt(1)));
+ __ j(not_zero, deferred->entry());
+
+ __ sar(index, 1);
+
__ cmp(index, Immediate(0));
__ j(less, &out_of_object, Label::kNear);
__ mov(object, FieldOperand(object,
@@ -6438,10 +5644,26 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
index,
times_half_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
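// [Editor's note, not part of the original diff] The index smi in
// DoLoadFieldByIndex above packs two facts into its payload: bit 0 flags a
// mutable heap-number (double) field, which the deferred path boxes via
// Runtime::kLoadMutableDouble, and the remaining bits give the field index,
// negative meaning the out-of-object properties array. `sar index, 1` drops
// the flag bit while keeping the value a valid smi. Assuming that encoding:
static void DecodeFieldByIndexPayload(int32_t payload,
                                      bool* is_mutable_double,
                                      int32_t* field_index) {
  *is_mutable_double = (payload & 1) != 0;
  *field_index = payload >> 1;  // negative => load from the properties array
}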
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.h b/chromium/v8/src/ia32/lithium-codegen-ia32.h
index 638f80c3549..64a6b3c40ce 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.h
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.h
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
-#include "ia32/lithium-ia32.h"
+#include "src/ia32/lithium-ia32.h"
-#include "checks.h"
-#include "deoptimizer.h"
-#include "ia32/lithium-gap-resolver-ia32.h"
-#include "lithium-codegen.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "v8utils.h"
+#include "src/checks.h"
+#include "src/deoptimizer.h"
+#include "src/ia32/lithium-gap-resolver-ia32.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -61,7 +38,6 @@ class LCodeGen: public LCodeGenBase {
support_aligned_spilled_doubles_(false),
osr_pc_offset_(-1),
frame_is_built_(false),
- x87_stack_(assembler),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -90,7 +66,6 @@ class LCodeGen: public LCodeGenBase {
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
- X87Register ToX87Register(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
@@ -99,36 +74,6 @@ class LCodeGen: public LCodeGenBase {
}
double ToDouble(LConstantOperand* op) const;
- // Support for non-sse2 (x87) floating point stack handling.
- // These functions maintain the mapping of physical stack registers to our
- // virtual registers between instructions.
- enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };
-
- void X87Mov(X87Register reg, Operand src,
- X87OperandType operand = kX87DoubleOperand);
- void X87Mov(Operand src, X87Register reg,
- X87OperandType operand = kX87DoubleOperand);
-
- void X87PrepareBinaryOp(
- X87Register left, X87Register right, X87Register result);
-
- void X87LoadForUsage(X87Register reg);
- void X87LoadForUsage(X87Register reg1, X87Register reg2);
- void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
- void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
-
- void X87Fxch(X87Register reg, int other_slot = 0) {
- x87_stack_.Fxch(reg, other_slot);
- }
- void X87Free(X87Register reg) {
- x87_stack_.Free(reg);
- }
-
-
- bool X87StackEmpty() {
- return x87_stack_.depth() == 0;
- }
-
Handle<Object> ToHandle(LConstantOperand* op) const;
// The operand denoting the second word (the one with a higher address) of
@@ -148,9 +93,10 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -161,6 +107,9 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -177,9 +126,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
@@ -194,8 +141,6 @@ class LCodeGen: public LCodeGenBase {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(BailoutReason reason);
-
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
@@ -257,7 +202,6 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
EDIState edi_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -269,7 +213,6 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void ApplyCheckIf(Condition cc, LBoundsCheck* check);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -282,7 +225,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -290,7 +232,6 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
- X87Register ToX87Register(int index) const;
int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
@@ -299,8 +240,7 @@ class LCodeGen: public LCodeGenBase {
LOperand* key,
Representation key_representation,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
+ uint32_t base_offset);
Operand BuildSeqStringOperand(Register string,
LOperand* index,
@@ -338,15 +278,6 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* env,
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
- void EmitNumberUntagDNoSSE2(
- Register input,
- Register temp,
- X87Register res_reg,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
-
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -394,12 +325,6 @@ class LCodeGen: public LCodeGenBase {
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
- void X87Fld(Operand src, X87OperandType opts);
-
- void EmitFlushX87ForDeopt();
- void FlushX87StackIfNecessary(LInstruction* instr) {
- x87_stack_.FlushIfNecessary(instr, this);
- }
friend class LGapResolver;
#ifdef _MSC_VER
@@ -422,55 +347,6 @@ class LCodeGen: public LCodeGenBase {
int osr_pc_offset_;
bool frame_is_built_;
- class X87Stack {
- public:
- explicit X87Stack(MacroAssembler* masm)
- : stack_depth_(0), is_mutable_(true), masm_(masm) { }
- explicit X87Stack(const X87Stack& other)
- : stack_depth_(other.stack_depth_), is_mutable_(false), masm_(masm()) {
- for (int i = 0; i < stack_depth_; i++) {
- stack_[i] = other.stack_[i];
- }
- }
- bool operator==(const X87Stack& other) const {
- if (stack_depth_ != other.stack_depth_) return false;
- for (int i = 0; i < stack_depth_; i++) {
- if (!stack_[i].is(other.stack_[i])) return false;
- }
- return true;
- }
- bool Contains(X87Register reg);
- void Fxch(X87Register reg, int other_slot = 0);
- void Free(X87Register reg);
- void PrepareToWrite(X87Register reg);
- void CommitWrite(X87Register reg);
- void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
- void LeavingBlock(int current_block_id, LGoto* goto_instr);
- int depth() const { return stack_depth_; }
- void pop() {
- ASSERT(is_mutable_);
- stack_depth_--;
- }
- void push(X87Register reg) {
- ASSERT(is_mutable_);
- ASSERT(stack_depth_ < X87Register::kNumAllocatableRegisters);
- stack_[stack_depth_] = reg;
- stack_depth_++;
- }
-
- MacroAssembler* masm() const { return masm_; }
-
- private:
- int ArrayIndex(X87Register reg);
- int st2idx(int pos);
-
- X87Register stack_[X87Register::kNumAllocatableRegisters];
- int stack_depth_;
- bool is_mutable_;
- MacroAssembler* masm_;
- };
- X87Stack x87_stack_;
-
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
@@ -509,11 +385,10 @@ class LCodeGen: public LCodeGenBase {
class LDeferredCode : public ZoneObject {
public:
- explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
+ explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
- instruction_index_(codegen->current_instruction_),
- x87_stack_(x87_stack) {
+ instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
@@ -526,7 +401,6 @@ class LDeferredCode : public ZoneObject {
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
int instruction_index() const { return instruction_index_; }
- const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -539,7 +413,6 @@ class LDeferredCode : public ZoneObject {
Label* external_exit_;
Label done_;
int instruction_index_;
- LCodeGen::X87Stack x87_stack_;
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
index d621bd261d6..71a4a0e8d93 100644
--- a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-gap-resolver-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-gap-resolver-ia32.h"
+#include "src/ia32/lithium-codegen-ia32.h"
namespace v8 {
namespace internal {
@@ -309,7 +286,7 @@ void LGapResolver::EmitMove(int index) {
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToImmediate(constant_source, r));
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
@@ -318,22 +295,13 @@ void LGapResolver::EmitMove(int index) {
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- if (int_val == 0) {
- __ xorps(dst, dst);
- } else {
- __ push(Immediate(upper));
- __ push(Immediate(lower));
- __ movsd(dst, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- }
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ if (int_val == 0) {
+ __ xorps(dst, dst);
} else {
__ push(Immediate(upper));
__ push(Immediate(lower));
- X87Register dst = cgen_->ToX87Register(destination);
- cgen_->X87Mov(dst, MemOperand(esp, 0));
+ __ movsd(dst, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
} else {
@@ -342,7 +310,7 @@ void LGapResolver::EmitMove(int index) {
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToImmediate(constant_source, r));
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
} else {
Register tmp = EnsureTempRegister();
__ LoadObject(tmp, cgen_->ToHandle(constant_source));
@@ -351,59 +319,27 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(dst, src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movsd(dst, src);
- }
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movaps(dst, src);
} else {
- // Store the x87 register to the destination, which must be a double stack
- // slot in the non-SSE2 case.
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- X87Register src = cgen_->ToX87Register(source);
- cgen_->X87Mov(dst, src);
+ __ movsd(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movsd(dst, src);
- } else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(dst, xmm0);
- }
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movsd(dst, src);
} else {
- // Load the source stack slot onto the top of the floating point stack, and
- // then store it in the destination. If the destination is a double
- // register, it represents the top of the stack and nothing needs to be done.
- if (destination->IsDoubleStackSlot()) {
- Register tmp = EnsureTempRegister();
- Operand src0 = cgen_->ToOperand(source);
- Operand src1 = cgen_->HighOperand(source);
- Operand dst0 = cgen_->ToOperand(destination);
- Operand dst1 = cgen_->HighOperand(destination);
- __ mov(tmp, src0); // Then use tmp to copy source to destination.
- __ mov(dst0, tmp);
- __ mov(tmp, src1);
- __ mov(dst1, tmp);
- } else {
- Operand src = cgen_->ToOperand(source);
- X87Register dst = cgen_->ToX87Register(destination);
- cgen_->X87Mov(dst, src);
- }
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = cgen_->ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
}
} else {
UNREACHABLE();
@@ -468,7 +404,6 @@ void LGapResolver::EmitSwap(int index) {
__ mov(src, tmp0);
}
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = cgen_->ToDoubleRegister(source);
@@ -477,7 +412,6 @@ void LGapResolver::EmitSwap(int index) {
__ movaps(src, dst);
__ movaps(dst, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
@@ -490,7 +424,6 @@ void LGapResolver::EmitSwap(int index) {
__ movsd(other, reg);
__ movaps(reg, xmm0);
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
// purpose temporary register and also rely on having xmm0 available as
// a fixed scratch register.
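// [Editor's note, not part of the original diff] All three swap cases above
// route through xmm0 as a fixed scratch register (e.g. movaps xmm0, src;
// movaps src, dst; movaps dst, xmm0), which is why the register allocator
// never hands out xmm0. The same three-move pattern in scalar form:
static void SwapViaScratch(double* a, double* b) {
  double scratch = *a;  // scratch plays the role of xmm0
  *a = *b;
  *b = scratch;
}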
diff --git a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h
index 4aff241f431..87549d00bbe 100644
--- a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium.h"
+#include "src/lithium.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/ia32/lithium-ia32.cc b/chromium/v8/src/ia32/lithium-ia32.cc
index 5c92580c354..44b4ea5089d 100644
--- a/chromium/v8/src/ia32/lithium-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-ia32.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "lithium-allocator-inl.h"
-#include "ia32/lithium-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
-#include "hydrogen-osr.h"
+#include "src/lithium-allocator-inl.h"
+#include "src/ia32/lithium-ia32.h"
+#include "src/ia32/lithium-codegen-ia32.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -83,17 +60,6 @@ bool LInstruction::HasDoubleRegisterInput() {
}
-bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) {
- for (int i = 0; i < InputCount(); i++) {
- LOperand* op = InputAt(i);
- if (op != NULL && op->IsDoubleRegister()) {
- if (cgen->ToX87Register(op).is(reg)) return true;
- }
- }
- return false;
-}
-
-
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -286,7 +252,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -307,7 +273,18 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -334,28 +311,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[ecx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
context()->PrintTo(stream);
@@ -420,7 +375,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -431,7 +386,7 @@ void LLoadKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
@@ -443,7 +398,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
+ stream->Add(" + %d] <-", base_offset());
} else {
stream->Add("] <- ");
}
@@ -625,8 +580,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -634,41 +588,36 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
XMMRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -705,6 +654,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
!hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
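+    // Marking it as used keeps the conservatively assigned environment
+    // alive even though we could not prove it is actually required.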
+ instr->environment()->set_has_been_used();
}
return instr;
@@ -919,179 +870,102 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
chunk_->AddInstruction(dummy, current_block_);
}
} else {
- instr = current->CompileToLithium(this);
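+    // Control instructions whose successor is already statically known
+    // (e.g. a branch on a constant) are lowered straight to a goto rather
+    // than compiled to Lithium.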
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
}
argument_count_ += current->argument_delta();
ASSERT(argument_count_ >= 0);
if (instr != NULL) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(current);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, The register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // the it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- ASSERT(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- if (!CpuFeatures::IsSafeForSnapshot(SSE2) && instr->IsGoto() &&
- LGoto::cast(instr)->jumps_to_join()) {
- // TODO(olivf) Since phis of spilled values are joined as registers
- // (not in the stack slot), we need to allow the goto gaps to keep one
- // x87 register alive. To ensure all other values are still spilled, we
- // insert a fpu register barrier right before.
- LClobberDoubles* clobber = new(zone()) LClobberDoubles();
- clobber->set_hydrogen_value(current);
- chunk_->AddInstruction(clobber, current_block_);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- if (instr->IsCall()) {
- HValue* hydrogen_value_for_lazy_bailout = current;
- LInstruction* instruction_needing_environment = NULL;
- if (current->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(current->next());
- instruction_needing_environment = instr;
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
- }
+ AddInstruction(instr, current);
}
+
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result =
- new(zone()) LEnvironment(hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
}
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
}
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
- }
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
}
+ ASSERT(fixed == 0 || used_at_start == 0);
}
+#endif
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
}
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
- return result;
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
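+    // Every call is a lazy deoptimization point: record an environment
+    // right after the call so execution can resume in unoptimized code if
+    // a deopt is requested while the call is still on the stack.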
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
}
@@ -1101,33 +975,22 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
- ToBooleanStub::Types expected = instr->expected_input_types();
-
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
HValue* value = instr->value();
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
- if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
- return new(zone()) LBranch(UseRegister(value), NULL);
- }
-
- bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
- LOperand* temp = needs_temp ? TempRegister() : NULL;
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
- // The Generic stub does not have a deopt, so we need no environment.
- if (expected.IsGeneric()) {
- return new(zone()) LBranch(UseRegister(value), temp);
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
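+  // The generic ToBoolean case handles every input kind and never deopts;
+  // any narrower set of expected types can still see an unexpected value
+  // and therefore needs an environment to bail out with.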
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
-
- // We need a temporary register when we have to access the map *or* we have
- // no type info yet, in which case we handle all cases (including the ones
- // involving maps).
- return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
+ return branch;
}
@@ -1177,7 +1040,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
+ LOperand* function = UseRegister(instr->function());
LOperand* temp = TempRegister();
LWrapReceiver* result =
new(zone()) LWrapReceiver(receiver, function, temp);
@@ -1198,9 +1061,13 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
}
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- LOperand* argument = UseAny(instr->argument());
- return new(zone()) LPushArgument(argument);
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = UseAny(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
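+  // All arguments were emitted as individual pushes above, so there is no
+  // single result instruction to return for this hydrogen value.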
+ return NULL;
}
@@ -1239,33 +1106,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), edi);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
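+  // Apart from the target, each operand is pinned to the register that the
+  // call descriptor assigns to the corresponding parameter.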
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1283,12 +1155,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1314,8 +1184,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context()); // Deferred use.
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
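+  // Only a tagged abs can allocate (a deferred heap-number result), hence
+  // the pointer map; any non-double input can also deoptimize, e.g. for
+  // kMinInt, hence the environment.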
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
@@ -1323,29 +1197,14 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathLog* result = new(zone()) LMathLog(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
}
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
}
@@ -1361,9 +1220,8 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathSqrt* result = new(zone()) LMathSqrt(input);
- return DefineSameAsFirst(result);
+ LOperand* input = UseAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathSqrt(input));
}
@@ -1375,34 +1233,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseFixed(instr->key(), ecx);
- LCallKeyed* result = new(zone()) LCallKeyed(context, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LCallNamed* result = new(zone()) LCallNamed(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LCallGlobal* result = new(zone()) LCallGlobal(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
@@ -1423,9 +1253,7 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
LCallFunction* call = new(zone()) LCallFunction(context, function);
- LInstruction* result = DefineFixed(call, eax);
- if (instr->IsTailCall()) return result;
- return MarkAsCall(result, instr);
+ return MarkAsCall(DefineFixed(call, eax), instr);
}
@@ -1470,24 +1298,71 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
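+  // Deopt support is needed when the result could be -0, when
+  // kMinInt / -1 could overflow, or when a nonzero remainder would make
+  // the result inexact for non-truncating uses.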
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
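+  // idiv consumes the dividend in edx:eax and leaves the quotient in eax,
+  // so the left operand is fixed to eax and edx is reserved as a temp.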
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), eax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, eax));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1496,97 +1371,132 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
}
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
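+  // The extra temp is only needed when the dividend's sign can differ from
+  // the divisor's, in which case the truncated quotient must be adjusted
+  // to round toward negative infinity.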
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
}
- return NULL;
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
+ dividend, divisor, temp), eax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, eax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- // use dividend as temp if divisor < 0 && divisor != -1
- LOperand* dividend = divisor_si < -1 ? UseTempRegister(instr->left()) :
- UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
} else {
- // needs edx:eax, plus a temp
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* temp = TempRegister();
- LInstruction* result = DefineFixed(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp), edx);
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
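+  // The power-of-2 case is computed by masking, which can neither overflow
+  // nor divide by zero, so only a -0 result needs a bailout.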
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), eax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), edx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
}
+ return result;
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
-
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right),
- NULL);
- LInstruction* result = DefineSameAsFirst(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- return AssignEnvironment(DefineSameAsFirst(mod));
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LModI* mod = new(zone()) LModI(UseFixed(left, eax),
- UseRegister(right),
- FixedTemp(edx));
- LInstruction* result = DefineFixed(mod, edx);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1765,8 +1675,6 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1782,8 +1690,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* value = UseRegister(instr->value());
LOperand* scratch = TempRegister();
return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
@@ -1874,19 +1780,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* date = UseFixed(instr->value(), eax);
LDateField* result =
@@ -1936,9 +1829,16 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new(zone()) LBoundsCheck(
- UseRegisterOrConstantAtStart(instr->index()),
- UseAtStart(instr->length())));
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
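+  // The compare needs at least one non-immediate operand, so the length
+  // may only be used as a constant when the index is not.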
+ LOperand* length = !index->IsConstantOperand()
+ ? UseOrConstantAtStart(instr->length())
+ : UseAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1956,13 +1856,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(), eax);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -1979,25 +1872,23 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ HValue* val = instr->value();
if (from.IsSmi()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
from = Representation::Tagged();
}
- // Only mark conversions that might need to allocate as calling rather than
- // all changes. This makes simple, non-allocating conversion not have to force
- // building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- // Temp register only necessary for minus zero check.
+ LOperand* value = UseRegister(val);
LOperand* temp = TempRegister();
- LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
- return AssignEnvironment(DefineAsRegister(res));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineSameAsFirst(new(zone()) LDummyUse(value));
@@ -2005,78 +1896,70 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- HValue* val = instr->value();
if (val->type().IsSmi() || val->representation().IsSmi()) {
LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
+ LOperand* value = UseRegister(val);
bool truncating = instr->CanTruncateToInt32();
- LOperand* xmm_temp =
- (CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating)
- ? FixedTemp(xmm1) : NULL;
- LTaggedToI* res = new(zone()) LTaggedToI(UseRegister(val), xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
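+        // The xmm temp is only needed for the exact (non-truncating)
+        // conversion, which deopts when the untagged double does not
+        // round-trip through int32.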
+ LOperand* xmm_temp = !truncating ? FixedTemp(xmm1) : NULL;
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
}
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseRegisterAtStart(val);
LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
-
- // Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else if (to.IsSmi()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return AssignEnvironment(
DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
bool truncating = instr->CanTruncateToInt32();
- bool needs_temp = CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating;
- LOperand* value = needs_temp ?
- UseTempRegister(instr->value()) : UseRegister(instr->value());
+ bool needs_temp = !truncating;
+ LOperand* value = needs_temp ? UseTempRegister(val) : UseRegister(val);
LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LDoubleToI(value, temp));
+ if (!truncating) result = AssignEnvironment(result);
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = CpuFeatures::IsSupported(SSE2) ? FixedTemp(xmm1)
- : NULL;
+ LOperand* temp = TempRegister();
LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignPointerMap(DefineSameAsFirst(result));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LOperand* temp = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
+ return AssignPointerMap(DefineSameAsFirst(result));
}
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
- : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
} else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
}
}
}
@@ -2087,7 +1970,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2117,15 +2004,12 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) {
- value = UseRegisterAtStart(instr->value());
- if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
- }
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (!instr->CanOmitMapChecks()) {
- AssignEnvironment(result);
- if (instr->has_migration_target()) return AssignPointerMap(result);
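+  // A stability check emits no compare at run time; it only registers a
+  // dependency on the maps staying stable, so no input operand is needed.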
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
}
return result;
}
@@ -2142,24 +2026,30 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
} else {
ASSERT(input_rep.IsSmiOrTagged());
- if (CpuFeatures::IsSupported(SSE2)) {
- LOperand* reg = UseFixed(value, eax);
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LOperand* temp = FixedTemp(xmm1);
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
- return AssignEnvironment(DefineFixed(result, eax));
- } else {
- LOperand* value = UseRegister(instr->value());
- LClampTToUint8NoSSE2* res =
- new(zone()) LClampTToUint8NoSSE2(value, TempRegister(),
- TempRegister(), TempRegister());
- return AssignEnvironment(DefineFixed(res, ecx));
- }
+ LOperand* reg = UseFixed(value, eax);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* temp = FixedTemp(xmm1);
+ LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
+ return AssignEnvironment(DefineFixed(result, eax));
}
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2214,21 +2104,14 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(), edx);
- LOperand* value = UseFixed(instr->value(), eax);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2244,7 +2127,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
temp = NULL;
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2278,13 +2164,6 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
@@ -2293,29 +2172,32 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LInstruction* result = NULL;
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((instr->elements_kind() == EXTERNAL_UINT32_ELEMENTS ||
+ instr->elements_kind() == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2335,24 +2217,22 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
// Determine if we need a byte register in this case for the value.
bool val_is_fixed_register =
- elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS;
+ elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS;
if (val_is_fixed_register) {
return UseFixed(instr->value(), eax);
}
- if (!CpuFeatures::IsSafeForSnapshot(SSE2) &&
- IsDoubleOrFloatElementsKind(elements_kind)) {
- return UseRegisterAtStart(instr->value());
- }
-
return UseRegister(instr->value());
}
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsSmi());
@@ -2384,23 +2264,22 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
- LOperand* external_pointer = UseRegister(instr->elements());
+ LOperand* backing_store = UseRegister(instr->elements());
LOperand* val = GetStoreKeyedValueOperand(instr);
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(external_pointer,
- key,
- val);
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
@@ -2422,7 +2301,6 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
@@ -2432,10 +2310,11 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
new_map_reg, temp_reg);
return result;
} else {
+ LOperand* object = UseFixed(instr->object(), eax);
LOperand* context = UseFixed(instr->context(), esi);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return AssignPointerMap(result);
+ return MarkAsCall(result, instr);
}
}
@@ -2476,7 +2355,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool can_be_constant = instr->value()->IsConstant() &&
HConstant::cast(instr->value())->NotInNewSpace() &&
- !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+ !instr->field_representation().IsDouble();
LOperand* val;
if (instr->field_representation().IsInteger8() ||
@@ -2488,10 +2367,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ } else if (instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2505,15 +2383,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
- LStoreNamedField* result =
- new(zone()) LStoreNamedField(obj, val, temp, temp_map);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
}
@@ -2530,12 +2400,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = FLAG_new_string_add
- ? UseFixed(instr->left(), edx)
- : UseOrConstantAtStart(instr->left());
- LOperand* right = FLAG_new_string_add
- ? UseFixed(instr->right(), eax)
- : UseOrConstantAtStart(instr->right());
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
return MarkAsCall(DefineFixed(string_add, eax), instr);
}
@@ -2547,7 +2413,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
@@ -2602,7 +2468,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ info()->code_stub()->GetInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
@@ -2689,8 +2555,6 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2723,13 +2587,13 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2784,7 +2648,25 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, esi), instr);
}
diff --git a/chromium/v8/src/ia32/lithium-ia32.h b/chromium/v8/src/ia32/lithium-ia32.h
index c865d8d0e9a..e12ca5e9bda 100644
--- a/chromium/v8/src/ia32/lithium-ia32.h
+++ b/chromium/v8/src/ia32/lithium-ia32.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IA32_LITHIUM_IA32_H_
#define V8_IA32_LITHIUM_IA32_H_
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
+#include "src/hydrogen.h"
+#include "src/lithium-allocator.h"
+#include "src/lithium.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -43,6 +20,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
+ V(AllocateBlockContext) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -52,12 +30,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -71,9 +46,7 @@ class LCodeGen;
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
- V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
- V(ClobberDoubles) \
V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
@@ -85,24 +58,28 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
- V(ElementsKind) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -111,7 +88,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -121,7 +97,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -133,17 +108,16 @@ class LCodeGen;
V(LoadRoot) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -151,7 +125,6 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
@@ -165,8 +138,8 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
+ V(StoreFrameContext) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -178,16 +151,13 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
@@ -268,11 +238,8 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const {
- return IsCall() ||
- // We only have rudimentary X87Stack tracking, thus in general
- // cannot handle phi-nodes.
- (!CpuFeatures::IsSafeForSnapshot(SSE2) && IsControl());
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
}
virtual bool HasResult() const = 0;
@@ -280,7 +247,6 @@ class LInstruction : public ZoneObject {
bool HasDoubleRegisterResult();
bool HasDoubleRegisterInput();
- bool IsDoubleInput(X87Register reg, LCodeGen* cgen);
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -311,10 +277,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -326,6 +290,15 @@ class LTemplateInstruction : public LInstruction {
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
@@ -398,16 +371,6 @@ class LInstructionGap V8_FINAL : public LGap {
};
-class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
- LClobberDoubles() { ASSERT(!CpuFeatures::IsSafeForSnapshot(SSE2)); }
-
- virtual bool ClobbersDoubleRegisters() const { return true; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
-};
-
-
class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
@@ -418,7 +381,9 @@ class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
- virtual bool ClobbersDoubleRegisters() const { return false; }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return false;
+ }
bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
@@ -451,6 +416,7 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -501,10 +467,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -576,6 +538,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
};
@@ -643,6 +606,49 @@ class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -660,39 +666,126 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
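
The instructions added above split constant division by rounding mode: HDiv lowers to the truncating (round-toward-zero) variants and HMathFloorOfDiv to the flooring ones, and the two disagree exactly for negative dividends, which is why each needs its own power-of-two and magic-constant form. The *ByConstI temps serve the multiply-high sequence of Granlund/Montgomery-style division by an invariant integer. A minimal scalar sketch of just the power-of-two cases, with illustrative helper names that are not V8 code:

    #include <cassert>
    #include <cstdint>

    // Truncating division by 1 << shift (what C++ '/' does): bias negative
    // dividends by divisor - 1 so the arithmetic shift rounds toward zero.
    int32_t TruncatingDivByPowerOf2(int32_t n, int shift) {
      int32_t bias = (n >> 31) & ((1 << shift) - 1);
      return (n + bias) >> shift;
    }

    // Flooring division by 1 << shift (what Math.floor(a / b) needs): a
    // plain arithmetic shift already rounds toward negative infinity.
    int32_t FlooringDivByPowerOf2(int32_t n, int shift) {
      return n >> shift;
    }

    int main() {
      assert(TruncatingDivByPowerOf2(-7, 1) == -3);  // -7 / 2
      assert(FlooringDivByPowerOf2(-7, 1) == -4);    // floor(-7 / 2)
      assert(TruncatingDivByPowerOf2(6, 1) == 3);
      assert(FlooringDivByPowerOf2(6, 1) == 3);
      return 0;
    }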
@@ -792,39 +885,15 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathTan(LOperand* value) {
+ explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
@@ -1312,34 +1381,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index)
@@ -1397,20 +1438,6 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
- LThrow(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
@@ -1592,20 +1619,6 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
@@ -1620,12 +1633,18 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
bool key_is_smi() {
return hydrogen()->key()->representation().IsTagged();
}
@@ -1639,9 +1658,12 @@ inline static bool ExternalArrayOpRequiresTemp(
// an index cannot fold the scale operation into a load and need an extra
// temp register to do the work.
return key_representation.IsSmi() &&
- (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS);
+ (elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS);
}
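
The rewritten predicate folds in the new fixed typed array kinds (UINT8_ELEMENTS and friends) beside the external ones; the underlying constraint is unchanged. A Smi-tagged key is the index shifted left by one bit, so for 2-, 4- and 8-byte elements the tag folds into a legal x86 scale factor, while 1-byte elements would need a scale of one half, which no addressing mode can express — hence the extra temp to hold the untagged key. In scalar form, assuming V8's 32-bit one-bit Smi tag:

    #include <cstdint>

    // A tagged key k encodes index k >> 1, so the element byte offset is
    // (k >> 1) * size == k * (size / 2). For size in {2, 4, 8} the factor
    // size / 2 is a legal x86 scale (1, 2, 4); for size == 1 it is not,
    // and the key must first be untagged into a scratch register.
    int32_t ByteOffsetFromSmiKey(int32_t smi_key, int elem_size) {
      int32_t index = smi_key >> 1;  // SmiUntag
      return index * elem_size;
    }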
@@ -1699,28 +1721,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1784,15 +1784,15 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
- temps_[0] = code_object;
+ inputs_[1] = code_object;
}
LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return temps_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
@@ -1831,18 +1831,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1856,94 +1844,69 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- LOperand* global() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
+ LOperand* function() { return inputs_[0]; }
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
+ LOperand* target() const { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
+ ZoneList<LOperand*> inputs_;
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNamed(LOperand* context) {
+ LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
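
Among the new call instructions, LCallWithDescriptor is the one that cannot use the fixed-size LTemplateInstruction<R, I, T> storage at all: its operand count is only known from the call descriptor, so it derives from the new LTemplateResultInstruction<1> base and supplies the operand iterators itself from a ZoneList. A stripped-down sketch of that shape, with std::vector standing in for ZoneList and hypothetical type names:

    #include <utility>
    #include <vector>

    struct Op;  // stand-in for LOperand

    // Fixed result storage in the base; input iteration left virtual.
    class ResultInstr {
     public:
      virtual ~ResultInstr() {}
      virtual int InputCount() = 0;
      virtual Op* InputAt(int i) = 0;
    };

    // Variable-arity call: operands live in a growable list and the
    // register-allocator-facing iterators read from it directly.
    class VarArgCall : public ResultInstr {
     public:
      explicit VarArgCall(std::vector<Op*> operands)
          : inputs_(std::move(operands)) {}
      Op* target() { return inputs_[0]; }
      int InputCount() override { return static_cast<int>(inputs_.size()); }
      Op* InputAt(int i) override { return inputs_[i]; }
     private:
      std::vector<Op*> inputs_;
    };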
@@ -1965,35 +1928,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -2043,7 +1977,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2065,22 +1999,21 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LInteger32ToSmi(LOperand* value) {
+ explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
};
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
+ LNumberTagI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -2088,31 +2021,6 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
@@ -2204,6 +2112,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2261,11 +2170,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
- Representation representation() const {
- return hydrogen()->field_representation();
- }
};
@@ -2286,7 +2190,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2299,6 +2203,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2310,7 +2220,7 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
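
The additional_index to base_offset rename running through LLoadKeyed and LStoreKeyed looks like a unit change rather than a pure rename: the old accessor counted whole elements, the new one reads as a byte offset added to the address unscaled. That reading is inferred from the names alone, so treat this sketch as an assumption:

    #include <cstdint>

    // Old scheme (assumed): extra offset in elements, scaled at use.
    intptr_t OldElementAddress(intptr_t base, intptr_t key, int elem_size,
                               uint32_t additional_index) {
      return base + (key + additional_index) * elem_size;
    }

    // New scheme (assumed): extra offset in bytes, added unscaled.
    intptr_t NewElementAddress(intptr_t base, intptr_t key, int elem_size,
                               uint32_t base_offset) {
      return base + key * elem_size + base_offset;
    }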
@@ -2337,7 +2247,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2468,7 +2378,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMaps(LOperand* value) {
+ explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
}
@@ -2529,40 +2439,43 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-// Truncating conversion from a tagged value to an int32.
-class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- LClampTToUint8NoSSE2(LOperand* unclamped,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = unclamped;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* scratch() { return temps_[0]; }
- LOperand* scratch2() { return temps_[1]; }
- LOperand* scratch3() { return temps_[2]; }
+ LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2,
- "clamp-t-to-uint8-nosse2")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
};
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCheckNonSmi(LOperand* value) {
+ explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
};
@@ -2738,6 +2651,35 @@ class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
@@ -2755,42 +2697,46 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
allocator_(allocator) { }
+ Isolate* isolate() const { return graph_->isolate(); }
+
// Build the sequence for the graph.
LPlatformChunk* Build();
- LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
-
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2803,7 +2749,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2815,7 +2760,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
- LUnallocated* ToUnallocated(X87Register reg);
// Methods for setting up define-use relationships.
MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
@@ -2858,7 +2802,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2867,24 +2811,16 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
- template<int I, int T>
- LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ XMMRegister reg);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);
@@ -2904,11 +2840,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
@@ -2922,12 +2855,10 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.cc b/chromium/v8/src/ia32/macro-assembler-ia32.cc
index 52d42f6ca87..1368501e225 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "isolate-inl.h"
-#include "runtime.h"
-#include "serialize.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -78,6 +55,11 @@ void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
} else if (r.IsInteger16() || r.IsUInteger16()) {
mov_w(dst, src);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
mov(dst, src);
}
}
@@ -198,7 +180,7 @@ void MacroAssembler::RememberedSetHelper(
j(equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
+ StoreBufferOverflowStub(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
@@ -214,22 +196,22 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
Register result_reg) {
Label done;
Label conv_failure;
- pxor(scratch_reg, scratch_reg);
+ xorps(scratch_reg, scratch_reg);
cvtsd2si(result_reg, input_reg);
test(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmp(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
+ cmp(result_reg, Immediate(0x1));
+ j(overflow, &conv_failure, Label::kNear);
mov(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
sub(result_reg, Immediate(1));
and_(result_reg, Immediate(255));
jmp(&done, Label::kNear);
bind(&conv_failure);
- Set(result_reg, Immediate(0));
+ Move(result_reg, Immediate(0));
ucomisd(input_reg, scratch_reg);
j(below, &done, Label::kNear);
- Set(result_reg, Immediate(255));
+ Move(result_reg, Immediate(255));
bind(&done);
}
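
Three details in the ClampDoubleToUint8 hunk: xorps replaces pxor as the XMM zeroing idiom (same result, one byte shorter to encode), the out-of-range test switches to the cmp-against-1 overflow check explained at TruncateDoubleToI below, and the clamp tail is branchless — mov leaves the flags from cmp(result_reg, 1) intact, so setcc(sign, ...) captures whether result - 1 was negative, i.e. whether the out-of-range value was below the range rather than above it. A scalar equivalent of that tail, assuming the kMinInt sentinel case has already been diverted to conv_failure:

    #include <cassert>
    #include <cstdint>

    uint8_t ClampToUint8(int32_t v) {
      if ((v & ~0xFF) == 0) return static_cast<uint8_t>(v);  // in range
      // sign(v - 1) is 1 for v <= 0 and 0 for v > 255 here, so this
      // yields 0 for negative inputs and 255 for too-large ones.
      int32_t below = (static_cast<int64_t>(v) - 1) < 0 ? 1 : 0;
      return static_cast<uint8_t>((below - 1) & 255);
    }

    int main() {
      assert(ClampToUint8(-12) == 0);
      assert(ClampToUint8(300) == 255);
      assert(ClampToUint8(64) == 64);
      return 0;
    }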
@@ -247,8 +229,8 @@ void MacroAssembler::ClampUint8(Register reg) {
void MacroAssembler::SlowTruncateToI(Register result_reg,
Register input_reg,
int offset) {
- DoubleToIStub stub(input_reg, result_reg, offset, true);
- call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
+ call(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -256,8 +238,8 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2si(result_reg, Operand(input_reg));
- cmp(result_reg, 0x80000000u);
- j(not_equal, &done, Label::kNear);
+ cmp(result_reg, 0x1);
+ j(no_overflow, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
movsd(MemOperand(esp, 0), input_reg);
@@ -267,42 +249,6 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
}
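
The cmp(result_reg, 0x80000000u) / j(not_equal, ...) pair becomes cmp(result_reg, 0x1) / j(no_overflow, ...) here and in the TruncateHeapNumberToI and ClampDoubleToUint8 hunks nearby. Both forms detect the 0x80000000 "integer indefinite" sentinel that cvttsd2si produces for NaN and out-of-range inputs: subtracting 1 overflows 32-bit signed arithmetic only for that exact value, and comparing against 1 fits an imm8 encoding, so the new form is shorter. A self-contained check of the flag identity (using the GCC/Clang __builtin_sub_overflow builtin):

    #include <cassert>
    #include <cstdint>

    // cmp reg, 1 sets OF exactly when reg - 1 overflows, which happens
    // only for reg == INT32_MIN (0x80000000), cvttsd2si's sentinel.
    bool IsCvttsd2siSentinel(int32_t r) {
      int32_t scratch;
      return __builtin_sub_overflow(r, 1, &scratch);
    }

    int main() {
      assert(IsCvttsd2siSentinel(INT32_MIN));
      assert(!IsCvttsd2siSentinel(0));
      assert(!IsCvttsd2siSentinel(INT32_MAX));
      return 0;
    }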
-void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
- sub(esp, Immediate(kDoubleSize));
- fst_d(MemOperand(esp, 0));
- SlowTruncateToI(result_reg, esp, 0);
- add(esp, Immediate(kDoubleSize));
-}
-
-
-void MacroAssembler::X87TOSToI(Register result_reg,
- MinusZeroMode minus_zero_mode,
- Label* conversion_failed,
- Label::Distance dst) {
- Label done;
- sub(esp, Immediate(kPointerSize));
- fld(0);
- fist_s(MemOperand(esp, 0));
- fild_s(MemOperand(esp, 0));
- pop(result_reg);
- FCmp();
- j(not_equal, conversion_failed, dst);
- j(parity_even, conversion_failed, dst);
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- test(result_reg, Operand(result_reg));
- j(not_zero, &done, Label::kNear);
- // To check for minus zero, we load the value again as float, and check
- // if that is still 0.
- sub(esp, Immediate(kPointerSize));
- fst_s(MemOperand(esp, 0));
- pop(result_reg);
- test(result_reg, Operand(result_reg));
- j(not_zero, conversion_failed, dst);
- }
- bind(&done);
-}
-
-
void MacroAssembler::DoubleToI(Register result_reg,
XMMRegister input_reg,
XMMRegister scratch,
@@ -370,12 +316,11 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
fstp(0);
SlowTruncateToI(result_reg, input_reg);
}
- } else if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(this, SSE2);
+ } else {
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cmp(result_reg, 0x80000000u);
- j(not_equal, &done, Label::kNear);
+ cmp(result_reg, 0x1);
+ j(no_overflow, &done, Label::kNear);
    // Check if the input was 0x80000000 (kMinInt).
    // If not, then we got an overflow and we deoptimize.
ExternalReference min_int = ExternalReference::address_of_min_int();
@@ -395,8 +340,6 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
} else {
SlowTruncateToI(result_reg, input_reg);
}
- } else {
- SlowTruncateToI(result_reg, input_reg);
}
bind(&done);
}
@@ -414,101 +357,49 @@ void MacroAssembler::TaggedToI(Register result_reg,
isolate()->factory()->heap_number_map());
j(not_equal, lost_precision, Label::kNear);
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- ASSERT(!temp.is(no_xmm_reg));
- CpuFeatureScope scope(this, SSE2);
+ ASSERT(!temp.is(no_xmm_reg));
- movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- cvttsd2si(result_reg, Operand(xmm0));
- Cvtsi2sd(temp, Operand(result_reg));
- ucomisd(xmm0, temp);
- RecordComment("Deferred TaggedToI: lost precision");
- j(not_equal, lost_precision, Label::kNear);
- RecordComment("Deferred TaggedToI: NaN");
- j(parity_even, lost_precision, Label::kNear);
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- test(result_reg, Operand(result_reg));
- j(not_zero, &done, Label::kNear);
- movmskpd(result_reg, xmm0);
- and_(result_reg, 1);
- RecordComment("Deferred TaggedToI: minus zero");
- j(not_zero, lost_precision, Label::kNear);
- }
- } else {
- // TODO(olivf) Converting a number on the fpu is actually quite slow. We
- // should first try a fast conversion and then bailout to this slow case.
- Label lost_precision_pop, zero_check;
- Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
- ? &lost_precision_pop : lost_precision;
- sub(esp, Immediate(kPointerSize));
- fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
- fist_s(MemOperand(esp, 0));
- fild_s(MemOperand(esp, 0));
- FCmp();
- pop(result_reg);
- j(not_equal, lost_precision_int, Label::kNear);
- j(parity_even, lost_precision_int, Label::kNear); // NaN.
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- test(result_reg, Operand(result_reg));
- j(zero, &zero_check, Label::kNear);
- fstp(0);
- jmp(&done, Label::kNear);
- bind(&zero_check);
- // To check for minus zero, we load the value again as float, and check
- // if that is still 0.
- sub(esp, Immediate(kPointerSize));
- fstp_s(Operand(esp, 0));
- pop(result_reg);
- test(result_reg, Operand(result_reg));
- j(zero, &done, Label::kNear);
- jmp(lost_precision, Label::kNear);
-
- bind(&lost_precision_pop);
- fstp(0);
- jmp(lost_precision, Label::kNear);
- }
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, Operand(xmm0));
+ Cvtsi2sd(temp, Operand(result_reg));
+ ucomisd(xmm0, temp);
+ RecordComment("Deferred TaggedToI: lost precision");
+ j(not_equal, lost_precision, Label::kNear);
+ RecordComment("Deferred TaggedToI: NaN");
+ j(parity_even, lost_precision, Label::kNear);
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, xmm0);
+ and_(result_reg, 1);
+ RecordComment("Deferred TaggedToI: minus zero");
+ j(not_zero, lost_precision, Label::kNear);
}
bind(&done);
}
void MacroAssembler::LoadUint32(XMMRegister dst,
- Register src,
- XMMRegister scratch) {
+ Register src) {
Label done;
cmp(src, Immediate(0));
ExternalReference uint32_bias =
ExternalReference::address_of_uint32_bias();
- movsd(scratch, Operand::StaticVariable(uint32_bias));
Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
- addsd(dst, scratch);
+ addsd(dst, Operand::StaticVariable(uint32_bias));
bind(&done);
}
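
LoadUint32 loses its scratch XMM parameter because the bias load now folds into the addsd memory operand. The conversion trick itself is unchanged: cvtsi2sd interprets the 32-bit value as signed, so inputs with the top bit set come out exactly 2^32 too small and are corrected by adding a double bias. Scalar equivalent (two's-complement narrowing assumed; guaranteed since C++20):

    #include <cassert>
    #include <cstdint>

    double Uint32ToDouble(uint32_t u) {
      int32_t as_signed = static_cast<int32_t>(u);  // what cvtsi2sd sees
      double d = static_cast<double>(as_signed);
      if (as_signed < 0) d += 4294967296.0;         // add the 2^32 bias
      return d;
    }

    int main() {
      assert(Uint32ToDouble(0x80000000u) == 2147483648.0);
      assert(Uint32ToDouble(0xFFFFFFFFu) == 4294967295.0);
      assert(Uint32ToDouble(42u) == 42.0);
      return 0;
    }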
-void MacroAssembler::LoadUint32NoSSE2(Register src) {
- Label done;
- push(src);
- fild_s(Operand(esp, 0));
- cmp(src, Immediate(0));
- j(not_sign, &done, Label::kNear);
- ExternalReference uint32_bias =
- ExternalReference::address_of_uint32_bias();
- fld_d(Operand::StaticVariable(uint32_bias));
- faddp(1);
- bind(&done);
- add(esp, Immediate(kPointerSize));
-}
-
-
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWriteArray(
+ Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -527,8 +418,8 @@ void MacroAssembler::RecordWriteArray(Register object,
lea(dst, Operand(object, index, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
@@ -548,7 +439,8 @@ void MacroAssembler::RecordWriteField(
Register dst,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -571,8 +463,8 @@ void MacroAssembler::RecordWriteField(
bind(&ok);
}
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
@@ -613,6 +505,9 @@ void MacroAssembler::RecordWriteForMap(
return;
}
+ // Compute the address.
+ lea(address, FieldOperand(object, HeapObject::kMapOffset));
+
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
@@ -628,13 +523,8 @@ void MacroAssembler::RecordWriteForMap(
&done,
Label::kNear);
- // Delay the initialization of |address| and |value| for the stub until it's
- // known that they will be needed. Up until this point their values are not
- // needed since they are embedded in the operands of instructions that need
- // them.
- lea(address, FieldOperand(object, HeapObject::kMapOffset));
- mov(value, Immediate(map));
- RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
+ RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
+ save_fp);
CallStub(&stub);
bind(&done);
@@ -649,12 +539,14 @@ void MacroAssembler::RecordWriteForMap(
}
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
@@ -686,12 +578,14 @@ void MacroAssembler::RecordWrite(Register object,
JumpIfSmi(value, &done, Label::kNear);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+ }
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -699,7 +593,8 @@ void MacroAssembler::RecordWrite(Register object,
&done,
Label::kNear);
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
bind(&done);
@@ -713,14 +608,12 @@ void MacroAssembler::RecordWrite(Register object,
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- Set(eax, Immediate(0));
+ Move(eax, Immediate(0));
mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+ CEntryStub ces(isolate(), 1);
+ call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif
void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
@@ -729,20 +622,6 @@ void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
}
-void MacroAssembler::Set(Register dst, const Immediate& x) {
- if (x.is_zero()) {
- xor_(dst, dst); // Shorter than mov.
- } else {
- mov(dst, x);
- }
-}
-
-
-void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
-
-
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
if (!RelocInfo::IsNone(x.rmode_)) return false;
@@ -750,12 +629,12 @@ bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
}
-void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
+void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- Set(dst, Immediate(x.x_ ^ jit_cookie()));
+ Move(dst, Immediate(x.x_ ^ jit_cookie()));
xor_(dst, jit_cookie());
} else {
- Set(dst, x);
+ Move(dst, x);
}
}
@@ -831,7 +710,6 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor,
int elements_offset) {
Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -850,19 +728,11 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&not_nan);
ExternalReference canonical_nan_reference =
ExternalReference::address_of_canonical_non_hole_nan();
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatureScope use_sse2(this, SSE2);
- movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- movsd(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
- scratch2);
- } else {
- fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
- }
+ movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ bind(&have_double_value);
+ movsd(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset),
+ scratch2);
jmp(&done);
bind(&maybe_nan);
@@ -872,12 +742,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
j(zero, &not_nan);
bind(&is_nan);
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatureScope use_sse2(this, SSE2);
- movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
- } else {
- fld_d(Operand::StaticVariable(canonical_nan_reference));
- }
+ movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
jmp(&have_double_value, Label::kNear);
bind(&smi_value);
@@ -885,19 +750,10 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Preserve original value.
mov(scratch1, maybe_number);
SmiUntag(scratch1);
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatureScope fscope(this, SSE2);
- Cvtsi2sd(scratch2, scratch1);
- movsd(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
- scratch2);
- } else {
- push(scratch1);
- fild_s(Operand(esp, 0));
- pop(scratch1);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
- }
+ Cvtsi2sd(scratch2, scratch1);
+ movsd(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset),
+ scratch2);
bind(&done);
}
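// Note on the NaN handling above: FixedDoubleArray reserves one particular
// NaN bit pattern ("the hole") to mark missing elements, so any incoming
// NaN is replaced with the canonical non-hole NaN before being stored;
// otherwise a user-provided NaN could later be misread as a hole.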
@@ -978,16 +834,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
void MacroAssembler::FCmp() {
- if (CpuFeatures::IsSupported(CMOV)) {
- fucomip();
- fstp(0);
- } else {
- fucompp();
- push(eax);
- fnstsw_ax();
- sahf();
- pop(eax);
- }
+ fucomip();
+ fstp(0);
}
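// With the CMOV fallback removed (this patch assumes SSE2-era hardware
// throughout), only the fucomip path remains: fucomip compares ST(0)
// against ST(1), sets ZF/PF/CF directly in EFLAGS and pops once; the
// following fstp(0) discards the remaining operand, leaving the x87 stack
// empty. An unordered result (a NaN operand) shows up as PF set, which
// callers test with parity_even.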
@@ -1037,6 +885,20 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ cmp(FieldOperand(object, 0),
+ Immediate(isolate()->factory()->allocation_site_map()));
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
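// Since HeapObject::kMapOffset is 0, the FieldOperand(object, 0) load
// above reads the object's map word, so the check accepts exactly
// undefined or an object whose map is the allocation-site map.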
+
+
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -1045,26 +907,27 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
+void MacroAssembler::StubPrologue() {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ PredictableCodeSizeScope predictible_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (code_pre_aging) {
+ // Pre-age the code.
+ call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+ } else {
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
push(esi); // Callee's context.
- push(Immediate(Smi::FromInt(StackFrame::STUB)));
- } else {
- PredictableCodeSizeScope predictible_code_size_scope(this,
- kNoCodeAgeSequenceLength);
- if (isolate()->IsCodePreAgingActive()) {
- // Pre-age the code.
- call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
- RelocInfo::CODE_AGE_SEQUENCE);
- Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
- } else {
- push(ebp); // Caller's frame pointer.
- mov(ebp, esp);
- push(esi); // Callee's context.
- push(edi); // Callee's JS function.
- }
+ push(edi); // Callee's JS function.
}
}
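// Both branches above are intended to occupy exactly
// kNoCodeAgeSequenceLength bytes -- the call plus Nop padding in the
// pre-aged case, the three pushes plus mov in the young case -- so the
// code-aging machinery can later patch one form into the other in place;
// the PredictableCodeSizeScope asserts that this size invariant holds.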
@@ -1116,11 +979,11 @@ void MacroAssembler::EnterExitFramePrologue() {
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
- CpuFeatureScope scope(this, SSE2);
- int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
+ int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ argc * kPointerSize;
sub(esp, Immediate(space));
const int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
@@ -1162,9 +1025,8 @@ void MacroAssembler::EnterApiExitFrame(int argc) {
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Optionally restore all XMM registers.
if (save_doubles) {
- CpuFeatureScope scope(this, SSE2);
const int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
@@ -1424,7 +1286,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Note: r0 will contain hash code
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// Xor original key with a seed.
- if (Serializer::enabled()) {
+ if (serializer_enabled()) {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(scratch, Immediate(Heap::kHashSeedRootIndex));
@@ -1591,7 +1453,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1943,32 +1805,13 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
-
- test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
- j(zero, &allocate_new_space);
-
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
- jmp(&install_map);
-
- bind(&allocate_new_space);
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- flags);
+ TAG_OBJECT);
- bind(&install_map);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
Immediate(isolate()->factory()->cons_ascii_string_map()));
@@ -2182,12 +2025,12 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+ call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+ jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
@@ -2202,30 +2045,16 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- add(esp, Immediate(num_arguments * kPointerSize));
- }
- mov(eax, Immediate(isolate()->factory()->undefined_value()));
-}
-
-
void MacroAssembler::IndexFromHash(Register hash, Register index) {
// The assert checks that the constants for the maximum number of digits
// for an array index cached in the hash field and the number of bits
// reserved for it do not conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- and_(hash, String::kArrayIndexValueMask);
- STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
- if (String::kHashShift > kSmiTagSize) {
- shr(hash, String::kHashShift - kSmiTagSize);
- }
if (!index.is(hash)) {
mov(index, hash);
}
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}
@@ -2235,19 +2064,15 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(eax, Immediate(num_arguments));
+ Move(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
- : kDontSaveFPRegs);
+ CEntryStub ces(isolate(), 1, save_doubles);
CallStub(&ces);
}
@@ -2257,7 +2082,7 @@ void MacroAssembler::CallExternalReference(ExternalReference ref,
mov(eax, Immediate(num_arguments));
mov(ebx, Immediate(ref));
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
@@ -2269,7 +2094,7 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(eax, Immediate(num_arguments));
+ Move(eax, Immediate(num_arguments));
JumpToExternalReference(ext);
}
@@ -2297,8 +2122,8 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
void MacroAssembler::CallApiFunctionAndReturn(
- Address function_address,
- Address thunk_address,
+ Register function_address,
+ ExternalReference thunk_ref,
Operand thunk_last_arg,
int stack_space,
Operand return_value_operand,
@@ -2310,6 +2135,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference level_address =
ExternalReference::handle_scope_level_address(isolate());
+ ASSERT(edx.is(function_address));
// Allocate HandleScope in callee-save registers.
mov(ebx, Operand::StaticVariable(next_address));
mov(edi, Operand::StaticVariable(limit_address));
@@ -2328,22 +2154,20 @@ void MacroAssembler::CallApiFunctionAndReturn(
Label profiler_disabled;
Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- mov(eax, Immediate(reinterpret_cast<Address>(is_profiling_flag)));
+ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
cmpb(Operand(eax, 0), 0);
j(zero, &profiler_disabled);
// Additional parameter is the address of the actual getter function.
- mov(thunk_last_arg, Immediate(function_address));
+ mov(thunk_last_arg, function_address);
// Call the api function.
- call(thunk_address, RelocInfo::RUNTIME_ENTRY);
+ mov(eax, Immediate(thunk_ref));
+ call(eax);
jmp(&end_profiler_check);
bind(&profiler_disabled);
// Call the api function.
- call(function_address, RelocInfo::RUNTIME_ENTRY);
+ call(function_address);
bind(&end_profiler_check);
if (FLAG_log_timer_events) {
@@ -2428,7 +2252,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
}
jmp(&exception_handled);
@@ -2450,25 +2274,8 @@ void MacroAssembler::CallApiFunctionAndReturn(
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
- CEntryStub ces(1);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be ecx to
- // follow the calling convention which requires the call type to be
- // in ecx.
- ASSERT(dst.is(ecx));
- if (call_kind == CALL_AS_FUNCTION) {
- // Set to some non-zero smi by updating the least significant
- // byte.
- mov_b(dst, 1 << kSmiTagSize);
- } else {
- // Set to smi zero by clearing the register.
- xor_(dst, dst);
- }
+ CEntryStub ces(isolate(), 1);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -2480,8 +2287,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_near,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label invoke;
@@ -2534,14 +2340,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
- SetCallKind(ecx, call_kind);
call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
jmp(done, done_near);
}
} else {
- SetCallKind(ecx, call_kind);
jmp(adaptor, RelocInfo::CODE_TARGET);
}
bind(&invoke);
@@ -2553,8 +2357,7 @@ void MacroAssembler::InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -2562,16 +2365,14 @@ void MacroAssembler::InvokeCode(const Operand& code,
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag, Label::kNear,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(ecx, call_kind);
call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
jmp(code);
}
bind(&done);
@@ -2579,42 +2380,10 @@ void MacroAssembler::InvokeCode(const Operand& code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- Operand dummy(eax, 0);
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
- flag, Label::kNear, call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code, rmode));
- SetCallKind(ecx, call_kind);
- call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code, rmode);
- }
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -2626,7 +2395,7 @@ void MacroAssembler::InvokeFunction(Register fun,
ParameterCount expected(ebx);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
+ expected, actual, flag, call_wrapper);
}
@@ -2634,8 +2403,7 @@ void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -2643,7 +2411,7 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
+ expected, actual, flag, call_wrapper);
}
@@ -2651,10 +2419,9 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
LoadHeapObject(edi, function);
- InvokeFunction(edi, expected, actual, flag, call_wrapper, call_kind);
+ InvokeFunction(edi, expected, actual, flag, call_wrapper);
}
@@ -2670,7 +2437,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
ParameterCount expected(0);
GetBuiltinFunction(edi, id);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, call_wrapper, CALL_AS_METHOD);
+ expected, expected, flag, call_wrapper);
}
@@ -2745,41 +2512,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- mov(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::LoadGlobalContext(Register global_context) {
- // Load the global or builtins object from the current context.
- mov(global_context,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(global_context,
- FieldOperand(global_context, GlobalObject::kNativeContextOffset));
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
mov(function,
@@ -2889,27 +2621,6 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
}
-void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
- // Make sure the floating point stack is either empty or has depth items.
- ASSERT(depth <= 7);
- // This is very expensive.
- ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);
-
- // The top-of-stack (tos) is 7 if there is one item pushed.
- int tos = (8 - depth) % 8;
- const int kTopMask = 0x3800;
- push(eax);
- fwait();
- fnstsw_ax();
- and_(eax, kTopMask);
- shr(eax, 11);
- cmp(eax, Immediate(tos));
- Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
- fnclex();
- pop(eax);
-}
-
-
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
add(esp, Immediate(stack_elements * kPointerSize));
@@ -2924,6 +2635,36 @@ void MacroAssembler::Move(Register dst, Register src) {
}
+void MacroAssembler::Move(Register dst, const Immediate& x) {
+ if (x.is_zero()) {
+ xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
+ } else {
+ mov(dst, x);
+ }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
+ mov(dst, x);
+}
+
+
+void MacroAssembler::Move(XMMRegister dst, double val) {
+ // TODO(titzer): recognize double constants with ExternalReferences.
+ uint64_t int_val = BitCast<uint64_t, double>(val);
+ if (int_val == 0) {
+ xorps(dst, dst);
+ } else {
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+ push(Immediate(upper));
+ push(Immediate(lower));
+ movsd(dst, Operand(esp, 0));
+ add(esp, Immediate(kDoubleSize));
+ }
+}
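// A minimal worked example of the bit-splitting above: for val == 1.0 the
// IEEE-754 bit pattern is 0x3FF0000000000000, so upper == 0x3FF00000 and
// lower == 0; the two pushes materialize those 8 bytes on the stack, movsd
// loads them into dst, and the add restores esp.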
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -3036,16 +2777,8 @@ void MacroAssembler::CheckStackAlignment() {
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -3057,57 +2790,21 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- push(eax);
- push(Immediate(p0));
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
+ push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
int3();
}
-void MacroAssembler::Throw(BailoutReason reason) {
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
- RecordComment("Throw message: ");
- RecordComment(msg);
- }
-#endif
-
- push(eax);
- push(Immediate(Smi::FromInt(reason)));
- // Disable stub call restrictions to always allow calls to throw.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kThrowMessage, 1);
- }
- // will not return here
- int3();
-}
-
-
-void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
- Label L;
- j(NegateCondition(cc), &L);
- Throw(reason);
- // will not return here
- bind(&L);
-}
-
-
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
@@ -3178,15 +2875,8 @@ void MacroAssembler::LookupNumberStringCache(Register object,
times_twice_pointer_size,
FixedArray::kHeaderSize));
JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(this, SSE2);
- movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
- } else {
- fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- FCmp();
- }
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
j(parity_even, not_found); // Bail out if NaN is involved.
j(not_equal, not_found); // The cache did not contain this value.
jmp(&load_result_from_cache, Label::kNear);
@@ -3279,7 +2969,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
uint32_t encoding_mask) {
Label is_object;
JumpIfNotSmi(string, &is_object, Label::kNear);
- Throw(kNonObject);
+ Abort(kNonObject);
bind(&is_object);
push(value);
@@ -3289,20 +2979,19 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
cmp(value, Immediate(encoding_mask));
pop(value);
- ThrowIf(not_equal, kUnexpectedStringType);
+ Check(equal, kUnexpectedStringType);
// The index is assumed to be untagged coming in, tag it to compare with the
// string length without using a temp register, it is restored at the end of
// this function.
SmiTag(index);
- // Can't use overflow here directly, compiler can't seem to disambiguate.
- ThrowIf(NegateCondition(no_overflow), kIndexIsTooLarge);
+ Check(no_overflow, kIndexIsTooLarge);
cmp(index, FieldOperand(string, String::kLengthOffset));
- ThrowIf(greater_equal, kIndexIsTooLarge);
+ Check(less, kIndexIsTooLarge);
cmp(index, Immediate(Smi::FromInt(0)));
- ThrowIf(less, kIndexIsNegative);
+ Check(greater_equal, kIndexIsNegative);
// Restore the index
SmiUntag(index);
@@ -3434,7 +3123,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
if (map->CanBeDeprecated()) {
mov(scratch, map);
mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
- and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask)));
+ and_(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
}
}
@@ -3612,7 +3301,8 @@ void MacroAssembler::EnsureNotWhite(
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
mov(dst, FieldOperand(map, Map::kBitField3Offset));
- and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask)));
+ and_(dst, Immediate(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
}
@@ -3642,10 +3332,16 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
+ Label no_elements;
mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
cmp(ecx, isolate()->factory()->empty_fixed_array());
+ j(equal, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
j(not_equal, call_runtime);
+ bind(&no_elements);
mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
cmp(ecx, isolate()->factory()->null_value());
j(not_equal, &next);
@@ -3689,8 +3385,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
bind(&loop_again);
mov(current, FieldOperand(current, HeapObject::kMapOffset));
mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
- and_(scratch1, Map::kElementsKindMask);
- shr(scratch1, Map::kElementsKindShift);
+ DecodeField<Map::ElementsKindBits>(scratch1);
cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
mov(current, FieldOperand(current, Map::kPrototypeOffset));
@@ -3698,6 +3393,22 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
j(not_equal, &loop_again);
}
+
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(eax));
+ ASSERT(!dividend.is(edx));
+ MultiplierAndShift ms(divisor);
+ mov(eax, Immediate(ms.multiplier()));
+ imul(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
+ if (ms.shift() > 0) sar(edx, ms.shift());
+ mov(eax, dividend);
+ shr(eax, 31);
+ add(edx, eax);
+}
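// A sketch of why this works, assuming MultiplierAndShift computes the
// standard signed magic numbers (divisor == 3 would give multiplier
// 0x55555556 and shift 0): imul leaves the high half of
// multiplier * dividend in edx, and adding the dividend's sign bit
// (shr eax, 31) rounds the quotient toward zero. For dividend == 7,
// edx == 2 and the sign bit is 0, giving 7 / 3 == 2; for dividend == -7,
// edx == -3 and the sign bit is 1, giving -7 / 3 == -2.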
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.h b/chromium/v8/src/ia32/macro-assembler-ia32.h
index 054b164846d..b0b61f7a88f 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -41,6 +18,10 @@ typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
enum RegisterValueType {
@@ -163,7 +144,9 @@ class MacroAssembler: public Assembler {
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// Operand(reg, off).
@@ -174,14 +157,17 @@ class MacroAssembler: public Assembler {
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
scratch,
save_fp,
remembered_set_action,
- smi_check);
+ smi_check,
+ pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a pointer into a fixed array.
@@ -196,7 +182,9 @@ class MacroAssembler: public Assembler {
Register index,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
@@ -209,7 +197,9 @@ class MacroAssembler: public Assembler {
Register value,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// For page containing |object| mark the region covering the object's map
// dirty. |object| is the object being stored into, |map| is the Map object
@@ -221,15 +211,14 @@ class MacroAssembler: public Assembler {
Register scratch2,
SaveFPRegsMode save_fp);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
void DebugBreak();
-#endif
// Generates function and stub prologue code.
- void Prologue(PrologueFrameMode frame_mode);
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
@@ -262,14 +251,6 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- void LoadGlobalContext(Register global_context);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -295,7 +276,7 @@ class MacroAssembler: public Assembler {
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
- Set(result, Immediate(object));
+ Move(result, Immediate(object));
}
}
@@ -311,57 +292,39 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Set up call kind marking in ecx. The method takes ecx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
+ const CallWrapper& call_wrapper) {
+ InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
}
void InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -376,9 +339,6 @@ class MacroAssembler: public Assembler {
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// Expression support
- void Set(Register dst, const Immediate& x);
- void Set(const Operand& dst, const Immediate& x);
-
  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
  // register, which hinders register renaming and makes dependence chains
  // longer. So we use xorps to clear the dst register before cvtsi2sd to
  // solve this issue.
@@ -387,7 +347,7 @@ class MacroAssembler: public Assembler {
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
- void SafeSet(Register dst, const Immediate& x);
+ void SafeMove(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
// Compare object type for heap object.
@@ -424,7 +384,6 @@ class MacroAssembler: public Assembler {
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor,
int offset = 0);
// Compare an object's map with the specified map.
@@ -493,13 +452,10 @@ class MacroAssembler: public Assembler {
void TruncateHeapNumberToI(Register result_reg, Register input_reg);
void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
- void TruncateX87TOSToI(Register result_reg);
void DoubleToI(Register result_reg, XMMRegister input_reg,
XMMRegister scratch, MinusZeroMode minus_zero_mode,
Label* conversion_failed, Label::Distance dst = Label::kFar);
- void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
- Label* conversion_failed, Label::Distance dst = Label::kFar);
void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
MinusZeroMode minus_zero_mode, Label* lost_precision);
@@ -522,8 +478,7 @@ class MacroAssembler: public Assembler {
j(not_carry, is_smi);
}
- void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
- void LoadUint32NoSSE2(Register src);
+ void LoadUint32(XMMRegister dst, Register src);
  // Jump if the register contains a smi.
inline void JumpIfSmi(Register value,
@@ -554,10 +509,27 @@ class MacroAssembler: public Assembler {
template<typename Field>
void DecodeField(Register reg) {
static const int shift = Field::kShift;
+ static const int mask = Field::kMask >> Field::kShift;
+ if (shift != 0) {
+ sar(reg, shift);
+ }
+ and_(reg, Immediate(mask));
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register reg) {
+ static const int shift = Field::kShift;
static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
- sar(reg, shift);
+ STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ if (shift < kSmiTagSize) {
+ shl(reg, kSmiTagSize - shift);
+ } else if (shift > kSmiTagSize) {
+ sar(reg, shift - kSmiTagSize);
+ }
and_(reg, Immediate(mask));
}
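  // Example, assuming a hypothetical field with kShift == 2 (and the ia32
  // kSmiTagSize of 1): the raw value sits at bit 2, so sar reg, 1 moves it
  // to bit 1 -- which is exactly the Smi encoding -- and the final and_
  // clears the (zero) Smi tag bit and any bits above the field.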
+
void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
// Abort execution if argument is not a number, enabled via --debug-code.
@@ -575,6 +547,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -590,12 +566,6 @@ class MacroAssembler: public Assembler {
// Throw past all JS frames to the top JS entry frame.
void ThrowUncatchable(Register value);
- // Throw a message string as an exception.
- void Throw(BailoutReason reason);
-
- // Throw a message string as an exception if a condition is not true.
- void ThrowIf(Condition cc, BailoutReason reason);
-
// ---------------------------------------------------------------------------
// Inline caching support
@@ -752,10 +722,6 @@ class MacroAssembler: public Assembler {
Label* miss,
bool miss_on_bound_function = false);
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
@@ -833,8 +799,8 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Clobbers ebx, edi and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
Operand thunk_last_arg,
int stack_space,
Operand return_value_operand,
@@ -869,6 +835,13 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
+ // Move a constant into a destination using the most efficient encoding.
+ void Move(Register dst, const Immediate& x);
+ void Move(const Operand& dst, const Immediate& x);
+
+ // Move an immediate into an XMM register.
+ void Move(XMMRegister dst, double val);
+
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
@@ -878,8 +851,9 @@ class MacroAssembler: public Assembler {
return code_object_;
}
- // Insert code to verify that the x87 stack has the specified depth (0-7)
- void VerifyX87StackDepth(uint32_t depth);
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in edx, and eax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1010,8 +984,7 @@ class MacroAssembler: public Assembler {
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_distance,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
+ const CallWrapper& call_wrapper = NullCallWrapper());
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
@@ -1027,13 +1000,6 @@ class MacroAssembler: public Assembler {
Register scratch,
AllocationFlags flags);
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
@@ -1101,6 +1067,14 @@ inline Operand FieldOperand(Register object,
}
+inline Operand FixedArrayElementOperand(Register array,
+ Register index_as_smi,
+ int additional_offset = 0) {
+ int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
+ return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
+}
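// FixedArrayElementOperand folds the Smi untagging into the addressing
// mode: an ia32 Smi is the value shifted left by one, and
// times_half_pointer_size is times_2 here, so smi * 2 == value *
// kPointerSize -- the element's byte offset past the array header.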
+
+
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
diff --git a/chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc b/chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc
index d371c456c1a..1945bd63034 100644
--- a/chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "cpu-profiler.h"
-#include "unicode.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "ia32/regexp-macro-assembler-ia32.h"
+#include "src/cpu-profiler.h"
+#include "src/unicode.h"
+#include "src/log.h"
+#include "src/regexp-stack.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/ia32/regexp-macro-assembler-ia32.h"
namespace v8 {
namespace internal {
@@ -632,7 +609,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerIA32::Fail() {
STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
if (!global()) {
- __ Set(eax, Immediate(FAILURE));
+ __ Move(eax, Immediate(FAILURE));
}
__ jmp(&exit_label_);
}
@@ -1099,7 +1076,8 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
@@ -1126,7 +1104,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+ Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();
diff --git a/chromium/v8/src/ia32/regexp-macro-assembler-ia32.h b/chromium/v8/src/ia32/regexp-macro-assembler-ia32.h
index 39333360077..e04a8ef4b62 100644
--- a/chromium/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/chromium/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-#include "macro-assembler.h"
+#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/assembler-ia32-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/ia32/simulator-ia32.cc b/chromium/v8/src/ia32/simulator-ia32.cc
index b6f2847332e..20edae83a2a 100644
--- a/chromium/v8/src/ia32/simulator-ia32.cc
+++ b/chromium/v8/src/ia32/simulator-ia32.cc
@@ -1,29 +1,6 @@
// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Since there is no simulator for the ia32 architecture this file is empty.
diff --git a/chromium/v8/src/ia32/simulator-ia32.h b/chromium/v8/src/ia32/simulator-ia32.h
index 478d4ce5cb3..02a8e9c03a4 100644
--- a/chromium/v8/src/ia32/simulator-ia32.h
+++ b/chromium/v8/src/ia32/simulator-ia32.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IA32_SIMULATOR_IA32_H_
#define V8_IA32_SIMULATOR_IA32_H_
-#include "allocation.h"
+#include "src/allocation.h"
namespace v8 {
namespace internal {
@@ -48,9 +25,6 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
-
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on ia32 uses the C stack, we
// just use the C stack limit.
diff --git a/chromium/v8/src/ia32/stub-cache-ia32.cc b/chromium/v8/src/ia32/stub-cache-ia32.cc
index 9efedc67325..49270196f2c 100644
--- a/chromium/v8/src/ia32/stub-cache-ia32.cc
+++ b/chromium/v8/src/ia32/stub-cache-ia32.cc
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
+#include "src/ic-inl.h"
+#include "src/codegen.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -271,15 +248,19 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
int index,
Register prototype,
Label* miss) {
- // Check we're still in the same context.
- __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- masm->isolate()->global_object());
- __ j(not_equal, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(masm->isolate()->native_context()->get(index)));
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ mov(scratch, Operand(esi, offset));
+ __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
+ __ j(not_equal, miss);
+
// Load its initial map. The global functions all have initial maps.
- __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
+ __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
__ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -302,54 +283,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ test(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
-
- // Load length from the string and convert to a smi.
- __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -367,7 +300,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -414,422 +347,85 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : last argument in the internal frame of the caller
- // -----------------------------------
- __ pop(scratch);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(Immediate(Smi::FromInt(0)));
- }
- __ push(scratch);
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address.
- // -- esp[4] : last fast api call extra argument.
- // -- ...
- // -- esp[kFastApiCallArguments * 4] : first fast api call extra argument.
- // -- esp[kFastApiCallArguments * 4 + 4] : last argument in the internal
- // frame.
- // -----------------------------------
- __ pop(scratch);
- __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
- __ push(scratch);
-}
-
-
-static void GenerateFastApiCallBody(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context);
-
-
-// Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- typedef FunctionCallbackArguments FCA;
- // Save calling context.
- __ mov(Operand(esp, (1 + FCA::kContextSaveIndex) * kPointerSize), esi);
-
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(edi, function);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Construct the FunctionCallbackInfo.
- __ mov(Operand(esp, (1 + FCA::kCalleeIndex) * kPointerSize), edi);
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ mov(ecx, api_call_info);
- __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize), ebx);
- } else {
- __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize),
- Immediate(call_data));
- }
- __ mov(Operand(esp, (1 + FCA::kIsolateIndex) * kPointerSize),
- Immediate(reinterpret_cast<int>(masm->isolate())));
- __ mov(Operand(esp, (1 + FCA::kReturnValueOffset) * kPointerSize),
- masm->isolate()->factory()->undefined_value());
- __ mov(Operand(esp, (1 + FCA::kReturnValueDefaultValueIndex) * kPointerSize),
- masm->isolate()->factory()->undefined_value());
-
- // Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 7);
- __ lea(eax, Operand(esp, 1 * kPointerSize));
-
- GenerateFastApiCallBody(masm, optimization, argc, false);
-}
-
-
// Generate a call to an API function.
// This function uses push() to generate smaller, faster code than the
// variant it replaces. It is an optimization that will be removed once
// API call ICs are generated in hydrogen.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
-
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
// Pop the return address; it is re-pushed on top of the arguments below.
- __ pop(scratch1);
-
+ __ pop(scratch_in);
// receiver
__ push(receiver);
-
// Write the arguments to the stack frame.
for (int i = 0; i < argc; i++) {
Register arg = values[argc-1-i];
ASSERT(!receiver.is(arg));
- ASSERT(!scratch1.is(arg));
- ASSERT(!scratch2.is(arg));
- ASSERT(!scratch3.is(arg));
+ ASSERT(!scratch_in.is(arg));
__ push(arg);
}
+ __ push(scratch_in);
+ // Stack now matches the JSFunction ABI.
+ ASSERT(optimization.is_simple_api_call());
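+ // Resulting stack layout (a sketch, top of stack first):
+ // -- esp[0] : return address
+ // -- esp[4] : values[0]
+ // -- ...
+ // -- esp[argc * 4] : values[argc - 1]
+ // -- esp[(argc + 1) * 4] : receiver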
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kArgsLength == 7);
-
- // context save
- __ push(esi);
-
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(scratch2, function);
- __ mov(esi, FieldOperand(scratch2, JSFunction::kContextOffset));
- // callee
- __ push(scratch2);
+ // ABI for CallApiFunctionStub.
+ Register callee = eax;
+ Register call_data = ebx;
+ Register holder = ecx;
+ Register api_function_address = edx;
+ Register scratch = edi; // scratch_in is no longer valid.
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadHeapObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
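+ // Only two outcomes can reach this point: the holder of the expected
+ // type is the receiver itself, or a constant object found on its
+ // prototype chain; kHolderNotFound would mean the fast path was
+ // misapplied, hence UNREACHABLE().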
Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), isolate);
- // Push data from ExecutableAccessorInfo.
- if (isolate->heap()->InNewSpace(*call_data)) {
- __ mov(scratch2, api_call_info);
- __ mov(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
- __ push(scratch3);
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadHeapObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ mov(scratch, api_call_info);
+ __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
} else {
- __ push(Immediate(call_data));
+ __ mov(call_data, call_data_obj);
}
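+ // Note: call data in new space may move, so it is reloaded through the
+ // CallHandlerInfo rather than embedded directly; call_data_undefined
+ // presumably lets CallApiFunctionStub specialize the undefined case.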
- // return value
- __ push(Immediate(isolate->factory()->undefined_value()));
- // return value default
- __ push(Immediate(isolate->factory()->undefined_value()));
- // isolate
- __ push(Immediate(reinterpret_cast<int>(isolate)));
- // holder
- __ push(receiver);
-
- // store receiver address for GenerateFastApiCallBody
- ASSERT(!scratch1.is(eax));
- __ mov(eax, esp);
-
- // return address
- __ push(scratch1);
-
- GenerateFastApiCallBody(masm, optimization, argc, true);
-}
-
-static void GenerateFastApiCallBody(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] - esp[28] : FunctionCallbackInfo, incl.
- // : object passing the type check
- // (set by CheckPrototypes)
- // -- esp[32] : last argument
- // -- ...
- // -- esp[(argc + 7) * 4] : first argument
- // -- esp[(argc + 8) * 4] : receiver
- //
- // -- eax : receiver address
- // -----------------------------------
- typedef FunctionCallbackArguments FCA;
-
- // API function gets reference to the v8::Arguments. If CPU profiler
- // is enabled wrapper function will be called and we need to pass
- // address of the callback as additional parameter, always allocate
- // space for it.
- const int kApiArgc = 1 + 1;
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-
- // Function address is a foreign pointer outside V8's heap.
+ // Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
-
- // FunctionCallbackInfo::implicit_args_.
- __ mov(ApiParameterOperand(2), eax);
- __ add(eax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), eax);
- // FunctionCallbackInfo::length_.
- __ Set(ApiParameterOperand(4), Immediate(argc));
- // FunctionCallbackInfo::is_construct_call_.
- __ Set(ApiParameterOperand(5), Immediate(0));
-
- // v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(2));
- __ mov(ApiParameterOperand(0), eax);
-
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
-
- Operand context_restore_operand(ebp,
- (2 + FCA::kContextSaveIndex) * kPointerSize);
- Operand return_value_operand(ebp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
- __ CallApiFunctionAndReturn(function_address,
- thunk_address,
- ApiParameterOperand(1),
- argc + kFastApiCallArguments + 1,
- return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(CallStubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- ExtraICState extra_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
- handle(lookup->holder()), scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- } else {
- Handle<JSFunction> fun = optimization.constant_function();
- stub_compiler_->GenerateJumpFunction(object, fun);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, interceptor_holder,
- IC::kLoadPropertyWithInterceptorForCall);
-
- // Restore the name_ register.
- __ pop(name_);
-
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(receiver);
- __ push(holder);
- __ push(name_);
-
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, holder_obj,
- IC::kLoadPropertyWithInterceptorOnly);
+ __ mov(api_function_address, Immediate(function_address));
- __ pop(name_);
- __ pop(holder);
- __ pop(receiver);
- // Leave the internal frame.
- }
-
- __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
- __ j(not_equal, interceptor_succeeded);
- }
-
- CallStubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
-};
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
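+// Hypothetical call site (illustrative only; register choices and helper
+// names are assumptions, not part of this patch):
+//   Register values[] = { value() };
+//   GenerateFastApiCall(masm(), optimization, receiver_map,
+//                       receiver(), scratch1(), true /* is_store */,
+//                       1, values);
+// The helper ends in a tail call to CallApiFunctionStub, so control never
+// returns to the caller's code.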
void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
@@ -854,7 +450,7 @@ void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
- if (Serializer::enabled()) {
+ if (masm->serializer_enabled()) {
__ mov(scratch, Immediate(cell));
__ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
Immediate(the_hole));
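+ // When the serializer is enabled the cell is referenced via an embedded
+ // handle rather than its absolute address, since raw addresses cannot be
+ // written into the startup snapshot.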
@@ -906,44 +502,42 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
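+ // The loop below unrolls the field-type class check: each permitted map
+ // branches to do_store on a match and only the final mismatch misses.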
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(value_reg);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ Cvtsi2sd(xmm0, value_reg);
- } else {
- __ push(value_reg);
- __ fild_s(Operand(esp, 0));
- __ pop(value_reg);
- }
+ __ Cvtsi2sd(xmm0, value_reg);
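+ // The x87 fallback (fild_s/fstp_d) is gone; this revision appears to
+ // assume SSE2 as a baseline on ia32, so Cvtsi2sd/movsd are used
+ // unconditionally.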
__ SmiTag(value_reg);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
miss_label, DONT_DO_SMI_CHECK);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
- } else {
- __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
- }
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
- }
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
}
// Stub never generated for non-global objects that require access
@@ -1001,15 +595,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ mov(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ mov(FieldOperand(receiver_reg, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -1025,15 +619,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ mov(FieldOperand(scratch1, offset), storage_reg);
} else {
__ mov(FieldOperand(scratch1, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -1067,81 +661,71 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- int index = lookup->GetFieldIndex().field_index();
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
+ FieldIndex index = lookup->GetFieldIndex();
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
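+ // Same unrolled class check as in GenerateStoreTransition above.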
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
// Load the double storage.
- if (index < 0) {
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ mov(scratch1, FieldOperand(receiver_reg, offset));
+ if (index.is_inobject()) {
+ __ mov(scratch1, FieldOperand(receiver_reg, index.offset()));
} else {
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ mov(scratch1, FieldOperand(scratch1, offset));
+ __ mov(scratch1, FieldOperand(scratch1, index.offset()));
}
// Store the value into the storage.
Label do_store, heap_number;
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(value_reg);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ Cvtsi2sd(xmm0, value_reg);
- } else {
- __ push(value_reg);
- __ fild_s(Operand(esp, 0));
- __ pop(value_reg);
- }
+ __ Cvtsi2sd(xmm0, value_reg);
__ SmiTag(value_reg);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
miss_label, DONT_DO_SMI_CHECK);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
- } else {
- __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
- }
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
- }
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
// Return the value (register eax).
ASSERT(value_reg.is(eax));
__ ret(0);
return;
}
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
+ if (index.is_inobject()) {
// Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ mov(FieldOperand(receiver_reg, offset), value_reg);
+ __ mov(FieldOperand(receiver_reg, index.offset()), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, value_reg);
__ RecordWriteField(receiver_reg,
- offset,
+ index.offset(),
name_reg,
scratch1,
kDontSaveFPRegs,
@@ -1150,17 +734,16 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
} else {
// Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch1, offset), value_reg);
+ __ mov(FieldOperand(scratch1, index.offset()), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
- offset,
+ index.offset(),
name_reg,
receiver_reg,
kDontSaveFPRegs,
@@ -1184,20 +767,16 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<Type> type,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, receiver_map);
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1208,13 +787,9 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register reg = object_reg;
int depth = 0;
- const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
- }
-
Handle<JSObject> current = Handle<JSObject>::null();
- if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ }
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder->map());
@@ -1237,7 +812,7 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
- current->property_dictionary()->FindEntry(*name) ==
+ current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1280,10 +855,6 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
}
}
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
- }
-
// Go to the next object in the prototype chain.
current = prototype;
current_map = handle(current->map());
@@ -1332,7 +903,7 @@ void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<Type> type,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1392,32 +963,20 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex field,
+ FieldIndex field,
Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
} else {
- KeyedLoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ KeyedLoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
}
}
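+// The field stubs are now constructed from (isolate, FieldIndex) and
+// GetCode() no longer takes an isolate, replacing the old inobject-flag,
+// translated-index and representation arguments.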
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch1(),
- scratch2(), name(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -1450,36 +1009,16 @@ void LoadStubCompiler::GenerateLoadCallback(
__ push(esp);
__ push(name()); // name
- __ mov(ebx, esp); // esp points to reference to name (handler).
__ push(scratch3()); // Restore return address.
- // array for v8::Arguments::values_, handler for name and pointer
- // to the values (it considered as smi in GC).
- const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
- // Allocate space for opional callback address parameter in case
- // CPU profiler is active.
- const int kApiArgc = 2 + 1;
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ PrepareCallApiFunction(kApiArgc);
- __ mov(ApiParameterOperand(0), ebx); // name.
- __ add(ebx, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(1), ebx); // arguments pointer.
-
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
-
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
-
- __ CallApiFunctionAndReturn(getter_address,
- thunk_address,
- ApiParameterOperand(2),
- kStackSpace,
- Operand(ebp, 7 * kPointerSize),
- NULL);
+ // ABI for CallApiGetter.
+ Register getter_address = edx;
+ Address function_address = v8::ToCData<Address>(callback->getter());
+ __ mov(getter_address, Immediate(function_address));
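+ // CallApiGetterStub expects the getter entry point in edx; the name and
+ // the PropertyCallbackArguments block pushed above presumably sit where
+ // the stub will look for them.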
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
}
@@ -1582,1069 +1121,24 @@ void LoadStubCompiler::GenerateLoadInterceptor(
__ push(scratch2()); // restore old return address
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
isolate());
__ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
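+// With CallStubCompiler removed below, the separate ForLoad/ForCall
+// interceptor entry points collapse into a single
+// IC::kLoadPropertyWithInterceptor runtime function.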
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateFunctionCheck(Register function,
- Register scratch,
- Label* miss) {
- __ JumpIfSmi(function, miss);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- if (Serializer::enabled()) {
- __ mov(edi, Immediate(cell));
- __ mov(edi, FieldOperand(edi, Cell::kValueOffset));
- } else {
- __ mov(edi, Operand::ForCell(cell));
- }
-
- // Check that the cell contains the same function.
- if (isolate()->heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- GenerateFunctionCheck(edi, ebx, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
- Immediate(Handle<SharedFunctionInfo>(function->shared())));
- } else {
- __ cmp(edi, Immediate(function));
- }
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state());
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(
- object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- GenerateFastPropertyLoad(
- masm(), edi, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
- GenerateJumpFunction(object, edi, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->SetElementsKind(GetInitialFastElementsKind());
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- const int argc = arguments().immediate();
- __ mov(eax, Immediate(argc));
- __ mov(ebx, site_feedback_cell);
- __ mov(edi, function);
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- const int argc = arguments().immediate();
- if (argc == 0) {
- // Noop, return the length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- // Get the elements array of the object.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &check_double);
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
-
- // Get the elements' length into ecx.
- __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, ecx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ JumpIfNotSmi(ecx, &with_write_barrier);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Store the value.
- __ mov(FieldOperand(edi,
- eax,
- times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- ecx);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&check_double);
-
-
- // Check that the elements are in double mode.
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_double_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
-
- // Get the elements' length into ecx.
- __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, ecx);
- __ j(greater, &call_builtin);
-
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ StoreNumberToDoubleElements(
- ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(ebx, &call_builtin);
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(factory()->heap_number_map()));
- __ j(equal, &call_builtin);
- // edi: elements array
- // edx: receiver
- // ebx: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore edi.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- ebx,
- edi,
- &call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore edi.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(ebx, &call_builtin);
- }
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Store the value.
- __ lea(edx, FieldOperand(edi,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ mov(Operand(edx, 0), ecx);
-
- __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- __ mov(ebx, Operand(esp, argc * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(ebx, &no_fast_elements_check);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- // We could be lucky and the elements array could be at the top of
- // new-space. In this case we can just grow it in place by moving the
- // allocation pointer up.
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top.
- __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
-
- // Check if it's the end of elements.
- __ lea(edx, FieldOperand(edi,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmp(edx, ecx);
- __ j(not_equal, &call_builtin);
- __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
- __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
-
- // Push the argument...
- __ mov(Operand(edx, 0), ebx);
- // ... and fill the rest with holes.
- for (int i = 1; i < kAllocationDelta; i++) {
- __ mov(Operand(edx, i * kPointerSize),
- Immediate(factory()->the_hole_value()));
- }
-
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
- // Restore receiver to edx as finish sequence assumes it's here.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Increment element's and array's sizes.
- __ add(FieldOperand(edi, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(kAllocationDelta)));
-
- // NOTE: This only happen in new-space, where we don't
- // care about the black-byte-count on pages. Otherwise we should
- // update that too if the object is black.
-
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()),
- argc + 1,
- 1);
- }
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss, return_undefined, call_builtin;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- // Get the elements array of the object.
- __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into ecx and calculate new length.
- __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
- __ sub(ecx, Immediate(Smi::FromInt(1)));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(eax, FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(eax, Immediate(factory()->the_hole_value()));
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), ecx);
-
- // Fill with the hole.
- __ mov(FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(factory()->the_hole_value()));
- const int argc = arguments().immediate();
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ mov(eax, Immediate(factory()->undefined_value()));
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = ebx;
- Register index = edi;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->nan_value()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(name));
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = eax;
- Register index = edi;
- Register scratch = edx;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->empty_string()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(name));
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = ebx;
- __ mov(code, Operand(esp, 1 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, Immediate(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, eax);
- generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- if (!CpuFeatures::IsSupported(SSE2)) {
- return Handle<Code>::null();
- }
-
- CpuFeatureScope use_sse2(masm(), SSE2);
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &smi);
-
- // Check if the argument is a heap number and load its value into xmm0.
- Label slow;
- __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
-
- // Check if the argument is strictly positive. Note this also
- // discards NaN.
- __ xorpd(xmm1, xmm1);
- __ ucomisd(xmm0, xmm1);
- __ j(below_equal, &slow);
-
- // Do a truncating conversion.
- __ cvttsd2si(eax, Operand(xmm0));
-
- // Check if the result fits into a smi. Note this also checks for
- // 0x80000000 which signals a failed conversion.
- Label wont_fit_into_smi;
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &wont_fit_into_smi);
-
- // Smi tag and return.
- __ SmiTag(eax);
- __ bind(&smi);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is < 2^kMantissaBits.
- Label already_round;
- __ bind(&wont_fit_into_smi);
- __ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
- __ ucomisd(xmm0, xmm1);
- __ j(above_equal, &already_round);
-
- // Save a copy of the argument.
- __ movaps(xmm2, xmm0);
-
- // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
- __ addsd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
-
- // Compare the argument and the tentative result to get the right mask:
- // if xmm2 < xmm0:
- // xmm2 = 1...1
- // else:
- // xmm2 = 0...0
- __ cmpltsd(xmm2, xmm0);
-
- // Subtract 1 if the argument was less than the tentative result.
- __ LoadPowerOf2(xmm1, ebx, 0);
- __ andpd(xmm1, xmm2);
- __ subsd(xmm0, xmm1);
-
- // Return a new heap number.
- __ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(2 * kPointerSize);
-
- // Return the argument (when it's an already round heap number).
- __ bind(&already_round);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ ret(2 * kPointerSize);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(eax, &not_smi);
-
- // Branchless abs implementation, refer to below:
- // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
- // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ mov(ebx, eax);
- __ sar(ebx, kBitsPerInt - 1);
-
- // Do bitwise not or do nothing depending on ebx.
- __ xor_(eax, ebx);
-
- // Add 1 or do nothing depending on ebx.
- __ sub(eax, ebx);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- // Smi case done.
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its exponent and
- // sign into ebx.
- __ bind(&not_smi);
- __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ test(ebx, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ and_(ebx, ~HeapNumber::kSignMask);
- __ mov(ecx, FieldOperand(eax, HeapNumber::kMantissaOffset));
- __ AllocateHeapNumber(eax, edi, edx, &slow);
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ebx);
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- __ ret(2 * kPointerSize);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(IC::CurrentTypeOf(object, isolate()), edx, holder,
- ebx, eax, edi, name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ mov(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
- // duplicate of return address and will be overwritten.
- GenerateFastApiCall(masm(), optimization, argc);
-
- __ bind(&miss);
- __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
-
- HandlerFrontendFooter(&miss_before_stack_reserved);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
- Label success;
- // Check that the object is a boolean.
- __ cmp(object, factory()->true_value());
- __ j(equal, &success);
- __ cmp(object, factory()->false_value());
- __ j(not_equal, miss);
- __ bind(&success);
-}
-
-
-void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
- if (object->IsGlobalObject()) {
- const int argc = arguments().immediate();
- const int receiver_offset = (argc + 1) * kPointerSize;
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, receiver_offset), edx);
- }
-}
-
-
-Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* miss) {
- GenerateNameCheck(name, miss);
-
- Register reg = edx;
-
- const int argc = arguments().immediate();
- const int receiver_offset = (argc + 1) * kPointerSize;
- __ mov(reg, Operand(esp, receiver_offset));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(reg, miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1);
-
- // Check that the maps haven't changed.
- reg = CheckPrototypes(IC::CurrentTypeOf(object, isolate()), reg, holder,
- ebx, eax, edi, name, miss);
-
- break;
-
- case STRING_CHECK: {
- // Check that the object is a string.
- __ CmpObjectType(reg, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax, miss);
- break;
- }
- case SYMBOL_CHECK: {
- // Check that the object is a symbol.
- __ CmpObjectType(reg, SYMBOL_TYPE, eax);
- __ j(not_equal, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, eax, miss);
- break;
- }
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(reg, &fast);
- __ CmpObjectType(reg, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax, miss);
- break;
- }
- case BOOLEAN_CHECK: {
- GenerateBooleanCheck(reg, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, miss);
- break;
- }
- }
-
- if (check != RECEIVER_MAP_CHECK) {
- Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
- reg = CheckPrototypes(
- IC::CurrentTypeOf(prototype, isolate()),
- eax, holder, ebx, edx, edi, name, miss);
- }
-
- return reg;
-}
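
The switch above performs one receiver-shape test per CheckType before the prototype chain is validated. A hedged standalone C++ model of just that dispatch (the instance-type constants are illustrative placeholders, and the real BOOLEAN_CHECK compares against the true/false oddballs rather than a type tag):

enum class CheckType { kReceiverMap, kString, kSymbol, kNumber, kBoolean };

// Simplified receiver test; a smi passes only the number check, mirroring
// the JumpIfSmi placement in HandlerFrontendHeader above.
bool ReceiverPassesCheck(CheckType check, bool is_smi, int instance_type) {
  const int kFirstNonstringType = 0x80;  // placeholder values, not V8's
  const int kSymbolType = 0x81;
  const int kHeapNumberType = 0x82;
  const int kOddballType = 0x83;
  switch (check) {
    case CheckType::kReceiverMap:
      return !is_smi;  // the map itself is compared in CheckPrototypes
    case CheckType::kString:
      return !is_smi && instance_type < kFirstNonstringType;
    case CheckType::kSymbol:
      return !is_smi && instance_type == kSymbolType;
    case CheckType::kNumber:
      return is_smi || instance_type == kHeapNumberType;
    case CheckType::kBoolean:
      return !is_smi && instance_type == kOddballType;
  }
  return false;
}
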
-
-
-void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
- Register function,
- Label* miss) {
- // Check that the function really is a function.
- GenerateFunctionCheck(function, ebx, miss);
-
- if (!function.is(edi)) __ mov(edi, function);
- PatchGlobalProxy(object);
-
- // Invoke the function.
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind());
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state());
- compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
- &miss);
-
- // Restore receiver.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- GenerateJumpFunction(object, eax, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- // Potentially loads a closure that matches the shared function info of the
- // function, rather than the function itself.
- GenerateLoadFunctionFromCell(cell, function, &miss);
- GenerateJumpFunction(object, edi, function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
__ pop(scratch1()); // remove the return address
__ push(receiver());
+ __ push(holder_reg);
__ Push(callback);
__ Push(name);
__ push(value());
@@ -2653,25 +1147,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch1(),
- scratch2(), this->name(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -2684,27 +1160,31 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
- __ push(eax);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ push(edx);
- __ push(eax);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ push(receiver);
+ __ push(value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2744,6 +1224,20 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
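
The pop/push sequence in GenerateStoreArrayLength rearranges the stack so the runtime call sees receiver and value as its two tail-call arguments, with the return address restored on top. A standalone sketch of that shuffle over a vector-as-stack (a hypothetical model, not the real frame layout):

#include <vector>

using Word = unsigned long;

// Stack grows toward the back of the vector; back() plays the role of esp[0].
void PrepareArrayLengthTailCall(std::vector<Word>* stack,
                                Word receiver, Word value) {
  Word return_address = stack->back();  // __ pop(scratch1())
  stack->pop_back();
  stack->push_back(receiver);           // __ push(receiver())
  stack->push_back(value);              // __ push(value())
  stack->push_back(return_address);     // __ push(scratch1())
}
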
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -2772,7 +1266,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name) {
NonexistentHandlerFrontend(type, last, name);
@@ -2801,33 +1295,22 @@ Register* KeyedLoadStubCompiler::registers() {
}
-Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg };
- return registers;
+Register StoreStubCompiler::value() {
+ return eax;
}
-Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg };
+Register* StoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
return registers;
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Immediate(name));
- __ j(not_equal, miss);
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Immediate(name));
- __ j(not_equal, miss);
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
+ return registers;
}
@@ -2836,6 +1319,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
{
@@ -2843,11 +1327,16 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2866,7 +1355,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
@@ -2875,7 +1364,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
- if (Serializer::enabled()) {
+ if (masm()->serializer_enabled()) {
__ mov(eax, Immediate(cell));
__ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
} else {
@@ -2891,13 +1380,13 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
- HandlerFrontendFooter(name, &miss);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
// The code above already loads the result into the return register.
__ ret(0);
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
@@ -2911,8 +1400,10 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ cmp(this->name(), Immediate(name));
+ __ j(not_equal, &miss);
}
Label number_case;
@@ -2924,12 +1415,12 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
int receiver_count = types->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<Type> type = types->at(current);
+ Handle<HeapType> type = types->at(current);
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
__ cmp(map_reg, map);
- if (type->Is(Type::Number())) {
+ if (type->Is(HeapType::Number())) {
ASSERT(!number_case.is_unused());
__ bind(&number_case);
}
diff --git a/chromium/v8/src/ic-inl.h b/chromium/v8/src/ic-inl.h
index 24a939dedbf..25094ae9cf2 100644
--- a/chromium/v8/src/ic-inl.h
+++ b/chromium/v8/src/ic-inl.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IC_INL_H_
#define V8_IC_INL_H_
-#include "ic.h"
+#include "src/ic.h"
-#include "compiler.h"
-#include "debug.h"
-#include "macro-assembler.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -42,7 +19,6 @@ Address IC::address() const {
// Get the address of the call.
Address result = Assembler::target_address_from_return_address(pc());
-#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate()->debug();
// First check if any break points are active; if not, just return the
// address of the call.
@@ -50,25 +26,64 @@ Address IC::address() const {
// At least one break point is active; perform an additional test to ensure
// that break point locations are updated correctly.
- if (debug->IsDebugBreak(Assembler::target_address_at(result))) {
+ if (debug->IsDebugBreak(Assembler::target_address_at(result,
+ raw_constant_pool()))) {
// If the call site is a call to debug break then return the address in
// the original code instead of the address in the running code. This will
// cause the original code to be updated and keeps the breakpoint active in
// the running code.
- return OriginalCodeAddress();
+ Code* code = GetCode();
+ Code* original_code = GetOriginalCode();
+ intptr_t delta =
+ original_code->instruction_start() - code->instruction_start();
+ // Return the address in the original code. This is the place where
+ // the call which has been overwritten by the DebugBreakXXX resides
+ // and the place where the inline cache system should look.
+ return result + delta;
} else {
// No break point here just return the address of the call.
return result;
}
-#else
- return result;
-#endif
}
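
The inlined logic above replaces the old OriginalCodeAddress() helper: when the call site has been patched to a DebugBreak stub, the IC address is translated back into the original (unpatched) code via the instruction-start delta. A minimal standalone sketch of that translation, using plain byte pointers and hypothetical names:

#include <cstdint>

using Address = uint8_t*;

// The original code object has the same layout as the breakpointed copy,
// so the same byte offset identifies the call site in both.
Address ToOriginalCodeAddress(Address address_in_running_code,
                              Address running_instruction_start,
                              Address original_instruction_start) {
  intptr_t delta = original_instruction_start - running_instruction_start;
  return address_in_running_code + delta;
}
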
-Code* IC::GetTargetAtAddress(Address address) {
+ConstantPoolArray* IC::constant_pool() const {
+ if (!FLAG_enable_ool_constant_pool) {
+ return NULL;
+ } else {
+ Handle<ConstantPoolArray> result = raw_constant_pool_;
+ Debug* debug = isolate()->debug();
+ // First check if any break points are active; if not, just return the
+ // original constant pool.
+ if (!debug->has_break_points()) return *result;
+
+ // At least one break point is active; perform an additional test to
+ // ensure that break point locations are updated correctly.
+ Address target = Assembler::target_address_from_return_address(pc());
+ if (debug->IsDebugBreak(
+ Assembler::target_address_at(target, raw_constant_pool()))) {
+ // If the call site is a call to debug break then we want to return the
+ // constant pool for the original code instead of the breakpointed code.
+ return GetOriginalCode()->constant_pool();
+ }
+ return *result;
+ }
+}
+
+
+ConstantPoolArray* IC::raw_constant_pool() const {
+ if (FLAG_enable_ool_constant_pool) {
+ return *raw_constant_pool_;
+ } else {
+ return NULL;
+ }
+}
+
+
+Code* IC::GetTargetAtAddress(Address address,
+ ConstantPoolArray* constant_pool) {
// Get the target address of the IC.
- Address target = Assembler::target_address_at(address);
+ Address target = Assembler::target_address_at(address, constant_pool);
// Convert target address to the code object. Code::GetCodeFromTargetAddress
// is safe for use during GC where the map might be marked.
Code* result = Code::GetCodeFromTargetAddress(target);
@@ -77,10 +92,12 @@ Code* IC::GetTargetAtAddress(Address address) {
}
-void IC::SetTargetAtAddress(Address address, Code* target) {
+void IC::SetTargetAtAddress(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
Heap* heap = target->GetHeap();
- Code* old_target = GetTargetAtAddress(address);
+ Code* old_target = GetTargetAtAddress(address, constant_pool);
#ifdef DEBUG
// STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
// ICs as strict mode. The strict-ness of the IC must be preserved.
@@ -90,7 +107,8 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
StoreIC::GetStrictMode(target->extra_ic_state()));
}
#endif
- Assembler::set_target_address_at(address, target->instruction_start());
+ Assembler::set_target_address_at(
+ address, constant_pool, target->instruction_start());
if (heap->gc_state() == Heap::MARK_COMPACT) {
heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
} else {
@@ -111,8 +129,8 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object) {
HeapObject* IC::GetCodeCacheHolder(Isolate* isolate,
- Object* object,
- InlineCacheHolderFlag holder) {
+ Object* object,
+ InlineCacheHolderFlag holder) {
if (object->IsSmi()) holder = PROTOTYPE_MAP;
Object* map_owner = holder == OWN_MAP
? object : object->GetPrototype(isolate);
@@ -120,11 +138,11 @@ HeapObject* IC::GetCodeCacheHolder(Isolate* isolate,
}
-InlineCacheHolderFlag IC::GetCodeCacheFlag(Type* type) {
- if (type->Is(Type::Boolean()) ||
- type->Is(Type::Number()) ||
- type->Is(Type::String()) ||
- type->Is(Type::Symbol())) {
+InlineCacheHolderFlag IC::GetCodeCacheFlag(HeapType* type) {
+ if (type->Is(HeapType::Boolean()) ||
+ type->Is(HeapType::Number()) ||
+ type->Is(HeapType::String()) ||
+ type->Is(HeapType::Symbol())) {
return PROTOTYPE_MAP;
}
return OWN_MAP;
@@ -132,19 +150,19 @@ InlineCacheHolderFlag IC::GetCodeCacheFlag(Type* type) {
Handle<Map> IC::GetCodeCacheHolder(InlineCacheHolderFlag flag,
- Type* type,
+ HeapType* type,
Isolate* isolate) {
if (flag == PROTOTYPE_MAP) {
Context* context = isolate->context()->native_context();
JSFunction* constructor;
- if (type->Is(Type::Boolean())) {
+ if (type->Is(HeapType::Boolean())) {
constructor = context->boolean_function();
- } else if (type->Is(Type::Number())) {
+ } else if (type->Is(HeapType::Number())) {
constructor = context->number_function();
- } else if (type->Is(Type::String())) {
+ } else if (type->Is(HeapType::String())) {
constructor = context->string_function();
} else {
- ASSERT(type->Is(Type::Symbol()));
+ ASSERT(type->Is(HeapType::Symbol()));
constructor = context->symbol_function();
}
return handle(JSObject::cast(constructor->instance_prototype())->map());
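
Taken together, GetCodeCacheFlag and GetCodeCacheHolder say: primitive receivers cache their handlers on the map of the wrapper constructor's instance prototype, while ordinary objects use their own map. A hedged standalone sketch of that decision (string names stand in for the native-context constructor lookups):

#include <string>

enum class CacheFlag { kOwnMap, kPrototypeMap };

struct ReceiverKind { bool boolean, number, string, symbol; };

CacheFlag GetCodeCacheFlag(const ReceiverKind& t) {
  return (t.boolean || t.number || t.string || t.symbol)
             ? CacheFlag::kPrototypeMap
             : CacheFlag::kOwnMap;
}

// Which constructor's instance prototype holds the cache (illustrative only).
std::string CacheHolderConstructor(const ReceiverKind& t) {
  if (t.boolean) return "Boolean";
  if (t.number) return "Number";
  if (t.string) return "String";
  return "Symbol";  // mirrors the ASSERT(type->Is(HeapType::Symbol()))
}
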
diff --git a/chromium/v8/src/ic.cc b/chromium/v8/src/ic.cc
index cd508707e7f..cd92af11852 100644
--- a/chromium/v8/src/ic.cc
+++ b/chromium/v8/src/ic.cc
@@ -1,40 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "arguments.h"
-#include "codegen.h"
-#include "execution.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/codegen.h"
+#include "src/conversions.h"
+#include "src/execution.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -90,9 +68,11 @@ void IC::TraceIC(const char* type,
}
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
ExtraICState extra_state = new_target->extra_ic_state();
- const char* modifier =
- GetTransitionMarkModifier(
- KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
+ const char* modifier = "";
+ if (new_target->kind() == Code::KEYED_STORE_IC) {
+ modifier = GetTransitionMarkModifier(
+ KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
+ }
PrintF(" (%c->%c%s)",
TransitionMarkFromState(state()),
TransitionMarkFromState(new_state),
@@ -120,12 +100,18 @@ void IC::TraceIC(const char* type,
IC::IC(FrameDepth depth, Isolate* isolate)
: isolate_(isolate),
- target_set_(false) {
+ target_set_(false),
+ target_maps_set_(false) {
// To improve the performance of the (much used) IC code, we unfold a few
// levels of the stack frame iteration code. This yields a ~35% speedup when
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
const Address entry =
Isolate::c_entry_fp(isolate->thread_local_top());
+ Address constant_pool = NULL;
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool = Memory::Address_at(
+ entry + ExitFrameConstants::kConstantPoolOffset);
+ }
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
@@ -133,6 +119,10 @@ IC::IC(FrameDepth depth, Isolate* isolate)
// StubFailureTrampoline, we need to look one frame further down the stack to
// find the frame pointer and the return address stack slot.
if (depth == EXTRA_CALL_FRAME) {
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool = Memory::Address_at(
+ fp + StandardFrameConstants::kConstantPoolOffset);
+ }
const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
@@ -144,15 +134,19 @@ IC::IC(FrameDepth depth, Isolate* isolate)
ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
#endif
fp_ = fp;
+ if (FLAG_enable_ool_constant_pool) {
+ raw_constant_pool_ = handle(
+ ConstantPoolArray::cast(reinterpret_cast<Object*>(constant_pool)),
+ isolate);
+ }
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
target_ = handle(raw_target(), isolate);
state_ = target_->ic_state();
+ extra_ic_state_ = target_->extra_ic_state();
}
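
The constructor comment above describes hand-unfolded frame iteration: caller pc/fp come straight from the C entry frame, with one extra hop for the StubFailureTrampoline case, and (under --enable-ool-constant-pool) the constant pool pointer is captured from the same frame. A standalone sketch of the unfold with hypothetical offsets, not the real frame constants:

#include <cstdint>
#include <cstring>

using Address = uint8_t*;

static Address LoadAddress(Address slot) {
  Address value;
  std::memcpy(&value, slot, sizeof(value));  // Memory::Address_at analogue
  return value;
}

struct FrameView { Address fp; Address* pc_address; };

FrameView Unfold(Address entry, bool extra_call_frame,
                 int caller_pc_offset, int caller_fp_offset) {
  Address* pc = reinterpret_cast<Address*>(entry + caller_pc_offset);
  Address fp = LoadAddress(entry + caller_fp_offset);
  if (extra_call_frame) {  // look one frame further down the stack
    pc = reinterpret_cast<Address*>(fp + caller_pc_offset);
    fp = LoadAddress(fp + caller_fp_offset);
  }
  return {fp, pc};
}
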
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() const {
- HandleScope scope(isolate());
+SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
// corresponding to the frame.
@@ -162,23 +156,26 @@ Address IC::OriginalCodeAddress() const {
// Find the function on the stack and both the active code for the
// function and the original code.
JSFunction* function = frame->function();
- Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+ return function->shared();
+}
+
+
+Code* IC::GetCode() const {
+ HandleScope scope(isolate());
+ Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
Code* code = shared->code();
+ return code;
+}
+
+
+Code* IC::GetOriginalCode() const {
+ HandleScope scope(isolate());
+ Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
ASSERT(Debug::HasDebugInfo(shared));
Code* original_code = Debug::GetDebugInfo(shared)->original_code();
ASSERT(original_code->IsCode());
- // Get the address of the call site in the active code. This is the
- // place where the call to DebugBreakXXX is and where the IC
- // normally would be.
- Address addr = Assembler::target_address_from_return_address(pc());
- // Return the address in the original code. This is the place where
- // the call which has been overwritten by the DebugBreakXXX resides
- // and the place where the inline cache system should look.
- intptr_t delta =
- original_code->instruction_start() - code->instruction_start();
- return addr + delta;
+ return original_code;
}
-#endif
static bool HasInterceptorGetter(JSObject* object) {
@@ -197,7 +194,7 @@ static void LookupForRead(Handle<Object> object,
// Skip all the objects with named interceptors, but
// without actual getter.
while (true) {
- object->Lookup(*name, lookup);
+ object->Lookup(name, lookup);
// Besides normal conditions (property not found or it's not
// an interceptor), bail out if lookup is not cacheable: we won't
// be able to IC it anyway and regular lookup should work fine.
@@ -210,7 +207,7 @@ static void LookupForRead(Handle<Object> object,
return;
}
- holder->LocalLookupRealNamedProperty(*name, lookup);
+ holder->LookupOwnRealNamedProperty(name, lookup);
if (lookup->IsFound()) {
ASSERT(!lookup->IsInterceptor());
return;
@@ -227,64 +224,9 @@ static void LookupForRead(Handle<Object> object,
}
-bool CallIC::TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object) {
- if (!lookup->IsConstantFunction()) return false;
- JSFunction* function = lookup->GetConstantFunction();
- if (!function->shared()->HasBuiltinFunctionId()) return false;
-
- // Fetch the arguments passed to the called function.
- const int argc = target()->arguments_count();
- Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- Arguments args(argc + 1,
- &Memory::Object_at(fp +
- StandardFrameConstants::kCallerSPOffset +
- argc * kPointerSize));
- switch (function->shared()->builtin_function_id()) {
- case kStringCharCodeAt:
- case kStringCharAt:
- if (object->IsString()) {
- String* string = String::cast(*object);
- // Check there's the right string value or wrapper in the receiver slot.
- ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
- // If we're in the default (fastest) state and the index is
- // out of bounds, update the state to record this fact.
- if (StringStubState::decode(extra_ic_state()) == DEFAULT_STRING_STUB &&
- argc >= 1 && args[1]->IsNumber()) {
- double index = DoubleToInteger(args.number_at(1));
- if (index < 0 || index >= string->length()) {
- extra_ic_state_ =
- StringStubState::update(extra_ic_state(),
- STRING_INDEX_OUT_OF_BOUNDS);
- return true;
- }
- }
- }
- break;
- default:
- return false;
- }
- return false;
-}
-
-
bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
Handle<String> name) {
- if (target()->is_call_stub()) {
- LookupResult lookup(isolate());
- LookupForRead(receiver, name, &lookup);
- if (static_cast<CallIC*>(this)->TryUpdateExtraICState(&lookup, receiver)) {
- return true;
- }
- }
-
- if (target()->is_keyed_stub()) {
- // Determine whether the failure is due to a name failure.
- if (!name->IsName()) return false;
- Name* stub_name = target()->FindFirstName();
- if (*name != stub_name) return false;
- }
+ if (!IsNameCompatibleWithMonomorphicPrototypeFailure(name)) return false;
InlineCacheHolderFlag cache_holder =
Code::ExtractCacheHolderFromFlags(target()->flags());
@@ -329,7 +271,7 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
// If the IC is shared between multiple receivers (slow dictionary mode), then
// the map cannot be deprecated and the stub invalidated.
if (cache_holder == OWN_MAP) {
- Map* old_map = target()->FindFirstMap();
+ Map* old_map = FirstTargetMap();
if (old_map == *map) return true;
if (old_map != NULL) {
if (old_map->is_deprecated()) return true;
@@ -343,7 +285,7 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
if (receiver->IsGlobalObject()) {
LookupResult lookup(isolate());
GlobalObject* global = GlobalObject::cast(*receiver);
- global->LocalLookupRealNamedProperty(*name, &lookup);
+ global->LookupOwnRealNamedProperty(name, &lookup);
if (!lookup.IsFound()) return false;
PropertyCell* cell = global->GetPropertyCell(&lookup);
return cell->type()->IsConstant();
@@ -367,6 +309,18 @@ void IC::TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name) {
}
+bool IC::IsNameCompatibleWithMonomorphicPrototypeFailure(Handle<Object> name) {
+ if (target()->is_keyed_stub()) {
+ // Determine whether the failure is due to a name failure.
+ if (!name->IsName()) return false;
+ Name* stub_name = target()->FindFirstName();
+ if (*name != stub_name) return false;
+ }
+
+ return true;
+}
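
The new predicate factors out the keyed-stub rule previously inlined in TryRemoveInvalidPrototypeDependentStub: a keyed stub is specialized to a single name, so a monomorphic prototype failure is only recoverable when the incoming key equals that name. A standalone sketch with std::string standing in for Name:

#include <string>

// `incoming_name` is null when the failing key is not a name at all
// (e.g. an element index), which can never match a keyed stub's name.
bool NameCompatibleWithPrototypeFailure(bool is_keyed_stub,
                                        const std::string* incoming_name,
                                        const std::string& stub_name) {
  if (!is_keyed_stub) return true;
  if (incoming_name == nullptr) return false;
  return *incoming_name == stub_name;
}
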
+
+
void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
if (!name->IsString()) return;
if (state() != MONOMORPHIC) {
@@ -383,8 +337,9 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
// because of changes in the prototype chain to avoid hitting it
// again.
if (TryRemoveInvalidPrototypeDependentStub(
- receiver, Handle<String>::cast(name))) {
- return MarkMonomorphicPrototypeFailure();
+ receiver, Handle<String>::cast(name)) &&
+ TryMarkMonomorphicPrototypeFailure(name)) {
+ return;
}
// The builtins object is special. It only changes when JavaScript
@@ -397,35 +352,22 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
}
-RelocInfo::Mode IC::ComputeMode() {
- Address addr = address();
- Code* code = Code::cast(isolate()->FindCodeObject(addr));
- for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
- !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->pc() == addr) return info->rmode();
- }
- UNREACHABLE();
- return RelocInfo::NONE32;
-}
-
-
-Failure* IC::TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key) {
+MaybeHandle<Object> IC::TypeError(const char* type,
+ Handle<Object> object,
+ Handle<Object> key) {
HandleScope scope(isolate());
Handle<Object> args[2] = { key, object };
Handle<Object> error = isolate()->factory()->NewTypeError(
type, HandleVector(args, 2));
- return isolate()->Throw(*error);
+ return isolate()->Throw<Object>(error);
}
-Failure* IC::ReferenceError(const char* type, Handle<String> name) {
+MaybeHandle<Object> IC::ReferenceError(const char* type, Handle<String> name) {
HandleScope scope(isolate());
Handle<Object> error = isolate()->factory()->NewReferenceError(
type, HandleVector(&name, 1));
- return isolate()->Throw(*error);
+ return isolate()->Throw<Object>(error);
}
@@ -440,9 +382,6 @@ static int ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state) {
void IC::PostPatching(Address address, Code* target, Code* old_target) {
- if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
- return;
- }
Isolate* isolate = target->GetHeap()->isolate();
Code* host = isolate->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
@@ -453,6 +392,10 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
target->is_inline_cache_stub()) {
int delta = ComputeTypeInfoCountDelta(old_target->ic_state(),
target->ic_state());
+ // Call ICs don't have interesting state changes from this point
+ // of view.
+ ASSERT(target->kind() != Code::CALL_IC || delta == 0);
+
// Not all Code objects have TypeFeedbackInfo.
if (host->type_feedback_info()->IsTypeFeedbackInfo() && delta != 0) {
TypeFeedbackInfo* info =
@@ -465,33 +408,72 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
TypeFeedbackInfo::cast(host->type_feedback_info());
info->change_own_type_change_checksum();
}
- if (FLAG_watch_ic_patching) {
- host->set_profiler_ticks(0);
- isolate->runtime_profiler()->NotifyICChanged();
- }
+ host->set_profiler_ticks(0);
+ isolate->runtime_profiler()->NotifyICChanged();
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
// unoptimized version for the benefit of later inlining.
}
-void IC::Clear(Isolate* isolate, Address address) {
- Code* target = GetTargetAtAddress(address);
+void IC::RegisterWeakMapDependency(Handle<Code> stub) {
+ if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_ic &&
+ stub->CanBeWeakStub()) {
+ ASSERT(!stub->is_weak_stub());
+ MapHandleList maps;
+ stub->FindAllMaps(&maps);
+ if (maps.length() == 1 && stub->IsWeakObjectInIC(*maps.at(0))) {
+ Map::AddDependentIC(maps.at(0), stub);
+ stub->mark_as_weak_stub();
+ if (FLAG_enable_ool_constant_pool) {
+ stub->constant_pool()->set_weak_object_state(
+ ConstantPoolArray::WEAK_OBJECTS_IN_IC);
+ }
+ }
+ }
+}
+
+
+void IC::InvalidateMaps(Code* stub) {
+ ASSERT(stub->is_weak_stub());
+ stub->mark_as_invalidated_weak_stub();
+ Isolate* isolate = stub->GetIsolate();
+ Heap* heap = isolate->heap();
+ Object* undefined = heap->undefined_value();
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(stub, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+ }
+ }
+ CPU::FlushICache(stub->instruction_start(), stub->instruction_size());
+}
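
InvalidateMaps walks the stub's relocation info and clears every embedded map, so a dying map cannot be kept alive by the stub. A minimal standalone model of that sweep (a vector of slots stands in for the RelocIterator; the instruction-cache flush has no analogue here):

#include <vector>

struct EmbeddedSlot {
  void* target;
  bool targets_map;  // an EMBEDDED_OBJECT reloc entry pointing at a Map
};

void ClearEmbeddedMaps(std::vector<EmbeddedSlot>* slots, void* undefined) {
  for (EmbeddedSlot& slot : *slots) {
    if (slot.targets_map) {
      slot.target = undefined;  // set_target_object(undefined, SKIP_WRITE_BARRIER)
    }
  }
  // The real code must also flush the instruction cache afterwards.
}
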
+
+
+void IC::Clear(Isolate* isolate, Address address,
+ ConstantPoolArray* constant_pool) {
+ Code* target = GetTargetAtAddress(address, constant_pool);
// Don't clear debug break inline cache as it will remove the break point.
if (target->is_debug_stub()) return;
switch (target->kind()) {
- case Code::LOAD_IC: return LoadIC::Clear(isolate, address, target);
+ case Code::LOAD_IC:
+ return LoadIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_LOAD_IC:
- return KeyedLoadIC::Clear(isolate, address, target);
- case Code::STORE_IC: return StoreIC::Clear(isolate, address, target);
+ return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
+ case Code::STORE_IC:
+ return StoreIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_STORE_IC:
- return KeyedStoreIC::Clear(isolate, address, target);
- case Code::CALL_IC: return CallIC::Clear(address, target);
- case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
- case Code::COMPARE_IC: return CompareIC::Clear(isolate, address, target);
- case Code::COMPARE_NIL_IC: return CompareNilIC::Clear(address, target);
+ return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
+ case Code::CALL_IC:
+ return CallIC::Clear(isolate, address, target, constant_pool);
+ case Code::COMPARE_IC:
+ return CompareIC::Clear(isolate, address, target, constant_pool);
+ case Code::COMPARE_NIL_IC:
+ return CompareNilIC::Clear(address, target, constant_pool);
case Code::BINARY_OP_IC:
case Code::TO_BOOLEAN_IC:
// Clearing these is tricky and does not
@@ -502,50 +484,64 @@ void IC::Clear(Isolate* isolate, Address address) {
}
-void CallICBase::Clear(Address address, Code* target) {
- if (IsCleared(target)) return;
- bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
- Code* code =
- target->GetIsolate()->stub_cache()->FindCallInitialize(
- target->arguments_count(),
- contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET,
- target->kind());
- SetTargetAtAddress(address, code);
-}
-
-
-void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
+void KeyedLoadIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate), constant_pool);
+}
+
+
+void CallIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
+ // Currently, CallIC doesn't have state changes.
}
-void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
+void LoadIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
- SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
+ Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
+ Code::LOAD_IC, target->extra_ic_state());
+ SetTargetAtAddress(address, code, constant_pool);
}
-void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
+void StoreIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
- SetTargetAtAddress(address,
- *pre_monomorphic_stub(
- isolate, StoreIC::GetStrictMode(target->extra_ic_state())));
+ Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
+ Code::STORE_IC, target->extra_ic_state());
+ SetTargetAtAddress(address, code, constant_pool);
}
-void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
+void KeyedStoreIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
SetTargetAtAddress(address,
*pre_monomorphic_stub(
- isolate, StoreIC::GetStrictMode(target->extra_ic_state())));
+ isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
+ constant_pool);
}
-void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
+void CompareIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
ASSERT(target->major_key() == CodeStub::CompareIC);
CompareIC::State handler_state;
Token::Value op;
@@ -553,51 +549,24 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
&handler_state, &op);
// Only clear CompareICs that can retain objects.
if (handler_state != KNOWN_OBJECT) return;
- SetTargetAtAddress(address, GetRawUninitialized(isolate, op));
+ SetTargetAtAddress(address, GetRawUninitialized(isolate, op), constant_pool);
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
-Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
- Handle<Object> delegate = Execution::GetFunctionDelegate(isolate(), object);
-
- if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
- // Patch the receiver and use the delegate as the function to
- // invoke. This is used for invoking objects as if they were functions.
- const int argc = target()->arguments_count();
- StackFrameLocator locator(isolate());
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *object);
+Handle<Code> KeyedLoadIC::megamorphic_stub() {
+ if (FLAG_compiled_keyed_generic_loads) {
+ return KeyedLoadGenericElementStub(isolate()).GetCode();
+ } else {
+ return isolate()->builtins()->KeyedLoadIC_Generic();
}
-
- return delegate;
}
-
-void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
- Handle<Object> object) {
- while (callee->IsJSFunctionProxy()) {
- callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap(),
- isolate());
- }
-
- if (callee->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
- if (!function->shared()->is_classic_mode() || function->IsBuiltin()) {
- // Do not wrap receiver for strict mode functions or for builtins.
- return;
- }
- }
-
- // And only wrap string, number or boolean.
- if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- // Change the receiver to the result of calling ToObject on it.
- const int argc = this->target()->arguments_count();
- StackFrameLocator locator(isolate());
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *isolate()->factory()->ToObject(object));
+Handle<Code> KeyedLoadIC::generic_stub() const {
+ if (FLAG_compiled_keyed_generic_loads) {
+ return KeyedLoadGenericElementStub(isolate()).GetCode();
+ } else {
+ return isolate()->builtins()->KeyedLoadIC_Generic();
}
}
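
Both accessors make the same flag-driven choice: with FLAG_compiled_keyed_generic_loads the generic keyed load is a compiled code stub, otherwise the hand-written builtin is used. A trivial standalone sketch of the selection:

enum class KeyedLoadStubKind { kCompiledGeneric, kBuiltinGeneric };

KeyedLoadStubKind ChooseKeyedLoadStub(bool compiled_keyed_generic_loads) {
  return compiled_keyed_generic_loads ? KeyedLoadStubKind::kCompiledGeneric
                                      : KeyedLoadStubKind::kBuiltinGeneric;
}
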
@@ -611,250 +580,7 @@ static bool MigrateDeprecated(Handle<Object> object) {
}
-MaybeObject* CallICBase::LoadFunction(Handle<Object> object,
- Handle<String> name) {
- bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
-
- // If the object is undefined or null it's illegal to try to get any
- // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_call", object, name);
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element if so.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- Handle<Object> result = Object::GetElement(isolate(), object, index);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- if (result->IsJSFunction()) return *result;
-
- // Try to find a suitable function delegate for the object at hand.
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return *result;
-
- // Otherwise, it will fail in the lookup step.
- }
-
- // Lookup the property in the object.
- LookupResult lookup(isolate());
- LookupForRead(object, name, &lookup);
-
- if (!lookup.IsFound()) {
- // If the object does not have the requested property, check which
- // exception we need to throw.
- return IsUndeclaredGlobal(object)
- ? ReferenceError("not_defined", name)
- : TypeError("undefined_method", object, name);
- }
-
- // Lookup is valid: Update inline cache and stub cache.
- if (use_ic) UpdateCaches(&lookup, object, name);
-
- // Get the property.
- PropertyAttributes attr;
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
-
- if (lookup.IsInterceptor() && attr == ABSENT) {
- // If the object does not have the requested property, check which
- // exception we need to throw.
- return IsUndeclaredGlobal(object)
- ? ReferenceError("not_defined", name)
- : TypeError("undefined_method", object, name);
- }
-
- ASSERT(!result->IsTheHole());
-
- // Make receiver an object if the callee requires it. Strict mode or builtin
- // functions do not wrap the receiver, non-strict functions and objects
- // called as functions do.
- ReceiverToObjectIfRequired(result, object);
-
- if (result->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(result);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Handle stepping into a function if step into is active.
- Debug* debug = isolate()->debug();
- if (debug->StepInActive()) {
- // Protect the result in a handle as the debugger can allocate and might
- // cause GC.
- debug->HandleStepIn(function, object, fp(), false);
- }
-#endif
- return *function;
- }
-
- // Try to find a suitable function delegate for the object at hand.
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return *result;
-
- return TypeError("property_not_function", object, name);
-}
-
-
-Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
- Handle<Object> object,
- Handle<String> name) {
- int argc = target()->arguments_count();
- Handle<JSObject> holder(lookup->holder(), isolate());
- switch (lookup->type()) {
- case FIELD: {
- PropertyIndex index = lookup->GetFieldIndex();
- return isolate()->stub_cache()->ComputeCallField(
- argc, kind_, extra_ic_state(), name, object, holder, index);
- }
- case CONSTANT: {
- if (!lookup->IsConstantFunction()) return Handle<Code>::null();
- // Get the constant function and compute the code stub for this
- // call; used for rewriting to monomorphic state and making sure
- // that the code stub is in the stub cache.
- Handle<JSFunction> function(lookup->GetConstantFunction(), isolate());
- return isolate()->stub_cache()->ComputeCallConstant(
- argc, kind_, extra_ic_state(), name, object, holder, function);
- }
- case NORMAL: {
- // If we return a null handle, the IC will not be patched.
- if (!object->IsJSObject()) return Handle<Code>::null();
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (holder->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Handle<PropertyCell> cell(
- global->GetPropertyCell(lookup), isolate());
- if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
- Handle<JSFunction> function(JSFunction::cast(cell->value()));
- return isolate()->stub_cache()->ComputeCallGlobal(
- argc, kind_, extra_ic_state(), name,
- receiver, global, cell, function);
- } else {
- // There is only one shared stub for calling normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
- return isolate()->stub_cache()->ComputeCallNormal(
- argc, kind_, extra_ic_state());
- }
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(*holder));
- return isolate()->stub_cache()->ComputeCallInterceptor(
- argc, kind_, extra_ic_state(), name, object, holder);
- default:
- return Handle<Code>::null();
- }
-}
-
-
-Handle<Code> CallICBase::megamorphic_stub() {
- return isolate()->stub_cache()->ComputeCallMegamorphic(
- target()->arguments_count(), kind_, extra_ic_state());
-}
-
-
-Handle<Code> CallICBase::pre_monomorphic_stub() {
- return isolate()->stub_cache()->ComputeCallPreMonomorphic(
- target()->arguments_count(), kind_, extra_ic_state());
-}
-
-
-void CallICBase::UpdateCaches(LookupResult* lookup,
- Handle<Object> object,
- Handle<String> name) {
- // Bail out if we didn't find a result.
- if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
-
- if (state() == UNINITIALIZED) {
- set_target(*pre_monomorphic_stub());
- TRACE_IC("CallIC", name);
- return;
- }
-
- Handle<Code> code = ComputeMonomorphicStub(lookup, object, name);
- // If there's no appropriate stub we simply avoid updating the caches.
- // TODO(verwaest): Install a slow fallback in this case to avoid not learning,
- // and deopting Crankshaft code.
- if (code.is_null()) return;
-
- Handle<JSObject> cache_object = object->IsJSObject()
- ? Handle<JSObject>::cast(object)
- : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())),
- isolate());
-
- PatchCache(CurrentTypeOf(cache_object, isolate()), name, code);
- TRACE_IC("CallIC", name);
-}
-
-
-MaybeObject* KeyedCallIC::LoadFunction(Handle<Object> object,
- Handle<Object> key) {
- if (key->IsInternalizedString()) {
- return CallICBase::LoadFunction(object, Handle<String>::cast(key));
- }
-
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_call", object, key);
- }
-
- bool use_ic = MigrateDeprecated(object)
- ? false : FLAG_use_ic && !object->IsAccessCheckNeeded();
-
- if (use_ic && state() != MEGAMORPHIC) {
- ASSERT(!object->IsJSGlobalProxy());
- int argc = target()->arguments_count();
- Handle<Code> stub;
-
- // Use the KeyedArrayCallStub if the call is of the form array[smi](...),
- // where array is an instance of one of the initial array maps (without
- // extra named properties).
- // TODO(verwaest): Also support keyed calls on instances of other maps.
- if (object->IsJSArray() && key->IsSmi()) {
- Handle<JSArray> array = Handle<JSArray>::cast(object);
- ElementsKind kind = array->map()->elements_kind();
- if (IsFastObjectElementsKind(kind) &&
- array->map() == isolate()->get_initial_js_array_map(kind)) {
- KeyedArrayCallStub stub_gen(IsHoleyElementsKind(kind), argc);
- stub = stub_gen.GetCode(isolate());
- }
- }
-
- if (stub.is_null()) {
- stub = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, Code::KEYED_CALL_IC, kNoExtraICState);
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = isolate()->stub_cache()->ComputeCallArguments(argc);
- }
- }
- ASSERT(!stub.is_null());
- }
- set_target(*stub);
- TRACE_IC("CallIC", key);
- }
-
- Handle<Object> result = GetProperty(isolate(), object, key);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
-
- // Make receiver an object if the callee requires it. Strict mode or builtin
- // functions do not wrap the receiver, non-strict functions and objects
- // called as functions do.
- ReceiverToObjectIfRequired(result, object);
- if (result->IsJSFunction()) return *result;
-
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return *result;
-
- return TypeError("property_not_function", object, key);
-}
-
-
-MaybeObject* LoadIC::Load(Handle<Object> object,
- Handle<String> name) {
+MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
@@ -862,41 +588,16 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
}
if (FLAG_use_ic) {
- // Use specialized code for getting the length of strings and
- // string wrapper objects. The length property of string wrapper
- // objects is read-only and therefore always returns the length of
- // the underlying string value. See ECMA-262 15.5.5.1.
- if (object->IsStringWrapper() &&
- name->Equals(isolate()->heap()->length_string())) {
- Handle<Code> stub;
- if (state() == UNINITIALIZED) {
- stub = pre_monomorphic_stub();
- } else if (state() == PREMONOMORPHIC || state() == MONOMORPHIC) {
- StringLengthStub string_length_stub(kind());
- stub = string_length_stub.GetCode(isolate());
- } else if (state() != MEGAMORPHIC) {
- ASSERT(state() != GENERIC);
- stub = megamorphic_stub();
- }
- if (!stub.is_null()) {
- set_target(*stub);
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
- }
- // Get the string if we have a string wrapper object.
- String* string = String::cast(JSValue::cast(*object)->value());
- return Smi::FromInt(string->length());
- }
-
// Use specialized code for getting prototype of functions.
if (object->IsJSFunction() &&
- name->Equals(isolate()->heap()->prototype_string()) &&
+ String::Equals(isolate()->factory()->prototype_string(), name) &&
Handle<JSFunction>::cast(object)->should_have_prototype()) {
Handle<Code> stub;
if (state() == UNINITIALIZED) {
stub = pre_monomorphic_stub();
} else if (state() == PREMONOMORPHIC) {
- FunctionPrototypeStub function_prototype_stub(kind());
- stub = function_prototype_stub.GetCode(isolate());
+ FunctionPrototypeStub function_prototype_stub(isolate(), kind());
+ stub = function_prototype_stub.GetCode();
} else if (state() != MEGAMORPHIC) {
ASSERT(state() != GENERIC);
stub = megamorphic_stub();
@@ -905,7 +606,7 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
set_target(*stub);
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
}
- return *Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object));
+ return Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object));
}
}
@@ -915,7 +616,13 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
// Rewrite to the generic keyed load stub.
if (FLAG_use_ic) set_target(*generic_stub());
- return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ Runtime::GetElementOrCharAt(isolate(), object, index),
+ Object);
+ return result;
}
bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
@@ -935,18 +642,18 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
// Update inline cache and stub cache.
if (use_ic) UpdateCaches(&lookup, object, name);
- PropertyAttributes attr;
// Get the property.
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- // If the property is not present, check if we need to throw an
- // exception.
+ LookupIterator it(object, name);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result, Object::GetProperty(&it), Object);
+ // If the property is not present, check if we need to throw an exception.
if ((lookup.IsInterceptor() || lookup.IsHandler()) &&
- attr == ABSENT && IsUndeclaredGlobal(object)) {
+ !it.IsFound() && IsUndeclaredGlobal(object)) {
return ReferenceError("not_defined", name);
}
- return *result;
+
+ return result;
}
@@ -964,35 +671,41 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
}
-bool IC::UpdatePolymorphicIC(Handle<Type> type,
+bool IC::UpdatePolymorphicIC(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code) {
if (!code->is_handler()) return false;
TypeHandleList types;
CodeHandleList handlers;
- int number_of_valid_types;
- int handler_to_overwrite = -1;
-
- target()->FindAllTypes(&types);
+ TargetTypes(&types);
int number_of_types = types.length();
- number_of_valid_types = number_of_types;
+ int deprecated_types = 0;
+ int handler_to_overwrite = -1;
for (int i = 0; i < number_of_types; i++) {
- Handle<Type> current_type = types.at(i);
- // Filter out deprecated maps to ensure their instances get migrated.
- if (current_type->IsClass() && current_type->AsClass()->is_deprecated()) {
- number_of_valid_types--;
- // If the receiver type is already in the polymorphic IC, this indicates
- // there was a prototype chain failure. In that case, just overwrite the
- // handler.
- } else if (type->IsCurrently(current_type)) {
- ASSERT(handler_to_overwrite == -1);
- number_of_valid_types--;
+ Handle<HeapType> current_type = types.at(i);
+ if (current_type->IsClass() &&
+ current_type->AsClass()->Map()->is_deprecated()) {
+ // Filter out deprecated maps to ensure their instances get migrated.
+ ++deprecated_types;
+ } else if (type->NowIs(current_type)) {
+ // If the receiver type is already in the polymorphic IC, this indicates
+ // there was a prototype chain failure. In that case, just overwrite the
+ // handler.
+ handler_to_overwrite = i;
+ } else if (handler_to_overwrite == -1 &&
+ current_type->IsClass() &&
+ type->IsClass() &&
+ IsTransitionOfMonomorphicTarget(*current_type->AsClass()->Map(),
+ *type->AsClass()->Map())) {
handler_to_overwrite = i;
}
}
+ int number_of_valid_types =
+ number_of_types - deprecated_types - (handler_to_overwrite != -1);
+
if (number_of_valid_types >= 4) return false;
if (number_of_types == 0) return false;
if (!target()->FindHandlers(&handlers, types.length())) return false;
@@ -1000,51 +713,69 @@ bool IC::UpdatePolymorphicIC(Handle<Type> type,
number_of_valid_types++;
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, code);
+ if (!type->NowIs(types.at(handler_to_overwrite))) {
+ types.Set(handler_to_overwrite, type);
+ }
} else {
types.Add(type);
handlers.Add(code);
}
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &types, &handlers, number_of_valid_types, name, extra_ic_state());
+ kind(), &types, &handlers, number_of_valid_types, name, extra_ic_state());
set_target(*ic);
return true;
}
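
The accounting above is worth spelling out: deprecated entries are ignored, an entry chosen for overwrite is not double-counted, and the IC gives up once the valid count would exceed four. A standalone sketch of just that arithmetic (hypothetical flags replace the HeapType comparisons, and the transition-of-monomorphic-target branch is omitted):

#include <vector>

struct CachedType {
  bool deprecated;        // map is deprecated; instances will migrate
  bool matches_incoming;  // the incoming type NowIs() this entry
};

// Returns true when the polymorphic IC can absorb one more handler.
bool CanStayPolymorphic(const std::vector<CachedType>& types) {
  int deprecated = 0;
  int handler_to_overwrite = -1;
  for (int i = 0; i < static_cast<int>(types.size()); ++i) {
    if (types[i].deprecated) ++deprecated;
    else if (types[i].matches_incoming) handler_to_overwrite = i;
  }
  int valid = static_cast<int>(types.size()) - deprecated -
              (handler_to_overwrite != -1 ? 1 : 0);
  return valid < 4 && !types.empty();  // mirrors the two bail-outs above
}
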
-Handle<Type> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
- Type* type = object->IsJSGlobalObject()
- ? Type::Constant(Handle<JSGlobalObject>::cast(object))
- : Type::OfCurrently(object);
- return handle(type, isolate);
+Handle<HeapType> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
+ return object->IsJSGlobalObject()
+ ? HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate)
+ : HeapType::NowOf(object, isolate);
}
-Handle<Map> IC::TypeToMap(Type* type, Isolate* isolate) {
- if (type->Is(Type::Number())) return isolate->factory()->heap_number_map();
- if (type->Is(Type::Boolean())) return isolate->factory()->oddball_map();
+Handle<Map> IC::TypeToMap(HeapType* type, Isolate* isolate) {
+ if (type->Is(HeapType::Number()))
+ return isolate->factory()->heap_number_map();
+ if (type->Is(HeapType::Boolean())) return isolate->factory()->boolean_map();
if (type->IsConstant()) {
- return handle(Handle<JSGlobalObject>::cast(type->AsConstant())->map());
+ return handle(
+ Handle<JSGlobalObject>::cast(type->AsConstant()->Value())->map());
}
ASSERT(type->IsClass());
- return type->AsClass();
+ return type->AsClass()->Map();
}
-Type* IC::MapToType(Handle<Map> map) {
- if (map->instance_type() == HEAP_NUMBER_TYPE) return Type::Number();
- // The only oddballs that can be recorded in ICs are booleans.
- if (map->instance_type() == ODDBALL_TYPE) return Type::Boolean();
- return Type::Class(map);
+template <class T>
+typename T::TypeHandle IC::MapToType(Handle<Map> map,
+ typename T::Region* region) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
+ return T::Number(region);
+ } else if (map->instance_type() == ODDBALL_TYPE) {
+ // The only oddballs that can be recorded in ICs are booleans.
+ return T::Boolean(region);
+ } else {
+ return T::Class(map, region);
+ }
}
-void IC::UpdateMonomorphicIC(Handle<Type> type,
+template
+Type* IC::MapToType<Type>(Handle<Map> map, Zone* zone);
+
+
+template
+Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map, Isolate* region);
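// Note (editorial): MapToType is templated because the zone-allocated Type
// and the heap-allocated HeapType share one classification but differ in
// their allocation "Region" (Zone* vs. Isolate*), as the two explicit
// instantiations above show. Hypothetical call sites:
//
//   Type* t = IC::MapToType<Type>(map, zone);
//   Handle<HeapType> ht = IC::MapToType<HeapType>(map, isolate);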
+
+
+void IC::UpdateMonomorphicIC(Handle<HeapType> type,
Handle<Code> handler,
Handle<String> name) {
if (!handler->is_handler()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
- name, type, handler, extra_ic_state());
+ kind(), name, type, handler, extra_ic_state());
set_target(*ic);
}
@@ -1052,7 +783,7 @@ void IC::UpdateMonomorphicIC(Handle<Type> type,
void IC::CopyICToMegamorphicCache(Handle<String> name) {
TypeHandleList types;
CodeHandleList handlers;
- target()->FindAllTypes(&types);
+ TargetTypes(&types);
if (!target()->FindHandlers(&handlers, types.length())) return;
for (int i = 0; i < types.length(); i++) {
UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i));
@@ -1060,23 +791,22 @@ void IC::CopyICToMegamorphicCache(Handle<String> name) {
}
-bool IC::IsTransitionOfMonomorphicTarget(Type* type) {
- if (!type->IsClass()) return false;
- Map* receiver_map = *type->AsClass();
- Map* current_map = target()->FindFirstMap();
- ElementsKind receiver_elements_kind = receiver_map->elements_kind();
+bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
+ if (source_map == NULL) return true;
+ if (target_map == NULL) return false;
+ ElementsKind target_elements_kind = target_map->elements_kind();
bool more_general_transition =
IsMoreGeneralElementsKindTransition(
- current_map->elements_kind(), receiver_elements_kind);
+ source_map->elements_kind(), target_elements_kind);
Map* transitioned_map = more_general_transition
- ? current_map->LookupElementsTransitionMap(receiver_elements_kind)
+ ? source_map->LookupElementsTransitionMap(target_elements_kind)
: NULL;
- return transitioned_map == receiver_map;
+ return transitioned_map == target_map;
}
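// Sketch (editorial): the predicate above keeps a monomorphic target only
// when source_map reaches target_map through a *more general* ElementsKind
// and the transition tree actually links the two maps (assumed semantics of
// LookupElementsTransitionMap). Schematically:
//
//   IsTransitionOfMonomorphicTarget(map<FAST_SMI_ELEMENTS>,
//                                   map<FAST_ELEMENTS>)      // true if linked
//   IsTransitionOfMonomorphicTarget(map<FAST_ELEMENTS>,
//                                   map<FAST_SMI_ELEMENTS>)  // always false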
-void IC::PatchCache(Handle<Type> type,
+void IC::PatchCache(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code) {
switch (state()) {
@@ -1085,19 +815,7 @@ void IC::PatchCache(Handle<Type> type,
case MONOMORPHIC_PROTOTYPE_FAILURE:
UpdateMonomorphicIC(type, code, name);
break;
- case MONOMORPHIC: {
- // For now, call stubs are allowed to rewrite to the same stub. This
- // happens e.g., when the field does not contain a function.
- ASSERT(target()->is_call_stub() ||
- target()->is_keyed_call_stub() ||
- !target().is_identical_to(code));
- Code* old_handler = target()->FindFirstHandler();
- if (old_handler == *code && IsTransitionOfMonomorphicTarget(*type)) {
- UpdateMonomorphicIC(type, code, name);
- break;
- }
- // Fall through.
- }
+ case MONOMORPHIC: // Fall through.
case POLYMORPHIC:
if (!target()->is_keyed_stub()) {
if (UpdatePolymorphicIC(type, name, code)) break;
@@ -1117,15 +835,30 @@ void IC::PatchCache(Handle<Type> type,
}
-Handle<Code> LoadIC::SimpleFieldLoad(int offset,
- bool inobject,
- Representation representation) {
+Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
+ ExtraICState extra_state) {
+ return isolate->stub_cache()->ComputeLoad(UNINITIALIZED, extra_state);
+}
+
+
+Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate,
+ ExtraICState extra_state) {
+ return isolate->stub_cache()->ComputeLoad(PREMONOMORPHIC, extra_state);
+}
+
+
+Handle<Code> LoadIC::megamorphic_stub() {
+ return isolate()->stub_cache()->ComputeLoad(MEGAMORPHIC, extra_ic_state());
+}
+
+
+Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) {
if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(inobject, offset, representation);
- return stub.GetCode(isolate());
+ LoadFieldStub stub(isolate(), index);
+ return stub.GetCode();
} else {
- KeyedLoadFieldStub stub(inobject, offset, representation);
- return stub.GetCode(isolate());
+ KeyedLoadFieldStub stub(isolate(), index);
+ return stub.GetCode();
}
}
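// Note (editorial): FieldIndex now bundles the offset, the in-object flag,
// and the representation that used to be separate parameters, so kind() only
// selects the stub flavor. Hypothetical use, mirroring the string-length
// path below:
//
//   FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
//   Handle<Code> handler = SimpleFieldLoad(index);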
@@ -1142,7 +875,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
return;
}
- Handle<Type> type = CurrentTypeOf(object, isolate());
+ Handle<HeapType> type = CurrentTypeOf(object, isolate());
Handle<Code> code;
if (!lookup->IsCacheable()) {
// Bail out if the result is not cacheable.
@@ -1162,7 +895,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
}
-void IC::UpdateMegamorphicCache(Type* type, Name* name, Code* code) {
+void IC::UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {
// Cache code holding map should be consistent with
// GenerateMonomorphicCacheProbe.
Map* map = *TypeToMap(type, isolate());
@@ -1179,8 +912,11 @@ Handle<Code> IC::ComputeHandler(LookupResult* lookup,
isolate(), *object, cache_holder));
Handle<Code> code = isolate()->stub_cache()->FindHandler(
- name, handle(stub_holder->map()), kind(), cache_holder);
- if (!code.is_null()) return code;
+ name, handle(stub_holder->map()), kind(), cache_holder,
+ lookup->holder()->HasFastProperties() ? Code::FAST : Code::NORMAL);
+ if (!code.is_null()) {
+ return code;
+ }
code = CompileHandler(lookup, object, name, value, cache_holder);
ASSERT(code->is_handler());
@@ -1198,22 +934,32 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
Handle<String> name,
Handle<Object> unused,
InlineCacheHolderFlag cache_holder) {
- if (object->IsString() && name->Equals(isolate()->heap()->length_string())) {
- int length_index = String::kLengthOffset / kPointerSize;
- return SimpleFieldLoad(length_index);
+ if (object->IsString() &&
+ String::Equals(isolate()->factory()->length_string(), name)) {
+ FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
+ return SimpleFieldLoad(index);
}
- Handle<Type> type = CurrentTypeOf(object, isolate());
+ if (object->IsStringWrapper() &&
+ String::Equals(isolate()->factory()->length_string(), name)) {
+ if (kind() == Code::LOAD_IC) {
+ StringLengthStub string_length_stub(isolate());
+ return string_length_stub.GetCode();
+ } else {
+ KeyedStringLengthStub string_length_stub(isolate());
+ return string_length_stub.GetCode();
+ }
+ }
+
+ Handle<HeapType> type = CurrentTypeOf(object, isolate());
Handle<JSObject> holder(lookup->holder());
LoadStubCompiler compiler(isolate(), kNoExtraICState, cache_holder, kind());
switch (lookup->type()) {
case FIELD: {
- PropertyIndex field = lookup->GetFieldIndex();
+ FieldIndex field = lookup->GetFieldIndex();
if (object.is_identical_to(holder)) {
- return SimpleFieldLoad(field.translate(holder),
- field.is_inobject(holder),
- lookup->representation());
+ return SimpleFieldLoad(field);
}
return compiler.CompileLoadField(
type, holder, name, field, lookup->representation());
@@ -1250,9 +996,14 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<Map> map(receiver->map());
+ Handle<HeapType> type = IC::MapToType<HeapType>(
+ handle(receiver->map()), isolate());
int object_offset;
- if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) {
- return SimpleFieldLoad(object_offset / kPointerSize);
+ if (Accessors::IsJSObjectFieldAccessor<HeapType>(
+ type, name, &object_offset)) {
+ FieldIndex index = FieldIndex::ForInObjectOffset(
+ object_offset, receiver->map());
+ return SimpleFieldLoad(index);
}
}
@@ -1272,23 +1023,21 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
if (!object->IsJSObject() &&
!function->IsBuiltin() &&
- function->shared()->is_classic_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
+ function->shared()->strict_mode() == SLOPPY) {
+ // Calling sloppy non-builtins with a value as the receiver
// requires boxing.
break;
}
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*object)) {
+ call_optimization.IsCompatibleReceiver(object, holder)) {
return compiler.CompileLoadCallback(
type, holder, name, call_optimization);
}
return compiler.CompileLoadViaGetter(type, holder, name, function);
}
// TODO(dcarney): Handle correctly.
- if (callback->IsDeclaredAccessorInfo()) break;
- ASSERT(callback->IsForeign());
- // No IC support for old-style native accessors.
+ ASSERT(callback->IsDeclaredAccessorInfo());
break;
}
case INTERCEPTOR:
@@ -1333,19 +1082,13 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
Handle<Map> receiver_map(receiver->map(), isolate());
MapHandleList target_receiver_maps;
- if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) {
- // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
- // yet will do so and stay there.
- return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
- }
-
if (target().is_identical_to(string_stub())) {
target_receiver_maps.Add(isolate()->factory()->string_map());
} else {
- target()->FindAllMaps(&target_receiver_maps);
- if (target_receiver_maps.length() == 0) {
- return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
- }
+ TargetMaps(&target_receiver_maps);
+ }
+ if (target_receiver_maps.length() == 0) {
+ return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
// The first time a receiver is seen that is a transitioned version of the
@@ -1385,50 +1128,65 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
}
-MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) {
+MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
+ Handle<Object> key) {
if (MigrateDeprecated(object)) {
- return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ Runtime::GetObjectProperty(isolate(), object, key),
+ Object);
+ return result;
}
- MaybeObject* maybe_object = NULL;
+ Handle<Object> load_handle;
Handle<Code> stub = generic_stub();
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
+ // Check for non-string values that can be converted into an
+  // internalized string directly or are representable as a smi.
key = TryConvertKey(key, isolate());
if (key->IsInternalizedString()) {
- maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
- if (maybe_object->IsFailure()) return maybe_object;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ load_handle,
+ LoadIC::Load(object, Handle<String>::cast(key)),
+ Object);
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
- ASSERT(!object->IsJSGlobalProxy());
if (object->IsString() && key->IsNumber()) {
if (state() == UNINITIALIZED) stub = string_stub();
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
+ isolate()->heap()->sloppy_arguments_elements_map()) {
+ stub = sloppy_arguments_stub();
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
- } else if (!key->ToSmi()->IsFailure() &&
- (!target().is_identical_to(non_strict_arguments_stub()))) {
+ } else if (!Object::ToSmi(isolate(), key).is_null() &&
+ (!target().is_identical_to(sloppy_arguments_stub()))) {
stub = LoadElementStub(receiver);
}
}
}
if (!is_target_set()) {
- if (*stub == *generic_stub()) {
+ Code* generic = *generic_stub();
+ if (*stub == generic) {
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
}
- ASSERT(!stub.is_null());
set_target(*stub);
TRACE_IC("LoadIC", key);
}
- if (maybe_object != NULL) return maybe_object;
- return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
+ if (!load_handle.is_null()) return load_handle;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ Runtime::GetObjectProperty(isolate(), object, key),
+ Object);
+ return result;
}
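// Sketch (editorial) of the MaybeHandle protocol this function now follows:
// an empty handle signals a pending exception instead of a Failure object.
// A minimal caller, assuming only what MaybeHandle guarantees:
//
//   Handle<Object> result;
//   if (!ic.Load(object, key).ToHandle(&result)) {
//     return;  // exception already scheduled on the isolate
//   }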
@@ -1438,24 +1196,22 @@ static bool LookupForWrite(Handle<JSObject> receiver,
LookupResult* lookup,
IC* ic) {
Handle<JSObject> holder = receiver;
- receiver->Lookup(*name, lookup);
+ receiver->Lookup(name, lookup);
if (lookup->IsFound()) {
- if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
-
- if (lookup->holder() == *receiver) {
- if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) {
- receiver->LocalLookupRealNamedProperty(*name, lookup);
- return lookup->IsFound() &&
- !lookup->IsReadOnly() &&
- lookup->CanHoldValue(value) &&
- lookup->IsCacheable();
- }
- return lookup->CanHoldValue(value);
+ if (lookup->IsInterceptor() && !HasInterceptorSetter(lookup->holder())) {
+ receiver->LookupOwnRealNamedProperty(name, lookup);
+ if (!lookup->IsFound()) return false;
}
+ if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
+ if (lookup->holder() == *receiver) return lookup->CanHoldValue(value);
if (lookup->IsPropertyCallbacks()) return true;
- // JSGlobalProxy always goes via the runtime, so it's safe to cache.
- if (receiver->IsJSGlobalProxy()) return true;
+ // JSGlobalProxy either stores on the global object in the prototype, or
+ // goes into the runtime if access checks are needed, so this is always
+ // safe.
+ if (receiver->IsJSGlobalProxy()) {
+ return lookup->holder() == receiver->GetPrototype();
+ }
// Currently normal holders in the prototype chain are not supported. They
// would require a runtime positive lookup and verification that the details
// have not changed.
@@ -1469,9 +1225,7 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// chain check. This avoids a double lookup, but requires us to pass in the
// receiver when trying to fetch extra information from the transition.
receiver->map()->LookupTransition(*holder, *name, lookup);
- if (!lookup->IsTransition()) return false;
- PropertyDetails target_details = lookup->GetTransitionDetails();
- if (target_details.IsReadOnly()) return false;
+ if (!lookup->IsTransition() || lookup->IsReadOnly()) return false;
// If the value that's being stored does not fit in the field that the
// instance would transition to, create a new transition that fits the value.
@@ -1480,30 +1234,38 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// Ensure the instance and its map were migrated before trying to update the
// transition target.
ASSERT(!receiver->map()->is_deprecated());
- if (!value->FitsRepresentation(target_details.representation())) {
+ if (!lookup->CanHoldValue(value)) {
Handle<Map> target(lookup->GetTransitionTarget());
+ Representation field_representation = value->OptimalRepresentation();
+ Handle<HeapType> field_type = value->OptimalType(
+ lookup->isolate(), field_representation);
Map::GeneralizeRepresentation(
target, target->LastAdded(),
- value->OptimalRepresentation(), FORCE_FIELD);
+ field_representation, field_type, FORCE_FIELD);
// Lookup the transition again since the transition tree may have changed
// entirely by the migration above.
receiver->map()->LookupTransition(*holder, *name, lookup);
if (!lookup->IsTransition()) return false;
- ic->MarkMonomorphicPrototypeFailure();
+ return ic->TryMarkMonomorphicPrototypeFailure(name);
}
+
return true;
}
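// Illustration (editorial) of the generalization step above: if a transition
// target records a field as Smi and a heap number is stored next, the target
// is widened first and the transition is looked up again, because
// GeneralizeRepresentation may rebuild the whole transition tree. Roughly:
//
//   o.x = 1;    // transition target records Representation::Smi()
//   o.x = 1.5;  // CanHoldValue fails -> generalize to Double, re-lookup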
-MaybeObject* StoreIC::Store(Handle<Object> object,
- Handle<String> name,
- Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode) {
+MaybeHandle<Object> StoreIC::Store(Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode) {
if (MigrateDeprecated(object) || object->IsJSProxy()) {
- Handle<Object> result = JSReceiver::SetProperty(
- Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode());
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *result;
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ JSReceiver::SetProperty(receiver, name, value, NONE, strict_mode()),
+ Object);
+ return result;
}
// If the object is undefined or null it's illegal to try to set any
@@ -1513,60 +1275,47 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
}
// The length property of string values is read-only. Throw in strict mode.
- if (strict_mode() == kStrictMode && object->IsString() &&
- name->Equals(isolate()->heap()->length_string())) {
+ if (strict_mode() == STRICT && object->IsString() &&
+ String::Equals(isolate()->factory()->length_string(), name)) {
return TypeError("strict_read_only_property", object, name);
}
// Ignore other stores where the receiver is not a JSObject.
// TODO(1475): Must check prototype chains of object wrappers.
- if (!object->IsJSObject()) return *value;
+ if (!object->IsJSObject()) return value;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- Handle<Object> result =
- JSObject::SetElement(receiver, index, value, NONE, strict_mode());
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *value;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ JSObject::SetElement(receiver, index, value, NONE, strict_mode()),
+ Object);
+ return value;
}
// Observed objects are always modified through the runtime.
- if (FLAG_harmony_observation && receiver->map()->is_observed()) {
- Handle<Object> result = JSReceiver::SetProperty(
- receiver, name, value, NONE, strict_mode(), store_mode);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *result;
- }
-
- // Use specialized code for setting the length of arrays with fast
- // properties. Slow properties might indicate redefinition of the length
- // property. Note that when redefined using Object.freeze, it's possible
- // to have fast properties but a read-only length.
- if (FLAG_use_ic &&
- receiver->IsJSArray() &&
- name->Equals(isolate()->heap()->length_string()) &&
- Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
- receiver->HasFastProperties() &&
- !receiver->map()->is_frozen()) {
- Handle<Code> stub =
- StoreArrayLengthStub(kind(), strict_mode()).GetCode(isolate());
- set_target(*stub);
- TRACE_IC("StoreIC", name);
- Handle<Object> result = JSReceiver::SetProperty(
- receiver, name, value, NONE, strict_mode(), store_mode);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *result;
+ if (receiver->map()->is_observed()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode),
+ Object);
+ return result;
}
LookupResult lookup(isolate());
bool can_store = LookupForWrite(receiver, name, value, &lookup, this);
if (!can_store &&
- strict_mode() == kStrictMode &&
+ strict_mode() == STRICT &&
!(lookup.IsProperty() && lookup.IsReadOnly()) &&
- IsUndeclaredGlobal(object)) {
+ object->IsGlobalObject()) {
// Strict mode doesn't allow setting non-existent global property.
return ReferenceError("not_defined", name);
}
@@ -1577,8 +1326,7 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
TRACE_IC("StoreIC", name);
} else if (can_store) {
UpdateCaches(&lookup, receiver, name, value);
- } else if (!name->IsCacheable(isolate()) ||
- lookup.IsNormal() ||
+ } else if (lookup.IsNormal() ||
(lookup.IsField() && lookup.CanHoldValue(value))) {
Handle<Code> stub = generic_stub();
set_target(*stub);
@@ -1586,10 +1334,57 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
}
// Set the property.
- Handle<Object> result = JSReceiver::SetProperty(
- receiver, name, value, NONE, strict_mode(), store_mode);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *result;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode),
+ Object);
+ return result;
+}
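// Note (editorial): ASSIGN_RETURN_ON_EXCEPTION, used on every store path
// above, expands roughly to the following (simplified sketch, not the actual
// macro body):
//
//   MaybeHandle<Object> maybe = JSReceiver::SetProperty(...);
//   if (!maybe.ToHandle(&result)) return MaybeHandle<Object>();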
+
+
+void CallIC::State::Print(StringStream* stream) const {
+  stream->Add("(args(%d), ", argc_);
+  stream->Add("%s, ", call_type_ == CallIC::METHOD ? "METHOD" : "FUNCTION");
+}
+
+
+Handle<Code> CallIC::initialize_stub(Isolate* isolate,
+ int argc,
+ CallType call_type) {
+ CallICStub stub(isolate, State(argc, call_type));
+ Handle<Code> code = stub.GetCode();
+ return code;
+}
+
+
+Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
+ StrictMode strict_mode) {
+ ExtraICState extra_state = ComputeExtraICState(strict_mode);
+ Handle<Code> ic = isolate->stub_cache()->ComputeStore(
+ UNINITIALIZED, extra_state);
+ return ic;
+}
+
+
+Handle<Code> StoreIC::megamorphic_stub() {
+ return isolate()->stub_cache()->ComputeStore(MEGAMORPHIC, extra_ic_state());
+}
+
+
+Handle<Code> StoreIC::generic_stub() const {
+ return isolate()->stub_cache()->ComputeStore(GENERIC, extra_ic_state());
+}
+
+
+Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
+ StrictMode strict_mode) {
+ ExtraICState state = ComputeExtraICState(strict_mode);
+ return isolate->stub_cache()->ComputeStore(PREMONOMORPHIC, state);
}
@@ -1614,89 +1409,88 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
Handle<String> name,
Handle<Object> value,
InlineCacheHolderFlag cache_holder) {
- if (object->IsJSGlobalProxy()) return slow_stub();
+ if (object->IsAccessCheckNeeded()) return slow_stub();
ASSERT(cache_holder == OWN_MAP);
// This is currently guaranteed by checks in StoreIC::Store.
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<JSObject> holder(lookup->holder());
// Handlers do not use strict mode.
- StoreStubCompiler compiler(isolate(), kNonStrictMode, kind());
- switch (lookup->type()) {
- case FIELD:
- return compiler.CompileStoreField(receiver, lookup, name);
- case TRANSITION: {
- // Explicitly pass in the receiver map since LookupForWrite may have
- // stored something else than the receiver in the holder.
- Handle<Map> transition(lookup->GetTransitionTarget());
- PropertyDetails details = transition->GetLastDescriptorDetails();
-
- if (details.type() == CALLBACKS || details.attributes() != NONE) break;
-
+ StoreStubCompiler compiler(isolate(), SLOPPY, kind());
+ if (lookup->IsTransition()) {
+ // Explicitly pass in the receiver map since LookupForWrite may have
+    // stored something other than the receiver in the holder.
+ Handle<Map> transition(lookup->GetTransitionTarget());
+ PropertyDetails details = lookup->GetPropertyDetails();
+
+ if (details.type() != CALLBACKS && details.attributes() == NONE) {
return compiler.CompileStoreTransition(
receiver, lookup, transition, name);
}
- case NORMAL:
- if (kind() == Code::KEYED_STORE_IC) break;
- if (receiver->IsGlobalObject()) {
- // The stub generated for the global object picks the value directly
- // from the property cell. So the property must be directly on the
- // global object.
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
- Handle<Type> union_type = PropertyCell::UpdatedType(cell, value);
- StoreGlobalStub stub(union_type->IsConstant());
-
- Handle<Code> code = stub.GetCodeCopyFromTemplate(
- isolate(), receiver->map(), *cell);
- // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
- HeapObject::UpdateMapCodeCache(receiver, name, code);
- return code;
- }
- ASSERT(holder.is_identical_to(receiver));
- return isolate()->builtins()->StoreIC_Normal();
- case CALLBACKS: {
- if (kind() == Code::KEYED_STORE_IC) break;
- Handle<Object> callback(lookup->GetCallbackObject(), isolate());
- if (callback->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(callback);
- if (v8::ToCData<Address>(info->setter()) == 0) break;
- if (!holder->HasFastProperties()) break;
- if (!info->IsCompatibleReceiver(*receiver)) break;
- return compiler.CompileStoreCallback(receiver, holder, name, info);
- } else if (callback->IsAccessorPair()) {
- Handle<Object> setter(
- Handle<AccessorPair>::cast(callback)->setter(), isolate());
- if (!setter->IsJSFunction()) break;
- if (holder->IsGlobalObject()) break;
- if (!holder->HasFastProperties()) break;
- Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
- CallOptimization call_optimization(function);
- if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver)) {
- return compiler.CompileStoreCallback(
- receiver, holder, name, call_optimization);
+ } else {
+ switch (lookup->type()) {
+ case FIELD:
+ return compiler.CompileStoreField(receiver, lookup, name);
+ case NORMAL:
+ if (kind() == Code::KEYED_STORE_IC) break;
+ if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) {
+ // The stub generated for the global object picks the value directly
+ // from the property cell. So the property must be directly on the
+ // global object.
+ Handle<GlobalObject> global = receiver->IsJSGlobalProxy()
+ ? handle(GlobalObject::cast(receiver->GetPrototype()))
+ : Handle<GlobalObject>::cast(receiver);
+ Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
+ Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
+ StoreGlobalStub stub(
+ isolate(), union_type->IsConstant(), receiver->IsJSGlobalProxy());
+ Handle<Code> code = stub.GetCodeCopyFromTemplate(global, cell);
+ // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
+ }
+ ASSERT(holder.is_identical_to(receiver));
+ return isolate()->builtins()->StoreIC_Normal();
+ case CALLBACKS: {
+ Handle<Object> callback(lookup->GetCallbackObject(), isolate());
+ if (callback->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(callback);
+ if (v8::ToCData<Address>(info->setter()) == 0) break;
+ if (!holder->HasFastProperties()) break;
+ if (!info->IsCompatibleReceiver(*receiver)) break;
+ return compiler.CompileStoreCallback(receiver, holder, name, info);
+ } else if (callback->IsAccessorPair()) {
+ Handle<Object> setter(
+ Handle<AccessorPair>::cast(callback)->setter(), isolate());
+ if (!setter->IsJSFunction()) break;
+ if (holder->IsGlobalObject()) break;
+ if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(receiver, holder)) {
+ return compiler.CompileStoreCallback(
+ receiver, holder, name, call_optimization);
+ }
+ return compiler.CompileStoreViaSetter(
+ receiver, holder, name, Handle<JSFunction>::cast(setter));
}
- return compiler.CompileStoreViaSetter(
- receiver, holder, name, Handle<JSFunction>::cast(setter));
+ // TODO(dcarney): Handle correctly.
+ ASSERT(callback->IsDeclaredAccessorInfo());
+ break;
}
- // TODO(dcarney): Handle correctly.
- if (callback->IsDeclaredAccessorInfo()) break;
- ASSERT(callback->IsForeign());
- // No IC support for old-style native accessors.
- break;
+ case INTERCEPTOR:
+ if (kind() == Code::KEYED_STORE_IC) break;
+ ASSERT(HasInterceptorSetter(*holder));
+ return compiler.CompileStoreInterceptor(receiver, name);
+ case CONSTANT:
+ break;
+ case NONEXISTENT:
+ case HANDLER:
+ UNREACHABLE();
+ break;
}
- case INTERCEPTOR:
- if (kind() == Code::KEYED_STORE_IC) break;
- ASSERT(HasInterceptorSetter(*receiver));
- return compiler.CompileStoreInterceptor(receiver, name);
- case CONSTANT:
- break;
- case NONEXISTENT:
- case HANDLER:
- UNREACHABLE();
- break;
}
return slow_stub();
}
@@ -1713,24 +1507,16 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
}
Handle<Map> receiver_map(receiver->map(), isolate());
- if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) {
- // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
- // yet will do so and stay there.
- Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, store_mode);
+ MapHandleList target_receiver_maps;
+ TargetMaps(&target_receiver_maps);
+ if (target_receiver_maps.length() == 0) {
+ Handle<Map> monomorphic_map =
+ ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
return isolate()->stub_cache()->ComputeKeyedStoreElement(
monomorphic_map, strict_mode(), store_mode);
}
- MapHandleList target_receiver_maps;
- target()->FindAllMaps(&target_receiver_maps);
- if (target_receiver_maps.length() == 0) {
-    // In the case that a non-map-specific IC is installed (e.g. keyed
-    // stores into properties in dictionary mode), there will be no
-    // receiver maps in the target.
- return generic_stub();
- }
-
// There are several special cases where an IC that is MONOMORPHIC can still
// transition to a different GetNonTransitioningStoreMode IC that handles a
// superset of the original IC. Handle those here if the receiver map hasn't
@@ -1739,16 +1525,18 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
if (state() == MONOMORPHIC) {
- // If the "old" and "new" maps are in the same elements map family, stay
- // MONOMORPHIC and use the map for the most generic ElementsKind.
Handle<Map> transitioned_receiver_map = receiver_map;
if (IsTransitionStoreMode(store_mode)) {
transitioned_receiver_map =
- ComputeTransitionedMap(receiver, store_mode);
+ ComputeTransitionedMap(receiver_map, store_mode);
}
- if (receiver_map.is_identical_to(previous_receiver_map) ||
- IsTransitionOfMonomorphicTarget(MapToType(transitioned_receiver_map))) {
- // Element family is the same, use the "worst" case map.
+ if ((receiver_map.is_identical_to(previous_receiver_map) &&
+ IsTransitionStoreMode(store_mode)) ||
+ IsTransitionOfMonomorphicTarget(*previous_receiver_map,
+ *transitioned_receiver_map)) {
+ // If the "old" and "new" maps are in the same elements map family, or
+ // if they at least come from the same origin for a transitioning store,
+ // stay MONOMORPHIC and use the map for the most generic ElementsKind.
store_mode = GetNonTransitioningStoreMode(store_mode);
return isolate()->stub_cache()->ComputeKeyedStoreElement(
transitioned_receiver_map, strict_mode(), store_mode);
@@ -1772,7 +1560,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
if (IsTransitionStoreMode(store_mode)) {
Handle<Map> transitioned_receiver_map =
- ComputeTransitionedMap(receiver, store_mode);
+ ComputeTransitionedMap(receiver_map, store_mode);
map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps,
transitioned_receiver_map);
}
@@ -1809,7 +1597,8 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
if (store_mode != STANDARD_STORE) {
int external_arrays = 0;
for (int i = 0; i < target_receiver_maps.length(); ++i) {
- if (target_receiver_maps[i]->has_external_array_elements()) {
+ if (target_receiver_maps[i]->has_external_array_elements() ||
+ target_receiver_maps[i]->has_fixed_typed_array_elements()) {
external_arrays++;
}
}
@@ -1827,36 +1616,35 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
- Handle<JSObject> receiver,
+ Handle<Map> map,
KeyedAccessStoreMode store_mode) {
switch (store_mode) {
case STORE_TRANSITION_SMI_TO_OBJECT:
case STORE_TRANSITION_DOUBLE_TO_OBJECT:
case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
- return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
+ return Map::TransitionElementsTo(map, FAST_ELEMENTS);
case STORE_TRANSITION_SMI_TO_DOUBLE:
case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
- return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
+ return Map::TransitionElementsTo(map, FAST_DOUBLE_ELEMENTS);
case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- return JSObject::GetElementsTransitionMap(receiver,
- FAST_HOLEY_ELEMENTS);
+ return Map::TransitionElementsTo(map, FAST_HOLEY_ELEMENTS);
case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- return JSObject::GetElementsTransitionMap(receiver,
- FAST_HOLEY_DOUBLE_ELEMENTS);
+ return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS);
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
- ASSERT(receiver->map()->has_external_array_elements());
+ ASSERT(map->has_external_array_elements());
// Fall through
case STORE_NO_TRANSITION_HANDLE_COW:
case STANDARD_STORE:
case STORE_AND_GROW_NO_TRANSITION:
- return Handle<Map>(receiver->map(), isolate());
+ return map;
}
- return Handle<Map>::null();
+ UNREACHABLE();
+ return MaybeHandle<Map>().ToHandleChecked();
}
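// Summary (editorial) of the mapping above (same semantics as before, now
// keyed on the map instead of the receiver):
//   *_SMI_TO_OBJECT, *_DOUBLE_TO_OBJECT             -> FAST_ELEMENTS
//   *_SMI_TO_DOUBLE                                 -> FAST_DOUBLE_ELEMENTS
//   *_HOLEY_SMI_TO_OBJECT, *_HOLEY_DOUBLE_TO_OBJECT -> FAST_HOLEY_ELEMENTS
//   *_HOLEY_SMI_TO_DOUBLE                           -> FAST_HOLEY_DOUBLE_ELEMENTS
//   non-transitioning modes                         -> the incoming map itself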
@@ -1873,12 +1661,13 @@ bool IsOutOfBoundsAccess(Handle<JSObject> receiver,
KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
Handle<Object> key,
Handle<Object> value) {
- ASSERT(!key->ToSmi()->IsFailure());
- Smi* smi_key = NULL;
- key->ToSmi()->To(&smi_key);
+ Handle<Smi> smi_key = Object::ToSmi(isolate(), key).ToHandleChecked();
int index = smi_key->value();
bool oob_access = IsOutOfBoundsAccess(receiver, index);
- bool allow_growth = receiver->IsJSArray() && oob_access;
+ // Don't consider this a growing store if the store would send the receiver to
+ // dictionary mode.
+ bool allow_growth = receiver->IsJSArray() && oob_access &&
+ !receiver->WouldConvertToSlowElements(key);
if (allow_growth) {
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiElements()) {
@@ -1945,35 +1734,42 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
}
-MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value) {
+MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value) {
if (MigrateDeprecated(object)) {
- Handle<Object> result = Runtime::SetObjectProperty(isolate(), object,
- key,
- value,
- NONE,
- strict_mode());
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *result;
- }
-
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ Runtime::SetObjectProperty(
+ isolate(), object, key, value, NONE, strict_mode()),
+ Object);
+ return result;
+ }
+
+ // Check for non-string values that can be converted into an
+  // internalized string directly or are representable as a smi.
key = TryConvertKey(key, isolate());
- MaybeObject* maybe_object = NULL;
+ Handle<Object> store_handle;
Handle<Code> stub = generic_stub();
if (key->IsInternalizedString()) {
- maybe_object = StoreIC::Store(object,
- Handle<String>::cast(key),
- value,
- JSReceiver::MAY_BE_STORE_FROM_KEYED);
- if (maybe_object->IsFailure()) return maybe_object;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ store_handle,
+ StoreIC::Store(object,
+ Handle<String>::cast(key),
+ value,
+ JSReceiver::MAY_BE_STORE_FROM_KEYED),
+ Object);
} else {
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
- !(FLAG_harmony_observation && object->IsJSObject() &&
+ bool use_ic = FLAG_use_ic &&
+ !object->IsStringWrapper() &&
+ !object->IsAccessCheckNeeded() &&
+ !object->IsJSGlobalProxy() &&
+ !(object->IsJSObject() &&
JSObject::cast(*object)->map()->is_observed());
if (use_ic && !object->IsSmi()) {
// Don't use ICs for maps of the objects in Array's prototype chain. We
@@ -1984,16 +1780,18 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
}
if (use_ic) {
- ASSERT(!object->IsJSGlobalProxy());
+ ASSERT(!object->IsAccessCheckNeeded());
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
+ bool key_is_smi_like = !Object::ToSmi(isolate(), key).is_null();
if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
+ isolate()->heap()->sloppy_arguments_elements_map()) {
+ if (strict_mode() == SLOPPY) {
+ stub = sloppy_arguments_stub();
+ }
} else if (key_is_smi_like &&
- !(target().is_identical_to(non_strict_arguments_stub()))) {
+ !(target().is_identical_to(sloppy_arguments_stub()))) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
@@ -2008,8 +1806,18 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
}
}
+ if (store_handle.is_null()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ store_handle,
+ Runtime::SetObjectProperty(
+ isolate(), object, key, value, NONE, strict_mode()),
+ Object);
+ }
+
if (!is_target_set()) {
- if (*stub == *generic_stub()) {
+ Code* generic = *generic_stub();
+ if (*stub == generic) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
}
ASSERT(!stub.is_null());
@@ -2017,13 +1825,100 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
TRACE_IC("StoreIC", key);
}
- if (maybe_object) return maybe_object;
- Handle<Object> result = Runtime::SetObjectProperty(isolate(), object, key,
- value,
- NONE,
- strict_mode());
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *result;
+ return store_handle;
+}
+
+
+CallIC::State::State(ExtraICState extra_ic_state)
+ : argc_(ArgcBits::decode(extra_ic_state)),
+ call_type_(CallTypeBits::decode(extra_ic_state)) {
+}
+
+
+ExtraICState CallIC::State::GetExtraICState() const {
+ ExtraICState extra_ic_state =
+ ArgcBits::encode(argc_) |
+ CallTypeBits::encode(call_type_);
+ return extra_ic_state;
+}
+
+
+bool CallIC::DoCustomHandler(Handle<Object> receiver,
+ Handle<Object> function,
+ Handle<FixedArray> vector,
+ Handle<Smi> slot,
+ const State& state) {
+ ASSERT(FLAG_use_ic && function->IsJSFunction());
+
+ // Are we the array function?
+ Handle<JSFunction> array_function = Handle<JSFunction>(
+ isolate()->context()->native_context()->array_function(), isolate());
+ if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
+ // Alter the slot.
+ Handle<AllocationSite> new_site = isolate()->factory()->NewAllocationSite();
+ vector->set(slot->value(), *new_site);
+ CallIC_ArrayStub stub(isolate(), state);
+ set_target(*stub.GetCode());
+ Handle<String> name;
+ if (array_function->shared()->name()->IsString()) {
+ name = Handle<String>(String::cast(array_function->shared()->name()),
+ isolate());
+ }
+
+ TRACE_IC("CallIC (Array call)", name);
+ return true;
+ }
+ return false;
+}
+
+
+void CallIC::PatchMegamorphic(Handle<FixedArray> vector,
+ Handle<Smi> slot) {
+ State state(target()->extra_ic_state());
+
+ // We are going generic.
+ vector->set(slot->value(),
+ *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
+ SKIP_WRITE_BARRIER);
+
+ CallICStub stub(isolate(), state);
+ Handle<Code> code = stub.GetCode();
+ set_target(*code);
+
+ TRACE_GENERIC_IC(isolate(), "CallIC", "megamorphic");
+}
+
+
+void CallIC::HandleMiss(Handle<Object> receiver,
+ Handle<Object> function,
+ Handle<FixedArray> vector,
+ Handle<Smi> slot) {
+ State state(target()->extra_ic_state());
+ Object* feedback = vector->get(slot->value());
+
+ if (feedback->IsJSFunction() || !function->IsJSFunction()) {
+ // We are going generic.
+ vector->set(slot->value(),
+ *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
+ SKIP_WRITE_BARRIER);
+
+ TRACE_GENERIC_IC(isolate(), "CallIC", "megamorphic");
+ } else {
+    // If we came here, feedback must be the uninitialized sentinel,
+ // and we are going monomorphic.
+ ASSERT(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate()));
+
+ // Do we want to install a custom handler?
+ if (FLAG_use_ic &&
+ DoCustomHandler(receiver, function, vector, slot, state)) {
+ return;
+ }
+
+ Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
+ Handle<Object> name(js_function->shared()->name(), isolate());
+ TRACE_IC("CallIC", name);
+ vector->set(slot->value(), *function);
+ }
}
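// Sketch (editorial) of the feedback-slot state machine implemented above:
//
//   UninitializedSentinel --(first JSFunction callee)--> that JSFunction
//   JSFunction           --(different callee)---------> MegamorphicSentinel
//   any state            --(non-JSFunction callee)----> MegamorphicSentinel
//
// The Array function is special-cased by DoCustomHandler, which installs an
// AllocationSite in the slot instead of the callee.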
@@ -2035,137 +1930,128 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
//
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
+RUNTIME_FUNCTION(CallIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 4);
CallIC ic(isolate);
Handle<Object> receiver = args.at<Object>(0);
- Handle<String> key = args.at<String>(1);
- ic.UpdateState(receiver, key);
- MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
- JSFunction* raw_function;
- if (!maybe_result->To(&raw_function)) return maybe_result;
-
- // The first time the inline cache is updated may be the first time the
- // function it references gets called. If the function is lazily compiled
- // then the first call will trigger a compilation. We check for this case
- // and we do the compilation immediately, instead of waiting for the stub
- // currently attached to the JSFunction object to trigger compilation.
- if (raw_function->is_compiled()) return raw_function;
-
- Handle<JSFunction> function(raw_function);
- JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+ Handle<Object> function = args.at<Object>(1);
+ Handle<FixedArray> vector = args.at<FixedArray>(2);
+ Handle<Smi> slot = args.at<Smi>(3);
+ ic.HandleMiss(receiver, function, vector, slot);
return *function;
}
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
+RUNTIME_FUNCTION(CallIC_Customization_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- KeyedCallIC ic(isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
- MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
- // Result could be a function or a failure.
- JSFunction* raw_function = NULL;
- if (!maybe_result->To(&raw_function)) return maybe_result;
-
- if (raw_function->is_compiled()) return raw_function;
-
- Handle<JSFunction> function(raw_function, isolate);
- JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+ ASSERT(args.length() == 4);
+  // A miss on a custom call IC always results in going megamorphic.
+ CallIC ic(isolate);
+ Handle<Object> function = args.at<Object>(1);
+ Handle<FixedArray> vector = args.at<FixedArray>(2);
+ Handle<Smi> slot = args.at<Smi>(3);
+ ic.PatchMegamorphic(vector, slot);
return *function;
}
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
+RUNTIME_FUNCTION(LoadIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
ASSERT(args.length() == 2);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<String> key = args.at<String>(1);
ic.UpdateState(receiver, key);
- return ic.Load(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ return *result;
}
// Used from ic-<arch>.cc
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
+RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
ic.UpdateState(receiver, key);
- return ic.Load(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) {
+RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
ic.UpdateState(receiver, key);
- return ic.Load(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ return *result;
}
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
+RUNTIME_FUNCTION(StoreIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<String> key = args.at<String>(1);
ic.UpdateState(receiver, key);
- return ic.Store(receiver, key, args.at<Object>(2));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ result,
+ ic.Store(receiver, key, args.at<Object>(2)));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure) {
+RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<String> key = args.at<String>(1);
ic.UpdateState(receiver, key);
- return ic.Store(receiver, key, args.at<Object>(2));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ result,
+ ic.Store(receiver, key, args.at<Object>(2)));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_MissFromStubFailure) {
+RUNTIME_FUNCTION(StoreIC_ArrayLength) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- KeyedCallIC ic(isolate);
- Arguments* caller_args = reinterpret_cast<Arguments*>(args[0]);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> receiver((*caller_args)[0], isolate);
-
- ic.UpdateState(receiver, key);
- MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
- // Result could be a function or a failure.
- JSFunction* raw_function = NULL;
- if (!maybe_result->To(&raw_function)) return maybe_result;
-
- if (raw_function->is_compiled()) return raw_function;
-
- Handle<JSFunction> function(raw_function, isolate);
- JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
- return *function;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
- SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
- JSArray* receiver = JSArray::cast(args[0]);
- Object* len = args[1];
+ Handle<JSArray> receiver = args.at<JSArray>(0);
+ Handle<Object> len = args.at<Object>(1);
// The generated code should filter out non-Smis before we get here.
ASSERT(len->IsSmi());
@@ -2173,125 +2059,134 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
#ifdef DEBUG
// The length property has to be a writable callback property.
LookupResult debug_lookup(isolate);
- receiver->LocalLookup(isolate->heap()->length_string(), &debug_lookup);
+ receiver->LookupOwn(isolate->factory()->length_string(), &debug_lookup);
ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly());
#endif
- Object* result;
- MaybeObject* maybe_result = receiver->SetElementsLength(len);
- if (!maybe_result->To(&result)) return maybe_result;
-
- return len;
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSArray::SetElementsLength(receiver, len));
+ return *len;
}
// Extend storage is called in a store inline cache when
// it is necessary to extend the properties array of a
// JSObject.
-RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(SharedStoreIC_ExtendStorage) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
+ HandleScope shs(isolate);
ASSERT(args.length() == 3);
// Convert the parameters
- JSObject* object = JSObject::cast(args[0]);
- Map* transition = Map::cast(args[1]);
- Object* value = args[2];
+ Handle<JSObject> object = args.at<JSObject>(0);
+ Handle<Map> transition = args.at<Map>(1);
+ Handle<Object> value = args.at<Object>(2);
  // Check that the object has run out of property space.
ASSERT(object->HasFastProperties());
ASSERT(object->map()->unused_property_fields() == 0);
// Expand the properties array.
- FixedArray* old_storage = object->properties();
+ Handle<FixedArray> old_storage = handle(object->properties(), isolate);
int new_unused = transition->unused_property_fields();
int new_size = old_storage->length() + new_unused + 1;
- Object* result;
- MaybeObject* maybe_result = old_storage->CopySize(new_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- FixedArray* new_storage = FixedArray::cast(result);
+ Handle<FixedArray> new_storage = FixedArray::CopySize(old_storage, new_size);
- Object* to_store = value;
+ Handle<Object> to_store = value;
- if (FLAG_track_double_fields) {
- DescriptorArray* descriptors = transition->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(transition->LastAdded());
- if (details.representation().IsDouble()) {
- MaybeObject* maybe_storage =
- isolate->heap()->AllocateHeapNumber(value->Number());
- if (!maybe_storage->To(&to_store)) return maybe_storage;
- }
+ PropertyDetails details = transition->instance_descriptors()->GetDetails(
+ transition->LastAdded());
+ if (details.representation().IsDouble()) {
+ to_store = isolate->factory()->NewHeapNumber(value->Number());
}
- new_storage->set(old_storage->length(), to_store);
+ new_storage->set(old_storage->length(), *to_store);
// Set the new property value and do the map transition.
- object->set_properties(new_storage);
- object->set_map(transition);
+ object->set_properties(*new_storage);
+ object->set_map(*transition);
// Return the stored value.
- return value;
+ return *value;
}
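// Worked example (editorial) of the size arithmetic above: with
// old_storage->length() == 4 and transition->unused_property_fields() == 2,
// FixedArray::CopySize allocates 4 + 2 + 1 == 7 slots: one for the value
// being stored plus the slack the transition target advertises.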
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
+RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
ic.UpdateState(receiver, key);
- return ic.Store(receiver, key, args.at<Object>(2));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ result,
+ ic.Store(receiver, key, args.at<Object>(2)));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) {
+RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
ic.UpdateState(receiver, key);
- return ic.Store(receiver, key, args.at<Object>(2));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ result,
+ ic.Store(receiver, key, args.at<Object>(2)));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
+RUNTIME_FUNCTION(StoreIC_Slow) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = ic.strict_mode();
- Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
- value,
- NONE,
- strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ StrictMode strict_mode = ic.strict_mode();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::SetObjectProperty(
+ isolate, object, key, value, NONE, strict_mode));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
+RUNTIME_FUNCTION(KeyedStoreIC_Slow) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = ic.strict_mode();
- Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
- value,
- NONE,
- strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ StrictMode strict_mode = ic.strict_mode();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::SetObjectProperty(
+ isolate, object, key, value, NONE, strict_mode));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
+RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
ASSERT(args.length() == 4);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
@@ -2299,24 +2194,22 @@ RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
Handle<Map> map = args.at<Map>(1);
Handle<Object> key = args.at<Object>(2);
Handle<Object> object = args.at<Object>(3);
- StrictModeFlag strict_mode = ic.strict_mode();
+ StrictMode strict_mode = ic.strict_mode();
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
}
- Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
- value,
- NONE,
- strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::SetObjectProperty(
+ isolate, object, key, value, NONE, strict_mode));
return *result;
}
-BinaryOpIC::State::State(ExtraICState extra_ic_state) {
- // We don't deserialize the SSE2 Field, since this is only used to be able
- // to include SSE2 as well as non-SSE2 versions in the snapshot. For code
- // generation we always want it to reflect the current state.
+BinaryOpIC::State::State(Isolate* isolate, ExtraICState extra_ic_state)
+ : isolate_(isolate) {
op_ = static_cast<Token::Value>(
FIRST_TOKEN + OpField::decode(extra_ic_state));
mode_ = OverwriteModeField::decode(extra_ic_state);
@@ -2336,10 +2229,7 @@ BinaryOpIC::State::State(ExtraICState extra_ic_state) {
ExtraICState BinaryOpIC::State::GetExtraICState() const {
- bool sse2 = (Max(result_kind_, Max(left_kind_, right_kind_)) > SMI &&
- CpuFeatures::IsSafeForSnapshot(SSE2));
ExtraICState extra_ic_state =
- SSE2Field::encode(sse2) |
OpField::encode(op_ - FIRST_TOKEN) |
OverwriteModeField::encode(mode_) |
LeftKindField::encode(left_kind_) |
@@ -2364,7 +2254,7 @@ void BinaryOpIC::State::GenerateAheadOfTime(
// Generated list of commonly used stubs
#define GENERATE(op, left_kind, right_kind, result_kind, mode) \
do { \
- State state(op, mode); \
+ State state(isolate, op, mode); \
state.left_kind_ = left_kind; \
state.fixed_right_arg_.has_value = false; \
state.right_kind_ = right_kind; \
@@ -2559,7 +2449,7 @@ void BinaryOpIC::State::GenerateAheadOfTime(
#undef GENERATE
#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
do { \
- State state(op, mode); \
+ State state(isolate, op, mode); \
state.left_kind_ = left_kind; \
state.fixed_right_arg_.has_value = true; \
state.fixed_right_arg_.value = fixed_right_arg_value; \
@@ -2578,18 +2468,17 @@ void BinaryOpIC::State::GenerateAheadOfTime(
}
-Handle<Type> BinaryOpIC::State::GetResultType(Isolate* isolate) const {
+Type* BinaryOpIC::State::GetResultType(Zone* zone) const {
Kind result_kind = result_kind_;
if (HasSideEffects()) {
result_kind = NONE;
} else if (result_kind == GENERIC && op_ == Token::ADD) {
- return handle(Type::Union(handle(Type::Number(), isolate),
- handle(Type::String(), isolate)), isolate);
+ return Type::Union(Type::Number(zone), Type::String(zone), zone);
} else if (result_kind == NUMBER && op_ == Token::SHR) {
- return handle(Type::Unsigned32(), isolate);
+ return Type::Unsigned32(zone);
}
ASSERT_NE(GENERIC, result_kind);
- return KindToType(result_kind, isolate);
+ return KindToType(result_kind, zone);
}
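// Summary (editorial) of the result-type rules above, on zone-allocated
// types:
//   side effects           -> Type::None(zone)
//   GENERIC result of ADD  -> Union(Number, String), i.e. concatenation
//   NUMBER result of SHR   -> Type::Unsigned32(zone); >>> always yields uint32
//   everything else        -> KindToType(result_kind, zone)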
@@ -2597,6 +2486,7 @@ void BinaryOpIC::State::Print(StringStream* stream) const {
stream->Add("(%s", Token::Name(op_));
if (mode_ == OVERWRITE_LEFT) stream->Add("_ReuseLeft");
else if (mode_ == OVERWRITE_RIGHT) stream->Add("_ReuseRight");
+ if (CouldCreateAllocationMementos()) stream->Add("_CreateAllocationMementos");
stream->Add(":%s*", KindToString(left_kind_));
if (fixed_right_arg_.has_value) {
stream->Add("%d", fixed_right_arg_.value);
@@ -2621,7 +2511,7 @@ void BinaryOpIC::State::Update(Handle<Object> left,
right->ToInt32(&fixed_right_arg_value) &&
fixed_right_arg_value > 0 &&
IsPowerOf2(fixed_right_arg_value) &&
- FixedRightArgValueField::is_valid(fixed_right_arg_value) &&
+ FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
(left_kind_ == SMI || left_kind_ == INT32) &&
(result_kind_ == NONE || !fixed_right_arg_.has_value);
fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg,
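
A worked example of the repacked encoding: the 4-bit FixedRightArgValueField now stores the exponent from WhichPowerOf2 rather than the value itself, widening the representable fixed right arguments from {1, 2, 4, 8} to every power of two up to 1 << 15:

    //   right value   WhichPowerOf2   old is_valid(value)   new is_valid(exponent)
    //   8             3               true                  true
    //   64            6               false (64 > 15)       true
    //   1 << 15       15              false                 true (the maximum)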
@@ -2636,6 +2526,18 @@ void BinaryOpIC::State::Update(Handle<Object> left,
}
}
+ // We don't want to distinguish INT32 and NUMBER for string add (because
+ // NumberToString can't make use of this anyway).
+ if (left_kind_ == STRING && right_kind_ == INT32) {
+ ASSERT_EQ(STRING, result_kind_);
+ ASSERT_EQ(Token::ADD, op_);
+ right_kind_ = NUMBER;
+ } else if (right_kind_ == STRING && left_kind_ == INT32) {
+ ASSERT_EQ(STRING, result_kind_);
+ ASSERT_EQ(Token::ADD, op_);
+ left_kind_ = NUMBER;
+ }
+
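In other words, the recorded operand kinds collapse as follows for string add (the result kind stays STRING):

    //   (STRING, INT32)  ->  (STRING, NUMBER)
    //   (INT32, STRING)  ->  (NUMBER, STRING)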
// Reset overwrite mode unless we can actually make use of it, or may be able
// to make use of it at some point in the future.
if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
@@ -2648,14 +2550,9 @@ void BinaryOpIC::State::Update(Handle<Object> left,
// Tagged operations can lead to non-truncating HChanges
if (left->IsUndefined() || left->IsBoolean()) {
left_kind_ = GENERIC;
- } else if (right->IsUndefined() || right->IsBoolean()) {
- right_kind_ = GENERIC;
} else {
- // Since the X87 is too precise, we might bail out on numbers which
- // actually would truncate with 64 bit precision.
- ASSERT(!CpuFeatures::IsSupported(SSE2));
- ASSERT(result_kind_ < NUMBER);
- result_kind_ = NUMBER;
+ ASSERT(right->IsUndefined() || right->IsBoolean());
+ right_kind_ = GENERIC;
}
}
}
@@ -2675,7 +2572,7 @@ BinaryOpIC::State::Kind BinaryOpIC::State::UpdateKind(Handle<Object> object,
new_kind = SMI;
} else if (object->IsHeapNumber()) {
double value = Handle<HeapNumber>::cast(object)->value();
- new_kind = TypeInfo::IsInt32Double(value) ? INT32 : NUMBER;
+ new_kind = IsInt32Double(value) ? INT32 : NUMBER;
} else if (object->IsString() && op() == Token::ADD) {
new_kind = STRING;
}
@@ -2707,39 +2604,67 @@ const char* BinaryOpIC::State::KindToString(Kind kind) {
// static
-Handle<Type> BinaryOpIC::State::KindToType(Kind kind, Isolate* isolate) {
- Type* type = NULL;
+Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
switch (kind) {
- case NONE: type = Type::None(); break;
- case SMI: type = Type::Smi(); break;
- case INT32: type = Type::Signed32(); break;
- case NUMBER: type = Type::Number(); break;
- case STRING: type = Type::String(); break;
- case GENERIC: type = Type::Any(); break;
+ case NONE: return Type::None(zone);
+ case SMI: return Type::SignedSmall(zone);
+ case INT32: return Type::Signed32(zone);
+ case NUMBER: return Type::Number(zone);
+ case STRING: return Type::String(zone);
+ case GENERIC: return Type::Any(zone);
}
- return handle(type, isolate);
+ UNREACHABLE();
+ return NULL;
}
-MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
- State state(target()->extended_extra_ic_state());
+MaybeHandle<Object> BinaryOpIC::Transition(
+ Handle<AllocationSite> allocation_site,
+ Handle<Object> left,
+ Handle<Object> right) {
+ State state(isolate(), target()->extra_ic_state());
// Compute the actual result using the builtin for the binary operation.
Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
TokenToJSBuiltin(state.op()));
Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
- bool caught_exception;
- Handle<Object> result = Execution::Call(
- isolate(), function, left, 1, &right, &caught_exception);
- if (caught_exception) return Failure::Exception();
-
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(),
+ result,
+ Execution::Call(isolate(), function, left, 1, &right),
+ Object);
+
+ // Execution::Call can execute arbitrary JavaScript, hence potentially
+ // update the state of this very IC, so we must update the stored state.
+ UpdateTarget();
// Compute the new state.
- State old_state = state;
+ State old_state(isolate(), target()->extra_ic_state());
state.Update(left, right, result);
- // Install the new stub.
- BinaryOpICStub stub(state);
- set_target(*stub.GetCode(isolate()));
+ // Check if we have a string operation here.
+ Handle<Code> target;
+ if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
+    // Set up the allocation site on demand.
+ if (allocation_site.is_null()) {
+ allocation_site = isolate()->factory()->NewAllocationSite();
+ }
+
+ // Install the stub with an allocation site.
+ BinaryOpICWithAllocationSiteStub stub(isolate(), state);
+ target = stub.GetCodeCopyFromTemplate(allocation_site);
+
+ // Sanity check the trampoline stub.
+ ASSERT_EQ(*allocation_site, target->FindFirstAllocationSite());
+ } else {
+ // Install the generic stub.
+ BinaryOpICStub stub(isolate(), state);
+ target = stub.GetCode();
+
+ // Sanity check the generic stub.
+ ASSERT_EQ(NULL, target->FindFirstAllocationSite());
+ }
+ set_target(*target);
if (FLAG_trace_ic) {
char buffer[150];
@@ -2750,9 +2675,12 @@ MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
old_state.Print(&stream);
stream.Add(" => ");
state.Print(&stream);
- stream.Add(" @ %p <- ", static_cast<void*>(*target()));
+ stream.Add(" @ %p <- ", static_cast<void*>(*target));
stream.OutputToStdOut();
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ if (!allocation_site.is_null()) {
+ PrintF(" using allocation site %p", static_cast<void*>(*allocation_site));
+ }
PrintF("]\n");
}
@@ -2763,30 +2691,59 @@ MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
}
- return *result;
+ return result;
}
-RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) {
+RUNTIME_FUNCTION(BinaryOpIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
+ ASSERT_EQ(2, args.length());
Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight);
BinaryOpIC ic(isolate);
- return ic.Transition(left, right);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ result,
+ ic.Transition(Handle<AllocationSite>::null(), left, right));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
+ HandleScope scope(isolate);
+ ASSERT_EQ(3, args.length());
+ Handle<AllocationSite> allocation_site = args.at<AllocationSite>(
+ BinaryOpWithAllocationSiteStub::kAllocationSite);
+ Handle<Object> left = args.at<Object>(
+ BinaryOpWithAllocationSiteStub::kLeft);
+ Handle<Object> right = args.at<Object>(
+ BinaryOpWithAllocationSiteStub::kRight);
+ BinaryOpIC ic(isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ result,
+ ic.Transition(allocation_site, left, right));
+ return *result;
}
Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code, isolate));
+ CHECK(stub.FindCodeInCache(&code));
return code;
}
Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
- return stub.GetCode(isolate);
+ ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ return stub.GetCode();
}
@@ -2807,48 +2764,39 @@ const char* CompareIC::GetStateName(State state) {
}
-Handle<Type> CompareIC::StateToType(
- Isolate* isolate,
+Type* CompareIC::StateToType(
+ Zone* zone,
CompareIC::State state,
Handle<Map> map) {
switch (state) {
- case CompareIC::UNINITIALIZED:
- return handle(Type::None(), isolate);
- case CompareIC::SMI:
- return handle(Type::Smi(), isolate);
- case CompareIC::NUMBER:
- return handle(Type::Number(), isolate);
- case CompareIC::STRING:
- return handle(Type::String(), isolate);
- case CompareIC::INTERNALIZED_STRING:
- return handle(Type::InternalizedString(), isolate);
- case CompareIC::UNIQUE_NAME:
- return handle(Type::UniqueName(), isolate);
- case CompareIC::OBJECT:
- return handle(Type::Receiver(), isolate);
+ case CompareIC::UNINITIALIZED: return Type::None(zone);
+ case CompareIC::SMI: return Type::SignedSmall(zone);
+ case CompareIC::NUMBER: return Type::Number(zone);
+ case CompareIC::STRING: return Type::String(zone);
+ case CompareIC::INTERNALIZED_STRING: return Type::InternalizedString(zone);
+ case CompareIC::UNIQUE_NAME: return Type::UniqueName(zone);
+ case CompareIC::OBJECT: return Type::Receiver(zone);
case CompareIC::KNOWN_OBJECT:
- return handle(
- map.is_null() ? Type::Receiver() : Type::Class(map), isolate);
- case CompareIC::GENERIC:
- return handle(Type::Any(), isolate);
+ return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
+ case CompareIC::GENERIC: return Type::Any(zone);
}
UNREACHABLE();
- return Handle<Type>();
+ return NULL;
}
void CompareIC::StubInfoToType(int stub_minor_key,
- Handle<Type>* left_type,
- Handle<Type>* right_type,
- Handle<Type>* overall_type,
+ Type** left_type,
+ Type** right_type,
+ Type** overall_type,
Handle<Map> map,
- Isolate* isolate) {
+ Zone* zone) {
State left_state, right_state, handler_state;
ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
&handler_state, NULL);
- *left_type = StateToType(isolate, left_state);
- *right_type = StateToType(isolate, right_state);
- *overall_type = StateToType(isolate, handler_state, map);
+ *left_type = StateToType(zone, left_state);
+ *right_type = StateToType(zone, right_state);
+ *overall_type = StateToType(zone, handler_state, map);
}
@@ -2967,12 +2915,12 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
State new_right = NewInputState(previous_right, y);
State state = TargetState(previous_state, previous_left, previous_right,
HasInlinedSmiCode(address()), x, y);
- ICCompareStub stub(op_, new_left, new_right, state);
+ ICCompareStub stub(isolate(), op_, new_left, new_right, state);
if (state == KNOWN_OBJECT) {
stub.set_known_map(
Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
}
- Handle<Code> new_target = stub.GetCode(isolate());
+ Handle<Code> new_target = stub.GetCode();
set_target(*new_target);
if (FLAG_trace_ic) {
@@ -2986,7 +2934,7 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
GetStateName(new_right),
GetStateName(state),
Token::Name(op_),
- static_cast<void*>(*stub.GetCode(isolate())));
+ static_cast<void*>(*stub.GetCode()));
}
// Activate inlined smi code.
@@ -2999,7 +2947,9 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
-RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
+RUNTIME_FUNCTION(CompareIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
@@ -3007,33 +2957,38 @@ RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
}
-void CompareNilIC::Clear(Address address, Code* target) {
+void CompareNilIC::Clear(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
- ExtraICState state = target->extended_extra_ic_state();
+ ExtraICState state = target->extra_ic_state();
- CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
+ CompareNilICStub stub(target->GetIsolate(),
+ state,
+ HydrogenCodeStub::UNINITIALIZED);
stub.ClearState();
Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code, target->GetIsolate()));
+ CHECK(stub.FindCodeInCache(&code));
- SetTargetAtAddress(address, code);
+ SetTargetAtAddress(address, code, constant_pool);
}
-MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil,
- Handle<Object> object) {
+Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate,
+ NilValue nil,
+ Handle<Object> object) {
if (object->IsNull() || object->IsUndefined()) {
- return Smi::FromInt(true);
+ return handle(Smi::FromInt(true), isolate);
}
- return Smi::FromInt(object->IsUndetectableObject());
+ return handle(Smi::FromInt(object->IsUndetectableObject()), isolate);
}
-MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
- ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
+ ExtraICState extra_ic_state = target()->extra_ic_state();
- CompareNilICStub stub(extra_ic_state);
+ CompareNilICStub stub(isolate(), extra_ic_state);
// Extract the current supported types from the patched IC and calculate what
// types must be supported as a result of the miss.
@@ -3046,27 +3001,29 @@ MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
// Find or create the specialized stub to support the new set of types.
Handle<Code> code;
if (stub.IsMonomorphic()) {
- Handle<Map> monomorphic_map(already_monomorphic
- ? target()->FindFirstMap()
+ Handle<Map> monomorphic_map(already_monomorphic && FirstTargetMap() != NULL
+ ? FirstTargetMap()
: HeapObject::cast(*object)->map());
- code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map, stub);
+ code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map, &stub);
} else {
- code = stub.GetCode(isolate());
+ code = stub.GetCode();
}
set_target(*code);
- return DoCompareNilSlow(nil, object);
+ return DoCompareNilSlow(isolate(), nil, object);
}
-RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss) {
+RUNTIME_FUNCTION(CompareNilIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
CompareNilIC ic(isolate);
- return ic.CompareNil(object);
+ return *ic.CompareNil(object);
}
-RUNTIME_FUNCTION(MaybeObject*, Unreachable) {
+RUNTIME_FUNCTION(Unreachable) {
UNREACHABLE();
CHECK(false);
return isolate->heap()->undefined_value();
@@ -3114,21 +3071,23 @@ Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
}
-MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object) {
- ToBooleanStub stub(target()->extended_extra_ic_state());
+Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
+ ToBooleanStub stub(isolate(), target()->extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
- Handle<Code> code = stub.GetCode(isolate());
+ Handle<Code> code = stub.GetCode();
set_target(*code);
- return Smi::FromInt(to_boolean_value ? 1 : 0);
+ return handle(Smi::FromInt(to_boolean_value ? 1 : 0), isolate());
}
-RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss) {
+RUNTIME_FUNCTION(ToBooleanIC_Miss) {
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_ic_miss);
ASSERT(args.length() == 1);
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
ToBooleanIC ic(isolate);
- return ic.ToBoolean(object);
+ return *ic.ToBoolean(object);
}
diff --git a/chromium/v8/src/ic.h b/chromium/v8/src/ic.h
index fa7ed6dbc13..3f550438a48 100644
--- a/chromium/v8/src/ic.h
+++ b/chromium/v8/src/ic.h
@@ -1,47 +1,26 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_IC_H_
#define V8_IC_H_
-#include "macro-assembler.h"
-#include "type-info.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
+const int kMaxKeyedPolymorphism = 4;
+
+
// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
#define IC_UTIL_LIST(ICU) \
ICU(LoadIC_Miss) \
ICU(KeyedLoadIC_Miss) \
ICU(CallIC_Miss) \
- ICU(KeyedCallIC_Miss) \
+ ICU(CallIC_Customization_Miss) \
ICU(StoreIC_Miss) \
ICU(StoreIC_ArrayLength) \
ICU(StoreIC_Slow) \
@@ -51,8 +30,7 @@ namespace internal {
/* Utilities for IC stubs. */ \
ICU(StoreCallbackProperty) \
ICU(LoadPropertyWithInterceptorOnly) \
- ICU(LoadPropertyWithInterceptorForLoad) \
- ICU(LoadPropertyWithInterceptorForCall) \
+ ICU(LoadPropertyWithInterceptor) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
ICU(CompareIC_Miss) \
@@ -61,8 +39,7 @@ namespace internal {
ICU(Unreachable) \
ICU(ToBooleanIC_Miss)
//
-// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
-// and KeyedStoreIC.
+// IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC.
//
class IC {
public:
@@ -97,44 +74,40 @@ class IC {
// Compute the current IC state based on the target stub, receiver and name.
void UpdateState(Handle<Object> receiver, Handle<Object> name);
- void MarkMonomorphicPrototypeFailure() {
- state_ = MONOMORPHIC_PROTOTYPE_FAILURE;
- }
- // Clear the inline cache to initial state.
- static void Clear(Isolate* isolate, Address address);
-
- // Computes the reloc info for this IC. This is a fairly expensive
- // operation as it has to search through the heap to find the code
- // object that contains this IC site.
- RelocInfo::Mode ComputeMode();
-
- // Returns if this IC is for contextual (no explicit receiver)
- // access to properties.
- bool IsUndeclaredGlobal(Handle<Object> receiver) {
- if (receiver->IsGlobalObject()) {
- return SlowIsUndeclaredGlobal();
- } else {
- ASSERT(!SlowIsUndeclaredGlobal());
- return false;
+ bool IsNameCompatibleWithMonomorphicPrototypeFailure(Handle<Object> name);
+ bool TryMarkMonomorphicPrototypeFailure(Handle<Object> name) {
+ if (IsNameCompatibleWithMonomorphicPrototypeFailure(name)) {
+ state_ = MONOMORPHIC_PROTOTYPE_FAILURE;
+ return true;
}
+ return false;
}
- bool SlowIsUndeclaredGlobal() {
- return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
- }
+ // If the stub contains weak maps then this function adds the stub to
+ // the dependent code array of each weak map.
+ static void RegisterWeakMapDependency(Handle<Code> stub);
+
+ // This function is called when a weak map in the stub is dying,
+ // invalidates the stub by setting maps in it to undefined.
+ static void InvalidateMaps(Code* stub);
+
+ // Clear the inline cache to initial state.
+ static void Clear(Isolate* isolate,
+ Address address,
+ ConstantPoolArray* constant_pool);
#ifdef DEBUG
- bool IsLoadStub() {
+ bool IsLoadStub() const {
return target()->is_load_stub() || target()->is_keyed_load_stub();
}
- bool IsStoreStub() {
+ bool IsStoreStub() const {
return target()->is_store_stub() || target()->is_keyed_store_stub();
}
- bool IsCallStub() {
- return target()->is_call_stub() || target()->is_keyed_call_stub();
+ bool IsCallStub() const {
+ return target()->is_call_stub();
}
#endif
@@ -148,9 +121,9 @@ class IC {
Object* object,
InlineCacheHolderFlag holder);
- static inline InlineCacheHolderFlag GetCodeCacheFlag(Type* type);
+ static inline InlineCacheHolderFlag GetCodeCacheFlag(HeapType* type);
static inline Handle<Map> GetCodeCacheHolder(InlineCacheHolderFlag flag,
- Type* type,
+ HeapType* type,
Isolate* isolate);
static bool IsCleared(Code* code) {
@@ -163,9 +136,13 @@ class IC {
// - The heap_number_map is used as a marker which includes heap numbers as
// well as smis.
// - The oddball map is only used for booleans.
- static Handle<Map> TypeToMap(Type* type, Isolate* isolate);
- static Type* MapToType(Handle<Map> type);
- static Handle<Type> CurrentTypeOf(Handle<Object> object, Isolate* isolate);
+ static Handle<Map> TypeToMap(HeapType* type, Isolate* isolate);
+ template <class T>
+ static typename T::TypeHandle MapToType(Handle<Map> map,
+ typename T::Region* region);
+
+ static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
+ Isolate* isolate);
protected:
// Get the call-site target; used for determining the state.
@@ -175,15 +152,19 @@ class IC {
Address pc() const { return *pc_address_; }
Isolate* isolate() const { return isolate_; }
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Computes the address in the original code when the code running is
- // containing break points (calls to DebugBreakXXX builtins).
- Address OriginalCodeAddress() const;
-#endif
+ // Get the shared function info of the caller.
+ SharedFunctionInfo* GetSharedFunctionInfo() const;
+ // Get the code object of the caller.
+ Code* GetCode() const;
+ // Get the original (non-breakpointed) code object of the caller.
+ Code* GetOriginalCode() const;
// Set the call-site target.
void set_target(Code* code) {
- SetTargetAtAddress(address(), code);
+#ifdef VERIFY_HEAP
+ code->VerifyEmbeddedObjectsDependency();
+#endif
+ SetTargetAtAddress(address(), code, constant_pool());
target_set_ = true;
}
@@ -195,14 +176,17 @@ class IC {
void TraceIC(const char* type, Handle<Object> name);
#endif
- Failure* TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key);
- Failure* ReferenceError(const char* type, Handle<String> name);
+ MaybeHandle<Object> TypeError(const char* type,
+ Handle<Object> object,
+ Handle<Object> key);
+ MaybeHandle<Object> ReferenceError(const char* type, Handle<String> name);
// Access the target code for the given IC address.
- static inline Code* GetTargetAtAddress(Address address);
- static inline void SetTargetAtAddress(Address address, Code* target);
+ static inline Code* GetTargetAtAddress(Address address,
+ ConstantPoolArray* constant_pool);
+ static inline void SetTargetAtAddress(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
static void PostPatching(Address address, Code* target, Code* old_target);
// Compute the handler either by compiling or by retrieving a cached version.
@@ -219,19 +203,19 @@ class IC {
return Handle<Code>::null();
}
- void UpdateMonomorphicIC(Handle<Type> type,
+ void UpdateMonomorphicIC(Handle<HeapType> type,
Handle<Code> handler,
Handle<String> name);
- bool UpdatePolymorphicIC(Handle<Type> type,
+ bool UpdatePolymorphicIC(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code);
- virtual void UpdateMegamorphicCache(Type* type, Name* name, Code* code);
+ virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
void CopyICToMegamorphicCache(Handle<String> name);
- bool IsTransitionOfMonomorphicTarget(Type* type);
- void PatchCache(Handle<Type> type,
+ bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
+ void PatchCache(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code);
virtual Code::Kind kind() const {
@@ -255,10 +239,52 @@ class IC {
Handle<String> name);
void TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name);
- virtual ExtraICState extra_ic_state() { return kNoExtraICState; }
+ ExtraICState extra_ic_state() const { return extra_ic_state_; }
+ void set_extra_ic_state(ExtraICState state) {
+ extra_ic_state_ = state;
+ }
+
+ void TargetMaps(MapHandleList* list) {
+ FindTargetMaps();
+ for (int i = 0; i < target_maps_.length(); i++) {
+ list->Add(target_maps_.at(i));
+ }
+ }
+
+ void TargetTypes(TypeHandleList* list) {
+ FindTargetMaps();
+ for (int i = 0; i < target_maps_.length(); i++) {
+ list->Add(IC::MapToType<HeapType>(target_maps_.at(i), isolate_));
+ }
+ }
+
+ Map* FirstTargetMap() {
+ FindTargetMaps();
+ return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
+ }
+
+ protected:
+ void UpdateTarget() {
+ target_ = handle(raw_target(), isolate_);
+ }
private:
- Code* raw_target() const { return GetTargetAtAddress(address()); }
+ Code* raw_target() const {
+ return GetTargetAtAddress(address(), constant_pool());
+ }
+ inline ConstantPoolArray* constant_pool() const;
+ inline ConstantPoolArray* raw_constant_pool() const;
+
+ void FindTargetMaps() {
+ if (target_maps_set_) return;
+ target_maps_set_ = true;
+ if (state_ == MONOMORPHIC) {
+ Map* map = target_->FindFirstMap();
+ if (map != NULL) target_maps_.Add(handle(map));
+ } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
+ target_->FindAllMaps(&target_maps_);
+ }
+ }
// Frame pointer for the frame that uses (calls) the IC.
Address fp_;
@@ -271,11 +297,19 @@ class IC {
Isolate* isolate_;
+ // The constant pool of the code which originally called the IC (which might
+ // be for the breakpointed copy of the original code).
+ Handle<ConstantPoolArray> raw_constant_pool_;
+
// The original code target that missed.
Handle<Code> target_;
State state_;
bool target_set_;
+ ExtraICState extra_ic_state_;
+ MapHandleList target_maps_;
+ bool target_maps_set_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
@@ -296,138 +330,102 @@ class IC_Utility {
};
-class CallICBase: public IC {
+class CallIC: public IC {
public:
- // ExtraICState bits
- class Contextual: public BitField<ContextualMode, 0, 1> {};
- class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
- static ExtraICState ComputeExtraICState(ContextualMode mode,
- StringStubFeedback feedback) {
- return Contextual::encode(mode) | StringStubState::encode(feedback);
- }
+ enum CallType { METHOD, FUNCTION };
- // Returns a JSFunction or a Failure.
- MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
- Handle<String> name);
-
- protected:
- CallICBase(Code::Kind kind, Isolate* isolate)
- : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
-
- // Compute a monomorphic stub if possible, otherwise return a null handle.
- Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
- Handle<Object> object,
- Handle<String> name);
-
- // Update the inline cache and the global stub cache based on the lookup
- // result.
- void UpdateCaches(LookupResult* lookup,
- Handle<Object> object,
- Handle<String> name);
-
- // Returns a JSFunction if the object can be called as a function, and
- // patches the stack to be ready for the call. Otherwise, it returns the
- // undefined value.
- Handle<Object> TryCallAsFunction(Handle<Object> object);
+ class State V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit State(ExtraICState extra_ic_state);
- void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
+ State(int argc, CallType call_type)
+ : argc_(argc), call_type_(call_type) {
+ }
- static void Clear(Address address, Code* target);
+ InlineCacheState GetICState() const { return ::v8::internal::GENERIC; }
- // Platform-specific code generation functions used by both call and
- // keyed call.
- static void GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- ExtraICState extra_state);
+ ExtraICState GetExtraICState() const;
- static void GenerateNormal(MacroAssembler* masm, int argc);
+ static void GenerateAheadOfTime(
+ Isolate*, void (*Generate)(Isolate*, const State&));
- static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- ExtraICState extra_state);
+ int arg_count() const { return argc_; }
+ CallType call_type() const { return call_type_; }
- virtual Handle<Code> megamorphic_stub();
- virtual Handle<Code> pre_monomorphic_stub();
+ bool CallAsMethod() const { return call_type_ == METHOD; }
- Code::Kind kind_;
+ void Print(StringStream* stream) const;
- friend class IC;
-};
+ private:
+ class ArgcBits: public BitField<int, 0, Code::kArgumentsBits> {};
+ class CallTypeBits: public BitField<CallType, Code::kArgumentsBits, 1> {};
+ const int argc_;
+ const CallType call_type_;
+ };
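
The new CallIC state is just (argc, call type) packed into the two bit fields above; a sketch of construction and inspection using only the declared accessors:

    CallIC::State state(/* argc */ 2, CallIC::METHOD);
    ASSERT(state.CallAsMethod());
    ASSERT_EQ(2, state.arg_count());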
-class CallIC: public CallICBase {
- public:
explicit CallIC(Isolate* isolate)
- : CallICBase(Code::CALL_IC, isolate),
- extra_ic_state_(target()->extra_ic_state()) {
- ASSERT(target()->is_call_stub());
- }
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm,
- int argc,
- ExtraICState extra_state) {
- GenerateMiss(masm, argc, extra_state);
+ : IC(EXTRA_CALL_FRAME, isolate) {
}
- static void GenerateMiss(MacroAssembler* masm,
- int argc,
- ExtraICState extra_state) {
- CallICBase::GenerateMiss(masm, argc, IC::kCallIC_Miss, extra_state);
- }
+ void PatchMegamorphic(Handle<FixedArray> vector, Handle<Smi> slot);
- static void GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- ExtraICState extra_ic_state);
+ void HandleMiss(Handle<Object> receiver,
+ Handle<Object> function,
+ Handle<FixedArray> vector,
+ Handle<Smi> slot);
- static void GenerateNormal(MacroAssembler* masm, int argc) {
- CallICBase::GenerateNormal(masm, argc);
- GenerateMiss(masm, argc, kNoExtraICState);
- }
- bool TryUpdateExtraICState(LookupResult* lookup, Handle<Object> object);
+ // Returns true if a custom handler was installed.
+ bool DoCustomHandler(Handle<Object> receiver,
+ Handle<Object> function,
+ Handle<FixedArray> vector,
+ Handle<Smi> slot,
+ const State& state);
- protected:
- virtual ExtraICState extra_ic_state() { return extra_ic_state_; }
+ // Code generator routines.
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ int argc,
+ CallType call_type);
- private:
- ExtraICState extra_ic_state_;
+ static void Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool);
};
-class KeyedCallIC: public CallICBase {
+class LoadIC: public IC {
public:
- explicit KeyedCallIC(Isolate* isolate)
- : CallICBase(Code::KEYED_CALL_IC, isolate) {
- ASSERT(target()->is_keyed_call_stub());
- }
-
- MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
- Handle<Object> key);
+ // ExtraICState bits
+ class ContextualModeBits: public BitField<ContextualMode, 0, 1> {};
+ STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm, int argc) {
- GenerateMiss(masm, argc);
+ static ExtraICState ComputeExtraICState(ContextualMode contextual_mode) {
+ return ContextualModeBits::encode(contextual_mode);
}
- static void GenerateMiss(MacroAssembler* masm, int argc) {
- CallICBase::GenerateMiss(masm, argc, IC::kKeyedCallIC_Miss,
- kNoExtraICState);
+ static ContextualMode GetContextualMode(ExtraICState state) {
+ return ContextualModeBits::decode(state);
}
- static void GenerateMegamorphic(MacroAssembler* masm, int argc);
- static void GenerateNormal(MacroAssembler* masm, int argc);
- static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
-};
-
+ ContextualMode contextual_mode() const {
+ return ContextualModeBits::decode(extra_ic_state());
+ }
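
Because NOT_CONTEXTUAL is asserted to be zero and the mode occupies bit 0, a default extra state of kNoExtraICState already decodes to NOT_CONTEXTUAL. A quick check, assuming CONTEXTUAL is the other enumerator:

    ASSERT_EQ(NOT_CONTEXTUAL, LoadIC::GetContextualMode(kNoExtraICState));
    ExtraICState state = LoadIC::ComputeExtraICState(CONTEXTUAL);
    ASSERT_EQ(CONTEXTUAL, LoadIC::GetContextualMode(state));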
-class LoadIC: public IC {
- public:
- explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+ explicit LoadIC(FrameDepth depth, Isolate* isolate)
+ : IC(depth, isolate) {
ASSERT(IsLoadStub());
}
+  // Returns whether this IC is for contextual (no explicit receiver)
+ // access to properties.
+ bool IsUndeclaredGlobal(Handle<Object> receiver) {
+ if (receiver->IsGlobalObject()) {
+ return contextual_mode() == CONTEXTUAL;
+ } else {
+ ASSERT(contextual_mode() != CONTEXTUAL);
+ return false;
+ }
+ }
+
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GeneratePreMonomorphic(MacroAssembler* masm) {
@@ -438,19 +436,28 @@ class LoadIC: public IC {
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
- Handle<String> name);
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ ExtraICState extra_state);
+
+ MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
+ Handle<String> name);
protected:
virtual Code::Kind kind() const { return Code::LOAD_IC; }
+ void set_target(Code* code) {
+ // The contextual mode must be preserved across IC patching.
+ ASSERT(GetContextualMode(code->extra_ic_state()) ==
+ GetContextualMode(target()->extra_ic_state()));
+
+ IC::set_target(code);
+ }
+
virtual Handle<Code> slow_stub() const {
return isolate()->builtins()->LoadIC_Slow();
}
- virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->LoadIC_Megamorphic();
- }
+ virtual Handle<Code> megamorphic_stub();
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -466,24 +473,19 @@ class LoadIC: public IC {
private:
// Stub accessors.
- static Handle<Code> initialize_stub(Isolate* isolate) {
- return isolate->builtins()->LoadIC_Initialize();
- }
-
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
- return isolate->builtins()->LoadIC_PreMonomorphic();
- }
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+                                           ExtraICState extra_state);
virtual Handle<Code> pre_monomorphic_stub() {
- return pre_monomorphic_stub(isolate());
+ return pre_monomorphic_stub(isolate(), extra_ic_state());
}
- Handle<Code> SimpleFieldLoad(int offset,
- bool inobject = true,
- Representation representation =
- Representation::Tagged());
+ Handle<Code> SimpleFieldLoad(FieldIndex index);
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
friend class IC;
};
@@ -496,8 +498,8 @@ class KeyedLoadIC: public LoadIC {
ASSERT(target()->is_keyed_load_stub());
}
- MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
- Handle<Object> key);
+ MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
+ Handle<Object> key);
// Code generator routines.
static void GenerateMiss(MacroAssembler* masm);
@@ -509,7 +511,7 @@ class KeyedLoadIC: public LoadIC {
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
static void GenerateIndexedInterceptor(MacroAssembler* masm);
- static void GenerateNonStrictArguments(MacroAssembler* masm);
+ static void GenerateSloppyArguments(MacroAssembler* masm);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
@@ -523,23 +525,17 @@ class KeyedLoadIC: public LoadIC {
Handle<Code> LoadElementStub(Handle<JSObject> receiver);
- virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->KeyedLoadIC_Generic();
- }
- virtual Handle<Code> generic_stub() const {
- return isolate()->builtins()->KeyedLoadIC_Generic();
- }
+ virtual Handle<Code> megamorphic_stub();
+ virtual Handle<Code> generic_stub() const;
+
virtual Handle<Code> slow_stub() const {
return isolate()->builtins()->KeyedLoadIC_Slow();
}
- virtual void UpdateMegamorphicCache(Type* type, Name* name, Code* code) { }
+ virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {}
private:
// Stub accessors.
- static Handle<Code> initialize_stub(Isolate* isolate) {
- return isolate->builtins()->KeyedLoadIC_Initialize();
- }
static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
}
@@ -549,14 +545,17 @@ class KeyedLoadIC: public LoadIC {
Handle<Code> indexed_interceptor_stub() {
return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
}
- Handle<Code> non_strict_arguments_stub() {
- return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
+ Handle<Code> sloppy_arguments_stub() {
+ return isolate()->builtins()->KeyedLoadIC_SloppyArguments();
}
Handle<Code> string_stub() {
return isolate()->builtins()->KeyedLoadIC_String();
}
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
friend class IC;
};
@@ -564,13 +563,11 @@ class KeyedLoadIC: public LoadIC {
class StoreIC: public IC {
public:
- // ExtraICState bits
- class StrictModeState: public BitField<StrictModeFlag, 0, 1> {};
- static ExtraICState ComputeExtraICState(StrictModeFlag flag) {
+ class StrictModeState: public BitField<StrictMode, 1, 1> {};
+ static ExtraICState ComputeExtraICState(StrictMode flag) {
return StrictModeState::encode(flag);
}
-
- static StrictModeFlag GetStrictMode(ExtraICState state) {
+ static StrictMode GetStrictMode(ExtraICState state) {
return StrictModeState::decode(state);
}
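
Together with the KeyedStoreIC change below, the store-IC extra state now lays out as follows (bit 0 is not claimed by the declarations shown here):

    //   bit  1      StrictModeState
    //   bits 2..5   ExtraICStateKeyedAccessStoreMode (KeyedStoreIC only)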
@@ -580,12 +577,13 @@ class StoreIC: public IC {
1 << StrictModeState::kShift;
StoreIC(FrameDepth depth, Isolate* isolate)
- : IC(depth, isolate),
- strict_mode_(GetStrictMode(target()->extra_ic_state())) {
+ : IC(depth, isolate) {
ASSERT(IsStoreStub());
}
- StrictModeFlag strict_mode() const { return strict_mode_; }
+ StrictMode strict_mode() const {
+ return StrictModeState::decode(extra_ic_state());
+ }
// Code generators for stub routines. Only called once at startup.
static void GenerateSlow(MacroAssembler* masm);
@@ -594,13 +592,15 @@ class StoreIC: public IC {
GenerateMiss(masm);
}
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state);
+ static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
+
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ StrictMode strict_mode);
- MUST_USE_RESULT MaybeObject* Store(
+ MUST_USE_RESULT MaybeHandle<Object> Store(
Handle<Object> object,
Handle<String> name,
Handle<Object> value,
@@ -609,21 +609,10 @@ class StoreIC: public IC {
protected:
virtual Code::Kind kind() const { return Code::STORE_IC; }
- virtual Handle<Code> megamorphic_stub() {
- if (strict_mode() == kStrictMode) {
- return isolate()->builtins()->StoreIC_Megamorphic_Strict();
- } else {
- return isolate()->builtins()->StoreIC_Megamorphic();
- }
- }
+ virtual Handle<Code> megamorphic_stub();
+
// Stub accessors.
- virtual Handle<Code> generic_stub() const {
- if (strict_mode() == kStrictMode) {
- return isolate()->builtins()->StoreIC_Generic_Strict();
- } else {
- return isolate()->builtins()->StoreIC_Generic();
- }
- }
+ virtual Handle<Code> generic_stub() const;
virtual Handle<Code> slow_stub() const {
return isolate()->builtins()->StoreIC_Slow();
@@ -634,13 +623,7 @@ class StoreIC: public IC {
}
static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
- if (strict_mode == kStrictMode) {
- return isolate->builtins()->StoreIC_PreMonomorphic_Strict();
- } else {
- return isolate->builtins()->StoreIC_PreMonomorphic();
- }
- }
+ StrictMode strict_mode);
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -654,10 +637,6 @@ class StoreIC: public IC {
Handle<Object> value,
InlineCacheHolderFlag cache_holder);
- virtual ExtraICState extra_ic_state() {
- return ComputeExtraICState(strict_mode());
- }
-
private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
@@ -666,18 +645,10 @@ class StoreIC: public IC {
IC::set_target(code);
}
- static Handle<Code> initialize_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
- if (strict_mode == kStrictMode) {
- return isolate->builtins()->StoreIC_Initialize_Strict();
- } else {
- return isolate->builtins()->StoreIC_Initialize();
- }
- }
-
- static void Clear(Isolate* isolate, Address address, Code* target);
-
- StrictModeFlag strict_mode_;
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
friend class IC;
};
@@ -700,10 +671,10 @@ class KeyedStoreIC: public StoreIC {
// ExtraICState bits (building on IC)
// ExtraICState bits
class ExtraICStateKeyedAccessStoreMode:
- public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
+ public BitField<KeyedAccessStoreMode, 2, 4> {}; // NOLINT
- static ExtraICState ComputeExtraICState(StrictModeFlag flag,
- KeyedAccessStoreMode mode) {
+ static ExtraICState ComputeExtraICState(StrictMode flag,
+ KeyedAccessStoreMode mode) {
return StrictModeState::encode(flag) |
ExtraICStateKeyedAccessStoreMode::encode(mode);
}
@@ -718,9 +689,9 @@ class KeyedStoreIC: public StoreIC {
ASSERT(target()->is_keyed_store_stub());
}
- MUST_USE_RESULT MaybeObject* Store(Handle<Object> object,
- Handle<Object> name,
- Handle<Object> value);
+ MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
+ Handle<Object> name,
+ Handle<Object> value);
// Code generators for stub routines. Only called once at startup.
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
@@ -730,25 +701,21 @@ class KeyedStoreIC: public StoreIC {
static void GenerateMiss(MacroAssembler* masm);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode);
- static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
- static void GenerateNonStrictArguments(MacroAssembler* masm);
+ StrictMode strict_mode);
+ static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
+ static void GenerateSloppyArguments(MacroAssembler* masm);
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual void UpdateMegamorphicCache(Type* type, Name* name, Code* code) { }
-
- virtual ExtraICState extra_ic_state() {
- return ComputeExtraICState(strict_mode(), STANDARD_STORE);
- }
+ virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {}
virtual Handle<Code> pre_monomorphic_stub() {
return pre_monomorphic_stub(isolate(), strict_mode());
}
static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
- if (strict_mode == kStrictMode) {
+ StrictMode strict_mode) {
+ if (strict_mode == STRICT) {
return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
} else {
return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
@@ -758,7 +725,7 @@ class KeyedStoreIC: public StoreIC {
return isolate()->builtins()->KeyedStoreIC_Slow();
}
virtual Handle<Code> megamorphic_stub() {
- if (strict_mode() == kStrictMode) {
+ if (strict_mode() == STRICT) {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
} else {
return isolate()->builtins()->KeyedStoreIC_Generic();
@@ -776,34 +743,28 @@ class KeyedStoreIC: public StoreIC {
}
// Stub accessors.
- static Handle<Code> initialize_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
- if (strict_mode == kStrictMode) {
- return isolate->builtins()->KeyedStoreIC_Initialize_Strict();
- } else {
- return isolate->builtins()->KeyedStoreIC_Initialize();
- }
- }
-
virtual Handle<Code> generic_stub() const {
- if (strict_mode() == kStrictMode) {
+ if (strict_mode() == STRICT) {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
} else {
return isolate()->builtins()->KeyedStoreIC_Generic();
}
}
- Handle<Code> non_strict_arguments_stub() {
- return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
+ Handle<Code> sloppy_arguments_stub() {
+ return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
}
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
Handle<Object> key,
Handle<Object> value);
- Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
+ Handle<Map> ComputeTransitionedMap(Handle<Map> map,
KeyedAccessStoreMode store_mode);
friend class IC;
@@ -818,11 +779,11 @@ class BinaryOpIC: public IC {
public:
class State V8_FINAL BASE_EMBEDDED {
public:
- explicit State(ExtraICState extra_ic_state);
+ State(Isolate* isolate, ExtraICState extra_ic_state);
- State(Token::Value op, OverwriteMode mode)
+ State(Isolate* isolate, Token::Value op, OverwriteMode mode)
: op_(op), mode_(mode), left_kind_(NONE), right_kind_(NONE),
- result_kind_(NONE) {
+ result_kind_(NONE), isolate_(isolate) {
ASSERT_LE(FIRST_TOKEN, op);
ASSERT_LE(op, LAST_TOKEN);
}
@@ -853,10 +814,27 @@ class BinaryOpIC: public IC {
right_kind_ > SMI && right_kind_ <= NUMBER));
}
+ // Returns true if the IC _could_ create allocation mementos.
+ bool CouldCreateAllocationMementos() const {
+ if (left_kind_ == STRING || right_kind_ == STRING) {
+ ASSERT_EQ(Token::ADD, op_);
+ return true;
+ }
+ return false;
+ }
+
+ // Returns true if the IC _should_ create allocation mementos.
+ bool ShouldCreateAllocationMementos() const {
+ return FLAG_allocation_site_pretenuring &&
+ CouldCreateAllocationMementos();
+ }
+
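The two predicates separate possibility from policy; BinaryOpIC::Transition in ic.cc (earlier in this patch) is the consumer that allocates a site only when one should be created or was passed in. In table form:

    //   operand kinds            CouldCreate...   ShouldCreate...
    //   STRING involved (ADD)    true             FLAG_allocation_site_pretenuring
    //   anything else            false            false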
bool HasSideEffects() const {
return Max(left_kind_, right_kind_) == GENERIC;
}
+ // Returns true if the IC should enable the inline smi code (i.e. if either
+ // parameter may be a smi).
bool UseInlinedSmiCode() const {
return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
}
@@ -868,13 +846,13 @@ class BinaryOpIC: public IC {
OverwriteMode mode() const { return mode_; }
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
- Handle<Type> GetLeftType(Isolate* isolate) const {
- return KindToType(left_kind_, isolate);
+ Type* GetLeftType(Zone* zone) const {
+ return KindToType(left_kind_, zone);
}
- Handle<Type> GetRightType(Isolate* isolate) const {
- return KindToType(right_kind_, isolate);
+ Type* GetRightType(Zone* zone) const {
+ return KindToType(right_kind_, zone);
}
- Handle<Type> GetResultType(Isolate* isolate) const;
+ Type* GetResultType(Zone* zone) const;
void Print(StringStream* stream) const;
@@ -882,13 +860,15 @@ class BinaryOpIC: public IC {
Handle<Object> right,
Handle<Object> result);
+ Isolate* isolate() const { return isolate_; }
+
private:
enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
Kind UpdateKind(Handle<Object> object, Kind kind) const;
static const char* KindToString(Kind kind);
- static Handle<Type> KindToType(Kind kind, Isolate* isolate);
+ static Type* KindToType(Kind kind, Zone* zone);
static bool KindMaybeSmi(Kind kind) {
return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
}
@@ -897,14 +877,13 @@ class BinaryOpIC: public IC {
STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
class OpField: public BitField<int, 0, 4> {};
class OverwriteModeField: public BitField<OverwriteMode, 4, 2> {};
- class SSE2Field: public BitField<bool, 6, 1> {};
- class ResultKindField: public BitField<Kind, 7, 3> {};
- class LeftKindField: public BitField<Kind, 10, 3> {};
+ class ResultKindField: public BitField<Kind, 6, 3> {};
+ class LeftKindField: public BitField<Kind, 9, 3> {};
// When fixed right arg is set, we don't need to store the right kind.
// Thus the two fields can overlap.
- class HasFixedRightArgField: public BitField<bool, 13, 1> {};
- class FixedRightArgValueField: public BitField<int, 14, 4> {};
- class RightKindField: public BitField<Kind, 14, 3> {};
+ class HasFixedRightArgField: public BitField<bool, 12, 1> {};
+ class FixedRightArgValueField: public BitField<int, 13, 4> {};
+ class RightKindField: public BitField<Kind, 13, 3> {};
Token::Value op_;
OverwriteMode mode_;
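
Dropping SSE2Field compacts the layout; from the declarations above, the new packing of the extra IC state is:

    //   bits  0..3    OpField (op_ - FIRST_TOKEN)
    //   bits  4..5    OverwriteModeField
    //   bits  6..8    ResultKindField
    //   bits  9..11   LeftKindField
    //   bit   12      HasFixedRightArgField
    //   bits 13..16   FixedRightArgValueField  } overlapping, selected by
    //   bits 13..15   RightKindField           } HasFixedRightArgField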
@@ -912,14 +891,16 @@ class BinaryOpIC: public IC {
Kind right_kind_;
Kind result_kind_;
Maybe<int> fixed_right_arg_;
+ Isolate* isolate_;
};
explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
- MUST_USE_RESULT MaybeObject* Transition(Handle<Object> left,
- Handle<Object> right);
+ MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
+ Handle<Object> left,
+ Handle<Object> right) V8_WARN_UNUSED_RESULT;
};
@@ -945,16 +926,16 @@ class CompareIC: public IC {
static State NewInputState(State old_state, Handle<Object> value);
- static Handle<Type> StateToType(Isolate* isolate,
- State state,
- Handle<Map> map = Handle<Map>());
+ static Type* StateToType(Zone* zone,
+ State state,
+ Handle<Map> map = Handle<Map>());
static void StubInfoToType(int stub_minor_key,
- Handle<Type>* left_type,
- Handle<Type>* right_type,
- Handle<Type>* overall_type,
+ Type** left_type,
+ Type** right_type,
+ Type** overall_type,
Handle<Map> map,
- Isolate* isolate);
+ Zone* zone);
CompareIC(Isolate* isolate, Token::Value op)
: IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
@@ -986,7 +967,10 @@ class CompareIC: public IC {
static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
Token::Value op_;
@@ -998,14 +982,16 @@ class CompareNilIC: public IC {
public:
explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
- MUST_USE_RESULT MaybeObject* CompareNil(Handle<Object> object);
+ Handle<Object> CompareNil(Handle<Object> object);
static Handle<Code> GetUninitialized();
- static void Clear(Address address, Code* target);
+ static void Clear(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
- static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(NilValue nil,
- Handle<Object> object);
+ static Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil,
+ Handle<Object> object);
};
@@ -1013,7 +999,7 @@ class ToBooleanIC: public IC {
public:
explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
- MaybeObject* ToBoolean(Handle<Object> object);
+ Handle<Object> ToBoolean(Handle<Object> object);
};
@@ -1021,15 +1007,15 @@ class ToBooleanIC: public IC {
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
+DECLARE_RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(UnaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(StoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss);
+DECLARE_RUNTIME_FUNCTION(BinaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite);
+DECLARE_RUNTIME_FUNCTION(CompareNilIC_Miss);
+DECLARE_RUNTIME_FUNCTION(ToBooleanIC_Miss);
} } // namespace v8::internal
diff --git a/chromium/v8/src/icu_util.cc b/chromium/v8/src/icu_util.cc
index b9bd65edc69..b323942d02a 100644
--- a/chromium/v8/src/icu_util.cc
+++ b/chromium/v8/src/icu_util.cc
@@ -1,38 +1,24 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "icu_util.h"
-
-#if defined(_WIN32) && defined(V8_I18N_SUPPORT)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/icu_util.h"
+
+#if defined(_WIN32)
#include <windows.h>
+#endif
+
+#if defined(V8_I18N_SUPPORT)
+#include <stdio.h>
+#include <stdlib.h>
#include "unicode/putil.h"
#include "unicode/udata.h"
+#define ICU_UTIL_DATA_FILE 0
+#define ICU_UTIL_DATA_SHARED 1
+#define ICU_UTIL_DATA_STATIC 2
+
#define ICU_UTIL_DATA_SYMBOL "icudt" U_ICU_VERSION_SHORT "_dat"
#define ICU_UTIL_DATA_SHARED_MODULE_NAME "icudt.dll"
#endif
@@ -41,8 +27,22 @@ namespace v8 {
namespace internal {
-bool InitializeICU() {
-#if defined(_WIN32) && defined(V8_I18N_SUPPORT)
+#if defined(V8_I18N_SUPPORT) && (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE)
+namespace {
+char* g_icu_data_ptr = NULL;
+
+void free_icu_data_ptr() {
+ delete[] g_icu_data_ptr;
+}
+
+} // namespace
+#endif
+
+bool InitializeICU(const char* icu_data_file) {
+#if !defined(V8_I18N_SUPPORT)
+ return true;
+#else
+#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_SHARED
// We expect to find the ICU data module alongside the current module.
HMODULE module = LoadLibraryA(ICU_UTIL_DATA_SHARED_MODULE_NAME);
if (!module) return false;
@@ -53,9 +53,36 @@ bool InitializeICU() {
UErrorCode err = U_ZERO_ERROR;
udata_setCommonData(reinterpret_cast<void*>(addr), &err);
return err == U_ZERO_ERROR;
-#else
+#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_STATIC
// Mac/Linux bundle the ICU data in.
return true;
+#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+ if (!icu_data_file) return false;
+
+ if (g_icu_data_ptr) return true;
+
+ FILE* inf = fopen(icu_data_file, "rb");
+ if (!inf) return false;
+
+ fseek(inf, 0, SEEK_END);
+ size_t size = ftell(inf);
+ rewind(inf);
+
+ g_icu_data_ptr = new char[size];
+ if (fread(g_icu_data_ptr, 1, size, inf) != size) {
+ delete[] g_icu_data_ptr;
+ g_icu_data_ptr = NULL;
+ fclose(inf);
+ return false;
+ }
+ fclose(inf);
+
+ atexit(free_icu_data_ptr);
+
+ UErrorCode err = U_ZERO_ERROR;
+ udata_setCommonData(reinterpret_cast<void*>(g_icu_data_ptr), &err);
+ return err == U_ZERO_ERROR;
+#endif
#endif
}
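
A minimal embedder-side sketch of the new entry point; the path is hypothetical and only matters for ICU_UTIL_DATA_FILE builds (shared builds ignore it, while static and non-i18n builds return true regardless):

    #include "src/icu_util.h"

    int main(int argc, char** argv) {
      // Hypothetical location; embedders typically ship icudtl.dat next to
      // the binary. Passing NULL makes data-file builds fail fast.
      if (!v8::internal::InitializeICU("/usr/lib/v8/icudtl.dat")) {
        return 1;  // ICU data file missing or unreadable.
      }
      // ... initialize and run V8 ...
      return 0;
    }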
diff --git a/chromium/v8/src/icu_util.h b/chromium/v8/src/icu_util.h
index 478abce508c..cd98ff0dfc5 100644
--- a/chromium/v8/src/icu_util.h
+++ b/chromium/v8/src/icu_util.h
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ICU_UTIL_H_
@@ -35,7 +12,7 @@ namespace internal {
// Call this function to load ICU's data tables for the current process. This
// function should be called before ICU is used.
-bool InitializeICU();
+bool InitializeICU(const char* icu_data_file);
} } // namespace v8::internal
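
Callers now pass the data-file path through the widened signature. A hedged usage sketch: the file name icudtl.dat is only illustrative, and embedders of this V8 version would normally reach this code through the public v8::V8::InitializeICU() wrapper rather than the internal function.

    #include "src/icu_util.h"

    int main() {
      // NULL preserves the old behavior for the SHARED/STATIC configurations;
      // a real path matters only when ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE.
      if (!v8::internal::InitializeICU("icudtl.dat")) {
        return 1;  // ICU data missing: i18n-dependent code would misbehave.
      }
      // ... create isolates and run scripts ...
      return 0;
    }
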
diff --git a/chromium/v8/src/incremental-marking-inl.h b/chromium/v8/src/incremental-marking-inl.h
index 1c30383d521..7b984747bd4 100644
--- a/chromium/v8/src/incremental-marking-inl.h
+++ b/chromium/v8/src/incremental-marking-inl.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_INCREMENTAL_MARKING_INL_H_
#define V8_INCREMENTAL_MARKING_INL_H_
-#include "incremental-marking.h"
+#include "src/incremental-marking.h"
namespace v8 {
namespace internal {
@@ -68,7 +45,7 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
void IncrementalMarking::RecordWrite(HeapObject* obj,
Object** slot,
Object* value) {
- if (IsMarking() && value->NonFailureIsHeapObject()) {
+ if (IsMarking() && value->IsHeapObject()) {
RecordWriteSlow(obj, slot, value);
}
}
@@ -84,7 +61,7 @@ void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
RelocInfo* rinfo,
Object* value) {
- if (IsMarking() && value->NonFailureIsHeapObject()) {
+ if (IsMarking() && value->IsHeapObject()) {
RecordWriteIntoCodeSlow(obj, rinfo, value);
}
}
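
Both write barriers above share one fast path: record the slot only when incremental marking is active and the stored value is an actual heap object. Smis (small integers encoded in the tagged pointer) never need marking, which is all the old NonFailureIsHeapObject() check still had to express once Failure objects were removed. A simplified standalone model, with an illustrative tagging scheme rather than V8's exact encoding:

    #include <cstdint>
    #include <vector>

    // Toy tagged value: low bit 0 = small integer (Smi), 1 = heap pointer.
    struct Value {
      uintptr_t bits;
      bool IsHeapObject() const { return (bits & 1) != 0; }
    };

    struct MarkingBarrier {
      bool marking_active = false;
      std::vector<Value*> recorded_slots;  // Slots the marker must revisit.

      // Mirrors RecordWrite(): filter cheaply, record only when necessary.
      void RecordWrite(Value* slot, Value value) {
        if (marking_active && value.IsHeapObject()) {
          recorded_slots.push_back(slot);
        }
      }
    };
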
diff --git a/chromium/v8/src/incremental-marking.cc b/chromium/v8/src/incremental-marking.cc
index 4223dde211e..8a158c341d9 100644
--- a/chromium/v8/src/incremental-marking.cc
+++ b/chromium/v8/src/incremental-marking.cc
@@ -1,39 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "incremental-marking.h"
-
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
-#include "v8conversions.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/incremental-marking.h"
+
+#include "src/code-stubs.h"
+#include "src/compilation-cache.h"
+#include "src/conversions.h"
+#include "src/objects-visiting.h"
+#include "src/objects-visiting-inl.h"
namespace v8 {
namespace internal {
@@ -83,28 +60,6 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
Isolate* isolate) {
ASSERT(obj->IsHeapObject());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(!marking->is_compacting_);
-
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- int counter = chunk->write_barrier_counter();
- if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
- marking->write_barriers_invoked_since_last_step_ +=
- MemoryChunk::kWriteBarrierCounterGranularity -
- chunk->write_barrier_counter();
- chunk->set_write_barrier_counter(
- MemoryChunk::kWriteBarrierCounterGranularity);
- }
-
- marking->RecordWrite(obj, slot, *slot);
-}
-
-
-void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate) {
- ASSERT(obj->IsHeapObject());
- IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(marking->is_compacting_);
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
int counter = chunk->write_barrier_counter();
@@ -267,25 +222,19 @@ class IncrementalMarkingMarkingVisitor
static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
Context* context = Context::cast(object);
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
+ // We will mark the cache black with a separate pass when we finish marking.
+ // Note that GC can happen when the context is not fully initialized,
+ // so the cache can be undefined.
+ Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
+ if (!cache->IsUndefined()) {
+ MarkObjectGreyDoNotEnqueue(cache);
+ }
VisitNativeContext(map, context);
}
- static void VisitWeakCollection(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- VisitPointers(heap,
- HeapObject::RawField(object,
- JSWeakCollection::kPropertiesOffset),
- HeapObject::RawField(object, JSWeakCollection::kSize));
- }
-
- static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}
-
INLINE(static void VisitPointer(Heap* heap, Object** p)) {
Object* obj = *p;
- if (obj->NonFailureIsHeapObject()) {
+ if (obj->IsHeapObject()) {
heap->mark_compact_collector()->RecordSlot(p, p, obj);
MarkObject(heap, obj);
}
@@ -294,7 +243,7 @@ class IncrementalMarkingMarkingVisitor
INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
for (Object** p = start; p < end; p++) {
Object* obj = *p;
- if (obj->NonFailureIsHeapObject()) {
+ if (obj->IsHeapObject()) {
heap->mark_compact_collector()->RecordSlot(start, p, obj);
MarkObject(heap, obj);
}
@@ -307,7 +256,7 @@ class IncrementalMarkingMarkingVisitor
Object** end)) {
for (Object** p = start; p < end; p++) {
Object* obj = *p;
- if (obj->NonFailureIsHeapObject()) {
+ if (obj->IsHeapObject()) {
heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
MarkObject(heap, obj);
}
@@ -498,15 +447,13 @@ bool IncrementalMarking::WorthActivating() {
// debug tests run with incremental marking and some without.
static const intptr_t kActivationThreshold = 0;
#endif
- // Only start incremental marking in a safe state: 1) when expose GC is
- // deactivated, 2) when incremental marking is turned on, 3) when we are
- // currently not in a GC, and 4) when we are currently not serializing
- // or deserializing the heap.
- return !FLAG_expose_gc &&
- FLAG_incremental_marking &&
+ // Only start incremental marking in a safe state: 1) when incremental
+ // marking is turned on, 2) when we are currently not in a GC, and
+ // 3) when we are currently not serializing or deserializing the heap.
+ return FLAG_incremental_marking &&
FLAG_incremental_marking_steps &&
heap_->gc_state() == Heap::NOT_IN_GC &&
- !Serializer::enabled() &&
+ !heap_->isolate()->serializer_enabled() &&
heap_->isolate()->IsInitialized() &&
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
@@ -584,12 +531,12 @@ void IncrementalMarking::Start(CompactionFlag flag) {
ASSERT(FLAG_incremental_marking_steps);
ASSERT(state_ == STOPPED);
ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
- ASSERT(!Serializer::enabled());
+ ASSERT(!heap_->isolate()->serializer_enabled());
ASSERT(heap_->isolate()->IsInitialized());
ResetStepCounters();
- if (heap_->IsSweepingComplete()) {
+ if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
@@ -844,7 +791,7 @@ void IncrementalMarking::Abort() {
}
}
}
- heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+ heap_->isolate()->stack_guard()->ClearGC();
state_ = STOPPED;
is_compacting_ = false;
}
@@ -861,7 +808,7 @@ void IncrementalMarking::Finalize() {
RecordWriteStub::STORE_BUFFER_ONLY);
DeactivateIncrementalWriteBarrier();
ASSERT(marking_deque_.IsEmpty());
- heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+ heap_->isolate()->stack_guard()->ClearGC();
}
@@ -933,7 +880,11 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
}
if (state_ == SWEEPING) {
- if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) {
+ if (heap_->mark_compact_collector()->IsConcurrentSweepingInProgress() &&
+ heap_->mark_compact_collector()->IsSweepingCompleted()) {
+ heap_->mark_compact_collector()->WaitUntilSweepingCompleted();
+ }
+ if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
bytes_scanned_ = 0;
StartMarking(PREVENT_COMPACTION);
}
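
The reworked SWEEPING branch of Step() above first retires a concurrent sweeper that has finished (wait for it, then clear the in-progress state) and starts marking only once no sweep is in flight. A condensed standalone model of that gate; the thread-and-flag structure is illustrative, not V8's collector:

    #include <atomic>
    #include <thread>

    class SweeperGate {
     public:
      template <typename Job>
      void Start(Job job) {
        done_.store(false, std::memory_order_release);
        in_progress_ = true;
        worker_ = std::thread([this, job] {
          job();
          done_.store(true, std::memory_order_release);  // Work finished.
        });
      }

      bool InProgress() const { return in_progress_; }

      // Mirrors the Step() logic: if the sweeper has finished its work,
      // join it and clear the in-progress state so marking may start.
      void MaybeFinish() {
        if (in_progress_ && done_.load(std::memory_order_acquire)) {
          worker_.join();
          in_progress_ = false;
        }
      }

     private:
      std::thread worker_;
      std::atomic<bool> done_{false};
      bool in_progress_ = false;
    };

    // A Step()-like caller: gate.MaybeFinish();
    //                       if (!gate.InProgress()) { /* StartMarking(...) */ }
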
diff --git a/chromium/v8/src/incremental-marking.h b/chromium/v8/src/incremental-marking.h
index d47c300ef3f..31b97142b6d 100644
--- a/chromium/v8/src/incremental-marking.h
+++ b/chromium/v8/src/incremental-marking.h
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_INCREMENTAL_MARKING_H_
#define V8_INCREMENTAL_MARKING_H_
-#include "execution.h"
-#include "mark-compact.h"
-#include "objects.h"
+#include "src/execution.h"
+#include "src/mark-compact.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
@@ -100,7 +77,7 @@ class IncrementalMarking {
// Do some marking every time this much memory has been allocated or that many
// heavy (color-checking) write barriers have been invoked.
static const intptr_t kAllocatedThreshold = 65536;
- static const intptr_t kWriteBarriersInvokedThreshold = 65536;
+ static const intptr_t kWriteBarriersInvokedThreshold = 32768;
// Start off by marking this many times more memory than has been allocated.
static const intptr_t kInitialMarkingSpeed = 1;
// But if we are promoting a lot of data we need to mark faster to keep up
@@ -129,10 +106,6 @@ class IncrementalMarking {
Object** slot,
Isolate* isolate);
- static void RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate);
-
// Record a slot for compaction. Returns false for objects that are
// guaranteed to be rescanned or not guaranteed to survive.
//
diff --git a/chromium/v8/src/interface.cc b/chromium/v8/src/interface.cc
index 603dfe9b863..d6e84068e9a 100644
--- a/chromium/v8/src/interface.cc
+++ b/chromium/v8/src/interface.cc
@@ -1,33 +1,10 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "interface.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interface.h"
namespace v8 {
namespace internal {
@@ -89,9 +66,10 @@ void Interface::DoAdd(
ZoneHashMap** map = &Chase()->exports_;
ZoneAllocationPolicy allocator(zone);
- if (*map == NULL)
- *map = new ZoneHashMap(Match, ZoneHashMap::kDefaultHashMapCapacity,
- allocator);
+ if (*map == NULL) {
+ *map = new(zone->New(sizeof(ZoneHashMap)))
+ ZoneHashMap(Match, ZoneHashMap::kDefaultHashMapCapacity, allocator);
+ }
ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen(), allocator);
if (p == NULL) {
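
DoAdd() above stops heap-allocating the export map and instead placement-news it into the zone, so the map's storage is reclaimed wholesale when the zone dies and no explicit delete is ever needed. The idiom in miniature (Zone here is a toy bump allocator, not V8's class):

    #include <cstddef>
    #include <new>      // placement new
    #include <vector>

    class Zone {  // Toy stand-in for v8::internal::Zone.
     public:
      void* New(size_t size) {
        blocks_.push_back(new char[size]);
        return blocks_.back();
      }
      ~Zone() {
        // Everything dies together; destructors never run, so only
        // trivially-destructible state belongs in a zone object.
        for (size_t i = 0; i < blocks_.size(); ++i) delete[] blocks_[i];
      }

     private:
      std::vector<char*> blocks_;
    };

    struct MapLike {
      explicit MapLike(int capacity) : capacity(capacity) {}
      int capacity;
    };

    void Example(Zone* zone) {
      // Same shape as: new(zone->New(sizeof(ZoneHashMap))) ZoneHashMap(...).
      MapLike* map = new (zone->New(sizeof(MapLike))) MapLike(8);
      (void)map;  // No delete: the zone owns the memory.
    }
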
diff --git a/chromium/v8/src/interface.h b/chromium/v8/src/interface.h
index f824a9a8749..086facf6665 100644
--- a/chromium/v8/src/interface.h
+++ b/chromium/v8/src/interface.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_INTERFACE_H_
#define V8_INTERFACE_H_
-#include "zone-inl.h" // For operator new.
+#include "src/zone-inl.h" // For operator new.
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/interpreter-irregexp.cc b/chromium/v8/src/interpreter-irregexp.cc
index 2fc9fd30257..c72a3d04f70 100644
--- a/chromium/v8/src/interpreter-irregexp.cc
+++ b/chromium/v8/src/interpreter-irregexp.cc
@@ -1,41 +1,18 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// A simple interpreter for the Irregexp byte code.
-#include "v8.h"
-#include "unicode.h"
-#include "utils.h"
-#include "ast.h"
-#include "bytecodes-irregexp.h"
-#include "interpreter-irregexp.h"
-#include "jsregexp.h"
-#include "regexp-macro-assembler.h"
+#include "src/v8.h"
+#include "src/unicode.h"
+#include "src/utils.h"
+#include "src/ast.h"
+#include "src/bytecodes-irregexp.h"
+#include "src/interpreter-irregexp.h"
+#include "src/jsregexp.h"
+#include "src/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
@@ -158,25 +135,12 @@ static int32_t Load16Aligned(const byte* pc) {
// matching terminates.
class BacktrackStack {
public:
- explicit BacktrackStack(Isolate* isolate) : isolate_(isolate) {
- if (isolate->irregexp_interpreter_backtrack_stack_cache() != NULL) {
- // If the cache is not empty reuse the previously allocated stack.
- data_ = isolate->irregexp_interpreter_backtrack_stack_cache();
- isolate->set_irregexp_interpreter_backtrack_stack_cache(NULL);
- } else {
- // Cache was empty. Allocate a new backtrack stack.
- data_ = NewArray<int>(kBacktrackStackSize);
- }
+ explicit BacktrackStack() {
+ data_ = NewArray<int>(kBacktrackStackSize);
}
~BacktrackStack() {
- if (isolate_->irregexp_interpreter_backtrack_stack_cache() == NULL) {
- // The cache is empty. Keep this backtrack stack around.
- isolate_->set_irregexp_interpreter_backtrack_stack_cache(data_);
- } else {
- // A backtrack stack was already cached, just release this one.
- DeleteArray(data_);
- }
+ DeleteArray(data_);
}
int* data() const { return data_; }
@@ -187,7 +151,6 @@ class BacktrackStack {
static const int kBacktrackStackSize = 10000;
int* data_;
- Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
@@ -204,7 +167,6 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
- // BacktrackStack ensures that the memory allocated for the backtracking stack
- // is returned to the system or cached if there is no stack being cached at
- // the moment.
- BacktrackStack backtrack_stack(isolate);
+ // BacktrackStack ensures that the memory allocated for the backtracking
+ // stack is returned to the system when matching terminates.
+ BacktrackStack backtrack_stack;
int* backtrack_stack_base = backtrack_stack.data();
int* backtrack_sp = backtrack_stack_base;
int backtrack_stack_space = backtrack_stack.max_size();
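
With the per-isolate cache deleted, BacktrackStack collapses to a plain RAII buffer: allocate on construction, free on destruction, forbid copies. A standalone equivalent of what the class now does:

    class BacktrackStack {
     public:
      BacktrackStack() : data_(new int[kBacktrackStackSize]) {}
      ~BacktrackStack() { delete[] data_; }

      // Non-copyable, matching DISALLOW_COPY_AND_ASSIGN in the hunk above.
      BacktrackStack(const BacktrackStack&) = delete;
      BacktrackStack& operator=(const BacktrackStack&) = delete;

      int* data() const { return data_; }
      int max_size() const { return kBacktrackStackSize; }

     private:
      static const int kBacktrackStackSize = 10000;
      int* data_;
    };
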
diff --git a/chromium/v8/src/interpreter-irregexp.h b/chromium/v8/src/interpreter-irregexp.h
index 0f45d98207f..4953a601e45 100644
--- a/chromium/v8/src/interpreter-irregexp.h
+++ b/chromium/v8/src/interpreter-irregexp.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// A simple interpreter for the Irregexp byte code.
diff --git a/chromium/v8/src/isolate-inl.h b/chromium/v8/src/isolate-inl.h
index 764bcb8bf38..12a861f2b89 100644
--- a/chromium/v8/src/isolate-inl.h
+++ b/chromium/v8/src/isolate-inl.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ISOLATE_INL_H_
#define V8_ISOLATE_INL_H_
-#include "debug.h"
-#include "isolate.h"
-#include "utils/random-number-generator.h"
+#include "src/debug.h"
+#include "src/isolate.h"
+#include "src/utils/random-number-generator.h"
namespace v8 {
namespace internal {
@@ -48,33 +25,18 @@ SaveContext::SaveContext(Isolate* isolate)
}
-bool Isolate::IsCodePreAgingActive() {
- return FLAG_optimize_for_size && FLAG_age_code && !IsDebuggerActive();
-}
-
-
-bool Isolate::IsDebuggerActive() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!NoBarrier_Load(&debugger_initialized_)) return false;
- return debugger()->IsDebuggerActive();
-#else
- return false;
-#endif
-}
-
-
bool Isolate::DebuggerHasBreakPoints() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
return debug()->has_break_points();
-#else
- return false;
-#endif
}
RandomNumberGenerator* Isolate::random_number_generator() {
if (random_number_generator_ == NULL) {
- random_number_generator_ = new RandomNumberGenerator;
+ if (FLAG_random_seed != 0) {
+ random_number_generator_ = new RandomNumberGenerator(FLAG_random_seed);
+ } else {
+ random_number_generator_ = new RandomNumberGenerator();
+ }
}
return random_number_generator_;
}
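
random_number_generator() above now honors --random-seed: a non-zero value gives a reproducible sequence, zero keeps nondeterministic seeding. The same lazy-init pattern sketched with the standard library (the flag is modeled as a plain global):

    #include <random>

    int g_random_seed = 0;  // Stand-in for FLAG_random_seed.

    std::mt19937* random_number_generator() {
      static std::mt19937* rng = nullptr;
      if (rng == nullptr) {
        if (g_random_seed != 0) {
          // Deterministic runs, e.g. for reproducing a flaky test.
          rng = new std::mt19937(static_cast<unsigned>(g_random_seed));
        } else {
          std::random_device entropy;
          rng = new std::mt19937(entropy());
        }
      }
      return rng;
    }
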
diff --git a/chromium/v8/src/isolate.cc b/chromium/v8/src/isolate.cc
index 25bc54685f4..9ec3c9b2896 100644
--- a/chromium/v8/src/isolate.cc
+++ b/chromium/v8/src/isolate.cc
@@ -1,69 +1,46 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdlib.h>
-#include "v8.h"
-
-#include "ast.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "heap-profiler.h"
-#include "hydrogen.h"
-#include "isolate-inl.h"
-#include "lithium-allocator.h"
-#include "log.h"
-#include "messages.h"
-#include "platform.h"
-#include "regexp-stack.h"
-#include "runtime-profiler.h"
-#include "sampler.h"
-#include "scopeinfo.h"
-#include "serialize.h"
-#include "simulator.h"
-#include "spaces.h"
-#include "stub-cache.h"
-#include "sweeper-thread.h"
-#include "utils/random-number-generator.h"
-#include "version.h"
-#include "vm-state-inl.h"
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/compilation-cache.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/heap-profiler.h"
+#include "src/hydrogen.h"
+#include "src/isolate-inl.h"
+#include "src/lithium-allocator.h"
+#include "src/log.h"
+#include "src/messages.h"
+#include "src/platform.h"
+#include "src/regexp-stack.h"
+#include "src/runtime-profiler.h"
+#include "src/sampler.h"
+#include "src/scopeinfo.h"
+#include "src/serialize.h"
+#include "src/simulator.h"
+#include "src/spaces.h"
+#include "src/stub-cache.h"
+#include "src/sweeper-thread.h"
+#include "src/utils/random-number-generator.h"
+#include "src/version.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
-Atomic32 ThreadId::highest_thread_id_ = 0;
+base::Atomic32 ThreadId::highest_thread_id_ = 0;
int ThreadId::AllocateThreadId() {
- int new_id = NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
+ int new_id = base::NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
return new_id;
}
@@ -80,10 +57,6 @@ int ThreadId::GetCurrentThreadId() {
ThreadLocalTop::ThreadLocalTop() {
InitializeInternal();
- // This flag may be set using v8::V8::IgnoreOutOfMemoryException()
- // before an isolate is initialized. The initialize methods below do
- // not touch it to preserve its value.
- ignore_out_of_memory_ = false;
}
@@ -96,7 +69,7 @@ void ThreadLocalTop::InitializeInternal() {
js_entry_sp_ = NULL;
external_callback_scope_ = NULL;
current_vm_state_ = EXTERNAL;
- try_catch_handler_address_ = NULL;
+ try_catch_handler_ = NULL;
context_ = NULL;
thread_id_ = ThreadId::Invalid();
external_caught_exception_ = false;
@@ -125,12 +98,6 @@ void ThreadLocalTop::Initialize() {
}
-v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
- return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
-}
-
-
-Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
@@ -147,7 +114,7 @@ enum DefaultIsolateStatus {
static DefaultIsolateStatus default_isolate_status_
= kDefaultIsolateUninitialized;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
-Atomic32 Isolate::isolate_counter_ = 0;
+base::Atomic32 Isolate::isolate_counter_ = 0;
Isolate::PerIsolateThreadData*
Isolate::FindOrAllocatePerThreadDataForThisThread() {
@@ -160,8 +127,8 @@ Isolate::PerIsolateThreadData*
per_thread = new PerIsolateThreadData(this, thread_id);
thread_data_table_->Insert(per_thread);
}
+ ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
}
- ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
return per_thread;
}
@@ -193,7 +160,7 @@ void Isolate::SetCrashIfDefaultIsolateInitialized() {
void Isolate::EnsureDefaultIsolate() {
LockGuard<Mutex> lock_guard(&process_wide_mutex_);
CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized);
- if (default_isolate_ == NULL) {
+ if (thread_data_table_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
@@ -201,12 +168,6 @@ void Isolate::EnsureDefaultIsolate() {
PerThreadAssertScopeBase::thread_local_key = Thread::CreateThreadLocalKey();
#endif // DEBUG
thread_data_table_ = new Isolate::ThreadDataTable();
- default_isolate_ = new Isolate();
- }
- // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
- // because a non-null thread data may be already set.
- if (Thread::GetThreadLocal(isolate_key_) == NULL) {
- Thread::SetThreadLocal(isolate_key_, default_isolate_);
}
}
@@ -216,37 +177,6 @@ struct StaticInitializer {
}
} static_initializer;
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Debugger* Isolate::GetDefaultIsolateDebugger() {
- EnsureDefaultIsolate();
- return default_isolate_->debugger();
-}
-#endif
-
-
-StackGuard* Isolate::GetDefaultIsolateStackGuard() {
- EnsureDefaultIsolate();
- return default_isolate_->stack_guard();
-}
-
-
-void Isolate::EnterDefaultIsolate() {
- EnsureDefaultIsolate();
- ASSERT(default_isolate_ != NULL);
-
- PerIsolateThreadData* data = CurrentPerIsolateThreadData();
- // If not yet in default isolate - enter it.
- if (data == NULL || data->isolate() != default_isolate_) {
- default_isolate_->Enter();
- }
-}
-
-
-v8::Isolate* Isolate::GetDefaultIsolateForLocking() {
- EnsureDefaultIsolate();
- return reinterpret_cast<v8::Isolate*>(default_isolate_);
-}
-
Address Isolate::get_address_from_id(Isolate::AddressId id) {
return isolate_addresses_[id];
@@ -268,25 +198,15 @@ void Isolate::IterateThread(ThreadVisitor* v, char* t) {
void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
- Object* pending;
- // The pending exception can sometimes be a failure. We can't show
- // that to the GC, which only understands objects.
- if (thread->pending_exception_->ToObject(&pending)) {
- v->VisitPointer(&pending);
- thread->pending_exception_ = pending; // In case GC updated it.
- }
+ v->VisitPointer(&thread->pending_exception_);
v->VisitPointer(&(thread->pending_message_obj_));
v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
v->VisitPointer(BitCast<Object**>(&(thread->context_)));
- Object* scheduled;
- if (thread->scheduled_exception_->ToObject(&scheduled)) {
- v->VisitPointer(&scheduled);
- thread->scheduled_exception_ = scheduled;
- }
+ v->VisitPointer(&thread->scheduled_exception_);
- for (v8::TryCatch* block = thread->TryCatchHandler();
+ for (v8::TryCatch* block = thread->try_catch_handler();
block != NULL;
- block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
+ block = block->next_) {
v->VisitPointer(BitCast<Object**>(&(block->exception_)));
v->VisitPointer(BitCast<Object**>(&(block->message_obj_)));
v->VisitPointer(BitCast<Object**>(&(block->message_script_)));
@@ -341,23 +261,14 @@ bool Isolate::IsDeferredHandle(Object** handle) {
void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
- // The ARM simulator has a separate JS stack. We therefore register
- // the C++ try catch handler with the simulator and get back an
- // address that can be used for comparisons with addresses into the
- // JS stack. When running without the simulator, the address
- // returned will be the address of the C++ try catch handler itself.
- Address address = reinterpret_cast<Address>(
- SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
- thread_local_top()->set_try_catch_handler_address(address);
+ thread_local_top()->set_try_catch_handler(that);
}
void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
- ASSERT(thread_local_top()->TryCatchHandler() == that);
- thread_local_top()->set_try_catch_handler_address(
- reinterpret_cast<Address>(that->next_));
+ ASSERT(thread_local_top()->try_catch_handler() == that);
+ thread_local_top()->set_try_catch_handler(that->next_);
thread_local_top()->catcher_ = NULL;
- SimulatorStack::UnregisterCTryCatch();
}
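
Register/UnregisterTryCatchHandler above now store raw v8::TryCatch pointers instead of simulator-adjusted addresses, so the Iterate() hunk earlier can walk the handler chain through ordinary next_ links and visit each handler's object slots as GC roots. Reduced to its essentials (types are illustrative):

    struct Object;

    struct TryCatchLike {
      TryCatchLike* next_;   // Intrusive chain, newest handler first.
      Object* exception_;
      Object* message_obj_;
    };

    struct Visitor {
      virtual void VisitPointer(Object** p) = 0;
      virtual ~Visitor() {}
    };

    // Mirrors the Iterate() loop: every live handler's slots are roots.
    void IterateHandlers(TryCatchLike* head, Visitor* v) {
      for (TryCatchLike* block = head; block != nullptr; block = block->next_) {
        v->VisitPointer(&block->exception_);
        v->VisitPointer(&block->message_obj_);
      }
    }
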
@@ -453,23 +364,25 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
// If the caller parameter is a function we skip frames until we're
// under it before starting to collect.
bool seen_caller = !caller->IsJSFunction();
- // First element is reserved to store the number of non-strict frames.
+ // First element is reserved to store the number of sloppy frames.
int cursor = 1;
int frames_seen = 0;
- int non_strict_frames = 0;
+ int sloppy_frames = 0;
bool encountered_strict_function = false;
for (StackFrameIterator iter(this);
!iter.done() && frames_seen < limit;
iter.Advance()) {
StackFrame* raw_frame = iter.frame();
if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) {
- frames_seen++;
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
// Set initial size to the maximum inlining level + 1 for the outermost
// function.
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
+ Handle<JSFunction> fun = frames[i].function();
+ // Filter out frames from other security contexts.
+ if (!this->context()->HasSameSecurityTokenAs(fun->context())) continue;
if (cursor + 4 > elements->length()) {
int new_capacity = JSObject::NewElementsCapacity(elements->length());
Handle<FixedArray> new_elements =
@@ -482,18 +395,17 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
ASSERT(cursor + 4 <= elements->length());
Handle<Object> recv = frames[i].receiver();
- Handle<JSFunction> fun = frames[i].function();
Handle<Code> code = frames[i].code();
Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
// The stack trace API should not expose receivers and function
// objects on frames deeper than the top-most one with a strict
- // mode function. The number of non-strict frames is stored as
+ // mode function. The number of sloppy frames is stored as
// first element in the result array.
if (!encountered_strict_function) {
- if (!fun->shared()->is_classic_mode()) {
+ if (fun->shared()->strict_mode() == STRICT) {
encountered_strict_function = true;
} else {
- non_strict_frames++;
+ sloppy_frames++;
}
}
elements->set(cursor++, *recv);
@@ -501,9 +413,10 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
elements->set(cursor++, *code);
elements->set(cursor++, *offset);
}
+ frames_seen++;
}
}
- elements->set(0, Smi::FromInt(non_strict_frames));
+ elements->set(0, Smi::FromInt(sloppy_frames));
Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(cursor));
return result;
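
CaptureSimpleStackTrace above now skips frames whose function lives in a different security context than the caller, so cross-origin frames never leak into error.stack. The filtering step in isolation (SecurityToken is a placeholder type):

    #include <vector>

    typedef int SecurityToken;

    struct FrameSummary {
      SecurityToken token;
      const char* function_name;
    };

    // Keep only frames from the same origin as |current|, mirroring the
    // HasSameSecurityTokenAs() check in the hunk above.
    std::vector<FrameSummary> VisibleFrames(
        const std::vector<FrameSummary>& frames, SecurityToken current) {
      std::vector<FrameSummary> visible;
      for (size_t i = 0; i < frames.size(); ++i) {
        if (frames[i].token != current) continue;  // Foreign context: hide.
        visible.push_back(frames[i]);
      }
      return visible;
    }
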
@@ -555,16 +468,20 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+ Handle<JSFunction> fun = frames[i].function();
+ // Filter frames from other security contexts.
+ if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
+ !this->context()->HasSameSecurityTokenAs(fun->context())) continue;
+
// Create a JSObject to hold the information for the StackFrame.
Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());
- Handle<JSFunction> fun = frames[i].function();
Handle<Script> script(Script::cast(fun->shared()->script()));
if (options & StackTrace::kLineNumber) {
int script_line_offset = script->line_offset()->value();
int position = frames[i].code()->SourcePosition(frames[i].pc());
- int line_number = GetScriptLineNumber(script, position);
+ int line_number = Script::GetLineNumber(script, position);
// line_number is already shifted by the script_line_offset.
int relative_line_number = line_number - script_line_offset;
if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
@@ -577,41 +494,31 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
// tag.
column_offset += script->column_offset()->value();
}
- CHECK_NOT_EMPTY_HANDLE(
- this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, column_key,
- Handle<Smi>(Smi::FromInt(column_offset + 1), this), NONE));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ stack_frame, column_key,
+ Handle<Smi>(Smi::FromInt(column_offset + 1), this), NONE).Check();
}
- CHECK_NOT_EMPTY_HANDLE(
- this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, line_key,
- Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ stack_frame, line_key,
+ Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE).Check();
}
if (options & StackTrace::kScriptId) {
Handle<Smi> script_id(script->id(), this);
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, script_id_key, script_id,
- NONE));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ stack_frame, script_id_key, script_id, NONE).Check();
}
if (options & StackTrace::kScriptName) {
Handle<Object> script_name(script->name(), this);
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, script_name_key, script_name,
- NONE));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ stack_frame, script_name_key, script_name, NONE).Check();
}
if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> result = GetScriptNameOrSourceURL(script);
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, script_name_or_source_url_key,
- result, NONE));
+ Handle<Object> result = Script::GetNameOrSourceURL(script);
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ stack_frame, script_name_or_source_url_key, result, NONE).Check();
}
if (options & StackTrace::kFunctionName) {
@@ -619,27 +526,23 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
if (!fun_name->BooleanValue()) {
fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
}
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, function_key, fun_name, NONE));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ stack_frame, function_key, fun_name, NONE).Check();
}
if (options & StackTrace::kIsEval) {
Handle<Object> is_eval =
script->compilation_type() == Script::COMPILATION_TYPE_EVAL ?
factory()->true_value() : factory()->false_value();
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, eval_key, is_eval, NONE));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ stack_frame, eval_key, is_eval, NONE).Check();
}
if (options & StackTrace::kIsConstructor) {
Handle<Object> is_constructor = (frames[i].is_constructor()) ?
factory()->true_value() : factory()->false_value();
- CHECK_NOT_EMPTY_HANDLE(this,
- JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, constructor_key,
- is_constructor, NONE));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ stack_frame, constructor_key, is_constructor, NONE).Check();
}
FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
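
The property writes above move from the CHECK_NOT_EMPTY_HANDLE macro to calling .Check() on the returned result object, which turns "this write cannot throw" into an assertion at the call site. A minimal stand-in for such a result type (not V8's actual Maybe/MaybeHandle):

    #include <cstdio>
    #include <cstdlib>

    template <typename T>
    class Maybe {
     public:
      static Maybe Just(T value) { return Maybe(true, value); }
      static Maybe Nothing() { return Maybe(false, T()); }

      // Mirrors the .Check() idiom: an empty result here is a VM bug,
      // not a catchable JavaScript exception, so crash loudly.
      T Check() const {
        if (!has_value_) {
          fprintf(stderr, "Maybe::Check() called on empty result\n");
          abort();
        }
        return value_;
      }

     private:
      Maybe(bool has, T value) : has_value_(has), value_(value) {}
      bool has_value_;
      T value_;
    };

    // Usage, shaped like the hunk above:
    //   SetOwnProperty(...).Check();  // Caller guarantees no exception.
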
@@ -721,28 +624,41 @@ void Isolate::SetFailedAccessCheckCallback(
}
-void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
+static inline AccessCheckInfo* GetAccessCheckInfo(Isolate* isolate,
+ Handle<JSObject> receiver) {
+ JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+ if (!constructor->shared()->IsApiFunction()) return NULL;
+
+ Object* data_obj =
+ constructor->shared()->get_api_func_data()->access_check_info();
+ if (data_obj == isolate->heap()->undefined_value()) return NULL;
+
+ return AccessCheckInfo::cast(data_obj);
+}
+
+
+void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver,
+ v8::AccessType type) {
if (!thread_local_top()->failed_access_check_callback_) return;
ASSERT(receiver->IsAccessCheckNeeded());
ASSERT(context());
// Get the data object from access check info.
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return;
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return;
-
HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- { VMState<EXTERNAL> state(this);
- thread_local_top()->failed_access_check_callback_(
- v8::Utils::ToLocal(receiver_handle),
+ Handle<Object> data;
+ { DisallowHeapAllocation no_gc;
+ AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
+ if (!access_check_info) return;
+ data = handle(access_check_info->data(), this);
+ }
+
+ // Leaving JavaScript.
+ VMState<EXTERNAL> state(this);
+ thread_local_top()->failed_access_check_callback_(
+ v8::Utils::ToLocal(receiver),
type,
v8::Utils::ToLocal(data));
- }
}
@@ -752,13 +668,14 @@ enum MayAccessDecision {
static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
- JSObject* receiver,
+ Handle<JSObject> receiver,
v8::AccessType type) {
+ DisallowHeapAllocation no_gc;
// During bootstrapping, callback functions are not enabled yet.
if (isolate->bootstrapper()->IsActive()) return YES;
if (receiver->IsJSGlobalProxy()) {
- Object* receiver_context = JSGlobalProxy::cast(receiver)->native_context();
+ Object* receiver_context = JSGlobalProxy::cast(*receiver)->native_context();
if (!receiver_context->IsContext()) return NO;
// Get the native context of current top context.
@@ -776,16 +693,14 @@ static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
}
-bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
+bool Isolate::MayNamedAccess(Handle<JSObject> receiver,
+ Handle<Object> key,
v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
-
- // The callers of this method are not expecting a GC.
- DisallowHeapAllocation no_gc;
+ ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// Skip checks for hidden properties access. Note, we do not
// require existence of a context in this case.
- if (key == heap_.hidden_string()) return true;
+ if (key.is_identical_to(factory()->hidden_string())) return true;
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
@@ -794,42 +709,33 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
if (decision != UNKNOWN) return decision == YES;
- // Get named access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
- v8::NamedSecurityCallback callback =
- v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver, this);
- Handle<Object> key_handle(key, this);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- LOG(this, ApiNamedSecurityCheck(key));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(this);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(key_handle),
- type,
- v8::Utils::ToLocal(data));
+ Handle<Object> data;
+ v8::NamedSecurityCallback callback;
+ { DisallowHeapAllocation no_gc;
+ AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
+ if (!access_check_info) return false;
+ Object* fun_obj = access_check_info->named_callback();
+ callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
+ if (!callback) return false;
+ data = handle(access_check_info->data(), this);
}
- return result;
+
+ LOG(this, ApiNamedSecurityCheck(*key));
+
+ // Leaving JavaScript.
+ VMState<EXTERNAL> state(this);
+ return callback(v8::Utils::ToLocal(receiver),
+ v8::Utils::ToLocal(key),
+ type,
+ v8::Utils::ToLocal(data));
}
-bool Isolate::MayIndexedAccess(JSObject* receiver,
+bool Isolate::MayIndexedAccess(Handle<JSObject> receiver,
uint32_t index,
v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
+ ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
ASSERT(context());
@@ -837,34 +743,25 @@ bool Isolate::MayIndexedAccess(JSObject* receiver,
MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
if (decision != UNKNOWN) return decision == YES;
- // Get indexed access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
- v8::IndexedSecurityCallback callback =
- v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver, this);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- LOG(this, ApiIndexedSecurityCheck(index));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState<EXTERNAL> state(this);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- index,
- type,
- v8::Utils::ToLocal(data));
+ Handle<Object> data;
+ v8::IndexedSecurityCallback callback;
+ { DisallowHeapAllocation no_gc;
+ // Get the indexed access check callback
+ AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
+ if (!access_check_info) return false;
+ Object* fun_obj = access_check_info->indexed_callback();
+ callback = v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
+ if (!callback) return false;
+ data = handle(access_check_info->data(), this);
}
- return result;
+
+ LOG(this, ApiIndexedSecurityCheck(index));
+
+ // Leaving JavaScript.
+ VMState<EXTERNAL> state(this);
+ return callback(
+ v8::Utils::ToLocal(receiver), index, type, v8::Utils::ToLocal(data));
}
@@ -872,23 +769,29 @@ const char* const Isolate::kStackOverflowMessage =
"Uncaught RangeError: Maximum call stack size exceeded";
-Failure* Isolate::StackOverflow() {
+Object* Isolate::StackOverflow() {
HandleScope scope(this);
// At this point we cannot create an Error object using its javascript
// constructor. Instead, we copy the pre-constructed boilerplate and
// attach the stack trace as a hidden property.
Handle<String> key = factory()->stack_overflow_string();
- Handle<JSObject> boilerplate =
- Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
- Handle<JSObject> exception = JSObject::Copy(boilerplate);
+ Handle<JSObject> boilerplate = Handle<JSObject>::cast(
+ Object::GetProperty(js_builtins_object(), key).ToHandleChecked());
+ Handle<JSObject> exception = factory()->CopyJSObject(boilerplate);
DoThrow(*exception, NULL);
// Get stack trace limit.
- Handle<Object> error = GetProperty(js_builtins_object(), "$Error");
- if (!error->IsJSObject()) return Failure::Exception();
+ Handle<Object> error = Object::GetProperty(
+ this, js_builtins_object(), "$Error").ToHandleChecked();
+ if (!error->IsJSObject()) return heap()->exception();
+
+ Handle<String> stackTraceLimit =
+ factory()->InternalizeUtf8String("stackTraceLimit");
+ ASSERT(!stackTraceLimit.is_null());
Handle<Object> stack_trace_limit =
- GetProperty(Handle<JSObject>::cast(error), "stackTraceLimit");
- if (!stack_trace_limit->IsNumber()) return Failure::Exception();
+ JSObject::GetDataProperty(Handle<JSObject>::cast(error),
+ stackTraceLimit);
+ if (!stack_trace_limit->IsNumber()) return heap()->exception();
double dlimit = stack_trace_limit->Number();
int limit = std::isnan(dlimit) ? 0 : static_cast<int>(dlimit);
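
The limit read above deliberately tolerates any numeric stackTraceLimit: NaN collapses to zero, other finite values truncate toward zero. In isolation:

    #include <cmath>

    // Mirrors: int limit = std::isnan(dlimit) ? 0 : static_cast<int>(dlimit);
    int StackTraceLimit(double dlimit) {
      return std::isnan(dlimit) ? 0 : static_cast<int>(dlimit);
    }
    // StackTraceLimit(10.0) == 10, StackTraceLimit(NAN) == 0,
    // StackTraceLimit(2.9)  == 2 (truncation, not rounding).
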
@@ -897,13 +800,13 @@ Failure* Isolate::StackOverflow() {
JSObject::SetHiddenProperty(exception,
factory()->hidden_stack_trace_string(),
stack_trace);
- return Failure::Exception();
+ return heap()->exception();
}
-Failure* Isolate::TerminateExecution() {
+Object* Isolate::TerminateExecution() {
DoThrow(heap_.termination_exception(), NULL);
- return Failure::Exception();
+ return heap()->exception();
}
@@ -924,13 +827,33 @@ void Isolate::CancelTerminateExecution() {
}
-Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
+void Isolate::InvokeApiInterruptCallback() {
+ // Note: callback below should be called outside of execution access lock.
+ InterruptCallback callback = NULL;
+ void* data = NULL;
+ {
+ ExecutionAccess access(this);
+ callback = api_interrupt_callback_;
+ data = api_interrupt_callback_data_;
+ api_interrupt_callback_ = NULL;
+ api_interrupt_callback_data_ = NULL;
+ }
+
+ if (callback != NULL) {
+ VMState<EXTERNAL> state(this);
+ HandleScope handle_scope(this);
+ callback(reinterpret_cast<v8::Isolate*>(this), data);
+ }
+}
+
+
+Object* Isolate::Throw(Object* exception, MessageLocation* location) {
DoThrow(exception, location);
- return Failure::Exception();
+ return heap()->exception();
}
-Failure* Isolate::ReThrow(MaybeObject* exception) {
+Object* Isolate::ReThrow(Object* exception) {
bool can_be_caught_externally = false;
bool catchable_by_javascript = is_catchable_by_javascript(exception);
ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
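
InvokeApiInterruptCallback above copies the callback and its data out while holding the execution lock, clears both slots so the interrupt fires only once, and invokes the callback only after releasing the lock, since user code may re-enter the VM. The pattern standalone, with std::mutex standing in for ExecutionAccess:

    #include <mutex>

    typedef void (*InterruptCallback)(void* data);

    std::mutex g_access;  // Stand-in for ExecutionAccess.
    InterruptCallback g_callback = nullptr;
    void* g_callback_data = nullptr;

    void InvokeApiInterruptCallback() {
      InterruptCallback callback = nullptr;
      void* data = nullptr;
      {
        std::lock_guard<std::mutex> lock(g_access);
        callback = g_callback;   // Take ownership of the pending request...
        data = g_callback_data;
        g_callback = nullptr;    // ...and clear it so it runs exactly once.
        g_callback_data = nullptr;
      }
      // Invoke outside the lock: the callback may call back into the API.
      if (callback != nullptr) callback(data);
    }
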
@@ -940,16 +863,22 @@ Failure* Isolate::ReThrow(MaybeObject* exception) {
// Set the exception being re-thrown.
set_pending_exception(exception);
- if (exception->IsFailure()) return exception->ToFailureUnchecked();
- return Failure::Exception();
+ return heap()->exception();
}
-Failure* Isolate::ThrowIllegalOperation() {
+Object* Isolate::ThrowIllegalOperation() {
+ if (FLAG_stack_trace_on_illegal) PrintStack(stdout);
return Throw(heap_.illegal_access_string());
}
+Object* Isolate::ThrowInvalidStringLength() {
+ return Throw(*factory()->NewRangeError(
+ "invalid_string_length", HandleVector<Object>(NULL, 0)));
+}
+
+
void Isolate::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
@@ -979,8 +908,8 @@ void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
}
-Failure* Isolate::PromoteScheduledException() {
- MaybeObject* thrown = scheduled_exception();
+Object* Isolate::PromoteScheduledException() {
+ Object* thrown = scheduled_exception();
clear_scheduled_exception();
// Re-throw the exception to avoid getting repeated error reporting.
return ReThrow(thrown);
@@ -1067,15 +996,17 @@ bool Isolate::ShouldReportException(bool* can_be_caught_externally,
bool Isolate::IsErrorObject(Handle<Object> obj) {
if (!obj->IsJSObject()) return false;
- String* error_key =
- *(factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error")));
- Object* error_constructor =
- js_builtins_object()->GetPropertyNoExceptionThrown(error_key);
+ Handle<String> error_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error"));
+ Handle<Object> error_constructor = Object::GetProperty(
+ js_builtins_object(), error_key).ToHandleChecked();
+ DisallowHeapAllocation no_gc;
for (Object* prototype = *obj; !prototype->IsNull();
prototype = prototype->GetPrototype(this)) {
if (!prototype->IsJSObject()) return false;
- if (JSObject::cast(prototype)->map()->constructor() == error_constructor) {
+ if (JSObject::cast(prototype)->map()->constructor() ==
+ *error_constructor) {
return true;
}
}
@@ -1103,12 +1034,10 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
thread_local_top()->rethrowing_message_ = false;
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger of exception.
if (catchable_by_javascript) {
- debugger_->OnException(exception_handle, report_exception);
+ debug()->OnException(exception_handle, report_exception);
}
-#endif
// Generate the message if required.
if (report_exception || try_catch_needs_message) {
@@ -1122,13 +1051,11 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// while the bootstrapper is active since the infrastructure may not have
// been properly initialized.
if (!bootstrapping) {
- Handle<String> stack_trace;
- if (FLAG_trace_exception) stack_trace = StackTraceString();
Handle<JSArray> stack_trace_object;
if (capture_stack_trace_for_uncaught_exceptions_) {
if (IsErrorObject(exception_handle)) {
// We fetch the stack trace that corresponds to this error object.
- String* key = heap()->hidden_stack_trace_string();
+ Handle<String> key = factory()->hidden_stack_trace_string();
Object* stack_property =
JSObject::cast(*exception_handle)->GetHiddenProperty(key);
// Property lookup may have failed. In this case it's probably not
@@ -1150,10 +1077,9 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// before throwing as uncaught exception. Note that the pending
// exception object to be set later must not be turned into a string.
if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) {
- bool failed = false;
- exception_arg =
- Execution::ToDetailString(this, exception_arg, &failed);
- if (failed) {
+ MaybeHandle<Object> maybe_exception =
+ Execution::ToDetailString(this, exception_arg);
+ if (!maybe_exception.ToHandle(&exception_arg)) {
exception_arg = factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("exception"));
}
@@ -1163,7 +1089,6 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
"uncaught_exception",
location,
HandleVector<Object>(&exception_arg, 1),
- stack_trace,
stack_trace_object);
thread_local_top()->pending_message_obj_ = *message_obj;
if (location != NULL) {
@@ -1182,7 +1107,7 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
fatal_exception_depth++;
PrintF(stderr,
"%s\n\nFROM\n",
- *MessageHandler::GetLocalizedMessage(this, message_obj));
+ MessageHandler::GetLocalizedMessage(this, message_obj).get());
PrintCurrentStackTrace(stderr);
OS::Abort();
}
@@ -1192,22 +1117,40 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// In this case we could have an extension (or an internal error
// somewhere) and we print out the line number at which the error occurred
// to the console for easier debugging.
- int line_number = GetScriptLineNumberSafe(location->script(),
- location->start_pos());
+ int line_number =
+ location->script()->GetLineNumber(location->start_pos()) + 1;
if (exception->IsString() && location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error: %s in %s at line %d.\n",
- *String::cast(exception)->ToCString(),
- *String::cast(location->script()->name())->ToCString(),
- line_number + 1);
+ String::cast(exception)->ToCString().get(),
+ String::cast(location->script()->name())->ToCString().get(),
+ line_number);
} else if (location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error in %s at line %d.\n",
- *String::cast(location->script()->name())->ToCString(),
- line_number + 1);
+ String::cast(location->script()->name())->ToCString().get(),
+ line_number);
} else {
OS::PrintError("Extension or internal compilation error.\n");
}
+#ifdef OBJECT_PRINT
+ // Since comments and empty lines have been stripped from the source of
+ // builtins, print the actual source here so that line numbers match.
+ if (location->script()->source()->IsString()) {
+ Handle<String> src(String::cast(location->script()->source()));
+ PrintF("Failing script:\n");
+ int len = src->length();
+ int line_number = 1;
+ PrintF("%5d: ", line_number);
+ for (int i = 0; i < len; i++) {
+ uint16_t character = src->Get(i);
+ PrintF("%c", character);
+ if (character == '\n' && i < len - 2) {
+ PrintF("%5d: ", ++line_number);
+ }
+ }
+ }
+#endif
}
}
@@ -1223,20 +1166,15 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
}
-bool Isolate::IsExternallyCaught() {
+bool Isolate::HasExternalTryCatch() {
ASSERT(has_pending_exception());
- if ((thread_local_top()->catcher_ == NULL) ||
- (try_catch_handler() != thread_local_top()->catcher_)) {
- // When throwing the exception, we found no v8::TryCatch
- // which should care about this exception.
- return false;
- }
+ return (thread_local_top()->catcher_ != NULL) &&
+ (try_catch_handler() == thread_local_top()->catcher_);
+}
- if (!is_catchable_by_javascript(pending_exception())) {
- return true;
- }
+bool Isolate::IsFinallyOnTop() {
// Get the address of the external handler so we can compare the address to
// determine which one is closer to the top of the stack.
Address external_handler_address =
@@ -1256,28 +1194,22 @@ bool Isolate::IsExternallyCaught() {
StackHandler::FromAddress(Isolate::handler(thread_local_top()));
while (handler != NULL && handler->address() < external_handler_address) {
ASSERT(!handler->is_catch());
- if (handler->is_finally()) return false;
+ if (handler->is_finally()) return true;
handler = handler->next();
}
- return true;
+ return false;
}
void Isolate::ReportPendingMessages() {
ASSERT(has_pending_exception());
- PropagatePendingExceptionToExternalTryCatch();
+ bool can_clear_message = PropagatePendingExceptionToExternalTryCatch();
- // If the pending exception is OutOfMemoryException set out_of_memory in
- // the native context. Note: We have to mark the native context here
- // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
- // set it.
HandleScope scope(this);
- if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
- context()->mark_out_of_memory();
- } else if (thread_local_top_.pending_exception_ ==
- heap()->termination_exception()) {
+ if (thread_local_top_.pending_exception_ ==
+ heap()->termination_exception()) {
// Do nothing: if needed, the exception has already been propagated to
// v8::TryCatch.
} else {
@@ -1300,15 +1232,14 @@ void Isolate::ReportPendingMessages() {
}
}
}
- clear_pending_message();
+ if (can_clear_message) clear_pending_message();
}
MessageLocation Isolate::GetMessageLocation() {
ASSERT(has_pending_exception());
- if (!thread_local_top_.pending_exception_->IsOutOfMemory() &&
- thread_local_top_.pending_exception_ != heap()->termination_exception() &&
+ if (thread_local_top_.pending_exception_ != heap()->termination_exception() &&
thread_local_top_.has_pending_message_ &&
!thread_local_top_.pending_message_obj_->IsTheHole() &&
!thread_local_top_.pending_message_script_->IsTheHole()) {
@@ -1327,39 +1258,36 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
ASSERT(has_pending_exception());
PropagatePendingExceptionToExternalTryCatch();
- // Always reschedule out of memory exceptions.
- if (!is_out_of_memory()) {
- bool is_termination_exception =
- pending_exception() == heap_.termination_exception();
-
- // Do not reschedule the exception if this is the bottom call.
- bool clear_exception = is_bottom_call;
+ bool is_termination_exception =
+ pending_exception() == heap_.termination_exception();
- if (is_termination_exception) {
- if (is_bottom_call) {
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- } else if (thread_local_top()->external_caught_exception_) {
- // If the exception is externally caught, clear it if there are no
- // JavaScript frames on the way to the C++ frame that has the
- // external handler.
- ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- JavaScriptFrameIterator it(this);
- if (it.done() || (it.frame()->sp() > external_handler_address)) {
- clear_exception = true;
- }
- }
+ // Do not reschedule the exception if this is the bottom call.
+ bool clear_exception = is_bottom_call;
- // Clear the exception if needed.
- if (clear_exception) {
+ if (is_termination_exception) {
+ if (is_bottom_call) {
thread_local_top()->external_caught_exception_ = false;
clear_pending_exception();
return false;
}
+ } else if (thread_local_top()->external_caught_exception_) {
+ // If the exception is externally caught, clear it if there are no
+ // JavaScript frames on the way to the C++ frame that has the
+ // external handler.
+ ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
+ JavaScriptFrameIterator it(this);
+ if (it.done() || (it.frame()->sp() > external_handler_address)) {
+ clear_exception = true;
+ }
+ }
+
+ // Clear the exception if needed.
+ if (clear_exception) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ return false;
}
// Reschedule the exception.
@@ -1379,23 +1307,6 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions(
}
-bool Isolate::is_out_of_memory() {
- if (has_pending_exception()) {
- MaybeObject* e = pending_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- if (has_scheduled_exception()) {
- MaybeObject* e = scheduled_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- return false;
-}
-
-
Handle<Context> Isolate::native_context() {
return Handle<Context>(context()->global_object()->native_context());
}
@@ -1408,8 +1319,7 @@ Handle<Context> Isolate::global_context() {
Handle<Context> Isolate::GetCallingNativeContext() {
JavaScriptFrameIterator it(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (debug_->InDebugger()) {
+ if (debug_->in_debug_scope()) {
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
@@ -1420,7 +1330,6 @@ Handle<Context> Isolate::GetCallingNativeContext() {
}
}
}
-#endif // ENABLE_DEBUGGER_SUPPORT
if (it.done()) return Handle<Context>::null();
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
@@ -1429,8 +1338,8 @@ Handle<Context> Isolate::GetCallingNativeContext() {
char* Isolate::ArchiveThread(char* to) {
- OS::MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
- sizeof(ThreadLocalTop));
+ MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
+ sizeof(ThreadLocalTop));
InitializeThreadLocal();
clear_pending_exception();
clear_pending_message();
@@ -1440,10 +1349,10 @@ char* Isolate::ArchiveThread(char* to) {
char* Isolate::RestoreThread(char* from) {
- OS::MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
- sizeof(ThreadLocalTop));
- // This might be just paranoia, but it seems to be needed in case a
- // thread_local_top_ is restored on a separate OS thread.
+ MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
+ sizeof(ThreadLocalTop));
+// This might be just paranoia, but it seems to be needed in case a
+// thread_local_top_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
thread_local_top()->simulator_ = Simulator::current(this);
#endif
@@ -1465,6 +1374,13 @@ Isolate::ThreadDataTable::~ThreadDataTable() {
}
+Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
+#if defined(USE_SIMULATOR)
+ delete simulator_;
+#endif
+}
+
+
Isolate::PerIsolateThreadData*
Isolate::ThreadDataTable::Lookup(Isolate* isolate,
ThreadId thread_id) {
@@ -1524,15 +1440,15 @@ Isolate::Isolate()
compilation_cache_(NULL),
counters_(NULL),
code_range_(NULL),
- debugger_initialized_(false),
logger_(NULL),
stats_table_(NULL),
stub_cache_(NULL),
+ code_aging_helper_(NULL),
deoptimizer_data_(NULL),
+ materialized_object_store_(NULL),
capture_stack_trace_for_uncaught_exceptions_(false),
stack_trace_for_uncaught_exceptions_frame_limit_(0),
stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
- transcendental_cache_(NULL),
memory_allocator_(NULL),
keyed_lookup_cache_(NULL),
context_slot_cache_(NULL),
@@ -1545,17 +1461,17 @@ Isolate::Isolate()
global_handles_(NULL),
eternal_handles_(NULL),
thread_manager_(NULL),
- fp_stubs_generated_(false),
has_installed_extensions_(false),
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
+ call_descriptors_(NULL),
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
+ serializer_enabled_(false),
has_fatal_error_(false),
- use_crankshaft_(true),
initialized_from_snapshot_(false),
cpu_profiler_(NULL),
heap_profiler_(NULL),
@@ -1564,9 +1480,9 @@ Isolate::Isolate()
optimizing_compiler_thread_(NULL),
sweeper_thread_(NULL),
num_sweeper_threads_(0),
- max_available_threads_(0),
- stress_deopt_count_(0) {
- id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
+ stress_deopt_count_(0),
+ next_optimization_id_(0) {
+ id_ = base::NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,
@@ -1580,23 +1496,9 @@ Isolate::Isolate()
thread_manager_ = new ThreadManager();
thread_manager_->isolate_ = this;
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
- simulator_initialized_ = false;
- simulator_i_cache_ = NULL;
- simulator_redirection_ = NULL;
-#endif
-
#ifdef DEBUG
// heap_histograms_ initializes itself.
memset(&js_spill_information_, 0, sizeof(js_spill_information_));
- memset(code_kind_statistics_, 0,
- sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
-#endif
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_ = NULL;
- debugger_ = NULL;
#endif
handle_scope_data_.Initialize();
@@ -1610,6 +1512,9 @@ Isolate::Isolate()
memset(name##_, 0, sizeof(type) * length);
ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE)
#undef ISOLATE_INIT_ARRAY_EXECUTE
+
+ InitializeLoggingAndCounters();
+ debug_ = new Debug(this);
}
@@ -1635,9 +1540,7 @@ void Isolate::TearDown() {
serialize_partial_snapshot_cache_ = NULL;
}
- if (!IsDefaultIsolate()) {
- delete this;
- }
+ delete this;
// Restore the previous current isolate.
SetIsolateThreadLocals(saved_isolate, saved_data);
@@ -1653,9 +1556,7 @@ void Isolate::Deinit() {
if (state_ == INITIALIZED) {
TRACE_ISOLATE(deinit);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- debugger()->UnloadDebugger();
-#endif
+ debug()->Unload();
if (concurrent_recompilation_enabled()) {
optimizing_compiler_thread_->Stop();
@@ -1671,6 +1572,10 @@ void Isolate::Deinit() {
delete[] sweeper_thread_;
sweeper_thread_ = NULL;
+ if (FLAG_job_based_sweeping &&
+ heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
+ }
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
@@ -1688,7 +1593,6 @@ void Isolate::Deinit() {
bootstrapper_->TearDown();
if (runtime_profiler_ != NULL) {
- runtime_profiler_->TearDown();
delete runtime_profiler_;
runtime_profiler_ = NULL;
}
@@ -1739,10 +1643,7 @@ Isolate::~Isolate() {
// Has to be called while counters_ are still alive
runtime_zone_.DeleteKeptSegment();
- // The entry stack must be empty when we get here,
- // except for the default isolate, where it can
- // still contain up to one entry stack item
- ASSERT(entry_stack_ == NULL || this == default_isolate_);
+ // The entry stack must be empty when we get here.
ASSERT(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
delete entry_stack_;
@@ -1760,6 +1661,9 @@ Isolate::~Isolate() {
delete[] code_stub_interface_descriptors_;
code_stub_interface_descriptors_ = NULL;
+ delete[] call_descriptors_;
+ call_descriptors_ = NULL;
+
delete regexp_stack_;
regexp_stack_ = NULL;
@@ -1770,13 +1674,16 @@ Isolate::~Isolate() {
delete keyed_lookup_cache_;
keyed_lookup_cache_ = NULL;
- delete transcendental_cache_;
- transcendental_cache_ = NULL;
delete stub_cache_;
stub_cache_ = NULL;
+ delete code_aging_helper_;
+ code_aging_helper_ = NULL;
delete stats_table_;
stats_table_ = NULL;
+ delete materialized_object_store_;
+ materialized_object_store_ = NULL;
+
delete logger_;
logger_ = NULL;
@@ -1819,12 +1726,8 @@ Isolate::~Isolate() {
delete random_number_generator_;
random_number_generator_ = NULL;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- delete debugger_;
- debugger_ = NULL;
delete debug_;
debug_ = NULL;
-#endif
}
@@ -1834,26 +1737,29 @@ void Isolate::InitializeThreadLocal() {
}
-void Isolate::PropagatePendingExceptionToExternalTryCatch() {
+bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
ASSERT(has_pending_exception());
- bool external_caught = IsExternallyCaught();
- thread_local_top_.external_caught_exception_ = external_caught;
+ bool has_external_try_catch = HasExternalTryCatch();
+ if (!has_external_try_catch) {
+ thread_local_top_.external_caught_exception_ = false;
+ return true;
+ }
- if (!external_caught) return;
+ bool catchable_by_js = is_catchable_by_javascript(pending_exception());
+ if (catchable_by_js && IsFinallyOnTop()) {
+ thread_local_top_.external_caught_exception_ = false;
+ return false;
+ }
- if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
- // Do not propagate OOM exception: we should kill VM asap.
- } else if (thread_local_top_.pending_exception_ ==
+ thread_local_top_.external_caught_exception_ = true;
+ if (thread_local_top_.pending_exception_ ==
heap()->termination_exception()) {
try_catch_handler()->can_continue_ = false;
try_catch_handler()->has_terminated_ = true;
try_catch_handler()->exception_ = heap()->null_value();
} else {
v8::TryCatch* handler = try_catch_handler();
- // At this point all non-object (failure) exceptions have
- // been dealt with so this shouldn't fail.
- ASSERT(!pending_exception()->IsFailure());
ASSERT(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
thread_local_top_.pending_message_obj_->IsTheHole());
ASSERT(thread_local_top_.pending_message_script_->IsScript() ||
@@ -1862,13 +1768,14 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() {
handler->has_terminated_ = false;
handler->exception_ = pending_exception();
// Propagate to the external try-catch only if we got an actual message.
- if (thread_local_top_.pending_message_obj_->IsTheHole()) return;
+ if (thread_local_top_.pending_message_obj_->IsTheHole()) return true;
handler->message_obj_ = thread_local_top_.pending_message_obj_;
handler->message_script_ = thread_local_top_.pending_message_script_;
handler->message_start_pos_ = thread_local_top_.pending_message_start_pos_;
handler->message_end_pos_ = thread_local_top_.pending_message_end_pos_;
}
+ return true;
}
@@ -1882,18 +1789,6 @@ void Isolate::InitializeLoggingAndCounters() {
}
-void Isolate::InitializeDebugger() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- LockGuard<RecursiveMutex> lock_guard(debugger_access());
- if (NoBarrier_Load(&debugger_initialized_)) return;
- InitializeLoggingAndCounters();
- debug_ = new Debug(this);
- debugger_ = new Debugger(this);
- Release_Store(&debugger_initialized_, true);
-#endif
-}
-
-
bool Isolate::Init(Deserializer* des) {
ASSERT(state_ != INITIALIZED);
TRACE_ISOLATE(init);
@@ -1902,10 +1797,6 @@ bool Isolate::Init(Deserializer* des) {
has_fatal_error_ = false;
- use_crankshaft_ = FLAG_crankshaft
- && !Serializer::enabled()
- && CPU::SupportsCrankshaft();
-
if (function_entry_hook() != NULL) {
// When function entry hooking is in effect, we have to create the code
// stubs from scratch to get entry hooks, rather than loading the previously
@@ -1915,11 +1806,7 @@ bool Isolate::Init(Deserializer* des) {
}
// The initialization process does not handle memory exhaustion.
- DisallowAllocationFailure disallow_allocation_failure;
-
- InitializeLoggingAndCounters();
-
- InitializeDebugger();
+ DisallowAllocationFailure disallow_allocation_failure(this);
memory_allocator_ = new MemoryAllocator(this);
code_range_ = new CodeRange(this);
@@ -1936,7 +1823,6 @@ bool Isolate::Init(Deserializer* des) {
string_tracker_ = new StringTracker();
string_tracker_->isolate_ = this;
compilation_cache_ = new CompilationCache(this);
- transcendental_cache_ = new TranscendentalCache(this);
keyed_lookup_cache_ = new KeyedLookupCache();
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
@@ -1948,11 +1834,14 @@ bool Isolate::Init(Deserializer* des) {
bootstrapper_ = new Bootstrapper(this);
handle_scope_implementer_ = new HandleScopeImplementer(this);
stub_cache_ = new StubCache(this);
+ materialized_object_store_ = new MaterializedObjectStore(this);
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
code_stub_interface_descriptors_ =
new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
+ call_descriptors_ =
+ new CallInterfaceDescriptor[NUMBER_OF_CALL_DESCRIPTORS];
cpu_profiler_ = new CpuProfiler(this);
heap_profiler_ = new HeapProfiler(heap());
@@ -1961,11 +1850,13 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif
+ code_aging_helper_ = new CodeAgingHelper();
+
{ // NOLINT
// Ensure that the thread has a valid stack guard. The v8::Locker object
// will ensure this too, but we don't have to use lockers if we are only
@@ -1999,17 +1890,24 @@ bool Isolate::Init(Deserializer* des) {
bootstrapper_->Initialize(create_heap_objects);
builtins_.SetUp(this, create_heap_objects);
- if (create_heap_objects) heap_.CreateStubsRequiringBuiltins();
+ if (FLAG_log_internal_timer_events) {
+ set_event_logger(Logger::LogInternalEvents);
+ } else {
+ set_event_logger(Logger::EmptyLogInternalEvents);
+ }
// Set default value if not yet set.
// TODO(yangguo): move this to ResourceConstraints::ConfigureDefaults
// once ResourceConstraints becomes an argument to the Isolate constructor.
if (max_available_threads_ < 1) {
// Choose the default between 1 and 4.
- max_available_threads_ = Max(Min(CPU::NumberOfProcessorsOnline(), 4), 1);
+ max_available_threads_ = Max(Min(OS::NumberOfProcessorsOnline(), 4), 1);
}
- num_sweeper_threads_ = SweeperThread::NumberOfThreads(max_available_threads_);
+ if (!FLAG_job_based_sweeping) {
+ num_sweeper_threads_ =
+ SweeperThread::NumberOfThreads(max_available_threads_);
+ }
if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
PrintF("Concurrent recompilation has been disabled for tracing.\n");
@@ -2026,10 +1924,6 @@ bool Isolate::Init(Deserializer* des) {
}
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_->SetUp(create_heap_objects);
-#endif
-
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
des->Deserialize(this);
@@ -2049,7 +1943,6 @@ bool Isolate::Init(Deserializer* des) {
if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
runtime_profiler_ = new RuntimeProfiler(this);
- runtime_profiler_->SetUp();
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.
@@ -2074,6 +1967,13 @@ bool Isolate::Init(Deserializer* des) {
Internals::kIsolateEmbedderDataOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
Internals::kIsolateRootsOffset);
+ CHECK_EQ(static_cast<int>(
+ OFFSET_OF(Isolate, heap_.amount_of_external_allocated_memory_)),
+ Internals::kAmountOfExternalAllocatedMemoryOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(
+ Isolate,
+ heap_.amount_of_external_allocated_memory_at_last_global_gc_)),
+ Internals::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
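
Aside, not part of this diff: a sketch of what these CHECKs protect. The
public header reads the counters through raw offsets, roughly like the
inlined external-memory accounting in include/v8.h (the exact code there may
differ):

  typedef v8::internal::Internals I;
  int64_t* external_memory = reinterpret_cast<int64_t*>(
      reinterpret_cast<uint8_t*>(isolate) +
      I::kAmountOfExternalAllocatedMemoryOffset);
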
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
@@ -2089,31 +1989,33 @@ bool Isolate::Init(Deserializer* des) {
kDeoptTableSerializeEntryCount - 1);
}
- if (!Serializer::enabled()) {
+ if (!serializer_enabled()) {
// Ensure that all stubs which need to be generated ahead of time, but
// cannot be serialized into the snapshot have been generated.
HandleScope scope(this);
CodeStub::GenerateFPStubs(this);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
StubFailureTrampolineStub::GenerateAheadOfTime(this);
- StubFailureTailCallTrampolineStub::GenerateAheadOfTime(this);
- // TODO(mstarzinger): The following is an ugly hack to make sure the
- // interface descriptor is initialized even when stubs have been
- // deserialized out of the snapshot without the graph builder.
- FastCloneShallowArrayStub stub(FastCloneShallowArrayStub::CLONE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE, 0);
- stub.InitializeInterfaceDescriptor(
- this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
+ // Ensure interface descriptors are initialized even when stubs have been
+ // deserialized out of the snapshot without using the graph builder.
+ FastCloneShallowArrayStub::InstallDescriptors(this);
BinaryOpICStub::InstallDescriptors(this);
- CompareNilICStub::InitializeForIsolate(this);
- ToBooleanStub::InitializeForIsolate(this);
+ BinaryOpWithAllocationSiteStub::InstallDescriptors(this);
+ CompareNilICStub::InstallDescriptors(this);
+ ToBooleanStub::InstallDescriptors(this);
+ ToNumberStub::InstallDescriptors(this);
ArrayConstructorStubBase::InstallDescriptors(this);
InternalArrayConstructorStubBase::InstallDescriptors(this);
FastNewClosureStub::InstallDescriptors(this);
+ FastNewContextStub::InstallDescriptors(this);
NumberToStringStub::InstallDescriptors(this);
- NewStringAddStub::InstallDescriptors(this);
+ StringAddStub::InstallDescriptors(this);
+ RegExpConstructResultStub::InstallDescriptors(this);
+ KeyedLoadGenericElementStub::InstallDescriptors(this);
}
+ CallDescriptors::InitializeForIsolate(this);
+
initialized_from_snapshot_ = (des != NULL);
return true;
@@ -2261,6 +2163,13 @@ Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
}
+bool Isolate::use_crankshaft() const {
+ return FLAG_crankshaft &&
+ !serializer_enabled_ &&
+ CpuFeatures::SupportsCrankshaft();
+}
+
+
bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
Map* root_array_map =
get_initial_js_array_map(GetInitialFastElementsKind());
@@ -2291,6 +2200,13 @@ CodeStubInterfaceDescriptor*
}
+CallInterfaceDescriptor*
+ Isolate::call_descriptor(CallDescriptorKey index) {
+ ASSERT(0 <= index && index < NUMBER_OF_CALL_DESCRIPTORS);
+ return &call_descriptors_[index];
+}
+
+
Object* Isolate::FindCodeObject(Address a) {
return inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(a);
}
@@ -2304,4 +2220,140 @@ ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif
+
+Handle<JSObject> Isolate::GetSymbolRegistry() {
+ if (heap()->symbol_registry()->IsUndefined()) {
+ Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ Handle<JSObject> registry = factory()->NewJSObjectFromMap(map);
+ heap()->set_symbol_registry(*registry);
+
+ static const char* nested[] = {
+ "for", "for_api", "for_intern", "keyFor", "private_api", "private_intern"
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(nested); ++i) {
+ Handle<String> name = factory()->InternalizeUtf8String(nested[i]);
+ Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
+ JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8);
+ JSObject::SetProperty(registry, name, obj, NONE, STRICT).Assert();
+ }
+ }
+ return Handle<JSObject>::cast(factory()->symbol_registry());
+}
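
Illustrative sketch, not part of this diff: reading one of the nested tables
back out of the registry, mirroring the GetProperty pattern used elsewhere in
this patch. GetForRegistry is a made-up helper; error handling is omitted.

  Handle<JSObject> GetForRegistry(Isolate* isolate) {
    Handle<JSObject> registry = isolate->GetSymbolRegistry();
    Handle<String> name = isolate->factory()->InternalizeUtf8String("for");
    return Handle<JSObject>::cast(
        Object::GetProperty(registry, name).ToHandleChecked());
  }
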
+
+
+void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
+ for (int i = 0; i < call_completed_callbacks_.length(); i++) {
+ if (callback == call_completed_callbacks_.at(i)) return;
+ }
+ call_completed_callbacks_.Add(callback);
+}
+
+
+void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
+ for (int i = 0; i < call_completed_callbacks_.length(); i++) {
+ if (callback == call_completed_callbacks_.at(i)) {
+ call_completed_callbacks_.Remove(i);
+ }
+ }
+}
+
+
+void Isolate::FireCallCompletedCallback() {
+ bool has_call_completed_callbacks = !call_completed_callbacks_.is_empty();
+ bool run_microtasks = autorun_microtasks() && pending_microtask_count();
+ if (!has_call_completed_callbacks && !run_microtasks) return;
+
+ if (!handle_scope_implementer()->CallDepthIsZero()) return;
+ if (run_microtasks) RunMicrotasks();
+ // Fire callbacks. Increase call depth to prevent recursive callbacks.
+ v8::Isolate::SuppressMicrotaskExecutionScope suppress(
+ reinterpret_cast<v8::Isolate*>(this));
+ for (int i = 0; i < call_completed_callbacks_.length(); i++) {
+ call_completed_callbacks_.at(i)();
+ }
+}
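
For illustration: the registration side of the hook above. In this V8 line
CallCompletedCallback is assumed to be a plain `void (*)()`; both function
names below are made up.

  static void OnCallCompleted() {
    // Fires when the outermost API call returns, after any pending
    // microtasks have been drained by FireCallCompletedCallback().
  }

  void InstallHook(Isolate* isolate) {
    isolate->AddCallCompletedCallback(&OnCallCompleted);
  }
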
+
+
+void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
+ ASSERT(microtask->IsJSFunction() || microtask->IsCallHandlerInfo());
+ Handle<FixedArray> queue(heap()->microtask_queue(), this);
+ int num_tasks = pending_microtask_count();
+ ASSERT(num_tasks <= queue->length());
+ if (num_tasks == 0) {
+ queue = factory()->NewFixedArray(8);
+ heap()->set_microtask_queue(*queue);
+ } else if (num_tasks == queue->length()) {
+ queue = FixedArray::CopySize(queue, num_tasks * 2);
+ heap()->set_microtask_queue(*queue);
+ }
+ ASSERT(queue->get(num_tasks)->IsUndefined());
+ queue->set(num_tasks, *microtask);
+ set_pending_microtask_count(num_tasks + 1);
+}
+
+
+void Isolate::RunMicrotasks() {
+ // TODO(adamk): This ASSERT triggers in mjsunit tests which
+ // call the %RunMicrotasks runtime function. But it should
+ // never happen outside of tests, so it would be nice to
+ // uncomment it.
+ //
+ // ASSERT(handle_scope_implementer()->CallDepthIsZero());
+
+ // Increase call depth to prevent recursive callbacks.
+ v8::Isolate::SuppressMicrotaskExecutionScope suppress(
+ reinterpret_cast<v8::Isolate*>(this));
+
+ while (pending_microtask_count() > 0) {
+ HandleScope scope(this);
+ int num_tasks = pending_microtask_count();
+ Handle<FixedArray> queue(heap()->microtask_queue(), this);
+ ASSERT(num_tasks <= queue->length());
+ set_pending_microtask_count(0);
+ heap()->set_microtask_queue(heap()->empty_fixed_array());
+
+ for (int i = 0; i < num_tasks; i++) {
+ HandleScope scope(this);
+ Handle<Object> microtask(queue->get(i), this);
+ if (microtask->IsJSFunction()) {
+ Handle<JSFunction> microtask_function =
+ Handle<JSFunction>::cast(microtask);
+ Handle<Object> exception;
+ MaybeHandle<Object> result = Execution::TryCall(
+ microtask_function, factory()->undefined_value(),
+ 0, NULL, &exception);
+ // If execution is terminating, just bail out.
+ if (result.is_null() &&
+ !exception.is_null() &&
+ *exception == heap()->termination_exception()) {
+ // Clear out any remaining callbacks in the queue.
+ heap()->set_microtask_queue(heap()->empty_fixed_array());
+ set_pending_microtask_count(0);
+ return;
+ }
+ } else {
+ Handle<CallHandlerInfo> callback_info =
+ Handle<CallHandlerInfo>::cast(microtask);
+ v8::MicrotaskCallback callback =
+ v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
+ void* data = v8::ToCData<void*>(callback_info->data());
+ callback(data);
+ }
+ }
+ }
+}
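
Illustrative sketch, not part of this diff: the embedder-facing mirror of
this queue, assuming the v8::Isolate microtask entry points of this period
(SetAutorunMicrotasks, EnqueueMicrotask, RunMicrotasks).

  #include "include/v8.h"

  void QueueAndDrain(v8::Isolate* isolate,
                     v8::Handle<v8::Function> microtask) {
    isolate->SetAutorunMicrotasks(false);  // take manual control
    isolate->EnqueueMicrotask(microtask);  // lands in the FixedArray queue
    isolate->RunMicrotasks();              // drained in FIFO order
  }
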
+
+
+bool StackLimitCheck::JsHasOverflowed() const {
+ StackGuard* stack_guard = isolate_->stack_guard();
+#ifdef USE_SIMULATOR
+ // The simulator uses a separate JS stack.
+ Address jssp_address = Simulator::current(isolate_)->get_sp();
+ uintptr_t jssp = reinterpret_cast<uintptr_t>(jssp_address);
+ if (jssp < stack_guard->real_jslimit()) return true;
+#endif // USE_SIMULATOR
+ return reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit();
+}
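
For illustration: the usual call-site shape for this check. The
StackLimitCheck object itself lives on the C++ stack, which is why `this` is
compared against the limit above; GuardedOperation is a made-up function.

  Object* GuardedOperation(Isolate* isolate) {
    StackLimitCheck check(isolate);
    if (check.JsHasOverflowed()) return isolate->StackOverflow();
    // ... safe to recurse or re-enter JavaScript from here ...
    return isolate->heap()->undefined_value();
  }
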
+
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/isolate.h b/chromium/v8/src/isolate.h
index 7ba30883c75..7de73034cde 100644
--- a/chromium/v8/src/isolate.h
+++ b/chromium/v8/src/isolate.h
@@ -1,62 +1,40 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_
-#include "../include/v8-debug.h"
-#include "allocation.h"
-#include "apiutils.h"
-#include "assert-scope.h"
-#include "atomicops.h"
-#include "builtins.h"
-#include "contexts.h"
-#include "execution.h"
-#include "frames.h"
-#include "date.h"
-#include "global-handles.h"
-#include "handles.h"
-#include "hashmap.h"
-#include "heap.h"
-#include "optimizing-compiler-thread.h"
-#include "regexp-stack.h"
-#include "runtime-profiler.h"
-#include "runtime.h"
-#include "zone.h"
+#include "include/v8-debug.h"
+#include "src/allocation.h"
+#include "src/assert-scope.h"
+#include "src/base/atomicops.h"
+#include "src/builtins.h"
+#include "src/contexts.h"
+#include "src/execution.h"
+#include "src/frames.h"
+#include "src/date.h"
+#include "src/global-handles.h"
+#include "src/handles.h"
+#include "src/hashmap.h"
+#include "src/heap.h"
+#include "src/optimizing-compiler-thread.h"
+#include "src/regexp-stack.h"
+#include "src/runtime-profiler.h"
+#include "src/runtime.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
class Bootstrapper;
+struct CallInterfaceDescriptor;
class CodeGenerator;
class CodeRange;
struct CodeStubInterfaceDescriptor;
class CodeTracer;
class CompilationCache;
+class ConsStringIteratorOp;
class ContextSlotCache;
class Counters;
class CpuFeatures;
@@ -73,19 +51,20 @@ class HeapProfiler;
class HStatistics;
class HTracer;
class InlineRuntimeFunctionsTable;
-class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
+class MaterializedObjectStore;
+class NoAllocationStringAllocator;
+class CodeAgingHelper;
class RandomNumberGenerator;
class RegExpStack;
class SaveContext;
-class UnicodeCache;
-class ConsStringIteratorOp;
class StringTracker;
class StubCache;
class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
+class UnicodeCache;
template <StateTag Tag> class VMState;
// 'void function pointer', used to roundtrip the
@@ -94,13 +73,11 @@ template <StateTag Tag> class VMState;
typedef void* ExternalReferenceRedirectorPointer();
-#ifdef ENABLE_DEBUGGER_SUPPORT
class Debug;
class Debugger;
-class DebuggerAgent;
-#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
@@ -115,7 +92,7 @@ class Simulator;
// of handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;
-#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
+#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
do { \
Isolate* __isolate__ = (isolate); \
if (__isolate__->has_scheduled_exception()) { \
@@ -123,32 +100,46 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
} \
} while (false)
-#define RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, T) \
- do { \
- Isolate* __isolate__ = (isolate); \
- if (__isolate__->has_scheduled_exception()) { \
- __isolate__->PromoteScheduledException(); \
- return Handle<T>::null(); \
- } \
+// Macros for MaybeHandle.
+
+#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ if (__isolate__->has_scheduled_exception()) { \
+ __isolate__->PromoteScheduledException(); \
+ return MaybeHandle<T>(); \
+ } \
} while (false)
-#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
- do { \
- if ((call).is_null()) { \
- ASSERT((isolate)->has_pending_exception()); \
- return (value); \
- } \
+#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
+ do { \
+ if (!(call).ToHandle(&dst)) { \
+ ASSERT((isolate)->has_pending_exception()); \
+ return value; \
+ } \
} while (false)
-#define CHECK_NOT_EMPTY_HANDLE(isolate, call) \
- do { \
- ASSERT(!(isolate)->has_pending_exception()); \
- CHECK(!(call).is_null()); \
- CHECK(!(isolate)->has_pending_exception()); \
+#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE( \
+ isolate, dst, call, isolate->heap()->exception())
+
+#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
+
+#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
+ do { \
+ if ((call).is_null()) { \
+ ASSERT((isolate)->has_pending_exception()); \
+ return value; \
+ } \
} while (false)
-#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
+#define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \
+ RETURN_ON_EXCEPTION_VALUE(isolate, call, isolate->heap()->exception())
+
+#define RETURN_ON_EXCEPTION(isolate, call, T) \
+ RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
+
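
For illustration: the intended shape of a MaybeHandle-returning function
written against these macros, reusing the Execution::ToDetailString signature
seen earlier in this patch; Describe is a made-up helper.

  MaybeHandle<Object> Describe(Isolate* isolate, Handle<Object> obj) {
    Handle<Object> text;
    // On failure: asserts a pending exception, returns MaybeHandle<Object>().
    ASSIGN_RETURN_ON_EXCEPTION(
        isolate, text, Execution::ToDetailString(isolate, obj), Object);
    return text;
  }
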
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
C(Handler, handler) \
@@ -200,12 +191,17 @@ class ThreadId {
int id_;
- static Atomic32 highest_thread_id_;
+ static base::Atomic32 highest_thread_id_;
friend class Isolate;
};
+#define FIELD_ACCESSOR(type, name) \
+ inline void set_##name(type v) { name##_ = v; } \
+ inline type name() const { return name##_; }
+
+
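
For reference, FIELD_ACCESSOR(uintptr_t, stack_limit) expands to:

  inline void set_stack_limit(uintptr_t v) { stack_limit_ = v; }
  inline uintptr_t stack_limit() const { return stack_limit_; }
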
class ThreadLocalTop BASE_EMBEDDED {
public:
// Does early low-level initialization that does not depend on the
@@ -217,10 +213,10 @@ class ThreadLocalTop BASE_EMBEDDED {
// Get the top C++ try catch handler or NULL if none are registered.
//
- // This method is not guarenteed to return an address that can be
+ // This method is not guaranteed to return an address that can be
// used for comparison with addresses into the JS stack. If such an
// address is needed, use try_catch_handler_address.
- v8::TryCatch* TryCatchHandler();
+ FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)
// Get the address of the top C++ try catch handler or NULL if
// none are registered.
@@ -232,19 +228,15 @@ class ThreadLocalTop BASE_EMBEDDED {
// stack, try_catch_handler_address returns a JS stack address that
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
- inline Address try_catch_handler_address() {
- return try_catch_handler_address_;
- }
-
- // Set the address of the top C++ try catch handler.
- inline void set_try_catch_handler_address(Address address) {
- try_catch_handler_address_ = address;
+ Address try_catch_handler_address() {
+ return reinterpret_cast<Address>(
+ v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
}
void Free() {
ASSERT(!has_pending_message_);
ASSERT(!external_caught_exception_);
- ASSERT(try_catch_handler_address_ == NULL);
+ ASSERT(try_catch_handler_ == NULL);
}
Isolate* isolate_;
@@ -252,7 +244,7 @@ class ThreadLocalTop BASE_EMBEDDED {
// lookups.
Context* context_;
ThreadId thread_id_;
- MaybeObject* pending_exception_;
+ Object* pending_exception_;
bool has_pending_message_;
bool rethrowing_message_;
Object* pending_message_obj_;
@@ -262,7 +254,7 @@ class ThreadLocalTop BASE_EMBEDDED {
// Use a separate value for scheduled exceptions to preserve the
// invariants that hold about pending_exception. We may want to
// unify them later.
- MaybeObject* scheduled_exception_;
+ Object* scheduled_exception_;
bool external_caught_exception_;
SaveContext* save_context_;
v8::TryCatch* catcher_;
@@ -289,31 +281,34 @@ class ThreadLocalTop BASE_EMBEDDED {
// Head of the list of live LookupResults.
LookupResult* top_lookup_result_;
- // Whether out of memory exceptions should be ignored.
- bool ignore_out_of_memory_;
-
private:
void InitializeInternal();
- Address try_catch_handler_address_;
+ v8::TryCatch* try_catch_handler_;
};
-#ifdef ENABLE_DEBUGGER_SUPPORT
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__)
-#define ISOLATE_DEBUGGER_INIT_LIST(V) \
- V(DebuggerAgent*, debugger_agent_instance, NULL)
+#define ISOLATE_INIT_SIMULATOR_LIST(V) \
+ V(bool, simulator_initialized, false) \
+ V(HashMap*, simulator_i_cache, NULL) \
+ V(Redirection*, simulator_redirection, NULL)
#else
-#define ISOLATE_DEBUGGER_INIT_LIST(V)
+#define ISOLATE_INIT_SIMULATOR_LIST(V)
#endif
+
#ifdef DEBUG
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
V(CommentStatistic, paged_space_comments_statistics, \
- CommentStatistic::kMaxComments + 1)
+ CommentStatistic::kMaxComments + 1) \
+ V(int, code_kind_statistics, Code::NUMBER_OF_KINDS)
#else
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
@@ -340,30 +335,39 @@ typedef List<HeapObject*> DebugObjectCache;
/* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
V(byte*, assembler_spare_buffer, NULL) \
V(FatalErrorCallback, exception_behavior, NULL) \
+ V(LogEventCallback, event_logger, NULL) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
V(int, next_serial_number, 0) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
- V(bool, always_allow_natives_syntax, false) \
/* Part of the state of liveedit. */ \
V(FunctionInfoListener*, active_function_info_listener, NULL) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, NULL) \
V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
V(Object*, string_stream_current_security_token, NULL) \
- /* TODO(isolates): Release this on destruction? */ \
- V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
/* Serializer state. */ \
V(ExternalReferenceTable*, external_reference_table, NULL) \
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
- V(bool, microtask_pending, false) \
+ V(int, pending_microtask_count, 0) \
+ V(bool, autorun_microtasks, true) \
V(HStatistics*, hstatistics, NULL) \
V(HTracer*, htracer, NULL) \
V(CodeTracer*, code_tracer, NULL) \
- ISOLATE_DEBUGGER_INIT_LIST(V)
+ V(bool, fp_stubs_generated, false) \
+ V(int, max_available_threads, 0) \
+ V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
+ V(InterruptCallback, api_interrupt_callback, NULL) \
+ V(void*, api_interrupt_callback_data, NULL) \
+ ISOLATE_INIT_SIMULATOR_LIST(V)
+
+#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
+ inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
+ inline type name() const { return thread_local_top_.name##_; }
+
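
Likewise, THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id) expands to a pair of
accessors that forward to the corresponding thread_local_top_ field:

  inline void set_thread_id(ThreadId v) { thread_local_top_.thread_id_ = v; }
  inline ThreadId thread_id() const { return thread_local_top_.thread_id_; }
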
class Isolate {
// These forward declarations are required to make the friend declarations in
@@ -384,24 +388,23 @@ class Isolate {
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
next_(NULL),
prev_(NULL) { }
+ ~PerIsolateThreadData();
Isolate* isolate() const { return isolate_; }
ThreadId thread_id() const { return thread_id_; }
- void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
- uintptr_t stack_limit() const { return stack_limit_; }
- ThreadState* thread_state() const { return thread_state_; }
- void set_thread_state(ThreadState* value) { thread_state_ = value; }
+
+ FIELD_ACCESSOR(uintptr_t, stack_limit)
+ FIELD_ACCESSOR(ThreadState*, thread_state)
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
- Simulator* simulator() const { return simulator_; }
- void set_simulator(Simulator* simulator) {
- simulator_ = simulator;
- }
+ FIELD_ACCESSOR(Simulator*, simulator)
#endif
bool Matches(Isolate* isolate, ThreadId thread_id) const {
@@ -415,6 +418,7 @@ class Isolate {
ThreadState* thread_state_;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
@@ -477,8 +481,6 @@ class Isolate {
static void GlobalTearDown();
- bool IsDefaultIsolate() const { return this == default_isolate_; }
-
static void SetCrashIfDefaultIsolateInitialized();
// Ensures that process-wide resources and the default isolate have been
// allocated. It is only necessary to call this method in rare cases, for
@@ -494,16 +496,6 @@ class Isolate {
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Get the debugger from the default isolate. Preinitializes the
- // default isolate if needed.
- static Debugger* GetDefaultIsolateDebugger();
-#endif
-
- // Get the stack guard from the default isolate. Preinitializes the
- // default isolate if needed.
- static StackGuard* GetDefaultIsolateStackGuard();
-
// Returns the key used to store the pointer to the current isolate.
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
@@ -518,18 +510,9 @@ class Isolate {
static Thread::LocalStorageKey per_isolate_thread_data_key();
- // If a client attempts to create a Locker without specifying an isolate,
- // we assume that the client is using legacy behavior. Set up the current
- // thread to be inside the implicit isolate (or fail a check if we have
- // switched to non-legacy behavior).
- static void EnterDefaultIsolate();
-
// Mutex for serializing access to break control structures.
RecursiveMutex* break_access() { return &break_access_; }
- // Mutex for serializing access to debugger.
- RecursiveMutex* debugger_access() { return &debugger_access_; }
-
Address get_address_from_id(AddressId id);
// Access to top context (where the current function object was created).
@@ -540,45 +523,46 @@ class Isolate {
}
Context** context_address() { return &thread_local_top_.context_; }
- SaveContext* save_context() { return thread_local_top_.save_context_; }
- void set_save_context(SaveContext* save) {
- thread_local_top_.save_context_ = save;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
// Access to current thread id.
- ThreadId thread_id() { return thread_local_top_.thread_id_; }
- void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
+ THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
// Interface to pending exception.
- MaybeObject* pending_exception() {
+ Object* pending_exception() {
ASSERT(has_pending_exception());
+ ASSERT(!thread_local_top_.pending_exception_->IsException());
return thread_local_top_.pending_exception_;
}
- bool external_caught_exception() {
- return thread_local_top_.external_caught_exception_;
- }
- void set_external_caught_exception(bool value) {
- thread_local_top_.external_caught_exception_ = value;
- }
- void set_pending_exception(MaybeObject* exception) {
- thread_local_top_.pending_exception_ = exception;
+
+ void set_pending_exception(Object* exception_obj) {
+ ASSERT(!exception_obj->IsException());
+ thread_local_top_.pending_exception_ = exception_obj;
}
+
void clear_pending_exception() {
+ ASSERT(!thread_local_top_.pending_exception_->IsException());
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
- MaybeObject** pending_exception_address() {
+
+ Object** pending_exception_address() {
return &thread_local_top_.pending_exception_;
}
+
bool has_pending_exception() {
+ ASSERT(!thread_local_top_.pending_exception_->IsException());
return !thread_local_top_.pending_exception_->IsTheHole();
}
+
+ THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
+
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
thread_local_top_.pending_message_script_ = heap_.the_hole_value();
}
v8::TryCatch* try_catch_handler() {
- return thread_local_top_.TryCatchHandler();
+ return thread_local_top_.try_catch_handler();
}
Address try_catch_handler_address() {
return thread_local_top_.try_catch_handler_address();
@@ -586,14 +570,10 @@ class Isolate {
bool* external_caught_exception_address() {
return &thread_local_top_.external_caught_exception_;
}
- v8::TryCatch* catcher() {
- return thread_local_top_.catcher_;
- }
- void set_catcher(v8::TryCatch* catcher) {
- thread_local_top_.catcher_ = catcher;
- }
- MaybeObject** scheduled_exception_address() {
+ THREAD_LOCAL_TOP_ACCESSOR(v8::TryCatch*, catcher)
+
+ Object** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
}
@@ -610,22 +590,25 @@ class Isolate {
&thread_local_top_.pending_message_script_);
}
- MaybeObject* scheduled_exception() {
+ Object* scheduled_exception() {
ASSERT(has_scheduled_exception());
+ ASSERT(!thread_local_top_.scheduled_exception_->IsException());
return thread_local_top_.scheduled_exception_;
}
bool has_scheduled_exception() {
+ ASSERT(!thread_local_top_.scheduled_exception_->IsException());
return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
}
void clear_scheduled_exception() {
+ ASSERT(!thread_local_top_.scheduled_exception_->IsException());
thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
}
- bool IsExternallyCaught();
+ bool HasExternalTryCatch();
+ bool IsFinallyOnTop();
- bool is_catchable_by_javascript(MaybeObject* exception) {
- return (!exception->IsOutOfMemory()) &&
- (exception != heap()->termination_exception());
+ bool is_catchable_by_javascript(Object* exception) {
+ return exception != heap()->termination_exception();
}
// Serializer.
@@ -680,11 +663,10 @@ class Isolate {
class ExceptionScope {
public:
explicit ExceptionScope(Isolate* isolate) :
- // Scope currently can only be used for regular exceptions, not
- // failures like OOM or termination exception.
+ // Scope currently can only be used for regular exceptions,
+ // not termination exception.
isolate_(isolate),
- pending_exception_(isolate_->pending_exception()->ToObjectUnchecked(),
- isolate_),
+ pending_exception_(isolate_->pending_exception(), isolate_),
catcher_(isolate_->catcher())
{ }
@@ -704,16 +686,6 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
- // Tells whether the current context has experienced an out of memory
- // exception.
- bool is_out_of_memory();
- bool ignore_out_of_memory() {
- return thread_local_top_.ignore_out_of_memory_;
- }
- void set_ignore_out_of_memory(bool value) {
- thread_local_top_.ignore_out_of_memory_ = value;
- }
-
void PrintCurrentStackTrace(FILE* out);
void PrintStack(StringStream* accumulator);
void PrintStack(FILE* out);
@@ -735,35 +707,31 @@ class Isolate {
// the result is false, the pending exception is guaranteed to be
// set.
- // TODO(yangguo): temporary wrappers
- bool MayNamedAccessWrapper(Handle<JSObject> receiver,
- Handle<Object> key,
- v8::AccessType type) {
- return MayNamedAccess(*receiver, *key, type);
- }
- bool MayIndexedAccessWrapper(Handle<JSObject> receiver,
- uint32_t index,
- v8::AccessType type) {
- return MayIndexedAccess(*receiver, index, type);
- }
-
- bool MayNamedAccess(JSObject* receiver,
- Object* key,
+ bool MayNamedAccess(Handle<JSObject> receiver,
+ Handle<Object> key,
v8::AccessType type);
- bool MayIndexedAccess(JSObject* receiver,
+ bool MayIndexedAccess(Handle<JSObject> receiver,
uint32_t index,
v8::AccessType type);
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
- void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
+ void ReportFailedAccessCheck(Handle<JSObject> receiver, v8::AccessType type);
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
- Failure* Throw(Object* exception, MessageLocation* location = NULL);
+ Object* Throw(Object* exception, MessageLocation* location = NULL);
+
+ template <typename T>
+ MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception,
+ MessageLocation* location = NULL) {
+ Throw(*exception, location);
+ return MaybeHandle<T>();
+ }
+
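
For illustration: the templated overload keeps throw sites to a single
statement in MaybeHandle-returning code, following the NewRangeError call
shape used earlier in this patch; the error key below is made up.

  MaybeHandle<Object> FailWithRangeError(Isolate* isolate) {
    return isolate->Throw<Object>(isolate->factory()->NewRangeError(
        "hypothetical_error", HandleVector<Object>(NULL, 0)));
  }
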
// Re-throw an exception. This involves no error reporting since
// error reporting was handled when the exception was thrown
// originally.
- Failure* ReThrow(MaybeObject* exception);
+ Object* ReThrow(Object* exception);
void ScheduleThrow(Object* exception);
// Re-set pending message, script and positions reported to the TryCatch
// back to the TLS for re-use when rethrowing.
@@ -771,10 +739,11 @@ class Isolate {
void ReportPendingMessages();
// Return pending location if any or unfilled structure.
MessageLocation GetMessageLocation();
- Failure* ThrowIllegalOperation();
+ Object* ThrowIllegalOperation();
+ Object* ThrowInvalidStringLength();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
- Failure* PromoteScheduledException();
+ Object* PromoteScheduledException();
void DoThrow(Object* exception, MessageLocation* location);
// Checks if exception should be reported and finds out if it's
// caught externally.
@@ -786,10 +755,12 @@ class Isolate {
void ComputeLocation(MessageLocation* target);
// Out of resource exception helpers.
- Failure* StackOverflow();
- Failure* TerminateExecution();
+ Object* StackOverflow();
+ Object* TerminateExecution();
void CancelTerminateExecution();
+ void InvokeApiInterruptCallback();
+
// Administration
void Iterate(ObjectVisitor* v);
void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
@@ -867,11 +838,11 @@ class Isolate {
Heap* heap() { return &heap_; }
StatsTable* stats_table();
StubCache* stub_cache() { return stub_cache_; }
+ CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
-
- TranscendentalCache* transcendental_cache() const {
- return transcendental_cache_;
+ MaterializedObjectStore* materialized_object_store() {
+ return materialized_object_store_;
}
MemoryAllocator* memory_allocator() {
@@ -890,9 +861,8 @@ class Isolate {
return descriptor_lookup_cache_;
}
- v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
- return &handle_scope_data_;
- }
+ HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
+
HandleScopeImplementer* handle_scope_implementer() {
ASSERT(handle_scope_implementer_);
return handle_scope_implementer_;
@@ -939,12 +909,6 @@ class Isolate {
RuntimeState* runtime_state() { return &runtime_state_; }
- void set_fp_stubs_generated(bool value) {
- fp_stubs_generated_ = value;
- }
-
- bool fp_stubs_generated() { return fp_stubs_generated_; }
-
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
@@ -965,20 +929,8 @@ class Isolate {
return &interp_canonicalize_mapping_;
}
- inline bool IsCodePreAgingActive();
+ Debug* debug() { return debug_; }
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger* debugger() {
- if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
- return debugger_;
- }
- Debug* debug() {
- if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
- return debug_;
- }
-#endif
-
- inline bool IsDebuggerActive();
inline bool DebuggerHasBreakPoints();
CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
@@ -990,48 +942,15 @@ class Isolate {
JSObject::SpillInformation* js_spill_information() {
return &js_spill_information_;
}
-
- int* code_kind_statistics() { return code_kind_statistics_; }
-#endif
-
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
- bool simulator_initialized() { return simulator_initialized_; }
- void set_simulator_initialized(bool initialized) {
- simulator_initialized_ = initialized;
- }
-
- HashMap* simulator_i_cache() { return simulator_i_cache_; }
- void set_simulator_i_cache(HashMap* hash_map) {
- simulator_i_cache_ = hash_map;
- }
-
- Redirection* simulator_redirection() {
- return simulator_redirection_;
- }
- void set_simulator_redirection(Redirection* redirection) {
- simulator_redirection_ = redirection;
- }
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
static const int kJSRegexpStaticOffsetsVectorSize = 128;
- ExternalCallbackScope* external_callback_scope() {
- return thread_local_top_.external_callback_scope_;
- }
- void set_external_callback_scope(ExternalCallbackScope* scope) {
- thread_local_top_.external_callback_scope_ = scope;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
- StateTag current_vm_state() {
- return thread_local_top_.current_vm_state_;
- }
-
- void set_current_vm_state(StateTag state) {
- thread_local_top_.current_vm_state_ = state;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
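THREAD_LOCAL_TOP_ACCESSOR collapses the handwritten getter/setter pairs deleted above into one macro per field. Its presumable expansion, inferred from the accessors it replaces (the actual definition appears earlier in isolate.h and is #undef'd after the class):

    #define THREAD_LOCAL_TOP_ACCESSOR(type, name)                        \
      inline void set_##name(type v) { thread_local_top_.name##_ = v; }  \
      inline type name() const { return thread_local_top_.name##_; }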
void SetData(uint32_t slot, void* data) {
ASSERT(slot < Internals::kNumIsolateDataSlots);
@@ -1042,17 +961,20 @@ class Isolate {
return embedder_data_[slot];
}
- LookupResult* top_lookup_result() {
- return thread_local_top_.top_lookup_result_;
- }
- void SetTopLookupResult(LookupResult* top) {
- thread_local_top_.top_lookup_result_ = top;
+ THREAD_LOCAL_TOP_ACCESSOR(LookupResult*, top_lookup_result)
+
+ void enable_serializer() {
+ // The serializer can only be enabled before the isolate is initialized.
+ ASSERT(state_ != INITIALIZED);
+ serializer_enabled_ = true;
}
+ bool serializer_enabled() const { return serializer_enabled_; }
+
bool IsDead() { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
- bool use_crankshaft() const { return use_crankshaft_; }
+ bool use_crankshaft() const;
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
@@ -1078,6 +1000,17 @@ class Isolate {
CodeStubInterfaceDescriptor*
code_stub_interface_descriptor(int index);
+ enum CallDescriptorKey {
+ KeyedCall,
+ NamedCall,
+ CallHandler,
+ ArgumentAdaptorCall,
+ ApiFunctionCall,
+ NUMBER_OF_CALL_DESCRIPTORS
+ };
+
+ CallInterfaceDescriptor* call_descriptor(CallDescriptorKey index);
+
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1086,10 +1019,6 @@ class Isolate {
bool IsDeferredHandle(Object** location);
#endif // DEBUG
- void set_max_available_threads(int value) {
- max_available_threads_ = value;
- }
-
bool concurrent_recompilation_enabled() {
// Thread is only available with flag enabled.
ASSERT(optimizing_compiler_thread_ == NULL ||
@@ -1116,11 +1045,6 @@ class Isolate {
return sweeper_thread_;
}
- // PreInits and returns a default isolate. Needed when a new thread tries
- // to create a Locker for the first time (the lock itself is in the isolate).
- // TODO(svenpanne) This method is on death row...
- static v8::Isolate* GetDefaultIsolateForLocking();
-
int id() const { return static_cast<int>(id_); }
HStatistics* GetHStatistics();
@@ -1139,6 +1063,24 @@ class Isolate {
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
+ int NextOptimizationId() {
+ int id = next_optimization_id_++;
+ if (!Smi::IsValid(next_optimization_id_)) {
+ next_optimization_id_ = 0;
+ }
+ return id;
+ }
+
+ // Get (and lazily initialize) the registry for per-isolate symbols.
+ Handle<JSObject> GetSymbolRegistry();
+
+ void AddCallCompletedCallback(CallCompletedCallback callback);
+ void RemoveCallCompletedCallback(CallCompletedCallback callback);
+ void FireCallCompletedCallback();
+
+ void EnqueueMicrotask(Handle<Object> microtask);
+ void RunMicrotasks();
+
private:
Isolate();
@@ -1198,18 +1140,16 @@ class Isolate {
DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
};
- // This mutex protects highest_thread_id_, thread_data_table_ and
- // default_isolate_.
+ // This mutex protects highest_thread_id_ and thread_data_table_.
static Mutex process_wide_mutex_;
static Thread::LocalStorageKey per_isolate_thread_data_key_;
static Thread::LocalStorageKey isolate_key_;
static Thread::LocalStorageKey thread_id_key_;
- static Isolate* default_isolate_;
static ThreadDataTable* thread_data_table_;
// A global counter for all generated Isolates; it might overflow.
- static Atomic32 isolate_counter_;
+ static base::Atomic32 isolate_counter_;
void Deinit();
@@ -1240,15 +1180,16 @@ class Isolate {
void FillCache();
- void PropagatePendingExceptionToExternalTryCatch();
-
- void InitializeDebugger();
+ // Propagate the pending exception message to the v8::TryCatch.
+ // Returns true if there is no external try-catch or if the message was
+ // successfully propagated to it.
+ bool PropagatePendingExceptionToExternalTryCatch();
// Traverse prototype chain to find out whether the object is derived from
// the Error object.
bool IsErrorObject(Handle<Object> obj);
- Atomic32 id_;
+ base::Atomic32 id_;
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
@@ -1259,23 +1200,23 @@ class Isolate {
Counters* counters_;
CodeRange* code_range_;
RecursiveMutex break_access_;
- Atomic32 debugger_initialized_;
- RecursiveMutex debugger_access_;
+ base::Atomic32 debugger_initialized_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
StubCache* stub_cache_;
+ CodeAgingHelper* code_aging_helper_;
DeoptimizerData* deoptimizer_data_;
+ MaterializedObjectStore* materialized_object_store_;
ThreadLocalTop thread_local_top_;
bool capture_stack_trace_for_uncaught_exceptions_;
int stack_trace_for_uncaught_exceptions_frame_limit_;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
- TranscendentalCache* transcendental_cache_;
MemoryAllocator* memory_allocator_;
KeyedLookupCache* keyed_lookup_cache_;
ContextSlotCache* context_slot_cache_;
DescriptorLookupCache* descriptor_lookup_cache_;
- v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
+ HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
Zone runtime_zone_;
@@ -1285,7 +1226,6 @@ class Isolate {
EternalHandles* eternal_handles_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
- bool fp_stubs_generated_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
@@ -1300,38 +1240,28 @@ class Isolate {
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
+ CallInterfaceDescriptor* call_descriptors_;
RandomNumberGenerator* random_number_generator_;
+ // Whether the isolate has been created for snapshotting.
+ bool serializer_enabled_;
+
// True if fatal error has been signaled for this isolate.
bool has_fatal_error_;
- // True if we are using the Crankshaft optimizing compiler.
- bool use_crankshaft_;
-
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_;
// Time stamp at initialization.
double time_millis_at_init_;
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
- bool simulator_initialized_;
- HashMap* simulator_i_cache_;
- Redirection* simulator_redirection_;
-#endif
-
#ifdef DEBUG
// A static array of histogram info for each type.
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
- int code_kind_statistics_[Code::NUMBER_OF_KINDS];
#endif
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger* debugger_;
Debug* debug_;
-#endif
CpuProfiler* cpu_profiler_;
HeapProfiler* heap_profiler_;
FunctionEntryHook function_entry_hook_;
@@ -1362,13 +1292,14 @@ class Isolate {
SweeperThread** sweeper_thread_;
int num_sweeper_threads_;
- // TODO(yangguo): This will become obsolete once ResourceConstraints
- // becomes an argument to Isolate constructor.
- int max_available_threads_;
-
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
+ int next_optimization_id_;
+
+ // List of callbacks to invoke when a Call completes.
+ List<CallCompletedCallback> call_completed_callbacks_;
+
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
@@ -1388,6 +1319,10 @@ class Isolate {
};
+#undef FIELD_ACCESSOR
+#undef THREAD_LOCAL_TOP_ACCESSOR
+
+
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
@@ -1455,15 +1390,20 @@ class ExecutionAccess BASE_EMBEDDED {
};
-// Support for checking for stack-overflows in C++ code.
+// Support for checking for stack-overflows.
class StackLimitCheck BASE_EMBEDDED {
public:
explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
- bool HasOverflowed() const {
+ // Use this to check for stack-overflows in C++ code.
+ inline bool HasOverflowed() const {
StackGuard* stack_guard = isolate_->stack_guard();
- return (reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit());
+ return reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit();
}
+
+ // Use this to check for stack-overflow when entering runtime from JS code.
+ bool JsHasOverflowed() const;
+
private:
Isolate* isolate_;
};
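StackLimitCheck works because the checker object itself lives on the C++ stack: the address of `this` approximates the current stack pointer, so comparing it against the guard's real limit detects imminent overflow without a platform-specific SP read. A sketch of the usual pattern in recursive code (the function name is illustrative; StackPush in json-stringifier.h below follows the same shape):

    Object* SerializeDeep(Isolate* isolate, Handle<Object> obj) {
      StackLimitCheck check(isolate);
      // Turn a looming C++ stack overflow into a pending JS RangeError.
      if (check.HasOverflowed()) return isolate->StackOverflow();
      // ... recurse into the children of *obj and return the result ...
    }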
@@ -1476,32 +1416,24 @@ class StackLimitCheck BASE_EMBEDDED {
class PostponeInterruptsScope BASE_EMBEDDED {
public:
explicit PostponeInterruptsScope(Isolate* isolate)
- : stack_guard_(isolate->stack_guard()) {
+ : stack_guard_(isolate->stack_guard()), isolate_(isolate) {
+ ExecutionAccess access(isolate_);
stack_guard_->thread_local_.postpone_interrupts_nesting_++;
stack_guard_->DisableInterrupts();
}
~PostponeInterruptsScope() {
+ ExecutionAccess access(isolate_);
if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
stack_guard_->EnableInterrupts();
}
}
private:
StackGuard* stack_guard_;
+ Isolate* isolate_;
};
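PostponeInterruptsScope is a plain RAII guard: interrupts stay disabled for the lifetime of the object and are re-enabled when the outermost scope unwinds, and the ExecutionAccess lock added above makes the nesting counter safe against concurrent stack-guard users. Usage mirrors RegExpImpl::Compile further down in this patch:

    {
      PostponeInterruptsScope postpone(isolate);
      // ... work that must not be interrupted, e.g. regexp parsing ...
    }  // Interrupts are re-enabled here once the nesting count reaches zero.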
-// Tells whether the native context is marked with out of memory.
-inline bool Context::has_out_of_memory() {
- return native_context()->out_of_memory()->IsTrue();
-}
-
-
-// Mark the native context with out of memory.
-inline void Context::mark_out_of_memory() {
- native_context()->set_out_of_memory(GetIsolate()->heap()->true_value());
-}
-
class CodeTracer V8_FINAL : public Malloced {
public:
explicit CodeTracer(int isolate_id)
@@ -1513,12 +1445,12 @@ class CodeTracer V8_FINAL : public Malloced {
}
if (FLAG_redirect_code_traces_to == NULL) {
- OS::SNPrintF(filename_,
- "code-%d-%d.asm",
- OS::GetCurrentProcessId(),
- isolate_id);
+ SNPrintF(filename_,
+ "code-%d-%d.asm",
+ OS::GetCurrentProcessId(),
+ isolate_id);
} else {
- OS::StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
+ StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
}
WriteChars(filename_.start(), "", 0, false);
diff --git a/chromium/v8/src/json-parser.h b/chromium/v8/src/json-parser.h
index 72c69100d16..60855a0a801 100644
--- a/chromium/v8/src/json-parser.h
+++ b/chromium/v8/src/json-parser.h
@@ -1,40 +1,17 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_JSON_PARSER_H_
#define V8_JSON_PARSER_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "char-predicates-inl.h"
-#include "v8conversions.h"
-#include "messages.h"
-#include "spaces-inl.h"
-#include "token.h"
+#include "src/char-predicates-inl.h"
+#include "src/conversions.h"
+#include "src/messages.h"
+#include "src/spaces-inl.h"
+#include "src/token.h"
namespace v8 {
namespace internal {
@@ -43,7 +20,7 @@ namespace internal {
template <bool seq_ascii>
class JsonParser BASE_EMBEDDED {
public:
- static Handle<Object> Parse(Handle<String> source) {
+ MUST_USE_RESULT static MaybeHandle<Object> Parse(Handle<String> source) {
return JsonParser(source).ParseJson();
}
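With MUST_USE_RESULT, callers can no longer silently drop a parse failure; the empty MaybeHandle must be checked. A hedged sketch of the consuming pattern (the runtime's actual call site may differ, e.g. in which seq_ascii instantiation it picks):

    Handle<Object> result;
    if (!JsonParser<true>::Parse(source).ToHandle(&result)) {
      // The parser threw: a pending exception is already set on the isolate.
      return MaybeHandle<Object>();
    }
    // ... use *result ...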
@@ -59,7 +36,7 @@ class JsonParser BASE_EMBEDDED {
object_constructor_(isolate_->native_context()->object_function(),
isolate_),
position_(-1) {
- FlattenString(source_);
+ source_ = String::Flatten(source_);
pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
// Optimized fast case where we only have ASCII characters.
@@ -69,7 +46,7 @@ class JsonParser BASE_EMBEDDED {
}
// Parse a string containing a single JSON value.
- Handle<Object> ParseJson();
+ MaybeHandle<Object> ParseJson();
inline void Advance() {
position_++;
@@ -219,7 +196,7 @@ class JsonParser BASE_EMBEDDED {
};
template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJson() {
+MaybeHandle<Object> JsonParser<seq_ascii>::ParseJson() {
// Advance to the first character (possibly EOS)
AdvanceSkipWhitespace();
Handle<Object> result = ParseJsonValue();
@@ -257,8 +234,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson() {
break;
default:
message = "unexpected_token";
- Handle<Object> name =
- LookupSingleCharacterStringFromCode(isolate_, c0_);
+ Handle<Object> name = factory->LookupSingleCharacterStringFromCode(c0_);
Handle<FixedArray> element = factory->NewFixedArray(1);
element->set(0, *name);
array = factory->NewJSArrayWithElements(element);
@@ -268,9 +244,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson() {
MessageLocation location(factory->NewScript(source_),
position_,
position_ + 1);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
- return Handle<Object>::null();
+ Handle<Object> error = factory->NewSyntaxError(message, array);
+ return isolate()->template Throw<Object>(error, &location);
}
return result;
}
@@ -361,7 +336,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
Handle<Object> value = ParseJsonValue();
if (value.is_null()) return ReportUnexpectedCharacter();
- JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
+ JSObject::SetOwnElement(json_object, index, value, SLOPPY).Assert();
continue;
}
// Not an index; fall back to the slow path.
@@ -414,12 +389,18 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
if (value->FitsRepresentation(expected_representation)) {
// If the target representation is double and the value is already
// double, use the existing box.
- if (FLAG_track_double_fields &&
- value->IsSmi() &&
- expected_representation.IsDouble()) {
+ if (value->IsSmi() && expected_representation.IsDouble()) {
value = factory()->NewHeapNumber(
Handle<Smi>::cast(value)->value());
+ } else if (expected_representation.IsHeapObject() &&
+ !target->instance_descriptors()->GetFieldType(
+ descriptor)->NowContains(value)) {
+ Handle<HeapType> value_type(value->OptimalType(
+ isolate(), expected_representation));
+ Map::GeneralizeFieldType(target, descriptor, value_type);
}
+ ASSERT(target->instance_descriptors()->GetFieldType(
+ descriptor)->NowContains(value));
properties.Add(value, zone());
map = target;
continue;
@@ -433,7 +414,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
int length = properties.length();
for (int i = 0; i < length; i++) {
Handle<Object> value = properties[i];
- json_object->FastPropertyAtPut(i, *value);
+ FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
+ json_object->FastPropertyAtPut(index, *value);
}
} else {
key = ParseJsonInternalizedString();
@@ -444,8 +426,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
if (value.is_null()) return ReportUnexpectedCharacter();
}
- JSObject::SetLocalPropertyIgnoreAttributes(
- json_object, key, value, NONE);
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ json_object, key, value, NONE).Assert();
} while (MatchSkipWhiteSpace(','));
if (c0_ != '}') {
return ReportUnexpectedCharacter();
@@ -457,7 +439,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
int length = properties.length();
for (int i = 0; i < length; i++) {
Handle<Object> value = properties[i];
- json_object->FastPropertyAtPut(i, *value);
+ FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
+ json_object->FastPropertyAtPut(index, *value);
}
}
}
@@ -543,17 +526,16 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
if (seq_ascii) {
Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
number = StringToDouble(isolate()->unicode_cache(),
- Vector<const char>::cast(chars),
- NO_FLAGS, // Hex, octal or trailing junk.
- OS::nan_value());
+ chars,
+ NO_FLAGS, // Hex, octal or trailing junk.
+ OS::nan_value());
} else {
Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
Vector<const uint8_t> result =
Vector<const uint8_t>(buffer.start(), length);
number = StringToDouble(isolate()->unicode_cache(),
- // TODO(dcarney): Convert StringToDouble to uint_t.
- Vector<const char>::cast(result),
+ result,
NO_FLAGS, // Hex, octal or trailing junk.
0.0);
buffer.Dispose();
@@ -585,14 +567,14 @@ template <>
inline Handle<SeqTwoByteString> NewRawString(Factory* factory,
int length,
PretenureFlag pretenure) {
- return factory->NewRawTwoByteString(length, pretenure);
+ return factory->NewRawTwoByteString(length, pretenure).ToHandleChecked();
}
template <>
inline Handle<SeqOneByteString> NewRawString(Factory* factory,
int length,
PretenureFlag pretenure) {
- return factory->NewRawOneByteString(length, pretenure);
+ return factory->NewRawOneByteString(length, pretenure).ToHandleChecked();
}
@@ -794,7 +776,8 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
}
} while (c0_ != '"');
int length = position_ - beg_pos;
- Handle<String> result = factory()->NewRawOneByteString(length, pretenure_);
+ Handle<String> result =
+ factory()->NewRawOneByteString(length, pretenure_).ToHandleChecked();
uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
String::WriteToFlat(*source_, dest, beg_pos, position_);
diff --git a/chromium/v8/src/json-stringifier.h b/chromium/v8/src/json-stringifier.h
index 4510c4b45b6..03461d72353 100644
--- a/chromium/v8/src/json-stringifier.h
+++ b/chromium/v8/src/json-stringifier.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_JSON_STRINGIFIER_H_
#define V8_JSON_STRINGIFIER_H_
-#include "v8.h"
-#include "v8utils.h"
-#include "v8conversions.h"
+#include "src/v8.h"
+#include "src/conversions.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -39,17 +16,20 @@ class BasicJsonStringifier BASE_EMBEDDED {
public:
explicit BasicJsonStringifier(Isolate* isolate);
- MaybeObject* Stringify(Handle<Object> object);
+ MUST_USE_RESULT MaybeHandle<Object> Stringify(Handle<Object> object);
- INLINE(static MaybeObject* StringifyString(Isolate* isolate,
- Handle<String> object));
+ MUST_USE_RESULT INLINE(static MaybeHandle<Object> StringifyString(
+ Isolate* isolate,
+ Handle<String> object));
private:
static const int kInitialPartLength = 32;
static const int kMaxPartLength = 16 * 1024;
static const int kPartLengthGrowthFactor = 2;
- enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW };
+ enum Result { UNCHANGED, SUCCESS, EXCEPTION };
+
+ void Accumulate();
void Extend();
@@ -79,8 +59,9 @@ class BasicJsonStringifier BASE_EMBEDDED {
}
}
- Handle<Object> ApplyToJsonFunction(Handle<Object> object,
- Handle<Object> key);
+ MUST_USE_RESULT MaybeHandle<Object> ApplyToJsonFunction(
+ Handle<Object> object,
+ Handle<Object> key);
Result SerializeGeneric(Handle<Object> object,
Handle<Object> key,
@@ -88,9 +69,9 @@ class BasicJsonStringifier BASE_EMBEDDED {
bool deferred_key);
template <typename ResultType, typename Char>
- INLINE(static MaybeObject* StringifyString_(Isolate* isolate,
- Vector<Char> vector,
- Handle<String> result));
+ INLINE(static Handle<String> StringifyString_(Isolate* isolate,
+ Vector<Char> vector,
+ Handle<String> result));
// Entry point to serialize the object.
INLINE(Result SerializeObject(Handle<Object> obj)) {
@@ -138,7 +119,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
INLINE(Result SerializeJSArray(Handle<JSArray> object));
INLINE(Result SerializeJSObject(Handle<JSObject> object));
- Result SerializeJSArraySlow(Handle<JSArray> object, int length);
+ Result SerializeJSArraySlow(Handle<JSArray> object, uint32_t length);
void SerializeString(Handle<String> object);
@@ -178,6 +159,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
int current_index_;
int part_length_;
bool is_ascii_;
+ bool overflowed_;
static const int kJsonEscapeTableEntrySize = 8;
static const char* const JsonEscapeTable;
@@ -254,37 +236,39 @@ const char* const BasicJsonStringifier::JsonEscapeTable =
BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
- : isolate_(isolate), current_index_(0), is_ascii_(true) {
+ : isolate_(isolate),
+ current_index_(0),
+ is_ascii_(true),
+ overflowed_(false) {
factory_ = isolate_->factory();
accumulator_store_ = Handle<JSValue>::cast(
- factory_->ToObject(factory_->empty_string()));
+ Object::ToObject(isolate, factory_->empty_string()).ToHandleChecked());
part_length_ = kInitialPartLength;
- current_part_ = factory_->NewRawOneByteString(part_length_);
+ current_part_ = factory_->NewRawOneByteString(part_length_).ToHandleChecked();
tojson_string_ = factory_->toJSON_string();
stack_ = factory_->NewJSArray(8);
}
-MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) {
- switch (SerializeObject(object)) {
- case UNCHANGED:
- return isolate_->heap()->undefined_value();
- case SUCCESS:
- ShrinkCurrentPart();
- return *factory_->NewConsString(accumulator(), current_part_);
- case CIRCULAR:
- return isolate_->Throw(*factory_->NewTypeError(
- "circular_structure", HandleVector<Object>(NULL, 0)));
- case STACK_OVERFLOW:
- return isolate_->StackOverflow();
- default:
- return Failure::Exception();
+MaybeHandle<Object> BasicJsonStringifier::Stringify(Handle<Object> object) {
+ Result result = SerializeObject(object);
+ if (result == UNCHANGED) return isolate_->factory()->undefined_value();
+ if (result == SUCCESS) {
+ ShrinkCurrentPart();
+ Accumulate();
+ if (overflowed_) {
+ return isolate_->Throw<Object>(
+ isolate_->factory()->NewInvalidStringLengthError());
+ }
+ return accumulator();
}
+ ASSERT(result == EXCEPTION);
+ return MaybeHandle<Object>();
}
-MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
- Handle<String> object) {
+MaybeHandle<Object> BasicJsonStringifier::StringifyString(
+ Isolate* isolate, Handle<String> object) {
static const int kJsonQuoteWorstCaseBlowup = 6;
static const int kSpaceForQuotes = 2;
int worst_case_length =
@@ -295,19 +279,19 @@ MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
return stringifier.Stringify(object);
}
- FlattenString(object);
+ object = String::Flatten(object);
ASSERT(object->IsFlat());
if (object->IsOneByteRepresentationUnderneath()) {
- Handle<String> result =
- isolate->factory()->NewRawOneByteString(worst_case_length);
+ Handle<String> result = isolate->factory()->NewRawOneByteString(
+ worst_case_length).ToHandleChecked();
DisallowHeapAllocation no_gc;
return StringifyString_<SeqOneByteString>(
isolate,
object->GetFlatContent().ToOneByteVector(),
result);
} else {
- Handle<String> result =
- isolate->factory()->NewRawTwoByteString(worst_case_length);
+ Handle<String> result = isolate->factory()->NewRawTwoByteString(
+ worst_case_length).ToHandleChecked();
DisallowHeapAllocation no_gc;
return StringifyString_<SeqTwoByteString>(
isolate,
@@ -318,9 +302,9 @@ MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
template <typename ResultType, typename Char>
-MaybeObject* BasicJsonStringifier::StringifyString_(Isolate* isolate,
- Vector<Char> vector,
- Handle<String> result) {
+Handle<String> BasicJsonStringifier::StringifyString_(Isolate* isolate,
+ Vector<Char> vector,
+ Handle<String> result) {
DisallowHeapAllocation no_gc;
int final_size = 0;
ResultType* dest = ResultType::cast(*result);
@@ -329,7 +313,7 @@ MaybeObject* BasicJsonStringifier::StringifyString_(Isolate* isolate,
dest->GetChars() + 1,
vector.length());
dest->Set(final_size++, '\"');
- return *SeqString::Truncate(Handle<SeqString>::cast(result), final_size);
+ return SeqString::Truncate(Handle<SeqString>::cast(result), final_size);
}
@@ -352,25 +336,21 @@ void BasicJsonStringifier::Append_(const Char* chars) {
}
-Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
+MaybeHandle<Object> BasicJsonStringifier::ApplyToJsonFunction(
Handle<Object> object, Handle<Object> key) {
- LookupResult lookup(isolate_);
- JSObject::cast(*object)->LookupRealNamedProperty(*tojson_string_, &lookup);
- if (!lookup.IsProperty()) return object;
- PropertyAttributes attr;
- Handle<Object> fun =
- Object::GetProperty(object, object, &lookup, tojson_string_, &attr);
- if (fun.is_null()) return Handle<Object>::null();
+ LookupIterator it(object, tojson_string_, LookupIterator::SKIP_INTERCEPTOR);
+ Handle<Object> fun;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
if (!fun->IsJSFunction()) return object;
// Call toJSON function.
if (key->IsSmi()) key = factory_->NumberToString(key);
Handle<Object> argv[] = { key };
- bool has_exception = false;
HandleScope scope(isolate_);
- object = Execution::Call(isolate_, fun, object, 1, argv, &has_exception);
- // Return empty handle to signal an exception.
- if (has_exception) return Handle<Object>::null();
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, object,
+ Execution::Call(isolate_, fun, object, 1, argv),
+ Object);
return scope.CloseAndEscape(object);
}
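ASSIGN_RETURN_ON_EXCEPTION and its _VALUE sibling, used heavily from here on, package the ToHandle-or-bail pattern. Their presumable shape, inferred from the call sites in this patch (the real definitions live in isolate.h and may carry extra assertions):

    #define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
      do {                                                              \
        if (!(call).ToHandle(&dst)) return (value);                     \
      } while (false)

    #define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
      ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())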
@@ -378,16 +358,25 @@ Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
Handle<Object> object) {
StackLimitCheck check(isolate_);
- if (check.HasOverflowed()) return STACK_OVERFLOW;
+ if (check.HasOverflowed()) {
+ isolate_->StackOverflow();
+ return EXCEPTION;
+ }
int length = Smi::cast(stack_->length())->value();
- FixedArray* elements = FixedArray::cast(stack_->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == *object) {
- return CIRCULAR;
+ {
+ DisallowHeapAllocation no_allocation;
+ FixedArray* elements = FixedArray::cast(stack_->elements());
+ for (int i = 0; i < length; i++) {
+ if (elements->get(i) == *object) {
+ AllowHeapAllocation allow_to_return_error;
+ isolate_->Throw(*factory_->NewTypeError(
+ "circular_structure", HandleVector<Object>(NULL, 0)));
+ return EXCEPTION;
+ }
}
}
- stack_->EnsureSize(length + 1);
+ JSArray::EnsureSize(stack_, length + 1);
FixedArray::cast(stack_->elements())->set(length, *object);
stack_->set_length(Smi::FromInt(length + 1));
return SUCCESS;
@@ -404,8 +393,10 @@ template <bool deferred_string_key>
BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
Handle<Object> object, bool comma, Handle<Object> key) {
if (object->IsJSObject()) {
- object = ApplyToJsonFunction(object, key);
- if (object.is_null()) return EXCEPTION;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, object,
+ ApplyToJsonFunction(object, key),
+ EXCEPTION);
}
if (object->IsSmi()) {
@@ -464,15 +455,16 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
Handle<Object> key,
bool deferred_comma,
bool deferred_key) {
- Handle<JSObject> builtins(isolate_->native_context()->builtins());
- Handle<JSFunction> builtin =
- Handle<JSFunction>::cast(GetProperty(builtins, "JSONSerializeAdapter"));
+ Handle<JSObject> builtins(isolate_->native_context()->builtins(), isolate_);
+ Handle<JSFunction> builtin = Handle<JSFunction>::cast(Object::GetProperty(
+ isolate_, builtins, "JSONSerializeAdapter").ToHandleChecked());
Handle<Object> argv[] = { key, object };
- bool has_exception = false;
- Handle<Object> result =
- Execution::Call(isolate_, builtin, object, 2, argv, &has_exception);
- if (has_exception) return EXCEPTION;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, result,
+ Execution::Call(isolate_, builtin, object, 2, argv),
+ EXCEPTION);
if (result->IsUndefined()) return UNCHANGED;
if (deferred_key) {
if (key->IsSmi()) key = factory_->NumberToString(key);
@@ -486,24 +478,28 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
part_length_ = kInitialPartLength; // Allocate conservatively.
Extend(); // Attach current part and allocate new part.
// Attach result string to the accumulator.
- set_accumulator(factory_->NewConsString(accumulator(), result_string));
+ Handle<String> cons;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, cons,
+ factory_->NewConsString(accumulator(), result_string),
+ EXCEPTION);
+ set_accumulator(cons);
return SUCCESS;
}
BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
Handle<JSValue> object) {
- bool has_exception = false;
String* class_name = object->class_name();
if (class_name == isolate_->heap()->String_string()) {
- Handle<Object> value =
- Execution::ToString(isolate_, object, &has_exception);
- if (has_exception) return EXCEPTION;
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, value, Execution::ToString(isolate_, object), EXCEPTION);
SerializeString(Handle<String>::cast(value));
} else if (class_name == isolate_->heap()->Number_string()) {
- Handle<Object> value =
- Execution::ToNumber(isolate_, object, &has_exception);
- if (has_exception) return EXCEPTION;
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, value, Execution::ToNumber(isolate_, object), EXCEPTION);
if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
SerializeHeapNumber(Handle<HeapNumber>::cast(value));
} else {
@@ -544,22 +540,25 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
HandleScope handle_scope(isolate_);
Result stack_push = StackPush(object);
if (stack_push != SUCCESS) return stack_push;
- int length = Smi::cast(object->length())->value();
+ uint32_t length = 0;
+ CHECK(object->length()->ToArrayIndex(&length));
Append('[');
switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS: {
Handle<FixedArray> elements(
FixedArray::cast(object->elements()), isolate_);
- for (int i = 0; i < length; i++) {
+ for (uint32_t i = 0; i < length; i++) {
if (i > 0) Append(',');
SerializeSmi(Smi::cast(elements->get(i)));
}
break;
}
case FAST_DOUBLE_ELEMENTS: {
+ // An empty array's backing store is a FixedArray, not a FixedDoubleArray.
+ if (length == 0) break;
Handle<FixedDoubleArray> elements(
FixedDoubleArray::cast(object->elements()), isolate_);
- for (int i = 0; i < length; i++) {
+ for (uint32_t i = 0; i < length; i++) {
if (i > 0) Append(',');
SerializeDouble(elements->get_scalar(i));
}
@@ -568,7 +567,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
case FAST_ELEMENTS: {
Handle<FixedArray> elements(
FixedArray::cast(object->elements()), isolate_);
- for (int i = 0; i < length; i++) {
+ for (uint32_t i = 0; i < length; i++) {
if (i > 0) Append(',');
Result result =
SerializeElement(isolate_,
@@ -600,11 +599,14 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
- Handle<JSArray> object, int length) {
- for (int i = 0; i < length; i++) {
+ Handle<JSArray> object, uint32_t length) {
+ for (uint32_t i = 0; i < length; i++) {
if (i > 0) Append(',');
- Handle<Object> element = Object::GetElement(isolate_, object, i);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, element, EXCEPTION);
+ Handle<Object> element;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, element,
+ Object::GetElement(isolate_, object, i),
+ EXCEPTION);
if (element->IsUndefined()) {
AppendAscii("null");
} else {
@@ -649,48 +651,51 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
if (details.IsDontEnum()) continue;
Handle<Object> property;
if (details.type() == FIELD && *map == object->map()) {
- property = Handle<Object>(
- object->RawFastPropertyAt(
- map->instance_descriptors()->GetFieldIndex(i)),
- isolate_);
+ property = Handle<Object>(object->RawFastPropertyAt(
+ FieldIndex::ForDescriptor(*map, i)), isolate_);
} else {
- property = GetProperty(isolate_, object, key);
- if (property.is_null()) return EXCEPTION;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, property,
+ Object::GetPropertyOrElement(object, key),
+ EXCEPTION);
}
Result result = SerializeProperty(property, comma, key);
if (!comma && result == SUCCESS) comma = true;
- if (result >= EXCEPTION) return result;
+ if (result == EXCEPTION) return result;
}
} else {
- bool has_exception = false;
- Handle<FixedArray> contents =
- GetKeysInFixedArrayFor(object, LOCAL_ONLY, &has_exception);
- if (has_exception) return EXCEPTION;
+ Handle<FixedArray> contents;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, contents,
+ JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY),
+ EXCEPTION);
for (int i = 0; i < contents->length(); i++) {
Object* key = contents->get(i);
Handle<String> key_handle;
- Handle<Object> property;
+ MaybeHandle<Object> maybe_property;
if (key->IsString()) {
key_handle = Handle<String>(String::cast(key), isolate_);
- property = GetProperty(isolate_, object, key_handle);
+ maybe_property = Object::GetPropertyOrElement(object, key_handle);
} else {
ASSERT(key->IsNumber());
key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
uint32_t index;
if (key->IsSmi()) {
- property = Object::GetElement(
+ maybe_property = Object::GetElement(
isolate_, object, Smi::cast(key)->value());
} else if (key_handle->AsArrayIndex(&index)) {
- property = Object::GetElement(isolate_, object, index);
+ maybe_property = Object::GetElement(isolate_, object, index);
} else {
- property = GetProperty(isolate_, object, key_handle);
+ maybe_property = Object::GetPropertyOrElement(object, key_handle);
}
}
- if (property.is_null()) return EXCEPTION;
+ Handle<Object> property;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, property, maybe_property, EXCEPTION);
Result result = SerializeProperty(property, comma, key_handle);
if (!comma && result == SUCCESS) comma = true;
- if (result >= EXCEPTION) return result;
+ if (result == EXCEPTION) return result;
}
}
@@ -708,24 +713,41 @@ void BasicJsonStringifier::ShrinkCurrentPart() {
}
+void BasicJsonStringifier::Accumulate() {
+ if (accumulator()->length() + current_part_->length() > String::kMaxLength) {
+ // There is no way to propagate an exception from this depth: Extend()
+ // and ChangeEncoding() return void. Record the overflow, carry on with
+ // an empty accumulator, and throw once at the end of Stringify().
+ set_accumulator(factory_->empty_string());
+ overflowed_ = true;
+ } else {
+ set_accumulator(factory_->NewConsString(accumulator(),
+ current_part_).ToHandleChecked());
+ }
+}
+
+
void BasicJsonStringifier::Extend() {
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ Accumulate();
if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
part_length_ *= kPartLengthGrowthFactor;
}
if (is_ascii_) {
- current_part_ = factory_->NewRawOneByteString(part_length_);
+ current_part_ =
+ factory_->NewRawOneByteString(part_length_).ToHandleChecked();
} else {
- current_part_ = factory_->NewRawTwoByteString(part_length_);
+ current_part_ =
+ factory_->NewRawTwoByteString(part_length_).ToHandleChecked();
}
+ ASSERT(!current_part_.is_null());
current_index_ = 0;
}
void BasicJsonStringifier::ChangeEncoding() {
ShrinkCurrentPart();
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
- current_part_ = factory_->NewRawTwoByteString(part_length_);
+ Accumulate();
+ current_part_ =
+ factory_->NewRawTwoByteString(part_length_).ToHandleChecked();
+ ASSERT(!current_part_.is_null());
current_index_ = 0;
is_ascii_ = false;
}
@@ -834,7 +856,7 @@ Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
void BasicJsonStringifier::SerializeString(Handle<String> object) {
- object = FlattenGetString(object);
+ object = String::Flatten(object);
if (is_ascii_) {
if (object->IsOneByteRepresentationUnderneath()) {
SerializeString_<true, uint8_t>(object);
diff --git a/chromium/v8/src/json.js b/chromium/v8/src/json.js
index c21e6351d45..f767f4a195e 100644
--- a/chromium/v8/src/json.js
+++ b/chromium/v8/src/json.js
@@ -1,29 +1,8 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
// This file relies on the fact that the following declarations have been made
// in runtime.js:
@@ -210,6 +189,28 @@ function JSONStringify(value, replacer, space) {
} else {
gap = "";
}
+ if (IS_ARRAY(replacer)) {
+ // Deduplicate replacer array items.
+ var property_list = new InternalArray();
+ var seen_properties = { __proto__: null };
+ var seen_sentinel = {};
+ var length = replacer.length;
+ for (var i = 0; i < length; i++) {
+ var item = replacer[i];
+ if (IS_STRING_WRAPPER(item)) {
+ item = ToString(item);
+ } else {
+ if (IS_NUMBER_WRAPPER(item)) item = ToNumber(item);
+ if (IS_NUMBER(item)) item = %_NumberToString(item);
+ }
+ if (IS_STRING(item) && seen_properties[item] != seen_sentinel) {
+ property_list.push(item);
+ // We cannot use true here because __proto__ needs to be an object.
+ seen_properties[item] = seen_sentinel;
+ }
+ }
+ replacer = property_list;
+ }
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
diff --git a/chromium/v8/src/jsregexp-inl.h b/chromium/v8/src/jsregexp-inl.h
index 3ef07d8c540..86fe1d6db1c 100644
--- a/chromium/v8/src/jsregexp-inl.h
+++ b/chromium/v8/src/jsregexp-inl.h
@@ -1,39 +1,16 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_JSREGEXP_INL_H_
#define V8_JSREGEXP_INL_H_
-#include "allocation.h"
-#include "handles.h"
-#include "heap.h"
-#include "jsregexp.h"
-#include "objects.h"
+#include "src/allocation.h"
+#include "src/handles.h"
+#include "src/heap.h"
+#include "src/jsregexp.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/jsregexp.cc b/chromium/v8/src/jsregexp.cc
index 1f3f2a172ab..8f378a6064b 100644
--- a/chromium/v8/src/jsregexp.cc
+++ b/chromium/v8/src/jsregexp.cc
@@ -1,77 +1,57 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "execution.h"
-#include "factory.h"
-#include "jsregexp.h"
-#include "jsregexp-inl.h"
-#include "platform.h"
-#include "string-search.h"
-#include "runtime.h"
-#include "compilation-cache.h"
-#include "string-stream.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-macro-assembler-tracer.h"
-#include "regexp-macro-assembler-irregexp.h"
-#include "regexp-stack.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/compiler.h"
+#include "src/execution.h"
+#include "src/factory.h"
+#include "src/jsregexp.h"
+#include "src/jsregexp-inl.h"
+#include "src/platform.h"
+#include "src/string-search.h"
+#include "src/runtime.h"
+#include "src/compilation-cache.h"
+#include "src/string-stream.h"
+#include "src/parser.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-macro-assembler-tracer.h"
+#include "src/regexp-macro-assembler-irregexp.h"
+#include "src/regexp-stack.h"
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
-#include "ia32/regexp-macro-assembler-ia32.h"
+#include "src/ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/regexp-macro-assembler-x64.h"
+#include "src/x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/regexp-macro-assembler-arm.h"
+#include "src/arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/regexp-macro-assembler-mips.h"
+#include "src/mips/regexp-macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/regexp-macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
#endif
-#include "interpreter-irregexp.h"
+#include "src/interpreter-irregexp.h"
namespace v8 {
namespace internal {
-Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags,
- bool* has_pending_exception) {
+MaybeHandle<Object> RegExpImpl::CreateRegExpLiteral(
+ Handle<JSFunction> constructor,
+ Handle<String> pattern,
+ Handle<String> flags) {
// Call the construct code with 2 arguments.
Handle<Object> argv[] = { pattern, flags };
- return Execution::New(constructor, ARRAY_SIZE(argv), argv,
- has_pending_exception);
+ return Execution::New(constructor, ARRAY_SIZE(argv), argv);
}
@@ -94,10 +74,12 @@ static JSRegExp::Flags RegExpFlagsFromString(Handle<String> str) {
}
-static inline void ThrowRegExpException(Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> error_text,
- const char* message) {
+MUST_USE_RESULT
+static inline MaybeHandle<Object> ThrowRegExpException(
+ Handle<JSRegExp> re,
+ Handle<String> pattern,
+ Handle<String> error_text,
+ const char* message) {
Isolate* isolate = re->GetIsolate();
Factory* factory = isolate->factory();
Handle<FixedArray> elements = factory->NewFixedArray(2);
@@ -105,7 +87,7 @@ static inline void ThrowRegExpException(Handle<JSRegExp> re,
elements->set(1, *error_text);
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
Handle<Object> regexp_err = factory->NewSyntaxError(message, array);
- isolate->Throw(*regexp_err);
+ return isolate->Throw<Object>(regexp_err);
}
@@ -166,15 +148,17 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
// Generic RegExp methods. Dispatches to implementation specific methods.
-Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> flag_str) {
+MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
+ Handle<String> pattern,
+ Handle<String> flag_str) {
Isolate* isolate = re->GetIsolate();
Zone zone(isolate);
JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
CompilationCache* compilation_cache = isolate->compilation_cache();
- Handle<FixedArray> cached = compilation_cache->LookupRegExp(pattern, flags);
- bool in_cache = !cached.is_null();
+ MaybeHandle<FixedArray> maybe_cached =
+ compilation_cache->LookupRegExp(pattern, flags);
+ Handle<FixedArray> cached;
+ bool in_cache = maybe_cached.ToHandle(&cached);
LOG(isolate, RegExpCompileEvent(re, in_cache));
Handle<Object> result;
@@ -182,18 +166,17 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
re->set_data(*cached);
return re;
}
- pattern = FlattenGetString(pattern);
+ pattern = String::Flatten(pattern);
PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
&parse_result, &zone)) {
// Throw an exception if we fail to parse the pattern.
- ThrowRegExpException(re,
- pattern,
- parse_result.error,
- "malformed_regexp");
- return Handle<Object>::null();
+ return ThrowRegExpException(re,
+ pattern,
+ parse_result.error,
+ "malformed_regexp");
}
bool has_been_compiled = false;
@@ -209,8 +192,11 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
parse_result.capture_count == 0) {
RegExpAtom* atom = parse_result.tree->AsAtom();
Vector<const uc16> atom_pattern = atom->data();
- Handle<String> atom_string =
- isolate->factory()->NewStringFromTwoByte(atom_pattern);
+ Handle<String> atom_string;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, atom_string,
+ isolate->factory()->NewStringFromTwoByte(atom_pattern),
+ Object);
if (!HasFewDifferentCharacters(atom_string)) {
AtomCompile(re, pattern, flags, atom_string);
has_been_compiled = true;
@@ -229,23 +215,19 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
}
-Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> last_match_info) {
+MaybeHandle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ Handle<JSArray> last_match_info) {
switch (regexp->TypeTag()) {
case JSRegExp::ATOM:
return AtomExec(regexp, subject, index, last_match_info);
case JSRegExp::IRREGEXP: {
- Handle<Object> result =
- IrregexpExec(regexp, subject, index, last_match_info);
- ASSERT(!result.is_null() ||
- regexp->GetIsolate()->has_pending_exception());
- return result;
+ return IrregexpExec(regexp, subject, index, last_match_info);
}
default:
UNREACHABLE();
- return Handle<Object>::null();
+ return MaybeHandle<Object>();
}
}
@@ -288,7 +270,7 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
ASSERT(0 <= index);
ASSERT(index <= subject->length());
- if (!subject->IsFlat()) FlattenString(subject);
+ subject = String::Flatten(subject);
DisallowHeapAllocation no_gc; // ensure vectors stay valid
String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -437,7 +419,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
JSRegExp::Flags flags = re->GetFlags();
Handle<String> pattern(re->Pattern());
- if (!pattern->IsFlat()) FlattenString(pattern);
+ pattern = String::Flatten(pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
@@ -445,10 +427,10 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
&zone)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
- ThrowRegExpException(re,
- pattern,
- compile_data.error,
- "malformed_regexp");
+ USE(ThrowRegExpException(re,
+ pattern,
+ compile_data.error,
+ "malformed_regexp"));
return false;
}
RegExpEngine::CompilationResult result =
@@ -462,8 +444,8 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
&zone);
if (result.error_message != NULL) {
// Unable to compile regexp.
- Handle<String> error_message =
- isolate->factory()->NewStringFromUtf8(CStrVector(result.error_message));
+ Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
+ CStrVector(result.error_message)).ToHandleChecked();
CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
return false;
}
@@ -525,7 +507,7 @@ void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re,
int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
Handle<String> subject) {
- if (!subject->IsFlat()) FlattenString(subject);
+ subject = String::Flatten(subject);
// Check the asciiness of the underlying storage.
bool is_ascii = subject->IsOneByteRepresentationUnderneath();
@@ -622,8 +604,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
index);
if (result == RE_SUCCESS) {
// Copy capture results to the start of the registers array.
- OS::MemCopy(
- output, raw_output, number_of_capture_registers * sizeof(int32_t));
+ MemCopy(output, raw_output, number_of_capture_registers * sizeof(int32_t));
}
if (result == RE_EXCEPTION) {
ASSERT(!isolate->has_pending_exception());
@@ -634,10 +615,10 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
}
-Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int previous_index,
- Handle<JSArray> last_match_info) {
+MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int previous_index,
+ Handle<JSArray> last_match_info) {
Isolate* isolate = regexp->GetIsolate();
ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
@@ -645,15 +626,15 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
#if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
if (FLAG_trace_regexp_bytecodes) {
String* pattern = regexp->Pattern();
- PrintF("\n\nRegexp match: /%s/\n\n", *(pattern->ToCString()));
- PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
+ PrintF("\n\nRegexp match: /%s/\n\n", pattern->ToCString().get());
+ PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
}
#endif
int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
if (required_registers < 0) {
// Compiling failed with an exception.
ASSERT(isolate->has_pending_exception());
- return Handle<Object>::null();
+ return MaybeHandle<Object>();
}
int32_t* output_registers = NULL;
@@ -675,7 +656,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
}
if (res == RE_EXCEPTION) {
ASSERT(isolate->has_pending_exception());
- return Handle<Object>::null();
+ return MaybeHandle<Object>();
}
ASSERT(res == RE_FAILURE);
return isolate->factory()->null_value();
@@ -688,7 +669,8 @@ Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
int32_t* match) {
ASSERT(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
- last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
+ JSArray::EnsureSize(last_match_info,
+ capture_register_count + kLastMatchOverhead);
DisallowHeapAllocation no_allocation;
FixedArray* array = FixedArray::cast(last_match_info->elements());
if (match != NULL) {
@@ -1151,7 +1133,7 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
#ifdef DEBUG
if (FLAG_print_code) {
CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer());
- Handle<Code>::cast(code)->Disassemble(*pattern->ToCString(),
+ Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(),
trace_scope.file());
}
if (FLAG_trace_regexp_assembler) {
@@ -1223,11 +1205,12 @@ int Trace::FindAffectedRegisters(OutSet* affected_registers,
void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler,
int max_register,
- OutSet& registers_to_pop,
- OutSet& registers_to_clear) {
+ const OutSet& registers_to_pop,
+ const OutSet& registers_to_clear) {
for (int reg = max_register; reg >= 0; reg--) {
- if (registers_to_pop.Get(reg)) assembler->PopRegister(reg);
- else if (registers_to_clear.Get(reg)) {
+ if (registers_to_pop.Get(reg)) {
+ assembler->PopRegister(reg);
+ } else if (registers_to_clear.Get(reg)) {
int clear_to = reg;
while (reg > 0 && registers_to_clear.Get(reg - 1)) {
reg--;
@@ -1240,7 +1223,7 @@ void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler,
void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
int max_register,
- OutSet& affected_registers,
+ const OutSet& affected_registers,
OutSet* registers_to_pop,
OutSet* registers_to_clear,
Zone* zone) {
@@ -3597,9 +3580,12 @@ class AlternativeGenerationList {
// The '2' variant has an inclusive 'from' and an exclusive 'to'.
-static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0,
- 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B, 0x2028, 0x202A,
- 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, 0x10000 };
+// This covers \s as defined in ECMA-262 5.1, 15.10.2.12,
+// which includes WhiteSpace (7.2) and LineTerminator (7.3) values.
+static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1,
+ 0x00A0, 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B,
+ 0x2028, 0x202A, 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001,
+ 0xFEFF, 0xFF00, 0x10000 };
static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
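The kSpaceRanges table above follows the pairing convention noted in its comment: even slots hold an inclusive lower bound, odd slots an exclusive upper bound, and the trailing 0x10000 appears to serve as an end-of-BMP sentinel for the negated-class path. A minimal membership test under that convention could look like the following (IsInRangeTable is a hypothetical helper for illustration, not part of this patch):

    // Sketch, assuming the inclusive-from/exclusive-to pairing described
    // in the comment above; hypothetical helper, illustration only.
    static bool IsInRangeTable(int c, const int* ranges, int count) {
      // Walk the [from, to) pairs; an odd trailing sentinel is ignored.
      for (int i = 0; i + 1 < count; i += 2) {
        if (c >= ranges[i] && c < ranges[i + 1]) return true;
      }
      return false;
    }
    // E.g. IsInRangeTable(0x2028, kSpaceRanges, kSpaceRangeCount) is true,
    // since 0x2028 falls in the [0x2028, 0x202A) pair.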
static const int kWordRanges[] = {
@@ -4374,7 +4360,7 @@ void DotPrinter::PrintNode(const char* label, RegExpNode* node) {
stream()->Add("\"];\n");
Visit(node);
stream()->Add("}\n");
- printf("%s", *(stream()->ToCString()));
+ printf("%s", stream()->ToCString().get());
}
@@ -4669,7 +4655,7 @@ void DispatchTable::Dump() {
StringStream stream(&alloc);
DispatchTableDumper dumper(&stream);
tree()->ForEach(&dumper);
- OS::PrintError("%s", *stream.ToCString());
+ OS::PrintError("%s", stream.ToCString().get());
}
@@ -5562,7 +5548,7 @@ void OutSet::Set(unsigned value, Zone *zone) {
}
-bool OutSet::Get(unsigned value) {
+bool OutSet::Get(unsigned value) const {
if (value < kFirstLimit) {
return (first_ & (1 << value)) != 0;
} else if (remaining_ == NULL) {
@@ -6008,7 +5994,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
// Sample some characters from the middle of the string.
static const int kSampleSize = 128;
- FlattenString(sample_subject);
+ sample_subject = String::Flatten(sample_subject);
int chars_sampled = 0;
int half_way = (sample_subject->length() - kSampleSize) / 2;
for (int i = Max(0, half_way);
@@ -6085,9 +6071,17 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#elif V8_TARGET_ARCH_ARM64
+ RegExpMacroAssemblerARM64 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#elif V8_TARGET_ARCH_X87
+ RegExpMacroAssemblerX87 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
+#else
+#error "Unsupported architecture"
#endif
#else // V8_INTERPRETED_REGEXP
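With these hunks, jsregexp.cc stops signaling exceptions through null handles and matches the MaybeHandle-based declarations in jsregexp.h below. A hedged sketch of the calling convention this enables (the surrounding caller code is illustrative, not taken from the patch):

    // Sketch: callers now unwrap the MaybeHandle explicitly; an empty
    // result means an exception is already pending on the isolate.
    Handle<Object> result;
    if (!RegExpImpl::IrregexpExec(regexp, subject, index, last_match_info)
             .ToHandle(&result)) {
      return MaybeHandle<Object>();  // propagate the pending exception
    }
    // On success, |result| is a JSArray of captures or the null value.

Combined with MUST_USE_RESULT on the declarations, this turns a silently ignorable null-handle check into a compile-time obligation.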
diff --git a/chromium/v8/src/jsregexp.h b/chromium/v8/src/jsregexp.h
index dfd415d5af8..4da8ba342c2 100644
--- a/chromium/v8/src/jsregexp.h
+++ b/chromium/v8/src/jsregexp.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_JSREGEXP_H_
#define V8_JSREGEXP_H_
-#include "allocation.h"
-#include "assembler.h"
-#include "zone-inl.h"
+#include "src/allocation.h"
+#include "src/assembler.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -55,10 +32,10 @@ class RegExpImpl {
// Creates a regular expression literal in the old space.
// This function calls the garbage collector if necessary.
- static Handle<Object> CreateRegExpLiteral(Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags,
- bool* has_pending_exception);
+ MUST_USE_RESULT static MaybeHandle<Object> CreateRegExpLiteral(
+ Handle<JSFunction> constructor,
+ Handle<String> pattern,
+ Handle<String> flags);
// Returns a string representation of a regular expression.
// Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
@@ -69,16 +46,18 @@ class RegExpImpl {
// generic data and choice of implementation - as well as what
// the implementation wants to store in the data field.
// Returns false if compilation fails.
- static Handle<Object> Compile(Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> flags);
+ MUST_USE_RESULT static MaybeHandle<Object> Compile(
+ Handle<JSRegExp> re,
+ Handle<String> pattern,
+ Handle<String> flags);
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
- static Handle<Object> Exec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
+ MUST_USE_RESULT static MaybeHandle<Object> Exec(
+ Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ Handle<JSArray> lastMatchInfo);
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpInitialize(Handle<JSRegExp> re,
@@ -133,10 +112,11 @@ class RegExpImpl {
// On a successful match, the result is a JSArray containing
// captured positions. On a failure, the result is the null value.
// Returns an empty handle in case of an exception.
- static Handle<Object> IrregexpExec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
+ MUST_USE_RESULT static MaybeHandle<Object> IrregexpExec(
+ Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ Handle<JSArray> lastMatchInfo);
// Set last match info. If match is NULL, then setting captures is omitted.
static Handle<JSArray> SetLastMatchInfo(Handle<JSArray> last_match_info,
@@ -316,7 +296,7 @@ class OutSet: public ZoneObject {
public:
OutSet() : first_(0), remaining_(NULL), successors_(NULL) { }
OutSet* Extend(unsigned value, Zone* zone);
- bool Get(unsigned value);
+ bool Get(unsigned value) const;
static const unsigned kFirstLimit = 32;
private:
@@ -1485,14 +1465,14 @@ class Trace {
int FindAffectedRegisters(OutSet* affected_registers, Zone* zone);
void PerformDeferredActions(RegExpMacroAssembler* macro,
int max_register,
- OutSet& affected_registers,
+ const OutSet& affected_registers,
OutSet* registers_to_pop,
OutSet* registers_to_clear,
Zone* zone);
void RestoreAffectedRegisters(RegExpMacroAssembler* macro,
int max_register,
- OutSet& registers_to_pop,
- OutSet& registers_to_clear);
+ const OutSet& registers_to_pop,
+ const OutSet& registers_to_clear);
int cp_offset_;
DeferredAction* actions_;
Label* backtrack_;
diff --git a/chromium/v8/src/libplatform/DEPS b/chromium/v8/src/libplatform/DEPS
new file mode 100644
index 00000000000..bace5d31727
--- /dev/null
+++ b/chromium/v8/src/libplatform/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ # TODO(jochen): Enable this.
+ #"-src",
+ "+src/base",
+ "+src/libplatform",
+]
diff --git a/chromium/v8/src/libplatform/default-platform.cc b/chromium/v8/src/libplatform/default-platform.cc
new file mode 100644
index 00000000000..733bcf09b83
--- /dev/null
+++ b/chromium/v8/src/libplatform/default-platform.cc
@@ -0,0 +1,70 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/default-platform.h"
+
+#include <algorithm>
+#include <queue>
+
+// TODO(jochen): We should have our own version of checks.h.
+#include "src/checks.h"
+#include "src/platform.h"
+#include "src/libplatform/worker-thread.h"
+
+namespace v8 {
+namespace internal {
+
+
+const int DefaultPlatform::kMaxThreadPoolSize = 4;
+
+
+DefaultPlatform::DefaultPlatform()
+ : initialized_(false), thread_pool_size_(0) {}
+
+
+DefaultPlatform::~DefaultPlatform() {
+ LockGuard<Mutex> guard(&lock_);
+ queue_.Terminate();
+ if (initialized_) {
+ for (std::vector<WorkerThread*>::iterator i = thread_pool_.begin();
+ i != thread_pool_.end(); ++i) {
+ delete *i;
+ }
+ }
+}
+
+
+void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
+ LockGuard<Mutex> guard(&lock_);
+ ASSERT(thread_pool_size >= 0);
+ if (thread_pool_size < 1)
+ thread_pool_size = OS::NumberOfProcessorsOnline();
+ thread_pool_size_ =
+ std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1);
+}
+
+
+void DefaultPlatform::EnsureInitialized() {
+ LockGuard<Mutex> guard(&lock_);
+ if (initialized_) return;
+ initialized_ = true;
+
+ for (int i = 0; i < thread_pool_size_; ++i)
+ thread_pool_.push_back(new WorkerThread(&queue_));
+}
+
+void DefaultPlatform::CallOnBackgroundThread(Task *task,
+ ExpectedRuntime expected_runtime) {
+ EnsureInitialized();
+ queue_.Append(task);
+}
+
+
+void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
+ // TODO(jochen): implement.
+ task->Run();
+ delete task;
+}
+
+} } // namespace v8::internal
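DefaultPlatform builds its worker pool lazily on first use; SetThreadPoolSize clamps the request to [1, kMaxThreadPoolSize] and treats values below 1 as "use the number of online processors". A sketch of how an embedder might drive it (PrintTask and the wiring are illustrative, not part of this patch):

    #include <cstdio>

    // Hypothetical task type; v8::Task only requires Run().
    class PrintTask : public v8::Task {
     public:
      virtual void Run() { std::printf("ran on a worker thread\n"); }
    };

    void RunExample() {
      v8::internal::DefaultPlatform platform;
      platform.SetThreadPoolSize(2);
      platform.EnsureInitialized();  // spawns the WorkerThreads
      // Ownership passes to the queue; a worker runs and deletes the task.
      platform.CallOnBackgroundThread(new PrintTask(),
                                      v8::Platform::kShortRunningTask);
    }  // ~DefaultPlatform() terminates the queue and joins the workers.

Note that CallOnForegroundThread still runs the task inline, per the TODO.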
diff --git a/chromium/v8/src/libplatform/default-platform.h b/chromium/v8/src/libplatform/default-platform.h
new file mode 100644
index 00000000000..112ba4eb295
--- /dev/null
+++ b/chromium/v8/src/libplatform/default-platform.h
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
+#define V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
+
+#include <vector>
+
+#include "include/v8-platform.h"
+#include "src/base/macros.h"
+#include "src/platform/mutex.h"
+#include "src/libplatform/task-queue.h"
+
+namespace v8 {
+namespace internal {
+
+class TaskQueue;
+class Thread;
+class WorkerThread;
+
+class DefaultPlatform : public Platform {
+ public:
+ DefaultPlatform();
+ virtual ~DefaultPlatform();
+
+ void SetThreadPoolSize(int thread_pool_size);
+
+ void EnsureInitialized();
+
+ // v8::Platform implementation.
+ virtual void CallOnBackgroundThread(
+ Task *task, ExpectedRuntime expected_runtime) V8_OVERRIDE;
+ virtual void CallOnForegroundThread(v8::Isolate *isolate,
+ Task *task) V8_OVERRIDE;
+
+ private:
+ static const int kMaxThreadPoolSize;
+
+ Mutex lock_;
+ bool initialized_;
+ int thread_pool_size_;
+ std::vector<WorkerThread*> thread_pool_;
+ TaskQueue queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
+};
+
+
+} } // namespace v8::internal
+
+
+#endif // V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
diff --git a/chromium/v8/src/libplatform/task-queue.cc b/chromium/v8/src/libplatform/task-queue.cc
new file mode 100644
index 00000000000..e618cb70b00
--- /dev/null
+++ b/chromium/v8/src/libplatform/task-queue.cc
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/task-queue.h"
+
+// TODO(jochen): We should have our own version of checks.h.
+#include "src/checks.h"
+
+namespace v8 {
+namespace internal {
+
+TaskQueue::TaskQueue() : process_queue_semaphore_(0), terminated_(false) {}
+
+
+TaskQueue::~TaskQueue() {
+ LockGuard<Mutex> guard(&lock_);
+ ASSERT(terminated_);
+ ASSERT(task_queue_.empty());
+}
+
+
+void TaskQueue::Append(Task* task) {
+ LockGuard<Mutex> guard(&lock_);
+ ASSERT(!terminated_);
+ task_queue_.push(task);
+ process_queue_semaphore_.Signal();
+}
+
+
+Task* TaskQueue::GetNext() {
+ for (;;) {
+ {
+ LockGuard<Mutex> guard(&lock_);
+ if (!task_queue_.empty()) {
+ Task* result = task_queue_.front();
+ task_queue_.pop();
+ return result;
+ }
+ if (terminated_) {
+ process_queue_semaphore_.Signal();
+ return NULL;
+ }
+ }
+ process_queue_semaphore_.Wait();
+ }
+}
+
+
+void TaskQueue::Terminate() {
+ LockGuard<Mutex> guard(&lock_);
+ ASSERT(!terminated_);
+ terminated_ = true;
+ process_queue_semaphore_.Signal();
+}
+
+} } // namespace v8::internal
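TaskQueue is a mutex-plus-semaphore blocking queue with a single-shot shutdown: Terminate() signals the semaphore once, and each consumer that wakes to find terminated_ set re-signals before returning NULL, so one call fans out to every blocked worker. A consumer loop against it mirrors the WorkerThread::Run() added later in this patch:

    // Sketch of the intended consumption protocol.
    void DrainQueue(v8::internal::TaskQueue* queue) {
      // GetNext() blocks until a task arrives or the queue is terminated;
      // NULL is the shutdown signal.
      while (v8::Task* task = queue->GetNext()) {
        task->Run();
        delete task;  // ownership was transferred by Append()
      }
    }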
diff --git a/chromium/v8/src/libplatform/task-queue.h b/chromium/v8/src/libplatform/task-queue.h
new file mode 100644
index 00000000000..140366427e6
--- /dev/null
+++ b/chromium/v8/src/libplatform/task-queue.h
@@ -0,0 +1,47 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_TASK_QUEUE_H_
+#define V8_LIBPLATFORM_TASK_QUEUE_H_
+
+#include <queue>
+
+#include "src/base/macros.h"
+#include "src/platform/mutex.h"
+#include "src/platform/semaphore.h"
+
+namespace v8 {
+
+class Task;
+
+namespace internal {
+
+class TaskQueue {
+ public:
+ TaskQueue();
+ ~TaskQueue();
+
+ // Appends a task to the queue. The queue takes ownership of |task|.
+ void Append(Task* task);
+
+ // Returns the next task to process. Blocks if no task is available. Returns
+ // NULL if the queue is terminated.
+ Task* GetNext();
+
+ // Terminates the queue; after draining, GetNext() returns NULL.
+ void Terminate();
+
+ private:
+ Mutex lock_;
+ Semaphore process_queue_semaphore_;
+ std::queue<Task*> task_queue_;
+ bool terminated_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskQueue);
+};
+
+} } // namespace v8::internal
+
+
+#endif // V8_LIBPLATFORM_TASK_QUEUE_H_
diff --git a/chromium/v8/src/libplatform/worker-thread.cc b/chromium/v8/src/libplatform/worker-thread.cc
new file mode 100644
index 00000000000..6b3892cbd2f
--- /dev/null
+++ b/chromium/v8/src/libplatform/worker-thread.cc
@@ -0,0 +1,31 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/worker-thread.h"
+
+#include "include/v8-platform.h"
+#include "src/libplatform/task-queue.h"
+
+namespace v8 {
+namespace internal {
+
+WorkerThread::WorkerThread(TaskQueue* queue)
+ : Thread("V8 WorkerThread"), queue_(queue) {
+ Start();
+}
+
+
+WorkerThread::~WorkerThread() {
+ Join();
+}
+
+
+void WorkerThread::Run() {
+ while (Task* task = queue_->GetNext()) {
+ task->Run();
+ delete task;
+ }
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/libplatform/worker-thread.h b/chromium/v8/src/libplatform/worker-thread.h
new file mode 100644
index 00000000000..20b9add4cdb
--- /dev/null
+++ b/chromium/v8/src/libplatform/worker-thread.h
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_WORKER_THREAD_H_
+#define V8_LIBPLATFORM_WORKER_THREAD_H_
+
+#include <queue>
+
+#include "src/base/macros.h"
+#include "src/platform.h"
+
+namespace v8 {
+
+namespace internal {
+
+class TaskQueue;
+
+class WorkerThread : public Thread {
+ public:
+ explicit WorkerThread(TaskQueue* queue);
+ virtual ~WorkerThread();
+
+ // Thread implementation.
+ virtual void Run() V8_OVERRIDE;
+
+ private:
+ friend class QuitTask;
+
+ TaskQueue* queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(WorkerThread);
+};
+
+} } // namespace v8::internal
+
+
+#endif // V8_LIBPLATFORM_WORKER_THREAD_H_
diff --git a/chromium/v8/src/list-inl.h b/chromium/v8/src/list-inl.h
index 143c830ee92..8a4cf567040 100644
--- a/chromium/v8/src/list-inl.h
+++ b/chromium/v8/src/list-inl.h
@@ -1,35 +1,12 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LIST_INL_H_
#define V8_LIST_INL_H_
-#include "list.h"
-#include "platform.h"
+#include "src/list.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
@@ -88,7 +65,7 @@ template<typename T, class P>
void List<T, P>::Resize(int new_capacity, P alloc) {
ASSERT_LE(length_, new_capacity);
T* new_data = NewData(new_capacity, alloc);
- OS::MemCopy(new_data, data_, length_ * sizeof(T));
+ MemCopy(new_data, data_, length_ * sizeof(T));
List<T, P>::DeleteData(data_);
data_ = new_data;
capacity_ = new_capacity;
@@ -166,6 +143,7 @@ void List<T, P>::Clear() {
template<typename T, class P>
void List<T, P>::Rewind(int pos) {
+ ASSERT(0 <= pos && pos <= length_);
length_ = pos;
}
diff --git a/chromium/v8/src/list.h b/chromium/v8/src/list.h
index ea67b8b0c6c..2244d67f9f0 100644
--- a/chromium/v8/src/list.h
+++ b/chromium/v8/src/list.h
@@ -1,38 +1,16 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LIST_H_
#define V8_LIST_H_
-#include "utils.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+template<typename T> class Vector;
// ----------------------------------------------------------------------------
// The list is a template for very light-weight lists. We are not
@@ -91,6 +69,10 @@ class List {
inline T& last() const { return at(length_ - 1); }
inline T& first() const { return at(0); }
+ typedef T* iterator;
+ inline iterator begin() const { return &data_[0]; }
+ inline iterator end() const { return &data_[length_]; }
+
INLINE(bool is_empty() const) { return length_ == 0; }
INLINE(int length() const) { return length_; }
INLINE(int capacity() const) { return capacity_; }
@@ -196,14 +178,23 @@ class List {
DISALLOW_COPY_AND_ASSIGN(List);
};
+
+template<typename T, class P>
+size_t GetMemoryUsedByList(const List<T, P>& list) {
+ return list.length() * sizeof(T) + sizeof(list);
+}
+
+
class Map;
-class Type;
+template<class> class TypeImpl;
+struct HeapTypeConfig;
+typedef TypeImpl<HeapTypeConfig> HeapType;
class Code;
template<typename T> class Handle;
typedef List<Map*> MapList;
typedef List<Code*> CodeList;
typedef List<Handle<Map> > MapHandleList;
-typedef List<Handle<Type> > TypeHandleList;
+typedef List<Handle<HeapType> > TypeHandleList;
typedef List<Handle<Code> > CodeHandleList;
// Perform binary search for an element in an already sorted
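Besides the include and type-list changes, this hunk gives List pointer-based begin()/end() iterators and a GetMemoryUsedByList helper. An illustrative use (not from the patch):

    #include <cstdio>

    // Sketch: the new iterators make List usable in ordinary loops and
    // STL algorithms; GetMemoryUsedByList is a rough, length-based count.
    void DumpInts(const v8::internal::List<int>& list) {
      for (int* it = list.begin(); it != list.end(); ++it) {
        std::printf("%d\n", *it);
      }
      size_t bytes = GetMemoryUsedByList(list);  // elements + header only
      std::printf("~%u bytes\n", static_cast<unsigned>(bytes));
    }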
diff --git a/chromium/v8/src/lithium-allocator-inl.h b/chromium/v8/src/lithium-allocator-inl.h
index deee98877d6..1016ee3c685 100644
--- a/chromium/v8/src/lithium-allocator-inl.h
+++ b/chromium/v8/src/lithium-allocator-inl.h
@@ -1,43 +1,24 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LITHIUM_ALLOCATOR_INL_H_
#define V8_LITHIUM_ALLOCATOR_INL_H_
-#include "lithium-allocator.h"
+#include "src/lithium-allocator.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
+#include "src/ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
+#include "src/x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
+#include "src/arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
+#include "src/mips/lithium-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/lithium-x87.h"
#else
#error "Unknown architecture."
#endif
diff --git a/chromium/v8/src/lithium-allocator.cc b/chromium/v8/src/lithium-allocator.cc
index 29c31942e44..a36f7de5c8e 100644
--- a/chromium/v8/src/lithium-allocator.cc
+++ b/chromium/v8/src/lithium-allocator.cc
@@ -1,44 +1,25 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "lithium-allocator-inl.h"
-
-#include "hydrogen.h"
-#include "string-stream.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "src/lithium-allocator-inl.h"
+
+#include "src/hydrogen.h"
+#include "src/string-stream.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
+#include "src/ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
+#include "src/x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
+#include "src/arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
+#include "src/mips/lithium-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/lithium-x87.h"
#else
#error "Unknown architecture."
#endif
@@ -67,7 +48,8 @@ UsePosition::UsePosition(LifetimePosition pos,
register_beneficial_(true) {
if (operand_ != NULL && operand_->IsUnallocated()) {
LUnallocated* unalloc = LUnallocated::cast(operand_);
- requires_reg_ = unalloc->HasRegisterPolicy();
+ requires_reg_ = unalloc->HasRegisterPolicy() ||
+ unalloc->HasDoubleRegisterPolicy();
register_beneficial_ = !unalloc->HasAnyPolicy();
}
ASSERT(pos_.IsValid());
@@ -984,7 +966,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
}
- if (instr->ClobbersDoubleRegisters()) {
+ if (instr->ClobbersDoubleRegisters(isolate())) {
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
@@ -1026,6 +1008,15 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
Define(curr_position, temp, NULL);
+
+ if (temp->IsUnallocated()) {
+ LUnallocated* temp_unalloc = LUnallocated::cast(temp);
+ if (temp_unalloc->HasDoubleRegisterPolicy()) {
+ double_artificial_registers_.Add(
+ temp_unalloc->virtual_register() - first_artificial_register_,
+ zone());
+ }
+ }
}
}
}
@@ -1116,7 +1107,6 @@ bool LAllocator::Allocate(LChunk* chunk) {
void LAllocator::MeetRegisterConstraints() {
LAllocatorPhase phase("L_Register constraints", this);
- first_artificial_register_ = next_virtual_register_;
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int i = 0; i < blocks->length(); ++i) {
HBasicBlock* block = blocks->at(i);
@@ -1369,7 +1359,7 @@ void LAllocator::BuildLiveRanges() {
ASSERT(chunk_->info()->IsOptimizing());
AllowHandleDereference allow_deref;
PrintF("Function: %s\n",
- *chunk_->info()->function()->debug_name()->ToCString());
+ chunk_->info()->function()->debug_name()->ToCString().get());
}
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
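The allocator now treats the new MUST_HAVE_DOUBLE_REGISTER policy as register-requiring and records double temps in double_artificial_registers_ (the matching enum value appears in the lithium.h hunk at the end of this patch). A hedged sketch of how a chunk builder would request such a temp (the helper name is hypothetical, and it assumes LUnallocated's extended-policy constructor):

    // Illustrative only; not taken from this patch.
    LUnallocated* TempDoubleRegister(Zone* zone) {
      // UsePosition::RequiresRegister() now honors this policy via
      // HasDoubleRegisterPolicy().
      return new(zone) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
    }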
diff --git a/chromium/v8/src/lithium-allocator.h b/chromium/v8/src/lithium-allocator.h
index 9908ea823d3..1d313a5a548 100644
--- a/chromium/v8/src/lithium-allocator.h
+++ b/chromium/v8/src/lithium-allocator.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LITHIUM_ALLOCATOR_H_
#define V8_LITHIUM_ALLOCATOR_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "allocation.h"
-#include "lithium.h"
-#include "zone.h"
+#include "src/allocation.h"
+#include "src/lithium.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -47,16 +24,12 @@ class HValue;
class BitVector;
class StringStream;
-class LArgument;
class LPlatformChunk;
class LOperand;
class LUnallocated;
-class LConstantOperand;
class LGap;
class LParallelMove;
class LPointerMap;
-class LStackSlot;
-class LRegister;
// This class represents a single point of a LOperand's lifetime.
diff --git a/chromium/v8/src/lithium-codegen.cc b/chromium/v8/src/lithium-codegen.cc
index 19ebe7e516b..f49887d9d13 100644
--- a/chromium/v8/src/lithium-codegen.cc
+++ b/chromium/v8/src/lithium-codegen.cc
@@ -1,46 +1,29 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-codegen.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/lithium-codegen.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-ia32.h"
+#include "src/ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#include "x64/lithium-codegen-x64.h"
+#include "src/x64/lithium-x64.h"
+#include "src/x64/lithium-codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#include "arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-arm.h"
+#include "src/arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/lithium-arm64.h"
+#include "src/arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#include "mips/lithium-codegen-mips.h"
+#include "src/mips/lithium-mips.h"
+#include "src/mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/lithium-x87.h"
+#include "src/x87/lithium-codegen-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -104,11 +87,9 @@ bool LCodeGenBase::GenerateBody() {
GenerateBodyInstructionPre(instr);
HValue* value = instr->hydrogen_value();
- if (value->position() != RelocInfo::kNoPosition) {
- ASSERT(!graph()->info()->IsOptimizing() ||
- !FLAG_emit_opt_code_positions ||
- value->position() != RelocInfo::kNoPosition);
- RecordAndWritePosition(value->position());
+ if (!value->position().IsUnknown()) {
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
}
instr->CompileToNative(codegen);
@@ -121,6 +102,30 @@ bool LCodeGenBase::GenerateBody() {
}
+void LCodeGenBase::CheckEnvironmentUsage() {
+#ifdef DEBUG
+ bool dead_block = false;
+ for (int i = 0; i < instructions_->length(); i++) {
+ LInstruction* instr = instructions_->at(i);
+ HValue* hval = instr->hydrogen_value();
+ if (instr->IsLabel()) dead_block = LLabel::cast(instr)->HasReplacement();
+ if (dead_block || !hval->block()->IsReachable()) continue;
+
+ HInstruction* hinstr = HInstruction::cast(hval);
+ if (!hinstr->CanDeoptimize() && instr->HasEnvironment()) {
+ V8_Fatal(__FILE__, __LINE__, "CanDeoptimize is wrong for %s (%s)\n",
+ hinstr->Mnemonic(), instr->Mnemonic());
+ }
+
+ if (instr->HasEnvironment() && !instr->environment()->has_been_used()) {
+ V8_Fatal(__FILE__, __LINE__, "unused environment for %s (%s)\n",
+ hinstr->Mnemonic(), instr->Mnemonic());
+ }
+ }
+#endif
+}
+
+
void LCodeGenBase::Comment(const char* format, ...) {
if (!FLAG_code_comments) return;
char buffer[4 * KB];
@@ -134,17 +139,95 @@ void LCodeGenBase::Comment(const char* format, ...) {
// issues when the stack allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(static_cast<int>(length) + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
+ MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
int LCodeGenBase::GetNextEmittedBlock() const {
for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+ if (!graph()->blocks()->at(i)->IsReachable()) continue;
if (!chunk_->GetLabel(i)->HasReplacement()) return i;
}
return -1;
}
+static void AddWeakObjectToCodeDependency(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Code> code) {
+ Heap* heap = isolate->heap();
+ heap->EnsureWeakObjectToCodeTable();
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
+ dep = DependentCode::Insert(dep, DependentCode::kWeakCodeGroup, code);
+ heap->AddWeakObjectToCodeDependency(object, dep);
+}
+
+
+void LCodeGenBase::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
+ ASSERT(code->is_optimized_code());
+ ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
+ ZoneList<Handle<Cell> > cells(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::CELL &&
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
+ Handle<Cell> cell(it.rinfo()->target_cell());
+ cells.Add(cell, zone());
+ } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
+ } else if (it.rinfo()->target_object()->IsCell()) {
+ Handle<Cell> cell(Cell::cast(it.rinfo()->target_object()));
+ cells.Add(cell, zone());
+ }
+ }
+ }
+ if (FLAG_enable_ool_constant_pool) {
+ code->constant_pool()->set_weak_object_state(
+ ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE);
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded objects after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ Map::AddDependentCode(maps.at(i), DependentCode::kWeakCodeGroup, code);
+ }
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate(), objects.at(i), code);
+ }
+ for (int i = 0; i < cells.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate(), cells.at(i), code);
+ }
+}
+
+
+void LCodeGenBase::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+void LCodeGenBase::AddDeprecationDependency(Handle<Map> map) {
+ if (map->is_deprecated()) return Abort(kMapBecameDeprecated);
+ chunk_->AddDeprecationDependency(map);
+}
+
+
+void LCodeGenBase::AddStabilityDependency(Handle<Map> map) {
+ if (!map->is_stable()) return Abort(kMapBecameUnstable);
+ chunk_->AddStabilityDependency(map);
+}
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/lithium-codegen.h b/chromium/v8/src/lithium-codegen.h
index 9caab8127db..1eb963e6faa 100644
--- a/chromium/v8/src/lithium-codegen.h
+++ b/chromium/v8/src/lithium-codegen.h
@@ -1,36 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LITHIUM_CODEGEN_H_
#define V8_LITHIUM_CODEGEN_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "compiler.h"
+#include "src/compiler.h"
namespace v8 {
namespace internal {
@@ -66,6 +43,13 @@ class LCodeGenBase BASE_EMBEDDED {
int GetNextEmittedBlock() const;
+ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
+
+ // Check that an environment assigned via AssignEnvironment is actually being
+ // used. Redundant assignments keep things alive longer than necessary, and
+ // consequently lead to worse code, so it's important to minimize this.
+ void CheckEnvironmentUsage();
+
protected:
enum Status {
UNUSED,
@@ -88,6 +72,12 @@ class LCodeGenBase BASE_EMBEDDED {
bool is_generating() const { return status_ == GENERATING; }
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
+
+ void Abort(BailoutReason reason);
+
+ // Methods for code dependencies.
+ void AddDeprecationDependency(Handle<Map> map);
+ void AddStabilityDependency(Handle<Map> map);
};
diff --git a/chromium/v8/src/lithium.cc b/chromium/v8/src/lithium.cc
index 414d5f4edeb..b292b4ffd3a 100644
--- a/chromium/v8/src/lithium.cc
+++ b/chromium/v8/src/lithium.cc
@@ -1,46 +1,30 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "lithium.h"
-#include "scopes.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "src/lithium.h"
+#include "src/scopes.h"
+#include "src/serialize.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-ia32.h"
+#include "src/ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#include "x64/lithium-codegen-x64.h"
+#include "src/x64/lithium-x64.h"
+#include "src/x64/lithium-codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#include "arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-arm.h"
+#include "src/arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#include "mips/lithium-codegen-mips.h"
+#include "src/mips/lithium-mips.h"
+#include "src/mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/lithium-arm64.h"
+#include "src/arm64/lithium-codegen-arm64.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/lithium-x87.h"
+#include "src/x87/lithium-codegen-x87.h"
#else
#error "Unknown architecture."
#endif
@@ -82,6 +66,9 @@ void LOperand::PrintTo(StringStream* stream) {
case LUnallocated::MUST_HAVE_REGISTER:
stream->Add("(R)");
break;
+ case LUnallocated::MUST_HAVE_DOUBLE_REGISTER:
+ stream->Add("(D)");
+ break;
case LUnallocated::WRITABLE_REGISTER:
stream->Add("(WR)");
break;
@@ -108,39 +95,40 @@ void LOperand::PrintTo(StringStream* stream) {
case DOUBLE_REGISTER:
stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
break;
- case ARGUMENT:
- stream->Add("[arg:%d]", index());
- break;
}
}
-#define DEFINE_OPERAND_CACHE(name, type) \
- L##name* L##name::cache = NULL; \
- \
- void L##name::SetUpCache() { \
- if (cache) return; \
- cache = new L##name[kNumCachedOperands]; \
- for (int i = 0; i < kNumCachedOperands; i++) { \
- cache[i].ConvertTo(type, i); \
- } \
- } \
- \
- void L##name::TearDownCache() { \
- delete[] cache; \
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+LSubKindOperand<kOperandKind, kNumCachedOperands>*
+LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+ if (cache) return;
+ cache = new LSubKindOperand[kNumCachedOperands];
+ for (int i = 0; i < kNumCachedOperands; i++) {
+ cache[i].ConvertTo(kOperandKind, i);
}
+}
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+ delete[] cache;
+}
-LITHIUM_OPERAND_LIST(DEFINE_OPERAND_CACHE)
-#undef DEFINE_OPERAND_CACHE
void LOperand::SetUpCaches() {
-#define LITHIUM_OPERAND_SETUP(name, type) L##name::SetUpCache();
+#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
#undef LITHIUM_OPERAND_SETUP
}
void LOperand::TearDownCaches() {
-#define LITHIUM_OPERAND_TEARDOWN(name, type) L##name::TearDownCache();
+#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
#undef LITHIUM_OPERAND_TEARDOWN
}
@@ -252,7 +240,9 @@ LChunk::LChunk(CompilationInfo* info, HGraph* graph)
graph_(graph),
instructions_(32, graph->zone()),
pointer_maps_(8, graph->zone()),
- inlined_closures_(1, graph->zone()) {
+ inlined_closures_(1, graph->zone()),
+ deprecation_dependencies_(MapLess(), MapAllocator(graph->zone())),
+ stability_dependencies_(MapLess(), MapAllocator(graph->zone())) {
}
@@ -391,6 +381,27 @@ Representation LChunk::LookupLiteralRepresentation(
}
+void LChunk::CommitDependencies(Handle<Code> code) const {
+ for (MapSet::const_iterator it = deprecation_dependencies_.begin(),
+ iend = deprecation_dependencies_.end(); it != iend; ++it) {
+ Handle<Map> map = *it;
+ ASSERT(!map->is_deprecated());
+ ASSERT(map->CanBeDeprecated());
+ Map::AddDependentCode(map, DependentCode::kTransitionGroup, code);
+ }
+
+ for (MapSet::const_iterator it = stability_dependencies_.begin(),
+ iend = stability_dependencies_.end(); it != iend; ++it) {
+ Handle<Map> map = *it;
+ ASSERT(map->is_stable());
+ ASSERT(map->CanTransition());
+ Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code);
+ }
+
+ info_->CommitDependencies(code);
+}
+
+
LChunk* LChunk::NewChunk(HGraph* graph) {
DisallowHandleAllocation no_handles;
DisallowHeapAllocation no_gc;
@@ -428,11 +439,13 @@ Handle<Code> LChunk::Codegen() {
MarkEmptyBlocks();
if (generator.GenerateCode()) {
+ generator.CheckEnvironmentUsage();
CodeGenerator::MakeCodePrologue(info(), "optimized");
Code::Flags flags = info()->flags();
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
+ CommitDependencies(code);
code->set_is_crankshafted(true);
void* jit_handler_data =
assembler.positions_recorder()->DetachJITHandlerData();
@@ -440,8 +453,12 @@ Handle<Code> LChunk::Codegen() {
CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
CodeGenerator::PrintCode(code, info());
+ ASSERT(!(info()->isolate()->serializer_enabled() &&
+ info()->GetMustNotHaveEagerFrame() &&
+ generator.NeedsEagerFrame()));
return code;
}
+ assembler.AbortedCodeGeneration();
return Handle<Code>::null();
}
@@ -463,11 +480,135 @@ void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
}
-LInstruction* LChunkBuilder::CheckElideControlInstruction(
- HControlInstruction* instr) {
- HBasicBlock* successor;
- if (!instr->KnownSuccessorBlock(&successor)) return NULL;
- return new(zone()) LGoto(successor);
+LEnvironment* LChunkBuilderBase::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
+ if (hydrogen_env == NULL) return NULL;
+
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
+ hydrogen_env->frame_type() != JS_FUNCTION);
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
+ LEnvironment* result =
+ new(zone()) LEnvironment(hydrogen_env->closure(),
+ hydrogen_env->frame_type(),
+ ast_id,
+ hydrogen_env->parameter_count(),
+ argument_count_,
+ value_count,
+ outer,
+ hydrogen_env->entry(),
+ zone());
+ int argument_index = *argument_index_accumulator;
+
+ // Store the environment description into the environment
+ // (with holes for nested objects)
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
+ LOperand* op;
+ HValue* value = hydrogen_env->values()->at(i);
+ CHECK(!value->IsPushArguments()); // Do not deopt outgoing arguments
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ op = LEnvironment::materialization_marker();
+ } else {
+ op = UseAny(value);
+ }
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+
+ // Recursively store the nested objects into the environment
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
+ HValue* value = hydrogen_env->values()->at(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ AddObjectToMaterialize(value, objects_to_materialize, result);
+ }
+ }
+
+ if (hydrogen_env->frame_type() == JS_FUNCTION) {
+ *argument_index_accumulator = argument_index;
+ }
+
+ return result;
+}
+
+
+// Add an object to the supplied environment and object materialization list.
+//
+// Notes:
+//
+// We are building three lists here:
+//
+// 1. In the result->object_mapping_ list (added to by the
+// LEnvironment::Add*Object methods), we store the lengths (number
+// of fields) of the captured objects in depth-first traversal order, or
+// in case of duplicated objects, we store the index to the duplicate object
+// (with a tag to differentiate between captured and duplicated objects).
+//
+// 2. The object fields are stored in the result->values_ list
+// (added to by the LEnvironment.AddValue method) sequentially as lists
+// of fields with holes for nested objects (the holes will be expanded
+// later by LCodegen::AddToTranslation according to the
+// LEnvironment.object_mapping_ list).
+//
+// 3. The auxiliary objects_to_materialize array stores the hydrogen values
+// in the same order as result->object_mapping_ list. This is used
+// to detect duplicate values and calculate the corresponding object index.
+void LChunkBuilderBase::AddObjectToMaterialize(HValue* value,
+ ZoneList<HValue*>* objects_to_materialize, LEnvironment* result) {
+ int object_index = objects_to_materialize->length();
+ // Store the hydrogen value into the de-duplication array
+ objects_to_materialize->Add(value, zone());
+ // Find out whether we are storing a duplicated value
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < object_index; ++prev) {
+ if (objects_to_materialize->at(prev) == value) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ // Store the captured object length (or duplicated object index)
+ // into the environment. For duplicated objects, we stop here.
+ int length = value->OperandCount();
+ bool is_arguments = value->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ return;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ // Store the captured object's fields into the environment
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* arg_value = value->OperandAt(i);
+ if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
+ // Insert a hole for nested objects
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!arg_value->IsPushArguments());
+ // For ordinary values, tell the register allocator we need the value
+ // to be alive here
+ op = UseAny(arg_value);
+ }
+ result->AddValue(op,
+ arg_value->representation(),
+ arg_value->CheckFlag(HInstruction::kUint32));
+ }
+ // Recursively store all the nested captured objects into the environment
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ HValue* arg_value = value->OperandAt(i);
+ if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
+ AddObjectToMaterialize(arg_value, objects_to_materialize, result);
+ }
+ }
}
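Here the DEFINE_OPERAND_CACHE macro gives way to out-of-line member definitions of the LSubKindOperand class template, and LITHIUM_OPERAND_LIST (see the lithium.h hunk below) gains a third parameter carrying the per-kind cache size. Each cached operand kind then presumably reduces to an instantiation along these lines (illustrative; the actual typedefs are outside this excerpt):

    // Sketch: one template definition replaces five macro-stamped classes.
    typedef v8::internal::LSubKindOperand<v8::internal::LOperand::REGISTER, 16>
        LRegisterSketch;
    // SetUpCaches() then expands LITHIUM_OPERAND_LIST so that, e.g.,
    // LConstantOperand caches 128 entries while LRegister caches only 16.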
diff --git a/chromium/v8/src/lithium.h b/chromium/v8/src/lithium.h
index d4395f2d7ee..8aeebe6c835 100644
--- a/chromium/v8/src/lithium.h
+++ b/chromium/v8/src/lithium.h
@@ -1,46 +1,26 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LITHIUM_H_
#define V8_LITHIUM_H_
-#include "allocation.h"
-#include "hydrogen.h"
-#include "safepoint-table.h"
+#include <set>
+
+#include "src/allocation.h"
+#include "src/hydrogen.h"
+#include "src/safepoint-table.h"
+#include "src/zone-allocator.h"
namespace v8 {
namespace internal {
-#define LITHIUM_OPERAND_LIST(V) \
- V(ConstantOperand, CONSTANT_OPERAND) \
- V(StackSlot, STACK_SLOT) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
- V(Register, REGISTER) \
- V(DoubleRegister, DOUBLE_REGISTER)
+#define LITHIUM_OPERAND_LIST(V) \
+ V(ConstantOperand, CONSTANT_OPERAND, 128) \
+ V(StackSlot, STACK_SLOT, 128) \
+ V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
+ V(Register, REGISTER, 16) \
+ V(DoubleRegister, DOUBLE_REGISTER, 16)
class LOperand : public ZoneObject {
@@ -52,20 +32,18 @@ class LOperand : public ZoneObject {
STACK_SLOT,
DOUBLE_STACK_SLOT,
REGISTER,
- DOUBLE_REGISTER,
- ARGUMENT
+ DOUBLE_REGISTER
};
LOperand() : value_(KindField::encode(INVALID)) { }
Kind kind() const { return KindField::decode(value_); }
int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
-#define LITHIUM_OPERAND_PREDICATE(name, type) \
+#define LITHIUM_OPERAND_PREDICATE(name, type, number) \
bool Is##name() const { return kind() == type; }
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
- LITHIUM_OPERAND_PREDICATE(Argument, ARGUMENT)
- LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
- LITHIUM_OPERAND_PREDICATE(Ignored, INVALID)
+ LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+ LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0)
#undef LITHIUM_OPERAND_PREDICATE
bool Equals(LOperand* other) const { return value_ == other->value_; }
@@ -103,6 +81,7 @@ class LUnallocated : public LOperand {
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
MUST_HAVE_REGISTER,
+ MUST_HAVE_DOUBLE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT
};
@@ -212,6 +191,10 @@ class LUnallocated : public LOperand {
extended_policy() == WRITABLE_REGISTER ||
extended_policy() == MUST_HAVE_REGISTER);
}
+ bool HasDoubleRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == MUST_HAVE_DOUBLE_REGISTER;
+ }
bool HasSameAsInputPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == SAME_AS_FIRST_INPUT;
@@ -317,140 +300,35 @@ class LMoveOperands V8_FINAL BASE_EMBEDDED {
};
-class LConstantOperand V8_FINAL : public LOperand {
- public:
- static LConstantOperand* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LConstantOperand(index);
- }
-
- static LConstantOperand* cast(LOperand* op) {
- ASSERT(op->IsConstantOperand());
- return reinterpret_cast<LConstantOperand*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LConstantOperand* cache;
-
- LConstantOperand() : LOperand() { }
- explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
-};
-
-
-class LArgument V8_FINAL : public LOperand {
- public:
- explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
-
- static LArgument* cast(LOperand* op) {
- ASSERT(op->IsArgument());
- return reinterpret_cast<LArgument*>(op);
- }
-};
-
-
-class LStackSlot V8_FINAL : public LOperand {
- public:
- static LStackSlot* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LStackSlot(index);
- }
-
- static LStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LStackSlot*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LStackSlot* cache;
-
- LStackSlot() : LOperand() { }
- explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
-};
-
-
-class LDoubleStackSlot V8_FINAL : public LOperand {
- public:
- static LDoubleStackSlot* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LDoubleStackSlot(index);
- }
-
- static LDoubleStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LDoubleStackSlot*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LDoubleStackSlot* cache;
-
- LDoubleStackSlot() : LOperand() { }
- explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
-};
-
-
-class LRegister V8_FINAL : public LOperand {
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+class LSubKindOperand V8_FINAL : public LOperand {
public:
- static LRegister* Create(int index, Zone* zone) {
+ static LSubKindOperand* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LRegister(index);
+ return new(zone) LSubKindOperand(index);
}
- static LRegister* cast(LOperand* op) {
- ASSERT(op->IsRegister());
- return reinterpret_cast<LRegister*>(op);
+ static LSubKindOperand* cast(LOperand* op) {
+ ASSERT(op->kind() == kOperandKind);
+ return reinterpret_cast<LSubKindOperand*>(op);
}
static void SetUpCache();
static void TearDownCache();
private:
- static const int kNumCachedOperands = 16;
- static LRegister* cache;
+ static LSubKindOperand* cache;
- LRegister() : LOperand() { }
- explicit LRegister(int index) : LOperand(REGISTER, index) { }
+ LSubKindOperand() : LOperand() { }
+ explicit LSubKindOperand(int index) : LOperand(kOperandKind, index) { }
};
-class LDoubleRegister V8_FINAL : public LOperand {
- public:
- static LDoubleRegister* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LDoubleRegister(index);
- }
-
- static LDoubleRegister* cast(LOperand* op) {
- ASSERT(op->IsDoubleRegister());
- return reinterpret_cast<LDoubleRegister*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LDoubleRegister* cache;
-
- LDoubleRegister() : LOperand() { }
- explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
-};
+#define LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
+typedef LSubKindOperand<LOperand::type, number> L##name;
+LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
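
The typedefs generated above preserve the old class names, so existing call
sites compile unchanged. A usage sketch inside v8::internal, assuming a live
Zone* zone from the surrounding builder:

    // Small indices return cached instances; larger ones are zone-allocated,
    // with the cache sizes (16 registers, 128 slots) taken from the list above.
    LRegister* reg = LRegister::Create(3, zone);       // index < 16: cached
    LStackSlot* slot = LStackSlot::Create(200, zone);  // index >= 128: in zone
    ASSERT(reg->IsRegister());
    ASSERT(slot->IsStackSlot());
    LOperand* op = slot;
    LStackSlot* back = LStackSlot::cast(op);  // checked against op->kind()
    USE(back);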
class LParallelMove V8_FINAL : public ZoneObject {
@@ -533,7 +411,8 @@ class LEnvironment V8_FINAL : public ZoneObject {
object_mapping_(0, zone),
outer_(outer),
entry_(entry),
- zone_(zone) { }
+ zone_(zone),
+ has_been_used_(false) { }
Handle<JSFunction> closure() const { return closure_; }
FrameType frame_type() const { return frame_type_; }
@@ -549,6 +428,9 @@ class LEnvironment V8_FINAL : public ZoneObject {
HEnterInlined* entry() { return entry_; }
Zone* zone() const { return zone_; }
+ bool has_been_used() const { return has_been_used_; }
+ void set_has_been_used() { has_been_used_ = true; }
+
void AddValue(LOperand* operand,
Representation representation,
bool is_uint32) {
@@ -648,6 +530,7 @@ class LEnvironment V8_FINAL : public ZoneObject {
LEnvironment* outer_;
HEnterInlined* entry_;
Zone* zone_;
+ bool has_been_used_;
};
@@ -679,7 +562,7 @@ class ShallowIterator V8_FINAL BASE_EMBEDDED {
private:
bool ShouldSkip(LOperand* op) {
- return op == NULL || op->IsConstantOperand() || op->IsArgument();
+ return op == NULL || op->IsConstantOperand();
}
// Skip until something interesting, beginning with and including current_.
@@ -767,6 +650,20 @@ class LChunk : public ZoneObject {
inlined_closures_.Add(closure, zone());
}
+ void AddDeprecationDependency(Handle<Map> map) {
+ ASSERT(!map->is_deprecated());
+ if (!map->CanBeDeprecated()) return;
+ ASSERT(!info_->IsStub());
+ deprecation_dependencies_.insert(map);
+ }
+
+ void AddStabilityDependency(Handle<Map> map) {
+ ASSERT(map->is_stable());
+ if (!map->CanTransition()) return;
+ ASSERT(!info_->IsStub());
+ stability_dependencies_.insert(map);
+ }
+
Zone* zone() const { return info_->zone(); }
Handle<Code> Codegen();
@@ -782,12 +679,49 @@ class LChunk : public ZoneObject {
int spill_slot_count_;
private:
+ typedef std::less<Handle<Map> > MapLess;
+ typedef zone_allocator<Handle<Map> > MapAllocator;
+ typedef std::set<Handle<Map>, MapLess, MapAllocator> MapSet;
+
+ void CommitDependencies(Handle<Code> code) const;
+
CompilationInfo* info_;
HGraph* const graph_;
BitVector* allocated_double_registers_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
ZoneList<Handle<JSFunction> > inlined_closures_;
+ MapSet deprecation_dependencies_;
+ MapSet stability_dependencies_;
+};
+
+
+class LChunkBuilderBase BASE_EMBEDDED {
+ public:
+ explicit LChunkBuilderBase(Zone* zone)
+ : argument_count_(0),
+ zone_(zone) { }
+
+ virtual ~LChunkBuilderBase() { }
+
+ protected:
+  // An input operand in a register, a stack slot, or a constant operand.
+  // It will not be moved to a register even if one is freely available.
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
+
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
+ void AddObjectToMaterialize(HValue* value,
+ ZoneList<HValue*>* objects_to_materialize,
+ LEnvironment* result);
+
+ Zone* zone() const { return zone_; }
+
+ int argument_count_;
+
+ private:
+ Zone* zone_;
};
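
The MapSet typedef above combines std::set with V8's zone allocator, so the
dependency nodes are released together with the chunk's zone instead of
requiring explicit teardown. The same pattern in miniature, a sketch using
int in place of Handle<Map> and assuming zone_allocator is constructible
from a Zone* (as the src/zone-allocator.h include suggests):

    typedef zone_allocator<int> IntAllocator;
    typedef std::set<int, std::less<int>, IntAllocator> IntSet;

    // The comparator and allocator are passed explicitly because the
    // allocator is stateful: it carries the Zone*.
    IntSet deps(std::less<int>(), IntAllocator(zone));
    deps.insert(42);  // node memory comes from the zone
    deps.insert(42);  // set semantics: the duplicate is ignored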
diff --git a/chromium/v8/src/liveedit-debugger.js b/chromium/v8/src/liveedit-debugger.js
index 4618eda3666..07214f9657c 100644
--- a/chromium/v8/src/liveedit-debugger.js
+++ b/chromium/v8/src/liveedit-debugger.js
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// LiveEdit feature implementation. The script should be executed after
// debug-debugger.js.
@@ -969,7 +946,9 @@ Debug.LiveEdit = new function() {
BLOCKED_ON_ACTIVE_STACK: 2,
BLOCKED_ON_OTHER_STACK: 3,
BLOCKED_UNDER_NATIVE_CODE: 4,
- REPLACED_ON_ACTIVE_STACK: 5
+ REPLACED_ON_ACTIVE_STACK: 5,
+ BLOCKED_UNDER_GENERATOR: 6,
+ BLOCKED_ACTIVE_GENERATOR: 7
};
FunctionPatchabilityStatus.SymbolName = function(code) {
diff --git a/chromium/v8/src/liveedit.cc b/chromium/v8/src/liveedit.cc
index 3d459d4ffb7..05bd550b65e 100644
--- a/chromium/v8/src/liveedit.cc
+++ b/chromium/v8/src/liveedit.cc
@@ -1,64 +1,34 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#include "liveedit.h"
-
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "compiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "global-handles.h"
-#include "messages.h"
-#include "parser.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-#include "v8memory.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-namespace v8 {
-namespace internal {
+#include "src/v8.h"
+
+#include "src/liveedit.h"
-#ifdef ENABLE_DEBUGGER_SUPPORT
+#include "src/code-stubs.h"
+#include "src/compilation-cache.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/global-handles.h"
+#include "src/messages.h"
+#include "src/parser.h"
+#include "src/scopeinfo.h"
+#include "src/scopes.h"
+#include "src/v8memory.h"
+namespace v8 {
+namespace internal {
-void SetElementNonStrict(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value) {
+void SetElementSloppy(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value) {
// Ignore return value from SetElement. It can only be a failure if there
// are element setters causing exceptions and the debugger context has none
// of these.
- Handle<Object> no_failure =
- JSObject::SetElement(object, index, value, NONE, kNonStrictMode);
- ASSERT(!no_failure.is_null());
- USE(no_failure);
+ JSObject::SetElement(object, index, value, NONE, SLOPPY).Assert();
}
@@ -359,17 +329,17 @@ class CompareOutputArrayWriter {
void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
Isolate* isolate = array_->GetIsolate();
- SetElementNonStrict(array_,
- current_size_,
- Handle<Object>(Smi::FromInt(char_pos1), isolate));
- SetElementNonStrict(array_,
- current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
- isolate));
- SetElementNonStrict(array_,
- current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
- isolate));
+ SetElementSloppy(array_,
+ current_size_,
+ Handle<Object>(Smi::FromInt(char_pos1), isolate));
+ SetElementSloppy(array_,
+ current_size_ + 1,
+ Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
+ isolate));
+ SetElementSloppy(array_,
+ current_size_ + 2,
+ Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
+ isolate));
current_size_ += 3;
}
@@ -434,7 +404,7 @@ class TokensCompareOutput : public Comparator::Output {
class LineEndsWrapper {
public:
explicit LineEndsWrapper(Handle<String> string)
- : ends_array_(CalculateLineEnds(string, false)),
+ : ends_array_(String::CalculateLineEnds(string, false)),
string_len_(string->length()) {
}
int length() {
@@ -585,8 +555,8 @@ class TokenizingLineArrayCompareOutput : public SubrangableOutput {
Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
Handle<String> s2) {
- s1 = FlattenGetString(s1);
- s2 = FlattenGetString(s2);
+ s1 = String::Flatten(s1);
+ s2 = String::Flatten(s2);
LineEndsWrapper line_ends1(s1);
LineEndsWrapper line_ends2(s2);
@@ -602,27 +572,6 @@ Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
}
-static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
- // TODO(635): support extensions.
- PostponeInterruptsScope postpone(isolate);
-
- // Build AST.
- CompilationInfoWithZone info(script);
- info.MarkAsGlobal();
- // Parse and don't allow skipping lazy functions.
- if (Parser::Parse(&info)) {
- // Compile the code.
- LiveEditFunctionTracker tracker(info.isolate(), info.function());
- if (Compiler::MakeCodeForLiveEdit(&info)) {
- ASSERT(!info.code().is_null());
- tracker.RecordRootFunctionInfo(info.code());
- } else {
- info.isolate()->StackOverflow();
- }
- }
-}
-
-
// Unwraps JSValue object, returning its field "value"
static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
return Handle<Object>(jsValue->value(), jsValue->GetIsolate());
@@ -656,171 +605,94 @@ static int GetArrayLength(Handle<JSArray> array) {
}
-// Simple helper class that creates more or less typed structures over
-// JSArray object. This is an adhoc method of passing structures from C++
-// to JavaScript.
-template<typename S>
-class JSArrayBasedStruct {
- public:
- static S Create(Isolate* isolate) {
- Factory* factory = isolate->factory();
- Handle<JSArray> array = factory->NewJSArray(S::kSize_);
- return S(array);
- }
- static S cast(Object* object) {
- JSArray* array = JSArray::cast(object);
- Handle<JSArray> array_handle(array);
- return S(array_handle);
- }
- explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) {
- }
- Handle<JSArray> GetJSArray() {
- return array_;
- }
- Isolate* isolate() const {
- return array_->GetIsolate();
- }
+void FunctionInfoWrapper::SetInitialProperties(Handle<String> name,
+ int start_position,
+ int end_position,
+ int param_num,
+ int literal_count,
+ int slot_count,
+ int parent_index) {
+ HandleScope scope(isolate());
+ this->SetField(kFunctionNameOffset_, name);
+ this->SetSmiValueField(kStartPositionOffset_, start_position);
+ this->SetSmiValueField(kEndPositionOffset_, end_position);
+ this->SetSmiValueField(kParamNumOffset_, param_num);
+ this->SetSmiValueField(kLiteralNumOffset_, literal_count);
+ this->SetSmiValueField(kSlotNumOffset_, slot_count);
+ this->SetSmiValueField(kParentIndexOffset_, parent_index);
+}
- protected:
- void SetField(int field_position, Handle<Object> value) {
- SetElementNonStrict(array_, field_position, value);
- }
- void SetSmiValueField(int field_position, int value) {
- SetElementNonStrict(array_,
- field_position,
- Handle<Smi>(Smi::FromInt(value), isolate()));
- }
- Object* GetField(int field_position) {
- return array_->GetElementNoExceptionThrown(isolate(), field_position);
- }
- int GetSmiValueField(int field_position) {
- Object* res = GetField(field_position);
- CHECK(res->IsSmi());
- return Smi::cast(res)->value();
- }
- private:
- Handle<JSArray> array_;
-};
+void FunctionInfoWrapper::SetFunctionCode(Handle<Code> function_code,
+ Handle<HeapObject> code_scope_info) {
+ Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
+ this->SetField(kCodeOffset_, code_wrapper);
+ Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
+ this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
+}
-// Represents some function compilation details. This structure will be used
-// from JavaScript. It contains Code object, which is kept wrapped
-// into a BlindReference for sanitizing reasons.
-class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
- public:
- explicit FunctionInfoWrapper(Handle<JSArray> array)
- : JSArrayBasedStruct<FunctionInfoWrapper>(array) {
- }
- void SetInitialProperties(Handle<String> name, int start_position,
- int end_position, int param_num,
- int literal_count, int parent_index) {
- HandleScope scope(isolate());
- this->SetField(kFunctionNameOffset_, name);
- this->SetSmiValueField(kStartPositionOffset_, start_position);
- this->SetSmiValueField(kEndPositionOffset_, end_position);
- this->SetSmiValueField(kParamNumOffset_, param_num);
- this->SetSmiValueField(kLiteralNumOffset_, literal_count);
- this->SetSmiValueField(kParentIndexOffset_, parent_index);
- }
- void SetFunctionCode(Handle<Code> function_code,
- Handle<HeapObject> code_scope_info) {
- Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
- this->SetField(kCodeOffset_, code_wrapper);
-
- Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
- this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
- }
- void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
- this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
- }
- void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
- Handle<JSValue> info_holder = WrapInJSValue(info);
- this->SetField(kSharedFunctionInfoOffset_, info_holder);
- }
- int GetLiteralCount() {
- return this->GetSmiValueField(kLiteralNumOffset_);
- }
- int GetParentIndex() {
- return this->GetSmiValueField(kParentIndexOffset_);
- }
- Handle<Code> GetFunctionCode() {
- Object* element = this->GetField(kCodeOffset_);
- CHECK(element->IsJSValue());
- Handle<JSValue> value_wrapper(JSValue::cast(element));
- Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
- CHECK(raw_result->IsCode());
- return Handle<Code>::cast(raw_result);
- }
- Handle<Object> GetCodeScopeInfo() {
- Object* element = this->GetField(kCodeScopeInfoOffset_);
- CHECK(element->IsJSValue());
- return UnwrapJSValue(Handle<JSValue>(JSValue::cast(element)));
- }
- int GetStartPosition() {
- return this->GetSmiValueField(kStartPositionOffset_);
- }
- int GetEndPosition() {
- return this->GetSmiValueField(kEndPositionOffset_);
- }
- private:
- static const int kFunctionNameOffset_ = 0;
- static const int kStartPositionOffset_ = 1;
- static const int kEndPositionOffset_ = 2;
- static const int kParamNumOffset_ = 3;
- static const int kCodeOffset_ = 4;
- static const int kCodeScopeInfoOffset_ = 5;
- static const int kFunctionScopeInfoOffset_ = 6;
- static const int kParentIndexOffset_ = 7;
- static const int kSharedFunctionInfoOffset_ = 8;
- static const int kLiteralNumOffset_ = 9;
- static const int kSize_ = 10;
-
- friend class JSArrayBasedStruct<FunctionInfoWrapper>;
-};
+void FunctionInfoWrapper::SetSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info) {
+ Handle<JSValue> info_holder = WrapInJSValue(info);
+ this->SetField(kSharedFunctionInfoOffset_, info_holder);
+}
-// Wraps SharedFunctionInfo along with some of its fields for passing it
-// back to JavaScript. SharedFunctionInfo object itself is additionally
-// wrapped into BlindReference for sanitizing reasons.
-class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
- public:
- static bool IsInstance(Handle<JSArray> array) {
- return array->length() == Smi::FromInt(kSize_) &&
- array->GetElementNoExceptionThrown(
- array->GetIsolate(), kSharedInfoOffset_)->IsJSValue();
- }
+Handle<Code> FunctionInfoWrapper::GetFunctionCode() {
+ Handle<Object> element = this->GetField(kCodeOffset_);
+ Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
+ Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
+ CHECK(raw_result->IsCode());
+ return Handle<Code>::cast(raw_result);
+}
- explicit SharedInfoWrapper(Handle<JSArray> array)
- : JSArrayBasedStruct<SharedInfoWrapper>(array) {
- }
- void SetProperties(Handle<String> name, int start_position, int end_position,
- Handle<SharedFunctionInfo> info) {
- HandleScope scope(isolate());
- this->SetField(kFunctionNameOffset_, name);
- Handle<JSValue> info_holder = WrapInJSValue(info);
- this->SetField(kSharedInfoOffset_, info_holder);
- this->SetSmiValueField(kStartPositionOffset_, start_position);
- this->SetSmiValueField(kEndPositionOffset_, end_position);
- }
- Handle<SharedFunctionInfo> GetInfo() {
- Object* element = this->GetField(kSharedInfoOffset_);
- CHECK(element->IsJSValue());
- Handle<JSValue> value_wrapper(JSValue::cast(element));
- return UnwrapSharedFunctionInfoFromJSValue(value_wrapper);
+Handle<FixedArray> FunctionInfoWrapper::GetFeedbackVector() {
+ Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_);
+ Handle<FixedArray> result;
+ if (element->IsJSValue()) {
+ Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
+ Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
+ Handle<SharedFunctionInfo> shared =
+ Handle<SharedFunctionInfo>::cast(raw_result);
+ result = Handle<FixedArray>(shared->feedback_vector(), isolate());
+ CHECK_EQ(result->length(), GetSlotCount());
+ } else {
+ // Scripts may never have a SharedFunctionInfo created, so
+ // create a type feedback vector here.
+ int slot_count = GetSlotCount();
+ result = isolate()->factory()->NewTypeFeedbackVector(slot_count);
}
+ return result;
+}
- private:
- static const int kFunctionNameOffset_ = 0;
- static const int kStartPositionOffset_ = 1;
- static const int kEndPositionOffset_ = 2;
- static const int kSharedInfoOffset_ = 3;
- static const int kSize_ = 4;
- friend class JSArrayBasedStruct<SharedInfoWrapper>;
-};
+Handle<Object> FunctionInfoWrapper::GetCodeScopeInfo() {
+ Handle<Object> element = this->GetField(kCodeScopeInfoOffset_);
+ return UnwrapJSValue(Handle<JSValue>::cast(element));
+}
+
+
+void SharedInfoWrapper::SetProperties(Handle<String> name,
+ int start_position,
+ int end_position,
+ Handle<SharedFunctionInfo> info) {
+ HandleScope scope(isolate());
+ this->SetField(kFunctionNameOffset_, name);
+ Handle<JSValue> info_holder = WrapInJSValue(info);
+ this->SetField(kSharedInfoOffset_, info_holder);
+ this->SetSmiValueField(kStartPositionOffset_, start_position);
+ this->SetSmiValueField(kEndPositionOffset_, end_position);
+}
+
+
+Handle<SharedFunctionInfo> SharedInfoWrapper::GetInfo() {
+ Handle<Object> element = this->GetField(kSharedInfoOffset_);
+ Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
+ return UnwrapSharedFunctionInfoFromJSValue(value_wrapper);
+}
class FunctionInfoListener {
@@ -837,9 +709,10 @@ class FunctionInfoListener {
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->parameter_count(),
fun->materialized_literal_count(),
+ fun->slot_count(),
current_parent_index_);
current_parent_index_ = len_;
- SetElementNonStrict(result_, len_, info.GetJSArray());
+ SetElementSloppy(result_, len_, info.GetJSArray());
len_++;
}
@@ -847,8 +720,8 @@ class FunctionInfoListener {
HandleScope scope(isolate());
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(
- isolate(), current_parent_index_));
+ *Object::GetElement(
+ isolate(), result_, current_parent_index_).ToHandleChecked());
current_parent_index_ = info.GetParentIndex();
}
@@ -857,8 +730,8 @@ class FunctionInfoListener {
void FunctionCode(Handle<Code> function_code) {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(
- isolate(), current_parent_index_));
+ *Object::GetElement(
+ isolate(), result_, current_parent_index_).ToHandleChecked());
info.SetFunctionCode(function_code,
Handle<HeapObject>(isolate()->heap()->null_value()));
}
@@ -872,14 +745,13 @@ class FunctionInfoListener {
}
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(
- isolate(), current_parent_index_));
+ *Object::GetElement(
+ isolate(), result_, current_parent_index_).ToHandleChecked());
info.SetFunctionCode(Handle<Code>(shared->code()),
Handle<HeapObject>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
- Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone),
- isolate());
+ Handle<Object> scope_info_list = SerializeFunctionScope(scope, zone);
info.SetFunctionScopeInfo(scope_info_list);
}
@@ -888,9 +760,7 @@ class FunctionInfoListener {
private:
Isolate* isolate() const { return result_->GetIsolate(); }
- Object* SerializeFunctionScope(Scope* scope, Zone* zone) {
- HandleScope handle_scope(isolate());
-
+ Handle<Object> SerializeFunctionScope(Scope* scope, Zone* zone) {
Handle<JSArray> scope_info_list = isolate()->factory()->NewJSArray(10);
int scope_info_length = 0;
@@ -899,6 +769,7 @@ class FunctionInfoListener {
// scopes of this chain.
Scope* current_scope = scope;
while (current_scope != NULL) {
+ HandleScope handle_scope(isolate());
ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone);
ZoneList<Variable*> context_list(
current_scope->ContextLocalCount(), zone);
@@ -906,26 +777,26 @@ class FunctionInfoListener {
context_list.Sort(&Variable::CompareIndex);
for (int i = 0; i < context_list.length(); i++) {
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- context_list[i]->name());
+ SetElementSloppy(scope_info_list,
+ scope_info_length,
+ context_list[i]->name());
scope_info_length++;
- SetElementNonStrict(
+ SetElementSloppy(
scope_info_list,
scope_info_length,
Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate()));
scope_info_length++;
}
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- Handle<Object>(isolate()->heap()->null_value(),
- isolate()));
+ SetElementSloppy(scope_info_list,
+ scope_info_length,
+ Handle<Object>(isolate()->heap()->null_value(),
+ isolate()));
scope_info_length++;
current_scope = current_scope->outer_scope();
}
- return *scope_info_list;
+ return scope_info_list;
}
Handle<JSArray> result_;
@@ -934,8 +805,43 @@ class FunctionInfoListener {
};
-JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
- Handle<String> source) {
+void LiveEdit::InitializeThreadLocal(Debug* debug) {
+ debug->thread_local_.frame_drop_mode_ = LiveEdit::FRAMES_UNTOUCHED;
+}
+
+
+bool LiveEdit::SetAfterBreakTarget(Debug* debug) {
+ Code* code = NULL;
+ Isolate* isolate = debug->isolate_;
+ switch (debug->thread_local_.frame_drop_mode_) {
+ case FRAMES_UNTOUCHED:
+ return false;
+ case FRAME_DROPPED_IN_IC_CALL:
+      // We must have been calling an IC stub. Do not go there anymore.
+ code = isolate->builtins()->builtin(Builtins::kPlainReturn_LiveEdit);
+ break;
+ case FRAME_DROPPED_IN_DEBUG_SLOT_CALL:
+      // The debug break slot stub does not return normally; instead it
+      // manually cleans the stack and jumps. We should patch the jump address.
+ code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
+ break;
+ case FRAME_DROPPED_IN_DIRECT_CALL:
+ // Nothing to do, after_break_target is not used here.
+ return true;
+ case FRAME_DROPPED_IN_RETURN_CALL:
+ code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
+ break;
+ case CURRENTLY_SET_MODE:
+ UNREACHABLE();
+ break;
+ }
+ debug->after_break_target_ = code->entry();
+ return true;
+}
+
+
+MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
+ Handle<String> source) {
Isolate* isolate = script->GetIsolate();
FunctionInfoListener listener(isolate);
@@ -951,14 +857,13 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
try_catch.SetVerbose(true);
// A logical 'try' section.
- CompileScriptForTracker(isolate, script);
+ Compiler::CompileForLiveEdit(script);
}
// A logical 'catch' section.
Handle<JSObject> rethrow_exception;
if (isolate->has_pending_exception()) {
- Handle<Object> exception(isolate->pending_exception()->ToObjectChecked(),
- isolate);
+ Handle<Object> exception(isolate->pending_exception(), isolate);
MessageLocation message_location = isolate->GetMessageLocation();
isolate->clear_pending_message();
@@ -978,13 +883,14 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<Smi> start_pos(
Smi::FromInt(message_location.start_pos()), isolate);
Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
- Handle<JSValue> script_obj = GetScriptWrapper(message_location.script());
+ Handle<JSObject> script_obj =
+ Script::GetWrapper(message_location.script());
JSReceiver::SetProperty(
- rethrow_exception, start_pos_key, start_pos, NONE, kNonStrictMode);
+ rethrow_exception, start_pos_key, start_pos, NONE, SLOPPY).Assert();
JSReceiver::SetProperty(
- rethrow_exception, end_pos_key, end_pos, NONE, kNonStrictMode);
+ rethrow_exception, end_pos_key, end_pos, NONE, SLOPPY).Assert();
JSReceiver::SetProperty(
- rethrow_exception, script_obj_key, script_obj, NONE, kNonStrictMode);
+ rethrow_exception, script_obj_key, script_obj, NONE, SLOPPY).Assert();
}
}
@@ -993,10 +899,9 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
script->set_source(*original_source);
if (rethrow_exception.is_null()) {
- return *(listener.GetResult());
+ return listener.GetResult();
} else {
- isolate->Throw(*rethrow_exception);
- return 0;
+ return isolate->Throw<JSArray>(rethrow_exception);
}
}
@@ -1008,12 +913,12 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
for (int i = 0; i < len; i++) {
Handle<SharedFunctionInfo> info(
SharedFunctionInfo::cast(
- array->GetElementNoExceptionThrown(isolate, i)));
+ *Object::GetElement(isolate, array, i).ToHandleChecked()));
SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
Handle<String> name_handle(String::cast(info->name()));
info_wrapper.SetProperties(name_handle, info->start_position(),
info->end_position(), info);
- SetElementNonStrict(array, i, info_wrapper.GetJSArray());
+ SetElementSloppy(array, i, info_wrapper.GetJSArray());
}
}
@@ -1069,13 +974,10 @@ static void ReplaceCodeObject(Handle<Code> original,
// to code objects (that are never in new space) without worrying about
// write barriers.
Heap* heap = original->GetHeap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "liveedit.cc ReplaceCodeObject");
+ HeapIterator iterator(heap);
ASSERT(!heap->InNewSpace(*substitution));
- DisallowHeapAllocation no_allocation;
-
ReplacingVisitor visitor(*original, *substitution);
  // Iterate over all roots. Stack frames may have pointers into original code,
@@ -1085,7 +987,6 @@ static void ReplaceCodeObject(Handle<Code> original,
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.
- HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
obj->Iterate(&visitor);
}
@@ -1112,7 +1013,7 @@ class LiteralFixer {
// If literal count didn't change, simply go over all functions
// and clear literal arrays.
ClearValuesVisitor visitor;
- IterateJSFunctions(*shared_info, &visitor);
+ IterateJSFunctions(shared_info, &visitor);
} else {
// When literal count changes, we have to create new array instances.
// Since we cannot create instances when iterating heap, we should first
@@ -1147,16 +1048,14 @@ class LiteralFixer {
// Iterates all function instances in the HEAP that refers to the
// provided shared_info.
template<typename Visitor>
- static void IterateJSFunctions(SharedFunctionInfo* shared_info,
+ static void IterateJSFunctions(Handle<SharedFunctionInfo> shared_info,
Visitor* visitor) {
- DisallowHeapAllocation no_allocation;
-
HeapIterator iterator(shared_info->GetHeap());
for (HeapObject* obj = iterator.next(); obj != NULL;
obj = iterator.next()) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
- if (function->shared() == shared_info) {
+ if (function->shared() == *shared_info) {
visitor->visit(function);
}
}
@@ -1169,13 +1068,13 @@ class LiteralFixer {
Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
CountVisitor count_visitor;
count_visitor.count = 0;
- IterateJSFunctions(*shared_info, &count_visitor);
+ IterateJSFunctions(shared_info, &count_visitor);
int size = count_visitor.count;
Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
if (size > 0) {
CollectVisitor collect_visitor(result);
- IterateJSFunctions(*shared_info, &collect_visitor);
+ IterateJSFunctions(shared_info, &collect_visitor);
}
return result;
}
@@ -1285,23 +1184,16 @@ static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
}
-MaybeObject* LiveEdit::ReplaceFunctionCode(
+void LiveEdit::ReplaceFunctionCode(
Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
Isolate* isolate = new_compile_info_array->GetIsolate();
- HandleScope scope(isolate);
-
- if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return isolate->ThrowIllegalOperation();
- }
FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
- isolate->heap()->EnsureHeapIsIterable();
-
if (IsJSFunctionCode(shared_info->code())) {
Handle<Code> code = compile_info_wrapper.GetFunctionCode();
ReplaceCodeObject(Handle<Code>(shared_info->code()), code);
@@ -1310,6 +1202,10 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
shared_info->DisableOptimization(kLiveEdit);
+ // Update the type feedback vector
+ Handle<FixedArray> feedback_vector =
+ compile_info_wrapper.GetFeedbackVector();
+ shared_info->set_feedback_vector(*feedback_vector);
}
if (shared_info->debug_info()->IsDebugInfo()) {
@@ -1331,27 +1227,15 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
DeoptimizeDependentFunctions(*shared_info);
isolate->compilation_cache()->Remove(shared_info);
-
- return isolate->heap()->undefined_value();
}
-MaybeObject* LiveEdit::FunctionSourceUpdated(
- Handle<JSArray> shared_info_array) {
- Isolate* isolate = shared_info_array->GetIsolate();
- HandleScope scope(isolate);
-
- if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return isolate->ThrowIllegalOperation();
- }
-
+void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array) {
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
DeoptimizeDependentFunctions(*shared_info);
- isolate->compilation_cache()->Remove(shared_info);
-
- return isolate->heap()->undefined_value();
+ shared_info_array->GetIsolate()->compilation_cache()->Remove(shared_info);
}
@@ -1382,23 +1266,24 @@ static int TranslatePosition(int original_position,
Isolate* isolate = position_change_array->GetIsolate();
// TODO(635): binary search may be used here
for (int i = 0; i < array_len; i += 3) {
- Object* element =
- position_change_array->GetElementNoExceptionThrown(isolate, i);
+ HandleScope scope(isolate);
+ Handle<Object> element = Object::GetElement(
+ isolate, position_change_array, i).ToHandleChecked();
CHECK(element->IsSmi());
- int chunk_start = Smi::cast(element)->value();
+ int chunk_start = Handle<Smi>::cast(element)->value();
if (original_position < chunk_start) {
break;
}
- element = position_change_array->GetElementNoExceptionThrown(isolate,
- i + 1);
+ element = Object::GetElement(
+ isolate, position_change_array, i + 1).ToHandleChecked();
CHECK(element->IsSmi());
- int chunk_end = Smi::cast(element)->value();
+ int chunk_end = Handle<Smi>::cast(element)->value();
// Position mustn't be inside a chunk.
ASSERT(original_position >= chunk_end);
- element = position_change_array->GetElementNoExceptionThrown(isolate,
- i + 2);
+ element = Object::GetElement(
+ isolate, position_change_array, i + 2).ToHandleChecked();
CHECK(element->IsSmi());
- int chunk_changed_end = Smi::cast(element)->value();
+ int chunk_changed_end = Handle<Smi>::cast(element)->value();
position_diff = chunk_changed_end - chunk_end;
}
@@ -1461,8 +1346,8 @@ class RelocInfoBuffer {
// Copy the data.
int curently_used_size =
static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
- OS::MemMove(new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.pos(), curently_used_size);
+ MemMove(new_buffer + new_buffer_size - curently_used_size,
+ reloc_info_writer_.pos(), curently_used_size);
reloc_info_writer_.Reposition(
new_buffer + new_buffer_size - curently_used_size,
@@ -1493,7 +1378,6 @@ static Handle<Code> PatchPositionsInCode(
code->instruction_start());
{
- DisallowHeapAllocation no_allocation;
for (RelocIterator it(*code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsPosition(rinfo->rmode())) {
@@ -1516,7 +1400,7 @@ static Handle<Code> PatchPositionsInCode(
if (buffer.length() == code->relocation_size()) {
// Simply patch relocation area of code.
- OS::MemCopy(code->relocation_start(), buffer.start(), buffer.length());
+ MemCopy(code->relocation_start(), buffer.start(), buffer.length());
return code;
} else {
// Relocation info section now has different size. We cannot simply
@@ -1528,12 +1412,8 @@ static Handle<Code> PatchPositionsInCode(
}
-MaybeObject* LiveEdit::PatchFunctionPositions(
- Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
- if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return shared_info_array->GetIsolate()->ThrowIllegalOperation();
- }
-
+void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
+ Handle<JSArray> position_change_array) {
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();
@@ -1549,8 +1429,6 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
info->set_end_position(new_function_end);
info->set_function_token_position(new_function_token_pos);
- info->GetIsolate()->heap()->EnsureHeapIsIterable();
-
if (IsJSFunctionCode(info->code())) {
// Patch relocation info section of the code.
Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
@@ -1564,8 +1442,6 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
ReplaceCodeObject(Handle<Code>(info->code()), patched_code);
}
}
-
- return info->GetIsolate()->heap()->undefined_value();
}
@@ -1578,7 +1454,6 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
copy->set_name(original->name());
copy->set_line_offset(original->line_offset());
copy->set_column_offset(original->column_offset());
- copy->set_data(original->data());
copy->set_type(original->type());
copy->set_context_data(original->context_data());
copy->set_eval_from_shared(original->eval_from_shared());
@@ -1593,17 +1468,16 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
}
-Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
- Handle<String> new_source,
- Handle<Object> old_script_name) {
+Handle<Object> LiveEdit::ChangeScriptSource(Handle<Script> original_script,
+ Handle<String> new_source,
+ Handle<Object> old_script_name) {
Isolate* isolate = original_script->GetIsolate();
Handle<Object> old_script_object;
if (old_script_name->IsString()) {
Handle<Script> old_script = CreateScriptCopy(original_script);
old_script->set_name(String::cast(*old_script_name));
old_script_object = old_script;
- isolate->debugger()->OnAfterCompile(
- old_script, Debugger::SEND_WHEN_DEBUGGING);
+ isolate->debug()->OnAfterCompile(old_script, Debug::SEND_WHEN_DEBUGGING);
} else {
old_script_object = isolate->factory()->null_value();
}
@@ -1613,7 +1487,7 @@ Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
// Drop line ends so that they will be recalculated.
original_script->set_line_ends(isolate->heap()->undefined_value());
- return *old_script_object;
+ return old_script_object;
}
@@ -1653,16 +1527,15 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
for (int i = 0; i < len; i++) {
- Object* element =
- shared_info_array->GetElementNoExceptionThrown(isolate, i);
- CHECK(element->IsJSValue());
- Handle<JSValue> jsvalue(JSValue::cast(element));
+ HandleScope scope(isolate);
+ Handle<Object> element =
+ Object::GetElement(isolate, shared_info_array, i).ToHandleChecked();
+ Handle<JSValue> jsvalue = Handle<JSValue>::cast(element);
Handle<SharedFunctionInfo> shared =
UnwrapSharedFunctionInfoFromJSValue(jsvalue);
if (function->shared() == *shared || IsInlined(*function, *shared)) {
- SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status),
- isolate));
+ SetElementSloppy(result, i, Handle<Smi>(Smi::FromInt(status), isolate));
return true;
}
}
@@ -1691,6 +1564,38 @@ static bool FixTryCatchHandler(StackFrame* top_frame,
}
+// Initializes an artificial stack frame. The data it contains is used:
+// a. by the frame dropper code, which eventually gets control, and
+// b. to stay compatible with the regular stack structure for the various
+//    stack iterators.
+// Returns the address of a stack-allocated pointer to the restarted
+// function, the value that is called 'restarter_frame_function_pointer'.
+// The value at this address (possibly updated by GC) may be used later
+// when preparing a 'step in' operation.
+// Frame structure (conforms to the InternalFrame structure):
+//   -- code
+//   -- SMI marker
+//   -- function (slot is called "context")
+//   -- frame base
+static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+ Handle<Code> code) {
+ ASSERT(bottom_js_frame->is_java_script());
+
+ Address fp = bottom_js_frame->fp();
+
+ // Move function pointer into "context" slot.
+ Memory::Object_at(fp + StandardFrameConstants::kContextOffset) =
+ Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset);
+
+ Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
+ Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
+ Smi::FromInt(StackFrame::INTERNAL);
+
+ return reinterpret_cast<Object**>(&Memory::Object_at(
+ fp + StandardFrameConstants::kContextOffset));
+}
+
+
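
A sketch of how the returned slot is consumed; DropFrames below performs the
same steps through an Object*** out-parameter, and bottom_js_frame and code
are assumed from that context:

    // The returned Object** points into the reconstructed frame itself, so
    // the GC can relocate the function and the slot stays current.
    Object** restarter = SetUpFrameDropperFrame(bottom_js_frame, code);
    ASSERT((*restarter)->IsJSFunction());
    JSFunction* restartee = JSFunction::cast(*restarter);
    USE(restartee);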
 // Removes the specified range of frames from the stack. There may be one
 // or more frames in the range. In any case the bottom frame is restarted
 // rather than dropped, and therefore has to be a JavaScript frame.
@@ -1698,9 +1603,9 @@ static bool FixTryCatchHandler(StackFrame* top_frame,
static const char* DropFrames(Vector<StackFrame*> frames,
int top_frame_index,
int bottom_js_frame_index,
- Debug::FrameDropMode* mode,
+ LiveEdit::FrameDropMode* mode,
Object*** restarter_frame_function_pointer) {
- if (!Debug::kFrameDropperSupported) {
+ if (!LiveEdit::kFrameDropperSupported) {
return "Stack manipulations are not supported in this architecture.";
}
@@ -1713,34 +1618,30 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Check the nature of the top frame.
Isolate* isolate = bottom_js_frame->isolate();
Code* pre_top_frame_code = pre_top_frame->LookupCode();
- bool frame_has_padding;
+ bool frame_has_padding = true;
if (pre_top_frame_code->is_inline_cache_stub() &&
pre_top_frame_code->is_debug_stub()) {
// OK, we can drop inline cache calls.
- *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
- frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
+ *mode = LiveEdit::FRAME_DROPPED_IN_IC_CALL;
} else if (pre_top_frame_code ==
- isolate->debug()->debug_break_slot()) {
+ isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) {
// OK, we can drop debug break slot.
- *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
- frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
+ *mode = LiveEdit::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
} else if (pre_top_frame_code ==
- isolate->builtins()->builtin(
- Builtins::kFrameDropper_LiveEdit)) {
+ isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit)) {
// OK, we can drop our own code.
pre_top_frame = frames[top_frame_index - 2];
top_frame = frames[top_frame_index - 1];
- *mode = Debug::CURRENTLY_SET_MODE;
+ *mode = LiveEdit::CURRENTLY_SET_MODE;
frame_has_padding = false;
} else if (pre_top_frame_code ==
- isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
- *mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
- frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
+ isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
+ *mode = LiveEdit::FRAME_DROPPED_IN_RETURN_CALL;
} else if (pre_top_frame_code->kind() == Code::STUB &&
pre_top_frame_code->major_key() == CodeStub::CEntry) {
// Entry from our unit tests on 'debugger' statement.
// It's fine, we support this case.
- *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
+ *mode = LiveEdit::FRAME_DROPPED_IN_DIRECT_CALL;
      // We don't have padding from a 'debugger' statement call.
      // Here the stub is CEntry; it's not debug-only and can't be padded.
      // If anyone complains, a proxy padded stub could be added.
@@ -1749,19 +1650,19 @@ static const char* DropFrames(Vector<StackFrame*> frames,
    // This must be an adaptor that remains from a frame dropping that
    // is still on the stack. A frame dropper frame must be above it.
ASSERT(frames[top_frame_index - 2]->LookupCode() ==
- isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
+ isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
pre_top_frame = frames[top_frame_index - 3];
top_frame = frames[top_frame_index - 2];
- *mode = Debug::CURRENTLY_SET_MODE;
+ *mode = LiveEdit::CURRENTLY_SET_MODE;
frame_has_padding = false;
} else {
return "Unknown structure of stack above changing function";
}
Address unused_stack_top = top_frame->sp();
+ int new_frame_size = LiveEdit::kFrameDropperFrameSize * kPointerSize;
Address unused_stack_bottom = bottom_js_frame->fp()
- - Debug::kFrameDropperFrameSize * kPointerSize // Size of the new frame.
- + kPointerSize; // Bigger address end is exclusive.
+ - new_frame_size + kPointerSize; // Bigger address end is exclusive.
Address* top_frame_pc_address = top_frame->pc_address();
@@ -1774,11 +1675,10 @@ static const char* DropFrames(Vector<StackFrame*> frames,
static_cast<int>(unused_stack_top - unused_stack_bottom);
Address padding_start = pre_top_frame->fp() -
- Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize;
+ LiveEdit::kFrameDropperFrameSize * kPointerSize;
Address padding_pointer = padding_start;
- Smi* padding_object =
- Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue);
+ Smi* padding_object = Smi::FromInt(LiveEdit::kFramePaddingValue);
while (Memory::Object_at(padding_pointer) == padding_object) {
padding_pointer -= kPointerSize;
}
@@ -1793,9 +1693,9 @@ static const char* DropFrames(Vector<StackFrame*> frames,
StackFrame* pre_pre_frame = frames[top_frame_index - 2];
- OS::MemMove(padding_start + kPointerSize - shortage_bytes,
- padding_start + kPointerSize,
- Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
+ MemMove(padding_start + kPointerSize - shortage_bytes,
+ padding_start + kPointerSize,
+ LiveEdit::kFrameDropperFrameSize * kPointerSize);
pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
pre_pre_frame->SetCallerFp(pre_top_frame->fp());
@@ -1819,7 +1719,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
*restarter_frame_function_pointer =
- Debug::SetUpFrameDropperFrame(bottom_js_frame, code);
+ SetUpFrameDropperFrame(bottom_js_frame, code);
ASSERT((**restarter_frame_function_pointer)->IsJSFunction());
@@ -1833,11 +1733,6 @@ static const char* DropFrames(Vector<StackFrame*> frames,
}
-static bool IsDropableFrame(StackFrame* frame) {
- return !frame->is_exit();
-}
-
-
 // Describes a set of call frames that execute any of the listed functions.
 // Finding no such frames is not an error.
class MultipleFunctionTarget {
@@ -1850,7 +1745,7 @@ class MultipleFunctionTarget {
LiveEdit::FunctionPatchabilityStatus status) {
return CheckActivation(m_shared_info_array, m_result, frame, status);
}
- const char* GetNotFoundMessage() {
+ const char* GetNotFoundMessage() const {
return NULL;
}
private:
@@ -1862,7 +1757,9 @@ class MultipleFunctionTarget {
// Drops all call frame matched by target and all frames above them.
template<typename TARGET>
static const char* DropActivationsInActiveThreadImpl(
- Isolate* isolate, TARGET& target, bool do_drop) {
+ Isolate* isolate,
+ TARGET& target, // NOLINT
+ bool do_drop) {
Debug* debug = isolate->debug();
Zone zone(isolate);
Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
@@ -1891,12 +1788,20 @@ static const char* DropActivationsInActiveThreadImpl(
bool target_frame_found = false;
int bottom_js_frame_index = top_frame_index;
- bool c_code_found = false;
+ bool non_droppable_frame_found = false;
+ LiveEdit::FunctionPatchabilityStatus non_droppable_reason;
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
- if (!IsDropableFrame(frame)) {
- c_code_found = true;
+ if (frame->is_exit()) {
+ non_droppable_frame_found = true;
+ non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE;
+ break;
+ }
+ if (frame->is_java_script() &&
+ JavaScriptFrame::cast(frame)->function()->shared()->is_generator()) {
+ non_droppable_frame_found = true;
+ non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
break;
}
if (target.MatchActivation(
@@ -1906,15 +1811,15 @@ static const char* DropActivationsInActiveThreadImpl(
}
}
- if (c_code_found) {
- // There is a C frames on stack. Check that there are no target frames
- // below them.
+ if (non_droppable_frame_found) {
+    // There is a C or generator frame on the stack. We can't drop C frames,
+    // and we can't restart generators. Check that there are no target frames
+    // below them.
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
if (frame->is_java_script()) {
- if (target.MatchActivation(
- frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
- // Cannot drop frame under C frames.
+ if (target.MatchActivation(frame, non_droppable_reason)) {
+ // Fail.
return NULL;
}
}
@@ -1931,7 +1836,7 @@ static const char* DropActivationsInActiveThreadImpl(
return target.GetNotFoundMessage();
}
- Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
+ LiveEdit::FrameDropMode drop_mode = LiveEdit::FRAMES_UNTOUCHED;
Object** restarter_frame_function_pointer = NULL;
const char* error_message = DropFrames(frames, top_frame_index,
bottom_js_frame_index, &drop_mode,
@@ -1949,8 +1854,8 @@ static const char* DropActivationsInActiveThreadImpl(
break;
}
}
- debug->FramesHaveBeenDropped(new_id, drop_mode,
- restarter_frame_function_pointer);
+ debug->FramesHaveBeenDropped(
+ new_id, drop_mode, restarter_frame_function_pointer);
return NULL;
}
@@ -1972,17 +1877,56 @@ static const char* DropActivationsInActiveThread(
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
- if (result->GetElement(result->GetIsolate(), i) ==
- Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
+ Handle<Object> obj =
+ Object::GetElement(isolate, result, i).ToHandleChecked();
+ if (*obj == Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
Handle<Object> replaced(
Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
- SetElementNonStrict(result, i, replaced);
+ SetElementSloppy(result, i, replaced);
}
}
return NULL;
}
+bool LiveEdit::FindActiveGenerators(Handle<FixedArray> shared_info_array,
+ Handle<FixedArray> result,
+ int len) {
+ Isolate* isolate = shared_info_array->GetIsolate();
+ bool found_suspended_activations = false;
+
+ ASSERT_LE(len, result->length());
+
+ FunctionPatchabilityStatus active = FUNCTION_BLOCKED_ACTIVE_GENERATOR;
+
+ Heap* heap = isolate->heap();
+ HeapIterator iterator(heap);
+ HeapObject* obj = NULL;
+ while ((obj = iterator.next()) != NULL) {
+ if (!obj->IsJSGeneratorObject()) continue;
+
+ JSGeneratorObject* gen = JSGeneratorObject::cast(obj);
+ if (gen->is_closed()) continue;
+
+ HandleScope scope(isolate);
+
+ for (int i = 0; i < len; i++) {
+ Handle<JSValue> jsvalue =
+ Handle<JSValue>::cast(FixedArray::get(shared_info_array, i));
+ Handle<SharedFunctionInfo> shared =
+ UnwrapSharedFunctionInfoFromJSValue(jsvalue);
+
+ if (gen->function()->shared() == *shared) {
+ result->set(i, Smi::FromInt(active));
+ found_suspended_activations = true;
+ }
+ }
+ }
+
+ return found_suspended_activations;
+}
+
+
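
FindActiveGenerators above is a plain product scan: every live, non-closed
generator object found by the heap iterator is compared against every function
being edited, and a match flips the corresponding result slot. A minimal
standalone sketch of the same marking pass (plain pointers stand in for
SharedFunctionInfo; the status value mirrors the enum added to liveedit.h below):

    #include <cstddef>
    #include <vector>

    const int kBlockedActiveGenerator = 7;  // FUNCTION_BLOCKED_ACTIVE_GENERATOR

    struct Generator { const void* shared; bool closed; };

    // Marks result slot i when an open generator runs the i-th edited function.
    bool MarkActiveGenerators(const std::vector<Generator>& heap_generators,
                              const std::vector<const void*>& edited,
                              std::vector<int>* result) {
      bool found = false;
      for (const Generator& gen : heap_generators) {
        if (gen.closed) continue;
        for (std::size_t i = 0; i < edited.size(); i++) {
          if (gen.shared == edited[i]) {
            (*result)[i] = kBlockedActiveGenerator;
            found = true;
          }
        }
      }
      return found;
    }
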
class InactiveThreadActivationsChecker : public ThreadVisitor {
public:
InactiveThreadActivationsChecker(Handle<JSArray> shared_info_array,
@@ -2013,18 +1957,29 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
+ CHECK(shared_info_array->HasFastElements());
+ Handle<FixedArray> shared_info_array_elements(
+ FixedArray::cast(shared_info_array->elements()));
+
Handle<JSArray> result = isolate->factory()->NewJSArray(len);
+ Handle<FixedArray> result_elements =
+ JSObject::EnsureWritableFastElements(result);
// Fill the default values.
for (int i = 0; i < len; i++) {
- SetElementNonStrict(
- result,
- i,
- Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH), isolate));
+ FunctionPatchabilityStatus status = FUNCTION_AVAILABLE_FOR_PATCH;
+ result_elements->set(i, Smi::FromInt(status));
}
+ // Scan the heap for active generators -- those that are either currently
+ // running (as we wouldn't want to restart them, because we don't know where
+ // to restart them from) or suspended. Fail if any one corresponds to the set
+ // of functions being edited.
+ if (FindActiveGenerators(shared_info_array_elements, result_elements, len)) {
+ return result;
+ }
- // First check inactive threads. Fail if some functions are blocked there.
+ // Check inactive threads. Fail if some functions are blocked there.
InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
result);
isolate->thread_manager()->IterateArchivedThreads(
@@ -2038,9 +1993,9 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
DropActivationsInActiveThread(shared_info_array, result, do_drop);
if (error_message != NULL) {
// Add error message as an array extra element.
- Vector<const char> vector_message(error_message, StrLength(error_message));
- Handle<String> str = isolate->factory()->NewStringFromAscii(vector_message);
- SetElementNonStrict(result, len, str);
+ Handle<String> str =
+ isolate->factory()->NewStringFromAsciiChecked(error_message);
+ SetElementSloppy(result, len, str);
}
return result;
}
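
The array CheckAndDropActivations returns carries one status Smi per edited
function, plus an optional error string appended at index len when the in-thread
drop fails. A hedged sketch of decoding such an array; statuses 2-7 appear in the
liveedit.h hunk further down, while the value of FUNCTION_AVAILABLE_FOR_PATCH
(assumed to be 1 here) is not visible in this diff:

    // Maps a LiveEdit::FunctionPatchabilityStatus value to a readable name.
    const char* PatchabilityName(int status) {
      switch (status) {
        case 1: return "AVAILABLE_FOR_PATCH";  // assumed value
        case 2: return "BLOCKED_ON_ACTIVE_STACK";
        case 3: return "BLOCKED_ON_OTHER_STACK";
        case 4: return "BLOCKED_UNDER_NATIVE_CODE";
        case 5: return "REPLACED_ON_ACTIVE_STACK";
        case 6: return "BLOCKED_UNDER_GENERATOR";
        case 7: return "BLOCKED_ACTIVE_GENERATOR";
        default: return "UNKNOWN";
      }
    }
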
@@ -2062,7 +2017,7 @@ class SingleFrameTarget {
}
return false;
}
- const char* GetNotFoundMessage() {
+ const char* GetNotFoundMessage() const {
return "Failed to find requested frame";
}
LiveEdit::FunctionPatchabilityStatus saved_status() {
@@ -2087,6 +2042,9 @@ const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE) {
return "Function is blocked under native code";
}
+ if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR) {
+ return "Function is blocked under a generator activation";
+ }
return NULL;
}
@@ -2126,36 +2084,4 @@ bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
return isolate->active_function_info_listener() != NULL;
}
-
-#else // ENABLE_DEBUGGER_SUPPORT
-
-// This ifdef-else-endif section provides working or stub implementation of
-// LiveEditFunctionTracker.
-LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
- FunctionLiteral* fun) {
-}
-
-
-LiveEditFunctionTracker::~LiveEditFunctionTracker() {
-}
-
-
-void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit,
- Zone* zone) {
-}
-
-
-void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
-}
-
-
-bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
- return false;
-}
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/liveedit.h b/chromium/v8/src/liveedit.h
index 0efbb95cc0b..3465d886d77 100644
--- a/chromium/v8/src/liveedit.h
+++ b/chromium/v8/src/liveedit.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LIVEEDIT_H_
#define V8_LIVEEDIT_H_
@@ -49,8 +26,8 @@
// instantiate newly compiled functions.
-#include "allocation.h"
-#include "compiler.h"
+#include "src/allocation.h"
+#include "src/compiler.h"
namespace v8 {
namespace internal {
@@ -75,39 +52,56 @@ class LiveEditFunctionTracker {
static bool IsActive(Isolate* isolate);
private:
-#ifdef ENABLE_DEBUGGER_SUPPORT
Isolate* isolate_;
-#endif
};
-#ifdef ENABLE_DEBUGGER_SUPPORT
class LiveEdit : AllStatic {
public:
- static JSArray* GatherCompileInfo(Handle<Script> script,
- Handle<String> source);
+ // Describes how exactly a frame has been dropped from stack.
+ enum FrameDropMode {
+ // No frame has been dropped.
+ FRAMES_UNTOUCHED,
+ // The top JS frame had been calling an IC stub. The IC stub mustn't be
+ // called now.
+ FRAME_DROPPED_IN_IC_CALL,
+ // The top JS frame had been calling a debug break slot stub. Patch the
+ // address this stub jumps to at the end.
+ FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
+ // The top JS frame had been calling some C++ function. The return address
+ // gets patched automatically.
+ FRAME_DROPPED_IN_DIRECT_CALL,
+ FRAME_DROPPED_IN_RETURN_CALL,
+ CURRENTLY_SET_MODE
+ };
+
+ static void InitializeThreadLocal(Debug* debug);
+
+ static bool SetAfterBreakTarget(Debug* debug);
+
+ MUST_USE_RESULT static MaybeHandle<JSArray> GatherCompileInfo(
+ Handle<Script> script,
+ Handle<String> source);
static void WrapSharedFunctionInfos(Handle<JSArray> array);
- MUST_USE_RESULT static MaybeObject* ReplaceFunctionCode(
- Handle<JSArray> new_compile_info_array,
- Handle<JSArray> shared_info_array);
+ static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
+ Handle<JSArray> shared_info_array);
- static MaybeObject* FunctionSourceUpdated(Handle<JSArray> shared_info_array);
+ static void FunctionSourceUpdated(Handle<JSArray> shared_info_array);
// Updates script field in FunctionSharedInfo.
static void SetFunctionScript(Handle<JSValue> function_wrapper,
Handle<Object> script_handle);
- MUST_USE_RESULT static MaybeObject* PatchFunctionPositions(
- Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array);
+ static void PatchFunctionPositions(Handle<JSArray> shared_info_array,
+ Handle<JSArray> position_change_array);
// For a script updates its source field. If old_script_name is provided
// (i.e. is a String), also creates a copy of the script with its original
// source and sends notification to debugger.
- static Object* ChangeScriptSource(Handle<Script> original_script,
- Handle<String> new_source,
- Handle<Object> old_script_name);
+ static Handle<Object> ChangeScriptSource(Handle<Script> original_script,
+ Handle<String> new_source,
+ Handle<Object> old_script_name);
// In a code of a parent function replaces original function as embedded
// object with a substitution one.
@@ -115,6 +109,11 @@ class LiveEdit : AllStatic {
Handle<JSValue> orig_function_shared,
Handle<JSValue> subst_function_shared);
+ // Finds open generator activations and sets the corresponding "result"
+ // elements to FUNCTION_BLOCKED_ACTIVE_GENERATOR.
+ static bool FindActiveGenerators(Handle<FixedArray> shared_info_array,
+ Handle<FixedArray> result, int len);
+
// Checks listed functions on the stack and returns an array with corresponding
// FunctionPatchabilityStatus statuses; extra array element may
// contain general error message. Modifies the current stack and
@@ -133,7 +132,9 @@ class LiveEdit : AllStatic {
FUNCTION_BLOCKED_ON_ACTIVE_STACK = 2,
FUNCTION_BLOCKED_ON_OTHER_STACK = 3,
FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4,
- FUNCTION_REPLACED_ON_ACTIVE_STACK = 5
+ FUNCTION_REPLACED_ON_ACTIVE_STACK = 5,
+ FUNCTION_BLOCKED_UNDER_GENERATOR = 6,
+ FUNCTION_BLOCKED_ACTIVE_GENERATOR = 7
};
// Compares 2 strings line-by-line, then token-wise and returns diff in form
@@ -141,6 +142,46 @@ class LiveEdit : AllStatic {
// of diff chunks.
static Handle<JSArray> CompareStrings(Handle<String> s1,
Handle<String> s2);
+
+ // Architecture-specific constant.
+ static const bool kFrameDropperSupported;
+
+ /**
+ * Defines layout of a stack frame that supports padding. This is a regular
+ * internal frame that has a flexible stack structure. LiveEdit can shift
+ * its lower part up the stack, taking up the 'padding' space when additional
+ * stack memory is required.
+ * Such a frame is expected immediately above the topmost JavaScript frame.
+ *
+ * Stack Layout:
+ * --- Top
+ * LiveEdit routine frames
+ * ---
+ * C frames of debug handler
+ * ---
+ * ...
+ * ---
+ * An internal frame that has n padding words:
+ * - any number of words as needed by code -- upper part of frame
+ * - padding size: a Smi storing n -- current size of padding
+ * - padding: n words filled with kPaddingValue in form of Smi
+ * - 3 context/type words of a regular InternalFrame
+ * - fp
+ * ---
+ * Topmost JavaScript frame
+ * ---
+ * ...
+ * --- Bottom
+ */
+ // The size of the frame base, including fp. Padding words start right
+ // above the base.
+ static const int kFrameDropperFrameSize = 4;
+ // The number of words that should be reserved on the stack for LiveEdit's
+ // use. Stored on the stack as a Smi.
+ static const int kFramePaddingInitialSize = 1;
+ // The value that padding words are filled with (as a Smi). Going bottom to
+ // top, the first word not holding this value is the counter word.
+ static const int kFramePaddingValue = kFramePaddingInitialSize + 1;
};
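
The three padding constants compose: the dropper frame base is 4 words (including
fp), one padding word is reserved up front, and each padding slot holds the
sentinel kFramePaddingValue (2), so a bottom-to-top scan stops at the first word
that is not the sentinel -- the Smi counter of padding words. A standalone sketch
of that scan over a mock frame (plain ints stand in for Smis):

    #include <vector>

    const int kFramePaddingInitialSize = 1;
    const int kFramePaddingValue = kFramePaddingInitialSize + 1;  // == 2

    // Counts padding words by scanning up from the frame base; the first
    // word that is not the sentinel is the counter word itself.
    int CountPadding(const std::vector<int>& words_above_base) {
      int count = 0;
      for (int word : words_above_base) {
        if (word != kFramePaddingValue) break;
        count++;
      }
      return count;  // equals the Smi counter stored just above the padding
    }
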
@@ -175,9 +216,165 @@ class Comparator {
Output* result_writer);
};
-#endif // ENABLE_DEBUGGER_SUPPORT
+// Simple helper class that creates more or less typed structures over a
+// JSArray object. This is an ad hoc method of passing structures from C++
+// to JavaScript.
+template<typename S>
+class JSArrayBasedStruct {
+ public:
+ static S Create(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(S::kSize_);
+ return S(array);
+ }
+
+ static S cast(Object* object) {
+ JSArray* array = JSArray::cast(object);
+ Handle<JSArray> array_handle(array);
+ return S(array_handle);
+ }
+
+ explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) {
+ }
+
+ Handle<JSArray> GetJSArray() {
+ return array_;
+ }
+
+ Isolate* isolate() const {
+ return array_->GetIsolate();
+ }
+
+ protected:
+ void SetField(int field_position, Handle<Object> value) {
+ JSObject::SetElement(array_, field_position, value, NONE, SLOPPY).Assert();
+ }
+
+ void SetSmiValueField(int field_position, int value) {
+ SetField(field_position, Handle<Smi>(Smi::FromInt(value), isolate()));
+ }
+
+ Handle<Object> GetField(int field_position) {
+ return Object::GetElement(
+ isolate(), array_, field_position).ToHandleChecked();
+ }
+
+ int GetSmiValueField(int field_position) {
+ Handle<Object> res = GetField(field_position);
+ return Handle<Smi>::cast(res)->value();
+ }
+
+ private:
+ Handle<JSArray> array_;
+};
+
+
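
JSArrayBasedStruct is the curiously recurring template pattern over an array: the
base class owns the backing store plus the typed field accessors, and each
subclass only names its offsets and total size. The same shape in standalone C++,
with std::vector standing in for the JSArray backing (all names illustrative):

    #include <vector>

    template <typename S>
    class ArrayBasedStruct {
     public:
      static S Create() { return S(std::vector<int>(S::kSize_)); }

     protected:
      explicit ArrayBasedStruct(std::vector<int> array) : array_(array) {}
      void SetSmiValueField(int pos, int value) { array_[pos] = value; }
      int GetSmiValueField(int pos) const { return array_[pos]; }

     private:
      std::vector<int> array_;
    };

    class PointWrapper : public ArrayBasedStruct<PointWrapper> {
     public:
      explicit PointWrapper(std::vector<int> array)
          : ArrayBasedStruct<PointWrapper>(array) {}
      void SetX(int x) { SetSmiValueField(kXOffset_, x); }
      int GetX() const { return GetSmiValueField(kXOffset_); }

     private:
      static const int kXOffset_ = 0;
      static const int kSize_ = 1;
      friend class ArrayBasedStruct<PointWrapper>;
    };

    // Usage: PointWrapper p = PointWrapper::Create(); p.SetX(42);
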
+// Represents some function compilation details. This structure will be used
+// from JavaScript. It contains a Code object, which is kept wrapped
+// in a BlindReference for sanitizing reasons.
+class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
+ public:
+ explicit FunctionInfoWrapper(Handle<JSArray> array)
+ : JSArrayBasedStruct<FunctionInfoWrapper>(array) {
+ }
+
+ void SetInitialProperties(Handle<String> name,
+ int start_position,
+ int end_position,
+ int param_num,
+ int literal_count,
+ int slot_count,
+ int parent_index);
+
+ void SetFunctionCode(Handle<Code> function_code,
+ Handle<HeapObject> code_scope_info);
+
+ void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
+ this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
+ }
+
+ void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info);
+
+ int GetLiteralCount() {
+ return this->GetSmiValueField(kLiteralNumOffset_);
+ }
+
+ int GetParentIndex() {
+ return this->GetSmiValueField(kParentIndexOffset_);
+ }
+
+ Handle<Code> GetFunctionCode();
+
+ Handle<FixedArray> GetFeedbackVector();
+
+ Handle<Object> GetCodeScopeInfo();
+
+ int GetStartPosition() {
+ return this->GetSmiValueField(kStartPositionOffset_);
+ }
+
+ int GetEndPosition() { return this->GetSmiValueField(kEndPositionOffset_); }
+
+ int GetSlotCount() {
+ return this->GetSmiValueField(kSlotNumOffset_);
+ }
+
+ private:
+ static const int kFunctionNameOffset_ = 0;
+ static const int kStartPositionOffset_ = 1;
+ static const int kEndPositionOffset_ = 2;
+ static const int kParamNumOffset_ = 3;
+ static const int kCodeOffset_ = 4;
+ static const int kCodeScopeInfoOffset_ = 5;
+ static const int kFunctionScopeInfoOffset_ = 6;
+ static const int kParentIndexOffset_ = 7;
+ static const int kSharedFunctionInfoOffset_ = 8;
+ static const int kLiteralNumOffset_ = 9;
+ static const int kSlotNumOffset_ = 10;
+ static const int kSize_ = 11;
+
+ friend class JSArrayBasedStruct<FunctionInfoWrapper>;
+};
+
+
+// Wraps a SharedFunctionInfo along with some of its fields for passing it
+// back to JavaScript. The SharedFunctionInfo object itself is additionally
+// wrapped in a BlindReference for sanitizing reasons.
+class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
+ public:
+ static bool IsInstance(Handle<JSArray> array) {
+ if (array->length() != Smi::FromInt(kSize_)) return false;
+ Handle<Object> element(
+ Object::GetElement(array->GetIsolate(),
+ array,
+ kSharedInfoOffset_).ToHandleChecked());
+ if (!element->IsJSValue()) return false;
+ return Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo();
+ }
+
+ explicit SharedInfoWrapper(Handle<JSArray> array)
+ : JSArrayBasedStruct<SharedInfoWrapper>(array) {
+ }
+
+ void SetProperties(Handle<String> name,
+ int start_position,
+ int end_position,
+ Handle<SharedFunctionInfo> info);
+
+ Handle<SharedFunctionInfo> GetInfo();
+
+ private:
+ static const int kFunctionNameOffset_ = 0;
+ static const int kStartPositionOffset_ = 1;
+ static const int kEndPositionOffset_ = 2;
+ static const int kSharedInfoOffset_ = 3;
+ static const int kSize_ = 4;
+
+ friend class JSArrayBasedStruct<SharedInfoWrapper>;
+};
+
} } // namespace v8::internal
#endif /* V8_LIVEEDIT_H_ */
diff --git a/chromium/v8/src/log-inl.h b/chromium/v8/src/log-inl.h
index 7f653cb7283..28677ad235d 100644
--- a/chromium/v8/src/log-inl.h
+++ b/chromium/v8/src/log-inl.h
@@ -1,34 +1,11 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LOG_INL_H_
#define V8_LOG_INL_H_
-#include "log.h"
+#include "src/log.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/log-utils.cc b/chromium/v8/src/log-utils.cc
index 909d4a51396..4598b81199c 100644
--- a/chromium/v8/src/log-utils.cc
+++ b/chromium/v8/src/log-utils.cc
@@ -1,34 +1,11 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "log-utils.h"
-#include "string-stream.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/log-utils.h"
+#include "src/string-stream.h"
namespace v8 {
namespace internal {
@@ -51,7 +28,6 @@ void Log::Initialize(const char* log_file_name) {
// --log-all enables all the log flags.
if (FLAG_log_all) {
- FLAG_log_runtime = true;
FLAG_log_api = true;
FLAG_log_code = true;
FLAG_log_gc = true;
@@ -136,7 +112,7 @@ void Log::MessageBuilder::Append(const char* format, ...) {
void Log::MessageBuilder::AppendVA(const char* format, va_list args) {
Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
+ int result = v8::internal::VSNPrintF(buf, format, args);
// Result is -1 if output was truncated.
if (result >= 0) {
@@ -235,7 +211,7 @@ void Log::MessageBuilder::AppendStringPart(const char* str, int len) {
}
Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
- OS::StrNCpy(buf, str, len);
+ StrNCpy(buf, str, len);
pos_ += len;
ASSERT(pos_ <= Log::kMessageBufferSize);
}
diff --git a/chromium/v8/src/log-utils.h b/chromium/v8/src/log-utils.h
index f1a21e2cc15..c5e0a0ca5f6 100644
--- a/chromium/v8/src/log-utils.h
+++ b/chromium/v8/src/log-utils.h
@@ -1,34 +1,11 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LOG_UTILS_H_
#define V8_LOG_UTILS_H_
-#include "allocation.h"
+#include "src/allocation.h"
namespace v8 {
namespace internal {
@@ -45,10 +22,10 @@ class Log {
void stop() { is_stopped_ = true; }
static bool InitLogAtStart() {
- return FLAG_log || FLAG_log_runtime || FLAG_log_api
- || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_ll_prof || FLAG_perf_basic_prof
- || FLAG_perf_jit_prof || FLAG_log_internal_timer_events;
+ return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc
+ || FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp
+ || FLAG_ll_prof || FLAG_perf_basic_prof || FLAG_perf_jit_prof
+ || FLAG_log_internal_timer_events;
}
// Frees all resources acquired in Initialize and Open... functions.
diff --git a/chromium/v8/src/log.cc b/chromium/v8/src/log.cc
index a508e8739ea..e8af5d0ff4a 100644
--- a/chromium/v8/src/log.cc
+++ b/chromium/v8/src/log.cc
@@ -1,47 +1,24 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdarg.h>
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "cpu-profiler.h"
-#include "deoptimizer.h"
-#include "global-handles.h"
-#include "log.h"
-#include "log-utils.h"
-#include "macro-assembler.h"
-#include "platform.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-#include "string-stream.h"
-#include "vm-state-inl.h"
+#include "src/v8.h"
+
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
+#include "src/deoptimizer.h"
+#include "src/global-handles.h"
+#include "src/log.h"
+#include "src/log-utils.h"
+#include "src/macro-assembler.h"
+#include "src/platform.h"
+#include "src/runtime-profiler.h"
+#include "src/serialize.h"
+#include "src/string-stream.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -129,7 +106,7 @@ class CodeEventLogger::NameBuffer {
void AppendBytes(const char* bytes, int size) {
size = Min(size, kUtf8BufferSize - utf8_pos_);
- OS::MemCopy(utf8_buffer_ + utf8_pos_, bytes, size);
+ MemCopy(utf8_buffer_ + utf8_pos_, bytes, size);
utf8_pos_ += size;
}
@@ -145,7 +122,7 @@ class CodeEventLogger::NameBuffer {
void AppendInt(int n) {
Vector<char> buffer(utf8_buffer_ + utf8_pos_,
kUtf8BufferSize - utf8_pos_);
- int size = OS::SNPrintF(buffer, "%d", n);
+ int size = SNPrintF(buffer, "%d", n);
if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
utf8_pos_ += size;
}
@@ -154,7 +131,7 @@ class CodeEventLogger::NameBuffer {
void AppendHex(uint32_t n) {
Vector<char> buffer(utf8_buffer_ + utf8_pos_,
kUtf8BufferSize - utf8_pos_);
- int size = OS::SNPrintF(buffer, "%x", n);
+ int size = SNPrintF(buffer, "%x", n);
if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
utf8_pos_ += size;
}
@@ -253,6 +230,7 @@ class PerfBasicLogger : public CodeEventLogger {
virtual ~PerfBasicLogger();
virtual void CodeMoveEvent(Address from, Address to) { }
+ virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
virtual void CodeDeleteEvent(Address from) { }
private:
@@ -281,7 +259,7 @@ PerfBasicLogger::PerfBasicLogger()
// Open the perf JIT dump file.
int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
ScopedVector<char> perf_dump_name(bufferSize);
- int size = OS::SNPrintF(
+ int size = SNPrintF(
perf_dump_name,
kFilenameFormatString,
OS::GetCurrentProcessId());
@@ -318,6 +296,7 @@ class PerfJitLogger : public CodeEventLogger {
virtual ~PerfJitLogger();
virtual void CodeMoveEvent(Address from, Address to) { }
+ virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
virtual void CodeDeleteEvent(Address from) { }
private:
@@ -343,6 +322,7 @@ class PerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachX64 = 62;
static const uint32_t kElfMachARM = 40;
static const uint32_t kElfMachMIPS = 10;
+ static const uint32_t kElfMachX87 = 3;
struct jitheader {
uint32_t magic;
@@ -382,6 +362,8 @@ class PerfJitLogger : public CodeEventLogger {
return kElfMachARM;
#elif V8_TARGET_ARCH_MIPS
return kElfMachMIPS;
+#elif V8_TARGET_ARCH_X87
+ return kElfMachX87;
#else
UNIMPLEMENTED();
return 0;
@@ -401,7 +383,7 @@ PerfJitLogger::PerfJitLogger()
// Open the perf JIT dump file.
int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
ScopedVector<char> perf_dump_name(bufferSize);
- int size = OS::SNPrintF(
+ int size = SNPrintF(
perf_dump_name,
kFilenameFormatString,
OS::GetCurrentProcessId());
@@ -480,6 +462,7 @@ class LowLevelLogger : public CodeEventLogger {
virtual ~LowLevelLogger();
virtual void CodeMoveEvent(Address from, Address to);
+ virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
virtual void CodeDeleteEvent(Address from);
virtual void SnapshotPositionEvent(Address addr, int pos);
virtual void CodeMovingGCEvent();
@@ -553,8 +536,8 @@ LowLevelLogger::LowLevelLogger(const char* name)
// Open the low-level log file.
size_t len = strlen(name);
ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLogExt)));
- OS::MemCopy(ll_name.start(), name, len);
- OS::MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt));
+ MemCopy(ll_name.start(), name, len);
+ MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt));
ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
setvbuf(ll_output_handle_, NULL, _IOFBF, kLogBufferSize);
@@ -577,6 +560,8 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "arm";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
+#elif V8_TARGET_ARCH_X87
+ const char arch[] = "x87";
#else
const char arch[] = "unknown";
#endif
@@ -646,6 +631,7 @@ class JitLogger : public CodeEventLogger {
explicit JitLogger(JitCodeEventHandler code_event_handler);
virtual void CodeMoveEvent(Address from, Address to);
+ virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
virtual void CodeDeleteEvent(Address from);
virtual void AddCodeLinePosInfoEvent(
void* jit_handler_data,
@@ -881,7 +867,12 @@ void Profiler::Engage() {
if (engaged_) return;
engaged_ = true;
- OS::LogSharedLibraryAddresses(isolate_);
+ std::vector<OS::SharedLibraryAddress> addresses =
+ OS::GetSharedLibraryAddresses();
+ for (size_t i = 0; i < addresses.size(); ++i) {
+ LOG(isolate_, SharedLibraryEvent(
+ addresses[i].library_path, addresses[i].start, addresses[i].end));
+ }
// Start thread processing the profiler buffer.
running_ = true;
@@ -1041,7 +1032,7 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
if (key->IsString()) {
SmartArrayPointer<char> str =
String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,check-security,\"%s\"\n", *str);
+ ApiEvent("api,check-security,\"%s\"\n", str.get());
} else if (key->IsSymbol()) {
Symbol* symbol = Symbol::cast(key);
if (symbol->name()->IsUndefined()) {
@@ -1051,7 +1042,7 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,check-security,symbol(\"%s\" hash %x)\n",
- *str,
+ str.get(),
Symbol::cast(key)->Hash());
}
} else if (key->IsUndefined()) {
@@ -1062,38 +1053,35 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
}
-void Logger::SharedLibraryEvent(const char* library_path,
+void Logger::SharedLibraryEvent(const std::string& library_path,
uintptr_t start,
uintptr_t end) {
if (!log_->IsEnabled() || !FLAG_prof) return;
Log::MessageBuilder msg(log_);
msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
- library_path,
+ library_path.c_str(),
start,
end);
msg.WriteToLogFile();
}
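
Given the format string above, each mapped library now produces one CSV-style
line in the log; a representative (purely illustrative) line:

    shared-library,"/usr/lib/libc.so.6",0x00007f62a1c00000,0x00007f62a1dc2000
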
-void Logger::SharedLibraryEvent(const wchar_t* library_path,
- uintptr_t start,
- uintptr_t end) {
- if (!log_->IsEnabled() || !FLAG_prof) return;
+void Logger::CodeDeoptEvent(Code* code) {
+ if (!log_->IsEnabled()) return;
+ ASSERT(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
- msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
- library_path,
- start,
- end);
+ int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
+ msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
msg.WriteToLogFile();
}
-void Logger::CodeDeoptEvent(Code* code) {
+void Logger::CurrentTimeEvent() {
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
- msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
+ msg.Append("current-time,%ld\n", since_epoch);
msg.WriteToLogFile();
}
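
For reference, the two builders above emit lines of the following shape, where
the first field after the tag is microseconds since the logger's timer started
(all values illustrative):

    code-deopt,123456,2048
    current-time,123460
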
@@ -1124,8 +1112,14 @@ void Logger::LeaveExternal(Isolate* isolate) {
}
+void Logger::LogInternalEvents(const char* name, int se) {
+ Isolate* isolate = Isolate::Current();
+ LOG(isolate, TimerEvent(static_cast<StartEnd>(se), name));
+}
+
+
void Logger::TimerEventScope::LogTimerEvent(StartEnd se) {
- LOG(isolate_, TimerEvent(se, name_));
+ isolate_->event_logger()(name_, se);
}
@@ -1137,6 +1131,7 @@ const char* Logger::TimerEventScope::v8_compile_full_code =
"V8.CompileFullCode";
const char* Logger::TimerEventScope::v8_execute = "V8.Execute";
const char* Logger::TimerEventScope::v8_external = "V8.External";
+const char* Logger::TimerEventScope::v8_ic_miss = "V8.IcMiss";
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
@@ -1144,7 +1139,8 @@ void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
Log::MessageBuilder msg(log_);
- Handle<Object> source = GetProperty(regexp, "source");
+ Handle<Object> source = Object::GetProperty(
+ isolate_, regexp, "source").ToHandleChecked();
if (!source->IsString()) {
msg.Append("no source");
return;
@@ -1162,17 +1158,20 @@ void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
msg.Append('/');
// global flag
- Handle<Object> global = GetProperty(regexp, "global");
+ Handle<Object> global = Object::GetProperty(
+ isolate_, regexp, "global").ToHandleChecked();
if (global->IsTrue()) {
msg.Append('g');
}
// ignorecase flag
- Handle<Object> ignorecase = GetProperty(regexp, "ignoreCase");
+ Handle<Object> ignorecase = Object::GetProperty(
+ isolate_, regexp, "ignoreCase").ToHandleChecked();
if (ignorecase->IsTrue()) {
msg.Append('i');
}
// multiline flag
- Handle<Object> multiline = GetProperty(regexp, "multiline");
+ Handle<Object> multiline = Object::GetProperty(
+ isolate_, regexp, "multiline").ToHandleChecked();
if (multiline->IsTrue()) {
msg.Append('m');
}
@@ -1191,51 +1190,6 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
}
-void Logger::LogRuntime(Vector<const char> format,
- JSArray* args) {
- if (!log_->IsEnabled() || !FLAG_log_runtime) return;
- HandleScope scope(isolate_);
- Log::MessageBuilder msg(log_);
- for (int i = 0; i < format.length(); i++) {
- char c = format[i];
- if (c == '%' && i <= format.length() - 2) {
- i++;
- ASSERT('0' <= format[i] && format[i] <= '9');
- MaybeObject* maybe = args->GetElement(isolate_, format[i] - '0');
- Object* obj;
- if (!maybe->ToObject(&obj)) {
- msg.Append("<exception>");
- continue;
- }
- i++;
- switch (format[i]) {
- case 's':
- msg.AppendDetailed(String::cast(obj), false);
- break;
- case 'S':
- msg.AppendDetailed(String::cast(obj), true);
- break;
- case 'r':
- Logger::LogRegExpSource(Handle<JSRegExp>(JSRegExp::cast(obj)));
- break;
- case 'x':
- msg.Append("0x%x", Smi::cast(obj)->value());
- break;
- case 'i':
- msg.Append("%i", Smi::cast(obj)->value());
- break;
- default:
- UNREACHABLE();
- }
- } else {
- msg.Append(c);
- }
- }
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
void Logger::ApiIndexedSecurityCheck(uint32_t index) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
ApiEvent("api,check-security,%u\n", index);
@@ -1253,17 +1207,18 @@ void Logger::ApiNamedPropertyAccess(const char* tag,
if (name->IsString()) {
SmartArrayPointer<char> property_name =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
+ ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, class_name.get(),
+ property_name.get());
} else {
Symbol* symbol = Symbol::cast(name);
uint32_t hash = symbol->Hash();
if (symbol->name()->IsUndefined()) {
- ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, *class_name, hash);
+ ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, class_name.get(), hash);
} else {
SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)\n",
- tag, *class_name, *str, hash);
+ tag, class_name.get(), str.get(), hash);
}
}
}
@@ -1275,7 +1230,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
String* class_name_obj = holder->class_name();
SmartArrayPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
+ ApiEvent("api,%s,\"%s\",%u\n", tag, class_name.get(), index);
}
@@ -1284,7 +1239,7 @@ void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
String* class_name_obj = object->class_name();
SmartArrayPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
+ ApiEvent("api,%s,\"%s\"\n", tag, class_name.get());
}
@@ -1332,7 +1287,7 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
if (name->IsString()) {
SmartArrayPointer<char> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append(",1,\"%s%s\"", prefix, *str);
+ msg.Append(",1,\"%s%s\"", prefix, str.get());
} else {
Symbol* symbol = Symbol::cast(name);
if (symbol->name()->IsUndefined()) {
@@ -1340,7 +1295,8 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
} else {
SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append(",1,symbol(\"%s\" hash %x)", prefix, *str, symbol->Hash());
+ msg.Append(",1,symbol(\"%s\" hash %x)", prefix, str.get(),
+ symbol->Hash());
}
}
msg.Append('\n');
@@ -1430,8 +1386,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
if (!FLAG_log_code || !log_->IsEnabled()) return;
- if (code == isolate_->builtins()->builtin(
- Builtins::kLazyCompile))
+ if (code == isolate_->builtins()->builtin(Builtins::kCompileUnoptimized))
return;
Log::MessageBuilder msg(log_);
@@ -1439,7 +1394,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
if (name->IsString()) {
SmartArrayPointer<char> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("\"%s\"", *str);
+ msg.Append("\"%s\"", str.get());
} else {
msg.AppendSymbolName(Symbol::cast(name));
}
@@ -1470,11 +1425,11 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
AppendCodeCreateHeader(&msg, tag, code);
SmartArrayPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("\"%s ", *name);
+ msg.Append("\"%s ", name.get());
if (source->IsString()) {
SmartArrayPointer<char> sourcestr =
String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s", *sourcestr);
+ msg.Append("%s", sourcestr.get());
} else {
msg.AppendSymbolName(Symbol::cast(source));
}
@@ -1503,6 +1458,24 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
+void Logger::CodeDisableOptEvent(Code* code,
+ SharedFunctionInfo* shared) {
+ PROFILER_LOG(CodeDisableOptEvent(code, shared));
+
+ if (!is_logging_code_events()) return;
+ CALL_LISTENERS(CodeDisableOptEvent(code, shared));
+
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ msg.Append("%s,", kLogEventsNames[CODE_DISABLE_OPT_EVENT]);
+ SmartArrayPointer<char> name =
+ shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ msg.Append("\"%s\",", name.get());
+ msg.Append("\"%s\"\n", GetBailoutReason(shared->DisableOptimizationReason()));
+ msg.WriteToLogFile();
+}
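
The resulting line pairs the function's debug name with its bailout reason; an
illustrative example (both fields depend on the program being run):

    code-disable-optimization,"hotLoop","Bad value context for arguments value"
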
+
+
void Logger::CodeMovingGCEvent() {
PROFILER_LOG(CodeMovingGCEvent());
@@ -1869,6 +1842,10 @@ void Logger::LogCodeObject(Object* object) {
description = "A load IC from the snapshot";
tag = Logger::LOAD_IC_TAG;
break;
+ case Code::CALL_IC:
+ description = "A call IC from the snapshot";
+ tag = Logger::CALL_IC_TAG;
+ break;
case Code::STORE_IC:
description = "A store IC from the snapshot";
tag = Logger::STORE_IC_TAG;
@@ -1877,14 +1854,6 @@ void Logger::LogCodeObject(Object* object) {
description = "A keyed store IC from the snapshot";
tag = Logger::KEYED_STORE_IC_TAG;
break;
- case Code::CALL_IC:
- description = "A call IC from the snapshot";
- tag = Logger::CALL_IC_TAG;
- break;
- case Code::KEYED_CALL_IC:
- description = "A keyed call IC from the snapshot";
- tag = Logger::KEYED_CALL_IC_TAG;
- break;
case Code::NUMBER_OF_KINDS:
break;
}
@@ -1909,9 +1878,9 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<String> func_name(shared->DebugName());
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
- int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
+ int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
int column_num =
- GetScriptColumnNumber(script, shared->start_position()) + 1;
+ Script::GetColumnNumber(script, shared->start_position()) + 1;
if (script->name()->IsString()) {
Handle<String> script_name(String::cast(script->name()));
if (line_num > 0) {
@@ -1965,8 +1934,8 @@ void Logger::LogCompiledFunctions() {
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
- if (*code_objects[i] == isolate_->builtins()->builtin(
- Builtins::kLazyCompile))
+ if (code_objects[i].is_identical_to(
+ isolate_->builtins()->CompileUnoptimized()))
continue;
LogExistingFunction(sfis[i], code_objects[i]);
}
@@ -1997,58 +1966,48 @@ void Logger::LogAccessorCallbacks() {
static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) {
- if (isolate->IsDefaultIsolate() || !FLAG_logfile_per_isolate) return;
- stream->Add("isolate-%p-", isolate);
+ if (FLAG_logfile_per_isolate) stream->Add("isolate-%p-", isolate);
}
static SmartArrayPointer<const char> PrepareLogFileName(
Isolate* isolate, const char* file_name) {
- if (strchr(file_name, '%') != NULL || !isolate->IsDefaultIsolate()) {
- // If there's a '%' in the log file name we have to expand
- // placeholders.
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- AddIsolateIdIfNeeded(isolate, &stream);
- for (const char* p = file_name; *p; p++) {
- if (*p == '%') {
- p++;
- switch (*p) {
- case '\0':
- // If there's a % at the end of the string we back up
- // one character so we can escape the loop properly.
- p--;
- break;
- case 'p':
- stream.Add("%d", OS::GetCurrentProcessId());
- break;
- case 't': {
- // %t expands to the current time in milliseconds.
- double time = OS::TimeCurrentMillis();
- stream.Add("%.0f", FmtElm(time));
- break;
- }
- case '%':
- // %% expands (contracts really) to %.
- stream.Put('%');
- break;
- default:
- // All other %'s expand to themselves.
- stream.Put('%');
- stream.Put(*p);
- break;
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ AddIsolateIdIfNeeded(isolate, &stream);
+ for (const char* p = file_name; *p; p++) {
+ if (*p == '%') {
+ p++;
+ switch (*p) {
+ case '\0':
+ // If there's a % at the end of the string we back up
+ // one character so we can escape the loop properly.
+ p--;
+ break;
+ case 'p':
+ stream.Add("%d", OS::GetCurrentProcessId());
+ break;
+ case 't': {
+ // %t expands to the current time in milliseconds.
+ double time = OS::TimeCurrentMillis();
+ stream.Add("%.0f", FmtElm(time));
+ break;
}
- } else {
- stream.Put(*p);
+ case '%':
+ // %% expands (contracts really) to %.
+ stream.Put('%');
+ break;
+ default:
+ // All other %'s expand to themselves.
+ stream.Put('%');
+ stream.Put(*p);
+ break;
}
+ } else {
+ stream.Put(*p);
}
- return SmartArrayPointer<const char>(stream.ToCString());
}
- int length = StrLength(file_name);
- char* str = NewArray<char>(length + 1);
- OS::MemCopy(str, file_name, length);
- str[length] = '\0';
- return SmartArrayPointer<const char>(str);
+ return SmartArrayPointer<const char>(stream.ToCString());
}
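
After this simplification every log file name goes through placeholder expansion:
%p becomes the process id, %t the current time in milliseconds, %% a literal
percent, and a trailing % is dropped. A standalone sketch of the same expansion,
with the pid and time passed in rather than queried from the OS (function name
illustrative):

    #include <cstddef>
    #include <cstdio>
    #include <string>

    std::string ExpandLogFileName(const std::string& name, int pid, double ms) {
      std::string out;
      for (std::size_t i = 0; i < name.size(); i++) {
        if (name[i] != '%') { out += name[i]; continue; }
        if (++i >= name.size()) break;  // trailing '%' expands to nothing
        char buf[32];
        switch (name[i]) {
          case 'p': std::snprintf(buf, sizeof(buf), "%d", pid); out += buf; break;
          case 't': std::snprintf(buf, sizeof(buf), "%.0f", ms); out += buf; break;
          case '%': out += '%'; break;
          default:  out += '%'; out += name[i]; break;  // others kept as-is
        }
      }
      return out;
    }

    // ExpandLogFileName("v8-%p.log", 1234, 0) -> "v8-1234.log"
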
@@ -2064,7 +2023,7 @@ bool Logger::SetUp(Isolate* isolate) {
SmartArrayPointer<const char> log_file_name =
PrepareLogFileName(isolate, FLAG_logfile);
- log_->Initialize(*log_file_name);
+ log_->Initialize(log_file_name.get());
if (FLAG_perf_basic_prof) {
@@ -2078,7 +2037,7 @@ bool Logger::SetUp(Isolate* isolate) {
}
if (FLAG_ll_prof) {
- ll_logger_ = new LowLevelLogger(*log_file_name);
+ ll_logger_ = new LowLevelLogger(log_file_name.get());
addCodeEventListener(ll_logger_);
}
diff --git a/chromium/v8/src/log.h b/chromium/v8/src/log.h
index e53551d8f45..e98874b5baa 100644
--- a/chromium/v8/src/log.h
+++ b/chromium/v8/src/log.h
@@ -1,37 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_LOG_H_
#define V8_LOG_H_
-#include "allocation.h"
-#include "objects.h"
-#include "platform.h"
-#include "platform/elapsed-timer.h"
+#include <string>
+
+#include "src/allocation.h"
+#include "src/objects.h"
+#include "src/platform.h"
+#include "src/platform/elapsed-timer.h"
namespace v8 {
namespace internal {
@@ -102,6 +81,7 @@ struct TickSample;
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation") \
+ V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \
V(CODE_MOVE_EVENT, "code-move") \
V(CODE_DELETE_EVENT, "code-delete") \
V(CODE_MOVING_GC, "code-moving-gc") \
@@ -113,16 +93,21 @@ struct TickSample;
V(BUILTIN_TAG, "Builtin") \
V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
- V(CALL_IC_TAG, "CallIC") \
V(CALL_INITIALIZE_TAG, "CallInitialize") \
V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
V(CALL_MISS_TAG, "CallMiss") \
V(CALL_NORMAL_TAG, "CallNormal") \
V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
+ V(LOAD_INITIALIZE_TAG, "LoadInitialize") \
+ V(LOAD_PREMONOMORPHIC_TAG, "LoadPreMonomorphic") \
+ V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic") \
+ V(STORE_INITIALIZE_TAG, "StoreInitialize") \
+ V(STORE_PREMONOMORPHIC_TAG, "StorePreMonomorphic") \
+ V(STORE_GENERIC_TAG, "StoreGeneric") \
+ V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic") \
V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
"KeyedCallDebugPrepareStepIn") \
- V(KEYED_CALL_IC_TAG, "KeyedCallIC") \
V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
@@ -139,6 +124,7 @@ struct TickSample;
V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \
V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
V(LAZY_COMPILE_TAG, "LazyCompile") \
+ V(CALL_IC_TAG, "CallIC") \
V(LOAD_IC_TAG, "LoadIC") \
V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \
V(REG_EXP_TAG, "RegExp") \
@@ -254,6 +240,8 @@ class Logger {
CompilationInfo* info,
Name* source, int line, int column);
void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
+ // Emits a code disable-optimization event.
+ void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
void RegExpCodeCreateEvent(Code* code, String* source);
@@ -294,10 +282,7 @@ class Logger {
void HeapSampleStats(const char* space, const char* kind,
intptr_t capacity, intptr_t used);
- void SharedLibraryEvent(const char* library_path,
- uintptr_t start,
- uintptr_t end);
- void SharedLibraryEvent(const wchar_t* library_path,
+ void SharedLibraryEvent(const std::string& library_path,
uintptr_t start,
uintptr_t end);
@@ -305,21 +290,25 @@ class Logger {
enum StartEnd { START, END };
void CodeDeoptEvent(Code* code);
+ void CurrentTimeEvent();
void TimerEvent(StartEnd se, const char* name);
static void EnterExternal(Isolate* isolate);
static void LeaveExternal(Isolate* isolate);
+ static void EmptyLogInternalEvents(const char* name, int se) { }
+ static void LogInternalEvents(const char* name, int se);
+
class TimerEventScope {
public:
TimerEventScope(Isolate* isolate, const char* name)
: isolate_(isolate), name_(name) {
- if (FLAG_log_internal_timer_events) LogTimerEvent(START);
+ LogTimerEvent(START);
}
~TimerEventScope() {
- if (FLAG_log_internal_timer_events) LogTimerEvent(END);
+ LogTimerEvent(END);
}
void LogTimerEvent(StartEnd se);
@@ -329,6 +318,7 @@ class Logger {
static const char* v8_compile_full_code;
static const char* v8_execute;
static const char* v8_external;
+ static const char* v8_ic_miss;
private:
Isolate* isolate_;
@@ -340,9 +330,6 @@ class Logger {
void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
- // Log an event reported from generated code
- void LogRuntime(Vector<const char> format, JSArray* args);
-
bool is_logging() {
return is_logging_;
}
@@ -487,6 +474,7 @@ class CodeEventListener {
virtual void CodeDeleteEvent(Address from) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
+ virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) = 0;
};
diff --git a/chromium/v8/src/lookup.cc b/chromium/v8/src/lookup.cc
new file mode 100644
index 00000000000..5391640ac1f
--- /dev/null
+++ b/chromium/v8/src/lookup.cc
@@ -0,0 +1,200 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/bootstrapper.h"
+#include "src/lookup.h"
+
+namespace v8 {
+namespace internal {
+
+
+void LookupIterator::Next() {
+ has_property_ = false;
+ do {
+ state_ = LookupInHolder();
+ } while (!IsFound() && NextHolder());
+}
+
+
+Handle<JSReceiver> LookupIterator::GetRoot() const {
+ Handle<Object> receiver = GetReceiver();
+ if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ Context* native_context = isolate_->context()->native_context();
+ JSFunction* function;
+ if (receiver->IsNumber()) {
+ function = native_context->number_function();
+ } else if (receiver->IsString()) {
+ function = native_context->string_function();
+ } else if (receiver->IsSymbol()) {
+ function = native_context->symbol_function();
+ } else if (receiver->IsBoolean()) {
+ function = native_context->boolean_function();
+ } else {
+ UNREACHABLE();
+ function = NULL;
+ }
+ return handle(JSReceiver::cast(function->instance_prototype()));
+}
+
+
+Handle<Map> LookupIterator::GetReceiverMap() const {
+ Handle<Object> receiver = GetReceiver();
+ if (receiver->IsNumber()) return isolate_->factory()->heap_number_map();
+ return handle(Handle<HeapObject>::cast(receiver)->map());
+}
+
+
+bool LookupIterator::NextHolder() {
+ if (holder_map_->prototype()->IsNull()) return false;
+
+ Handle<JSReceiver> next(JSReceiver::cast(holder_map_->prototype()));
+
+ if (!check_derived() &&
+ !(check_hidden() &&
+ // TODO(verwaest): Check if this is actually necessary currently. If it
+ // is, this should be handled by setting is_hidden_prototype on the
+ // global object behind the proxy.
+ (holder_map_->IsJSGlobalProxyMap() ||
+ next->map()->is_hidden_prototype()))) {
+ return false;
+ }
+
+ holder_map_ = handle(next->map());
+ maybe_holder_ = next;
+ return true;
+}
+
+
+LookupIterator::State LookupIterator::LookupInHolder() {
+ switch (state_) {
+ case NOT_FOUND:
+ if (holder_map_->IsJSProxyMap()) {
+ return JSPROXY;
+ }
+ if (check_access_check() && holder_map_->is_access_check_needed()) {
+ return ACCESS_CHECK;
+ }
+ // Fall through.
+ case ACCESS_CHECK:
+ if (check_interceptor() && holder_map_->has_named_interceptor()) {
+ return INTERCEPTOR;
+ }
+ // Fall through.
+ case INTERCEPTOR:
+ if (holder_map_->is_dictionary_map()) {
+ property_encoding_ = DICTIONARY;
+ } else {
+ DescriptorArray* descriptors = holder_map_->instance_descriptors();
+ number_ = descriptors->SearchWithCache(*name_, *holder_map_);
+ if (number_ == DescriptorArray::kNotFound) return NOT_FOUND;
+ property_encoding_ = DESCRIPTOR;
+ }
+ return PROPERTY;
+ case PROPERTY:
+ return NOT_FOUND;
+ case JSPROXY:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return state_;
+}
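
Next() and LookupInHolder() form a small state machine walked down the prototype
chain: within one holder the states fall through NOT_FOUND -> ACCESS_CHECK ->
INTERCEPTOR -> PROPERTY, and the iterator only advances to the next prototype
when the current holder yields NOT_FOUND. A standalone sketch of one step plus
the driving loop, with the per-holder checks reduced to booleans (names
illustrative):

    #include <vector>

    enum State { NOT_FOUND, PROPERTY, INTERCEPTOR, ACCESS_CHECK, JSPROXY };

    struct Holder { bool needs_access_check, has_interceptor, has_property; };

    // One LookupInHolder() step: resumes from the last returned state and
    // falls through to the next applicable check, as in the switch above.
    State LookupStep(const Holder& h, State state) {
      switch (state) {
        case NOT_FOUND:
          if (h.needs_access_check) return ACCESS_CHECK;
          // Fall through.
        case ACCESS_CHECK:
          if (h.has_interceptor) return INTERCEPTOR;
          // Fall through.
        case INTERCEPTOR:
          return h.has_property ? PROPERTY : NOT_FOUND;
        default:
          return NOT_FOUND;  // PROPERTY restarts; JSPROXY is terminal in V8
      }
    }

    // The Next() loop: step in the holder, move down the chain on NOT_FOUND.
    State Lookup(const std::vector<Holder>& chain) {
      for (const Holder& h : chain) {
        State state = LookupStep(h, NOT_FOUND);
        if (state != NOT_FOUND) return state;
      }
      return NOT_FOUND;
    }
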
+
+
+bool LookupIterator::IsBootstrapping() const {
+ return isolate_->bootstrapper()->IsActive();
+}
+
+
+bool LookupIterator::HasAccess(v8::AccessType access_type) const {
+ ASSERT_EQ(ACCESS_CHECK, state_);
+ ASSERT(is_guaranteed_to_have_holder());
+ return isolate_->MayNamedAccess(GetHolder(), name_, access_type);
+}
+
+
+bool LookupIterator::HasProperty() {
+ ASSERT_EQ(PROPERTY, state_);
+ ASSERT(is_guaranteed_to_have_holder());
+
+ if (property_encoding_ == DICTIONARY) {
+ Handle<JSObject> holder = GetHolder();
+ number_ = holder->property_dictionary()->FindEntry(name_);
+ if (number_ == NameDictionary::kNotFound) return false;
+
+ property_details_ = GetHolder()->property_dictionary()->DetailsAt(number_);
+ // Holes in dictionary cells are absent values unless marked as read-only.
+ if (holder->IsGlobalObject() &&
+ (property_details_.IsDeleted() ||
+ (!property_details_.IsReadOnly() && FetchValue()->IsTheHole()))) {
+ return false;
+ }
+ } else {
+ property_details_ = holder_map_->instance_descriptors()->GetDetails(
+ number_);
+ }
+
+ switch (property_details_.type()) {
+ case v8::internal::FIELD:
+ case v8::internal::NORMAL:
+ case v8::internal::CONSTANT:
+ property_kind_ = DATA;
+ break;
+ case v8::internal::CALLBACKS:
+ property_kind_ = ACCESSOR;
+ break;
+ case v8::internal::HANDLER:
+ case v8::internal::NONEXISTENT:
+ case v8::internal::INTERCEPTOR:
+ UNREACHABLE();
+ }
+
+ has_property_ = true;
+ return true;
+}
+
+
+Handle<Object> LookupIterator::FetchValue() const {
+ Object* result = NULL;
+ switch (property_encoding_) {
+ case DICTIONARY:
+ result = GetHolder()->property_dictionary()->ValueAt(number_);
+ if (GetHolder()->IsGlobalObject()) {
+ result = PropertyCell::cast(result)->value();
+ }
+ break;
+ case DESCRIPTOR:
+ if (property_details_.type() == v8::internal::FIELD) {
+ FieldIndex field_index = FieldIndex::ForDescriptor(
+ *holder_map_, number_);
+ return JSObject::FastPropertyAt(
+ GetHolder(), property_details_.representation(), field_index);
+ }
+ result = holder_map_->instance_descriptors()->GetValue(number_);
+ }
+ return handle(result, isolate_);
+}
+
+
+Handle<Object> LookupIterator::GetAccessors() const {
+ ASSERT(has_property_);
+ ASSERT_EQ(ACCESSOR, property_kind_);
+ return FetchValue();
+}
+
+
+Handle<Object> LookupIterator::GetDataValue() const {
+ ASSERT(has_property_);
+ ASSERT_EQ(DATA, property_kind_);
+ Handle<Object> value = FetchValue();
+ if (value->IsTheHole()) {
+ ASSERT(property_details_.IsReadOnly());
+ return factory()->undefined_value();
+ }
+ return value;
+}
+
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/lookup.h b/chromium/v8/src/lookup.h
new file mode 100644
index 00000000000..0ac9d353adc
--- /dev/null
+++ b/chromium/v8/src/lookup.h
@@ -0,0 +1,183 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOOKUP_H_
+#define V8_LOOKUP_H_
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class LookupIterator V8_FINAL BASE_EMBEDDED {
+ public:
+ enum Configuration {
+ CHECK_OWN_REAL = 0,
+ CHECK_HIDDEN = 1 << 0,
+ CHECK_DERIVED = 1 << 1,
+ CHECK_INTERCEPTOR = 1 << 2,
+ CHECK_ACCESS_CHECK = 1 << 3,
+ CHECK_ALL = CHECK_HIDDEN | CHECK_DERIVED |
+ CHECK_INTERCEPTOR | CHECK_ACCESS_CHECK,
+ SKIP_INTERCEPTOR = CHECK_ALL ^ CHECK_INTERCEPTOR,
+ CHECK_OWN = CHECK_ALL ^ CHECK_DERIVED
+ };
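+  // The composite values above are plain bitmasks, e.g.
+  //   SKIP_INTERCEPTOR == CHECK_HIDDEN | CHECK_DERIVED | CHECK_ACCESS_CHECK,
+  // which is what skip_interceptor() below computes dynamically.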
+
+ enum State {
+ NOT_FOUND,
+ PROPERTY,
+ INTERCEPTOR,
+ ACCESS_CHECK,
+ JSPROXY
+ };
+
+ enum PropertyKind {
+ DATA,
+ ACCESSOR
+ };
+
+ enum PropertyEncoding {
+ DICTIONARY,
+ DESCRIPTOR
+ };
+
+ LookupIterator(Handle<Object> receiver,
+ Handle<Name> name,
+ Configuration configuration = CHECK_ALL)
+ : configuration_(configuration),
+ state_(NOT_FOUND),
+ property_kind_(DATA),
+ property_encoding_(DESCRIPTOR),
+ property_details_(NONE, NONEXISTENT, Representation::None()),
+ isolate_(name->GetIsolate()),
+ name_(name),
+ maybe_receiver_(receiver),
+ number_(DescriptorArray::kNotFound) {
+ Handle<JSReceiver> root = GetRoot();
+ holder_map_ = handle(root->map());
+ maybe_holder_ = root;
+ Next();
+ }
+
+ LookupIterator(Handle<Object> receiver,
+ Handle<Name> name,
+ Handle<JSReceiver> holder,
+ Configuration configuration = CHECK_ALL)
+ : configuration_(configuration),
+ state_(NOT_FOUND),
+ property_kind_(DATA),
+ property_encoding_(DESCRIPTOR),
+ property_details_(NONE, NONEXISTENT, Representation::None()),
+ isolate_(name->GetIsolate()),
+ name_(name),
+ holder_map_(holder->map()),
+ maybe_receiver_(receiver),
+ maybe_holder_(holder),
+ number_(DescriptorArray::kNotFound) {
+ Next();
+ }
+
+ Isolate* isolate() const { return isolate_; }
+ State state() const { return state_; }
+ Handle<Name> name() const { return name_; }
+
+ bool IsFound() const { return state_ != NOT_FOUND; }
+ void Next();
+
+ Heap* heap() const { return isolate_->heap(); }
+ Factory* factory() const { return isolate_->factory(); }
+ Handle<Object> GetReceiver() const {
+ return Handle<Object>::cast(maybe_receiver_.ToHandleChecked());
+ }
+ Handle<JSObject> GetHolder() const {
+ ASSERT(IsFound() && state_ != JSPROXY);
+ return Handle<JSObject>::cast(maybe_holder_.ToHandleChecked());
+ }
+ Handle<JSReceiver> GetRoot() const;
+
+ /* Dynamically reduce the trapped types. */
+ void skip_interceptor() {
+ configuration_ = static_cast<Configuration>(
+ configuration_ & ~CHECK_INTERCEPTOR);
+ }
+ void skip_access_check() {
+ configuration_ = static_cast<Configuration>(
+ configuration_ & ~CHECK_ACCESS_CHECK);
+ }
+
+ /* ACCESS_CHECK */
+ bool HasAccess(v8::AccessType access_type) const;
+
+ /* PROPERTY */
+ // HasProperty needs to be called before any of the other PROPERTY methods
+ // below can be used. It ensures that we are able to provide a definite
+ // answer, and loads extra information about the property.
+ bool HasProperty();
+ PropertyKind property_kind() const {
+ ASSERT(has_property_);
+ return property_kind_;
+ }
+ PropertyDetails property_details() const {
+ ASSERT(has_property_);
+ return property_details_;
+ }
+ Handle<Object> GetAccessors() const;
+ Handle<Object> GetDataValue() const;
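+
+  // Hedged usage sketch (illustrative only; assumes a receiver and a name
+  // Handle are already in scope):
+  //
+  //   LookupIterator it(receiver, name);
+  //   if (it.state() == LookupIterator::PROPERTY && it.HasProperty() &&
+  //       it.property_kind() == LookupIterator::DATA) {
+  //     Handle<Object> value = it.GetDataValue();
+  //   }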
+
+ /* JSPROXY */
+
+ Handle<JSProxy> GetJSProxy() const {
+ return Handle<JSProxy>::cast(maybe_holder_.ToHandleChecked());
+ }
+
+ private:
+ Handle<Map> GetReceiverMap() const;
+
+ MUST_USE_RESULT bool NextHolder();
+ State LookupInHolder();
+ Handle<Object> FetchValue() const;
+
+ bool IsBootstrapping() const;
+
+ // Methods that fetch data from the holder ensure they always have a holder.
+ // This means the receiver needs to be present as opposed to just the receiver
+ // map. Other objects in the prototype chain are transitively guaranteed to be
+ // present via the receiver map.
+ bool is_guaranteed_to_have_holder() const {
+ return !maybe_receiver_.is_null();
+ }
+ bool check_interceptor() const {
+ return !IsBootstrapping() && (configuration_ & CHECK_INTERCEPTOR) != 0;
+ }
+ bool check_derived() const {
+ return (configuration_ & CHECK_DERIVED) != 0;
+ }
+ bool check_hidden() const {
+ return (configuration_ & CHECK_HIDDEN) != 0;
+ }
+ bool check_access_check() const {
+ return (configuration_ & CHECK_ACCESS_CHECK) != 0;
+ }
+
+ Configuration configuration_;
+ State state_;
+ bool has_property_;
+ PropertyKind property_kind_;
+ PropertyEncoding property_encoding_;
+ PropertyDetails property_details_;
+ Isolate* isolate_;
+ Handle<Name> name_;
+ Handle<Map> holder_map_;
+ MaybeHandle<Object> maybe_receiver_;
+ MaybeHandle<JSReceiver> maybe_holder_;
+
+ int number_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_LOOKUP_H_
diff --git a/chromium/v8/src/macro-assembler.h b/chromium/v8/src/macro-assembler.h
index 9fdf2ee7d86..a11afd863bb 100644
--- a/chromium/v8/src/macro-assembler.h
+++ b/chromium/v8/src/macro-assembler.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MACRO_ASSEMBLER_H_
#define V8_MACRO_ASSEMBLER_H_
@@ -61,31 +38,45 @@ enum AllocationFlags {
const int kInvalidProtoDepth = -1;
#if V8_TARGET_ARCH_IA32
-#include "assembler.h"
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "ia32/macro-assembler-ia32.h"
+#include "src/assembler.h"
+#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/assembler-ia32-inl.h"
+#include "src/code.h" // must be after assembler_*.h
+#include "src/ia32/macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "assembler.h"
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "x64/macro-assembler-x64.h"
+#include "src/assembler.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/assembler-x64-inl.h"
+#include "src/code.h" // must be after assembler_*.h
+#include "src/x64/macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/constants-arm64.h"
+#include "src/assembler.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/code.h" // must be after assembler_*.h
+#include "src/arm64/macro-assembler-arm64.h"
+#include "src/arm64/macro-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/constants-arm.h"
-#include "assembler.h"
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "arm/macro-assembler-arm.h"
+#include "src/arm/constants-arm.h"
+#include "src/assembler.h"
+#include "src/arm/assembler-arm.h"
+#include "src/arm/assembler-arm-inl.h"
+#include "src/code.h" // must be after assembler_*.h
+#include "src/arm/macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/constants-mips.h"
-#include "assembler.h"
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "mips/macro-assembler-mips.h"
+#include "src/mips/constants-mips.h"
+#include "src/assembler.h"
+#include "src/mips/assembler-mips.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/code.h" // must be after assembler_*.h
+#include "src/mips/macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/assembler.h"
+#include "src/x87/assembler-x87.h"
+#include "src/x87/assembler-x87-inl.h"
+#include "src/code.h" // must be after assembler_*.h
+#include "src/x87/macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -116,6 +107,7 @@ class FrameScope {
// scope, the MacroAssembler is still marked as being in a frame scope, and
// the code will be generated again when it goes out of scope.
void GenerateLeaveFrame() {
+ ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm_->LeaveFrame(type_);
}
diff --git a/chromium/v8/src/macros.py b/chromium/v8/src/macros.py
index 7bad23bd425..305a693d441 100644
--- a/chromium/v8/src/macros.py
+++ b/chromium/v8/src/macros.py
@@ -87,6 +87,10 @@ const kMaxYear = 1000000;
const kMinMonth = -10000000;
const kMaxMonth = 10000000;
+# Strict mode flags for passing to %SetProperty
+const kSloppyMode = 0;
+const kStrictMode = 1;
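+# Illustrative (assumed) call shape:
+#   %SetProperty(obj, key, value, attrs, kStrictMode);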
+
# Native cache ids.
const STRING_TO_REGEXP_CACHE_ID = 0;
@@ -97,7 +101,7 @@ const STRING_TO_REGEXP_CACHE_ID = 0;
# values of 'bar'.
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
-macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
+macro IS_UNDEFINED(arg) = (arg === (void 0));
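+# Editorial note: (void 0) evaluates to the undefined value itself, so the
+# identity compare above replaces the previous typeof-string check.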
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
@@ -122,6 +126,8 @@ macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
+macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
+macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
@@ -139,6 +145,10 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# we cannot handle those anyway.
macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
+# Macro for ES6 CheckObjectCoercible
+# Will throw a TypeError of the form "[functionName] called on null or undefined".
+macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL_OR_UNDEFINED(arg) && !IS_UNDETECTABLE(arg)) throw MakeTypeError('called_on_null_or_undefined', [functionName]);
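+# Typical call site (illustrative):
+#   CHECK_OBJECT_COERCIBLE(this, "String.prototype.trim");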
+
# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
const kBoundFunctionIndex = 0;
const kBoundThisIndex = 1;
@@ -158,8 +168,12 @@ macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
# Private names.
+# GET_PRIVATE should only be used if the property is known to exist on obj
+# itself (it should really use %GetOwnProperty, but that would be way slower).
+macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateSymbol(name));
macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name));
-macro HAS_PRIVATE(obj, sym) = (sym in obj);
+macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
+macro HAS_PRIVATE(obj, sym) = (%HasOwnProperty(obj, sym));
macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
macro DELETE_PRIVATE(obj, sym) = (delete obj[sym]);
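+# Illustrative flow (names assumed): sym = NEW_PRIVATE("cache");
+# SET_PRIVATE(obj, sym, v); HAS_PRIVATE(obj, sym) then checks own
+# properties only, via %HasOwnProperty.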
@@ -260,3 +274,14 @@ const COMPILATION_TYPE_JSON = 2;
# Matches Messages::kNoLineNumberInfo from v8.h
const kNoLineNumberInfo = 0;
+
+# Matches PropertyAttributes from property-details.h
+const PROPERTY_ATTRIBUTES_NONE = 0;
+const PROPERTY_ATTRIBUTES_STRING = 8;
+const PROPERTY_ATTRIBUTES_SYMBOLIC = 16;
+const PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL = 32;
+
+# Use for keys, values and entries iterators.
+const ITERATOR_KIND_KEYS = 1;
+const ITERATOR_KIND_VALUES = 2;
+const ITERATOR_KIND_ENTRIES = 3;
diff --git a/chromium/v8/src/mark-compact-inl.h b/chromium/v8/src/mark-compact-inl.h
index 321309c60e2..d1374c4b5d9 100644
--- a/chromium/v8/src/mark-compact-inl.h
+++ b/chromium/v8/src/mark-compact-inl.h
@@ -1,36 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MARK_COMPACT_INL_H_
#define V8_MARK_COMPACT_INL_H_
-#include "isolate.h"
-#include "memory.h"
-#include "mark-compact.h"
+#include <memory.h>
+
+#include "src/isolate.h"
+#include "src/mark-compact.h"
namespace v8 {
@@ -81,14 +59,15 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
void MarkCompactCollector::RecordSlot(Object** anchor_slot,
Object** slot,
- Object* object) {
+ Object* object,
+ SlotsBuffer::AdditionMode mode) {
Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (object_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(anchor_slot)) {
if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
object_page->slots_buffer_address(),
slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ mode)) {
EvictEvacuationCandidate(object_page);
}
}
diff --git a/chromium/v8/src/mark-compact.cc b/chromium/v8/src/mark-compact.cc
index 07bcb7632c4..61b1b54c7bd 100644
--- a/chromium/v8/src/mark-compact.cc
+++ b/chromium/v8/src/mark-compact.cc
@@ -1,47 +1,26 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "cpu-profiler.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "gdb-jit.h"
-#include "global-handles.h"
-#include "heap-profiler.h"
-#include "ic-inl.h"
-#include "incremental-marking.h"
-#include "mark-compact.h"
-#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
-#include "stub-cache.h"
-#include "sweeper-thread.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/base/atomicops.h"
+#include "src/code-stubs.h"
+#include "src/compilation-cache.h"
+#include "src/cpu-profiler.h"
+#include "src/deoptimizer.h"
+#include "src/execution.h"
+#include "src/gdb-jit.h"
+#include "src/global-handles.h"
+#include "src/heap-profiler.h"
+#include "src/ic-inl.h"
+#include "src/incremental-marking.h"
+#include "src/mark-compact.h"
+#include "src/objects-visiting.h"
+#include "src/objects-visiting-inl.h"
+#include "src/spaces-inl.h"
+#include "src/stub-cache.h"
+#include "src/sweeper-thread.h"
namespace v8 {
namespace internal {
@@ -56,7 +35,7 @@ const char* Marking::kImpossibleBitPattern = "01";
// -------------------------------------------------------------------------
// MarkCompactCollector
-MarkCompactCollector::MarkCompactCollector() : // NOLINT
+MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
#ifdef DEBUG
state_(IDLE),
#endif
@@ -67,12 +46,12 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
compacting_(false),
was_marked_incrementally_(false),
sweeping_pending_(false),
+ pending_sweeper_jobs_semaphore_(0),
sequential_sweeping_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
- heap_(NULL),
+ heap_(heap),
code_flusher_(NULL),
- encountered_weak_collections_(NULL),
have_code_to_deoptimize_(false) { }
#ifdef VERIFY_HEAP
@@ -91,13 +70,20 @@ class VerifyMarkingVisitor: public ObjectVisitor {
void VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
- rinfo->target_object())) {
+ if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
Object* p = rinfo->target_object();
VisitPointer(&p);
}
}
+ void VisitCell(RelocInfo* rinfo) {
+ Code* code = rinfo->host();
+ ASSERT(rinfo->rmode() == RelocInfo::CELL);
+ if (!code->IsWeakObject(rinfo->target_cell())) {
+ ObjectVisitor::VisitCell(rinfo);
+ }
+ }
+
private:
Heap* heap_;
};
@@ -219,6 +205,10 @@ static void VerifyEvacuation(NewSpace* space) {
static void VerifyEvacuation(PagedSpace* space) {
+  // TODO(hpayer): Bring back VerifyEvacuation for pages swept in parallel
+  // or concurrently.
+ if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
+ space->was_swept_conservatively()) return;
PageIterator it(space);
while (it.has_next()) {
@@ -340,6 +330,12 @@ static void VerifyNativeContextSeparation(Heap* heap) {
#endif
+void MarkCompactCollector::SetUp() {
+ free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
+ free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
+}
+
+
void MarkCompactCollector::TearDown() {
AbortCompaction();
}
@@ -404,7 +400,6 @@ void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
ASSERT(state_ == PREPARE_GC);
- ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
@@ -421,8 +416,6 @@ void MarkCompactCollector::CollectGarbage() {
SweepSpaces();
- if (!FLAG_collect_maps) ReattachInitialMaps();
-
#ifdef DEBUG
if (FLAG_verify_native_context_separation) {
VerifyNativeContextSeparation(heap_);
@@ -431,7 +424,7 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
if (heap()->weak_embedded_objects_verification_enabled()) {
- VerifyWeakEmbeddedObjectsInOptimizedCode();
+ VerifyWeakEmbeddedObjectsInCode();
}
if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
VerifyOmittedMapChecks();
@@ -492,13 +485,13 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
}
-void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
+void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
HeapObjectIterator code_iterator(heap()->code_space());
for (HeapObject* obj = code_iterator.Next();
obj != NULL;
obj = code_iterator.Next()) {
Code* code = Code::cast(obj);
- if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
+ if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
if (WillBeDeoptimized(code)) continue;
code->VerifyEmbeddedObjectsDependency();
}
@@ -555,11 +548,42 @@ void MarkCompactCollector::ClearMarkbits() {
}
+class MarkCompactCollector::SweeperTask : public v8::Task {
+ public:
+ SweeperTask(Heap* heap, PagedSpace* space)
+ : heap_(heap), space_(space) {}
+
+ virtual ~SweeperTask() {}
+
+ private:
+ // v8::Task overrides.
+ virtual void Run() V8_OVERRIDE {
+ heap_->mark_compact_collector()->SweepInParallel(space_);
+ heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+ }
+
+ Heap* heap_;
+ PagedSpace* space_;
+
+ DISALLOW_COPY_AND_ASSIGN(SweeperTask);
+};
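+
+// Editorial note: StartSweeperThreads posts one SweeperTask per old space
+// (data and pointer), so pending_sweeper_jobs_semaphore_ is signalled twice
+// and must be waited on twice in WaitUntilSweepingCompleted.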
+
+
void MarkCompactCollector::StartSweeperThreads() {
+ ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
+ ASSERT(free_list_old_data_space_.get()->IsEmpty());
sweeping_pending_ = true;
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
+ if (FLAG_job_based_sweeping) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->old_data_space()),
+ v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->old_pointer_space()),
+ v8::Platform::kShortRunningTask);
+ }
}
@@ -568,28 +592,57 @@ void MarkCompactCollector::WaitUntilSweepingCompleted() {
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
+ if (FLAG_job_based_sweeping) {
+    // Wait once for each of the two sweeper jobs.
+ pending_sweeper_jobs_semaphore_.Wait();
+ pending_sweeper_jobs_semaphore_.Wait();
+ }
+ ParallelSweepSpacesComplete();
sweeping_pending_ = false;
- StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
- StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+ RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
+ RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
}
-intptr_t MarkCompactCollector::
- StealMemoryFromSweeperThreads(PagedSpace* space) {
- intptr_t freed_bytes = 0;
+bool MarkCompactCollector::IsSweepingCompleted() {
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
- freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
+ if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
+ return false;
+ }
+ }
+ if (FLAG_job_based_sweeping) {
+ if (!pending_sweeper_jobs_semaphore_.WaitFor(TimeDelta::FromSeconds(0))) {
+ return false;
+ }
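+    // The non-blocking WaitFor(0) above consumed one completion signal;
+    // put it back so WaitUntilSweepingCompleted still observes both jobs.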
+ pending_sweeper_jobs_semaphore_.Signal();
+ }
+ return true;
+}
+
+
+void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
+ FreeList* free_list;
+
+ if (space == heap()->old_pointer_space()) {
+ free_list = free_list_old_pointer_space_.get();
+ } else if (space == heap()->old_data_space()) {
+ free_list = free_list_old_data_space_.get();
+ } else {
+    // Any PagedSpace might invoke RefillFreeList, so make sure to refill
+    // free lists only for the old data and pointer spaces.
+ return;
}
+
+ intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
space->AddToAccountingStats(freed_bytes);
space->DecrementUnsweptFreeBytes(freed_bytes);
- return freed_bytes;
}
bool MarkCompactCollector::AreSweeperThreadsActivated() {
- return isolate()->sweeper_threads() != NULL;
+ return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
}
@@ -598,15 +651,17 @@ bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
}
-bool Marking::TransferMark(Address old_start, Address new_start) {
+void Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
MemoryChunk::FromAddress(new_start));
+ if (!heap_->incremental_marking()->IsMarking()) return;
+
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
// size, so the adjustment to the live data count will be zero anyway.
- if (old_start == new_start) return false;
+ if (old_start == new_start) return;
MarkBit new_mark_bit = MarkBitFrom(new_start);
MarkBit old_mark_bit = MarkBitFrom(old_start);
@@ -619,9 +674,8 @@ bool Marking::TransferMark(Address old_start, Address new_start) {
old_mark_bit.Clear();
ASSERT(IsWhite(old_mark_bit));
Marking::MarkBlack(new_mark_bit);
- return true;
+ return;
} else if (Marking::IsGrey(old_mark_bit)) {
- ASSERT(heap_->incremental_marking()->IsMarking());
old_mark_bit.Clear();
old_mark_bit.Next().Clear();
ASSERT(IsWhite(old_mark_bit));
@@ -634,8 +688,6 @@ bool Marking::TransferMark(Address old_start, Address new_start) {
ObjectColor new_color = Color(new_mark_bit);
ASSERT(new_color == old_color);
#endif
-
- return false;
}
@@ -726,7 +778,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
static const int kMaxMaxEvacuationCandidates = 1000;
int number_of_pages = space->CountTotalPages();
int max_evacuation_candidates =
- static_cast<int>(sqrt(number_of_pages / 2.0) + 1);
+ static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
if (FLAG_stress_compaction || FLAG_always_compact) {
max_evacuation_candidates = kMaxMaxEvacuationCandidates;
@@ -986,7 +1038,8 @@ void MarkCompactCollector::Finish() {
// objects have been marked.
void CodeFlusher::ProcessJSFunctionCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+ Code* lazy_compile =
+ isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
Object* undefined = isolate_->heap()->undefined_value();
JSFunction* candidate = jsfunction_candidates_head_;
@@ -1031,7 +1084,8 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+ Code* lazy_compile =
+ isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
SharedFunctionInfo* next_candidate;
@@ -1063,55 +1117,40 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
void CodeFlusher::ProcessOptimizedCodeMaps() {
- static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
- static const int kEntryLength = SharedFunctionInfo::kEntryLength;
- static const int kContextOffset = 0;
- static const int kCodeOffset = 1;
- static const int kLiteralsOffset = 2;
- STATIC_ASSERT(kEntryLength == 3);
+ STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
SharedFunctionInfo* holder = optimized_code_map_holder_head_;
SharedFunctionInfo* next_holder;
+
while (holder != NULL) {
next_holder = GetNextCodeMap(holder);
ClearNextCodeMap(holder);
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- int new_length = kEntriesStart;
+ int new_length = SharedFunctionInfo::kEntriesStart;
int old_length = code_map->length();
- for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
- Code* code = Code::cast(code_map->get(i + kCodeOffset));
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
- continue;
+ for (int i = SharedFunctionInfo::kEntriesStart;
+ i < old_length;
+ i += SharedFunctionInfo::kEntryLength) {
+ Code* code =
+ Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+ if (!Marking::MarkBitFrom(code).Get()) continue;
+
+ // Move every slot in the entry.
+ for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
+ int dst_index = new_length++;
+ Object** slot = code_map->RawFieldOfElementAt(dst_index);
+ Object* object = code_map->get(i + j);
+ code_map->set(dst_index, object);
+ if (j == SharedFunctionInfo::kOsrAstIdOffset) {
+ ASSERT(object->IsSmi());
+ } else {
+ ASSERT(Marking::IsBlack(
+ Marking::MarkBitFrom(HeapObject::cast(*slot))));
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(slot, slot, *slot);
+ }
}
-
- // Update and record the context slot in the optimized code map.
- Object** context_slot = HeapObject::RawField(code_map,
- FixedArray::OffsetOfElementAt(new_length));
- code_map->set(new_length++, code_map->get(i + kContextOffset));
- ASSERT(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(context_slot, context_slot, *context_slot);
-
- // Update and record the code slot in the optimized code map.
- Object** code_slot = HeapObject::RawField(code_map,
- FixedArray::OffsetOfElementAt(new_length));
- code_map->set(new_length++, code_map->get(i + kCodeOffset));
- ASSERT(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(code_slot, code_slot, *code_slot);
-
- // Update and record the literals slot in the optimized code map.
- Object** literals_slot = HeapObject::RawField(code_map,
- FixedArray::OffsetOfElementAt(new_length));
- code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
- ASSERT(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(literals_slot, literals_slot, *literals_slot);
}
// Trim the optimized code map if entries have been removed.
@@ -1432,49 +1471,6 @@ class MarkCompactMarkingVisitor
return true;
}
- INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
- shared->BeforeVisitingPointers();
- }
-
- static void VisitWeakCollection(Map* map, HeapObject* object) {
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- JSWeakCollection* weak_collection =
- reinterpret_cast<JSWeakCollection*>(object);
-
- // Enqueue weak map in linked list of encountered weak maps.
- if (weak_collection->next() == Smi::FromInt(0)) {
- weak_collection->set_next(collector->encountered_weak_collections());
- collector->set_encountered_weak_collections(weak_collection);
- }
-
- // Skip visiting the backing hash table containing the mappings.
- int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
- BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
- map->GetHeap(),
- object,
- JSWeakCollection::BodyDescriptor::kStartOffset,
- JSWeakCollection::kTableOffset);
- BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
- map->GetHeap(),
- object,
- JSWeakCollection::kTableOffset + kPointerSize,
- object_size);
-
- // Mark the backing hash table without pushing it on the marking stack.
- Object* table_object = weak_collection->table();
- if (!table_object->IsHashTable()) return;
- WeakHashTable* table = WeakHashTable::cast(table_object);
- Object** table_slot =
- HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
- MarkBit table_mark = Marking::MarkBitFrom(table);
- collector->RecordSlot(table_slot, table_slot, table);
- if (!table_mark.Get()) collector->SetMark(table, table_mark);
- // Recording the map slot can be skipped, because maps are not compacted.
- collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
- ASSERT(MarkCompactCollector::IsMarked(table->map()));
- }
-
private:
template<int id>
static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
@@ -1811,6 +1807,10 @@ class RootMarkingVisitor : public ObjectVisitor {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
+ // Skip the weak next code link in a code object, which is visited in
+ // ProcessTopOptimizedFrame.
+ void VisitNextCodeLink(Object** p) { }
+
private:
void MarkObjectByPointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
@@ -1839,6 +1839,7 @@ class RootMarkingVisitor : public ObjectVisitor {
// Helper class for pruning the string table.
+template<bool finalize_external_strings>
class StringTableCleaner : public ObjectVisitor {
public:
explicit StringTableCleaner(Heap* heap)
@@ -1850,22 +1851,20 @@ class StringTableCleaner : public ObjectVisitor {
Object* o = *p;
if (o->IsHeapObject() &&
!Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
- // Check if the internalized string being pruned is external. We need to
- // delete the associated external data as this string is going away.
-
- // Since no objects have yet been moved we can safely access the map of
- // the object.
- if (o->IsExternalString()) {
+ if (finalize_external_strings) {
+ ASSERT(o->IsExternalString());
heap_->FinalizeExternalString(String::cast(*p));
+ } else {
+ pointers_removed_++;
}
// Set the entry to the_hole_value (as deleted).
*p = heap_->the_hole_value();
- pointers_removed_++;
}
}
}
int PointersRemoved() {
+ ASSERT(!finalize_external_strings);
return pointers_removed_;
}
@@ -1875,6 +1874,10 @@ class StringTableCleaner : public ObjectVisitor {
};
+typedef StringTableCleaner<false> InternalizedStringTableCleaner;
+typedef StringTableCleaner<true> ExternalStringTableCleaner;
+
+
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
@@ -2001,7 +2004,7 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
int size = object->Size();
survivors_size += size;
- Heap::UpdateAllocationSiteFeedback(object);
+ Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
offset++;
current_cell >>= 1;
@@ -2011,8 +2014,8 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
}
// Promotion failed. Just migrate object to another semispace.
- MaybeObject* allocation = new_space->AllocateRaw(size);
- if (allocation->IsFailure()) {
+ AllocationResult allocation = new_space->AllocateRaw(size);
+ if (allocation.IsRetry()) {
if (!new_space->AddFreshPage()) {
// Shouldn't happen. We are sweeping linearly, and to-space
// has the same number of pages as from-space, so there is
@@ -2020,14 +2023,15 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
UNREACHABLE();
}
allocation = new_space->AllocateRaw(size);
- ASSERT(!allocation->IsFailure());
+ ASSERT(!allocation.IsRetry());
}
- Object* target = allocation->ToObjectUnchecked();
+ Object* target = allocation.ToObjectChecked();
- MigrateObject(HeapObject::cast(target)->address(),
- object->address(),
+ MigrateObject(HeapObject::cast(target),
+ object,
size,
NEW_SPACE);
+ heap()->IncrementSemiSpaceCopiedObjectSize(size);
}
*cells = 0;
}
@@ -2087,7 +2091,10 @@ void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
- SetMark(string_table, string_table_mark);
+ if (!string_table_mark.Get()) {
+    // The string table may already have been marked by visiting the
+    // handles list.
+ SetMark(string_table, string_table_mark);
+ }
// Explicitly mark the prefix.
string_table->IteratePrefix(visitor);
ProcessMarkingDeque();
@@ -2398,10 +2405,12 @@ void MarkCompactCollector::AfterMarking() {
// string table. Cannot use string_table() here because the string
// table is marked.
StringTable* string_table = heap()->string_table();
- StringTableCleaner v(heap());
- string_table->IterateElements(&v);
- string_table->ElementsRemoved(v.PointersRemoved());
- heap()->external_string_table_.Iterate(&v);
+ InternalizedStringTableCleaner internalized_visitor(heap());
+ string_table->IterateElements(&internalized_visitor);
+ string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
+
+ ExternalStringTableCleaner external_visitor(heap());
+ heap()->external_string_table_.Iterate(&external_visitor);
heap()->external_string_table_.CleanUp();
// Process the weak references.
@@ -2422,11 +2431,6 @@ void MarkCompactCollector::AfterMarking() {
}
}
- if (!FLAG_watch_ic_patching) {
- // Clean up dead objects from the runtime profiler.
- heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
- }
-
if (FLAG_track_gc_object_stats) {
heap()->CheckpointObjectStats();
}
@@ -2434,7 +2438,7 @@ void MarkCompactCollector::AfterMarking() {
void MarkCompactCollector::ProcessMapCaches() {
- Object* raw_context = heap()->native_contexts_list_;
+ Object* raw_context = heap()->native_contexts_list();
while (raw_context != heap()->undefined_value()) {
Context* context = reinterpret_cast<Context*>(raw_context);
if (IsMarked(context)) {
@@ -2484,23 +2488,6 @@ void MarkCompactCollector::ProcessMapCaches() {
}
-void MarkCompactCollector::ReattachInitialMaps() {
- HeapObjectIterator map_iterator(heap()->map_space());
- for (HeapObject* obj = map_iterator.Next();
- obj != NULL;
- obj = map_iterator.Next()) {
- Map* map = Map::cast(obj);
-
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
-
- if (map->attached_to_shared_function_info()) {
- JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
- }
- }
-}
-
-
void MarkCompactCollector::ClearNonLiveReferences() {
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
@@ -2514,20 +2501,13 @@ void MarkCompactCollector::ClearNonLiveReferences() {
if (!map->CanTransition()) continue;
MarkBit map_mark = Marking::MarkBitFrom(map);
- if (map_mark.Get() && map->attached_to_shared_function_info()) {
- // This map is used for inobject slack tracking and has been detached
- // from SharedFunctionInfo during the mark phase.
- // Since it survived the GC, reattach it now.
- JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
- }
-
ClearNonLivePrototypeTransitions(map);
ClearNonLiveMapTransitions(map, map_mark);
if (map_mark.Get()) {
ClearNonLiveDependentCode(map->dependent_code());
} else {
- ClearAndDeoptimizeDependentCode(map->dependent_code());
+ ClearDependentCode(map->dependent_code());
map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
}
@@ -2564,6 +2544,16 @@ void MarkCompactCollector::ClearNonLiveReferences() {
if (!table->IsKey(key)) continue;
uint32_t value_index = table->EntryToValueIndex(i);
Object* value = table->get(value_index);
+ if (key->IsCell() && !IsMarked(key)) {
+ Cell* cell = Cell::cast(key);
+ Object* object = cell->value();
+ if (IsMarked(object)) {
+ MarkBit mark = Marking::MarkBitFrom(cell);
+ SetMark(cell, mark);
+ Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
+ RecordSlot(value_slot, value_slot, *value_slot);
+ }
+ }
if (IsMarked(key)) {
if (!IsMarked(value)) {
HeapObject* obj = HeapObject::cast(value);
@@ -2572,9 +2562,10 @@ void MarkCompactCollector::ClearNonLiveReferences() {
}
ClearNonLiveDependentCode(DependentCode::cast(value));
} else {
- ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
+ ClearDependentCode(DependentCode::cast(value));
table->set(key_index, heap_->the_hole_value());
table->set(value_index, heap_->the_hole_value());
+ table->ElementRemoved();
}
}
}
@@ -2607,9 +2598,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
cached_map,
SKIP_WRITE_BARRIER);
}
- Object** slot =
- HeapObject::RawField(prototype_transitions,
- FixedArray::OffsetOfElementAt(proto_index));
+ Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
RecordSlot(slot, slot, prototype);
new_number_of_transitions++;
}
@@ -2644,56 +2633,102 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
}
-void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
+void MarkCompactCollector::ClearDependentICList(Object* head) {
+ Object* current = head;
+ Object* undefined = heap()->undefined_value();
+ while (current != undefined) {
+ Code* code = Code::cast(current);
+ if (IsMarked(code)) {
+ ASSERT(code->is_weak_stub());
+ IC::InvalidateMaps(code);
+ }
+ current = code->next_code_link();
+ code->set_next_code_link(undefined);
+ }
+}
+
+
+void MarkCompactCollector::ClearDependentCode(
DependentCode* entries) {
DisallowHeapAllocation no_allocation;
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
if (number_of_entries == 0) return;
- for (int i = 0; i < number_of_entries; i++) {
+ int g = DependentCode::kWeakICGroup;
+ if (starts.at(g) != starts.at(g + 1)) {
+ int i = starts.at(g);
+ ASSERT(i + 1 == starts.at(g + 1));
+ Object* head = entries->object_at(i);
+ ClearDependentICList(head);
+ }
+ g = DependentCode::kWeakCodeGroup;
+ for (int i = starts.at(g); i < starts.at(g + 1); i++) {
// If the entry is compilation info then the map must be alive,
- // and ClearAndDeoptimizeDependentCode shouldn't be called.
+ // and ClearDependentCode shouldn't be called.
ASSERT(entries->is_code_at(i));
Code* code = entries->code_at(i);
-
if (IsMarked(code) && !code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
code->InvalidateEmbeddedObjects();
have_code_to_deoptimize_ = true;
}
+ }
+ for (int i = 0; i < number_of_entries; i++) {
entries->clear_at(i);
}
}
-void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
- DisallowHeapAllocation no_allocation;
- DependentCode::GroupStartIndexes starts(entries);
- int number_of_entries = starts.number_of_entries();
- if (number_of_entries == 0) return;
- int new_number_of_entries = 0;
- // Go through all groups, remove dead codes and compact.
- for (int g = 0; g < DependentCode::kGroupCount; g++) {
- int group_number_of_entries = 0;
- for (int i = starts.at(g); i < starts.at(g + 1); i++) {
+int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
+ DependentCode* entries, int group, int start, int end, int new_start) {
+ int survived = 0;
+ if (group == DependentCode::kWeakICGroup) {
+ // Dependent weak IC stubs form a linked list and only the head is stored
+ // in the dependent code array.
+ if (start != end) {
+ ASSERT(start + 1 == end);
+ Object* old_head = entries->object_at(start);
+ MarkCompactWeakObjectRetainer retainer;
+ Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
+ entries->set_object_at(new_start, head);
+ Object** slot = entries->slot_at(new_start);
+ RecordSlot(slot, slot, head);
+      // We do not compact this group even if the head is undefined, as
+      // more dependent ICs are likely to be added later.
+ survived = 1;
+ }
+ } else {
+ for (int i = start; i < end; i++) {
Object* obj = entries->object_at(i);
ASSERT(obj->IsCode() || IsMarked(obj));
if (IsMarked(obj) &&
(!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
- if (new_number_of_entries + group_number_of_entries != i) {
- entries->set_object_at(
- new_number_of_entries + group_number_of_entries, obj);
+ if (new_start + survived != i) {
+ entries->set_object_at(new_start + survived, obj);
}
- Object** slot = entries->slot_at(new_number_of_entries +
- group_number_of_entries);
+ Object** slot = entries->slot_at(new_start + survived);
RecordSlot(slot, slot, obj);
- group_number_of_entries++;
+ survived++;
}
}
- entries->set_number_of_entries(
- static_cast<DependentCode::DependencyGroup>(g),
- group_number_of_entries);
- new_number_of_entries += group_number_of_entries;
+ }
+ entries->set_number_of_entries(
+ static_cast<DependentCode::DependencyGroup>(group), survived);
+ return survived;
+}
+
+
+void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
+ DisallowHeapAllocation no_allocation;
+ DependentCode::GroupStartIndexes starts(entries);
+ int number_of_entries = starts.number_of_entries();
+ if (number_of_entries == 0) return;
+ int new_number_of_entries = 0;
+ // Go through all groups, remove dead codes and compact.
+ for (int g = 0; g < DependentCode::kGroupCount; g++) {
+ int survived = ClearNonLiveDependentCodeInGroup(
+ entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
+ new_number_of_entries += survived;
}
for (int i = new_number_of_entries; i < number_of_entries; i++) {
entries->clear_at(i);
@@ -2703,25 +2738,24 @@ void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
void MarkCompactCollector::ProcessWeakCollections() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
- Object* weak_collection_obj = encountered_weak_collections();
+ Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(
- HeapObject::cast(weak_collection_obj)));
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
- ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
- Object** anchor = reinterpret_cast<Object**>(table->address());
- for (int i = 0; i < table->Capacity(); i++) {
- if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
- Object** key_slot =
- HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
- ObjectHashTable::EntryToIndex(i)));
- RecordSlot(anchor, key_slot, *key_slot);
- Object** value_slot =
- HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
- ObjectHashTable::EntryToValueIndex(i)));
- MarkCompactMarkingVisitor::MarkObjectByPointer(
- this, anchor, value_slot);
+ ASSERT(MarkCompactCollector::IsMarked(weak_collection));
+ if (weak_collection->table()->IsHashTable()) {
+ ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
+ Object** anchor = reinterpret_cast<Object**>(table->address());
+ for (int i = 0; i < table->Capacity(); i++) {
+ if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+ Object** key_slot =
+ table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
+ RecordSlot(anchor, key_slot, *key_slot);
+ Object** value_slot =
+ table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
+ MarkCompactMarkingVisitor::MarkObjectByPointer(
+ this, anchor, value_slot);
+ }
}
}
weak_collection_obj = weak_collection->next();
@@ -2731,22 +2765,24 @@ void MarkCompactCollector::ProcessWeakCollections() {
void MarkCompactCollector::ClearWeakCollections() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
- Object* weak_collection_obj = encountered_weak_collections();
+ Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(
- HeapObject::cast(weak_collection_obj)));
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
- ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
- for (int i = 0; i < table->Capacity(); i++) {
- if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
- table->RemoveEntry(i);
+ ASSERT(MarkCompactCollector::IsMarked(weak_collection));
+ if (weak_collection->table()->IsHashTable()) {
+ ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
+ for (int i = 0; i < table->Capacity(); i++) {
+ HeapObject* key = HeapObject::cast(table->KeyAt(i));
+ if (!MarkCompactCollector::IsMarked(key)) {
+ table->RemoveEntry(i);
+ }
}
}
weak_collection_obj = weak_collection->next();
- weak_collection->set_next(Smi::FromInt(0));
+ weak_collection->set_next(heap()->undefined_value());
}
- set_encountered_weak_collections(Smi::FromInt(0));
+ heap()->set_encountered_weak_collections(Smi::FromInt(0));
}
@@ -2764,19 +2800,21 @@ void MarkCompactCollector::ClearWeakCollections() {
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
-void MarkCompactCollector::MigrateObject(Address dst,
- Address src,
+void MarkCompactCollector::MigrateObject(HeapObject* dst,
+ HeapObject* src,
int size,
AllocationSpace dest) {
+ Address dst_addr = dst->address();
+ Address src_addr = src->address();
HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
if (heap_profiler->is_tracking_object_moves()) {
- heap_profiler->ObjectMoveEvent(src, dst, size);
+ heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
}
- ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
- ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(heap()->AllowedToBeMigrated(src, dest));
+ ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
- Address src_slot = src;
- Address dst_slot = dst;
+ Address src_slot = src_addr;
+ Address dst_slot = dst_addr;
ASSERT(IsAligned(size, kPointerSize));
for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
@@ -2797,8 +2835,8 @@ void MarkCompactCollector::MigrateObject(Address dst,
dst_slot += kPointerSize;
}
- if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
- Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
+ if (compacting_ && dst->IsJSFunction()) {
+ Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
@@ -2808,21 +2846,37 @@ void MarkCompactCollector::MigrateObject(Address dst,
code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
}
+ } else if (compacting_ && dst->IsConstantPoolArray()) {
+ ConstantPoolArray* array = ConstantPoolArray::cast(dst);
+ ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
+ while (!code_iter.is_finished()) {
+ Address code_entry_slot =
+ dst_addr + array->OffsetOfElementAt(code_iter.next_index());
+ Address code_entry = Memory::Address_at(code_entry_slot);
+
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+ SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ &migration_slots_buffer_,
+ SlotsBuffer::CODE_ENTRY_SLOT,
+ code_entry_slot,
+ SlotsBuffer::IGNORE_OVERFLOW);
+ }
+ }
}
} else if (dest == CODE_SPACE) {
- PROFILE(isolate(), CodeMoveEvent(src, dst));
- heap()->MoveBlock(dst, src, size);
+ PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
+ heap()->MoveBlock(dst_addr, src_addr, size);
SlotsBuffer::AddTo(&slots_buffer_allocator_,
&migration_slots_buffer_,
SlotsBuffer::RELOCATED_CODE_OBJECT,
- dst,
+ dst_addr,
SlotsBuffer::IGNORE_OVERFLOW);
- Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
+ Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
- heap()->MoveBlock(dst, src, size);
+ heap()->MoveBlock(dst_addr, src_addr, size);
}
- Memory::Address_at(src) = dst;
+ Memory::Address_at(src_addr) = dst_addr;
}
@@ -2909,25 +2963,30 @@ class PointersUpdatingVisitor: public ObjectVisitor {
};
-static void UpdatePointer(HeapObject** p, HeapObject* object) {
- ASSERT(*p == object);
-
- Address old_addr = object->address();
-
- Address new_addr = Memory::Address_at(old_addr);
+static void UpdatePointer(HeapObject** address, HeapObject* object) {
+ Address new_addr = Memory::Address_at(object->address());
// The new space sweep will overwrite the map word of dead objects
// with NULL. In this case we do not need to transfer this entry to
// the store buffer which we are rebuilding.
+ // We perform the pointer update with a no barrier compare-and-swap. The
+ // compare and swap may fail in the case where the pointer update tries to
+ // update garbage memory which was concurrently accessed by the sweeper.
if (new_addr != NULL) {
- *p = HeapObject::FromAddress(new_addr);
+ base::NoBarrier_CompareAndSwap(
+ reinterpret_cast<base::AtomicWord*>(address),
+ reinterpret_cast<base::AtomicWord>(object),
+ reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
} else {
// We have to zap this pointer, because the store buffer may overflow later,
// and then we have to scan the entire heap and we don't want to find
    // spurious new-space pointers in the old space.
// TODO(mstarzinger): This was changed to a sentinel value to track down
// rare crashes, change it back to Smi::FromInt(0) later.
- *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
+ base::NoBarrier_CompareAndSwap(
+ reinterpret_cast<base::AtomicWord*>(address),
+ reinterpret_cast<base::AtomicWord>(object),
+ reinterpret_cast<base::AtomicWord>(Smi::FromInt(0x0f100d00 >> 1)));
}
}
@@ -2946,23 +3005,20 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
int object_size) {
- // TODO(hpayer): Replace that check with an assert.
- CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
OldSpace* target_space = heap()->TargetSpace(object);
ASSERT(target_space == heap()->old_pointer_space() ||
target_space == heap()->old_data_space());
- Object* result;
- MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
- if (maybe_result->ToObject(&result)) {
- HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
+ HeapObject* target;
+ AllocationResult allocation = target_space->AllocateRaw(object_size);
+ if (allocation.To(&target)) {
+ MigrateObject(target,
+ object,
object_size,
target_space->identity());
- heap()->mark_compact_collector()->tracer()->
- increment_promoted_objects_size(object_size);
+ heap()->IncrementPromotedObjectsSize(object_size);
return true;
}
@@ -2974,8 +3030,7 @@ void MarkCompactCollector::EvacuateNewSpace() {
  // There are soft limits in the allocation code, designed to trigger a
  // mark-sweep collection by failing allocations. But since we are already in
// a mark-sweep allocation, there is no sense in trying to trigger one.
- AlwaysAllocateScope scope;
- heap()->CheckNewSpaceExpansionCriteria();
+ AlwaysAllocateScope scope(isolate());
NewSpace* new_space = heap()->new_space();
@@ -3006,7 +3061,7 @@ void MarkCompactCollector::EvacuateNewSpace() {
void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate());
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
p->MarkSweptPrecisely();
@@ -3027,19 +3082,15 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
int size = object->Size();
- MaybeObject* target = space->AllocateRaw(size);
- if (target->IsFailure()) {
+ HeapObject* target_object;
+ AllocationResult allocation = space->AllocateRaw(size);
+ if (!allocation.To(&target_object)) {
// OS refused to give us memory.
V8::FatalProcessOutOfMemory("Evacuation");
return;
}
- Object* target_object = target->ToObjectUnchecked();
-
- MigrateObject(HeapObject::cast(target_object)->address(),
- object_addr,
- size,
- space->identity());
+ MigrateObject(target_object, object, size, space->identity());
ASSERT(object->map_word().IsForwardingAddress());
}
@@ -3056,6 +3107,8 @@ void MarkCompactCollector::EvacuatePages() {
Page* p = evacuation_candidates_[i];
ASSERT(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ ASSERT(static_cast<int>(p->parallel_sweeping()) ==
+ MemoryChunk::PARALLEL_SWEEPING_DONE);
if (p->IsEvacuationCandidate()) {
// During compaction we might have to request a new page.
      // Check that the space still has room for that.
@@ -3069,7 +3122,6 @@ void MarkCompactCollector::EvacuatePages() {
slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
page->ClearEvacuationCandidate();
page->SetFlag(Page::RESCAN_ON_EVACUATION);
- page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
}
return;
}
@@ -3146,13 +3198,21 @@ enum SkipListRebuildingMode {
};
+enum FreeSpaceTreatmentMode {
+ IGNORE_FREE_SPACE,
+ ZAP_FREE_SPACE
+};
+
+
// Sweep a space precisely. After this has been done the space can
// be iterated precisely, hitting only the live objects. Code space
// is always swept precisely because we want to be able to iterate
// over it. Map space is swept precisely, because it is not compacted.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
-template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
+template<SweepingMode sweeping_mode,
+ SkipListRebuildingMode skip_list_mode,
+ FreeSpaceTreatmentMode free_space_mode>
static void SweepPrecisely(PagedSpace* space,
Page* p,
ObjectVisitor* v) {
@@ -3186,6 +3246,9 @@ static void SweepPrecisely(PagedSpace* space,
for ( ; live_objects != 0; live_objects--) {
Address free_end = cell_base + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
+ }
space->Free(free_start, static_cast<int>(free_end - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
@@ -3217,6 +3280,9 @@ static void SweepPrecisely(PagedSpace* space,
*cell = 0;
}
if (free_start != p->area_end()) {
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
+ }
space->Free(free_start, static_cast<int>(p->area_end() - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
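
The 0xcc fill is deliberate: on x86 it is the int3 breakpoint opcode, so stale code executed out of zapped free space traps immediately instead of running garbage. A hedged sketch of the zap step as a compile-time mode, mirroring the template parameter added above (FreeRange is a made-up helper name):

#include <cstring>

enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };

// Sketch: optionally poison a free range before returning it to the free
// list. Because the mode is a template parameter, the branch folds away in
// each instantiation, as in the SweepPrecisely specializations above.
template <FreeSpaceTreatmentMode mode>
void FreeRange(char* start, char* end) {
  if (mode == ZAP_FREE_SPACE) {
    memset(start, 0xcc, static_cast<size_t>(end - start));  // int3 on x86
  }
  // ... hand [start, end) back to the owning space's free list ...
}
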
@@ -3312,7 +3378,7 @@ void MarkCompactCollector::InvalidateCode(Code* code) {
// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
- return code->marked_for_deoptimization();
+ return code->is_optimized_code() && code->marked_for_deoptimization();
}
@@ -3462,12 +3528,23 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
break;
case OLD_POINTER_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+ IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(
space, p, &updating_visitor);
break;
case CODE_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
- space, p, &updating_visitor);
+ if (FLAG_zap_code_space) {
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+ REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(
+ space, p, &updating_visitor);
+ } else {
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+ REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(
+ space, p, &updating_visitor);
+ }
break;
default:
UNREACHABLE();
@@ -3499,28 +3576,19 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
}
- // Update the head of the native contexts list in the heap.
- updating_visitor.VisitPointer(heap_->native_contexts_list_address());
-
heap_->string_table()->Iterate(&updating_visitor);
updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
if (heap_->weak_object_to_code_table()->IsHashTable()) {
WeakHashTable* table =
WeakHashTable::cast(heap_->weak_object_to_code_table());
table->Iterate(&updating_visitor);
- table->Rehash(heap_->undefined_value());
+ table->Rehash(heap_->isolate()->factory()->undefined_value());
}
// Update pointers from external string table.
heap_->UpdateReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
- if (!FLAG_watch_ic_patching) {
- // Update JSFunction pointers from the runtime profiler.
- heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
- &updating_visitor);
- }
-
EvacuationWeakObjectRetainer evacuation_object_retainer;
heap()->ProcessWeakReferences(&evacuation_object_retainer);
@@ -3541,14 +3609,14 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
-void MarkCompactCollector::UnlinkEvacuationCandidates() {
+void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
if (!p->IsEvacuationCandidate()) continue;
p->Unlink();
- p->ClearSweptPrecisely();
- p->ClearSweptConservatively();
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+ p->InsertAfter(space->LastPage());
}
}
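
Relinking candidates at the tail (instead of unlinking them outright, as the old UnlinkEvacuationCandidates did) keeps the page list walkable for concurrently running sweeper threads. A sketch of the move with std::list standing in for the intrusive page list (assumed shape, not the real Page API):

#include <list>

// Sketch: move an evacuation candidate to the tail of its space's page
// list, so iteration visits unswept live pages first.
template <typename Page>
void MoveToEnd(std::list<Page*>& pages, Page* candidate) {
  pages.remove(candidate);     // Unlink() analogue
  pages.push_back(candidate);  // InsertAfter(LastPage()) analogue
}
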
@@ -3563,7 +3631,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ResetLiveBytes();
- space->ReleasePage(p, false);
+ space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
@@ -3891,7 +3959,11 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
(mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
free_list == NULL));
- p->MarkSweptConservatively();
+  // When parallel sweeping is active, the page is marked as swept by the
+  // main thread after the parallel sweep completes.
+ if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
+ p->MarkSweptConservatively();
+ }
intptr_t freed_bytes = 0;
size_t size = 0;
@@ -3969,46 +4041,51 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
}
-void MarkCompactCollector::SweepInParallel(PagedSpace* space,
- FreeList* private_free_list,
- FreeList* free_list) {
+void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
PageIterator it(space);
+ FreeList* free_list = space == heap()->old_pointer_space()
+ ? free_list_old_pointer_space_.get()
+ : free_list_old_data_space_.get();
+ FreeList private_free_list(space);
while (it.has_next()) {
Page* p = it.next();
if (p->TryParallelSweeping()) {
- SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
- free_list->Concatenate(private_free_list);
+ SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
+ free_list->Concatenate(&private_free_list);
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
}
+ if (p == space->end_of_unswept_pages()) break;
}
}
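
SweepInParallel batches freed blocks in a stack-local private free list and merges into the shared per-space list once per page, bounding contention. A minimal sketch of that accumulate-then-concatenate pattern (all types here are hypothetical stand-ins):

#include <cstddef>
#include <mutex>
#include <vector>

struct FreeBlock { char* start; std::size_t size; };

// Sketch: sweeper threads fill a private vector, then splice it into the
// shared list under a single lock acquisition per page.
struct SharedFreeList {
  std::mutex mutex;
  std::vector<FreeBlock> blocks;
  void Concatenate(std::vector<FreeBlock>* private_list) {
    std::lock_guard<std::mutex> lock(mutex);
    blocks.insert(blocks.end(), private_list->begin(), private_list->end());
    private_list->clear();
  }
};
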
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
- sweeper == LAZY_CONSERVATIVE ||
sweeper == PARALLEL_CONSERVATIVE ||
sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
+ // We defensively initialize end_of_unswept_pages_ here with the first page
+ // of the pages list.
+ space->set_end_of_unswept_pages(space->FirstPage());
+
PageIterator it(space);
int pages_swept = 0;
- bool lazy_sweeping_active = false;
bool unused_page_present = false;
bool parallel_sweeping_active = false;
while (it.has_next()) {
Page* p = it.next();
-
- ASSERT(p->parallel_sweeping() == 0);
- ASSERT(!p->IsEvacuationCandidate());
+ ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
// Clear sweeping flags indicating that marking bits are still intact.
p->ClearSweptPrecisely();
p->ClearSweptConservatively();
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+ if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
+ p->IsEvacuationCandidate()) {
// Will be processed in EvacuateNewSpaceAndCandidates.
ASSERT(evacuation_candidates_.length() > 0);
continue;
@@ -4024,7 +4101,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
// Adjust unswept free bytes because releasing a page expects said
// counter to be accurate for unswept pages.
space->IncreaseUnsweptFreeBytes(p);
- space->ReleasePage(p, true);
+ space->ReleasePage(p);
continue;
}
unused_page_present = true;
@@ -4040,25 +4117,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
pages_swept++;
break;
}
- case LAZY_CONSERVATIVE: {
- if (lazy_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
- reinterpret_cast<intptr_t>(p));
- }
- space->IncreaseUnsweptFreeBytes(p);
- } else {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
- reinterpret_cast<intptr_t>(p));
- }
- SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
- pages_swept++;
- space->SetPagesToSweep(p->next_page());
- lazy_sweeping_active = true;
- }
- break;
- }
case CONCURRENT_CONSERVATIVE:
case PARALLEL_CONSERVATIVE: {
if (!parallel_sweeping_active) {
@@ -4074,9 +4132,10 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
reinterpret_cast<intptr_t>(p));
}
- p->set_parallel_sweeping(1);
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
space->IncreaseUnsweptFreeBytes(p);
}
+ space->set_end_of_unswept_pages(p);
break;
}
case PRECISE: {
@@ -4084,10 +4143,15 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
reinterpret_cast<intptr_t>(p));
}
- if (space->identity() == CODE_SPACE) {
- SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
+ if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
+ SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
+ space, p, NULL);
+ } else if (space->identity() == CODE_SPACE) {
+ SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
+ space, p, NULL);
} else {
- SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
+ SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
+ space, p, NULL);
}
pages_swept++;
break;
@@ -4114,38 +4178,35 @@ void MarkCompactCollector::SweepSpaces() {
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
- SweeperType how_to_sweep =
- FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (isolate()->num_sweeper_threads() > 0) {
+ SweeperType how_to_sweep = CONSERVATIVE;
+ if (AreSweeperThreadsActivated()) {
if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
}
- if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
- // Unlink evacuation candidates before sweeper threads access the list of
-  // pages to avoid a race condition.
- UnlinkEvacuationCandidates();
+ MoveEvacuationCandidatesToEndOfPagesList();
// Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the non-live blocks (for old and map spaces). We sweep
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SequentialSweepingScope scope(this);
- SweepSpace(heap()->old_pointer_space(), how_to_sweep);
- SweepSpace(heap()->old_data_space(), how_to_sweep);
+ { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
+ { SequentialSweepingScope scope(this);
+ SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+ SweepSpace(heap()->old_data_space(), how_to_sweep);
+ }
- if (how_to_sweep == PARALLEL_CONSERVATIVE ||
- how_to_sweep == CONCURRENT_CONSERVATIVE) {
- // TODO(hpayer): fix race with concurrent sweeper
- StartSweeperThreads();
- }
+ if (how_to_sweep == PARALLEL_CONSERVATIVE ||
+ how_to_sweep == CONCURRENT_CONSERVATIVE) {
+ StartSweeperThreads();
+ }
- if (how_to_sweep == PARALLEL_CONSERVATIVE) {
- WaitUntilSweepingCompleted();
+ if (how_to_sweep == PARALLEL_CONSERVATIVE) {
+ WaitUntilSweepingCompleted();
+ }
}
-
RemoveDeadInvalidatedCode();
SweepSpace(heap()->code_space(), PRECISE);
@@ -4167,13 +4228,30 @@ void MarkCompactCollector::SweepSpaces() {
}
+void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
+ p->MarkSweptConservatively();
+ }
+ ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
+ }
+}
+
+
+void MarkCompactCollector::ParallelSweepSpacesComplete() {
+ ParallelSweepSpaceComplete(heap()->old_pointer_space());
+ ParallelSweepSpaceComplete(heap()->old_data_space());
+}
+
+
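
Taken together, the sweeping code drives each page through a small state machine: DONE -> PENDING (queued for a sweeper thread) -> FINALIZE (swept in parallel, awaiting the main thread) -> DONE again once ParallelSweepSpaceComplete marks it swept. A sketch of the finalization step (PageState is a stand-in for the real Page/MemoryChunk):

// Per-page parallel sweeping states, as used in the diff above.
enum ParallelSweepingState {
  PARALLEL_SWEEPING_DONE,      // not part of the current parallel sweep
  PARALLEL_SWEEPING_PENDING,   // queued; a sweeper thread may claim it
  PARALLEL_SWEEPING_FINALIZE   // swept; main thread must still mark it
};

struct PageState { ParallelSweepingState state; };

// Main-thread finalization: only FINALIZE pages are marked swept here.
void FinalizePage(PageState* p) {
  if (p->state == PARALLEL_SWEEPING_FINALIZE) {
    p->state = PARALLEL_SWEEPING_DONE;
    // The real code calls p->MarkSweptConservatively() at this point.
  }
}
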
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate()->debug()->IsLoaded() ||
+ if (isolate()->debug()->is_loaded() ||
isolate()->debug()->has_break_points()) {
enable = false;
}
-#endif
if (enable) {
if (code_flusher_ != NULL) return;
@@ -4261,14 +4339,33 @@ static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ RelocInfo::Mode rmode = rinfo->rmode();
if (target_page->IsEvacuationCandidate() &&
(rinfo->host() == NULL ||
!ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotTypeForRMode(rinfo->rmode()),
- rinfo->pc(),
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ bool success;
+ if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
+ // This doesn't need to be typed since it is just a normal heap pointer.
+ Object** target_pointer =
+ reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
+ success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ target_pointer,
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
+ success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ SlotsBuffer::CODE_ENTRY_SLOT,
+ rinfo->constant_pool_entry_address(),
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ } else {
+ success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ SlotTypeForRMode(rmode),
+ rinfo->pc(),
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ }
+ if (!success) {
EvictEvacuationCandidate(target_page);
}
}
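
All three recording paths share the same fallback: if AddTo fails with FAIL_ON_OVERFLOW, the candidate page is evicted from compaction rather than growing the buffer. A sketch of that record-or-evict shape (this SlotsBuffer is a simplified stand-in, not the real V8 class):

#include <cstddef>
#include <vector>

// Sketch: a bounded slot buffer. On overflow the caller gives up on
// compacting the target page instead of allocating more buffer space.
struct SlotsBuffer {
  static const std::size_t kCapacity = 1024;
  std::vector<void*> slots;
  bool AddTo(void* slot) {
    if (slots.size() >= kCapacity) return false;  // FAIL_ON_OVERFLOW
    slots.push_back(slot);
    return true;
  }
};

// if (!buffer.AddTo(slot)) EvictEvacuationCandidate(target_page);
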
diff --git a/chromium/v8/src/mark-compact.h b/chromium/v8/src/mark-compact.h
index 2a1d97dc2ae..ae6767ff643 100644
--- a/chromium/v8/src/mark-compact.h
+++ b/chromium/v8/src/mark-compact.h
@@ -1,35 +1,12 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_
-#include "compiler-intrinsics.h"
-#include "spaces.h"
+#include "src/compiler-intrinsics.h"
+#include "src/spaces.h"
namespace v8 {
namespace internal {
@@ -110,8 +87,7 @@ class Marking {
markbit.Next().Set();
}
-  // Returns true if the object whose mark is transferred is marked black.
- bool TransferMark(Address old_start, Address new_start);
+ void TransferMark(Address old_start, Address new_start);
#ifdef DEBUG
enum ObjectColor {
@@ -537,40 +513,13 @@ class ThreadLocalTop;
// Mark-Compact collector
class MarkCompactCollector {
public:
- // Type of functions to compute forwarding addresses of objects in
- // compacted spaces. Given an object and its size, return a (non-failure)
- // Object* that will be the object after forwarding. There is a separate
- // allocation function for each (compactable) space based on the location
- // of the object before compaction.
- typedef MaybeObject* (*AllocationFunction)(Heap* heap,
- HeapObject* object,
- int object_size);
-
- // Type of functions to encode the forwarding address for an object.
- // Given the object, its size, and the new (non-failure) object it will be
- // forwarded to, encode the forwarding address. For paged spaces, the
- // 'offset' input/output parameter contains the offset of the forwarded
- // object from the forwarding address of the previous live object in the
- // page as input, and is updated to contain the offset to be used for the
- // next live object in the same page. For spaces using a different
- // encoding (i.e., contiguous spaces), the offset parameter is ignored.
- typedef void (*EncodingFunction)(Heap* heap,
- HeapObject* old_object,
- int object_size,
- Object* new_object,
- int* offset);
-
- // Type of functions to process non-live objects.
- typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
-
- // Pointer to member function, used in IterateLiveObjects.
- typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
-
// Set the global flags, it must be called before Prepare to take effect.
inline void SetFlags(int flags);
static void Initialize();
+ void SetUp();
+
void TearDown();
void CollectEvacuationCandidates(PagedSpace* space);
@@ -622,7 +571,6 @@ class MarkCompactCollector {
enum SweeperType {
CONSERVATIVE,
- LAZY_CONSERVATIVE,
PARALLEL_CONSERVATIVE,
CONCURRENT_CONSERVATIVE,
PRECISE
@@ -637,7 +585,7 @@ class MarkCompactCollector {
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
- void VerifyWeakEmbeddedObjectsInOptimizedCode();
+ void VerifyWeakEmbeddedObjectsInCode();
void VerifyOmittedMapChecks();
#endif
@@ -688,22 +636,19 @@ class MarkCompactCollector {
void RecordCodeEntrySlot(Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
- INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
+ INLINE(void RecordSlot(Object** anchor_slot,
+ Object** slot,
+ Object* object,
+ SlotsBuffer::AdditionMode mode =
+ SlotsBuffer::FAIL_ON_OVERFLOW));
- void MigrateObject(Address dst,
- Address src,
+ void MigrateObject(HeapObject* dst,
+ HeapObject* src,
int size,
AllocationSpace to_old_space);
bool TryPromoteObject(HeapObject* object, int object_size);
- inline Object* encountered_weak_collections() {
- return encountered_weak_collections_;
- }
- inline void set_encountered_weak_collections(Object* weak_collection) {
- encountered_weak_collections_ = weak_collection;
- }
-
void InvalidateCode(Code* code);
void ClearMarkbits();
@@ -715,13 +660,13 @@ class MarkCompactCollector {
MarkingParity marking_parity() { return marking_parity_; }
// Concurrent and parallel sweeping support.
- void SweepInParallel(PagedSpace* space,
- FreeList* private_free_list,
- FreeList* free_list);
+ void SweepInParallel(PagedSpace* space);
void WaitUntilSweepingCompleted();
- intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
+ bool IsSweepingCompleted();
+
+ void RefillFreeList(PagedSpace* space);
bool AreSweeperThreadsActivated();
@@ -740,11 +685,13 @@ class MarkCompactCollector {
void MarkWeakObjectToCodeTable();
// Special case for processing weak references in a full collection. We need
- // to artifically keep AllocationSites alive for a time.
+ // to artificially keep AllocationSites alive for a time.
void MarkAllocationSite(AllocationSite* site);
private:
- MarkCompactCollector();
+ class SweeperTask;
+
+ explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector();
bool MarkInvalidatedCode();
@@ -752,9 +699,6 @@ class MarkCompactCollector {
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);
- void UnlinkEvacuationCandidates();
- void ReleaseEvacuationCandidates();
-
void StartSweeperThreads();
#ifdef DEBUG
@@ -791,6 +735,8 @@ class MarkCompactCollector {
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_pending_;
+ Semaphore pending_sweeper_jobs_semaphore_;
+
bool sequential_sweeping_;
// A pointer to the current stack-allocated GC tracer object during a full
@@ -892,14 +838,11 @@ class MarkCompactCollector {
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
- void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code);
+ void ClearDependentCode(DependentCode* dependent_code);
+ void ClearDependentICList(Object* head);
void ClearNonLiveDependentCode(DependentCode* dependent_code);
-
- // Marking detaches initial maps from SharedFunctionInfo objects
- // to make this reference weak. We need to reattach initial maps
- // back after collection. This is either done during
- // ClearNonLiveTransitions pass or by calling this function.
- void ReattachInitialMaps();
+ int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
+ int start, int end, int new_start);
// Mark all values associated with reachable keys in weak collections
// encountered so far. This might push new object or even new weak maps onto
@@ -938,8 +881,20 @@ class MarkCompactCollector {
void EvacuateNewSpaceAndCandidates();
+ void ReleaseEvacuationCandidates();
+
+  // Moves the pages in the evacuation_candidates_ list to the end of their
+  // corresponding space's page list.
+ void MoveEvacuationCandidatesToEndOfPagesList();
+
void SweepSpace(PagedSpace* space, SweeperType sweeper);
+ // Finalizes the parallel sweeping phase. Marks all the pages that were
+ // swept in parallel.
+ void ParallelSweepSpacesComplete();
+
+ void ParallelSweepSpaceComplete(PagedSpace* space);
+
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
@@ -951,12 +906,14 @@ class MarkCompactCollector {
Heap* heap_;
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
- Object* encountered_weak_collections_;
bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
+ SmartPointer<FreeList> free_list_old_data_space_;
+ SmartPointer<FreeList> free_list_old_pointer_space_;
+
friend class Heap;
};
diff --git a/chromium/v8/src/math.js b/chromium/v8/src/math.js
index 5cbe94a35c7..d231c22ea22 100644
--- a/chromium/v8/src/math.js
+++ b/chromium/v8/src/math.js
@@ -1,29 +1,8 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
// This file relies on the fact that the following declarations have been made
// in runtime.js:
@@ -51,25 +30,25 @@ function MathAbs(x) {
}
// ECMA 262 - 15.8.2.2
-function MathAcos(x) {
- return %Math_acos(TO_NUMBER_INLINE(x));
+function MathAcosJS(x) {
+ return %MathAcos(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.3
-function MathAsin(x) {
- return %Math_asin(TO_NUMBER_INLINE(x));
+function MathAsinJS(x) {
+ return %MathAsin(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.4
-function MathAtan(x) {
- return %Math_atan(TO_NUMBER_INLINE(x));
+function MathAtanJS(x) {
+ return %MathAtan(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.5
// The naming of y and x matches the spec, as does the order in which
// ToNumber (valueOf) is called.
-function MathAtan2(y, x) {
- return %Math_atan2(TO_NUMBER_INLINE(y), TO_NUMBER_INLINE(x));
+function MathAtan2JS(y, x) {
+ return %MathAtan2(TO_NUMBER_INLINE(y), TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.6
@@ -85,7 +64,7 @@ function MathCos(x) {
// ECMA 262 - 15.8.2.8
function MathExp(x) {
- return %Math_exp(TO_NUMBER_INLINE(x));
+ return %MathExpRT(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.9
@@ -100,13 +79,13 @@ function MathFloor(x) {
// has to be -0, which wouldn't be the case with the shift.
return TO_UINT32(x);
} else {
- return %Math_floor(x);
+ return %MathFloorRT(x);
}
}
// ECMA 262 - 15.8.2.10
function MathLog(x) {
- return %_MathLog(TO_NUMBER_INLINE(x));
+ return %_MathLogRT(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.11
@@ -194,7 +173,7 @@ function MathSin(x) {
// ECMA 262 - 15.8.2.17
function MathSqrt(x) {
- return %_MathSqrt(TO_NUMBER_INLINE(x));
+ return %_MathSqrtRT(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.18
@@ -285,53 +264,29 @@ function SetUpMath() {
%FunctionSetInstanceClassName(MathConstructor, 'Math');
// Set up math constants.
- // ECMA-262, section 15.8.1.1.
- %OptimizeObjectForAddingMultipleProperties($Math, 8);
- %SetProperty($Math,
- "E",
- 2.7182818284590452354,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.2.
- %SetProperty($Math,
- "LN10",
- 2.302585092994046,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.3.
- %SetProperty($Math,
- "LN2",
- 0.6931471805599453,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.4.
- %SetProperty($Math,
- "LOG2E",
- 1.4426950408889634,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "LOG10E",
- 0.4342944819032518,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "PI",
- 3.1415926535897932,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "SQRT1_2",
- 0.7071067811865476,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "SQRT2",
- 1.4142135623730951,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %ToFastProperties($Math);
+ InstallConstants($Math, $Array(
+ // ECMA-262, section 15.8.1.1.
+ "E", 2.7182818284590452354,
+ // ECMA-262, section 15.8.1.2.
+ "LN10", 2.302585092994046,
+ // ECMA-262, section 15.8.1.3.
+ "LN2", 0.6931471805599453,
+ // ECMA-262, section 15.8.1.4.
+ "LOG2E", 1.4426950408889634,
+ "LOG10E", 0.4342944819032518,
+ "PI", 3.1415926535897932,
+ "SQRT1_2", 0.7071067811865476,
+ "SQRT2", 1.4142135623730951
+ ));
// Set up non-enumerable functions of the Math object and
// set their names.
InstallFunctions($Math, DONT_ENUM, $Array(
"random", MathRandom,
"abs", MathAbs,
- "acos", MathAcos,
- "asin", MathAsin,
- "atan", MathAtan,
+ "acos", MathAcosJS,
+ "asin", MathAsinJS,
+ "atan", MathAtanJS,
"ceil", MathCeil,
"cos", MathCos,
"exp", MathExp,
@@ -341,7 +296,7 @@ function SetUpMath() {
"sin", MathSin,
"sqrt", MathSqrt,
"tan", MathTan,
- "atan2", MathAtan2,
+ "atan2", MathAtan2JS,
"pow", MathPow,
"max", MathMax,
"min", MathMin,
diff --git a/chromium/v8/src/messages.cc b/chromium/v8/src/messages.cc
index 9eae67a7280..05402e99548 100644
--- a/chromium/v8/src/messages.cc
+++ b/chromium/v8/src/messages.cc
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "execution.h"
-#include "messages.h"
-#include "spaces-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/api.h"
+#include "src/execution.h"
+#include "src/messages.h"
+#include "src/spaces-inl.h"
namespace v8 {
namespace internal {
@@ -43,15 +20,15 @@ void MessageHandler::DefaultMessageReport(Isolate* isolate,
Handle<Object> message_obj) {
SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj);
if (loc == NULL) {
- PrintF("%s\n", *str);
+ PrintF("%s\n", str.get());
} else {
HandleScope scope(isolate);
Handle<Object> data(loc->script()->name(), isolate);
SmartArrayPointer<char> data_str;
if (data->IsString())
data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
- PrintF("%s:%i: %s\n", *data_str ? *data_str : "<unknown>",
- loc->start_pos(), *str);
+ PrintF("%s:%i: %s\n", data_str.get() ? data_str.get() : "<unknown>",
+ loc->start_pos(), str.get());
}
}
@@ -61,7 +38,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
Factory* factory = isolate->factory();
Handle<String> type_handle = factory->InternalizeUtf8String(type);
@@ -79,13 +55,9 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
if (loc) {
start = loc->start_pos();
end = loc->end_pos();
- script_handle = GetScriptWrapper(loc->script());
+ script_handle = Script::GetWrapper(loc->script());
}
- Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Handle<Object>::cast(factory->undefined_value())
- : Handle<Object>::cast(stack_trace);
-
Handle<Object> stack_frames_handle = stack_frames.is_null()
? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_frames);
@@ -96,7 +68,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
start,
end,
script_handle,
- stack_trace_handle,
stack_frames_handle);
return message;
@@ -113,7 +84,7 @@ void MessageHandler::ReportMessage(Isolate* isolate,
// We pass the exception object into the message handler callback though.
Object* exception_object = isolate->heap()->undefined_value();
if (isolate->has_pending_exception()) {
- isolate->pending_exception()->ToObject(&exception_object);
+ exception_object = isolate->pending_exception();
}
Handle<Object> exception_handle(exception_object, isolate);
@@ -160,24 +131,16 @@ Handle<String> MessageHandler::GetMessage(Isolate* isolate,
Factory* factory = isolate->factory();
Handle<String> fmt_str =
factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("FormatMessage"));
- Handle<JSFunction> fun =
- Handle<JSFunction>(
- JSFunction::cast(
- isolate->js_builtins_object()->
- GetPropertyNoExceptionThrown(*fmt_str)));
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty(
+ isolate->js_builtins_object(), fmt_str).ToHandleChecked());
Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
Handle<Object> argv[] = { Handle<Object>(message->type(), isolate),
Handle<Object>(message->arguments(), isolate) };
- bool caught_exception;
- Handle<Object> result =
- Execution::TryCall(fun,
- isolate->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
-
- if (caught_exception || !result->IsString()) {
+ MaybeHandle<Object> maybe_result = Execution::TryCall(
+ fun, isolate->js_builtins_object(), ARRAY_SIZE(argv), argv);
+ Handle<Object> result;
+ if (!maybe_result.ToHandle(&result) || !result->IsString()) {
return factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("<error>"));
}
Handle<String> result_string = Handle<String>::cast(result);
@@ -186,7 +149,7 @@ Handle<String> MessageHandler::GetMessage(Isolate* isolate,
// here to improve the efficiency of converting it to a C string and
// other operations that are likely to take place (see GetLocalizedMessage
// for example).
- FlattenString(result_string);
+ result_string = String::Flatten(result_string);
return result_string;
}
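
The TryCall rewrite above is part of a broader MaybeHandle migration: failure is encoded in the handle itself and unpacked with ToHandle, replacing the caught_exception out-parameter. A simplified, hypothetical analogue of the pattern:

// Stand-in for the MaybeHandle idiom (not the real V8 template):
// an empty location encodes "the call threw / produced no value".
template <typename T>
struct MaybeHandle {
  T* location;
  bool ToHandle(T** out) {
    if (location == nullptr) return false;
    *out = location;
    return true;
  }
};

// T* result;
// if (!maybe_result.ToHandle(&result) || !IsString(result)) return fallback;
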
diff --git a/chromium/v8/src/messages.h b/chromium/v8/src/messages.h
index 5d84e46caa9..aec34690352 100644
--- a/chromium/v8/src/messages.h
+++ b/chromium/v8/src/messages.h
@@ -1,29 +1,6 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// The infrastructure used for (localized) message reporting in V8.
//
@@ -33,7 +10,7 @@
#ifndef V8_MESSAGES_H_
#define V8_MESSAGES_H_
-#include "handles-inl.h"
+#include "src/handles-inl.h"
// Forward declaration of MessageLocation.
namespace v8 {
@@ -95,7 +72,6 @@ class MessageHandler {
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace,
Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
diff --git a/chromium/v8/src/messages.js b/chromium/v8/src/messages.js
index c7096724ace..859bc0d721c 100644
--- a/chromium/v8/src/messages.js
+++ b/chromium/v8/src/messages.js
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// -------------------------------------------------------------------
@@ -45,13 +22,10 @@ var kMessages = {
unterminated_regexp: ["Invalid regular expression: missing /"],
regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
- invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
- invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"],
- invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
- invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
multiple_defaults_in_switch: ["More than one default clause in switch statement"],
newline_after_throw: ["Illegal newline after throw"],
- redeclaration: ["%0", " '", "%1", "' has already been declared"],
+ label_redeclaration: ["Label '", "%0", "' has already been declared"],
+ var_redeclaration: ["Identifier '", "%0", "' has already been declared"],
no_catch_or_finally: ["Missing catch or finally after try"],
unknown_label: ["Undefined label '", "%0", "'"],
uncaught_exception: ["Uncaught ", "%0"],
@@ -64,7 +38,6 @@ var kMessages = {
not_defined: ["%0", " is not defined"],
non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
- non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"],
with_expression: ["%0", " has no properties"],
illegal_invocation: ["Illegal invocation"],
no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
@@ -78,7 +51,7 @@ var kMessages = {
getter_must_be_callable: ["Getter must be a function: ", "%0"],
setter_must_be_callable: ["Setter must be a function: ", "%0"],
value_and_accessor: ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
- proto_object_or_null: ["Object prototype may only be an Object or null"],
+ proto_object_or_null: ["Object prototype may only be an Object or null: ", "%0"],
property_desc_object: ["Property description must be an object: ", "%0"],
redefine_disallowed: ["Cannot redefine property: ", "%0"],
define_disallowed: ["Cannot define property:", "%0", ", object is not extensible."],
@@ -104,21 +77,25 @@ var kMessages = {
observe_perform_non_string: ["Invalid non-string changeType"],
observe_perform_non_function: ["Cannot perform non-function"],
observe_notify_non_notifier: ["notify called on non-notifier object"],
- proto_poison_pill: ["Generic use of __proto__ accessor not allowed"],
+ observe_global_proxy: ["%0", " cannot be called on the global proxy object"],
not_typed_array: ["this is not a typed array."],
invalid_argument: ["invalid_argument"],
data_view_not_array_buffer: ["First argument to DataView constructor must be an ArrayBuffer"],
constructor_not_function: ["Constructor ", "%0", " requires 'new'"],
- not_a_promise: ["%0", "is not a promise"],
- promise_cyclic: ["Chaining cycle detected for promise", "%0"],
+ not_a_symbol: ["%0", " is not a symbol"],
+ not_a_promise: ["%0", " is not a promise"],
+ resolver_not_a_function: ["Promise resolver ", "%0", " is not a function"],
+ promise_cyclic: ["Chaining cycle detected for promise ", "%0"],
array_functions_on_frozen: ["Cannot modify frozen array elements"],
array_functions_change_sealed: ["Cannot add/remove sealed array elements"],
+ first_argument_not_regexp: ["First argument to ", "%0", " must not be a regular expression"],
// RangeError
invalid_array_length: ["Invalid array length"],
invalid_array_buffer_length: ["Invalid array buffer length"],
+ invalid_string_length: ["Invalid string length"],
invalid_typed_array_offset: ["Start offset is too large:"],
invalid_typed_array_length: ["Invalid typed array length"],
- invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
+ invalid_typed_array_alignment: ["%0", " of ", "%1", " should be a multiple of ", "%2"],
typed_array_set_source_too_large:
["Source is too large"],
typed_array_set_negative_offset:
@@ -131,6 +108,11 @@ var kMessages = {
stack_overflow: ["Maximum call stack size exceeded"],
invalid_time_value: ["Invalid time value"],
invalid_count_value: ["Invalid count value"],
+ // ReferenceError
+ invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
+ invalid_lhs_in_for: ["Invalid left-hand side in for-loop"],
+ invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
+ invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
// SyntaxError
paren_in_arg_string: ["Function arg string contains parenthesis"],
not_isvar: ["builtin %IS_VAR: not a variable"],
@@ -150,24 +132,18 @@ var kMessages = {
array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
illegal_access: ["Illegal access"],
- invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
+ invalid_cached_data_function: ["Invalid cached data for function ", "%0"],
+ invalid_cached_data: ["Invalid cached data"],
strict_mode_with: ["Strict mode code may not include a with statement"],
- strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
- too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"],
- too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
- too_many_variables: ["Too many variables declared (only 131071 allowed)"],
- strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
+ strict_eval_arguments: ["Unexpected eval or arguments in strict mode"],
+ too_many_arguments: ["Too many arguments in function call (only 65535 allowed)"],
+ too_many_parameters: ["Too many parameters in function definition (only 65535 allowed)"],
+ too_many_variables: ["Too many variables declared (only 4194303 allowed)"],
strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
- strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
- strict_function_name: ["Function name may not be eval or arguments in strict mode"],
strict_octal_literal: ["Octal literals are not allowed in strict mode."],
strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
accessor_data_property: ["Object literal may not have data and accessor property with the same name"],
accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"],
- strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
- strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_reserved_word: ["Use of future reserved word in strict mode"],
strict_delete: ["Delete of an unqualified identifier in strict mode."],
strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
strict_const: ["Use of const in strict mode."],
@@ -176,12 +152,14 @@ var kMessages = {
strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
strict_caller: ["Illegal access to a strict mode caller function."],
+ generator_poison_pill: ["'caller' and 'arguments' properties may not be accessed on generator functions."],
unprotected_let: ["Illegal let declaration in unprotected statement context."],
unprotected_const: ["Illegal const declaration in unprotected statement context."],
cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
harmony_const_assign: ["Assignment to constant variable."],
- symbol_to_string: ["Conversion from symbol to string"],
+ symbol_to_string: ["Cannot convert a Symbol value to a string"],
+ symbol_to_primitive: ["Cannot convert a Symbol wrapper object to a primitive value"],
invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
module_type_error: ["Module '", "%0", "' used improperly"],
module_export_undefined: ["Export '", "%0", "' is not defined in module"]
@@ -200,10 +178,6 @@ function FormatString(format, args) {
// str is one of %0, %1, %2 or %3.
try {
str = NoSideEffectToString(args[arg_num]);
- if (str.length > 256) {
- str = %SubString(str, 0, 239) + "...<omitted>..." +
- %SubString(str, str.length - 2, str.length);
- }
} catch (e) {
if (%IsJSModule(args[arg_num]))
str = "module";
@@ -223,10 +197,17 @@ function FormatString(format, args) {
function NoSideEffectToString(obj) {
if (IS_STRING(obj)) return obj;
if (IS_NUMBER(obj)) return %_NumberToString(obj);
- if (IS_BOOLEAN(obj)) return x ? 'true' : 'false';
+ if (IS_BOOLEAN(obj)) return obj ? 'true' : 'false';
if (IS_UNDEFINED(obj)) return 'undefined';
if (IS_NULL(obj)) return 'null';
- if (IS_FUNCTION(obj)) return %_CallFunction(obj, FunctionToString);
+ if (IS_FUNCTION(obj)) {
+ var str = %_CallFunction(obj, FunctionToString);
+ if (str.length > 128) {
+ str = %_SubString(str, 0, 111) + "...<omitted>..." +
+ %_SubString(str, str.length - 2, str.length);
+ }
+ return str;
+ }
if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) {
var constructor = %GetDataProperty(obj, "constructor");
if (typeof constructor == "function") {
@@ -791,11 +772,10 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-//TODO(rossberg)
-var CallSiteReceiverKey = NEW_PRIVATE("receiver");
-var CallSiteFunctionKey = NEW_PRIVATE("function");
-var CallSitePositionKey = NEW_PRIVATE("position");
-var CallSiteStrictModeKey = NEW_PRIVATE("strict mode");
+var CallSiteReceiverKey = NEW_PRIVATE("CallSite#receiver");
+var CallSiteFunctionKey = NEW_PRIVATE("CallSite#function");
+var CallSitePositionKey = NEW_PRIVATE("CallSite#position");
+var CallSiteStrictModeKey = NEW_PRIVATE("CallSite#strict_mode");
function CallSite(receiver, fun, pos, strict_mode) {
SET_PRIVATE(this, CallSiteReceiverKey, receiver);
@@ -944,14 +924,10 @@ function CallSiteToString() {
if (this.isNative()) {
fileLocation = "native";
} else {
- if (this.isEval()) {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName) {
- fileLocation = this.getEvalOrigin();
- fileLocation += ", "; // Expecting source position to follow.
- }
- } else {
- fileName = this.getFileName();
+ fileName = this.getScriptNameOrSourceURL();
+ if (!fileName && this.isEval()) {
+ fileLocation = this.getEvalOrigin();
+ fileLocation += ", "; // Expecting source position to follow.
}
if (fileName) {
@@ -982,12 +958,12 @@ function CallSiteToString() {
var methodName = this.getMethodName();
if (functionName) {
if (typeName &&
- %_CallFunction(functionName, typeName, StringIndexOf) != 0) {
+ %_CallFunction(functionName, typeName, StringIndexOfJS) != 0) {
line += typeName + ".";
}
line += functionName;
if (methodName &&
- (%_CallFunction(functionName, "." + methodName, StringIndexOf) !=
+ (%_CallFunction(functionName, "." + methodName, StringIndexOfJS) !=
functionName.length - methodName.length - 1)) {
line += " [as " + methodName + "]";
}
@@ -1082,15 +1058,15 @@ function FormatErrorString(error) {
function GetStackFrames(raw_stack) {
var frames = new InternalArray();
- var non_strict_frames = raw_stack[0];
+ var sloppy_frames = raw_stack[0];
for (var i = 1; i < raw_stack.length; i += 4) {
var recv = raw_stack[i];
var fun = raw_stack[i + 1];
var code = raw_stack[i + 2];
var pc = raw_stack[i + 3];
var pos = %FunctionGetPositionForOffset(code, pc);
- non_strict_frames--;
- frames.push(new CallSite(recv, fun, pos, (non_strict_frames < 0)));
+ sloppy_frames--;
+ frames.push(new CallSite(recv, fun, pos, (sloppy_frames < 0)));
}
return frames;
}
@@ -1163,19 +1139,6 @@ function captureStackTrace(obj, cons_opt) {
stackTraceLimit);
var error_string = FormatErrorString(obj);
- // The holder of this getter ('obj') may not be the receiver ('this').
- // When this getter is called the first time, we use the context values to
- // format a stack trace string and turn this accessor pair into a data
- // property (on the holder).
- var getter = function() {
- // Stack is still a raw array awaiting to be formatted.
- var result = FormatStackTrace(obj, error_string, GetStackFrames(stack));
- // Turn this accessor into a data property.
- %DefineOrRedefineDataProperty(obj, 'stack', result, NONE);
- // Release context values.
- stack = error_string = UNDEFINED;
- return result;
- };
// Set the 'stack' property on the receiver. If the receiver is the same as
  // the holder of this setter, the accessor pair is turned into a data property.
@@ -1188,6 +1151,21 @@ function captureStackTrace(obj, cons_opt) {
}
};
+ // The holder of this getter ('obj') may not be the receiver ('this').
+ // When this getter is called the first time, we use the context values to
+ // format a stack trace string and turn this accessor pair into a data
+ // property (on the holder).
+ var getter = function() {
+    // The stack is still a raw array awaiting formatting.
+ var result = FormatStackTrace(obj, error_string, GetStackFrames(stack));
+ // Replace this accessor to return result directly.
+ %DefineOrRedefineAccessorProperty(
+ obj, 'stack', function() { return result }, setter, DONT_ENUM);
+ // Release context values.
+ stack = error_string = UNDEFINED;
+ return result;
+ };
+
%DefineOrRedefineAccessorProperty(obj, 'stack', getter, setter, DONT_ENUM);
}
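
The getter above implements a lazy, self-replacing accessor: the stack string is formatted once on first read, the accessor is swapped for one that returns the cached result, and the captured raw frames are released. The same compute-once-then-cache idea, sketched in C++ with std::call_once (names are illustrative only):

#include <functional>
#include <mutex>
#include <string>

// Sketch: format the stack lazily on first access, cache the result, and
// drop the captured raw data afterwards.
struct LazyStack {
  std::once_flag once;
  std::string formatted;
  std::function<std::string()> format;  // captures the raw frames
  const std::string& get() {
    std::call_once(once, [this] {
      formatted = format();
      format = nullptr;  // release the raw stack, like `stack = UNDEFINED`
    });
    return formatted;
  }
};
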
@@ -1262,7 +1240,7 @@ var cyclic_error_marker = new $Object();
function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
var current = error;
// Climb the prototype chain until we find the holder.
- while (current && !%HasLocalProperty(current, name)) {
+ while (current && !%HasOwnProperty(current, name)) {
current = %GetPrototype(current);
}
if (IS_NULL(current)) return UNDEFINED;
@@ -1326,6 +1304,15 @@ function SetUpStackOverflowBoilerplate() {
var error_string = boilerplate.name + ": " + boilerplate.message;
+ // Set the 'stack' property on the receiver. If the receiver is the same as
+  // the holder of this setter, the accessor pair is turned into a data property.
+ var setter = function(v) {
+ %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
+ // Tentatively clear the hidden property. If the receiver is the same as
+  // the holder, we release the raw stack trace this way.
+ %GetAndClearOverflowedStackTrace(this);
+ };
+
// The raw stack trace is stored as a hidden property on the holder of this
// getter, which may not be the same as the receiver. Find the holder to
// retrieve the raw stack trace and then turn this accessor pair into a
@@ -1341,20 +1328,12 @@ function SetUpStackOverflowBoilerplate() {
if (IS_UNDEFINED(stack)) return stack;
var result = FormatStackTrace(holder, error_string, GetStackFrames(stack));
- // Replace this accessor with a data property.
- %DefineOrRedefineDataProperty(holder, 'stack', result, NONE);
+ // Replace this accessor to return result directly.
+ %DefineOrRedefineAccessorProperty(
+ holder, 'stack', function() { return result }, setter, DONT_ENUM);
return result;
};
- // Set the 'stack' property on the receiver. If the receiver is the same as
- // holder of this setter, the accessor pair is turned into a data property.
- var setter = function(v) {
- %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
- // Tentatively clear the hidden property. If the receiver is the same as
- // holder, we release the raw stack trace this way.
- %GetAndClearOverflowedStackTrace(this);
- };
-
%DefineOrRedefineAccessorProperty(
boilerplate, 'stack', getter, setter, DONT_ENUM);
diff --git a/chromium/v8/src/mips/OWNERS b/chromium/v8/src/mips/OWNERS
index 38473b56d1f..2dc1d77d367 100644
--- a/chromium/v8/src/mips/OWNERS
+++ b/chromium/v8/src/mips/OWNERS
@@ -1,2 +1,5 @@
plind44@gmail.com
gergely@homejinni.com
+palfia@homejinni.com
+kilvadyb@homejinni.com
+Dusan.Milosavljevic@rt-rk.com
diff --git a/chromium/v8/src/mips/assembler-mips-inl.h b/chromium/v8/src/mips/assembler-mips-inl.h
index 514b3aaa4f0..eec19a671a0 100644
--- a/chromium/v8/src/mips/assembler-mips-inl.h
+++ b/chromium/v8/src/mips/assembler-mips-inl.h
@@ -37,15 +37,19 @@
#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
-#include "mips/assembler-mips.h"
+#include "src/mips/assembler-mips.h"
-#include "cpu.h"
-#include "debug.h"
+#include "src/cpu.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
+
+bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+
+
// -----------------------------------------------------------------------------
// Operand and MemOperand.
@@ -108,7 +112,7 @@ int FPURegister::ToAllocationIndex(FPURegister reg) {
// -----------------------------------------------------------------------------
// RelocInfo.
-void RelocInfo::apply(intptr_t delta) {
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
if (IsCodeTarget(rmode_)) {
uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask;
uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask;
@@ -128,7 +132,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -156,15 +160,24 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@@ -179,22 +192,26 @@ Address Assembler::target_address_from_return_address(Address pc) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
+ Assembler::target_address_at(pc_, host_)));
}
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -205,7 +222,7 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -216,9 +233,11 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
void RelocInfo::set_target_runtime_entry(Address target,
- WriteBarrierMode mode) {
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsRuntimeEntry(rmode_));
- if (target_address() != target) set_target_address(target, mode);
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
@@ -235,11 +254,13 @@ Cell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
    // TODO(1550) We are passing NULL as a slot because a cell can never be
    // on an evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -248,7 +269,7 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
}
-static const int kNoCodeAgeSequenceLength = 7;
+static const int kNoCodeAgeSequenceLength = 7 * Assembler::kInstrSize;
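
A quick unit check, assuming kInstrSize == 4 on MIPS:

  old: kNoCodeAgeSequenceLength == 7            // instructions
  new: kNoCodeAgeSequenceLength == 7 * 4 == 28  // bytes

which is why the builtins-mips.cc hunks further below drop their own
kInstrSize factors from the code-age offset arithmetic.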
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
@@ -260,13 +281,15 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + Assembler::kInstrSize));
+ Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
}
-void RelocInfo::set_code_age_stub(Code* stub) {
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+ host_,
stub->instruction_start());
}
@@ -277,7 +300,7 @@ Address RelocInfo::call_address() {
  // The pc_ offset of 0 assumes a MIPS patched return sequence per
  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or a
  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -287,7 +310,7 @@ void RelocInfo::set_call_address(Address target) {
  // The pc_ offset of 0 assumes a MIPS patched return sequence per
  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or a
  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -318,7 +341,7 @@ void RelocInfo::WipeOut() {
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, NULL);
+ Assembler::set_target_address_at(pc_, host_, NULL);
}
@@ -353,14 +376,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
@@ -380,14 +401,12 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
diff --git a/chromium/v8/src/mips/assembler-mips.cc b/chromium/v8/src/mips/assembler-mips.cc
index 9aed3bd4aaa..e4bebfee4ba 100644
--- a/chromium/v8/src/mips/assembler-mips.cc
+++ b/chromium/v8/src/mips/assembler-mips.cc
@@ -33,48 +33,32 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "mips/assembler-mips-inl.h"
-#include "serialize.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
-unsigned CpuFeatures::cross_compile_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
// Get the CPU features enabled by the build. For cross compilation, the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
-static uint64_t CpuFeaturesImpliedByCompiler() {
- uint64_t answer = 0;
+static unsigned CpuFeaturesImpliedByCompiler() {
+ unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
- answer |= static_cast<uint64_t>(1) << FPU;
+ answer |= 1u << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
-#ifdef __mips__
// If the compiler is allowed to use FPU then we can use FPU too in our code
// generation even when generating snapshots. This won't work for cross
// compilation.
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
- answer |= static_cast<uint64_t>(1) << FPU;
-#endif // defined(__mips_hard_float) && __mips_hard_float != 0
-#endif // def __mips__
+#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
+ answer |= 1u << FPU;
+#endif
return answer;
}
@@ -102,42 +86,29 @@ const char* DoubleRegister::AllocationIndexToString(int index) {
}
-void CpuFeatures::Probe() {
- unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
- CpuFeaturesImpliedByCompiler());
- ASSERT(supported_ == 0 || supported_ == standard_features);
-#ifdef DEBUG
- initialized_ = true;
-#endif
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= CpuFeaturesImpliedByCompiler();
- // Get the features implied by the OS and the compiler settings. This is the
- // minimal set of features which is also allowed for generated code in the
- // snapshot.
- supported_ |= standard_features;
-
- if (Serializer::enabled()) {
- // No probing for features if we might serialize (generate snapshot).
- return;
- }
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
// If the compiler is allowed to use fpu then we can use fpu too in our
// code generation.
-#if !defined(__mips__)
+#ifndef __mips__
// For the simulator build, use FPU.
- supported_ |= static_cast<uint64_t>(1) << FPU;
+ supported_ |= 1u << FPU;
#else
- // Probe for additional features not already known to be available.
+ // Probe for additional features at runtime.
CPU cpu;
- if (cpu.has_fpu()) {
- // This implementation also sets the FPU flags if
- // runtime detection of FPU returns true.
- supported_ |= static_cast<uint64_t>(1) << FPU;
- found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU;
- }
+ if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}
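
A hedged usage sketch of the lifecycle this enables; the Probe(bool) entry
point wrapping ProbeImpl is assumed from the shared CpuFeatures interface:

// At VM startup; cross_compile is true when building a snapshot for a
// different target, so only statically known features are trusted.
CpuFeatures::Probe(/*cross_compile=*/false);

// Later, in the code generator:
if (CpuFeatures::IsSupported(FPU)) {
  // safe to emit FPU instructions
}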
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
int ToNumber(Register reg) {
ASSERT(reg.is_valid());
const int kNumbers[] = {
@@ -213,6 +184,11 @@ bool RelocInfo::IsCodedSpecially() {
}
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -260,6 +236,12 @@ MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
}
+MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
+ OffsetAddend offset_addend) : Operand(rm) {
+ offset_ = unit * multiplier + offset_addend;
+}
+
+
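
A hypothetical call site for the new constructor, using the offset_minus_one
addend declared in assembler-mips.h further below; the effective offset is
unit * multiplier + offset_addend:

  // offset_ = kPointerSize * 3 + (-1)
  MemOperand slot(sp, kPointerSize, 3, MemOperand::offset_minus_one);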
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -267,28 +249,30 @@ static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
+ | (kRegister_sp_Code << kRtShift)
+ | (kPointerSize & kImm16Mask); // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
+ | (kRegister_sp_Code << kRtShift)
+ | (-kPointerSize & kImm16Mask); // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask);
+ | (0 & kImm16Mask); // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask);
+ | (0 & kImm16Mask); // NOLINT
const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask);
+ | (0 & kImm16Mask); // NOLINT
const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask);
+ | (0 & kImm16Mask); // NOLINT
const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask);
+ | (kNegOffset & kImm16Mask); // NOLINT
const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask);
+ | (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
@@ -307,11 +291,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
trampoline_pool_blocked_nesting_ = 0;
// We leave space (16 * kTrampolineSlotsSize)
// for BlockTrampolinePoolScope buffer.
- next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ next_buffer_check_ = FLAG_force_long_branches
+ ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
- trampoline_emitted_ = false;
+ trampoline_emitted_ = FLAG_force_long_branches;
unbound_labels_count_ = 0;
block_buffer_growth_ = false;
@@ -1199,7 +1184,7 @@ void Assembler::jal_or_jalr(int32_t target, Register rs) {
}
-//-------Data-processing-instructions---------
+// -------Data-processing-instructions---------
// Arithmetic.
@@ -1342,7 +1327,7 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
}
-//------------Memory-instructions-------------
+// ------------Memory-instructions-------------
// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
@@ -1459,7 +1444,7 @@ void Assembler::lui(Register rd, int32_t j) {
}
-//-------------Misc-instructions--------------
+// -------------Misc-instructions--------------
// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
@@ -1623,7 +1608,16 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
}
-//--------Coprocessor-instructions----------------
+void Assembler::pref(int32_t hint, const MemOperand& rs) {
+ ASSERT(kArchVariant != kLoongson);
+ ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
+ Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
+ | (rs.offset_);
+ emit(instr);
+}
+
+
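
An illustrative call in a code generator (with the usual __ macro bound to
the MacroAssembler), assuming hint 0, the MIPS prefetch-for-load hint, and an
in-range 16-bit offset:

  __ pref(0, MemOperand(a0, 64));  // hint 0 = load; prefetch 64 bytes past a0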
+// --------Coprocessor-instructions----------------
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
@@ -1634,10 +1628,12 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for the non-8-byte alignment of HeapNumber: convert the
  // 64-bit load into two 32-bit loads.
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
FPURegister nextfpreg;
nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
}
@@ -1649,10 +1645,12 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for the non-8-byte alignment of HeapNumber: convert the
  // 64-bit store into two 32-bit stores.
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
FPURegister nextfpreg;
nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
}
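
The kMantissaOffset/kExponentOffset pair (added to assembler-mips.h below)
keeps this split endian-correct. A standalone sketch of the equivalent
two-word reassembly, assuming 32-bit words:

#include <cstdint>
#include <cstring>

double LoadDoubleFromTwoWords(const uint8_t* base, int mantissa_offset,
                              int exponent_offset) {
  uint32_t lo, hi;
  std::memcpy(&lo, base + mantissa_offset, sizeof lo);  // mantissa low word
  std::memcpy(&hi, base + exponent_offset, sizeof hi);  // sign/exponent word
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;  // little-endian passes (0, 4); big-endian passes (4, 0)
}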
@@ -1678,7 +1676,7 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
- OS::MemCopy(&i, &d, 8);
+ memcpy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
@@ -1993,9 +1991,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- OS::MemMove(desc.buffer, buffer_, desc.instr_size);
- OS::MemMove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ MemMove(desc.buffer, buffer_, desc.instr_size);
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
@@ -2053,15 +2051,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
@@ -2175,7 +2167,9 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below that emits a nop when the address
// fits in just 16 bits. It is unlikely to help and should be
// benchmarked and possibly removed.
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
Instr instr2 = instr_at(pc + kInstrSize);
uint32_t rt_code = GetRtField(instr2);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -2269,7 +2263,9 @@ void Assembler::set_target_address_at(Address pc, Address target) {
patched_jump = true;
}
- CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
+ }
}
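
A generic sketch of the SKIP_ICACHE_FLUSH pattern introduced here: a caller
that patches many sites can defer cache maintenance and flush once at the
end (the platform flush call is elided):

#include <cstdint>

enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };

void PatchInstruction(uint32_t* pc, uint32_t value,
                      ICacheFlushMode mode = FLUSH_ICACHE_IF_NEEDED) {
  *pc = value;
  if (mode != SKIP_ICACHE_FLUSH) {
    // CPU::FlushICache(pc, sizeof(*pc));  // skipped when the caller batches
  }
}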
@@ -2306,6 +2302,21 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
}
}
+
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/chromium/v8/src/mips/assembler-mips.h b/chromium/v8/src/mips/assembler-mips.h
index d9ef46cd014..2ba3ef7166f 100644
--- a/chromium/v8/src/mips/assembler-mips.h
+++ b/chromium/v8/src/mips/assembler-mips.h
@@ -37,9 +37,10 @@
#define V8_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
-#include "assembler.h"
-#include "constants-mips.h"
-#include "serialize.h"
+
+#include "src/assembler.h"
+#include "src/mips/constants-mips.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -76,6 +77,16 @@ struct Register {
static const int kSizeInBytes = 4;
static const int kCpRegister = 23; // cp (s7) is the 23rd register.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static const int kMantissaOffset = 0;
+ static const int kExponentOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static const int kMantissaOffset = 4;
+ static const int kExponentOffset = 0;
+#else
+#error Unknown endianness
+#endif
+
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
@@ -386,7 +397,15 @@ class Operand BASE_EMBEDDED {
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
+ // Immediate value attached to offset.
+ enum OffsetAddend {
+ offset_minus_one = -1,
+ offset_zero = 0
+ };
+
explicit MemOperand(Register rn, int32_t offset = 0);
+ explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
+ OffsetAddend offset_addend = offset_zero);
int32_t offset() const { return offset_; }
bool OffsetIsInt16Encodable() const {
@@ -400,64 +419,6 @@ class MemOperand : public Operand {
};
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a CpuFeatureScope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, supported_);
- }
-
- static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, found_by_runtime_probing_only_);
- }
-
- static bool IsSafeForSnapshot(CpuFeature f) {
- return Check(f, cross_compile_) ||
- (IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
- }
-
- static bool VerifyCrossCompiling() {
- return cross_compile_ == 0;
- }
-
- static bool VerifyCrossCompiling(CpuFeature f) {
- unsigned mask = flag2set(f);
- return cross_compile_ == 0 ||
- (cross_compile_ & mask) == mask;
- }
-
- private:
- static bool Check(CpuFeature f, unsigned set) {
- return (set & flag2set(f)) != 0;
- }
-
- static unsigned flag2set(CpuFeature f) {
- return 1u << f;
- }
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_only_;
-
- static unsigned cross_compile_;
-
- friend class ExternalReference;
- friend class PlatformFeatureScope;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
@@ -517,7 +478,34 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
- static void set_target_address_at(Address pc, Address target);
+ static void set_target_address_at(Address pc,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
+  // On MIPS there is no constant pool, so we skip that parameter.
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool)) {
+ return target_address_at(pc);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED)) {
+ set_target_address_at(pc, target, icache_flush_mode);
+ }
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -531,9 +519,10 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
+ Address instruction_payload, Code* code, Address target) {
set_target_address_at(
instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ code,
target);
}
@@ -657,7 +646,7 @@ class Assembler : public AssemblerBase {
void jal_or_jalr(int32_t target, Register rs);
- //-------Data-processing-instructions---------
+ // -------Data-processing-instructions---------
// Arithmetic.
void addu(Register rd, Register rs, Register rt);
@@ -695,7 +684,7 @@ class Assembler : public AssemblerBase {
void rotrv(Register rd, Register rt, Register rs);
- //------------Memory-instructions-------------
+ // ------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
void lbu(Register rd, const MemOperand& rs);
@@ -711,7 +700,12 @@ class Assembler : public AssemblerBase {
void swr(Register rd, const MemOperand& rs);
- //-------------Misc-instructions--------------
+ // ----------------Prefetch--------------------
+
+ void pref(int32_t hint, const MemOperand& rs);
+
+
+ // -------------Misc-instructions--------------
// Break / Trap instructions.
void break_(uint32_t code, bool break_as_stop = false);
@@ -744,7 +738,7 @@ class Assembler : public AssemblerBase {
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
- //--------Coprocessor-instructions----------------
+ // --------Coprocessor-instructions----------------
// Load, store, and move.
void lwc1(FPURegister fd, const MemOperand& src);
@@ -850,10 +844,10 @@ class Assembler : public AssemblerBase {
assem_->EndBlockGrowBuffer();
}
- private:
- Assembler* assem_;
+ private:
+ Assembler* assem_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
};
// Debugging.
@@ -971,6 +965,12 @@ class Assembler : public AssemblerBase {
void CheckTrampolinePool();
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
diff --git a/chromium/v8/src/mips/builtins-mips.cc b/chromium/v8/src/mips/builtins-mips.cc
index 19f3cdf4ff8..800a79e1920 100644
--- a/chromium/v8/src/mips/builtins-mips.cc
+++ b/chromium/v8/src/mips/builtins-mips.cc
@@ -1,41 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -162,10 +140,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// Tail call a stub.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -297,16 +272,16 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push call kind information and function as parameter to the runtime call.
- __ Push(a1, t1, a1);
+ __ Push(a1, a1);
__ CallRuntime(function_id, 1);
// Restore call kind information and receiver.
- __ Pop(a1, t1);
+ __ Pop(a1);
}
@@ -318,7 +293,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -328,34 +309,27 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool create_memento) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a2 : allocation site or undefined
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
Isolate* isolate = masm->isolate();
@@ -370,25 +344,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(a2, a3);
+ __ push(a2);
+ }
+
// Preserve the two incoming parameters on the stack.
__ sll(a0, a0, kSmiTagSize); // Tag arguments count.
__ MultiPushReversed(a0.bit() | a1.bit());
- // Use t7 to hold undefined, which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
Label rt_call, allocated;
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
if (FLAG_inline_new) {
Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ li(a2, Operand(debug_step_in_fp));
__ lw(a2, MemOperand(a2));
__ Branch(&rt_call, ne, a2, Operand(zero_reg));
-#endif
// Load the initial map and verify that it is in fact a map.
// a1: constructor function
@@ -405,22 +379,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
- if (count_constructions) {
+ if (!is_api_function) {
Label allocate;
+ MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lw(t0, bit_field3);
+ __ DecodeField<Map::ConstructionCount>(t2, t0);
+ __ Branch(&allocate, eq, t2, Operand(JSFunction::kNoSlackTracking));
// Decrease generous allocation count.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
- __ lbu(t0, constructor_count);
- __ Subu(t0, t0, Operand(1));
- __ sb(t0, constructor_count);
- __ Branch(&allocate, ne, t0, Operand(zero_reg));
+ __ Subu(t0, t0, Operand(1 << Map::ConstructionCount::kShift));
+ __ Branch(USE_DELAY_SLOT,
+ &allocate, ne, t2, Operand(JSFunction::kFinishSlackTracking));
+ __ sw(t0, bit_field3); // In delay slot.
__ Push(a1, a2, a1); // a1 = Constructor.
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ Pop(a1, a2);
+ // Slack tracking counter is kNoSlackTracking after runtime call.
+ ASSERT(JSFunction::kNoSlackTracking == 0);
+ __ mov(t2, zero_reg);
__ bind(&allocate);
}
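
The counter now lives in the map's bit_field3 word. A hedged sketch of the
decode/decrement arithmetic behind DecodeField<Map::ConstructionCount> and
the Subu above; the field position here is illustrative, the real shift and
mask live in Map::ConstructionCount:

#include <cstdint>

constexpr uint32_t kShift = 0;
constexpr uint32_t kMask = 0xFu << kShift;

uint32_t DecodeConstructionCount(uint32_t bit_field3) {
  return (bit_field3 & kMask) >> kShift;
}

uint32_t DecrementConstructionCount(uint32_t bit_field3) {
  return bit_field3 - (1u << kShift);  // same trick as the Subu above
}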
@@ -429,13 +407,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a1: constructor function
// a2: initial map
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ Addu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
__ Allocate(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// a1: constructor function
// a2: initial map
- // a3: object size
+ // a3: object size (not including memento if create_memento)
// t4: JSObject (not tagged)
__ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
__ mov(t5, t4);
@@ -450,29 +432,63 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with appropriate filler.
// a1: constructor function
// a2: initial map
- // a3: object size (in words)
+ // a3: object size (in words, including memento if create_memento)
// t4: JSObject (not tagged)
// t5: First in-object property of JSObject (not tagged)
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t6, t4, t0); // End of object.
+ // t2: slack tracking counter (non-API function case)
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+
+ // Use t7 to hold undefined, which is used in several places below.
__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ sll(t0, a0, kPointerSizeLog2);
- __ addu(a0, t5, t0);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ Branch(&no_inobject_slack_tracking,
+ eq, t2, Operand(JSFunction::kNoSlackTracking));
+
+ // Allocate object with a slack.
+ __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a0, t5, at);
// a0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
+ __ sll(at, a3, kPointerSizeLog2);
+ __ Addu(t6, t4, Operand(at)); // End of object.
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
a0, Operand(t6));
}
__ InitializeFieldsWithFiller(t5, a0, t7);
// To allow for truncation.
__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+
+ if (create_memento) {
+ __ Subu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(a0, t4, Operand(a0)); // End of object.
+ __ InitializeFieldsWithFiller(t5, a0, t7);
+
+ // Fill in memento fields.
+ // t5: points to the allocated but uninitialized memento.
+ __ LoadRoot(t7, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ // Load the AllocationSite.
+ __ lw(t7, MemOperand(sp, 2 * kPointerSize));
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ } else {
+ __ sll(at, a3, kPointerSizeLog2);
+ __ Addu(a0, t4, Operand(at)); // End of object.
+ __ InitializeFieldsWithFiller(t5, a0, t7);
}
- __ InitializeFieldsWithFiller(t5, t6, t7);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
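
What the two memento stores above lay down, per the ASSERT_EQs: the map word
at offset 0 and the AllocationSite pointer at offset kPointerSize. A
conceptual sketch:

// Appended immediately after the object body, hence the extra
// AllocationMemento::kSize / kPointerSize == 2 words in the allocation.
struct AllocationMementoLayout {
  void* map;              // the allocation memento map root
  void* allocation_site;  // the AllocationSite reloaded from the stack
};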
@@ -488,12 +504,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields
// and in-object properties.
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
+ __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
__ Addu(a3, a3, Operand(t6));
- __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
+ __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
__ subu(a3, a3, t6);
// Done if no extra properties are to be allocated.
@@ -541,11 +554,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ addu(t6, a2, t3); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
- if (count_constructions) {
+ if (!is_api_function || create_memento) {
__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
} else if (FLAG_debug_code) {
- __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t8));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t2));
}
__ jmp(&entry);
__ bind(&loop);
@@ -576,18 +589,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ UndoAllocationInNewSpace(t4, t5);
}
- __ bind(&rt_call);
// Allocate the new receiver object using the runtime call.
// a1: constructor function
+ __ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+ __ push(a2);
+ }
+
__ push(a1); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(t4, v0);
+  // If we ended up using the runtime and we want a memento, then the
+  // runtime call already made it for us, and we shouldn't increment the
+  // create count.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// Receiver for constructor call allocated.
// t4: JSObject
__ bind(&allocated);
- __ push(t4);
- __ push(t4);
+
+ if (create_memento) {
+ __ lw(a2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
+ __ Branch(&count_incremented, eq, a2, Operand(t5));
+ // a2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ lw(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ Addu(a3, a3, Operand(Smi::FromInt(1)));
+ __ sw(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
+ __ Push(t4, t4);
// Reload the number of arguments from the stack.
// sp[0]: receiver
@@ -630,17 +675,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -689,13 +731,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
@@ -762,15 +799,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(a0, a3);
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
__ CallStub(&stub);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Leave internal frame.
@@ -790,22 +824,39 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ // Push function as parameter to the runtime call.
+ __ Push(a1, a1);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+ // Restore receiver.
+ __ Pop(a1);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
}
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
  // For now, we are relying on the fact that make_code_young doesn't do any
  // garbage collection, which allows us to save/restore the registers without
@@ -815,7 +866,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// Set a0 to point to the head of the PlatformCodeAge sequence.
__ Subu(a0, a0,
- Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
@@ -825,7 +876,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
(a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a2);
+ __ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
@@ -854,7 +905,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// Set a0 to point to the head of the PlatformCodeAge sequence.
__ Subu(a0, a0,
- Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
@@ -864,7 +915,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
(a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a2);
+ __ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
@@ -876,7 +927,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Jump to point after the code-age stub.
- __ Addu(a0, a0, Operand((kNoCodeAgeSequenceLength) * Assembler::kInstrSize));
+ __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength));
__ Jump(a0);
}
@@ -896,7 +947,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -922,7 +973,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> t2.
@@ -969,18 +1020,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ lw(a1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ lw(a2, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Subu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Subu(a1, a1, a2);
- __ SmiTag(a1);
-
- // Pass both function and pc offset as arguments.
+ // Pass function as argument.
__ push(a0);
- __ push(a1);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
// If the code object is null, just return to the unoptimized code.
@@ -1013,7 +1055,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ Branch(&ok, hs, sp, Operand(at));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1065,7 +1107,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
// Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
__ sll(at, a0, kPointerSizeLog2);
__ addu(a2, sp, at);
@@ -1089,9 +1131,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ push(a0);
-
- __ push(a2);
+ __ Push(a0, a2);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(a2, v0);
@@ -1106,14 +1146,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ li(t0, Operand(0, RelocInfo::NONE32));
__ Branch(&patch_receiver);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -1175,17 +1209,16 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Branch(&function, eq, t0, Operand(zero_reg));
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(a2, zero_reg);
- __ SetCallKind(t1, CALL_AS_METHOD);
__ Branch(&non_proxy, ne, t0, Operand(1));
__ push(a1); // Re-add proxy object as additional argument.
__ Addu(a0, a0, Operand(1));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
@@ -1200,15 +1233,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ lw(a2,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
__ sra(a2, a2, kSmiTagSize);
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ SetCallKind(t1, CALL_AS_METHOD);
// Check formal and actual parameter counts.
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(0);
- __ InvokeCode(a3, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
@@ -1245,14 +1276,13 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ lw(a1, MemOperand(fp, kFunctionOffset));
__ Push(a1, v0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
// End of stack check.
// Push current limit and index.
__ bind(&okay);
- __ push(v0); // Limit.
- __ mov(a1, zero_reg); // Initial index.
- __ push(a1);
+ __ mov(a1, zero_reg);
+ __ Push(v0, a1); // Limit and initial index.
// Get the receiver.
__ lw(a0, MemOperand(fp, kRecvOffset));
@@ -1280,7 +1310,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&push_receiver, ne, t3, Operand(zero_reg));
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(a0, &call_to_object);
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ Branch(&use_global_receiver, eq, a0, Operand(a1));
@@ -1301,13 +1331,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
__ Branch(&push_receiver);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+ __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -1342,7 +1367,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ lw(a1, MemOperand(fp, kLimitOffset));
__ Branch(&loop, ne, a0, Operand(a1));
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(a0);
__ sra(a0, a0, kSmiTagSize);
@@ -1350,20 +1375,18 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ GetObjectType(a1, a2, a2);
__ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ Ret(USE_DELAY_SLOT);
__ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
__ push(a1); // Add function proxy as last argument.
__ Addu(a0, a0, Operand(1));
__ li(a2, Operand(0, RelocInfo::NONE32));
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
// Tear down the internal frame and remove function, receiver and args.
@@ -1374,6 +1397,27 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual number of arguments
+ // -- a1 : function (passed through to callee)
+ // -- a2 : expected number of arguments
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(t1, Heap::kRealStackLimitRootIndex);
+ // Make t1 the space we have left. The stack might already be overflowed
+ // here which will cause t1 to become negative.
+ __ subu(t1, sp, t1);
+ // Check if the arguments will overflow the stack.
+ __ sll(at, a2, kPointerSizeLog2);
+ // Signed comparison.
+ __ Branch(stack_overflow, le, t1, Operand(at));
+}
+
+
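
The same check in C terms, as a hedged restatement; the compare is signed
because sp may already sit below the real limit:

#include <cstdint>

bool AdaptorWouldOverflow(intptr_t sp, intptr_t real_stack_limit,
                          intptr_t expected_args, intptr_t pointer_size) {
  intptr_t space_left = sp - real_stack_limit;  // negative if already blown
  return space_left <= expected_args * pointer_size;  // the 'le' branch above
}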
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1406,13 +1450,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- a0: actual arguments count
// -- a1: function (passed through to callee)
// -- a2: expected arguments count
- // -- a3: callee code entry
- // -- t1: call kind information
// -----------------------------------
+ Label stack_overflow;
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
Label invoke, dont_adapt_arguments;
Label enough, too_few;
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Branch(&dont_adapt_arguments, eq,
a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
  // We use Uless as the number of arguments should always be greater than 0.
@@ -1517,6 +1562,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ Jump(a3);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ break_(0xCC);
+ }
}
diff --git a/chromium/v8/src/mips/code-stubs-mips.cc b/chromium/v8/src/mips/code-stubs-mips.cc
index 4c3708ce7a5..0287a9a616f 100644
--- a/chromium/v8/src/mips/code-stubs-mips.cc
+++ b/chromium/v8/src/mips/code-stubs-mips.cc
@@ -1,57 +1,41 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a2 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
@@ -61,50 +45,51 @@ void ToNumberStub::InitializeInterfaceDescriptor(
void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a3, a2, a1 };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->register_param_representations_ = representations;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
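All of these InitializeInterfaceDescriptor bodies fill in the same handful of fields; the shape, reduced to a sketch (simplified stand-ins, not the real v8::internal definitions):

    // Pattern behind the descriptor initializers above (simplified sketch).
    struct Representation { int kind_; };  // Tagged, Smi, Integer32, ...
    struct Register { int code_; };
    typedef void* Address;

    struct CodeStubInterfaceDescriptor {
      int register_param_count_;            // how many register parameters
      Register* register_params_;           // which registers carry them
      Representation* register_param_representations_;  // optional, parallel
      Address deoptimization_handler_;      // runtime entry taken on bailout
    };

When register_param_representations_ is supplied, as for FastCloneShallowArrayStub just above, it runs parallel to register_params_: one Representation per register parameter.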
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a3, a2, a1, a0 };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { a2, a3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1, a0 };
descriptor->register_param_count_ = 2;
@@ -115,7 +100,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = {a1, a0 };
descriptor->register_param_count_ = 2;
@@ -125,8 +109,27 @@ void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
}
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2, a1, a0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
@@ -136,7 +139,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1 };
descriptor->register_param_count_ = 1;
@@ -145,21 +147,25 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
-void KeyedArrayCallStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void StringLengthStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { a0, a2 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
- descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a2, a1, a0 };
descriptor->register_param_count_ = 3;
@@ -170,7 +176,6 @@ void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0, a1 };
descriptor->register_param_count_ = 2;
@@ -182,7 +187,6 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
@@ -190,18 +194,17 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// a0 -- number of arguments
// a1 -- function
- // a2 -- type info cell with elements kind
+ // a2 -- allocation site with elements kind
static Register registers_variable_args[] = { a1, a2, a0 };
static Register registers_no_args[] = { a1, a2 };
@@ -214,17 +217,21 @@ static void InitializeArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = a0;
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
@@ -242,38 +249,38 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = a0;
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeArrayConstructorDescriptor(descriptor, 0);
}
void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeArrayConstructorDescriptor(descriptor, 1);
}
void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeArrayConstructorDescriptor(descriptor, -1);
}
void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
@@ -281,33 +288,29 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
}
void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1, a2, a0 };
descriptor->register_param_count_ = 3;
@@ -318,7 +321,6 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0, a3, a1, a2 };
descriptor->register_param_count_ = 4;
@@ -328,14 +330,118 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
-void NewStringAddStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void BinaryOpICStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1, a0 };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2, a1, a0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { a1, // JSFunction
+ cp, // context
+ a0, // actual number of arguments
+ a2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ a2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ a2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ a0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { a0, // callee
+ t0, // call_data
+ a2, // holder
+ a1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
}
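Each block in CallDescriptors::InitializeForIsolate registers a calling convention as two parallel arrays. A defensive sketch of the invariant they rely on (illustrative helper; the struct below is a stand-in mirroring the fields used above, not a real V8 declaration):

    #include <cstddef>

    struct Register { int code_; };
    struct Representation { int kind_; };
    struct CallInterfaceDescriptor {
      int register_param_count_;
      Register* register_params_;
      Representation* param_representations_;
    };

    // The two arrays must stay the same length; V8 tracks this by hand via
    // register_param_count_, but a helper could enforce it at compile time.
    template <std::size_t N>
    void SetCallDescriptorParams(CallInterfaceDescriptor* d,
                                 Register (&regs)[N],
                                 Representation (&reps)[N]) {
      d->register_param_count_ = static_cast<int>(N);
      d->register_params_ = regs;
      d->param_representations_ = reps;
    }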
@@ -358,19 +464,21 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
ASSERT(descriptor->register_param_count_ == 0 ||
a0.is(descriptor->register_params_[param_count - 1]));
- // Push arguments
+ // Push arguments, adjust sp.
+ __ Subu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->register_params_[i]);
+ // Store argument to stack.
+ __ sw(descriptor->register_params_[i],
+ MemOperand(sp, (param_count-1-i) * kPointerSize));
}
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
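The rewritten push loop above trades param_count individual stack-pointer updates for a single adjustment plus indexed stores; both forms leave an identical stack layout. In C terms (a sketch):

    #include <cstdint>

    // Equivalent of the new sequence: one sp adjustment, then stores at
    // descending offsets so params[0] lands deepest, as repeated pushes would.
    void PushParams(uintptr_t*& sp, const uintptr_t* params, int param_count) {
      sp -= param_count;  // __ Subu(sp, sp, param_count * kPointerSize)
      for (int i = 0; i < param_count; ++i) {
        sp[param_count - 1 - i] = params[i];  // __ sw(..., (n-1-i) * size)
      }
    }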
@@ -380,107 +488,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ li(a1, Operand(Smi::FromInt(0)));
- __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(1);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(a3, &after_sentinel);
- if (FLAG_debug_code) {
- __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
- }
- __ lw(a3, GlobalObjectOperand());
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
- __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
- __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
- __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
- __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(2);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -488,11 +495,13 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public PlatformCodeStub {
public:
- ConvertToDoubleStub(Register result_reg_1,
+ ConvertToDoubleStub(Isolate* isolate,
+ Register result_reg_1,
Register result_reg_2,
Register source_reg,
Register scratch_reg)
- : result1_(result_reg_1),
+ : PlatformCodeStub(isolate),
+ result1_(result_reg_1),
result2_(result_reg_2),
source_(source_reg),
zeros_(scratch_reg) { }
@@ -521,13 +530,14 @@ class ConvertToDoubleStub : public PlatformCodeStub {
void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
- Register exponent = result1_;
- Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
+ Register exponent, mantissa;
+ if (kArchEndian == kLittle) {
+ exponent = result1_;
+ mantissa = result2_;
+ } else {
+ exponent = result2_;
+ mantissa = result1_;
+ }
Label not_special;
// Convert from Smi to integer.
__ sra(source_, source_, kSmiTagSize);
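The kArchEndian selection above exists because the two 32-bit words of an IEEE-754 double swap places in memory between little- and big-endian MIPS. The same split in portable C++ (illustrative, not V8 code):

    #include <cstdint>
    #include <cstring>

    // Splits a double into its high word (sign, 11 exponent bits, top 20
    // mantissa bits) and its low word (remaining 32 mantissa bits).
    void SplitDouble(double value, uint32_t* exponent_word,
                     uint32_t* mantissa_word) {
      uint32_t words[2];
      std::memcpy(words, &value, sizeof(value));
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
      *exponent_word = words[0];  // big-endian: high word stored first
      *mantissa_word = words[1];
    #else
      *exponent_word = words[1];  // little-endian: high word stored second
      *mantissa_word = words[0];
    #endif
    }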
@@ -610,7 +620,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Try a conversion to a signed integer.
__ Trunc_w_d(double_scratch, double_scratch);
// Move the converted value into the result register.
- __ mfc1(result_reg, double_scratch);
+ __ mfc1(scratch3, double_scratch);
// Retrieve and restore the FCSR.
__ cfc1(scratch, FCSR);
@@ -621,16 +631,22 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
scratch, scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
| kFCSRInvalidOpFlagMask);
- // If we had no exceptions we are done.
- __ Branch(&done, eq, scratch, Operand(zero_reg));
+ // If we had no exceptions, set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
}
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
- __ lw(input_low, MemOperand(input_reg, double_offset));
- __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+ __ lw(input_low,
+ MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ __ lw(input_high,
+ MemOperand(input_reg, double_offset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
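Two things change in DoubleToIStub here: the FPU result is only committed once the FCSR shows no overflow/underflow/invalid-op flags, and the manual-truncation loads now go through Register::kMantissaOffset/kExponentOffset so the word order is correct on either endianness. The fast path, as a C++ sketch (a UB-free stand-in for Trunc_w_d plus the FCSR check, not the real semantics of the instruction):

    #include <cstdint>
    #include <cmath>

    // Returns true and sets *result when the FPU conversion can be trusted;
    // otherwise the caller falls through to the manual truncation path.
    bool TryFpuTruncate(double input, int32_t* result) {
      if (std::isnan(input) ||
          input < static_cast<double>(INT32_MIN) ||
          input > static_cast<double>(INT32_MAX)) {
        return false;  // hardware would raise invalid-op/overflow flags
      }
      *result = static_cast<int32_t>(input);  // __ Move(result_reg, scratch3)
      return true;
    }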
@@ -716,10 +732,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
- WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode(isolate);
- stub2.GetCode(isolate);
+ WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
+ WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
+ stub1.GetCode();
+ stub2.GetCode();
}
@@ -1109,8 +1125,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// f12, f14 are the double representations of the left hand side
// and the right hand side if we have FPU. Otherwise a2, a3 represent
// left hand side and a0, a1 represent right hand side.
-
- Isolate* isolate = masm->isolate();
Label nan;
__ li(t0, Operand(LESS));
__ li(t1, Operand(GREATER));
@@ -1185,7 +1199,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
+ a3);
if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
lhs,
@@ -1234,6 +1249,31 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushSafepointRegistersAndDoubles();
+ } else {
+ __ PushSafepointRegisters();
+ }
+ __ Jump(t9);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ __ StoreToSafepointRegisterSlot(t9, t9);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopSafepointRegistersAndDoubles();
+ } else {
+ __ PopSafepointRegisters();
+ }
+ __ Jump(t9);
+}
+
+
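StoreRegistersStateStub and RestoreRegistersStateStub are a matched pair bracketing a safepoint: the first pushes all safepoint registers (optionally the FPU set too), the second pops them again. Conceptually (an RAII-flavored sketch, not how the stubs are actually invoked):

    // Conceptual pairing of the two stubs above.
    struct SafepointRegistersScope {
      bool save_doubles_;
      explicit SafepointRegistersScope(bool save_doubles)
          : save_doubles_(save_doubles) {
        // StoreRegistersStateStub: PushSafepointRegisters[AndDoubles]().
      }
      ~SafepointRegistersScope() {
        // RestoreRegistersStateStub: PopSafepointRegisters[AndDoubles]().
      }
    };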
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -1248,9 +1288,9 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
__ MultiPopFPU(kCallerSavedFPU);
@@ -1261,253 +1301,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a1, a0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in f4, double result goes
- // into f4.
- // Tagged case: tagged input on top of stack and in a0,
- // tagged result (heap number) goes into v0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = t5;
- const Register scratch1 = t3;
- const Register cache_entry = a0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (tagged) {
- // Argument is a number and is on stack and in a0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(a0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into a2, a3.
- __ sra(t0, a0, kSmiTagSize);
- __ mtc1(t0, f4);
- __ cvt_d_w(f4, f4);
- __ Move(a2, a3, f4);
- __ Branch(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(a0,
- a1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Store the
- // low and high words into a2, a3.
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
- } else {
- // Input is untagged double in f4. Output goes to f4.
- __ Move(a2, a3, f4);
- }
- __ bind(&loaded);
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ Xor(a1, a2, a3);
- __ sra(t0, a1, 16);
- __ Xor(a1, a1, t0);
- __ sra(t0, a1, 8);
- __ Xor(a1, a1, t0);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // a1 = TranscendentalCache::hash(double value).
- __ li(cache_entry, Operand(
- ExternalReference::transcendental_cache_array_address(
- masm->isolate())));
- // a0 points to cache array.
- __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
- Isolate::Current()->transcendental_cache()->caches_[0])));
- // a0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
-
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
- __ sll(t0, a1, 1);
- __ Addu(a1, a1, t0);
- __ sll(t0, a1, 2);
- __ Addu(cache_entry, cache_entry, t0);
-
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ lw(t0, MemOperand(cache_entry, 0));
- __ lw(t1, MemOperand(cache_entry, 4));
- __ lw(t2, MemOperand(cache_entry, 8));
- __ Branch(&calculate, ne, a2, Operand(t0));
- __ Branch(&calculate, ne, a3, Operand(t1));
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into v0.
- __ Drop(1);
- __ mov(v0, t2);
- } else {
- // Load result into f4.
- __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
- }
- __ Ret();
-
- __ bind(&calculate);
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
- if (tagged) {
- __ bind(&invalid_cache);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
- masm->isolate()),
- 1,
- 1);
- } else {
- Label no_update;
- Label skip_cache;
-
- // Call C function to calculate the result and update the cache.
- // a0: precalculated cache entry address.
- // a2 and a3: parts of the double value.
- // Store a0, a2 and a3 on stack for later before calling C function.
- __ Push(a3, a2, cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(f4);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ Pop(a3, a2, cache_entry);
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
- __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
-
- __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
- __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
- __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, cache_entry);
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
- __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(f4);
- __ bind(&no_update);
-
- // We return the value in f4 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ li(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-}
-
-
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- __ push(ra);
- __ PrepareCallCFunction(2, scratch);
- if (IsMipsSoftFloatABI) {
- __ Move(a0, a1, f4);
- } else {
- __ mov_d(f12, f4);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- Isolate* isolate = masm->isolate();
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(
- ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(
- ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(
- ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(ra);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
const Register exponent = a2;
@@ -1624,13 +1417,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch2);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
@@ -1687,11 +1480,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1708,13 +1501,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
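MovToFloatParameters/MovFromFloatResult are the renamed FP-ABI helpers (formerly SetCallCDoubleArguments/GetCFunctionDoubleResult): they place the two doubles in the floating-point parameter registers and fetch the double result back. The C function on the other end reduces to something like this sketch (the real target sits behind ExternalReference::power_double_double_function):

    #include <cmath>

    // Illustrative body for the C call the stub performs above.
    double power_double_double(double base, double exponent) {
      return std::pow(base, exponent);
    }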
@@ -1736,74 +1529,83 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(
+ Isolate* isolate) {
+ StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(
+ Isolate* isolate) {
+ RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
- CEntryStub save_doubles(1, mode);
- StoreBufferOverflowStub stub(mode);
+ CEntryStub save_doubles(isolate, 1, mode);
+ StoreBufferOverflowStub stub(isolate, mode);
// These stubs might already be in the snapshot, detect that and don't
// regenerate, which would lead to code stub initialization state being messed
// up.
Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
- save_doubles_code = *save_doubles.GetCode(isolate);
+ if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+ save_doubles_code = *save_doubles.GetCode();
}
Code* store_buffer_overflow_code;
- if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
- store_buffer_overflow_code = *stub.GetCode(isolate);
+ if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
+ store_buffer_overflow_code = *stub.GetCode();
}
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ andi(scratch, value, 0xf);
- __ Branch(oom_label, eq, scratch, Operand(0xf));
-}
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // s0: number of arguments including receiver
+ // s1: size of arguments excluding receiver
+ // s2: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // v0: result parameter for PerformGC, if any
- // s0: number of arguments including receiver (C callee-saved)
- // s1: pointer to the first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
+ // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
+ // The reason for this is that these arguments would need to be saved anyway,
+ // so it's faster to set them up directly.
+ // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
- Isolate* isolate = masm->isolate();
+ // Compute the argv pointer in a callee-saved register.
+ __ Addu(s1, sp, s1);
- if (do_gc) {
- // Move result passed in v0 into a0 to call PerformGC.
- __ mov(a0, v0);
- __ PrepareCallCFunction(2, 0, a1);
- __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
- }
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles_);
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ li(a0, Operand(scope_depth));
- __ lw(a1, MemOperand(a0));
- __ Addu(a1, a1, Operand(1));
- __ sw(a1, MemOperand(a0));
- }
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
// Prepare arguments for C routine.
// a0 = argc
@@ -1815,7 +1617,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ AssertStackIsAligned();
- __ li(a2, Operand(ExternalReference::isolate_address(isolate)));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -1850,154 +1652,67 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
masm->InstructionsGeneratedSince(&find_ra));
}
- if (always_allocate) {
- // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
- __ li(a2, Operand(scope_depth));
- __ lw(a3, MemOperand(a2));
- __ Subu(a3, a3, Operand(1));
- __ sw(a3, MemOperand(a2));
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ Branch(&okay, ne, v0, Operand(t0));
+ __ stop("The hole escaped");
+ __ bind(&okay);
}
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ addiu(a2, v0, 1);
- __ andi(t0, a2, kFailureTagMask);
- __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
- // Restore stack (remove arg slots) in branch delay slot.
- __ addiu(sp, sp, kCArgsSlotsSize);
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ LoadRoot(t0, Heap::kExceptionRootIndex);
+ __ Branch(&exception_returned, eq, t0, Operand(v0));
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ li(a2, Operand(pending_exception_address));
+ __ lw(a2, MemOperand(a2));
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ // Cannot use a check here, as it attempts to generate a call into the runtime.
+ __ Branch(&okay, eq, t0, Operand(a2));
+ __ stop("Unexpected pending exception");
+ __ bind(&okay);
+ }
// Exit C frame and return.
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
+ // s0: still holds argc (callee-saved).
__ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
- // Check if we should retry or throw exception.
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
- __ Branch(&retry, eq, t0, Operand(zero_reg));
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
+ // Handling of exception.
+ __ bind(&exception_returned);
// Retrieve the pending exception.
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ lw(v0, MemOperand(t0));
-
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
+ __ li(a2, Operand(pending_exception_address));
+ __ lw(v0, MemOperand(a2));
// Clear the pending exception.
- __ li(a3, Operand(isolate->factory()->the_hole_value()));
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(a3, MemOperand(t0));
+ __ li(a3, Operand(isolate()->factory()->the_hole_value()));
+ __ sw(a3, MemOperand(a2));
// Special handling of termination exceptions, which are uncatchable
// by JavaScript code.
+ Label throw_termination_exception;
__ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
- __ Branch(throw_termination_exception, eq, v0, Operand(t0));
+ __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
// Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry);
- // Last failure (v0) will be moved to (a0) for parameter when retrying.
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // s0: number of arguments including receiver
- // s1: size of arguments excluding receiver
- // s2: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
- // The reason for this is that these arguments would need to be saved anyway
- // so it's faster to set them up directly.
- // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
-
- // Compute the argv pointer in a callee-saved register.
- __ Addu(s1, sp, s1);
-
- // Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
-
- // s0: number of arguments (C callee-saved)
- // s1: pointer to first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ li(a0, Operand(false, RelocInfo::NONE32));
- __ li(a2, Operand(external_caught));
- __ sw(a0, MemOperand(a2));
-
- // Set pending exception and v0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, v0, t0, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(v0, MemOperand(a2));
- // Fall through to the next label.
+ __ Throw(v0);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(v0);
-
- __ bind(&throw_normal_exception);
- __ Throw(v0);
}
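The net effect of this CEntryStub rewrite: the old failure-tag protocol (tag bits on the return value, GC-and-retry loops, dedicated out-of-memory handling) is gone, replaced by a single exception-sentinel compare plus the pending-exception slot. The resulting control flow, as a C++ sketch (stand-in declarations; none of these are real V8 names):

    // Control-flow sketch of the new CEntryStub exit path.
    struct Object;
    extern Object* exception_sentinel;         // Heap::kExceptionRootIndex
    extern Object* the_hole;
    extern Object* termination_exception;
    extern Object** pending_exception_address;
    [[noreturn]] void Throw(Object* ex);
    [[noreturn]] void ThrowUncatchable(Object* ex);

    Object* CallBuiltinAndPropagate(Object* (*builtin)(int, Object**),
                                    int argc, Object** argv) {
      Object* result = builtin(argc, argv);
      if (result != exception_sentinel) return result;  // normal return
      Object* pending = *pending_exception_address;     // retrieve...
      *pending_exception_address = the_hole;            // ...and clear
      if (pending == termination_exception) ThrowUncatchable(pending);
      Throw(pending);  // ordinary, catchable exception
    }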
@@ -2089,7 +1804,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
- __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ LoadRoot(v0, Heap::kExceptionRootIndex);
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
@@ -2314,7 +2029,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Branch(&object_not_null,
ne,
scratch,
- Operand(masm->isolate()->factory()->null_value()));
+ Operand(isolate()->factory()->null_value()));
__ li(v0, Operand(Smi::FromInt(1)));
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@@ -2362,7 +2077,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
// -- a1 : receiver
// -----------------------------------
__ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->prototype_string()));
+ Operand(isolate()->factory()->prototype_string()));
receiver = a1;
} else {
ASSERT(kind() == Code::LOAD_IC);
@@ -2382,108 +2097,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = a0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
-
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a1,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a2;
- value = a0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : key
- // -----------------------------------
- receiver = a1;
- value = a0;
- }
- Register scratch = a3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ GetObjectType(scratch, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&miss, eq, scratch, Operand(at));
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
Register InstanceofStub::left() { return a0; }
@@ -2543,7 +2156,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
@@ -2565,11 +2178,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
@@ -2633,7 +2246,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
- __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
@@ -2642,7 +2255,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// a2 = argument count (tagged)
// Get the arguments boilerplate from the current native context into t0.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
@@ -2683,7 +2296,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, t0 will point there, otherwise
// it will point to the backing store.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
// v0 = address of new object (tagged)
@@ -2701,7 +2314,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
- __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
__ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
__ Addu(t2, a1, Operand(Smi::FromInt(2)));
__ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
@@ -2784,7 +2397,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// a2 = argument count (tagged)
__ bind(&runtime);
__ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
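GenerateNewSloppyFast allocates the sloppy arguments object, the optional parameter map, and the arguments backing store as one contiguous block (the "all three objects in one go" above). The size computation, sketched in C++ (constants are illustrative stand-ins for the Heap/FixedArray values):

    // Sketch of the size the stub accumulates in t5 before Allocate().
    int SloppyArgumentsAllocationSize(int mapped_count, int arg_count) {
      const int kPointerSize = 4;                 // 32-bit MIPS
      const int kFixedArrayHeaderSize = 8;        // illustrative
      const int kSloppyArgumentsObjectSize = 24;  // illustrative
      int size = 0;
      if (mapped_count > 0) {
        // Parameter map: header + context slot + backing-store slot +
        // one slot per mapped parameter (the Smi::FromInt(2) above).
        size += kFixedArrayHeaderSize + (2 + mapped_count) * kPointerSize;
      }
      // Backing store: header + one slot per actual argument.
      size += kFixedArrayHeaderSize + arg_count * kPointerSize;
      return size + kSloppyArgumentsObjectSize;   // the arguments object
    }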
@@ -2824,7 +2437,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ Allocate(a1, v0, a2, a3, &runtime,
@@ -2834,7 +2447,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
__ lw(t0, MemOperand(t0, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
@@ -2853,7 +2466,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
__ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
__ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
@@ -2882,7 +2495,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
}
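
The size computation in GenerateNewStrict reads as plain arithmetic: the arguments object has a fixed size, and the elements array (header plus one word per argument) is only needed when there are arguments at all. A hedged standalone sketch, with the sizes as assumptions rather than the real Heap constants:

    // Standalone sketch; the constants are assumptions for a 32-bit target.
    constexpr int kPointerSize = 4;
    constexpr int kFixedArrayHeaderSize = 8;                      // map + length
    constexpr int kStrictArgumentsObjectSize = 4 * kPointerSize;  // assumption

    int StrictArgumentsAllocationWords(int argc) {
      int words = kStrictArgumentsObjectSize / kPointerSize;
      if (argc > 0) {
        // Elements array: header plus one pointer-sized slot per argument.
        words += argc + kFixedArrayHeaderSize / kPointerSize;
      }
      return words;
    }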
@@ -2891,7 +2504,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // time, or if regexp entry in generated code is turned off by a runtime
   // switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2905,8 +2518,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
- Isolate* isolate = masm->isolate();
-
Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
@@ -2921,9 +2532,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(
- isolate);
+ isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ li(a0, Operand(address_of_regexp_stack_memory_size));
__ lw(a0, MemOperand(a0, 0));
__ Branch(&runtime, eq, a0, Operand(zero_reg));
@@ -3031,8 +2642,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
// The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
// (5) Sequential string. Load regexp code according to encoding.
@@ -3070,7 +2681,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(),
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
@@ -3094,7 +2705,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
// CFunctionArgumentOperand handles MIPS stack argument slots.
- __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
__ sw(a0, MemOperand(sp, 5 * kPointerSize));
// Argument 8: Indicate that this is a direct call from JavaScript.
@@ -3116,7 +2727,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 5: static offsets vector buffer.
__ li(a0, Operand(
- ExternalReference::address_of_static_offsets_vector(isolate)));
+ ExternalReference::address_of_static_offsets_vector(isolate())));
__ sw(a0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data
@@ -3149,7 +2760,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Locate the code entry and call it.
__ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, t9);
__ LeaveExitFrame(false, no_reg, true);
@@ -3171,9 +2782,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // the exception has not been created yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ li(a1, Operand(isolate->factory()->the_hole_value()));
+ __ li(a1, Operand(isolate()->factory()->the_hole_value()));
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
__ lw(v0, MemOperand(a2, 0));
__ Branch(&runtime, eq, v0, Operand(a1));
@@ -3191,7 +2802,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure and exception return null.
- __ li(v0, Operand(isolate->factory()->null_value()));
+ __ li(v0, Operand(isolate()->factory()->null_value()));
__ DropAndRet(4);
// Process the result from the native regexp code.
@@ -3252,7 +2863,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
+ ExternalReference::address_of_static_offsets_vector(isolate());
__ li(a2, Operand(address_of_static_offsets_vector));
// a1: number of capture registers
@@ -3283,7 +2894,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3329,287 +2940,259 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
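
One pattern repeats through the rest of this file: stubs now capture the Isolate at construction, so code that previously threaded masm->isolate() through every call site can use a member isolate() instead, and GetCode() no longer takes an argument. A minimal sketch of the shape (illustrative only, not the real CodeStub hierarchy):

    // Minimal sketch of the refactor; the class name is illustrative.
    class Isolate;

    class CodeStubSketch {
     public:
      explicit CodeStubSketch(Isolate* isolate) : isolate_(isolate) {}
      // Every stub method can reach the isolate without a parameter now.
      Isolate* isolate() const { return isolate_; }
     private:
      Isolate* isolate_;
    };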
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ lw(a1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(a1, &slowcase);
- __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
- // Smi-tagging is equivalent to multiplying by 2.
-  // Allocate RegExpResult followed by FixedArray with size in a2.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
- __ Addu(a2, t1, Operand(objects_size));
- __ Allocate(
- a2, // In: Size, in words.
- v0, // Out: Start of allocation (tagged).
- a3, // Scratch register.
- t0, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // v0: Start of allocated area, object-tagged.
- // a1: Number of elements in array, as smi.
- // t1: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
- __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
- __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ lw(a1, MemOperand(sp, kPointerSize * 0));
- __ lw(a2, MemOperand(sp, kPointerSize * 1));
- __ lw(t2, MemOperand(sp, kPointerSize * 2));
- __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
- __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
- __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // v0: JSArray, tagged.
- // a3: FixedArray, tagged.
- // t1: Number of elements in array, untagged.
-
- // Set map.
- __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ sll(t2, t1, kSmiTagSize);
- __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with undefined.
- // v0: JSArray, tagged.
- // a2: undefined.
- // a3: Start of elements in FixedArray.
- // t1: Number of elements to fill.
- Label loop;
- __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
- __ addu(t1, t1, a3); // Point past last element to store.
- __ bind(&loop);
-  __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 is past end of elements.
- __ sw(a2, MemOperand(a3));
- __ Branch(&loop, USE_DELAY_SLOT);
- __ addiu(a3, a3, kPointerSize); // In branch delay slot.
-
- __ bind(&done);
- __ DropAndRet(3);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
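
For reference, the deleted fast path above sized one allocation to hold both the JSRegExpResult and its elements FixedArray. The arithmetic, as a standalone sketch with the object sizes as assumptions rather than the real layout constants:

    // Standalone sketch; sizes are assumptions for a 32-bit target.
    constexpr int kPointerSize = 4;
    constexpr int kJSRegExpResultSize = 6 * kPointerSize;    // assumption
    constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

    int RegExpResultAllocationWords(int num_elements) {
      // One contiguous block: the result object followed by its elements.
      return (kJSRegExpResultSize + kFixedArrayHeaderSize) / kPointerSize +
             num_elements;
    }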
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : Feedback vector
+ // a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ // Load the cache state into t0.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ Branch(&done, eq, a3, Operand(a1));
-
- // If we came here, we need to see if we are the array function.
-  // If we didn't have a matching function, and we didn't find the megamorphic
-  // sentinel, then the cell holds either some other function or an
- // AllocationSite. Do a map check on the object in a3.
- __ lw(t1, FieldMemOperand(a3, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, t1, Operand(at));
-
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&megamorphic, ne, a1, Operand(a3));
- __ jmp(&done);
+ __ Branch(&done, eq, t0, Operand(a1));
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+    // If we didn't have a matching function, and we didn't find the
+    // megamorphic sentinel, then the slot holds either some other function
+    // or an AllocationSite. Do a map check on the object in t0.
+ __ lw(t1, FieldMemOperand(t0, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, t1, Operand(at));
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&megamorphic, ne, a1, Operand(t0));
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialize, eq, a3, Operand(at));
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&initialize, eq, t0, Operand(at));
   // MegamorphicSentinel is an immortal immovable object, so no write-barrier
   // is needed.
__ bind(&megamorphic);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
+ // An uninitialized cache is patched with the function.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&not_array_function, ne, a1, Operand(a3));
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&not_array_function, ne, a1, Operand(t0));
+
+    // The target function is the Array constructor.
+    // Create an AllocationSite if we don't already have it, and store it in
+    // the slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ const RegList kSavedRegs =
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
- // The target function is the Array constructor.
- // Create an AllocationSite if we don't already have it, store it in the cell.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs =
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6; // a2
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(a0);
+ __ MultiPush(kSavedRegs);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(a0);
- __ MultiPush(kSavedRegs);
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ __ CallStub(&create_stub);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ __ MultiPop(kSavedRegs);
+ __ SmiUntag(a0);
+ }
+ __ Branch(&done);
- __ MultiPop(kSavedRegs);
- __ SmiUntag(a0);
+ __ bind(&not_array_function);
}
- __ Branch(&done);
- __ bind(&not_array_function);
- __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sw(a1, MemOperand(t0, 0));
+
+ __ Push(t0, a2, a1);
+ __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(t0, a2, a1);
__ bind(&done);
}
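
GenerateRecordCallTarget is easiest to follow as a small state machine over the feedback slot: uninitialized becomes monomorphic on the first recorded call, monomorphic stays put on a matching callee and degrades to megamorphic on a mismatch, and megamorphic is terminal. A hedged C++ sketch of just those transitions (the real slot also distinguishes AllocationSites for the Array function):

    #include <cstdint>

    // Transition sketch only; the real slot stores a function, an
    // AllocationSite, or one of the sentinel symbols.
    enum class CacheState { kUninitialized, kMonomorphic, kMegamorphic };

    CacheState RecordCallTarget(CacheState state, uintptr_t cached,
                                uintptr_t callee) {
      switch (state) {
        case CacheState::kUninitialized:
          return CacheState::kMonomorphic;  // patch the callee into the slot
        case CacheState::kMonomorphic:
          return cached == callee ? CacheState::kMonomorphic
                                  : CacheState::kMegamorphic;
        case CacheState::kMegamorphic:
          return CacheState::kMegamorphic;  // terminal; never downgraded
      }
      return state;
    }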
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // a1 : the function to call
- // a2 : cache cell for call target
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call, ne, t0, Operand(at));
- // Patch the receiver on the stack with the global receiver object.
- __ lw(a3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
- }
-
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
- // Get the map of the function object.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- }
+ // Do not transform the receiver for strict mode functions.
+ int32_t strict_mode_function_mask =
+ 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+  // Do not transform the receiver for natives (compiler hints already in t0).
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+ __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
+ __ Branch(cont, ne, at, Operand(zero_reg));
+}
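
The mask arithmetic in EmitContinueIfStrictOrNative hinges on the compiler-hints word being a Smi, which shifts every bit index up by kSmiTagSize. A standalone sketch of the test, with the bit positions as assumptions:

    #include <cstdint>

    // Standalone sketch; the bit positions are assumptions, not V8's layout.
    constexpr int kSmiTagSize = 1;
    constexpr int kStrictModeFunctionBit = 0;  // assumption
    constexpr int kNativeBit = 1;              // assumption

    bool ReceiverNeedsWrapping(int32_t compiler_hints_smi) {
      int32_t mask = (1 << (kStrictModeFunctionBit + kSmiTagSize)) |
                     (1 << (kNativeBit + kSmiTagSize));
      // Strict-mode and native functions take the receiver unmodified.
      return (compiler_hints_smi & mask) == 0;
    }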
- // Fast-case: Invoke the function now.
- // a1: pushed function
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call_as_function, eq, t0, Operand(at));
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
- }
+static void EmitSlowCase(MacroAssembler* masm,
+ int argc,
+ Label* non_function) {
// Check for function proxy.
- __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(t1, CALL_AS_METHOD);
+ __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ push(a1); // Put proxy as additional argument.
+ __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
+ __ mov(a2, zero_reg);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
- __ bind(&non_function);
- __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
- __ li(a0, Operand(argc_)); // Set up the number of arguments.
+ __ bind(non_function);
+ __ sw(a1, MemOperand(sp, argc * kPointerSize));
+ __ li(a0, Operand(argc)); // Set up the number of arguments.
__ mov(a2, zero_reg);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(t1, CALL_AS_METHOD);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
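
The slow path splits on one property of the callee: function proxies are routed to CALL_FUNCTION_PROXY with the proxy pushed as an extra argument, everything else to CALL_NON_FUNCTION. The decision itself, as a trivially compilable sketch (enum names are illustrative, not real V8 types):

    // Illustrative routing sketch; not the real Builtins enum.
    enum class CalleeKind { kFunctionProxy, kOther };
    enum class SlowTarget { kCallFunctionProxy, kCallNonFunction };

    SlowTarget RouteSlowCase(CalleeKind kind) {
      return kind == CalleeKind::kFunctionProxy
                 ? SlowTarget::kCallFunctionProxy
                 : SlowTarget::kCallNonFunction;
    }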
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(a1);
+ }
+ __ Branch(USE_DELAY_SLOT, cont);
+ __ sw(v0, MemOperand(sp, argc * kPointerSize));
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // a1 : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &non_function);
+
+    // Go to the slow case if we do not have a function.
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
+ }
+
+ // Fast-case: Invoke the function now.
+ // a1: pushed function
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+ if (needs_checks) {
+ __ JumpIfSmi(a3, &wrap);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : feedback vector
+ // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, at);
+ if (FLAG_pretenuring_call_new) {
+      // Put the AllocationSite from the feedback vector into a2. Adding
+      // kPointerSize reads the entry at slot a3 + 1, which is where the
+      // AllocationSite is stored.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(a2, t1);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = a3;
+ Register jmp_reg = t0;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3618,21 +3201,157 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
- // a3: object type
+ // t0: object type
Label do_call;
__ bind(&slow);
- __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
__ bind(&non_function_call);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
   // Set expected number of arguments to zero (not changing a0).
__ li(a2, Operand(0, RelocInfo::NONE32));
- __ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(vector, FieldMemOperand(vector,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ lw(vector, FieldMemOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
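
Every stub that indexes the feedback vector uses the same idiom: the slot arrives as a Smi, so shifting it left by kPointerSizeLog2 - kSmiTagSize scales it straight to a byte offset, to which the FixedArray header is added. A standalone check of that arithmetic (the header size is an assumption for a 32-bit build):

    #include <cassert>
    #include <cstdint>

    // Standalone sketch; constants assume a 32-bit target with 1-bit Smi tags.
    constexpr int kPointerSize = 4;
    constexpr int kPointerSizeLog2 = 2;
    constexpr int kSmiTagSize = 1;
    constexpr int kFixedArrayHeaderSize = 8;  // map + length, an assumption

    int32_t SlotToByteOffset(int32_t smi_slot) {
      // A Smi is the value shifted left by kSmiTagSize, so one more shift of
      // (kPointerSizeLog2 - kSmiTagSize) scales it to a byte offset.
      return (smi_slot << (kPointerSizeLog2 - kSmiTagSize)) +
             kFixedArrayHeaderSize;
    }

    int main() {
      int32_t slot = 3;
      int32_t smi_slot = slot << kSmiTagSize;  // Smi-tag the index
      assert(SlotToByteOffset(smi_slot) ==
             kFixedArrayHeaderSize + slot * kPointerSize);
      return 0;
    }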
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // a1 - function
+ // a3 - slot id
+ Label miss;
+
+ EmitLoadTypeFeedbackVector(masm, a2);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+ __ Branch(&miss, ne, a1, Operand(at));
+
+ __ li(a0, Operand(arg_count()));
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(at, a2, Operand(at));
+ __ lw(a2, FieldMemOperand(at, FixedArray::kHeaderSize));
+ // Verify that a2 contains an AllocationSite
+ __ AssertUndefinedOrAllocationSite(a2, at);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+  // The slow case; we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unexpected code address");
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+  // a1 - function
+  // a3 - slot id (Smi)
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, a2);
+
+  // The checks. First, does a1 match the recorded monomorphic target?
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
+ __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ // Compute the receiver in sloppy mode.
+ __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+ __ JumpIfSmi(a3, &wrap);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ Branch(&slow_start, eq, t0, Operand(at));
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&miss, eq, t0, Operand(at));
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
+ __ Branch(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Miss);
+
+  // The slow case.
+  __ bind(&slow_start);
+  // Check that the function is really a JavaScript function.
+  // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &non_function);
+
+  // Go to the slow case if we do not have a function.
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Push the receiver, the function, and the feedback info.
+ __ Push(t0, a1, a2, a3);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to a1 and exit the internal frame.
+ __ mov(a1, v0);
+ }
}
@@ -3700,7 +3419,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
@@ -3724,7 +3443,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
__ Move(result_, v0);
@@ -3783,290 +3502,53 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for
- // very short strings.
- if (!ascii) {
- __ addu(count, count, count);
- }
- __ Branch(&done, eq, count, Operand(zero_reg));
- __ addu(count, dest, count); // Count now points to the last dest byte.
-
- __ bind(&loop);
- __ lbu(scratch, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ Branch(&loop, lt, dest, Operand(count));
-
- __ bind(&done);
-}
-
-
enum CopyCharactersFlags {
COPY_ASCII = 1,
DEST_ALWAYS_ALIGNED = 2
};
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
- __ And(scratch4, dest, Operand(kPointerAlignmentMask));
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ String::Encoding encoding) {
+ if (FLAG_debug_code) {
+ // Check that destination is word aligned.
+ __ And(scratch, dest, Operand(kPointerAlignmentMask));
__ Check(eq,
kDestinationOfCopyNotAligned,
- scratch4,
+ scratch,
Operand(zero_reg));
}
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
// Assumes word reads and writes are little endian.
// Nothing to do for zero characters.
Label done;
- if (!ascii) {
- __ addu(count, count, count);
- }
- __ Branch(&done, eq, count, Operand(zero_reg));
-
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ Subu(scratch1, count, Operand(8));
- __ Addu(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ And(scratch4, dest, Operand(kReadAlignmentMask));
- Label dest_aligned;
- __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
- Label aligned_loop;
- __ bind(&aligned_loop);
- __ lbu(scratch1, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch1, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ addiu(scratch4, scratch4, 1);
- __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
- __ bind(&dest_aligned);
- }
-
- Label simple_loop;
-
- __ And(scratch4, src, Operand(kReadAlignmentMask));
- __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
-
- // Loop for src/dst that are not aligned the same way.
- // This loop uses lwl and lwr instructions. These instructions
- // depend on the endianness, and the implementation assumes little-endian.
- {
- Label loop;
- __ bind(&loop);
- __ lwr(scratch1, MemOperand(src));
- __ Addu(src, src, Operand(kReadAlignment));
- __ lwl(scratch1, MemOperand(src, -1));
- __ sw(scratch1, MemOperand(dest));
- __ Addu(dest, dest, Operand(kReadAlignment));
- __ Subu(scratch2, limit, dest);
- __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ __ Addu(count, count, count);
}
- __ Branch(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dest, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ lw(scratch1, MemOperand(src));
- __ Addu(src, src, Operand(kReadAlignment));
- __ sw(scratch1, MemOperand(dest));
- __ Addu(dest, dest, Operand(kReadAlignment));
- __ Subu(scratch2, limit, dest);
- __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
- }
+ Register limit = count; // Read until dest equals this.
+ __ Addu(limit, dest, Operand(count));
+ Label loop_entry, loop;
// Copy bytes from src to dest until dest hits limit.
- __ bind(&byte_loop);
- // Test if dest has already reached the limit.
- __ Branch(&done, ge, dest, Operand(limit));
- __ lbu(scratch1, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch1, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ Branch(&byte_loop);
+ __ Branch(&loop_entry);
+ __ bind(&loop);
+ __ lbu(scratch, MemOperand(src));
+ __ Addu(src, src, Operand(1));
+ __ sb(scratch, MemOperand(dest));
+ __ Addu(dest, dest, Operand(1));
+ __ bind(&loop_entry);
+ __ Branch(&loop, lt, dest, Operand(limit));
__ bind(&done);
}
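
The rewritten copy helper gives up the old word-at-a-time alignment games: it doubles the count for two-byte strings and then copies single bytes until dest reaches the limit. Its C++ equivalent is only a few lines (little-endian layout assumed, as in the MIPS code):

    #include <cstddef>
    #include <cstdint>

    // Standalone equivalent of the simplified loop; a sketch, not V8 code.
    enum class Encoding { kOneByte, kTwoByte };

    void CopyCharacters(uint8_t* dest, const uint8_t* src, size_t count,
                        Encoding encoding) {
      if (encoding == Encoding::kTwoByte) count *= 2;  // two bytes per char
      const uint8_t* limit = dest + count;  // copy until dest reaches this
      while (dest < limit) {
        *dest++ = *src++;
      }
    }

Slower per byte than the deleted long-copy routine, but the strings handled here are short and the simpler loop is far easier to maintain.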
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
-  // Make sure that both characters are not digits, as such strings have a
-  // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ Subu(scratch, c1, Operand(static_cast<int>('0')));
- __ Branch(&not_array_index,
- Ugreater,
- scratch,
- Operand(static_cast<int>('9' - '0')));
- __ Subu(scratch, c2, Operand(static_cast<int>('0')));
-
-  // If the check failed, combine both characters into a single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in c1 register.
- Label tmp;
- __ sll(scratch1, c2, kBitsPerByte);
- __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
- __ Or(c1, c1, scratch1);
- __ bind(&tmp);
- __ Branch(
- not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ sll(scratch, c2, kBitsPerByte);
- __ Or(chars, chars, scratch);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load string table.
- // Load address of first element of the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
- __ sra(mask, mask, 1);
- __ Addu(mask, mask, -1);
-
- // Calculate untagged address of the first element of the string table.
- Register first_string_table_element = string_table;
- __ Addu(first_string_table_element, string_table,
- Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers.
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_string_table_element: address of the first element of
- // the string table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the string table.
- const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- if (i > 0) {
- __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ And(candidate, candidate, Operand(mask));
-
-    // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ sll(scratch, candidate, kPointerSizeLog2);
- __ Addu(scratch, scratch, first_string_table_element);
- __ lw(candidate, MemOperand(scratch));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ GetObjectType(candidate, scratch, scratch);
- __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
-
- __ Branch(not_found, eq, undefined, Operand(candidate));
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole,
- scratch, Operand(candidate));
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
- __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ mov(v0, result);
-}
-
-
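
The deleted probe implemented a bounded open-addressing lookup: hash the two characters, mask by the table capacity, and try a fixed number of probe offsets before giving up to the runtime. A standalone sketch of that shape (the probe step is an assumption; the real StringTable has its own GetProbeOffset sequence):

    #include <cstdint>
    #include <vector>

    // Standalone sketch; the quadratic probe step is an assumption.
    int FindEntry(const std::vector<uint32_t>& table, uint32_t hash,
                  uint32_t key) {
      const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // 2^n size
      const int kProbes = 4;  // bounded, as in the deleted helper
      for (int i = 0; i < kProbes; ++i) {
        uint32_t entry =
            (hash + static_cast<uint32_t>(i) * (i + 1) / 2) & mask;
        if (table[entry] == key) return static_cast<int>(entry);
      }
      return -1;  // caller falls back to the not_found path
    }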
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
@@ -4263,7 +3745,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Handle external string.
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ And(t0, a1, Operand(kShortExternalStringTag));
__ Branch(&runtime, ne, t0, Operand(zero_reg));
__ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
@@ -4295,8 +3777,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// a2: result string length
// t1: first character of substring to copy
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharacters(
+ masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
__ jmp(&return_v0);
// Allocate and copy the resulting two-byte string.
@@ -4315,17 +3797,17 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// a2: result length.
// t1: first character of substring to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharacters(
+ masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
__ bind(&return_v0);
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
__ DropAndRet(3);
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// v0: original string
@@ -4463,7 +3945,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
// Stack frame on entry.
// sp[0]: right string
@@ -4490,356 +3972,35 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfEitherSmi(a0, a1, &call_runtime);
- // Load instance types.
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ Or(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
-
- // Both arguments are strings.
- // a0: first string
- // a1: second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
-    // Check if either of the strings is empty. In that case return the other.
-    // These tests use a zero-length check on the string length, which is a Smi.
- // Assert that Smi::FromInt(0) is really 0.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(Smi::FromInt(0) == 0);
- __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
- __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
- __ mov(v0, a0); // Assume we'll return first string (from a0).
- __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
- __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
- __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
- __ and_(t4, t4, t5); // Branch if both strings were non-empty.
- __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&strings_not_empty);
- }
-
- // Untag both string-lengths.
- __ sra(a2, a2, kSmiTagSize);
- __ sra(a3, a3, kSmiTagSize);
-
- // Both strings are non-empty.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ Addu(t2, a2, Operand(a3));
-  // Use the string table when adding two one-character strings, as it
-  // helps later optimizations to return a string here.
- __ Branch(&longer_than_two, ne, t2, Operand(2));
-
- // Check that both strings are non-external ASCII strings.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&make_two_character_string);
- // Resulting string has length 2 and first chars of two strings
- // are combined into single halfword in a2 register.
-  // So we can fill the resulting string without two loops, using a single
-  // halfword store instruction (which assumes that the processor is
-  // in little-endian mode).
- __ li(t2, Operand(2));
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
-  // kMaxLength + 1 is representable as a shifted literal; kMaxLength is not.
- __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
- __ And(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kStringEncodingMask));
- __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ li(t0, Operand(high_promotion_mode));
- __ lw(t0, MemOperand(t0, 0));
- __ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
-
- __ mov(t3, v0);
- __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
- __ RecordWriteField(t3,
- ConsString::kFirstOffset,
- a0,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
- __ RecordWriteField(t3,
- ConsString::kSecondOffset,
- a1,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
-
- __ bind(&skip_write_barrier);
- __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
- __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
-
- __ bind(&after_writing);
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&non_ascii);
-  // At least one of the strings is two-byte. Check whether it happens
-  // to contain only one-byte characters.
- // t0: first instance type.
- // t1: second instance type.
-  // Branch to ascii_data if _both_ instances have kOneByteDataHintMask set.
- __ And(at, t0, Operand(kOneByteDataHintMask));
- __ and_(at, at, t1);
- __ Branch(&ascii_data, ne, at, Operand(zero_reg));
- __ Xor(t0, t0, Operand(t1));
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ And(t0, t0, Operand(kOneByteStringTag | kOneByteDataHintTag));
- __ Branch(&ascii_data, eq, t0,
- Operand(kOneByteStringTag | kOneByteDataHintTag));
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
- __ Branch(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t2: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
-  // Check whether both strings have the same encoding.
- __ Xor(t3, t0, Operand(t1));
- __ And(t3, t3, Operand(kStringEncodingMask));
- __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t0, Operand(kStringRepresentationMask));
-
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_first_add;
- __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_first_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t0, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_second_add;
- __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_second_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t1, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // t3: first character of first string
- // a1: first character of second string
- // a2: length of first string
- // a3: length of second string
- // t2: sum of lengths.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(t4, t1, Operand(kStringEncodingMask));
- __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
-
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
-
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string.
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : left
+ // -- a0 : right
+ // -- ra : return address
+ // -----------------------------------
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ // Load a2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ li(a2, handle(isolate()->heap()->undefined_value()));
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ And(at, a2, Operand(kSmiTagMask));
+ __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
+ __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
}
-}
-
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(a0);
- __ push(a1);
-}
-
-
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(a1);
- __ pop(a0);
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ GetObjectType(arg, scratch1, scratch1);
- __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
- __ mov(arg, scratch1);
- __ sw(arg, MemOperand(sp, stack_offset));
- __ bind(&done);
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
}
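
The undefined dummy loaded into a2 is the emit-now-patch-later pattern: the stub template is generated once with a placeholder constant, and each instantiated copy gets the real AllocationSite written over it. A generic sketch of the idea (the buffer type and names are illustrative, not the real Assembler API):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Generic emit-now-patch-later sketch; not the real code-generation API.
    struct CodeBuffer {
      std::vector<uint32_t> words;

      size_t EmitPlaceholder() {
        words.push_back(0);       // dummy constant, overwritten below
        return words.size() - 1;  // remember where it lives
      }

      void Patch(size_t index, uint32_t real_value) {
        words[index] = real_value;  // e.g. the AllocationSite address
      }
    };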
@@ -4934,9 +4095,9 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
CompareIC::GENERIC);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
@@ -5121,7 +4282,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -5170,7 +4331,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
__ Push(ra, a1, a0);
@@ -5215,7 +4376,7 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
intptr_t loc =
- reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target);
__ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(ra);
@@ -5290,7 +4451,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ MultiPush(spill_mask);
__ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ li(a1, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
__ mov(at, v0);
__ MultiPop(spill_mask);
@@ -5369,7 +4530,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ Move(a0, elements);
__ Move(a1, name);
}
- NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
__ mov(scratch2, a2);
__ mov(at, v0);
@@ -5477,16 +4638,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
// Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return true; // FPU is a base requirement for V8.
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
@@ -5553,7 +4709,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -5566,13 +4722,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -5583,21 +4739,12 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
__ Move(a1, address);
- __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
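+// For reference — a sketch of the C callee (an assumption about its exact
+// signature); the three arguments marshalled above line up as
+//   void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
+//                                                Object** slot,
+//                                                Isolate* isolate);
+// with a0 = object, a1 = slot address, a2 = isolate.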
@@ -5756,8 +4903,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ lw(a1, MemOperand(fp, parameter_count_offset));
@@ -5771,27 +4918,9 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
-void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ mov(a1, v0);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ lw(a0, MemOperand(fp, parameter_count_offset));
- // The parameter count above includes the receiver for the arguments passed to
- // the deoptimization handler. Subtract the receiver for the parameter count
- // for the call.
- __ Subu(a0, a0, 1);
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ParameterCount argument_count(a0);
- __ InvokeFunction(
- a1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- ProfileEntryHookStub stub;
+ ProfileEntryHookStub stub(masm->isolate());
__ push(ra);
__ CallStub(&stub);
__ pop(ra);
@@ -5830,27 +4959,30 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
ASSERT(IsPowerOf2(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
-
+ __ Subu(sp, sp, kCArgsSlotsSize);
#if defined(V8_HOST_ARCH_MIPS)
int32_t entry_hook =
- reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
- __ li(at, Operand(entry_hook));
+ reinterpret_cast<int32_t>(isolate()->function_entry_hook());
+ __ li(t9, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
// It additionally takes an isolate as a third parameter.
- __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ li(at, Operand(ExternalReference(&dispatcher,
+ __ li(t9, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
- masm->isolate())));
+ isolate())));
#endif
- __ Call(at);
+  // Call the C function through t9 to conform to the MIPS PIC ABI.
+ __ Call(t9);
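+  // In the MIPS o32 PIC ABI the callee expects its own entry address in
+  // t9 ($25) so its prologue can materialize $gp, roughly:
+  //   lui   gp, %hi(_gp_disp)
+  //   addiu gp, gp, %lo(_gp_disp)
+  //   addu  gp, gp, t9
+  // Calling through at would leave t9 stale and break that sequence.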
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
__ mov(sp, s5);
+ } else {
+ __ Addu(sp, sp, kCArgsSlotsSize);
}
// Also pop ra to get Ret(0).
@@ -5863,20 +4995,15 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
- mode);
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- T stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
}
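+  // A sketch of the conditional TailCallStub overload this relies on (an
+  // assumption based on this diff, not a quote of the macro assembler):
+  //   void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond,
+  //                                     Register r1, const Operand& r2) {
+  //     Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
+  //   }
+  // Each iteration therefore jumps to its stub only when a3 matches.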
// If we reached this point there is a problem.
@@ -5889,7 +5016,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // a2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// a0 - number of arguments
// a1 - constructor?
@@ -5916,48 +5043,43 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ Addu(a3, a3, Operand(1));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
if (FLAG_debug_code) {
- __ lw(t1, FieldMemOperand(t1, 0));
+ __ lw(t1, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSiteInCell, t1, Operand(at));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
}
// Save the resulting elements kind in type info. We can't just store a3
// in the AllocationSite::transition_info field because elements kind is
   // restricted to a portion of the field; upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ lw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+ __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ sw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+ __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- ArraySingleArgumentConstructorStub stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
}
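+  // The fast ElementsKind sequence alternates packed/holey (e.g.
+  // FAST_SMI_ELEMENTS(0) -> FAST_HOLEY_SMI_ELEMENTS(1)), so the "a3 + 1"
+  // above turns a packed kind into its holey counterpart before the
+  // transition info is updated.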
// If we reached this point there is a problem.
@@ -5970,20 +5092,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
}
}
}
@@ -6004,12 +5121,12 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate);
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
}
}
@@ -6045,48 +5162,35 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc (only if argument_count_ == ANY)
// -- a1 : constructor
- // -- a2 : type info cell
+ // -- a2 : AllocationSite or undefined
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ SmiTst(a3, at);
+ __ SmiTst(t0, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
- __ GetObjectType(a3, a3, t0);
+ __ GetObjectType(t0, t0, t1);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t0, Operand(MAP_TYPE));
+ t1, Operand(MAP_TYPE));
- // We should either have undefined in a2 or a valid cell.
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&okay_here, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, kExpectedPropertyCellInRegisterA2,
- a3, Operand(cell_map));
- __ bind(&okay_here);
+ // We should either have undefined in a2 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(a2, t0);
}
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
-
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ lw(t0, FieldMemOperand(a3, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&no_info, ne, t0, Operand(at));
- __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
+ __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -6099,34 +5203,25 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
- __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
- InternalArrayNoArgumentConstructorStub stub0(kind);
- __ TailCallStub(&stub0);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0, lo, a0, Operand(1));
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN, hi, a0, Operand(1));
if (IsFastPackedElementsKind(kind)) {
     // We might need to create a holey array;
     // look at the first argument.
__ lw(at, MemOperand(sp, 0));
- __ Branch(&normal_sequence, eq, at, Operand(zero_reg));
InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
}
- __ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(kind);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
-
- __ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
- __ TailCallStub(&stubN);
}
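+// The conditional tail calls above replace the old label-based dispatch:
+//   argc == 0  -> stub0  (lo: a0 < 1)
+//   argc  > 1  -> stubN  (hi: a0 > 1)
+//   argc == 1  -> falls through to stub1_holey/stub1.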
@@ -6160,7 +5255,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// but the following bit field extraction takes care of that anyway.
__ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ __ DecodeField<Map::ElementsKindBits>(a3);
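+  // A sketch of DecodeField (an assumption; the templated replacement for
+  // the explicit Ext() call above):
+  //   template<typename Field>
+  //   void MacroAssembler::DecodeField(Register reg) {
+  //     Ext(reg, reg, Field::kShift, Field::kSize);
+  //   }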
if (FLAG_debug_code) {
Label done;
@@ -6180,6 +5275,140 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : callee
+ // -- t0 : call_data
+ // -- a2 : holder
+ // -- a1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1)* 4] : first argument
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ Register callee = a0;
+ Register call_data = t0;
+ Register holder = a2;
+ Register api_function_address = a1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // Save context, callee and call data.
+ __ Push(context, callee, call_data);
+ // Load context from callee.
+ __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // Push return value and default return value.
+ __ Push(scratch, scratch);
+ __ li(scratch,
+ Operand(ExternalReference::isolate_address(isolate())));
+ // Push isolate and holder.
+ __ Push(scratch, holder);
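+  // After these pushes, sp points at a block matching the
+  // FunctionCallbackArguments layout asserted above:
+  //   [sp + 0*kPointerSize] holder
+  //   [sp + 1*kPointerSize] isolate
+  //   [sp + 2*kPointerSize] return value default
+  //   [sp + 3*kPointerSize] return value
+  //   [sp + 4*kPointerSize] call data
+  //   [sp + 5*kPointerSize] callee
+  //   [sp + 6*kPointerSize] context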
+
+ // Prepare arguments.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ ASSERT(!api_function_address.is(a0) && !scratch.is(a0));
+ // a0 = FunctionCallbackInfo&
+  // The arguments object is located after the return address.
+ __ Addu(a0, sp, Operand(1 * kPointerSize));
+ // FunctionCallbackInfo::implicit_args_
+ __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ sw(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(at, Operand(argc));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+  // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- a2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = a2;
+
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // a1 (internal::Object** args_) as the data.
+ __ sw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/mips/code-stubs-mips.h b/chromium/v8/src/mips/code-stubs-mips.h
index c3e05b8a2d4..3e0eaa160e0 100644
--- a/chromium/v8/src/mips/code-stubs-mips.h
+++ b/chromium/v8/src/mips/code-stubs-mips.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_CODE_STUBS_ARM_H_
#define V8_MIPS_CODE_STUBS_ARM_H_
-#include "ic-inl.h"
+#include "src/ic-inl.h"
namespace v8 {
@@ -38,34 +15,10 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) {}
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
@@ -82,50 +35,17 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
- bool ascii);
+ String::Encoding encoding);
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
@@ -144,47 +64,48 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
+};
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
+class StoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ explicit StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+ : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm);
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() { return StoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
- const StringAddFlags flags_;
+ void Generate(MacroAssembler* masm);
};
-
-class SubStringStub: public PlatformCodeStub {
+class RestoreRegistersStateStub: public PlatformCodeStub {
public:
- SubStringStub() {}
+ explicit RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+ : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+ static void GenerateAheadOfTime(Isolate* isolate);
private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
+ Major MajorKey() { return RestoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
void Generate(MacroAssembler* masm);
};
-
class StringCompareStub: public PlatformCodeStub {
public:
- StringCompareStub() { }
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
   // Compares two flat ASCII strings and returns the result in v0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -225,11 +146,13 @@ class StringCompareStub: public PlatformCodeStub {
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
- WriteInt32ToHeapNumberStub(Register the_int,
+ WriteInt32ToHeapNumberStub(Isolate* isolate,
+ Register the_int,
Register the_heap_number,
Register scratch,
Register scratch2)
- : the_int_(the_int),
+ : PlatformCodeStub(isolate),
+ the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch),
sign_(scratch2) {
@@ -268,12 +191,14 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
class RecordWriteStub: public PlatformCodeStub {
public:
- RecordWriteStub(Register object,
+ RecordWriteStub(Isolate* isolate,
+ Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
- : object_(object),
+ : PlatformCodeStub(isolate),
+ object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
@@ -419,7 +344,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
@@ -458,7 +383,7 @@ class RecordWriteStub: public PlatformCodeStub {
// moved by GC
class DirectCEntryStub: public PlatformCodeStub {
public:
- DirectCEntryStub() {}
+ explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
@@ -474,7 +399,8 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+ NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+ : PlatformCodeStub(isolate), mode_(mode) { }
void Generate(MacroAssembler* masm);
diff --git a/chromium/v8/src/mips/codegen-mips.cc b/chromium/v8/src/mips/codegen-mips.cc
index 3a87c5af886..5d613d0fb0a 100644
--- a/chromium/v8/src/mips/codegen-mips.cc
+++ b/chromium/v8/src/mips/codegen-mips.cc
@@ -1,54 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "codegen.h"
-#include "macro-assembler.h"
-#include "simulator-mips.h"
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/mips/simulator-mips.h"
namespace v8 {
namespace internal {
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- return NULL;
-}
-
-
#define __ masm.
@@ -62,10 +27,10 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -79,21 +44,13 @@ UnaryMathFunction CreateExpFunction() {
Register temp2 = t1;
Register temp3 = t2;
- if (!IsMipsSoftFloatABI) {
- // Input value is in f12 anyway, nothing to do.
- } else {
- __ Move(input, a0, a1);
- }
+ __ MovFromFloatParameter(input);
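+  // MovFromFloatParameter/MovToFloatResult hide the ABI split the deleted
+  // code handled by hand: under the hard-float ABI the value is already in
+  // f12/f0, while under the soft-float ABI it is shuffled between
+  // {a0, a1}/{v0, v1} and the FPU register.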
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(
&masm, input, result, double_scratch1, double_scratch2,
temp1, temp2, temp3);
__ Pop(temp3, temp2, temp1);
- if (!IsMipsSoftFloatABI) {
- // Result is already in f0, nothing to do.
- } else {
- __ Move(v0, v1, result);
- }
+ __ MovToFloatResult(result);
__ Ret();
}
@@ -113,13 +70,564 @@ UnaryMathFunction CreateExpFunction() {
}
-#undef __
+#if defined(V8_HOST_ARCH_MIPS)
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+  // This code assumes that cache lines are 32 bytes; it will not work
+  // correctly if the cache line is larger.
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ Label lastb, unaligned, aligned, chkw,
+ loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
+ leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
+ ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
+
+ // The size of each prefetch.
+ uint32_t pref_chunk = 32;
+    // The maximum size of a prefetch; it must not be less than pref_chunk.
+    // If the real size of a prefetch is greater than max_pref_size and
+    // the kPrefHintPrepareForStore hint is used, the code will not work
+    // correctly.
+ uint32_t max_pref_size = 128;
+ ASSERT(pref_chunk < max_pref_size);
+
+ // pref_limit is set based on the fact that we never use an offset
+    // greater than 5 on a store pref and that a single pref can
+    // never be larger than max_pref_size.
+ uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
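+    // For example, with pref_chunk = 32 and max_pref_size = 128 this gives
+    // pref_limit = (5 * 32) + 128 = 288 bytes.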
+ int32_t pref_hint_load = kPrefHintLoadStreamed;
+ int32_t pref_hint_store = kPrefHintPrepareForStore;
+ uint32_t loadstore_chunk = 4;
+
+ // The initial prefetches may fetch bytes that are before the buffer being
+    // copied. Start copies with an offset of 4 to avoid this situation when
+ // using kPrefHintPrepareForStore.
+ ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
+ pref_chunk * 4 >= max_pref_size);
+
+ // If the size is less than 8, go to lastb. Regardless of size,
+    // copy the dst pointer to v0 for the return value.
+ __ slti(t2, a2, 2 * loadstore_chunk);
+ __ bne(t2, zero_reg, &lastb);
+ __ mov(v0, a0); // In delay slot.
+
+ // If src and dst have different alignments, go to unaligned, if they
+ // have the same alignment (but are not actually aligned) do a partial
+ // load/store to make them aligned. If they are both already aligned
+ // we can start copying at aligned.
+ __ xor_(t8, a1, a0);
+ __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
+ __ bne(t8, zero_reg, &unaligned);
+ __ subu(a3, zero_reg, a0); // In delay slot.
+
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &aligned); // Already aligned.
+    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.
+
+ if (kArchEndian == kLittle) {
+ __ lwr(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swr(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ } else {
+ __ lwl(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swl(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ }
+    // Now dst and src are both word-aligned. Set a2 to count how many bytes
+    // we have to copy after all the 64-byte chunks are copied and a3 to the
+    // dst pointer after all the 64-byte chunks have been copied. We will
+    // loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&aligned);
+ __ andi(t8, a2, 0x3f);
+ __ beq(a2, t8, &chkw); // Less than 64?
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
+
+    // When prefetching inside the loop with the kPrefHintPrepareForStore
+    // hint, a0+x must not be past the "t0-32" address. This means:
+    // for x=128 the last "safe" a0 address is "t0-160"; alternatively, for
+    // x=64 the last "safe" a0 address is "t0-96". In the current version we
+    // will use "pref hint, 128(a0)", so "t0-160" is the limit.
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(t0, a0, a2); // t0 is the "past the end" address.
+ __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+ __ bind(&loop16w);
+ __ lw(t0, MemOperand(a1));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
+ __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&skip_pref);
+ __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+
+ __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+
+ __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+    // Here we have src and dest word-aligned but less than 64 bytes to go.
+    // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
+ // down to chk1w to handle the tail end of the copy.
+ __ bind(&chkw);
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ andi(t8, a2, 0x1f);
+ __ beq(a2, t8, &chk1w); // Less than 32?
+ __ nop(); // In delay slot.
+ __ lw(t0, MemOperand(a1));
+ __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Here we have less than 32 bytes to copy. Set up for a loop to copy
+ // one word at a time. Set a2 to count how many bytes we have to copy
+ // after all the word chunks are copied and a3 to the dst pointer after
+ // all the word chunks have been copied. We will loop, incrementing a0
+    // and a1 until a0 equals a3.
+ __ bind(&chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &lastb);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ __ bind(&wordCopy_loop);
+ __ lw(t3, MemOperand(a1));
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &wordCopy_loop);
+ __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+
+ __ bind(&lastb);
+ __ Branch(&leave, le, a2, Operand(zero_reg));
+ __ addu(a3, a0, a2);
+
+ __ bind(&lastbloop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &lastbloop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ bind(&leave);
+ __ jr(ra);
+ __ nop();
+
+ // Unaligned case. Only the dst gets aligned so we need to do partial
+ // loads of the source followed by normal stores to the dst (once we
+ // have aligned the destination).
+ __ bind(&unaligned);
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &ua_chk16w);
+ __ subu(a2, a2, a3); // In delay slot.
+
+ if (kArchEndian == kLittle) {
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swr(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ } else {
+ __ lwl(v1, MemOperand(a1));
+ __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swl(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ }
+
+ // Now the dst (but not the source) is aligned. Set a2 to count how many
+    // bytes we have to copy after all the 64-byte chunks are copied and a3 to
+    // the dst pointer after all the 64-byte chunks have been copied. We will
+ // loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&ua_chk16w);
+ __ andi(t8, a2, 0x3f);
+ __ beq(a2, t8, &ua_chkw);
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(t0, a0, a2);
+ __ Subu(t9, t0, pref_limit);
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+
+ __ bind(&ua_loop16w);
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+ if (kArchEndian == kLittle) {
+ __ lwr(t0, MemOperand(a1));
+ __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(t0, MemOperand(a1));
+ __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwr(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ if (kArchEndian == kLittle) {
+ __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ lwr(t0,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t4,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t5,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t6,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t7,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+ __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &ua_loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+    // Here we have less than 64 bytes to copy. Check for
+    // a 32-byte chunk and copy it if there is one. Otherwise jump down to
+ // ua_chk1w to handle the tail end of the copy.
+ __ bind(&ua_chkw);
+ __ Pref(pref_hint_load, MemOperand(a1));
+ __ andi(t8, a2, 0x1f);
+
+ __ beq(a2, t8, &ua_chk1w);
+ __ nop(); // In delay slot.
+ if (kArchEndian == kLittle) {
+ __ lwr(t0, MemOperand(a1));
+ __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(t0, MemOperand(a1));
+ __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwr(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Less than 32 bytes to copy. Set up for a loop to
+ // copy one word at a time.
+ __ bind(&ua_chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &ua_smallCopy);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ __ bind(&ua_wordCopy_loop);
+ if (kArchEndian == kLittle) {
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(v1, MemOperand(a1));
+ __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &ua_wordCopy_loop);
+ __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+
+ // Copy the last 8 bytes.
+ __ bind(&ua_smallCopy);
+ __ beq(a2, zero_reg, &leave);
+ __ addu(a3, a0, a2); // In delay slot.
+
+ __ bind(&ua_smallCopy_loop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &ua_smallCopy_loop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ jr(ra);
+ __ nop();
+ }
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<MemCopyUint8Function>(buffer);
+#endif
+}
+#endif
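+// For reference — how the generated routine is presumably wired in (an
+// assumption based on the other ports, not part of this file):
+//   OS::memcopy_uint8_function =
+//       CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
+// with the C++ wrapper kept as the fallback when code allocation fails.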
UnaryMathFunction CreateSqrtFunction() {
- return &sqrt;
+#if defined(USE_SIMULATOR)
+ return &std::sqrt;
+#else
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::sqrt;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ __ MovFromFloatParameter(f12);
+ __ sqrt_d(f0, f12);
+ __ MovToFloatResult(f0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
}
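+// Usage sketch: the result is invoked through the returned function pointer,
+// e.g.
+//   UnaryMathFunction fast_sqrt = CreateSqrtFunction();
+//   double r = fast_sqrt(16.0);  // 4.0, via the generated sqrt_d.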
+#undef __
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -290,8 +798,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
}
- __ sw(t0, MemOperand(t3)); // mantissa
- __ sw(t1, MemOperand(t3, kIntSize)); // exponent
+ __ sw(t0, MemOperand(t3, Register::kMantissaOffset)); // mantissa
+ __ sw(t1, MemOperand(t3, Register::kExponentOffset)); // exponent
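+  // Register::kMantissaOffset/kExponentOffset are endianness-dependent
+  // constants (an assumption consistent with the kArchEndian checks in this
+  // patch), roughly:
+  //   kMantissaOffset = (kArchEndian == kLittle) ? 0 : kIntSize;
+  //   kExponentOffset = (kArchEndian == kLittle) ? kIntSize : 0;
+  // replacing the little-endian-only 0 / kIntSize literals.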
__ Addu(t3, t3, kDoubleSize);
__ bind(&entry);
@@ -341,7 +849,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
// Prepare for conversion loop.
- __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+ __ Addu(t0, t0, Operand(
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag
+ + Register::kExponentOffset));
__ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
__ Addu(t2, t2, Operand(kHeapObjectTag));
__ sll(t1, t1, 1);
@@ -350,7 +860,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
// Using offset-adjusted addresses.
// a3: begin of destination FixedArray element fields, not tagged
- // t0: begin of source FixedDoubleArray element fields, not tagged, +4
+ // t0: begin of source FixedDoubleArray element fields, not tagged,
+ // points to the exponent
// t1: end of destination FixedArray, not tagged
// t2: destination FixedArray
// t3: the-hole pointer
@@ -373,7 +884,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
// a2: new heap number
- __ lw(a0, MemOperand(t0, -12));
+  // Load the mantissa of the current element; t0 points to the exponent
+  // of the next element.
+ __ lw(a0, MemOperand(t0, (Register::kMantissaOffset
+ - Register::kExponentOffset - kDoubleSize)));
__ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
__ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
__ mov(a0, a3);
@@ -492,7 +1005,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
at, Operand(zero_reg));
}
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ And(at, result, Operand(kShortExternalStringMask));
__ Branch(call_runtime, ne, at, Operand(zero_reg));
__ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
@@ -578,8 +1091,8 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ li(temp3, Operand(ExternalReference::math_exp_log_table()));
__ sll(at, temp2, 3);
__ Addu(temp3, temp3, Operand(at));
- __ lw(temp2, MemOperand(temp3, 0));
- __ lw(temp3, MemOperand(temp3, kPointerSize));
+ __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
+ __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
// The first word loaded is in the lower-numbered register.
if (temp2.code() < temp3.code()) {
__ sll(at, temp1, 20);
@@ -591,11 +1104,11 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Move(double_scratch1, temp3, temp1);
}
__ mul_d(result, result, double_scratch1);
- __ Branch(&done);
+ __ BranchShort(&done);
__ bind(&zero);
__ Move(result, kDoubleRegZero);
- __ Branch(&done);
+ __ BranchShort(&done);
__ bind(&infinity);
__ ldc1(result, ExpConstant(2, temp3));
@@ -603,42 +1116,47 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ bind(&done);
}
-
+#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+#endif
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->Push(ra, fp, cp, a1);
- patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- patcher.masm()->Addu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- initialized = true;
- }
- return byte_sequence;
+
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  // Since the patcher is a large object, allocate it dynamically when needed,
+ // to avoid overloading the stack in stress conditions.
+ // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+  // the process, before the MIPS simulator ICache is set up.
+ SmartPointer<CodePatcher> patcher(
+ new CodePatcher(young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
+ PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+ patcher->masm()->Push(ra, fp, cp, a1);
+ patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ patcher->masm()->Addu(
+ fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
+ if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
@@ -654,10 +1172,9 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
- CopyBytes(sequence, young_sequence, young_length);
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
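Taken together, the two routines above form a reversible aging cycle; a hedged
sketch of the round trip (age constants assumed from the shared Code
declaration):

    byte* sequence = code->instruction_start();
    // Age the prologue, then restore it; IsYoungSequence flips in step.
    Code::PatchPlatformCodeAge(isolate, sequence,
                               Code::kQuadragenarianCodeAge,
                               NO_MARKING_PARITY);
    ASSERT(!Code::IsYoungSequence(isolate, sequence));
    Code::PatchPlatformCodeAge(isolate, sequence, Code::kNoAgeCodeAge,
                               NO_MARKING_PARITY);
    ASSERT(Code::IsYoungSequence(isolate, sequence));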
diff --git a/chromium/v8/src/mips/codegen-mips.h b/chromium/v8/src/mips/codegen-mips.h
index 822b94ad799..82a410ec235 100644
--- a/chromium/v8/src/mips/codegen-mips.h
+++ b/chromium/v8/src/mips/codegen-mips.h
@@ -1,85 +1,21 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
-#include "ast.h"
-#include "ic-inl.h"
+#include "src/ast.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- explicit CodeGenerator(Isolate* isolate) {
- InitializeAstVisitor(isolate);
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
diff --git a/chromium/v8/src/mips/constants-mips.cc b/chromium/v8/src/mips/constants-mips.cc
index 2dd7a31f388..f14992719db 100644
--- a/chromium/v8/src/mips/constants-mips.cc
+++ b/chromium/v8/src/mips/constants-mips.cc
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "constants-mips.h"
+#include "src/mips/constants-mips.h"
namespace v8 {
namespace internal {
@@ -174,7 +151,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
return true;
default:
return false;
- };
+ }
break;
case SPECIAL:
switch (FunctionFieldRaw()) {
@@ -183,11 +160,11 @@ bool Instruction::IsForbiddenInBranchDelay() const {
return true;
default:
return false;
- };
+ }
break;
default:
return false;
- };
+ }
}
@@ -203,17 +180,17 @@ bool Instruction::IsLinkingInstruction() const {
return true;
default:
return false;
- };
+ }
case SPECIAL:
switch (FunctionFieldRaw()) {
case JALR:
return true;
default:
return false;
- };
+ }
default:
return false;
- };
+ }
}
@@ -232,7 +209,7 @@ bool Instruction::IsTrap() const {
return true;
default:
return false;
- };
+ }
}
}
@@ -278,7 +255,7 @@ Instruction::Type Instruction::InstructionType() const {
return kRegisterType;
default:
return kUnsupported;
- };
+ }
break;
case SPECIAL2:
switch (FunctionFieldRaw()) {
@@ -287,7 +264,7 @@ Instruction::Type Instruction::InstructionType() const {
return kRegisterType;
default:
return kUnsupported;
- };
+ }
break;
case SPECIAL3:
switch (FunctionFieldRaw()) {
@@ -296,7 +273,7 @@ Instruction::Type Instruction::InstructionType() const {
return kRegisterType;
default:
return kUnsupported;
- };
+ }
break;
case COP1: // Coprocessor instructions.
switch (RsFieldRawNoAssert()) {
@@ -304,7 +281,7 @@ Instruction::Type Instruction::InstructionType() const {
return kImmediateType;
default:
return kRegisterType;
- };
+ }
break;
case COP1X:
return kRegisterType;
@@ -349,7 +326,7 @@ Instruction::Type Instruction::InstructionType() const {
return kJumpType;
default:
return kUnsupported;
- };
+ }
return kUnsupported;
}
diff --git a/chromium/v8/src/mips/constants-mips.h b/chromium/v8/src/mips/constants-mips.h
index 5a0870fd218..fc64f7dbbf9 100644
--- a/chromium/v8/src/mips/constants-mips.h
+++ b/chromium/v8/src/mips/constants-mips.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
@@ -55,6 +32,18 @@ enum ArchVariants {
static const ArchVariants kArchVariant = kMips32r1;
#endif
+enum Endianness {
+ kLittle,
+ kBig
+};
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static const Endianness kArchEndian = kLittle;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static const Endianness kArchEndian = kBig;
+#else
+#error Unknown endianness
+#endif
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
// Use floating-point coprocessor instructions. This flag is raised when
@@ -69,6 +58,15 @@ const bool IsMipsSoftFloatABI = true;
const bool IsMipsSoftFloatABI = true;
#endif
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const uint32_t kHoleNanUpper32Offset = 4;
+const uint32_t kHoleNanLower32Offset = 0;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const uint32_t kHoleNanUpper32Offset = 0;
+const uint32_t kHoleNanLower32Offset = 4;
+#else
+#error Unknown endianness
+#endif
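A hedged illustration of what these offsets select: on little-endian MIPS the
low word of a double is stored first, so the upper (sign/exponent) word lives
at byte offset 4, and the reverse holds on big-endian targets:

    #include <cstdint>
    #include <cstring>

    // Reads the upper (sign/exponent) 32 bits of a stored double,
    // independent of target endianness, via the constant above.
    static uint32_t ReadHoleNanUpper32(const double* slot) {
      uint32_t upper;
      std::memcpy(&upper,
                  reinterpret_cast<const uint8_t*>(slot) +
                      kHoleNanUpper32Offset,
                  sizeof(upper));
      return upper;
    }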
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
@@ -124,6 +122,16 @@ const uint32_t kFCSRFlagMask =
const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+// 'pref' instruction hints
+const int32_t kPrefHintLoad = 0;
+const int32_t kPrefHintStore = 1;
+const int32_t kPrefHintLoadStreamed = 4;
+const int32_t kPrefHintStoreStreamed = 5;
+const int32_t kPrefHintLoadRetained = 6;
+const int32_t kPrefHintStoreRetained = 7;
+const int32_t kPrefHintWritebackInvalidate = 25;
+const int32_t kPrefHintPrepareForStore = 30;
+
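These hints feed the PREF opcode added below; a hedged sketch of their use in
the MemCopy stub earlier in this diff (assembler call shape assumed):

    // Prefetch ahead of the read and write streams; the decoder learns
    // to print this instruction later in this diff.
    __ pref(kPrefHintLoadStreamed, MemOperand(a1, 0));     // source
    __ pref(kPrefHintPrepareForStore, MemOperand(a0, 0));  // destination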
// Helper functions for converting between register numbers and names.
class Registers {
public:
@@ -297,6 +305,8 @@ enum Opcode {
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+ PREF = ((6 << 3) + 3) << kOpcodeShift,
+
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift,
@@ -494,7 +504,8 @@ inline Condition NegateCondition(Condition cc) {
}
-inline Condition ReverseCondition(Condition cc) {
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cc) {
switch (cc) {
case Uless:
return Ugreater;
@@ -514,7 +525,7 @@ inline Condition ReverseCondition(Condition cc) {
return greater_equal;
default:
return cc;
- };
+ }
}
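A hedged usage sketch for the renamed helper: commuting lets a code generator
swap operands when only one encoding order is available, e.g. turning
{a <u b} into {b >u a}:

    Condition cc = Uless;                      // left <u right
    Condition swapped = CommuteCondition(cc);  // Ugreater: right >u left
    // Emitting the compare as (swapped, right, left) preserves the
    // predicate (illustrative).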
diff --git a/chromium/v8/src/mips/cpu-mips.cc b/chromium/v8/src/mips/cpu-mips.cc
index 49d0b377ebc..ce471265bb2 100644
--- a/chromium/v8/src/mips/cpu-mips.cc
+++ b/chromium/v8/src/mips/cpu-mips.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// CPU specific code for mips independent of OS goes here.
@@ -34,29 +11,19 @@
#include <asm/cachectl.h>
#endif // #ifdef __mips
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "cpu.h"
-#include "macro-assembler.h"
+#include "src/cpu.h"
+#include "src/macro-assembler.h"
-#include "simulator.h" // For cache flushing.
+#include "src/simulator.h" // For cache flushing.
namespace v8 {
namespace internal {
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(FPU);
-}
-
-
void CPU::FlushICache(void* start, size_t size) {
// Nothing to do, flushing no instructions.
if (size == 0) {
diff --git a/chromium/v8/src/mips/debug-mips.cc b/chromium/v8/src/mips/debug-mips.cc
index 1535231dd81..fc052114233 100644
--- a/chromium/v8/src/mips/debug-mips.cc
+++ b/chromium/v8/src/mips/debug-mips.cc
@@ -1,44 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "codegen.h"
-#include "debug.h"
+#include "src/codegen.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
@@ -58,9 +33,8 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
// li and Call pseudo-instructions emit two instructions each.
- patcher.masm()->li(v8::internal::t9,
- Operand(reinterpret_cast<int32_t>(
- debug_info_->GetIsolate()->debug()->debug_break_return()->entry())));
+ patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())));
patcher.masm()->Call(v8::internal::t9);
patcher.masm()->nop();
patcher.masm()->nop();
@@ -105,7 +79,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// call t9 (jalr t9 / nop instruction pair)
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
- debug_info_->GetIsolate()->debug()->debug_break_slot()->entry())));
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())));
patcher.masm()->Call(v8::internal::t9);
}
@@ -116,8 +90,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
Assembler::kDebugBreakSlotInstructions);
}
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
#define __ ACCESS_MASM(masm)
@@ -156,7 +128,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ PrepareCEntryArgs(0); // No arguments.
__ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
- CEntryStub ceb(1);
+ CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
@@ -181,14 +153,25 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller, which was
// overwritten by the address of DebugBreakXXX.
- __ li(t9, Operand(
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate())));
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ li(t9, Operand(after_break_target));
__ lw(t9, MemOperand(t9));
__ Jump(t9);
}
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- a3 : slot in feedback array (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a3.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-mips.cc).
// ----------- S t a t e -------------
// -- a2 : name
@@ -202,7 +185,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-mips.cc).
// ----------- S t a t e -------------
// -- a0 : value
@@ -216,7 +199,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
@@ -225,7 +208,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -235,7 +218,7 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- a0 : value
@@ -244,16 +227,7 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC call (from ic-mips.cc).
- // ----------- S t a t e -------------
- // -- a2: name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a2.bit(), 0);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that v0 is TOS, which
// is an object; this is not generally the case, so this should be used
// with care.
@@ -261,7 +235,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a1 : function
@@ -270,17 +244,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- a2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
@@ -290,18 +254,20 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
// -- a1 : constructor function
- // -- a2 : cache cell for call target
+ // -- a2 : feedback array
+ // -- a3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
}
-void Debug::GenerateSlot(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
@@ -316,30 +282,27 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
}
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0);
}
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
}
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
}
-const bool Debug::kFrameDropperSupported = false;
+const bool LiveEdit::kFrameDropperSupported = false;
#undef __
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
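For reference, a hedged sketch of the patched return sequence installed by
SetDebugBreakAtReturn above (li expands to lui/ori and Call to jalr plus a
delay-slot nop on MIPS, which is why seven instructions are budgeted):

    // lui  t9, hi(Return_DebugBreak entry)   // li, upper half
    // ori  t9, t9, lo(entry)                 // li, lower half
    // jalr t9                                // Call
    // nop                                    // branch delay slot
    // nop; nop; nop                          // padding to 7 instructions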
diff --git a/chromium/v8/src/mips/deoptimizer-mips.cc b/chromium/v8/src/mips/deoptimizer-mips.cc
index 0662b17366b..71c82fb8af7 100644
--- a/chromium/v8/src/mips/deoptimizer-mips.cc
+++ b/chromium/v8/src/mips/deoptimizer-mips.cc
@@ -1,37 +1,14 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
@@ -49,13 +26,36 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->break_(0xCC);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->break_(0xCC);
+ }
+ }
+
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
@@ -125,11 +125,6 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
-Code* Deoptimizer::NotifyStubFailureBuiltin() {
- return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
-}
-
-
#define __ masm()->
@@ -239,13 +234,13 @@ void Deoptimizer::EntryGenerator::Generate() {
__ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
- __ Branch(&pop_loop_header);
+ __ BranchShort(&pop_loop_header);
__ bind(&pop_loop);
__ pop(t0);
__ sw(t0, MemOperand(a3, 0));
__ addiu(a3, a3, sizeof(uint32_t));
__ bind(&pop_loop_header);
- __ Branch(&pop_loop, ne, a2, Operand(sp));
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
// Compute the output frame in the deoptimizer.
__ push(a0); // Preserve deoptimizer object across call.
@@ -280,11 +275,11 @@ void Deoptimizer::EntryGenerator::Generate() {
__ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
__ push(t3);
__ bind(&inner_loop_header);
- __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
__ Addu(t0, t0, Operand(kPointerSize));
__ bind(&outer_loop_header);
- __ Branch(&outer_push_loop, lt, t0, Operand(a1));
+ __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
__ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
@@ -371,6 +366,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
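A hedged sketch of the lazy-deopt patching loop the comment above describes;
the types come from this hunk, the patching step is paraphrased:

    // For every lazy bailout point recorded in the deopt data, redirect
    // the call site to the matching deoptimization entry.
    Address code_start_address = code->instruction_start();
    for (int i = 0; i < deopt_data->DeoptCount(); i++) {
      if (deopt_data->Pc(i)->value() == -1) continue;  // no lazy bailout
      Address call_address = code_start_address + deopt_data->Pc(i)->value();
      Address entry = Deoptimizer::GetDeoptimizationEntry(
          isolate, i, Deoptimizer::LAZY);
      // Patch 'call_address' to call 'entry' (CodePatcher, as above).
    }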
diff --git a/chromium/v8/src/mips/disasm-mips.cc b/chromium/v8/src/mips/disasm-mips.cc
index 691df940f2d..82a47582ad3 100644
--- a/chromium/v8/src/mips/disasm-mips.cc
+++ b/chromium/v8/src/mips/disasm-mips.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// A Disassembler object is used to disassemble a block of code instruction by
// instruction. The default implementation of the NameConverter object can be
@@ -51,14 +28,14 @@
#include <stdarg.h>
#include <string.h>
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "mips/constants-mips.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "platform.h"
+#include "src/mips/constants-mips.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
@@ -207,21 +184,21 @@ void Decoder::PrintFd(Instruction* instr) {
// Print the integer value of the sa field.
void Decoder::PrintSa(Instruction* instr) {
int sa = instr->SaValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
}
// Print the integer value of the rd field, when it is not used as reg.
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
}
// Print the integer value of the rd field, when used as 'ext' size.
void Decoder::PrintSs1(Instruction* instr) {
int ss = instr->RdValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
}
@@ -230,49 +207,49 @@ void Decoder::PrintSs2(Instruction* instr) {
int ss = instr->RdValue();
int pos = instr->SaValue();
out_buffer_pos_ +=
- OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
}
// Print the integer value of the cc field for the bc1t/f instructions.
void Decoder::PrintBc(Instruction* instr) {
int cc = instr->FBccValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
}
// Print the integer value of the cc field for the FP compare instructions.
void Decoder::PrintCc(Instruction* instr) {
int cc = instr->FCccValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
}
// Print 16-bit unsigned immediate value.
void Decoder::PrintUImm16(Instruction* instr) {
int32_t imm = instr->Imm16Value();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
}
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
// Print 16-bit hexadecimal immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
int32_t imm = instr->Imm16Value();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
// Print 26-bit immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
uint32_t imm = instr->Imm26Value() << kImmFieldShift;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
@@ -283,8 +260,8 @@ void Decoder::PrintCode(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case BREAK: {
int32_t code = instr->Bits(25, 6);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "0x%05x (%d)", code, code);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "0x%05x (%d)", code, code);
break;
}
case TGE:
@@ -295,12 +272,12 @@ void Decoder::PrintCode(Instruction* instr) {
case TNE: {
int32_t code = instr->Bits(15, 6);
out_buffer_pos_ +=
- OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
break;
}
default: // Not a break or trap instruction.
break;
- };
+ }
}
@@ -430,7 +407,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintCc(instr);
return 2;
}
- };
+ }
UNREACHABLE();
return -1;
}
@@ -626,7 +603,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
@@ -819,7 +796,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break; // Case COP1.
case REGIMM:
switch (instr->RtFieldRaw()) {
@@ -899,6 +876,9 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case LWR:
Format(instr, "lwr 'rt, 'imm16s('rs)");
break;
+ case PREF:
+ Format(instr, "pref 'rt, 'imm16s('rs)");
+ break;
case SB:
Format(instr, "sb 'rt, 'imm16s('rs)");
break;
@@ -929,7 +909,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
default:
UNREACHABLE();
break;
- };
+ }
}
@@ -951,9 +931,9 @@ void Decoder::DecodeTypeJump(Instruction* instr) {
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
switch (instr->InstructionType()) {
case Instruction::kRegisterType: {
DecodeTypeRegister(instr);
@@ -985,7 +965,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
diff --git a/chromium/v8/src/mips/frames-mips.cc b/chromium/v8/src/mips/frames-mips.cc
index 1bd511654a1..5da00801c77 100644
--- a/chromium/v8/src/mips/frames-mips.cc
+++ b/chromium/v8/src/mips/frames-mips.cc
@@ -1,39 +1,15 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "assembler.h"
-#include "assembler-mips.h"
-#include "assembler-mips-inl.h"
-#include "frames.h"
+#include "src/assembler.h"
+#include "src/mips/assembler-mips.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/frames.h"
namespace v8 {
namespace internal {
@@ -41,10 +17,24 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
} } // namespace v8::internal
diff --git a/chromium/v8/src/mips/frames-mips.h b/chromium/v8/src/mips/frames-mips.h
index 55951b58c47..5666f642f99 100644
--- a/chromium/v8/src/mips/frames-mips.h
+++ b/chromium/v8/src/mips/frames-mips.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
@@ -110,8 +87,6 @@ const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters =
kNumJSCallerSaved + kNumCalleeSaved;
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
const int kUndefIndex = -1;
// Map with indexes on stack that corresponds to codes of saved registers.
const int kSafepointRegisterStackIndexMap[kNumRegs] = {
@@ -161,12 +136,9 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- // See some explanation in MacroAssembler::EnterExitFrame.
- // This marks the top of the extra allocated stack space.
- static const int kStackSpaceOffset = -3 * kPointerSize;
+ static const int kFrameSize = 2 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
-
static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
@@ -179,6 +151,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP.
static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used.
};
diff --git a/chromium/v8/src/mips/full-codegen-mips.cc b/chromium/v8/src/mips/full-codegen-mips.cc
index 3ce2ab5f19c..41acad355f9 100644
--- a/chromium/v8/src/mips/full-codegen-mips.cc
+++ b/chromium/v8/src/mips/full-codegen-mips.cc
@@ -1,31 +1,8 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
@@ -37,18 +14,18 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
-#include "mips/code-stubs-mips.h"
-#include "mips/macro-assembler-mips.h"
+#include "src/mips/code-stubs-mips.h"
+#include "src/mips/macro-assembler-mips.h"
namespace v8 {
namespace internal {
@@ -84,7 +61,7 @@ class JumpPatchSite BASE_EMBEDDED {
__ bind(&patch_site_);
__ andi(at, reg, 0);
// Always taken before patched.
- __ Branch(target, eq, at, Operand(zero_reg));
+ __ BranchShort(target, eq, at, Operand(zero_reg));
}
// When initially emitting this ensure that a jump is never generated to skip
@@ -95,7 +72,7 @@ class JumpPatchSite BASE_EMBEDDED {
__ bind(&patch_site_);
__ andi(at, reg, 0);
// Never taken before patched.
- __ Branch(target, ne, at, Operand(zero_reg));
+ __ BranchShort(target, ne, at, Operand(zero_reg));
}
void EmitPatchInfo() {
@@ -138,6 +115,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -152,16 +130,21 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). t1 is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+ __ lw(at, MemOperand(sp, receiver_offset));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ lw(a2, GlobalObjectOperand());
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+
__ sw(a2, MemOperand(sp, receiver_offset));
+
__ bind(&ok);
}
@@ -171,7 +154,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(BUILD_FUNCTION_FRAME);
+ __ Prologue(info->IsCodePreAgingActive());
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -179,21 +162,35 @@ void FullCodeGenerator::Generate() {
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- // Emit a loop to initialize stack cells for locals when optimizing for
- // size. Otherwise, unroll the loop for maximum performance.
+ if (locals_count >= 128) {
+ Label ok;
+ __ Subu(t5, sp, Operand(locals_count * kPointerSize));
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ Branch(&ok, hs, t5, Operand(a2));
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
__ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
- if (FLAG_optimize_for_size && locals_count > 4) {
- Label loop;
- __ li(a2, Operand(locals_count));
- __ bind(&loop);
- __ Subu(a2, a2, 1);
- __ push(t5);
- __ Branch(&loop, gt, a2, Operand(zero_reg));
- } else {
- for (int i = 0; i < locals_count; i++) {
- __ push(t5);
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ li(a2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ __ Subu(sp, sp, Operand(kMaxPushes * kPointerSize));
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ sw(t5, MemOperand(sp, i * kPointerSize));
}
+ // Continue loop if not done.
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&loop_header, ne, a2, Operand(zero_reg));
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ __ Subu(sp, sp, Operand(remaining * kPointerSize));
+ for (int i = 0; i < remaining; i++) {
+ __ sw(t5, MemOperand(sp, i * kPointerSize));
}
}
}
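A worked example of the split computed above, assuming FLAG_optimize_for_size
is false:

    // locals_count = 70, kMaxPushes = 32
    //   loop_iterations = 70 / 32 = 2   (two unrolled blocks of 32 sw's)
    //   remaining       = 70 % 32 = 6   (six tail sw's)
    // Total: 2 * 32 + 6 = 70 undefined-value slots, as required.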
@@ -205,20 +202,25 @@ void FullCodeGenerator::Generate() {
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in a1.
- __ push(a1);
+ bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(a1);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ push(a1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both v0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Context is returned in v0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, v0);
+ __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -233,8 +235,15 @@ void FullCodeGenerator::Generate() {
__ sw(a0, target);
// Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, a0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
}
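
The need_write_barrier flag introduced above encodes a simple invariant: FastNewContextStub allocates its result in new space, so copying parameters into that fresh context can never create an old-to-new pointer and the record-write barrier may be skipped; contexts allocated by the runtime call keep the conservative path. A hedged sketch of the decision, using an assumed object model rather than V8's types:

    // Assumed model: a store into a new-space object never needs a
    // generational write barrier.
    struct ObjectModel { bool in_new_space; };

    bool NeedsWriteBarrier(const ObjectModel& context, bool from_fast_stub) {
      if (from_fast_stub) return false;  // stub result is always in new space
      return !context.in_new_space;      // runtime allocation: be conservative
    }
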
@@ -262,14 +271,14 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(type);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, v0, a1, a2);
@@ -293,7 +302,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -303,9 +312,12 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(masm_,
+ masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+ __ Call(stack_check, RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -341,11 +353,7 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- if (isolate()->IsDebuggerActive()) {
+ if (info_->is_debug()) {
// Detect debug break requests as soon as possible.
reset_value = FLAG_interrupt_budget >> 4;
}
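
With the self-optimization heuristics deleted, the reset value above collapses to the plain interrupt budget, cut to one sixteenth when the code is compiled for debugging so debug-break requests are polled sooner. A sketch with an assumed budget constant:

    int ProfilingCounterResetValue(bool is_debug) {
      const int kInterruptBudget = 0x1800;  // assumed FLAG_interrupt_budget default
      int reset_value = kInterruptBudget;
      if (is_debug) {
        reset_value = kInterruptBudget >> 4;  // poll for debug breaks sooner
      }
      return reset_value;
    }
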
@@ -365,13 +373,10 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
__ beq(at, zero_reg, &ok);
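
Back-edge weighting is now unconditional: the profiling counter is decremented by an amount proportional to the code size of the loop body, clamped to [1, kMaxBackEdgeWeight], so larger loops reach the interrupt (and eventual optimization) sooner. A standalone sketch of the clamp, where both constants are assumptions chosen for illustration:

    #include <algorithm>

    int BackEdgeWeight(int code_bytes_since_loop_head) {
      const int kMaxBackEdgeWeight = 127;   // assumed clamp
      const int kCodeSizeMultiplier = 149;  // assumed bytes-per-weight unit
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, code_bytes_since_loop_head / kCodeSizeMultiplier));
    }
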
@@ -404,32 +409,24 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(v0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ Branch(&ok, ge, a3, Operand(zero_reg));
- __ push(v0);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(a2);
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(v0);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ Branch(&ok, ge, a3, Operand(zero_reg));
+ __ push(v0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(v0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
@@ -686,7 +683,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
+ CallIC(ic, condition->test_id());
__ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -809,7 +806,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -859,7 +856,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
__ Push(cp, a2, a1, a0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -915,7 +912,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(cp, a2, a1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -987,7 +984,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(cp, a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -995,7 +992,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -1051,9 +1048,18 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
+ Label skip;
+ __ Branch(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&next_test, ne, v0, Operand(at));
+ __ Drop(1);
+ __ Branch(clause->body_target());
+ __ bind(&skip);
+
__ Branch(&next_test, ne, v0, Operand(zero_reg));
__ Drop(1); // Switch value is no longer needed.
__ Branch(clause->body_target());
@@ -1085,6 +1091,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1157,10 +1164,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(v0); // Map.
__ li(a0, Operand(Smi::FromInt(0)));
- // Push enumeration cache, enumeration cache length (as smi) and zero.
- __ Push(a2, a1, a0);
+ // Push map, enumeration cache, enumeration cache length (as smi) and zero.
+ __ Push(v0, a2, a1, a0);
__ jmp(&loop);
__ bind(&no_descriptors);
@@ -1171,13 +1177,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ li(a1, cell);
- __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ sw(a2, FieldMemOperand(a1, Cell::kValueOffset));
+ __ li(a1, FeedbackVector());
+ __ li(a2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
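
The per-site Cell that used to carry the for-in fast/slow marker is gone; the slow path above now writes the megamorphic sentinel directly into the statement's slot of the shared feedback vector. A toy model of that store, where the enum stands in for V8's sentinel objects:

    #include <vector>

    enum class Feedback { kUninitialized, kMonomorphic, kMegamorphic };

    // Marks the for-in site as slow-case so later (re)compiles read the
    // sentinel from the vector instead of a dedicated Cell.
    void MarkForInSlow(std::vector<Feedback>& feedback_vector, int slot) {
      feedback_vector.at(slot) = Feedback::kMegamorphic;
    }
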
@@ -1225,8 +1227,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
- __ push(a1); // Enumerable.
- __ push(a3); // Current entry.
+ __ Push(a1, a3); // Enumerable and current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ mov(a3, result_register());
__ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg));
@@ -1271,8 +1272,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Iteration loop_statement(this, stmt);
increment_loop_depth();
- // var iterator = iterable[@@iterator]()
- VisitForAccumulatorValue(stmt->assign_iterator());
+ // var iterable = subject
+ VisitForAccumulatorValue(stmt->assign_iterable());
__ mov(a0, v0);
// As with for-in, skip the loop if the iterator is null or undefined.
@@ -1281,17 +1282,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(loop_statement.break_label(), eq, a0, Operand(at));
- // Convert the iterator to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(a0, &convert);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ bind(&convert);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a0, v0);
- __ bind(&done_convert);
- __ push(a0);
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
// Loop entry.
__ bind(loop_statement.continue_label());
@@ -1338,7 +1330,9 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1346,7 +1340,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, a0, a1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(v0);
}
@@ -1368,7 +1362,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1380,7 +1374,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1405,11 +1399,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
}
@@ -1422,7 +1415,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1458,19 +1451,18 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ Branch(done);
@@ -1487,13 +1479,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallLoadIC(CONTEXTUAL);
context()->Plug(v0);
break;
}
@@ -1501,9 +1492,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1535,7 +1525,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
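
The condition above elides the hole check at a use site: a binding that is not legacy const and is read at a source position after its initializer ran is provably initialized. A sketch of the predicate (a simplification; the real check also requires both positions to be valid, per the ASSERTs above):

    bool CanSkipInitCheck(bool is_const_legacy,
                          int initializer_position, int use_position) {
      // Legacy const always keeps the check (an uninitialized read must be
      // turned into undefined); other bindings are safe once the use site
      // lies textually after the initializer.
      return !is_const_legacy && initializer_position < use_position;
    }
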
}
@@ -1544,18 +1534,18 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(v0, var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
}
@@ -1568,15 +1558,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(v0);
}
@@ -1608,7 +1598,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ li(a2, Operand(expr->pattern()));
__ li(a1, Operand(expr->flags()));
__ Push(t0, a3, a2, a1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(t1, v0);
__ bind(&materialized);
@@ -1620,7 +1610,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ li(a0, Operand(Smi::FromInt(size)));
__ Push(t1, a0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(t1);
__ bind(&allocated);
@@ -1661,14 +1651,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
- FastCloneShallowObjectStub stub(properties_count);
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
@@ -1705,10 +1694,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(a0, result_register());
__ li(a2, Operand(key->value()));
__ lw(a1, MemOperand(sp));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1798,8 +1784,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
// If the only customer of allocation sites is transitioning, then
// we can turn it off if we don't have anywhere else to transition to.
@@ -1811,31 +1796,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
- if (has_fast_elements && constant_elements_values->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode,
- length);
- __ CallStub(&stub);
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
- 1, a1, a2);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
__ li(a0, Operand(Smi::FromInt(flags)));
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-
- if (has_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
@@ -1869,7 +1835,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
__ li(a3, Operand(Smi::FromInt(i)));
__ mov(a0, result_register());
- StoreArrayLiteralElementStub stub;
+ StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1885,13 +1851,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2030,7 +1992,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
__ Branch(&post_runtime, eq, sp, Operand(a1));
__ push(v0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ pop(result_register());
@@ -2070,9 +2032,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_catch);
__ mov(a0, v0);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
- __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(a3, a0); // iter, exception
+ __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(a2, a3, a0); // "throw", iter, except
__ jmp(&l_call);
// try { received = %yield result }
@@ -2098,33 +2060,41 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a1, cp);
__ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
kRAHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(v0); // result
+ __ pop(v0); // result
EmitReturnSequence();
__ mov(a0, v0);
- __ bind(&l_resume); // received in a0
+ __ bind(&l_resume); // received in a0
__ PopTryHandler();
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
- __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(a3, a0); // iter, received
+ __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(a2, a3, a0); // "next", iter, received
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
+ __ lw(a1, MemOperand(sp, kPointerSize));
+ __ lw(a0, MemOperand(sp, 2 * kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(a0, v0);
+ __ mov(a1, a0);
+ __ sw(a1, MemOperand(sp, 2 * kPointerSize));
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
// if (!result.done) goto l_try;
__ bind(&l_loop);
__ mov(a0, v0);
__ push(a0); // save result
__ LoadRoot(a2, Heap::kdone_stringRootIndex); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in v0
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in v0
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2133,8 +2103,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(a0); // result
__ LoadRoot(a2, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in v0
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in v0
context()->DropAndPlug(2, v0); // drop iter and g
break;
}
@@ -2146,18 +2115,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in a0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. a1
- // will hold the generator object until the activation has been resumed.
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // a1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ pop(a1);
// Check generator state.
- Label wrong_state, done;
+ Label wrong_state, closed_state, done;
__ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
- __ Branch(&wrong_state, le, a3, Operand(zero_reg));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ Branch(&closed_state, eq, a3, Operand(zero_reg));
+ __ Branch(&wrong_state, lt, a3, Operand(zero_reg));
// Load suspended function and context.
__ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
@@ -2226,14 +2197,29 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
ASSERT(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ push(a2);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(a0);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(a1);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
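
The new closed_state block above gives resumption of a finished generator its spec behavior: next() produces a completed iterator result instead of an error, while throw() rethrows the supplied value; only resuming a running generator still raises the state error. A toy model of the two closed-state paths:

    #include <stdexcept>

    enum class ResumeMode { NEXT, THROW };
    struct IterResult { bool done; };  // value omitted; it would be undefined

    IterResult ResumeClosedGenerator(ResumeMode mode) {
      if (mode == ResumeMode::NEXT) {
        return IterResult{true};                 // { value: undefined, done: true }
      }
      throw std::runtime_error("thrown value");  // THROW: rethrow the argument
    }
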
@@ -2244,14 +2230,14 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
- Handle<Map> map(isolate()->native_context()->generator_result_map());
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
__ Allocate(map->instance_size(), v0, a2, a3, &gc_required, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ lw(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2282,8 +2268,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
__ li(a2, Operand(key->value()));
// Call load IC. It has arguments receiver and property name in a0 and a2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
@@ -2292,7 +2277,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2319,9 +2304,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2330,13 +2314,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// recording binary operation stub.
switch (op) {
case Token::SAR:
- __ Branch(&stub_call);
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ srav(right, left, scratch1);
__ And(v0, right, Operand(~kSmiTagMask));
break;
case Token::SHL: {
- __ Branch(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ sllv(scratch1, scratch1, scratch2);
@@ -2346,7 +2328,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::SHR: {
- __ Branch(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ srlv(scratch1, scratch1, scratch2);
@@ -2401,22 +2382,16 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ mov(a0, result_register());
__ pop(a1);
- BinaryOpICStub stub(op, mode);
+ BinaryOpICStub stub(isolate(), op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2442,10 +2417,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(a1, result_register());
__ pop(a0); // Restore value.
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2454,7 +2426,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->key());
__ mov(a1, result_register());
__ Pop(a0, a2); // a0 = restored value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2465,49 +2437,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ sw(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Move(a3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ li(a1, Operand(name));
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(a0, result_register());
__ li(a2, Operand(var->name()));
__ lw(a1, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallStoreIC();
- } else if (op == Token::INIT_CONST) {
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
+ if (var->IsLookupSlot()) {
+ __ li(a0, Operand(var->name()));
+ __ Push(v0, cp, a0); // Value, context and name.
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
- __ lw(a1, StackOperand(var));
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a1, Operand(t0));
- __ sw(result_register(), StackOperand(var));
+ MemOperand location = VarOperand(var, a1);
+ __ lw(a2, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a2, Operand(at));
+ EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(v0);
- __ li(a0, Operand(var->name()));
- __ Push(cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(v0); // Value.
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, a1, a0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2517,23 +2498,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Branch(&assign, ne, a3, Operand(t0));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ sw(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(a3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2541,24 +2518,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
}
- // Perform the assignment.
- __ sw(v0, location);
- if (var->IsContextSlot()) {
- __ mov(a3, v0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(v0); // Value.
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, a1, a0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
- // Non-initializing assignments to consts are ignored.
+ // Non-initializing assignments to consts are ignored.
}
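
Legacy const initialization above is a conditional store shared by stack and context slots through the new helper: the value is written only if the slot still holds the hole, so a second initializer for the same name cannot clobber the first. A sketch using an empty optional as the hole sentinel:

    #include <optional>

    void InitConstLegacy(std::optional<int>& slot, int value) {
      if (!slot.has_value()) {  // slot still holds the hole: first initializer
        slot = value;           // store (plus write barrier for context slots)
      }                         // otherwise the store is silently skipped
    }
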
@@ -2566,7 +2529,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->IsLiteral());
// Record source code position before IC call.
SetSourcePosition(expr->position());
@@ -2574,10 +2537,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
__ pop(a1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2597,10 +2557,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ mov(a0, result_register());
__ Pop(a2, a1); // a1 = key.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2627,73 +2587,70 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId id) {
ic_total_count_++;
- __ Call(code, rmode, id);
+ __ Call(code, RelocInfo::CODE_TARGET, id);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
}
- __ li(a2, Operand(name));
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ lw(v0, MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ lw(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sw(v0, MemOperand(sp, kPointerSize));
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(v0);
+
+ EmitCall(expr, call_type);
}
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(a1);
- __ push(v0);
- __ push(a1);
+ Expression* callee = expr->expression();
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0); // Drop the key still on the stack.
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ lw(a1, MemOperand(sp, 0));
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ lw(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sw(v0, MemOperand(sp, kPointerSize));
+
+ EmitCall(expr, CallIC::METHOD);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2701,20 +2658,17 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
VisitForStackValue(args->at(i));
}
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Record call targets.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ li(a2, Operand(cell));
-
- CallFunctionStub stub(arg_count, flags);
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2734,15 +2688,15 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
- // t0: the language mode.
- __ li(t0, Operand(Smi::FromInt(language_mode())));
+ // t0: the strict mode.
+ __ li(t0, Operand(Smi::FromInt(strict_mode())));
// a1: the start position of the scope the call resides in.
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
__ Push(t2, t1, t0, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2755,12 +2709,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2789,20 +2742,18 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, v0);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ lw(a0, GlobalObjectOperand());
- __ push(a0);
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2817,7 +2768,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ASSERT(!context_register().is(a2));
__ li(a2, Operand(proxy->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ Push(v0, v1); // Function, receiver.
// If fast case code has been generated, emit code to push the
@@ -2831,37 +2782,34 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(v0);
// The receiver is implicitly the global receiver. Indicate this
// by passing undefined to the call function stub.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ push(a1);
__ bind(&call);
}
// The receiver is either the global receiver or an object found
- // by LoadContextSlot. That object could be the hole if the
- // receiver is implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
+ // by LoadContextSlot.
+ EmitCall(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithIC(expr, property->key());
+ EmitKeyedCallWithLoadIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ lw(a1, GlobalObjectOperand());
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ push(a1);
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCall(expr);
}
#ifdef DEBUG
@@ -2898,14 +2846,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ li(a2, Operand(cell));
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(v0);
}
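
When --pretenuring-call-new is on, each 'new' site reserves two consecutive feedback-vector slots, with the allocation-site slot laid out directly after the call slot; the ASSERT above pins that layout. A sketch of the assumed slot arithmetic:

    struct CallNewSite {
      int call_new_feedback_slot;  // slot holding the constructor feedback
      int AllocationSiteFeedbackSlot() const {
        return call_new_feedback_slot + 1;  // adjacent slot, as asserted above
      }
    };
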
@@ -3023,8 +2974,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
- __ And(at, a1, Operand(1 << Map::kIsUndetectable));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ And(at, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -3079,7 +3030,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ Addu(t0, t0, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
// Calculate the end of the descriptor array.
__ mov(a2, t0);
- __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ sll(t1, a3, kPointerSizeLog2);
__ Addu(a2, a2, t1);
// Loop through all the keys in the descriptor array. If one of these is the
@@ -3280,7 +3231,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
__ mov(a1, v0);
__ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3367,31 +3318,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- SubStringStub stub;
+ SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -3404,7 +3333,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
+ RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
@@ -3477,7 +3406,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(v0);
}
@@ -3498,9 +3427,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
if (FLAG_debug_code) {
__ SmiTst(value, at);
- __ ThrowIf(ne, kNonSmiValue, at, Operand(zero_reg));
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
__ SmiTst(index, at);
- __ ThrowIf(ne, kNonSmiIndex, at, Operand(zero_reg));
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
__ SmiUntag(index, index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
Register scratch = t5;
@@ -3535,9 +3464,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
if (FLAG_debug_code) {
__ SmiTst(value, at);
- __ ThrowIf(ne, kNonSmiValue, at, Operand(zero_reg));
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
__ SmiTst(index, at);
- __ ThrowIf(ne, kNonSmiIndex, at, Operand(zero_reg));
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
__ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
Register scratch = t5;
@@ -3563,7 +3492,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub(MathPowStub::ON_STACK);
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3606,7 +3535,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
__ mov(a0, result_register());
- NumberToStringStub stub;
+ NumberToStringStub stub(isolate());
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3730,21 +3659,13 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
- if (FLAG_new_string_add) {
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(a1);
- __ mov(a0, result_register()); // NewStringAddStub requires args in a0, a1.
- NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- } else {
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
- }
+ __ pop(a1);
+ __ mov(a0, result_register()); // StringAddStub requires args in a0, a1.
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
context()->Plug(v0);
}
@@ -3756,35 +3677,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringCompareStub stub;
+ StringCompareStub stub(isolate());
__ CallStub(&stub);
context()->Plug(v0);
}
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
@@ -3804,8 +3702,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
// InvokeFunction requires the function in a1. Move it in there.
__ mov(a1, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(a1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper());
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
@@ -3819,12 +3716,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
+ RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ mov(a0, result_register());
+ __ pop(a1);
+ __ pop(a2);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3877,50 +3777,13 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(v0);
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = v0;
- Register left = a1;
- Register tmp = a2;
- Register tmp2 = a3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1)); // Result (right) in v0.
- __ pop(left);
-
- Label done, fail, ok;
- __ Branch(&ok, eq, left, Operand(right));
- // Fail if either is a non-HeapObject.
- __ And(tmp, left, Operand(right));
- __ JumpIfSmi(tmp, &fail);
- __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
- __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ Branch(&fail, ne, tmp, Operand(tmp2));
- __ lw(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ lw(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ Branch(&ok, eq, tmp, Operand(tmp2));
- __ bind(&fail);
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4195,8 +4058,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4204,34 +4067,48 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
+ // Push the builtins object as the receiver.
__ lw(a0, GlobalObjectOperand());
__ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
__ push(a0);
- }
+ // Load the function from the receiver.
+ __ li(a2, Operand(expr->name()));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Push the target function under the receiver.
+ __ lw(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sw(v0, MemOperand(sp, kPointerSize));
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- __ li(a2, Operand(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, v0);
} else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+ context()->Plug(v0);
}
- context()->Plug(v0);
}
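
For orientation, the stack layout the rewritten is_jsruntime path builds up before CallFunctionStub runs (a sketch inferred from the pushes above; 32-bit MIPS, kPointerSize == 4):

    // sp + (argc + 1) * 4 : target function (reloaded into a1 for the stub)
    // sp + argc * 4       : builtins object, acting as the receiver
    // sp + (argc - 1) * 4 : argument 0
    //        ...
    // sp + 0              : argument argc - 1

The lw/push/sw triple above is what slides the IC-loaded function underneath the already-pushed receiver to produce this shape.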
@@ -4245,9 +4122,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ li(a1, Operand(Smi::FromInt(strict_mode_flag)));
+ __ li(a1, Operand(Smi::FromInt(strict_mode())));
__ push(a1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4255,11 +4130,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ lw(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ li(a0, Operand(Smi::FromInt(SLOPPY)));
__ Push(a2, a1, a0);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4273,7 +4148,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!context_register().is(a2));
__ li(a2, Operand(var->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(v0);
}
} else {
@@ -4348,16 +4223,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4443,7 +4313,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call);
__ bind(&slow);
}
- ToNumberStub convert_stub;
+ ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
// Save result for postfix expressions.
@@ -4473,10 +4343,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4506,10 +4374,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a0, result_register()); // Value.
__ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name.
__ pop(a1); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4523,10 +4388,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ mov(a0, result_register()); // Value.
__ Pop(a2, a1); // a1 = key, a2 = receiver.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4546,16 +4411,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4565,7 +4430,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ li(a0, Operand(proxy->name()));
__ Push(cp, a0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4591,12 +4456,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(v0, if_true);
__ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
+ } else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(v0, if_false);
// Check for undetectable objects => false.
__ GetObjectType(v0, v0, a1);
@@ -4605,20 +4471,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(eq, a1, Operand(zero_reg),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ } else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, v0, a1);
Split(eq, a1, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ } else if (String::Equals(check, factory->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
+ String::Equals(check, factory->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ } else if (String::Equals(check, factory->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ JumpIfSmi(v0, if_false);
@@ -4627,14 +4493,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
+ } else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(v0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ GetObjectType(v0, v0, a1);
__ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
+ } else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(v0, if_false);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
@@ -4686,7 +4552,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
@@ -4712,7 +4578,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -4746,7 +4612,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
diff --git a/chromium/v8/src/mips/ic-mips.cc b/chromium/v8/src/mips/ic-mips.cc
index 4c1ddbd5caf..834135cc031 100644
--- a/chromium/v8/src/mips/ic-mips.cc
+++ b/chromium/v8/src/mips/ic-mips.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "codegen.h"
-#include "code-stubs.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
+#include "src/codegen.h"
+#include "src/code-stubs.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -100,7 +77,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
}
-// Helper function used from LoadIC/CallIC GenerateNormal.
+// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
@@ -229,7 +206,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ And(at, scratch,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ Branch(slow, ne, at, Operand(zero_reg));
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -338,314 +316,6 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- a1 : receiver
- // -- a2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(a1, &number, t1);
- __ GetObjectType(a1, a3, a3);
- __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, a1);
- __ Branch(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, a1);
- __ Branch(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&boolean, eq, a1, Operand(t0));
- __ LoadRoot(t1, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t1));
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // a1: function
-
- // Check that the value isn't a smi.
- __ JumpIfSmi(a1, miss);
-
- // Check that the value is a JSFunction.
- __ GetObjectType(a1, scratch, scratch);
- __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(a1, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
-
- // a0: elements
- // Search the dictionary - put result in register a1.
- GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
-
- GenerateFunctionTailCall(masm, argc, &miss, t0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
- }
-
- // Get the receiver of the function from the stack.
- __ lw(a3, MemOperand(sp, argc*kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ Push(a3, a2);
-
- // Call the entry.
- __ PrepareCEntryArgs(2);
- __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to a1 and leave the internal frame.
- __ mov(a1, v0);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ lw(a2, MemOperand(sp, argc * kPointerSize));
- __ JumpIfSmi(a2, &invoke);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- __ sw(a2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(a2, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
-
- __ bind(&do_call);
- // receiver in a1 is not used after this point.
- // a2: key
- // a1: function
-
- GenerateFunctionTailCall(masm, argc, &slow_call, a0);
-
- __ bind(&check_number_dictionary);
- // a2: key
- // a3: elements map
- // t0: elements pointer
- // Check whether the elements is a number dictionary.
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&slow_load, ne, a3, Operand(at));
- __ sra(a0, a2, kSmiTagSize);
- // a0: untagged index
- __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a2, a1, a2); // Save the key and pass the receiver and the key.
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(a2); // Restore the key.
- }
- __ mov(a1, v0);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, a2, a0, a3, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
- __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
-
- GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub,
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(a3, a2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(a2, &miss);
- __ IsObjectNameType(a2, a0, &miss);
-
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
@@ -654,9 +324,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -671,14 +339,18 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- lr : return address
// -- a0 : receiver
// -----------------------------------
- Label miss;
+ Label miss, slow;
GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
// a1: elements
- GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
+ GenerateDictionaryLoad(masm, &slow, a1, a2, v0, a3, t0);
__ Ret();
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
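
The extra label changes where a failed dictionary probe lands (a sketch of the two exit paths, assuming the usual IC contracts):

    // miss -> LoadIC::GenerateMiss: re-enters the IC machinery, which may
    //         patch the call site to a more specialized stub.
    // slow -> GenerateRuntimeGetProperty: performs the lookup through the
    //         runtime without treating the failed probe as an IC miss, so
    //         the IC is not churned for properties that legitimately need
    //         the slow path.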
@@ -726,6 +398,8 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register scratch3,
Label* unmapped_case,
Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
// Check that the receiver is a JSObject. Because of the map check
// later, we do not need to check for interceptors or whether it
// requires access checks.
@@ -739,10 +413,11 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ Branch(slow_case, ne, scratch1, Operand(zero_reg));
// Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1,
scratch2,
- Heap::kNonStrictArgumentsElementsMapRootIndex,
+ arguments_map,
slow_case,
DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
@@ -805,7 +480,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- a0 : key
@@ -830,7 +505,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -865,32 +540,6 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Load receiver.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
- __ lw(a1, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow, a3);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
- __ lw(a1, unmapped_location);
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- __ Branch(&slow, eq, a1, Operand(a3));
- GenerateFunctionTailCall(masm, argc, &slow, a3);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- ra : return address
@@ -982,7 +631,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
- masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+ masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
@@ -1135,7 +784,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1250,7 +899,7 @@ static void KeyedStoreGenerateGenericHelper(
// We have to see if the double version of the hole is present. If so
// go to the runtime.
__ Addu(address, elements,
- Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
+ Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset
- kHeapObjectTag));
__ sll(at, key, kPointerSizeLog2);
__ addu(address, address, at);
@@ -1327,7 +976,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1513,8 +1162,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -1523,9 +1171,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
@@ -1573,7 +1219,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.cc b/chromium/v8/src/mips/lithium-codegen-mips.cc
index 423ff9f5058..5edca6a3919 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.cc
+++ b/chromium/v8/src/mips/lithium-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,13 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+#include "src/v8.h"
-#include "mips/lithium-codegen-mips.h"
-#include "mips/lithium-gap-resolver-mips.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-#include "hydrogen-osr.h"
+#include "src/mips/lithium-codegen-mips.h"
+#include "src/mips/lithium-gap-resolver-mips.h"
+#include "src/code-stubs.h"
+#include "src/stub-cache.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -84,17 +84,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
- info()->CommitDependencies(code);
-}
-
-
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
}
@@ -148,24 +139,34 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
+ int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ lw(a2, MemOperand(sp, receiver_offset));
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ lw(a2, GlobalObjectOperand());
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ sw(a2, MemOperand(sp, receiver_offset));
+
__ bind(&ok);
}
}
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
- __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -175,8 +176,7 @@ bool LCodeGen::GeneratePrologue() {
if (slots > 0) {
if (FLAG_debug_code) {
__ Subu(sp, sp, Operand(slots * kPointerSize));
- __ push(a0);
- __ push(a1);
+ __ Push(a0, a1);
__ Addu(a0, sp, Operand(slots * kPointerSize));
__ li(a1, Operand(kSlotsZapValue));
Label loop;
@@ -184,8 +184,7 @@ bool LCodeGen::GeneratePrologue() {
__ Subu(a0, a0, Operand(kPointerSize));
__ sw(a1, MemOperand(a0, 2 * kPointerSize));
__ Branch(&loop, ne, a0, Operand(sp));
- __ pop(a1);
- __ pop(a0);
+ __ Pop(a0, a1);
} else {
__ Subu(sp, sp, Operand(slots * kPointerSize));
}
@@ -199,18 +198,22 @@ bool LCodeGen::GeneratePrologue() {
int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is in a1.
- __ push(a1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ push(a1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both v0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Context is returned in v0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, v0);
+ __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -224,8 +227,15 @@ bool LCodeGen::GeneratePrologue() {
MemOperand target = ContextOperand(cp, var->index());
__ sw(a0, target);
// Update the write barrier. This clobbers a3 and a0.
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, a0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
Comment(";;; End allocate local context");
@@ -257,6 +267,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -271,7 +284,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -404,7 +418,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
__ li(scratch, literal);
}
return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
__ lw(scratch, ToMemOperand(op));
return scratch;
}
@@ -440,7 +454,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (r.IsTagged()) {
Abort(kUnsupportedTaggedImmediate);
}
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
MemOperand mem_op = ToMemOperand(op);
__ ldc1(dbl_scratch, mem_op);
return dbl_scratch;
@@ -658,10 +672,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -695,7 +705,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
__ Call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -742,6 +751,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -854,46 +864,24 @@ void LCodeGen::DeoptimizeIf(Condition condition,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1064,31 +1052,19 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- __ lw(a0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -1102,208 +1078,218 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
// Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
- &left_is_not_negative, ge, left_reg, Operand(zero_reg));
- __ subu(result_reg, zero_reg, left_reg);
- __ And(result_reg, result_reg, divisor - 1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result_reg, zero_reg, result_reg);
+ __ subu(dividend, zero_reg, dividend);
+ __ And(dividend, dividend, Operand(mask));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
}
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ subu(dividend, zero_reg, dividend);
+ }
- __ bind(&left_is_not_negative);
- __ And(result_reg, left_reg, divisor - 1);
- __ bind(&done);
- } else {
- const Register scratch = scratch0();
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
+ __ bind(&dividend_is_not_negative);
+ __ And(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
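
The trade-off the comment above describes, written out as plain C++ (a sketch, not V8 code; k is assumed to be in [1, 31]):

    // Branch-free variant: r = n - trunc(n / 2^k) * 2^k, with the multiply
    // folded into a mask.
    int32_t ModPow2BranchFree(int32_t n, uint32_t k) {
      uint32_t mask = (1u << k) - 1;
      uint32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - k);  // 2^k - 1 if n < 0
      return static_cast<int32_t>((static_cast<uint32_t>(n) + bias) & mask) -
             static_cast<int32_t>(bias);
    }

    // Branching variant (what the code above emits): fast path for the
    // heavily favored non-negative dividends.
    int32_t ModPow2Branching(int32_t n, uint32_t k) {
      uint32_t mask = (1u << k) - 1;
      if (n >= 0) return static_cast<int32_t>(static_cast<uint32_t>(n) & mask);
      uint32_t r = (0u - static_cast<uint32_t>(n)) & mask;  // |n| & mask, kMinInt-safe
      return -static_cast<int32_t>(r);
    }

For example, ModPow2BranchFree(-5, 2) computes bias = 3, (-5 + 3) & 3 = 2, and returns 2 - 3 = -1, matching truncated -5 % 4.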
- // div runs in the background while we check for special cases.
- Register right_reg = EmitLoadRegister(instr->right(), scratch);
- __ div(left_reg, right_reg);
- Label done;
- // Check for x % 0, we have to deopt in this case because we can't return a
- // NaN.
- if (right->CanBeZero()) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- // Check for kMinInt % -1, we have to deopt if we care about -0, because we
- // can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
- // TODO(svenpanne) Don't deopt when we don't care about -0.
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
- __ bind(&left_not_min_int);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ Mul(result, result, Operand(Abs(divisor)));
+ __ Subu(result, dividend, Operand(result));
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+ __ bind(&remainder_not_zero);
+ }
+}
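
What the sequence above computes, as plain C++ (a sketch; the real code obtains the quotient from TruncatingDiv's multiply-high magic numbers rather than a div instruction, and the d == kMinInt case is ignored here):

    int32_t ModByConst(int32_t n, int32_t d) {  // d != 0, known at compile time
      int32_t ad = d < 0 ? -d : d;
      int32_t q = n / ad;   // stands in for TruncatingDiv(result, dividend, |d|)
      return n - q * ad;    // the remainder takes the dividend's sign, so
    }                       // n % d == n % |d| in truncated semantics

The trailing minus-zero bailout exists because a zero remainder with a negative dividend means the JS result is -0, which has no int32 representation.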
- // TODO(svenpanne) Only emit the test/deopt if we have to.
- __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
- __ mfhi(result_reg);
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ const Register left_reg = ToRegister(instr->left());
+ const Register right_reg = ToRegister(instr->right());
+ const Register result_reg = ToRegister(instr->result());
+
+ // div runs in the background while we check for special cases.
+ __ div(left_reg, right_reg);
+
+ Label done;
+ // Check for x % 0, we have to deopt in this case because we can't return a
+ // NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+ }
+
+ // Check for kMinInt % -1, div will return kMinInt, which is not what we
+ // want. We have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ } else {
+ __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(result_reg, zero_reg);
}
- __ bind(&done);
+ __ bind(&no_overflow_possible);
}
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
+ __ mfhi(result_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ }
+ __ bind(&done);
}
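
The guards in DoModI correspond to the inputs for which the hardware result cannot simply be read back from HI; in plain C++ terms (a sketch, with a hypothetical Deopt() for the bailouts):

    int32_t ModI(int32_t lhs, int32_t rhs) {
      if (rhs == 0) Deopt();                        // x % 0 is NaN in JS, not an int32
      if (lhs == INT32_MIN && rhs == -1) return 0;  // hardware div overflows; the true
                                                    // remainder is 0 (-0 if minus zero
                                                    // matters, hence the deopt there)
      return lhs % rhs;                             // otherwise HI holds this value
    }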
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, at, no_reg));
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ And(at, dividend, Operand(mask));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ Subu(result, zero_reg, dividend);
+ return;
+ }
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ Move(result, dividend);
+ } else if (shift == 1) {
+ __ srl(result, dividend, 31);
+ __ Addu(result, dividend, Operand(result));
+ } else {
+ __ sra(result, dividend, 31);
+ __ srl(result, result, 32 - shift);
+ __ Addu(result, dividend, Operand(result));
+ }
+ if (shift > 0) __ sra(result, result, shift);
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+}
+
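
The shift sequence above is the classic round-toward-zero signed division by 2^k, as plain C++ (a sketch; k in [1, 31] — the shift == 0 case is handled separately with a plain move, and negative divisors negate the quotient afterwards, exactly as the final Subu does):

    int32_t DivPow2(int32_t n, uint32_t k) {
      // The bias is 2^k - 1 for negative n and 0 otherwise; adding it before
      // the arithmetic shift turns floor division into truncating division.
      uint32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - k);
      return static_cast<int32_t>(static_cast<uint32_t>(n) + bias) >> k;
    }

The shift == 1 special case in the code (srl by 31) is this same bias computed more cheaply: for a single-bit shift the bias is just the sign bit.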
- uint32_t divisor_abs = abs(divisor);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
- DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
- }
- // Compute the remainder.
- __ Move(remainder, zero_reg);
- return;
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ sra(scratch, dividend, power - 1);
- }
- __ srl(scratch, scratch, 32 - power);
- __ Addu(scratch, dividend, Operand(scratch));
- __ sra(result, scratch, power);
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ Subu(result, zero_reg, Operand(result));
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sll(scratch, result, power);
- __ Subu(remainder, dividend, Operand(scratch));
- } else {
- __ sll(scratch, result, power);
- __ Addu(remainder, dividend, Operand(scratch));
- }
- return;
- } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer's Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division by a power
- // of two.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ li(scratch, Operand(M));
- __ mult(dividend, scratch);
- __ mfhi(scratch);
- if (M < 0) {
- __ Addu(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ sra(scratch, scratch, s);
- __ mov(scratch, scratch);
- }
- __ srl(at, dividend, 31);
- __ Addu(result, scratch, Operand(at));
- if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
- // Compute the remainder.
- __ li(scratch, Operand(divisor));
- __ Mul(scratch, result, Operand(scratch));
- __ Subu(remainder, dividend, Operand(scratch));
- } else {
- __ li(scratch, Operand(divisor));
- __ div(dividend, scratch);
- __ mfhi(remainder);
- __ mflo(result);
- }
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ Mul(scratch0(), result, Operand(divisor));
+ __ Subu(scratch0(), scratch0(), dividend);
+ DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
}
}
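
TruncatingDiv supplies trunc(n / |d|) via the multiply-high "magic number" technique (Hacker's Delight, ch. 10) that the deleted EmitSignedIntegerDivisionByConstant spelled out by hand. A minimal self-contained instance for d == 3 (illustrative only, not V8's code):

    int32_t Div3(int32_t n) {
      // 0x55555556 is the magic multiplier for 3: floor((2^32 + 2) / 3).
      int32_t q = static_cast<int32_t>(
          (static_cast<int64_t>(n) * 0x55555556LL) >> 32);  // multiply-high
      return q + (static_cast<uint32_t>(n) >> 31);          // +1 fixup for n < 0
    }

The Mul/Subu pair above then reconstructs q * divisor and deopts when it differs from the dividend, i.e. whenever the division would not have been exact.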
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
const Register result = ToRegister(instr->result());
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
- __ div(left, right);
+ __ div(dividend, divisor);
// Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
- __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
+ __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+ __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
__ bind(&left_not_min_int);
}
- if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
__ mfhi(result);
DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ __ mflo(result);
+ } else {
+ __ mflo(result);
}
- __ mflo(result);
}
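
For readers new to MIPS: div writes the quotient to the LO register (read with mflo) and the remainder to HI (read with mfhi), which is why the non-truncating path above reads HI for the exactness deopt before fetching the quotient. The kMinInt / -1 check is skipped when every use truncates, since the wrapped result kMinInt is then acceptable. The three deopt conditions DoDivI guards, restated as plain C++ (a sketch; names are illustrative):

    #include <cstdint>
    #include <limits>

    bool DivWouldDeopt(int32_t dividend, int32_t divisor,
                       bool bailout_on_minus_zero,
                       bool all_uses_truncating) {
      if (divisor == 0) return true;  // x / 0 has no int32 result.
      if (bailout_on_minus_zero && dividend == 0 && divisor < 0)
        return true;                  // 0 / -x would need -0.
      if (dividend == std::numeric_limits<int32_t>::min() &&
          divisor == -1 && !all_uses_truncating)
        return true;                  // kMinInt / -1 overflows to 2^31.
      return false;
    }
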
@@ -1319,67 +1305,151 @@ void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+ Register scratch = result.is(dividend) ? scratch0() : dividend;
+ ASSERT(!result.is(dividend) || !scratch.is(dividend));
- if (instr->right()->IsConstantOperand()) {
- Label done;
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- if (divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
- }
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(scratch, remainder, Operand(divisor));
- __ Branch(&done, ge, scratch, Operand(zero_reg));
- __ Subu(result, result, Operand(1));
- __ bind(&done);
- } else {
- Label done;
- const Register right = ToRegister(instr->right());
+ // If the divisor is 1, return the dividend.
+ if (divisor == 1) {
+ __ Move(result, dividend);
+ return;
+ }
- // On MIPS div is asynchronous - it will run in the background while we
- // check for special cases.
- __ div(left, right);
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sra(result, dividend, shift);
+ return;
+ }
- // Check for x / 0.
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ // If the divisor is negative, we have to negate and handle edge cases.
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
- __ bind(&left_not_zero);
- }
+ // dividend can be the same register as result, so save its value here
+ // for the overflow check below.
+ __ Move(scratch, dividend);
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
- __ bind(&left_not_min_int);
+ __ Subu(result, zero_reg, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ }
+
+ // Dividing by -1 is basically negation, unless we overflow.
+ __ Xor(scratch, scratch, result);
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ DeoptimizeIf(ge, instr->environment(), scratch, Operand(zero_reg));
}
+ return;
+ }
- __ mfhi(remainder);
- __ mflo(result);
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ sra(result, result, shift);
+ return;
+ }
- // We performed a truncating division. Correct the result if necessary.
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(scratch, remainder, Operand(right));
- __ Branch(&done, ge, scratch, Operand(zero_reg));
- __ Subu(result, result, Operand(1));
- __ bind(&done);
+ Label no_overflow, done;
+ __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
+ __ li(result, Operand(kMinInt / divisor));
+ __ Branch(&done);
+ __ bind(&no_overflow);
+ __ sra(result, result, shift);
+ __ bind(&done);
+}
+
+
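
The positive-divisor fast path in DoFlooringDivByPowerOf2I above works because an arithmetic right shift already rounds toward negative infinity, which is exactly flooring division; only negative divisors need the negate-and-handle-kMinInt dance. A two-line sketch (assumes arithmetic shift of negative values, guaranteed since C++20 and universal in practice):

    #include <cstdint>

    int32_t FlooringDivByPowerOf2(int32_t x, int shift) {  // divisor == 1 << shift
      return x >> shift;  // e.g. -3 >> 1 == -2 == floor(-3.0 / 2.0).
    }
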
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
+ dividend, Operand(zero_reg));
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
+}
+
+
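
The adjust-before-and-after trick in DoFlooringDivByConstI comes down to this: truncating and flooring division agree when dividend and divisor have the same sign, and otherwise flooring equals trunc((dividend +/- 1) / divisor) - 1. A sketch matching the structure of the generated code (illustrative C++, not V8 code):

    #include <cstdint>

    int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
      bool needs_adjustment =
          (divisor > 0) ? (dividend < 0) : (dividend > 0);
      if (!needs_adjustment) {
        return dividend / divisor;  // Signs agree: trunc == floor.
      }
      int32_t temp = dividend + (divisor > 0 ? 1 : -1);
      return temp / divisor - 1;    // Signs differ: bias, divide, correct.
    }
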
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ const Register result = ToRegister(instr->result());
+
+ // On MIPS, div is asynchronous - it will run in the background while we
+ // check for special cases.
+ __ div(dividend, divisor);
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
}
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label left_not_zero;
+ __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
+ // We performed a truncating division. Correct the result if necessary.
+ Label done;
+ Register remainder = scratch0();
+ __ mfhi(remainder);
+ __ mflo(result);
+ __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+ __ Xor(remainder, remainder, Operand(divisor));
+ __ Branch(&done, ge, remainder, Operand(zero_reg));
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
}
@@ -1505,7 +1575,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister(instr->result());
Operand right(no_reg);
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, at));
} else {
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
@@ -1627,7 +1697,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, at);
__ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
} else {
@@ -1637,9 +1707,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
} else { // can_overflow.
Register overflow = scratch0();
Register scratch = scratch1();
- if (right->IsStackSlot() ||
- right->IsArgument() ||
- right->IsConstantOperand()) {
+ if (right->IsStackSlot() || right->IsConstantOperand()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ SubuAndCheckForOverflow(ToRegister(result),
ToRegister(left),
@@ -1683,9 +1751,9 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value(isolate());
+ Handle<Object> object = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
- __ li(ToRegister(instr->result()), value);
+ __ li(ToRegister(instr->result()), object);
}
@@ -1696,41 +1764,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ Move(result, input);
- __ JumpIfSmi(input, &done);
- }
-
- // If the object is not a value type, return the object.
- __ GetObjectType(input, map, map);
- __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
- __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -1847,17 +1880,6 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->value()));
- ASSERT(ToRegister(instr->context()).is(cp));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-}
-
-
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
@@ -1865,7 +1887,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, at);
__ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
} else {
@@ -1876,7 +1898,6 @@ void LCodeGen::DoAddI(LAddI* instr) {
Register overflow = scratch0();
Register scratch = scratch1();
if (right->IsStackSlot() ||
- right->IsArgument() ||
right->IsConstantOperand()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ AdduAndCheckForOverflow(ToRegister(result),
@@ -1904,20 +1925,19 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Register left_reg = ToRegister(left);
- Operand right_op = (right->IsRegister() || right->IsConstantOperand())
- ? ToOperand(right)
- : Operand(EmitLoadRegister(right, at));
+ Register right_reg = EmitLoadRegister(right, scratch0());
Register result_reg = ToRegister(instr->result());
Label return_right, done;
- if (!result_reg.is(left_reg)) {
- __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
- __ mov(result_reg, left_reg);
- __ Branch(&done);
+ Register scratch = scratch1();
+ __ Slt(scratch, left_reg, Operand(right_reg));
+ if (condition == ge) {
+ __ Movz(result_reg, left_reg, scratch);
+ __ Movn(result_reg, right_reg, scratch);
+ } else {
+ ASSERT(condition == le);
+ __ Movn(result_reg, left_reg, scratch);
+ __ Movz(result_reg, right_reg, scratch);
}
- __ Branch(&done, condition, left_reg, right_op);
- __ bind(&return_right);
- __ Addu(result_reg, zero_reg, right_op);
- __ bind(&done);
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
FPURegister left_reg = ToDoubleRegister(left);
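
The rewritten integer path of DoMathMinMax above replaces a compare-and-branch with a branchless select: Slt materializes (left < right) into a register and the Movz/Movn pair then picks an operand on that flag. An equivalent sketch (a compiler typically lowers these ternaries to conditional moves, just as Movz/Movn do):

    #include <cstdint>

    int32_t BranchlessMinMax(int32_t left, int32_t right, bool want_max) {
      int32_t lt = left < right;   // __ Slt(scratch, left_reg, right_reg)
      if (want_max) {              // condition == ge
        return lt ? right : left;  // Movz keeps left when lt == 0, Movn takes right.
      }
      return lt ? left : right;    // condition == le: the roles are swapped.
    }
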
@@ -1982,12 +2002,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ MultiPush(saved_regs);
__ PrepareCallCFunction(0, 2, scratch0());
- __ SetCallCDoubleArguments(left, right);
+ __ MovToFloatParameters(left, right);
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
- __ GetCFunctionDoubleResult(result);
+ __ MovFromFloatResult(result);
// Restore saved register.
__ MultiPop(saved_regs);
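
The call change routes Token::MOD to a dedicated two-double modulus helper; judging by the name, mod_two_doubles_operation computes the C fmod of the two arguments, whose result carries the dividend's sign. For example:

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%g\n", std::fmod(5.5, 2.0));   // 1.5
      std::printf("%g\n", std::fmod(-5.5, 2.0));  // -1.5: sign of the dividend.
      return 0;
    }
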
@@ -2006,8 +2026,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// Other architectures use a nop here to signal that there is no inlined
// patchable code. MIPS does not need the nop, since our marker
// instruction (andi zero_reg) will never be used in normal code.
@@ -2263,7 +2283,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- Condition cond = TokenToCondition(instr->op(), false);
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op(), is_unsigned);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
@@ -2307,8 +2330,8 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
cmp_left = ToRegister(right);
cmp_right = Operand(value);
}
- // We transposed the operands. Reverse the condition.
- cond = ReverseCondition(cond);
+ // We commuted the operands, so commute the condition.
+ cond = CommuteCondition(cond);
} else {
cmp_left = ToRegister(left);
cmp_right = Operand(ToRegister(right));
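
The rename from ReverseCondition to CommuteCondition matters: swapping the operands of a comparison maps lt <-> gt and le <-> ge while leaving eq and ne alone, which is not the same as negating the condition (lt vs. ge). A sketch of the commuting map:

    enum Cond { lt, le, eq, ne, ge, gt };

    // Condition for "b OP' a" equivalent to "a OP b" with operands swapped.
    Cond Commute(Cond c) {
      switch (c) {
        case lt: return gt;
        case le: return ge;
        case ge: return le;
        case gt: return lt;
        default: return c;  // eq and ne commute to themselves.
      }
    }
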
@@ -2429,7 +2452,7 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register temp1 = ToRegister(instr->temp());
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond =
EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
@@ -2450,7 +2473,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
__ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -2517,7 +2540,7 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
@@ -2648,8 +2671,8 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
Register result = ToRegister(instr->result());
ASSERT(result.is(v0));
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Branch(&true_label, eq, result, Operand(zero_reg));
__ li(result, Operand(factory()->false_value()));
@@ -2706,10 +2729,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
- __ Branch(&cache_miss, ne, map, Operand(at));
+ __ BranchShort(&cache_miss, ne, map, Operand(at));
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array, to force relocation so that the code can later be patched
- // with true or false.
+ // with true or false. The distance from the map check has to be constant.
__ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
__ Branch(&done);
@@ -2749,7 +2772,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
flags | InstanceofStub::kCallSiteInlineCheck);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
+ InstanceofStub stub(isolate(), flags);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
@@ -2769,7 +2792,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
__ StoreToSafepointRegisterSlot(temp, temp);
}
- CallCodeGeneric(stub.GetCode(isolate()),
+ CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -2861,10 +2884,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2893,18 +2915,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->global_object()).is(a1));
- ASSERT(ToRegister(instr->value()).is(a0));
-
- __ li(a2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
@@ -2948,7 +2958,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ sw(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
@@ -2999,7 +3009,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in a2.
__ li(a2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3053,15 +3063,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->object());
- __ lw(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
@@ -3124,10 +3125,13 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+ int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ int base_offset = instr->base_offset();
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
__ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
@@ -3135,44 +3139,53 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), base_offset));
__ cvt_d_s(result, result);
} else { // i.e. a double elements kind (loading doubles, not floats).
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ __ ldc1(result, MemOperand(scratch0(), base_offset));
}
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
+ element_size_shift, shift_size, base_offset);
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
__ lb(result, mem_operand);
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ lbu(result, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ lh(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ lhu(result, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ lw(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
DeoptimizeIf(Ugreater_equal, instr->environment(),
result, Operand(0x80000000));
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3180,7 +3193,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3197,15 +3210,13 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int base_offset =
- FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- (instr->additional_index() << element_size_shift);
+ int base_offset = instr->base_offset();
if (key_is_constant) {
int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- base_offset += constant_key << element_size_shift;
+ base_offset += constant_key * kDoubleSize;
}
__ Addu(scratch, elements, Operand(base_offset));
@@ -3220,7 +3231,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ ldc1(result, MemOperand(scratch));
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+ __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
}
}
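
Holes in fast double arrays are encoded as a NaN with a fixed bit pattern, so only the upper 32 bits of the loaded double need to be compared; the switch from sizeof(kHoleNanLower32) to the named kHoleNanUpper32Offset makes the word-order assumption explicit. A sketch of the check (the constant below is an assumed value, for illustration only):

    #include <cstdint>
    #include <cstring>

    bool IsHoleNan(double d) {
      const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;  // Assumed hole pattern.
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }
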
@@ -3231,12 +3242,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Register store_base = scratch;
- int offset = 0;
+ int offset = instr->base_offset();
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ offset += ToInteger32(const_operand) * kPointerSize;
store_base = elements;
} else {
Register key = ToRegister(instr->key());
@@ -3251,9 +3261,8 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ sll(scratch, key, kPointerSizeLog2);
__ addu(scratch, elements, scratch);
}
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
- __ lw(result, FieldMemOperand(store_base, offset));
+ __ lw(result, MemOperand(store_base, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -3269,7 +3278,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3285,19 +3294,12 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
int constant_key,
int element_size,
int shift_size,
- int additional_index,
- int additional_offset) {
- if (additional_index != 0 && !key_is_constant) {
- additional_index *= 1 << (element_size - shift_size);
- __ Addu(scratch0(), key, Operand(additional_index));
- }
-
+ int base_offset) {
if (key_is_constant) {
- return MemOperand(base,
- (constant_key << element_size) + additional_offset);
+ return MemOperand(base, (constant_key << element_size) + base_offset);
}
- if (additional_index == 0) {
+ if (base_offset == 0) {
if (shift_size >= 0) {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), base, scratch0());
@@ -3311,14 +3313,14 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
if (shift_size >= 0) {
- __ sll(scratch0(), scratch0(), shift_size);
+ __ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
+ return MemOperand(scratch0(), base_offset);
} else {
ASSERT_EQ(-1, shift_size);
- __ srl(scratch0(), scratch0(), 1);
+ __ sra(scratch0(), key, 1);
__ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
+ return MemOperand(scratch0(), base_offset);
}
}
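
After this refactor PrepareKeyedOperand deals only in byte offsets: the effective address is base + (key << shift) + base_offset, with the constant part folded into the MemOperand immediate, and the old element-count fixup for additional_index disappears. The computation, restated as a sketch (shift_size is -1 when a smi key must be untagged):

    #include <cstdint>

    uintptr_t KeyedAddress(uintptr_t base, int32_t key, bool key_is_constant,
                           int32_t constant_key, int element_size_shift,
                           int shift_size, int32_t base_offset) {
      if (key_is_constant) {
        return base + (constant_key << element_size_shift) + base_offset;
      }
      intptr_t scaled = (shift_size >= 0)
          ? static_cast<intptr_t>(key) << shift_size
          : static_cast<intptr_t>(key >> 1);  // sra by 1 untags a smi key.
      return base + scaled + base_offset;
    }
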
@@ -3387,19 +3389,21 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// passed unchanged to builtins and strict-mode functions.
Label global_object, result_in_receiver;
- // Do not transform the receiver to object for strict mode
- // functions.
- __ lw(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lw(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ lw(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- // Do not transform the receiver to object for builtins.
- int32_t strict_mode_function_mask =
- 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
- int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
- __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
- __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
+ // Do not transform the receiver to object for builtins.
+ int32_t strict_mode_function_mask =
+ 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+ __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
+ __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
+ }
// Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
@@ -3414,14 +3418,15 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr->environment(),
scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(&result_in_receiver);
+ __ Branch(&result_in_receiver);
__ bind(&global_object);
-
- __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ lw(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ lw(result,
+ ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
__ lw(result,
- FieldMemOperand(result, JSGlobalObject::kGlobalReceiverOffset));
+ FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+
if (result.is(receiver)) {
__ bind(&result_in_receiver);
} else {
@@ -3478,8 +3483,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3517,35 +3521,13 @@ void LCodeGen::DoContext(LContext* instr) {
}
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ lw(result,
- MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
// The context is the first argument.
__ Push(cp, scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global_object());
- Register result = ToRegister(instr->result());
- __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3553,7 +3535,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
A1State a1_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3577,7 +3558,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ SetCallKind(t1, call_kind);
__ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(at);
@@ -3587,24 +3567,11 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- __ mov(a0, v0);
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- A1_UNINITIALIZED);
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
ASSERT(instr->context() != NULL);
ASSERT(ToRegister(instr->context()).is(cp));
@@ -3649,7 +3616,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(v0))
@@ -3869,22 +3836,23 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f0));
if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(a2, &no_deopt);
__ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
__ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
@@ -3905,46 +3873,18 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ Clz(result, input);
}
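
DoMathClz32 lowers JavaScript's Math.clz32 to the single MIPS Clz instruction. Its semantics, spelled out as a sketch:

    #include <cstdint>

    int32_t Clz32(uint32_t x) {
      if (x == 0) return 32;  // All 32 bits are leading zeros.
      int32_t n = 0;
      while ((x & 0x80000000u) == 0) {
        x <<= 1;
        ++n;
      }
      return n;
    }
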
@@ -3958,79 +3898,66 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
A1_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->result()).is(v0));
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ li(a2, Operand(instr->name()));
- CallCode(ic, mode, instr);
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ mov(sp, fp);
- __ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
- } else {
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ li(a0, Operand(instr->arity()));
}
-}
+ // Change context.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->result()).is(v0));
+ // Load the code entry address.
+ __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(at);
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ li(a2, Operand(instr->name()));
- CallCode(ic, mode, instr);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- A1_UNINITIALIZED);
+
+ int arity = instr->arity();
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4041,10 +3968,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ li(a0, Operand(instr->arity()));
// No cell in a2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ li(a2, Operand(undefined_value));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4054,17 +3980,16 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a0, Operand(instr->arity()));
- __ li(a2, Operand(instr->hydrogen()->property_cell()));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4075,19 +4000,20 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ Branch(&packed_case, eq, t1, Operand(zero_reg));
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -4135,46 +4061,38 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
return;
}
- Handle<Map> transition = instr->transition();
+ __ AssertNotSmi(object);
- if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- Register value = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ SmiTst(value, scratch);
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
- }
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- ASSERT(transition.is_null());
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
DoubleRegister value = ToDoubleRegister(instr->value());
__ sdc1(value, FieldMemOperand(object, offset));
return;
}
- if (!transition.is_null()) {
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
+ AddDeprecationDependency(transition);
__ li(scratch, Operand(transition));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
// Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- scratch,
- temp,
- GetRAState(),
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteForMap(object,
+ scratch,
+ temp,
+ GetRAState(),
+ kSaveFPRegs);
}
}
// Do the store.
Register value = ToRegister(instr->value());
- ASSERT(!object.is(value));
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -4187,7 +4105,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
} else {
__ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -4203,7 +4122,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
}
@@ -4216,49 +4136,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in a2.
__ li(a2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::ApplyCheckIf(Condition condition,
- LBoundsCheck* check,
- Register src1,
- const Operand& src2) {
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+ Operand operand(0);
+ Register reg;
+ if (instr->index()->IsConstantOperand()) {
+ operand = ToOperand(instr->index());
+ reg = ToRegister(instr->length());
+ cc = CommuteCondition(cc);
+ } else {
+ reg = ToRegister(instr->index());
+ operand = ToOperand(instr->length());
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
Label done;
- __ Branch(&done, NegateCondition(condition), src1, src2);
+ __ Branch(&done, NegateCondition(cc), reg, operand);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(condition, check->environment(), src1, src2);
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsSmi()) {
- __ li(at, Operand(Smi::FromInt(constant_index)));
- } else {
- __ li(at, Operand(constant_index));
- }
- ApplyCheckIf(condition,
- instr,
- at,
- Operand(ToRegister(instr->length())));
- } else {
- ApplyCheckIf(condition,
- instr,
- ToRegister(instr->index()),
- Operand(ToRegister(instr->length())));
+ DeoptimizeIf(cc, instr->environment(), reg, operand);
}
}
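
The rewritten DoBoundsCheck keeps one unsigned compare for both operand orders: when the index is a constant the operands arrive swapped, so the condition is commuted rather than rebuilt. Unsigned comparison also rejects negative indices for free, since they wrap to large uint32 values. The predicate, restated (sketch):

    #include <cstdint>

    bool BoundsCheckFails(int32_t index, int32_t length, bool allow_equality) {
      uint32_t i = static_cast<uint32_t>(index);  // Negative index wraps high.
      uint32_t len = static_cast<uint32_t>(length);
      return allow_equality ? (i > len)    // hi: deopt only strictly above.
                            : (i >= len);  // hs: deopt when above or same.
    }
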
@@ -4280,10 +4181,12 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+ int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
Register address = scratch0();
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4298,34 +4201,44 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ Addu(address, external_pointer, address);
}
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(address, additional_offset));
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(address, additional_offset));
+ __ swc1(double_scratch0(), MemOperand(address, base_offset));
+ } else { // Storing doubles, not floats.
+ __ sdc1(value, MemOperand(address, base_offset));
}
} else {
Register value(ToRegister(instr->value()));
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
+ base_offset);
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
__ sb(value, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ sh(value, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ sw(value, mem_operand);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4333,7 +4246,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4347,6 +4260,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Register scratch = scratch0();
DoubleRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
+ int base_offset = instr->base_offset();
Label not_nan, done;
// Calculate the effective address of the slot in the array to store the
@@ -4358,13 +4272,11 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Abort(kArrayIndexConstantValueTooBig);
}
__ Addu(scratch, elements,
- Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand((constant_key << element_size_shift) + base_offset));
} else {
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- __ Addu(scratch, elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch, elements, Operand(base_offset));
__ sll(at, ToRegister(instr->key()), shift_size);
__ Addu(scratch, scratch, at);
}
@@ -4377,16 +4289,14 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
// Only load canonical NaN if the comparison above set the overflow.
__ bind(&is_nan);
- __ Move(double_scratch,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
- element_size_shift));
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ sdc1(double_scratch, MemOperand(scratch, 0));
__ Branch(&done);
}
__ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
- element_size_shift));
+ __ sdc1(value, MemOperand(scratch, 0));
__ bind(&done);
}
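
The store path canonicalizes NaNs before they reach a double array's backing store, since an arbitrary NaN payload could otherwise alias the hole's bit pattern; the rewrite loads the canonical NaN from the heap's NaN root instead of materializing an immediate. In effect (sketch; the canonical bit pattern below is an assumption for illustration):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    double CanonicalizeForStore(double value) {
      if (!std::isnan(value)) return value;
      const uint64_t kCanonicalNaN = 0x7FF8000000000000ull;  // Assumed.
      double canonical;
      std::memcpy(&canonical, &kCanonicalNaN, sizeof(canonical));
      return canonical;
    }
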
@@ -4398,14 +4308,13 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
: no_reg;
Register scratch = scratch0();
Register store_base = scratch;
- int offset = 0;
+ int offset = instr->base_offset();
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ offset += ToInteger32(const_operand) * kPointerSize;
store_base = elements;
} else {
// Even though the HLoadKeyed instruction forces the input
@@ -4419,30 +4328,30 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
__ sll(scratch, key, kPointerSizeLog2);
__ addu(scratch, elements, scratch);
}
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
- __ sw(value, FieldMemOperand(store_base, offset));
+ __ sw(value, MemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
+ __ Addu(key, store_base, Operand(offset));
__ RecordWrite(elements,
key,
value,
GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ check_needed,
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4458,7 +4367,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = (instr->strict_mode() == STRICT)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4483,18 +4392,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ li(new_map_reg, Operand(to_map));
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- scratch, GetRAState(), kDontSaveFPRegs);
+ __ RecordWriteForMap(object_reg,
+ new_map_reg,
+ scratch,
+ GetRAState(),
+ kDontSaveFPRegs);
} else {
+ ASSERT(object_reg.is(a0));
ASSERT(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
- __ mov(a0, object_reg);
__ li(a1, Operand(to_map));
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
__ bind(&not_applicable);
}
@@ -4513,18 +4426,12 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
- if (FLAG_new_string_add) {
- ASSERT(ToRegister(instr->left()).is(a1));
- ASSERT(ToRegister(instr->right()).is(a0));
- NewStringAddStub stub(instr->hydrogen()->flags(),
- isolate()->heap()->GetPretenureMode());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
+ ASSERT(ToRegister(instr->left()).is(a1));
+ ASSERT(ToRegister(instr->right()).is(a0));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4575,7 +4482,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
instr->context());
__ AssertSmi(v0);
__ SmiUntag(v0);
@@ -4651,22 +4558,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- Register scratch = scratch0();
-
- ASSERT(output->IsRegister());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
- DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
- } else {
- __ SmiTag(ToRegister(output), ToRegister(input));
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4677,28 +4568,17 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- Register scratch = scratch0();
- __ And(scratch, ToRegister(input), Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
- }
- __ SmiTag(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4722,9 +4602,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4741,18 +4623,19 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
DoubleRegister dbl_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4769,37 +4652,41 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
}
if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, t1);
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
__ Branch(&done);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, zero_reg);
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ Subu(v0, v0, kHeapObjectTag);
+ __ StoreToSafepointRegisterSlot(v0, dst);
+ }
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(zero_reg, dst);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Move(dst, v0);
- __ Subu(dst, dst, kHeapObjectTag);
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
__ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
__ Addu(dst, dst, kHeapObjectTag);
- __ StoreToSafepointRegisterSlot(dst, dst);
}
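
The deferred path above runs once smi tagging has already overflowed: as the comment notes, tagging shifts the 32-bit value left by one, which overflows exactly when bits 30 and 31 of the source disagree. `SmiTagCheckOverflow` (also used by the new `DoSmiTag` below) detects this with a sign test; a hedged sketch, with the exact register protocol living in the MIPS macro-assembler:

    int32_t tagged   = value << 1;       // SmiTag: payload shifted, low bit 0
    int32_t overflow = value ^ tagged;   // sign bit set iff bit31 != bit30
    if (overflow < 0) {
      // Not representable as a 31-bit smi: DeoptimizeIf(lt, ..., zero_reg)
      // fires, or the deferred heap-number allocation above takes over.
    }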
@@ -4848,11 +4735,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Subu(v0, v0, kHeapObjectTag);
@@ -4861,8 +4748,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ And(at, input, Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTagCheckOverflow(output, input, at);
+ DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+ } else {
+ __ SmiTag(output, input);
+ }
}
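
For uint32 inputs the new `DoSmiTag` masks with 0xc0000000 before tagging: a 32-bit smi carries a signed 31-bit payload, so an unsigned value is representable only when it is below 2^30, i.e. when its top two bits are clear. In plain C++ terms:

    bool fits_in_smi = (x & 0xC0000000u) == 0;   // holds iff x < 2^30
    // The generated code deopts on the opposite condition
    // (ne against zero_reg) and then falls through to the plain SmiTag.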
@@ -4955,8 +4855,9 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
Label no_heap_number, check_bools, check_false;
- __ Branch(&no_heap_number, ne, scratch1, Operand(at)); // HeapNumber map?
- __ mov(scratch2, input_reg);
+ // Check HeapNumber map.
+ __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+ __ mov(scratch2, input_reg); // In delay slot.
__ TruncateHeapNumberToI(input_reg, scratch2);
__ Branch(&done);
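
The two-line change above is a standard MIPS scheduling trick: the instruction slot after a branch executes whether or not the branch is taken, and `USE_DELAY_SLOT` tells the macro-assembler to let the following `mov` occupy that slot instead of a `nop`. The move is harmless on the taken path (scratch2 goes unused there), so the sequence saves one instruction:

    //  without USE_DELAY_SLOT:              with USE_DELAY_SLOT:
    //    bne  scratch1, at, no_heap_number    bne  scratch1, at, no_heap_number
    //    nop                     (wasted)     mov  scratch2, input_reg  (useful)
    //    mov  scratch2, input_reg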
@@ -5143,7 +5044,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
@@ -5213,7 +5114,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(object);
__ mov(cp, zero_reg);
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(v0, scratch0());
@@ -5241,7 +5142,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register object_;
};
- if (instr->hydrogen()->CanOmitMapChecks()) return;
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
@@ -5249,20 +5157,20 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- for (int i = 0; i < map_set.size() - 1; i++) {
- Handle<Map> map = map_set.at(i).handle();
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
__ CompareMapAndBranch(map_reg, map, &success, eq, &success);
}
- Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
// Do the CompareMap() directly within the Branch() and DeoptimizeIf().
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
@@ -5322,6 +5230,25 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ FmoveHigh(result_reg, value_reg);
+ } else {
+ __ FmoveLow(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ __ Move(result_reg, lo_reg, hi_reg);
+}
+
+
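
The new `DoDoubleBits`/`DoConstructDouble` pair splits an IEEE-754 double into its two 32-bit halves and rebuilds one from them, via FPU-to-GPR moves on MIPS. A portable sketch of what the two instructions compute:

    #include <cstdint>
    #include <cstring>

    uint32_t DoubleHigh(double d) {   // ~ FmoveHigh: sign + exponent word
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return static_cast<uint32_t>(bits >> 32);
    }

    double ConstructDouble(uint32_t hi, uint32_t lo) {  // ~ __ Move(fd, lo, hi)
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }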
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5411,7 +5338,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Push(Smi::FromInt(size));
+ if (size >= 0 && size <= Smi::kMaxValue) {
+ __ Push(Smi::FromInt(size));
+ } else {
+ // We should never get here at runtime => abort
+ __ stop("invalid allocation size");
+ return;
+ }
}
int flags = AllocateDoubleAlignFlag::encode(
@@ -5429,7 +5362,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5463,7 +5396,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ li(t1, Operand(instr->hydrogen()->pattern()));
__ li(t0, Operand(instr->hydrogen()->flags()));
__ Push(t3, t2, t1, t0);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(a1, v0);
__ bind(&materialized);
@@ -5476,7 +5409,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ li(a0, Operand(Smi::FromInt(size)));
__ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(a1);
__ bind(&allocated);
@@ -5501,16 +5434,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ li(a2, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ li(a2, Operand(instr->hydrogen()->shared_info()));
__ li(a1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, a2, a1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5533,8 +5467,8 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
instr->FalseLabel(chunk_),
input,
instr->type_literal(),
- cmp1,
- cmp2);
+ &cmp1,
+ &cmp2);
ASSERT(cmp1.is_valid());
ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
@@ -5549,22 +5483,23 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name,
- Register& cmp1,
- Operand& cmp2) {
+ Register* cmp1,
+ Operand* cmp2) {
// This function utilizes the delay slot heavily. This is used to load
// values that are always usable without depending on the type of the input
// register.
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label);
__ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- cmp1 = input;
- cmp2 = Operand(at);
+ *cmp1 = input;
+ *cmp2 = Operand(at);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->string_string())) {
+ } else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, input, scratch);
__ Branch(USE_DELAY_SLOT, false_label,
@@ -5573,32 +5508,33 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
// other branch.
__ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
__ And(at, at, 1 << Map::kIsUndetectable);
- cmp1 = at;
- cmp2 = Operand(zero_reg);
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->symbol_string())) {
+ } else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, input, scratch);
- cmp1 = scratch;
- cmp2 = Operand(SYMBOL_TYPE);
+ *cmp1 = scratch;
+ *cmp2 = Operand(SYMBOL_TYPE);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->boolean_string())) {
+ } else if (String::Equals(type_name, factory->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- cmp1 = at;
- cmp2 = Operand(input);
+ *cmp1 = at;
+ *cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
- cmp1 = at;
- cmp2 = Operand(input);
+ *cmp1 = at;
+ *cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->undefined_string())) {
+ } else if (String::Equals(type_name, factory->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
// The first instruction of JumpIfSmi is an And - it is safe in the delay
@@ -5608,20 +5544,20 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
__ And(at, at, 1 << Map::kIsUndetectable);
- cmp1 = at;
- cmp2 = Operand(zero_reg);
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg);
final_branch_condition = ne;
- } else if (type_name->Equals(heap()->function_string())) {
+ } else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, scratch, input);
__ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
- cmp1 = input;
- cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+ *cmp1 = input;
+ *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->object_string())) {
+ } else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
@@ -5637,13 +5573,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
// Check for undetectable objects => false.
__ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, 1 << Map::kIsUndetectable);
- cmp1 = at;
- cmp2 = Operand(zero_reg);
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg);
final_branch_condition = eq;
} else {
- cmp1 = at;
- cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
__ Branch(false_label);
}
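
The `Register&`/`Operand&` to `Register*`/`Operand*` conversion running through this function is mechanical but deliberate: Google/Chromium C++ style of this era required output parameters to be passed by pointer, so mutation is visible at every call site. Compare the call in DoTypeofIsAndBranch above:

    EmitTypeofIs(t, f, input, literal, cmp1, cmp2);    // by reference: writes hidden
    EmitTypeofIs(t, f, input, literal, &cmp1, &cmp2);  // by pointer: writes explicit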
@@ -5680,23 +5616,24 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
}
}
+ last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5733,7 +5670,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -5769,11 +5706,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -5782,7 +5715,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5874,13 +5806,60 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object, index);
+ __ mov(cp, zero_reg);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, result, object, index);
+
Label out_of_object, done;
+
+ __ And(scratch, index, Operand(Smi::FromInt(1)));
+ __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
+ __ sra(index, index, 1);
+
__ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
__ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
@@ -5896,10 +5875,26 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ Subu(scratch, result, scratch);
__ lw(result, FieldMemOperand(scratch,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
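
The prologue added to `DoLoadFieldByIndex` implies an index encoding: bit 0 of the index flags a mutable heap-number field, which is handled out of line through Runtime::kLoadMutableDouble, and the arithmetic shift recovers the real field index. Decoded, under that reading of the And/Branch/sra sequence:

    bool needs_deferred = (field_index & 1) != 0;  // mutable double => runtime call
    int  real_index     = field_index >> 1;        // negative => out-of-object slot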
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ li(at, scope_info);
+ __ Push(at, ToRegister(instr->function()));
+ CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.h b/chromium/v8/src/mips/lithium-codegen-mips.h
index 71cc34fb8b4..d70c871265c 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.h
+++ b/chromium/v8/src/mips/lithium-codegen-mips.h
@@ -1,40 +1,17 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#include "deoptimizer.h"
-#include "mips/lithium-gap-resolver-mips.h"
-#include "mips/lithium-mips.h"
-#include "lithium-codegen.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "v8utils.h"
+#include "src/deoptimizer.h"
+#include "src/mips/lithium-gap-resolver-mips.h"
+#include "src/mips/lithium-mips.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -124,9 +101,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -138,6 +117,10 @@ class LCodeGen: public LCodeGenBase {
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -149,8 +132,7 @@ class LCodeGen: public LCodeGenBase {
int constant_key,
int element_size,
int shift_size,
- int additional_index,
- int additional_offset);
+ int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -161,9 +143,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
@@ -182,8 +162,6 @@ class LCodeGen: public LCodeGenBase {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(BailoutReason reason);
-
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
@@ -243,7 +221,6 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
A1State a1_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -260,10 +237,6 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* environment,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void ApplyCheckIf(Condition condition,
- LBoundsCheck* check,
- Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
@@ -272,7 +245,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -344,8 +316,8 @@ class LCodeGen: public LCodeGenBase {
Label* false_label,
Register input,
Handle<String> type_name,
- Register& cmp1,
- Operand& cmp2);
+ Register* cmp1,
+ Operand* cmp2);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -424,12 +396,20 @@ class LCodeGen: public LCodeGenBase {
codegen_->expected_safepoint_kind_ = kind;
switch (codegen_->expected_safepoint_kind_) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PushSafepointRegisters();
+ case Safepoint::kWithRegisters: {
+ StoreRegistersStateStub stub1(codegen_->masm_->isolate(),
+ kDontSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub1);
break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PushSafepointRegistersAndDoubles();
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ StoreRegistersStateStub stub2(codegen_->masm_->isolate(),
+ kSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub2);
break;
+ }
default:
UNREACHABLE();
}
@@ -439,12 +419,20 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
ASSERT((kind & Safepoint::kWithRegisters) != 0);
switch (kind) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PopSafepointRegisters();
+ case Safepoint::kWithRegisters: {
+ RestoreRegistersStateStub stub1(codegen_->masm_->isolate(),
+ kDontSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub1);
break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PopSafepointRegistersAndDoubles();
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ RestoreRegistersStateStub stub2(codegen_->masm_->isolate(),
+ kSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub2);
break;
+ }
default:
UNREACHABLE();
}
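
Both switch statements above trade the inline Push/PopSafepointRegisters(AndDoubles) sequences for calls into Store/RestoreRegistersStateStub; note the explicit `push(ra)` before each CallStub, since the stub call itself clobbers the return-address register. The enclosing scope keeps its RAII shape, so every exit from a safepoint region restores register state:

    {
      PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
      // ... runtime call that may GC and move objects ...
    }  // ~scope: RestoreRegistersStateStub runs here, even on early return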
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
index 3ee74866c75..6447520c1b3 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "mips/lithium-gap-resolver-mips.h"
-#include "mips/lithium-codegen-mips.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/mips/lithium-gap-resolver-mips.h"
+#include "src/mips/lithium-codegen-mips.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.h b/chromium/v8/src/mips/lithium-gap-resolver-mips.h
index ea1ea3cbbf2..0072e526cb1 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.h
+++ b/chromium/v8/src/mips/lithium-gap-resolver-mips.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium.h"
+#include "src/lithium.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/mips/lithium-mips.cc b/chromium/v8/src/mips/lithium-mips.cc
index 0358feeef55..830fc9152df 100644
--- a/chromium/v8/src/mips/lithium-mips.cc
+++ b/chromium/v8/src/mips/lithium-mips.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-allocator-inl.h"
-#include "mips/lithium-mips.h"
-#include "mips/lithium-codegen-mips.h"
-#include "hydrogen-osr.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/lithium-allocator-inl.h"
+#include "src/mips/lithium-mips.h"
+#include "src/mips/lithium-codegen-mips.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -261,7 +238,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -282,7 +259,18 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -307,28 +295,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[a2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -365,7 +331,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -376,7 +342,7 @@ void LLoadKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
@@ -388,7 +354,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
+ stream->Add(" + %d] <-", base_offset());
} else {
stream->Add("] <- ");
}
@@ -462,7 +428,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(BailoutReason reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -568,8 +534,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -577,40 +542,35 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
+ LTemplateResultInstruction<1>* instr, int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
+ LTemplateResultInstruction<1>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -645,6 +605,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
!hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
}
return instr;
@@ -671,6 +633,19 @@ LUnallocated* LChunkBuilder::TempRegister() {
}
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
ASSERT(operand->HasFixedPolicy());
@@ -856,176 +831,108 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
chunk_->AddInstruction(dummy, current_block_);
}
} else {
- instr = current->CompileToLithium(this);
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
}
argument_count_ += current->argument_delta();
ASSERT(argument_count_ >= 0);
if (instr != NULL) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(current);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, The register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // the it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- ASSERT(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- if (instr->IsCall()) {
- HValue* hydrogen_value_for_lazy_bailout = current;
- LInstruction* instruction_needing_environment = NULL;
- if (current->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(current->next());
- instruction_needing_environment = instr;
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
- }
+ AddInstruction(instr, current);
}
+
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+ // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
}
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
}
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
}
+ ASSERT(fixed == 0 || used_at_start == 0);
}
+#endif
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
}
-
- return result;
}
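
This hunk does two related things. First, the instruction-emission tail of VisitInstruction moves into the new AddInstruction helper, so a builder can emit several lithium instructions for one hydrogen node (DoPushArguments below is the first user); the deleted CreateEnvironment body appears to have moved to shared lithium code in this release, though the diff here only shows the removal. Second, control instructions whose successor is statically known are short-circuited before any platform-specific lowering runs:

    HBasicBlock* successor;
    if (HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
        successor != NULL) {
      instr = new(zone()) LGoto(successor);  // e.g. a branch on a constant:
    }                                        // no LBranch, no deopt environment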
@@ -1035,22 +942,21 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
HValue* value = instr->value();
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
ToBooleanStub::Types expected = instr->expected_input_types();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
- !expected.IsGeneric()) {
- return AssignEnvironment(result);
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
- return result;
+ return branch;
}
@@ -1116,9 +1022,13 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
}
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- LOperand* argument = Use(instr->argument());
- return new(zone()) LPushArgument(argument);
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = Use(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
}
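
DoPushArguments is a direct consumer of the new AddInstruction path: one HPushArguments node fans out into one LPushArgument per operand, and the builder returns NULL because everything has already been added to the chunk. In outline:

    for (int i = 0; i < instr->OperandCount(); ++i) {
      AddInstruction(new(zone()) LPushArgument(Use(instr->argument(i))), instr);
    }
    return NULL;  // nothing left for VisitInstruction to add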
@@ -1157,33 +1067,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+ return MarkAsCall(DefineFixed(result, v0), instr);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1201,12 +1116,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1215,30 +1128,17 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
+ return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
}
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
}
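// ---- Editor's aside (illustration only; not part of this patch). ----
// Unlike DoMathLog above, DoMathClz32 needs neither MarkAsCall nor an
// environment: counting leading zero bits is a plain register-to-register
// operation that cannot deoptimize. A portable sketch of the operation:
#include <stdint.h>

int Clz32(uint32_t x) {
  if (x == 0) return 32;    // __builtin_clz(0) is undefined, handle it first
  return __builtin_clz(x);  // GCC/Clang builtin; Clz32(1) == 31
}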
@@ -1248,7 +1148,7 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
+ LOperand* double_temp = TempDoubleRegister();
LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
return DefineAsRegister(result);
}
@@ -1257,7 +1157,7 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
// Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
LOperand* input = UseFixedDouble(instr->value(), f8);
- LOperand* temp = FixedTemp(f6);
+ LOperand* temp = TempDoubleRegister();
LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
return DefineFixedDouble(result, f4);
}
@@ -1269,8 +1169,11 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
? NULL
: UseFixed(instr->context(), cp);
LOperand* input = UseRegister(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LMathAbs(context, input));
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
@@ -1291,38 +1194,12 @@ LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
- LOperand* temp = FixedTemp(f6);
+ LOperand* temp = TempDoubleRegister();
LMathRound* result = new(zone()) LMathRound(input, temp);
return AssignEnvironment(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* key = UseFixed(instr->key(), a2);
- return MarkAsCall(
- DefineFixed(new(zone()) LCallKeyed(context, key), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
@@ -1343,9 +1220,7 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
LCallFunction* call = new(zone()) LCallFunction(context, function);
- LInstruction* result = DefineFixed(call, v0);
- if (instr->IsTailCall()) return result;
- return MarkAsCall(result, instr);
+ return MarkAsCall(DefineFixed(call, v0), instr);
}
@@ -1390,14 +1265,70 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
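// ---- Editor's aside (sketch, not part of this patch). ----
// The three bailout conditions above mirror the corner cases of shift-based
// signed division: kMinInt / -1 overflows, a zero result from a negative
// dividend is JavaScript's -0, and a non-zero remainder matters when the
// uses are not truncating. A sketch of the truncating shift itself, assuming
// a positive power-of-two divisor and 32-bit two's complement arithmetic:
#include <stdint.h>

int32_t TruncatingDivByPowerOf2(int32_t dividend, int32_t divisor) {
  int shift = __builtin_ctz(static_cast<uint32_t>(divisor));  // divisor == 1 << shift
  // Bias negative dividends so the arithmetic shift rounds toward zero;
  // a bare `dividend >> shift` would round toward -infinity instead.
  int32_t bias = (dividend >> 31) & (divisor - 1);
  return (dividend + bias) >> shift;  // e.g. -7 / 4 == -1, not -2
}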
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
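// ---- Editor's aside (sketch, not part of this patch). ----
// LDivByConstI replaces the removed HasMagicNumberForDivisor machinery: a
// division by a known constant becomes a multiply by a "magic" reciprocal
// plus fix-ups (Hacker's Delight, ch. 10, which the deleted comment below
// cites). One standard encoding for the constant 3, shown only to
// illustrate the technique:
#include <stdint.h>

int32_t DivBy3(int32_t n) {
  // 0x55555556 == ceil(2^32 / 3); the high 32 bits of the product hold the
  // quotient, off by one for negative dividends.
  int64_t prod = static_cast<int64_t>(n) * 0x55555556LL;
  int32_t q = static_cast<int32_t>(prod >> 32);  // high half
  q += static_cast<uint32_t>(n) >> 31;           // +1 when n < 0
  return q;                                      // DivBy3(7) == 2, DivBy3(-7) == -2
}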
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LDivI* div = new(zone()) LDivI(dividend, divisor);
- return AssignEnvironment(DefineAsRegister(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1406,86 +1337,110 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true;
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer's Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
- return false;
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- // Only optimize when we have magic numbers for the divisor.
-  // The standard integer division routine is usually slower than transitioning
- // to FPU.
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- return NULL;
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor);
+ return AssignEnvironment(DefineAsRegister(div));
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegisterOrConstant(right);
- LOperand* remainder = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
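// ---- Editor's aside (illustration only; not part of this patch). ----
// HMathFloorOfDiv gets its own strategy set because flooring division
// differs from the truncating division above exactly when the operands have
// mixed signs: trunc(-7 / 2) == -3 but floor(-7 / 2) == -4. A sketch,
// assuming divisor != 0 and no kMinInt / -1 overflow:
#include <stdint.h>

int32_t FlooringDiv(int32_t a, int32_t b) {
  int32_t q = a / b;                              // C++ '/' truncates
  if ((a % b != 0) && ((a < 0) != (b < 0))) --q;  // step down on mixed signs
  return q;
}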
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
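// ---- Editor's aside (sketch, not part of this patch). ----
// Modulus by a power of two reduces to a mask plus a sign fix-up, since the
// JavaScript % operator keeps the dividend's sign. A zero result from a
// negative dividend is the -0 that kBailoutOnMinusZero guards above.
// Assumes divisor == 2^k with k in [0, 31]:
#include <stdint.h>

int32_t ModByPowerOf2(int32_t n, int32_t divisor) {
  uint32_t mask = static_cast<uint32_t>(divisor) - 1;
  if (n >= 0) return static_cast<int32_t>(static_cast<uint32_t>(n) & mask);
  // |n| mod 2^k computed in unsigned arithmetic, so even kMinInt is safe.
  uint32_t mag = (0u - static_cast<uint32_t>(n)) & mask;
  return -static_cast<int32_t>(mag);  // -7 % 4 == -3; -8 % 4 == -0 -> deopt case
}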
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseConstant(right));
- LInstruction* result = DefineAsRegister(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- } else {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right),
- TempRegister(),
- FixedTemp(f20),
- FixedTemp(f22));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1)) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- }
+ return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
} else {
@@ -1694,8 +1649,6 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1711,8 +1664,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* value = UseRegister(instr->value());
LOperand* scratch = TempRegister();
return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
@@ -1801,19 +1752,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), a0);
LDateField* result =
@@ -1841,9 +1779,16 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = UseRegister(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseRegisterOrConstantAtStart(instr->length())
+ : UseRegisterAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
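// ---- Editor's aside (illustration only; not part of this patch). ----
// A common encoding of the check built above: one unsigned compare covers
// both `index < 0` and `index >= length`, because a negative int32 index
// reinterprets as a huge uint32. Whether the MIPS codegen emits exactly this
// sequence is outside this diff; the sketch shows only the idea:
#include <stdint.h>

bool InBounds(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}
// InBounds(-1, 10) == false because uint32_t(-1) == 0xFFFFFFFF.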
@@ -1861,13 +1806,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), a0);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -1884,20 +1822,21 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ HValue* val = instr->value();
if (from.IsSmi()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
from = Representation::Tagged();
}
if (from.IsTagged()) {
if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineSameAsFirst(new(zone()) LDummyUse(value));
@@ -1905,78 +1844,71 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = NULL;
- LInstruction* res = NULL;
- HValue* val = instr->value();
if (val->type().IsSmi() || val->representation().IsSmi()) {
- value = UseRegisterAtStart(val);
- res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
- value = UseRegister(val);
+ LOperand* value = UseRegister(val);
LOperand* temp1 = TempRegister();
- LOperand* temp2 = FixedTemp(f22);
- res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
- temp1,
- temp2));
- res = AssignEnvironment(res);
+ LOperand* temp2 = TempDoubleRegister();
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
}
- return res;
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
-
- // Make sure that the temp and result_temp registers are
- // different.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- Define(result, result_temp);
- return AssignPointerMap(result);
+ return AssignPointerMap(Define(result, result_temp));
} else if (to.IsSmi()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return AssignEnvironment(
DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- LDoubleToI* res = new(zone()) LDoubleToI(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegisterAtStart(val);
return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
}
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
- : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value())));
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
} else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
}
}
}
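// ---- Editor's aside (sketch, not part of this patch). ----
// The kCanOverflow checks in the Smi cases above come from the 32-bit Smi
// layout: a Smi is the int32 value shifted left by one with a 0 tag bit, so
// only 31 bits of payload fit. A sketch of tagging with overflow detection:
#include <stdint.h>

bool SmiTag32(int32_t value, int32_t* tagged_out) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  if ((tagged >> 1) != value) return false;  // lost the top bit -> deopt path
  *tagged_out = tagged;
  return true;
}
// SmiTag32(0x3FFFFFFF, &t) succeeds; SmiTag32(0x40000000, &t) overflows.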
@@ -1987,7 +1919,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2011,15 +1947,12 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) {
- value = UseRegisterAtStart(instr->value());
- if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
- }
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (!instr->CanOmitMapChecks()) {
- AssignEnvironment(result);
- if (instr->has_migration_target()) return AssignPointerMap(result);
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
}
return result;
}
@@ -2031,19 +1964,33 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
// Revisit this decision, here and 8 lines below.
- return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(f22)));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg,
+ TempDoubleRegister()));
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
ASSERT(input_rep.IsSmiOrTagged());
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve f22 explicitly.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(f22));
+ LClampTToUint8* result =
+ new(zone()) LClampTToUint8(reg, TempDoubleRegister());
return AssignEnvironment(DefineAsRegister(result));
}
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2100,21 +2047,14 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(), a1);
- LOperand* value = UseFixed(instr->value(), a0);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2129,7 +2069,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
value = UseRegister(instr->value());
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2160,20 +2103,13 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LInstruction* result = NULL;
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2181,25 +2117,28 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
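// ---- Editor's aside (illustration only; not part of this patch). ----
// The UINT32_ELEMENTS special case above replaces the removed comment about
// unsigned loads: an element of 2^31 or more has no int32 representation, so
// unless every use accepts the kUint32 flag the load must keep an
// environment and deoptimize into a heap-number result.
#include <stdint.h>

bool Uint32FitsInt32(uint32_t element) {
  return element <= 0x7FFFFFFFu;  // e.g. new Uint32Array([0x80000000])[0] fails
}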
@@ -2215,7 +2154,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2244,17 +2183,17 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
- (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
- ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
- (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
-
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
@@ -2275,17 +2214,18 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
return result;
} else {
+ LOperand* object = UseFixed(instr->object(), a0);
LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL);
- return AssignPointerMap(result);
+ return MarkAsCall(result, instr);
}
}
@@ -2318,11 +2258,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ if (needs_write_barrier || instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2331,14 +2269,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2354,12 +2285,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = FLAG_new_string_add
- ? UseFixed(instr->left(), a1)
- : UseRegisterAtStart(instr->left());
- LOperand* right = FLAG_new_string_add
- ? UseFixed(instr->right(), a0)
- : UseRegisterAtStart(instr->right());
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
return MarkAsCall(
DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
instr);
@@ -2372,7 +2299,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
@@ -2428,7 +2355,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ info()->code_stub()->GetInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
@@ -2501,9 +2428,6 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2535,13 +2459,13 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2595,9 +2519,27 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
}
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/mips/lithium-mips.h b/chromium/v8/src/mips/lithium-mips.h
index 8d34399057b..ea3a658f3c3 100644
--- a/chromium/v8/src/mips/lithium-mips.h
+++ b/chromium/v8/src/mips/lithium-mips.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_LITHIUM_MIPS_H_
#define V8_MIPS_LITHIUM_MIPS_H_
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
+#include "src/hydrogen.h"
+#include "src/lithium-allocator.h"
+#include "src/lithium.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -44,6 +21,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
+ V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -52,12 +30,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -83,24 +58,28 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleBits) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
- V(ElementsKind) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -109,7 +88,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -119,7 +97,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
@@ -131,17 +108,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
V(MathExp) \
+ V(MathClz32) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -150,7 +126,6 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
@@ -164,8 +139,8 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
+ V(StoreFrameContext) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -177,16 +152,13 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -266,7 +238,9 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
@@ -303,10 +277,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -318,10 +290,20 @@ class LTemplateInstruction : public LInstruction {
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
private:
+ // Iterator support.
virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
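// ---- Editor's aside (shape sketch only; not part of this patch). ----
// Splitting LTemplateResultInstruction<R> out of LTemplateInstruction lets a
// subclass with a runtime-sized operand list (LCallWithDescriptor, later in
// this header) reuse the result slot while overriding InputCount()/InputAt().
// A toy version of the two layouts; the real EmbeddedContainer specializes
// the zero-size case instead of the `N ? N : 1` trick used here:
#include <vector>

template <int R> struct ToyResultBase {
  void* results_[R ? R : 1];  // 0 or 1 result slots
};
template <int R, int I, int T> struct ToyFixedOps : ToyResultBase<R> {
  void* inputs_[I ? I : 1];   // operand storage sized at compile time
  void* temps_[T ? T : 1];
};
struct ToyVariableOps : ToyResultBase<1> {
  std::vector<void*> inputs_;  // runtime-sized, like ZoneList<LOperand*>
};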
@@ -440,6 +422,7 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -490,10 +473,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -558,6 +537,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
@@ -622,72 +602,159 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
public:
- // Used when the right hand is a constant power of 2.
LModI(LOperand* left,
LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
- }
-
- // Used for the standard case.
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LDivI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
@@ -811,39 +878,15 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathSin(LOperand* value) {
+ explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathTan(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
@@ -1318,34 +1361,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
@@ -1401,20 +1416,6 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
- LThrow(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
@@ -1584,20 +1585,6 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
@@ -1613,12 +1600,18 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
@@ -1678,28 +1671,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1761,15 +1732,15 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
- temps_[0] = code_object;
+ inputs_[1] = code_object;
}
LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return temps_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
@@ -1808,18 +1779,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1833,95 +1792,73 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
+ LOperand* function() { return inputs_[0]; }
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
+ LOperand* target() const { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
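
// A minimal stand-alone sketch (plain C++, std::vector instead of V8's
// ZoneList) of the pattern LCallWithDescriptor introduces above: the operand
// list is sized at run time from the call descriptor rather than baked into
// the LTemplateInstruction<1, I, T> template, so the register-allocator
// iterator hooks InputCount()/InputAt() are overridden to walk the dynamic
// list. Names below are illustrative, not V8's.
#include <cassert>
#include <vector>

struct Operand {};  // stand-in for LOperand

class Instruction {
 public:
  virtual ~Instruction() {}
  virtual int InputCount() = 0;
  virtual Operand* InputAt(int i) = 0;
};

class VariableArityCall : public Instruction {
 public:
  explicit VariableArityCall(const std::vector<Operand*>& operands)
      : inputs_(operands) {
    assert(!inputs_.empty());  // inputs_[0] is always the call target.
  }
  Operand* target() const { return inputs_[0]; }

  virtual int InputCount() { return static_cast<int>(inputs_.size()); }
  virtual Operand* InputAt(int i) { return inputs_[i]; }

 private:
  std::vector<Operand*> inputs_;
};
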
-
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNamed(LOperand* context) {
+ LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1943,35 +1880,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -2021,7 +1929,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2043,19 +1951,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -2068,38 +1963,33 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
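
// What the widened LNumberTagI/LNumberTagU contract boxes, sketched in
// portable C++ under the 32-bit V8 Smi encoding (payload in the upper 31
// bits, tag bit 0 clear). Values that survive the shift round-trip become
// Smis; the rest take the slow path that allocates a HeapNumber, which is
// where the two newly added temp registers come in.
#include <stdint.h>

static bool TryTagAsSmi(int32_t value, int32_t* tagged) {
  int32_t candidate = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  if ((candidate >> 1) != value) return false;  // overflow: needs a HeapNumber
  *tagged = candidate;
  return true;
}
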
@@ -2184,6 +2074,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2234,7 +2125,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
@@ -2259,7 +2149,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2272,6 +2162,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2284,7 +2180,7 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
@@ -2310,7 +2206,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2437,7 +2333,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMaps(LOperand* value) {
+ explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
}
@@ -2513,6 +2409,33 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
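
// What the new LDoubleBits/LConstructDouble instructions compute, sketched in
// portable C++ (memcpy in place of the machine-level register moves the
// codegen emits): splitting a double into the high and low 32 bits of its
// IEEE-754 representation, and reassembling one from such a pair.
#include <stdint.h>
#include <string.h>

static uint32_t DoubleHiBits(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);
}

static uint32_t DoubleLoBits(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits);
}

static double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  memcpy(&d, &bits, sizeof(d));
  return d;
}
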
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2705,6 +2628,35 @@ class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
@@ -2716,26 +2668,24 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition) { }
+ allocator_(allocator) { }
+
+ Isolate* isolate() const { return graph_->isolate(); }
// Build the sequence for the graph.
LPlatformChunk* Build();
- LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
-
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2744,18 +2694,24 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
static bool HasMagicNumberForDivisor(int32_t divisor);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2768,7 +2724,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2818,31 +2773,26 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LUnallocated* TempDoubleRegister();
MUST_USE_RESULT LOperand* FixedTemp(Register reg);
MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
@@ -2856,11 +2806,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
@@ -2873,14 +2820,11 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
- int position_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/chromium/v8/src/mips/macro-assembler-mips.cc b/chromium/v8/src/mips/macro-assembler-mips.cc
index f33e6fa063c..45ba4a994e8 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/macro-assembler-mips.cc
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "isolate-inl.h"
-#include "runtime.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
@@ -79,6 +56,11 @@ void MacroAssembler::Store(Register src,
} else if (r.IsInteger16() || r.IsUInteger16()) {
sh(src, dst);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
sw(src, dst);
}
}
@@ -212,7 +194,8 @@ void MacroAssembler::RecordWriteField(
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
ASSERT(!AreAliased(value, dst, t8, object));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
@@ -242,7 +225,8 @@ void MacroAssembler::RecordWriteField(
ra_status,
save_fp,
remembered_set_action,
- OMIT_SMI_CHECK);
+ OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
bind(&done);
@@ -255,16 +239,93 @@ void MacroAssembler::RecordWriteField(
}
+// Will clobber 4 registers: object, map, dst, ip. The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode) {
+ if (emit_debug_code()) {
+ ASSERT(!dst.is(at));
+ lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ Check(eq,
+ kWrongAddressOrValuePassedToRecordWrite,
+ dst,
+ Operand(isolate()->factory()->meta_map()));
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ if (emit_debug_code()) {
+ lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
+ Check(eq,
+ kWrongAddressOrValuePassedToRecordWrite,
+ map,
+ Operand(at));
+ }
+
+ Label done;
+
+ // A single check of the map's page's interesting flag suffices, since it is
+ // only set during incremental collection, and then it's also guaranteed that
+ // the from object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+
+ Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
+ Branch(&ok, eq, at, Operand(zero_reg));
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ // Record the actual write.
+ if (ra_status == kRAHasNotBeenSaved) {
+ push(ra);
+ }
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (ra_status == kRAHasNotBeenSaved) {
+ pop(ra);
+ }
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
+ li(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+ }
+}
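
// A sketch of the CheckPageFlag fast path used above, with an assumed page
// layout: V8 heap pages are aligned allocations whose header (MemoryChunk)
// carries a flags word, so testing "is this object's page interesting" is a
// mask of the object's address plus one load. The page size and field layout
// here are illustrative, not V8's exact ones.
#include <stdint.h>

static const uintptr_t kPageSize = 1 << 20;  // assumed 1 MB aligned pages

struct MemoryChunk {   // header at the start of every page (assumed layout)
  uintptr_t flags;     // bit mask, e.g. "pointers to here are interesting"
};

static bool PageFlagSet(const void* object, uintptr_t mask) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(object);
  const MemoryChunk* chunk =
      reinterpret_cast<const MemoryChunk*>(addr & ~(kPageSize - 1));
  return (chunk->flags & mask) != 0;  // what CheckPageFlag branches on
}
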
+
+
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- RAStatus ra_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
ASSERT(!AreAliased(object, address, value, t8));
ASSERT(!AreAliased(object, address, value, t9));
@@ -274,6 +335,11 @@ void MacroAssembler::RecordWrite(Register object,
eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
// TODO(mstarzinger): Dynamic counter missing.
@@ -287,11 +353,13 @@ void MacroAssembler::RecordWrite(Register object,
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ }
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -302,7 +370,8 @@ void MacroAssembler::RecordWrite(Register object,
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
@@ -352,7 +421,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
push(ra);
StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
+ StoreBufferOverflowStub(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(ra);
bind(&done);
@@ -789,7 +858,28 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
-//------------Pseudo-instructions-------------
+void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
+ if (kArchVariant == kLoongson) {
+ // The Loongson variant has no pref instruction; a plain load into the
+ // zero register performs the access and discards the result.
+ lw(zero_reg, rs);
+ } else {
+ pref(hint, rs);
+ }
+}
+
+
+// ------------Pseudo-instructions-------------
+
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
+ lwr(rd, rs);
+ lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
+ swr(rd, rs);
+ swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
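
// The contract of the new Ulw/Usw pseudo-instructions, in portable C++. An
// ordinary MIPS lw/sw traps on a non-word-aligned address; lwr/lwl (and
// swr/swl) each access only the bytes of the operand that fall within one
// aligned word, so the pair together covers a word straddling two aligned
// words. memcpy expresses the same "any alignment" guarantee:
#include <stdint.h>
#include <string.h>

static uint32_t UnalignedLoad32(const uint8_t* p) {  // Ulw
  uint32_t v;
  memcpy(&v, p, sizeof(v));
  return v;
}

static void UnalignedStore32(uint8_t* p, uint32_t v) {  // Usw
  memcpy(p, &v, sizeof(v));
}
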
+
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
AllowDeferredHandleDereference smi_check;
@@ -1195,7 +1285,7 @@ void MacroAssembler::BranchF(Label* target,
break;
default:
CHECK(0);
- };
+ }
}
if (bd == PROTECT) {
@@ -1207,12 +1297,12 @@ void MacroAssembler::BranchF(Label* target,
void MacroAssembler::Move(FPURegister dst, double imm) {
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
+ DoubleRepresentation value_rep(imm);
// Handle special values first.
bool force_load = dst.is(kDoubleRegZero);
- if (value.bits == zero.bits && !force_load) {
+ if (value_rep == zero && !force_load) {
mov_d(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits && !force_load) {
+ } else if (value_rep == minus_zero && !force_load) {
neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
@@ -1435,7 +1525,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
sdc1(double_input, MemOperand(sp, 0));
- DoubleToIStub stub(sp, result, 0, true, true);
+ DoubleToIStub stub(isolate(), sp, result, 0, true, true);
CallStub(&stub);
Addu(sp, sp, Operand(kDoubleSize));
@@ -1456,7 +1546,8 @@ void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
// If we fell through then inline version didn't succeed - call stub instead.
push(ra);
- DoubleToIStub stub(object,
+ DoubleToIStub stub(isolate(),
+ object,
result,
HeapNumber::kValueOffset - kHeapObjectTag,
true,
@@ -1542,19 +1633,27 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
if (is_near(L)) {
BranchShort(L, cond, rs, rt, bdslot);
} else {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
}
} else {
if (is_trampoline_emitted()) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
} else {
BranchShort(L, cond, rs, rt, bdslot);
}
@@ -2015,7 +2114,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ // Unsigned "greater than zero" simply means "not equal to zero".
+ bne(rs, zero_reg, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
@@ -2062,7 +2161,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Uless_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
- b(offset);
+ // Unsigned "less than or equal to zero" can only mean "equal to zero".
+ beq(rs, zero_reg, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
@@ -2657,18 +2756,14 @@ void MacroAssembler::Push(Handle<Object> handle) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
void MacroAssembler::DebugBreak() {
PrepareCEntryArgs(0);
PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
+ CEntryStub ces(isolate(), 1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -2823,7 +2918,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3113,33 +3208,12 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
- li(scratch1, Operand(high_promotion_mode));
- lw(scratch1, MemOperand(scratch1, 0));
- Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
-
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- jmp(&install_map);
-
- bind(&allocate_new_space);
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- flags);
-
- bind(&install_map);
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -3284,13 +3358,24 @@ void MacroAssembler::CopyBytes(Register src,
// TODO(kalmard) check if this can be optimized to use sw in most cases.
// Can't use unaligned access - copy byte by byte.
- sb(scratch, MemOperand(dst, 0));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 3));
+ if (kArchEndian == kLittle) {
+ sb(scratch, MemOperand(dst, 0));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ } else {
+ sb(scratch, MemOperand(dst, 3));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 0));
+ }
+
Addu(dst, dst, 4);
Subu(length, length, Operand(kPointerSize));
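
// The endian-aware byte copy above, modelled in C++: the four bytes of the
// scratch word must land in memory in address order, so a little-endian
// target stores the least significant byte first while a big-endian target
// stores it last.
#include <stdint.h>

static void StoreWordByBytes(uint8_t* dst, uint32_t scratch,
                             bool little_endian) {
  for (int i = 0; i < 4; ++i) {
    dst[little_endian ? i : 3 - i] = static_cast<uint8_t>(scratch);  // sb
    scratch >>= 8;                                                   // srl
  }
}
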
@@ -3395,11 +3480,12 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
- sw(exponent_reg, FieldMemOperand(scratch1, offset));
+ sw(mantissa_reg,
+ FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+ + kHoleNanLower32Offset));
+ sw(exponent_reg,
+ FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+ + kHoleNanUpper32Offset));
jmp(&done);
bind(&maybe_nan);
@@ -3410,10 +3496,9 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
bind(&is_nan);
// Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ LoadRoot(at, Heap::kNanValueRootIndex);
+ lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
+ lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
jmp(&have_double_value);
bind(&smi_value);
@@ -3496,64 +3581,77 @@ void MacroAssembler::CheckMap(Register obj,
}
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
- Move(dst, v0, v1);
+ if (kArchEndian == kLittle) {
+ Move(dst, v0, v1);
+ } else {
+ Move(dst, v1, v0);
+ }
} else {
Move(dst, f0); // Reg f0 is o32 ABI FP return value.
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
+void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
+ if (IsMipsSoftFloatABI) {
+ if (kArchEndian == kLittle) {
+ Move(dst, a0, a1);
+ } else {
+ Move(dst, a1, a0);
+ }
} else {
- Move(a0, a1, dreg);
+ Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
- if (dreg2.is(f12)) {
- ASSERT(!dreg1.is(f14));
- Move(f14, dreg2);
- Move(f12, dreg1);
+ Move(f12, src);
+ } else {
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src);
} else {
- Move(f12, dreg1);
- Move(f14, dreg2);
+ Move(a1, a0, src);
}
- } else {
- Move(a0, a1, dreg1);
- Move(a2, a3, dreg2);
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
- Register reg) {
+void MacroAssembler::MovToFloatResult(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
- Move(a2, reg);
+ Move(f0, src);
} else {
- Move(a2, reg);
- Move(a0, a1, dreg);
+ if (kArchEndian == kLittle) {
+ Move(v0, v1, src);
+ } else {
+ Move(v1, v0, src);
+ }
}
}
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be t1 to
- // follow the calling convention which requires the call type to be
- // in t1.
- ASSERT(dst.is(t1));
- if (call_kind == CALL_AS_FUNCTION) {
- li(dst, Operand(Smi::FromInt(1)));
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+ DoubleRegister src2) {
+ if (!IsMipsSoftFloatABI) {
+ if (src2.is(f12)) {
+ ASSERT(!src1.is(f14));
+ Move(f14, src2);
+ Move(f12, src1);
+ } else {
+ Move(f12, src1);
+ Move(f14, src2);
+ }
} else {
- li(dst, Operand(Smi::FromInt(0)));
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src1);
+ Move(a2, a3, src2);
+ } else {
+ Move(a1, a0, src1);
+ Move(a3, a2, src2);
+ }
}
}
@@ -3568,8 +3666,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label regular_invoke;
@@ -3579,7 +3676,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
- // a3: callee code entry
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
@@ -3623,14 +3719,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(t1, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
Branch(done);
}
} else {
- SetCallKind(t1, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
@@ -3642,8 +3736,7 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3652,16 +3745,14 @@ void MacroAssembler::InvokeCode(Register code,
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(t1, call_kind);
Call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, call_kind);
Jump(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -3671,41 +3762,10 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
-
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(t1, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(t1, call_kind);
- Jump(code, rmode);
- }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3723,7 +3783,7 @@ void MacroAssembler::InvokeFunction(Register function,
lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
@@ -3731,8 +3791,7 @@ void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3746,7 +3805,7 @@ void MacroAssembler::InvokeFunction(Register function,
// allow recompilation to take effect without changing any of the
// call sites.
lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(a3, expected, actual, flag, call_wrapper);
}
@@ -3754,10 +3813,9 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
li(a1, function);
- InvokeFunction(a1, expected, actual, flag, call_wrapper, call_kind);
+ InvokeFunction(a1, expected, actual, flag, call_wrapper);
}
@@ -3879,13 +3937,17 @@ void MacroAssembler::CallStub(CodeStub* stub,
const Operand& r2,
BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
cond, r1, r2, bd);
}
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+void MacroAssembler::TailCallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2,
+ BranchDelaySlot bd) {
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
@@ -3895,10 +3957,8 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(
- ExternalReference function,
- Address function_address,
+ Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand) {
@@ -3912,6 +3972,22 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference::handle_scope_level_address(isolate()),
next_address);
+ ASSERT(function_address.is(a1) || function_address.is(a2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
+ lb(t9, MemOperand(t9, 0));
+ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+ // Additional parameter is the address of the actual callback.
+ li(t9, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ mov(t9, function_address);
+ bind(&end_profiler_check);
+
// Allocate HandleScope in callee-save registers.
li(s3, Operand(next_address));
lw(s0, MemOperand(s3, kNextOffset));
@@ -3929,29 +4005,10 @@ void MacroAssembler::CallApiFunctionAndReturn(
PopSafepointRegisters();
}
- Label profiler_disabled;
- Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
- lb(t9, MemOperand(t9, 0));
- beq(t9, zero_reg, &profiler_disabled);
-
- // Third parameter is the address of the actual getter function.
- li(thunk_last_arg, reinterpret_cast<int32_t>(function_address));
- li(t9, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- li(t9, Operand(function));
-
- bind(&end_profiler_check);
-
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(this, t9);
if (FLAG_log_timer_events) {
@@ -4004,7 +4061,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
@@ -4028,27 +4085,14 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addiu(sp, sp, num_arguments * kPointerSize);
- }
- LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash,
- Register index) {
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
// If the hash field contains an array index pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
// conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- sll(index, hash, kSmiTagSize);
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
}
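
// What DecodeFieldToSmi<String::ArrayIndexValueBits> replaces the manual
// Ext/sll pair with, modelled in C++ for a generic bit field <shift, size>
// under the 32-bit Smi encoding (payload shifted left by one, tag bit zero):
#include <stdint.h>

static int32_t DecodeFieldToSmi(uint32_t hash, int shift, int size) {
  uint32_t field = (hash >> shift) & ((1u << size) - 1);  // Ext
  return static_cast<int32_t>(field << 1);                // sll by kSmiTagSize
}
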
@@ -4190,10 +4234,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -4201,7 +4242,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
- CEntryStub stub(1, save_doubles);
+ CEntryStub stub(isolate(), 1, save_doubles);
CallStub(&stub);
}
@@ -4212,7 +4253,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ext);
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}
@@ -4241,8 +4282,8 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd) {
PrepareCEntryFunction(builtin);
- CEntryStub stub(1);
- Jump(stub.GetCode(isolate()),
+ CEntryStub stub(isolate(), 1);
+ Jump(stub.GetCode(),
RelocInfo::CODE_TARGET,
al,
zero_reg,
@@ -4260,12 +4301,10 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
GetBuiltinEntry(t9, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
- SetCallKind(t1, CALL_AS_METHOD);
Call(t9);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, CALL_AS_METHOD);
Jump(t9);
}
}
@@ -4366,16 +4405,8 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason,
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -4387,18 +4418,16 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- li(a0, Operand(p0));
- push(a0);
- li(a0, Operand(Smi::FromInt(p1 - p0)));
+ li(a0, Operand(Smi::FromInt(reason)));
push(a0);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -4406,8 +4435,8 @@ void MacroAssembler::Abort(BailoutReason reason) {
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
// Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 14;
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@@ -4460,31 +4489,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- lw(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
lw(function,
@@ -4497,19 +4501,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- lw(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- lw(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- lw(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -4526,36 +4517,37 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
+void MacroAssembler::StubPrologue() {
Push(ra, fp, cp);
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Load the stub address to t9 and call it,
+ // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+ CONSTANT_SIZE);
+ nop(); // Prevent jalr to jal optimization.
+ jalr(t9, a0);
+ nop(); // Branch delay slot nop.
+ nop(); // Pad the empty space.
} else {
- PredictableCodeSizeScope predictible_code_size_scope(
- this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- if (isolate()->IsCodePreAgingActive()) {
- // Pre-age the code.
- Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
- nop(Assembler::CODE_AGE_MARKER_NOP);
- // Load the stub address to t9 and call it,
- // GetCodeAgeAndParity() extracts the stub address from this instruction.
- li(t9,
- Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
- CONSTANT_SIZE);
- nop(); // Prevent jalr to jal optimization.
- jalr(t9, a0);
- nop(); // Branch delay slot nop.
- nop(); // Pad the empty space.
- } else {
- Push(ra, fp, cp, a1);
- nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- // Adjust fp to point to caller's fp.
- Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- }
+ Push(ra, fp, cp, a1);
+ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ // Adjust fp to point to caller's fp.
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
@@ -4885,6 +4877,23 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
+ pop(object);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
ASSERT(!reg.is(at));
@@ -5075,14 +5084,14 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
uint32_t encoding_mask) {
Label is_object;
SmiTst(string, at);
- ThrowIf(eq, kNonObject, at, Operand(zero_reg));
+ Check(ne, kNonObject, at, Operand(zero_reg));
lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
andi(at, at, kStringRepresentationMask | kStringEncodingMask);
li(scratch, Operand(encoding_mask));
- ThrowIf(ne, kUnexpectedStringType, at, Operand(scratch));
+ Check(eq, kUnexpectedStringType, at, Operand(scratch));
// The index is assumed to be untagged coming in, tag it to compare with the
// string length without using a temp register, it is restored at the end of
@@ -5091,14 +5100,14 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
TrySmiTag(index, scratch, &index_tag_bad);
Branch(&index_tag_ok);
bind(&index_tag_bad);
- Throw(kIndexIsTooLarge);
+ Abort(kIndexIsTooLarge);
bind(&index_tag_ok);
lw(at, FieldMemOperand(string, String::kLengthOffset));
- ThrowIf(ge, kIndexIsTooLarge, index, Operand(at));
+ Check(lt, kIndexIsTooLarge, index, Operand(at));
ASSERT(Smi::FromInt(0) == 0);
- ThrowIf(lt, kIndexIsNegative, index, Operand(zero_reg));
+ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
SmiUntag(index, index);
}
@@ -5293,7 +5302,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
if (map->CanBeDeprecated()) {
li(scratch, Operand(map));
lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
- And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ And(scratch, scratch, Operand(Map::Deprecated::kMask));
Branch(if_deprecated, ne, scratch, Operand(zero_reg));
}
}
@@ -5484,57 +5493,6 @@ void MacroAssembler::EnsureNotWhite(
}
-void MacroAssembler::Throw(BailoutReason reason) {
- Label throw_start;
- bind(&throw_start);
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
- RecordComment("Throw message: ");
- RecordComment(msg);
- }
-#endif
-
- li(a0, Operand(Smi::FromInt(reason)));
- push(a0);
- // Disable stub call restrictions to always allow calls to throw.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kThrowMessage, 1);
- }
- // will not return here
- if (is_trampoline_pool_blocked()) {
- // If the calling code cares throw the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the ThrowMessage macro constant.
- // Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedThrowMessageInstructions = 14;
- int throw_instructions = InstructionsGeneratedSince(&throw_start);
- ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
- while (throw_instructions++ < kExpectedThrowMessageInstructions) {
- nop();
- }
- }
-}
-
-
-void MacroAssembler::ThrowIf(Condition cc,
- BailoutReason reason,
- Register rs,
- Operand rt) {
- Label L;
- Branch(&L, NegateCondition(cc), rs, rt);
- Throw(reason);
- // will not return here
- bind(&L);
-}
-
-
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
@@ -5550,7 +5508,8 @@ void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
- And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
+ And(dst, dst, Operand(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
}
@@ -5579,11 +5538,17 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
bind(&start);
- // Check that there are no elements. Register r2 contains the current JS
+ // Check that there are no elements. Register a2 contains the current JS
// object we've reached through the prototype chain.
+ Label no_elements;
lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
- Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
+ Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
+ // Second chance, the object may be using the empty slow element dictionary.
+ LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
+ Branch(call_runtime, ne, a2, Operand(at));
+
+ bind(&no_elements);
lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
Branch(&next, ne, a2, Operand(null_value));
}
@@ -5697,7 +5662,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
bind(&loop_again);
lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ DecodeField<Map::ElementsKindBits>(scratch1);
Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
Branch(&loop_again, ne, current, Operand(factory->null_value()));
@@ -5715,10 +5680,13 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
}
-CodePatcher::CodePatcher(byte* address, int instructions)
+CodePatcher::CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
+ masm_(NULL, address, size_ + Assembler::kGap),
+ flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -5728,7 +5696,9 @@ CodePatcher::CodePatcher(byte* address, int instructions)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ if (flush_cache_ == FLUSH) {
+ CPU::FlushICache(address_, size_);
+ }
// Check that the code was patched as expected.
ASSERT(masm_.pc_ == address_ + size_);
@@ -5768,6 +5738,28 @@ void CodePatcher::ChangeBranchCondition(Condition cond) {
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!dividend.is(result));
+ ASSERT(!dividend.is(at));
+ ASSERT(!result.is(at));
+ MultiplierAndShift ms(divisor);
+ li(at, Operand(ms.multiplier()));
+ Mult(dividend, Operand(at));
+ mfhi(result);
+ if (divisor > 0 && ms.multiplier() < 0) {
+ Addu(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && ms.multiplier() > 0) {
+ Subu(result, result, Operand(dividend));
+ }
+ if (ms.shift() > 0) sra(result, result, ms.shift());
+ srl(at, dividend, 31);
+ Addu(result, result, Operand(at));
+}
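
// A stand-alone model of the sequence TruncatingDiv emits, under the
// assumption that MultiplierAndShift computes the classic signed-division
// "magic number" (Hacker's Delight, figure 10-1). With it, n / d becomes a
// 32x32->64 multiply keeping the high word (Mult/mfhi), the two sign
// corrections, an arithmetic shift (sra), and adding the dividend's sign
// bit (srl 31 + Addu).
#include <stdint.h>

struct MagicNumbers { int32_t multiplier; int shift; };

static MagicNumbers SignedDivisionMagic(int32_t d) {  // requires |d| >= 2
  const uint32_t two31 = 0x80000000u;
  uint32_t ad = d < 0 ? 0u - static_cast<uint32_t>(d)
                      : static_cast<uint32_t>(d);
  uint32_t t = two31 + (static_cast<uint32_t>(d) >> 31);
  uint32_t anc = t - 1 - t % ad;  // absolute value of the "nc" bound
  int p = 31;
  uint32_t q1 = two31 / anc, r1 = two31 - q1 * anc;
  uint32_t q2 = two31 / ad, r2 = two31 - q2 * ad;
  uint32_t delta;
  do {
    p++;
    q1 *= 2; r1 *= 2;
    if (r1 >= anc) { q1++; r1 -= anc; }
    q2 *= 2; r2 *= 2;
    if (r2 >= ad) { q2++; r2 -= ad; }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));
  MagicNumbers mag;
  mag.multiplier = static_cast<int32_t>(q2 + 1);
  if (d < 0) mag.multiplier = -mag.multiplier;
  mag.shift = p - 32;
  return mag;
}

static int32_t TruncatingDivModel(int32_t dividend, int32_t divisor) {
  MagicNumbers ms = SignedDivisionMagic(divisor);
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * ms.multiplier) >> 32);   // mfhi
  if (divisor > 0 && ms.multiplier < 0) result += dividend;      // Addu
  if (divisor < 0 && ms.multiplier > 0) result -= dividend;      // Subu
  if (ms.shift > 0) result >>= ms.shift;                         // sra
  return result +
      static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);  // Addu
}
// e.g. TruncatingDivModel(-7, 3) == -2, matching C++ truncating division.
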
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/chromium/v8/src/mips/macro-assembler-mips.h b/chromium/v8/src/mips/macro-assembler-mips.h
index 4e30c353e2c..d339a3f7a2d 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.h
+++ b/chromium/v8/src/mips/macro-assembler-mips.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-#include "assembler.h"
-#include "mips/assembler-mips.h"
-#include "v8globals.h"
+#include "src/assembler.h"
+#include "src/mips/assembler-mips.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -94,6 +71,10 @@ enum LiFlags {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1,
@@ -169,6 +150,7 @@ class MacroAssembler: public Assembler {
DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+ DECLARE_BRANCH_PROTOTYPES(BranchShort)
#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
@@ -387,7 +369,9 @@ class MacroAssembler: public Assembler {
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
@@ -399,7 +383,9 @@ class MacroAssembler: public Assembler {
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
@@ -407,9 +393,17 @@ class MacroAssembler: public Assembler {
ra_status,
save_fp,
remembered_set_action,
- smi_check);
+ smi_check,
+ pointers_to_here_check_for_value);
}
+ void RecordWriteForMap(
+ Register object,
+ Register map,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp);
+
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
@@ -420,7 +414,9 @@ class MacroAssembler: public Assembler {
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// ---------------------------------------------------------------------------
@@ -601,12 +597,17 @@ class MacroAssembler: public Assembler {
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
+ void Pref(int32_t hint, const MemOperand& rs);
+
// ---------------------------------------------------------------------------
// Pseudo-instructions.
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+ void Ulw(Register rd, const MemOperand& rs);
+ void Usw(Register rd, const MemOperand& rs);
+
// Load int32 into the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
@@ -751,7 +752,7 @@ class MacroAssembler: public Assembler {
FPURegister cmp1,
FPURegister cmp2) {
BranchF(target, nan, cc, cmp1, cmp2, bd);
- };
+ }
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
@@ -865,14 +866,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -889,47 +883,31 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// JavaScript invokes.
- // Set up call kind marking in t1. The method takes t1 as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void IsObjectJSObjectType(Register heap_object,
@@ -949,13 +927,10 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// -------------------------------------------------------------------------
// Debugger Support.
void DebugBreak();
-#endif
-
// -------------------------------------------------------------------------
// Exception handling.
@@ -974,12 +949,6 @@ class MacroAssembler: public Assembler {
// handler chain.
void ThrowUncatchable(Register value);
- // Throw a message string as an exception.
- void Throw(BailoutReason reason);
-
- // Throw a message string as an exception if a condition is not true.
- void ThrowIf(Condition cc, BailoutReason reason, Register rs, Operand rt);
-
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
@@ -1091,10 +1060,6 @@ class MacroAssembler: public Assembler {
Handle<Code> success,
SmiCheckType smi_check_type);
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
@@ -1189,16 +1154,18 @@ class MacroAssembler: public Assembler {
li(s2, Operand(ref));
}
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
// Call a code stub.
void CallStub(CodeStub* stub,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = cc_always,
- Register r1 = zero_reg,
- const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
+ COND_ARGS);
// Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
+ void TailCallStub(CodeStub* stub, COND_ARGS);
+
+#undef COND_ARGS
void CallJSExitStub(CodeStub* stub);
@@ -1270,24 +1237,23 @@ class MacroAssembler: public Assembler {
void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DoubleRegister dst);
+ void MovFromFloatResult(DoubleRegister dst);
+ void MovFromFloatParameter(DoubleRegister dst);
// There are two ways of passing double arguments on MIPS, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DoubleRegister dreg);
- void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
- void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
+ void CallApiFunctionAndReturn(Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand);
@@ -1320,6 +1286,10 @@ class MacroAssembler: public Assembler {
return code_object_;
}
+  // Emit code for a truncating division by a constant. The dividend register
+  // is unchanged and the at register is clobbered; dividend and result must differ.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
// -------------------------------------------------------------------------
// StatsCounter support.
@@ -1444,6 +1414,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
@@ -1522,15 +1496,40 @@ class MacroAssembler: public Assembler {
void NumberOfOwnDescriptors(Register dst, Register map);
template<typename Field>
+ void DecodeField(Register dst, Register src) {
+ Ext(dst, src, Field::kShift, Field::kSize);
+ }
+
+ template<typename Field>
void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register dst, Register src) {
static const int shift = Field::kShift;
- static const int mask = (Field::kMask >> shift) << kSmiTagSize;
- srl(reg, reg, shift);
- And(reg, reg, Operand(mask));
+ static const int mask = Field::kMask >> shift << kSmiTagSize;
+ STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ if (shift < kSmiTagSize) {
+ sll(dst, src, kSmiTagSize - shift);
+ And(dst, dst, Operand(mask));
+ } else if (shift > kSmiTagSize) {
+ srl(dst, src, shift - kSmiTagSize);
+ And(dst, dst, Operand(mask));
+ } else {
+ And(dst, src, Operand(mask));
+ }
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register reg) {
+    DecodeFieldToSmi<Field>(reg, reg);
}
// Generates function and stub prologue code.
- void Prologue(PrologueFrameMode frame_mode);
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
// Activation support.
void EnterFrame(StackFrame::Type type);
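
The DecodeFieldToSmi template above extracts a BitField-encoded value and leaves it Smi-tagged in a single shift-and-mask, folding the field shift and the Smi tag shift together. A host-side model, assuming V8's BitField conventions (kShift, kMask) and kSmiTagSize == 1 on this 32-bit target:

    // Model: (value >> kShift) << kSmiTagSize, computed with one shift.
    template <typename Field>
    uint32_t DecodeFieldToSmiModel(uint32_t src) {
      const int kSmiTagSize = 1;
      const int shift = Field::kShift;
      const uint32_t mask = (Field::kMask >> shift) << kSmiTagSize;
      if (shift < kSmiTagSize) return (src << (kSmiTagSize - shift)) & mask;
      if (shift > kSmiTagSize) return (src >> (shift - kSmiTagSize)) & mask;
      return src & mask;
    }
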
@@ -1580,14 +1579,6 @@ class MacroAssembler: public Assembler {
int num_reg_arguments,
int num_double_arguments);
- void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- void BranchShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
const Operand& rt,
@@ -1608,8 +1599,7 @@ class MacroAssembler: public Assembler {
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Get the code for the given builtin. Returns if able to resolve
// the function in the 'resolved' flag.
@@ -1661,7 +1651,14 @@ class MacroAssembler: public Assembler {
// an assertion to fail.
class CodePatcher {
public:
- CodePatcher(byte* address, int instructions);
+ enum FlushICache {
+ FLUSH,
+ DONT_FLUSH
+ };
+
+ CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache = FLUSH);
virtual ~CodePatcher();
// Macro assembler to emit code.
@@ -1681,6 +1678,7 @@ class CodePatcher {
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
+  FlushICache flush_cache_;  // Whether to flush the I-cache after patching.
};
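
CodePatcher now takes a FlushICache argument so several adjacent patches can share a single flush. A hedged usage sketch (branch_address is a hypothetical name):

    // With the default FLUSH, the destructor calls CPU::FlushICache over the
    // patched range and asserts exactly `instructions` instructions were written.
    {
      CodePatcher patcher(branch_address, 1);  // One instruction, FLUSH.
      patcher.ChangeBranchCondition(ne);       // Rewrite the condition field.
    }
    // With DONT_FLUSH the caller batches patches and flushes once at the end:
    //   CodePatcher patcher(addr, n, CodePatcher::DONT_FLUSH);
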
diff --git a/chromium/v8/src/mips/regexp-macro-assembler-mips.cc b/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
index 49dec3c0246..bbd5e128e5c 100644
--- a/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "mips/regexp-macro-assembler-mips.h"
+#include "src/unicode.h"
+#include "src/log.h"
+#include "src/code-stubs.h"
+#include "src/regexp-stack.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/mips/regexp-macro-assembler-mips.h"
namespace v8 {
namespace internal {
@@ -1096,7 +1073,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
__ li(t9, Operand(stack_guard_check));
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(masm_, t9);
// DirectCEntryStub allocated space for the C argument slots so we have to
@@ -1127,7 +1104,8 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
@@ -1153,7 +1131,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+ Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid.
int delta = code_handle->address() - re_code->address();
diff --git a/chromium/v8/src/mips/regexp-macro-assembler-mips.h b/chromium/v8/src/mips/regexp-macro-assembler-mips.h
index 063582c6485..921a84817c8 100644
--- a/chromium/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/chromium/v8/src/mips/regexp-macro-assembler-mips.h
@@ -1,39 +1,16 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#include "macro-assembler.h"
-#include "code.h"
-#include "mips/macro-assembler-mips.h"
+#include "src/mips/assembler-mips.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/macro-assembler.h"
+#include "src/code.h"
+#include "src/mips/macro-assembler-mips.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/mips/simulator-mips.cc b/chromium/v8/src/mips/simulator-mips.cc
index acc65251e23..dfb1ee3f071 100644
--- a/chromium/v8/src/mips/simulator-mips.cc
+++ b/chromium/v8/src/mips/simulator-mips.cc
@@ -1,44 +1,22 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include <stdlib.h>
#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
#include <cmath>
-#include <cstdarg>
-#include "v8.h"
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "cpu.h"
-#include "disasm.h"
-#include "assembler.h"
-#include "globals.h" // Need the BitCast.
-#include "mips/constants-mips.h"
-#include "mips/simulator-mips.h"
+#include "src/cpu.h"
+#include "src/disasm.h"
+#include "src/assembler.h"
+#include "src/globals.h" // Need the BitCast.
+#include "src/mips/constants-mips.h"
+#include "src/mips/simulator-mips.h"
// Only build the simulator if not compiling for real MIPS hardware.
@@ -862,12 +840,12 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache,
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
- CHECK(memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize) == 0);
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset),
+ Instruction::kInstrSize));
} else {
// Cache miss. Load memory into the cache.
- OS::MemCopy(cached_line, line, CachePage::kLineLength);
+ memcpy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
@@ -924,6 +902,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
+Simulator::~Simulator() {
+}
+
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@@ -971,6 +953,12 @@ class Redirection {
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
+ static void* ReverseRedirection(int32_t reg) {
+ Redirection* redirection = FromSwiInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
private:
void* external_function_;
uint32_t swi_instruction_;
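
The new ReverseRedirection inverts the simulator's call redirection: the stub passes the address of the Redirection's swi trampoline where a real host function pointer would go, and the profiling call paths below map it back before dispatching. A minimal sketch of the recovery step, using the arg1 value from the SoftwareInterrupt handler:

    // arg1 carries the trampoline address that stood in for the callback;
    // ReverseRedirection returns the original external_function_ pointer.
    void* host_callback = Redirection::ReverseRedirection(arg1);
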
@@ -1059,8 +1047,8 @@ double Simulator::get_double_from_register_pair(int reg) {
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(registers_[0])];
- OS::MemCopy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return dm_val;
}
@@ -1108,14 +1096,14 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
// Registers a0 and a1 -> x.
reg_buffer[0] = get_register(a0);
reg_buffer[1] = get_register(a1);
- OS::MemCopy(x, buffer, sizeof(buffer));
+ memcpy(x, buffer, sizeof(buffer));
// Registers a2 and a3 -> y.
reg_buffer[0] = get_register(a2);
reg_buffer[1] = get_register(a3);
- OS::MemCopy(y, buffer, sizeof(buffer));
+ memcpy(y, buffer, sizeof(buffer));
// Register 2 -> z.
reg_buffer[0] = get_register(a2);
- OS::MemCopy(z, buffer, sizeof(*z));
+ memcpy(z, buffer, sizeof(*z));
}
}
@@ -1127,7 +1115,7 @@ void Simulator::SetFpResult(const double& result) {
} else {
char buffer[2 * sizeof(registers_[0])];
int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- OS::MemCopy(buffer, &result, sizeof(buffer));
+ memcpy(buffer, &result, sizeof(buffer));
// Copy result to v0 and v1.
set_register(v0, reg_buffer[0]);
set_register(v1, reg_buffer[1]);
@@ -1388,12 +1376,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
typedef void (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, int32_t arg2);
+ int32_t arg0, int32_t arg1, void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
@@ -1554,7 +1542,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- target(arg0, arg1);
+ target(arg0, Redirection::ReverseRedirection(arg1));
} else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
@@ -1572,7 +1560,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
- target(arg0, arg1, arg2);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
@@ -1718,12 +1706,12 @@ void Simulator::SignalExceptions() {
// Handle execution based on instruction types.
void Simulator::ConfigureTypeRegister(Instruction* instr,
- int32_t& alu_out,
- int64_t& i64hilo,
- uint64_t& u64hilo,
- int32_t& next_pc,
- int32_t& return_addr_reg,
- bool& do_interrupt) {
+ int32_t* alu_out,
+ int64_t* i64hilo,
+ uint64_t* u64hilo,
+ int32_t* next_pc,
+ int32_t* return_addr_reg,
+ bool* do_interrupt) {
// Every local variable declared here needs to be const.
// This is to make sure that changed values are sent back to
// DecodeTypeRegister correctly.
@@ -1752,10 +1740,10 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case CFC1:
// At the moment only FCSR is supported.
ASSERT(fs_reg == kFCSRRegister);
- alu_out = FCSR_;
+ *alu_out = FCSR_;
break;
case MFC1:
- alu_out = get_fpu_register(fs_reg);
+ *alu_out = get_fpu_register(fs_reg);
break;
case MFHC1:
UNIMPLEMENTED_MIPS();
@@ -1774,7 +1762,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
break;
default:
UNIMPLEMENTED_MIPS();
- };
+ }
break;
case COP1X:
break;
@@ -1782,56 +1770,56 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
switch (instr->FunctionFieldRaw()) {
case JR:
case JALR:
- next_pc = get_register(instr->RsValue());
- return_addr_reg = instr->RdValue();
+ *next_pc = get_register(instr->RsValue());
+ *return_addr_reg = instr->RdValue();
break;
case SLL:
- alu_out = rt << sa;
+ *alu_out = rt << sa;
break;
case SRL:
if (rs_reg == 0) {
// Regular logical right shift of a word by a fixed number of
// bits instruction. RS field is always equal to 0.
- alu_out = rt_u >> sa;
+ *alu_out = rt_u >> sa;
} else {
// Logical right-rotate of a word by a fixed number of bits. This
// is a special case of the SRL instruction, added in MIPS32 Release 2.
// RS field is equal to 00001.
- alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+ *alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
}
break;
case SRA:
- alu_out = rt >> sa;
+ *alu_out = rt >> sa;
break;
case SLLV:
- alu_out = rt << rs;
+ *alu_out = rt << rs;
break;
case SRLV:
if (sa == 0) {
// Regular logical right-shift of a word by a variable number of
// bits instruction. SA field is always equal to 0.
- alu_out = rt_u >> rs;
+ *alu_out = rt_u >> rs;
} else {
// Logical right-rotate of a word by a variable number of bits.
// This is a special case of the SRLV instruction, added in MIPS32
// Release 2. SA field is equal to 00001.
- alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ *alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
}
break;
case SRAV:
- alu_out = rt >> rs;
+ *alu_out = rt >> rs;
break;
case MFHI:
- alu_out = get_register(HI);
+ *alu_out = get_register(HI);
break;
case MFLO:
- alu_out = get_register(LO);
+ *alu_out = get_register(LO);
break;
case MULT:
- i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
break;
case MULTU:
- u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
break;
case ADD:
if (HaveSameSign(rs, rt)) {
@@ -1841,10 +1829,10 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
}
}
- alu_out = rs + rt;
+ *alu_out = rs + rt;
break;
case ADDU:
- alu_out = rs + rt;
+ *alu_out = rs + rt;
break;
case SUB:
if (!HaveSameSign(rs, rt)) {
@@ -1854,51 +1842,50 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
}
}
- alu_out = rs - rt;
+ *alu_out = rs - rt;
break;
case SUBU:
- alu_out = rs - rt;
+ *alu_out = rs - rt;
break;
case AND:
- alu_out = rs & rt;
+ *alu_out = rs & rt;
break;
case OR:
- alu_out = rs | rt;
+ *alu_out = rs | rt;
break;
case XOR:
- alu_out = rs ^ rt;
+ *alu_out = rs ^ rt;
break;
case NOR:
- alu_out = ~(rs | rt);
+ *alu_out = ~(rs | rt);
break;
case SLT:
- alu_out = rs < rt ? 1 : 0;
+ *alu_out = rs < rt ? 1 : 0;
break;
case SLTU:
- alu_out = rs_u < rt_u ? 1 : 0;
+ *alu_out = rs_u < rt_u ? 1 : 0;
break;
// Break and trap instructions.
case BREAK:
-
- do_interrupt = true;
+ *do_interrupt = true;
break;
case TGE:
- do_interrupt = rs >= rt;
+ *do_interrupt = rs >= rt;
break;
case TGEU:
- do_interrupt = rs_u >= rt_u;
+ *do_interrupt = rs_u >= rt_u;
break;
case TLT:
- do_interrupt = rs < rt;
+ *do_interrupt = rs < rt;
break;
case TLTU:
- do_interrupt = rs_u < rt_u;
+ *do_interrupt = rs_u < rt_u;
break;
case TEQ:
- do_interrupt = rs == rt;
+ *do_interrupt = rs == rt;
break;
case TNE:
- do_interrupt = rs != rt;
+ *do_interrupt = rs != rt;
break;
case MOVN:
case MOVZ:
@@ -1911,19 +1898,23 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
case MUL:
- alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
+ *alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
break;
case CLZ:
- alu_out = __builtin_clz(rs_u);
+ // MIPS32 spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ // GCC __builtin_clz: If input is 0, the result is undefined.
+ *alu_out =
+ rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
@@ -1934,7 +1925,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
uint16_t lsb = sa;
uint16_t size = msb - lsb + 1;
uint32_t mask = (1 << size) - 1;
- alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+ *alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
break;
}
case EXT: { // Mips32r2 instruction.
@@ -1944,16 +1935,16 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
uint16_t lsb = sa;
uint16_t size = msb + 1;
uint32_t mask = (1 << size) - 1;
- alu_out = (rs_u & (mask << lsb)) >> lsb;
+ *alu_out = (rs_u & (mask << lsb)) >> lsb;
break;
}
default:
UNREACHABLE();
- };
+ }
break;
default:
UNREACHABLE();
- };
+ }
}
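
The CLZ change above pins down the zero case: the MIPS32 spec defines clz(0) == 32, while GCC's __builtin_clz is undefined for a zero input. A minimal host-side model of the corrected semantics:

    // clz(0) must be 32 per the MIPS32 spec; guard before the intrinsic.
    static inline int32_t MipsClz(uint32_t rs_u) {
      return rs_u == 0 ? 32 : __builtin_clz(rs_u);
    }
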
@@ -1992,12 +1983,12 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// Set up the variables if needed before executing the instruction.
ConfigureTypeRegister(instr,
- alu_out,
- i64hilo,
- u64hilo,
- next_pc,
- return_addr_reg,
- do_interrupt);
+ &alu_out,
+ &i64hilo,
+ &u64hilo,
+ &next_pc,
+ &return_addr_reg,
+ &do_interrupt);
// ---------- Raise exceptions triggered.
SignalExceptions();
@@ -2115,7 +2106,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// In rounding mode 0 it should behave like ROUND.
case ROUND_W_D: // Round double to word (round half to even).
{
- double rounded = floor(fs + 0.5);
+ double rounded = std::floor(fs + 0.5);
int32_t result = static_cast<int32_t>(rounded);
if ((result & 1) != 0 && result - fs == 0.5) {
// If the number is halfway between two integers,
@@ -2140,7 +2131,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case FLOOR_W_D: // Round double to word towards negative infinity.
{
- double rounded = floor(fs);
+ double rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
@@ -2150,7 +2141,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case CEIL_W_D: // Round double to word towards positive infinity.
{
- double rounded = ceil(fs);
+ double rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
@@ -2176,19 +2167,20 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
}
case ROUND_L_D: { // Mips32r2 instruction.
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ double rounded =
+ fs > 0 ? std::floor(fs + 0.5) : std::ceil(fs - 0.5);
i64 = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
}
case FLOOR_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(floor(fs));
+ i64 = static_cast<int64_t>(std::floor(fs));
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
case CEIL_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(ceil(fs));
+ i64 = static_cast<int64_t>(std::ceil(fs));
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
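
ROUND_W_D above implements the FPU's default round-to-nearest-even on top of std::floor: round half away from zero first, then step back when an exact .5 tie lands on an odd integer. The tail of that branch is elided by the diff context; this sketch assumes it decrements to the even neighbour, as the comment indicates:

    // Host-side model of the tie-to-even adjustment in ROUND_W_D.
    static int32_t RoundHalfToEven(double fs) {
      double rounded = std::floor(fs + 0.5);
      int32_t result = static_cast<int32_t>(rounded);
      if ((result & 1) != 0 && result - fs == 0.5) {
        result--;  // Exact tie on an odd integer: take the even neighbour.
      }
      return result;
    }
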
@@ -2211,7 +2203,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
case L:
switch (instr->FunctionFieldRaw()) {
@@ -2233,7 +2225,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
case COP1X:
switch (instr->FunctionFieldRaw()) {
@@ -2246,7 +2238,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
@@ -2327,7 +2319,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default: // For other special opcodes we do the default operation.
set_register(rd_reg, alu_out);
- };
+ }
break;
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
@@ -2353,14 +2345,14 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
// cases.
default:
set_register(rd_reg, alu_out);
- };
+ }
}
@@ -2421,7 +2413,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
// ------------- REGIMM class.
case REGIMM:
@@ -2440,7 +2432,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
switch (instr->RtFieldRaw()) {
case BLTZ:
case BLTZAL:
@@ -2459,7 +2451,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
default:
break;
- };
+ }
break; // case REGIMM.
// ------------- Branch instructions.
// When comparing to zero, the encoding of rt field is always 0, so we don't
@@ -2592,7 +2584,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
// ---------- Raise exceptions triggered.
SignalExceptions();
@@ -2668,7 +2660,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
default:
break;
- };
+ }
if (execute_branch_delay_instruction) {
@@ -2894,9 +2886,9 @@ double Simulator::CallFP(byte* entry, double d0, double d1) {
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- OS::MemCopy(buffer, &d0, sizeof(d0));
+ memcpy(buffer, &d0, sizeof(d0));
set_dw_register(a0, buffer);
- OS::MemCopy(buffer, &d1, sizeof(d1));
+ memcpy(buffer, &d1, sizeof(d1));
set_dw_register(a2, buffer);
}
CallInternal(entry);
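
CallFP above marshals doubles into integer register pairs bitwise via memcpy (this patch drops the OS::MemCopy wrapper throughout the file). A host-side sketch of the same trick, assuming a 32-bit target where one double spans the a0/a1 pair:

    #include <cstring>

    // Sketch: split a double into the two 32-bit halves handed to a0/a1.
    // memcpy is the aliasing-safe way to reinterpret the IEEE-754 bits.
    static void SplitDouble(double d, int32_t halves[2]) {
      memcpy(halves, &d, sizeof(d));
    }
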
diff --git a/chromium/v8/src/mips/simulator-mips.h b/chromium/v8/src/mips/simulator-mips.h
index d9fd10f245c..20dde25b4c1 100644
--- a/chromium/v8/src/mips/simulator-mips.h
+++ b/chromium/v8/src/mips/simulator-mips.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Declares a Simulator for MIPS instructions if we are not generating a native
@@ -36,8 +13,8 @@
#ifndef V8_MIPS_SIMULATOR_MIPS_H_
#define V8_MIPS_SIMULATOR_MIPS_H_
-#include "allocation.h"
-#include "constants-mips.h"
+#include "src/allocation.h"
+#include "src/mips/constants-mips.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native mips platform.
@@ -61,9 +38,6 @@ typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
(FUNCTION_CAST<mips_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
// just use the C stack limit.
@@ -96,8 +70,8 @@ class SimulatorStack : public v8::internal::AllStatic {
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
-#include "hashmap.h"
-#include "assembler.h"
+#include "src/hashmap.h"
+#include "src/assembler.h"
namespace v8 {
namespace internal {
@@ -203,6 +177,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
@@ -285,12 +263,12 @@ class Simulator {
// Helper function for DecodeTypeRegister.
void ConfigureTypeRegister(Instruction* instr,
- int32_t& alu_out,
- int64_t& i64hilo,
- uint64_t& u64hilo,
- int32_t& next_pc,
- int32_t& return_addr_reg,
- bool& do_interrupt);
+ int32_t* alu_out,
+ int64_t* i64hilo,
+ uint64_t* u64hilo,
+ int32_t* next_pc,
+ int32_t* return_addr_reg,
+ bool* do_interrupt);
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
@@ -409,10 +387,6 @@ class Simulator {
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. Setting the c_limit to indicate a very small
diff --git a/chromium/v8/src/mips/stub-cache-mips.cc b/chromium/v8/src/mips/stub-cache-mips.cc
index 9f5089d55d9..13e7e4bde9c 100644
--- a/chromium/v8/src/mips/stub-cache-mips.cc
+++ b/chromium/v8/src/mips/stub-cache-mips.cc
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
+#include "src/ic-inl.h"
+#include "src/codegen.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -287,15 +264,19 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ lw(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ASSERT(!prototype.is(at));
- __ li(at, isolate->global_object());
- __ Branch(miss, ne, prototype, Operand(at));
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ lw(scratch, MemOperand(cp, offset));
+ __ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ lw(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+ __ li(at, function);
+ __ Branch(miss, ne, at, Operand(scratch));
+
// Load its initial map. The global functions all have initial maps.
__ li(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -309,7 +290,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -338,61 +319,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi, t0);
-
- // Check that the object is a string.
- __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ And(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ Branch(non_string_object,
- ne,
- scratch2,
- Operand(static_cast<int32_t>(kStringTag)));
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
-
- // Unwrap the value and check if the wrapped value is a string.
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -463,11 +389,29 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ li(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ Handle<Map> current;
+ if (!it.Done()) {
+ __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ // Do the CompareMap() directly within the Branch() functions.
+ current = it.Current();
+ it.Advance();
+ if (it.Done()) {
+ __ Branch(miss_label, ne, scratch1, Operand(current));
+ break;
+ }
+ __ Branch(&do_store, eq, scratch1, Operand(current));
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
@@ -541,15 +485,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -567,15 +511,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Get the properties array
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ sw(value_reg, FieldMemOperand(scratch1, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -617,29 +561,40 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- int index = lookup->GetFieldIndex().field_index();
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
+ FieldIndex index = lookup->GetFieldIndex();
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ Handle<Map> current;
+ while (true) {
+ // Do the CompareMap() directly within the Branch() functions.
+ current = it.Current();
+ it.Advance();
+ if (it.Done()) {
+ __ Branch(miss_label, ne, scratch1, Operand(current));
+ break;
+ }
+ __ Branch(&do_store, eq, scratch1, Operand(current));
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
// Load the double storage.
- if (index < 0) {
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ lw(scratch1, FieldMemOperand(receiver_reg, offset));
+ if (index.is_inobject()) {
+ __ lw(scratch1, FieldMemOperand(receiver_reg, index.offset()));
} else {
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ lw(scratch1, FieldMemOperand(scratch1, offset));
+ __ lw(scratch1, FieldMemOperand(scratch1, index.offset()));
}
// Store the value into the storage.
@@ -667,12 +622,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
+ if (index.is_inobject()) {
// Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+ __ sw(value_reg, FieldMemOperand(receiver_reg, index.offset()));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -680,7 +634,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// Pass the now unused name_reg as a scratch register.
__ mov(name_reg, value_reg);
__ RecordWriteField(receiver_reg,
- offset,
+ index.offset(),
name_reg,
scratch1,
kRAHasNotBeenSaved,
@@ -690,13 +644,12 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
} else {
// Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array.
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ sw(value_reg, FieldMemOperand(scratch1, offset));
+ __ sw(value_reg, FieldMemOperand(scratch1, index.offset()));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -704,7 +657,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
- offset,
+ index.offset(),
name_reg,
receiver_reg,
kRAHasNotBeenSaved,
@@ -765,92 +718,71 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- ASSERT(Smi::FromInt(0) == 0);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(zero_reg);
+// Generate call to api function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!receiver.is(scratch_in));
+ // Preparing to push, adjust sp.
+ __ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
+ __ sw(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver.
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ sw(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg.
}
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
+ ASSERT(optimization.is_simple_api_call());
+ // Abi for CallApiFunctionStub.
+ Register callee = a0;
+ Register call_data = t0;
+ Register holder = a2;
+ Register api_function_address = a1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ li(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
- // : holder (set by CheckPrototypes)
- // -- sp[28] : last JS argument
- // -- ...
- // -- sp[(argc + 6) * 4] : first JS argument
- // -- sp[(argc + 7) * 4] : receiver
- // -----------------------------------
- typedef FunctionCallbackArguments FCA;
- // Save calling context.
- __ sw(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
- // Get the function and setup the context.
+ Isolate* isolate = masm->isolate();
Handle<JSFunction> function = optimization.constant_function();
- __ li(t1, function);
- __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
- __ sw(t1, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
-
- // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ li(a0, api_call_info);
- __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ li(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ li(call_data, api_call_info);
+ __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
} else {
- __ li(t2, call_data);
+ __ li(call_data, call_data_obj);
}
- // Store call data.
- __ sw(t2, MemOperand(sp, FCA::kDataIndex * kPointerSize));
- // Store isolate.
- __ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ sw(t3, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
- // Store ReturnValue default and ReturnValue.
- __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ sw(t1, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
- __ sw(t1, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
-
- // Prepare arguments.
- __ Move(a2, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // a0 = FunctionCallbackInfo&
- // Arguments is built at sp + 1 (sp is a reserved spot for ra).
- __ Addu(a0, sp, kPointerSize);
- // FunctionCallbackInfo::implicit_args_
- __ sw(a2, MemOperand(a0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ Addu(t0, a2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
- __ sw(t0, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(t0, Operand(argc));
- __ sw(t0, MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ // Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
@@ -858,249 +790,14 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
ExternalReference(&fun,
type,
masm->isolate());
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- masm->isolate());
-
- AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(
- fp, (2 + FCA::kReturnValueOffset) * kPointerSize);
-
- __ CallApiFunctionAndReturn(ref,
- function_address,
- thunk_ref,
- a1,
- kStackUnwindSpace,
- return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
-}
+ __ li(api_function_address, Operand(ref));
-
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- typedef FunctionCallbackArguments FCA;
- const int stack_space = kFastApiCallArguments + argc + 1;
- // Assign stack space for the call arguments.
- __ Subu(sp, sp, Operand(stack_space * kPointerSize));
- // Write holder to stack frame.
- __ sw(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- // Write receiver to stack frame.
- int index = stack_space - 1;
- __ sw(receiver, MemOperand(sp, index * kPointerSize));
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ sw(receiver, MemOperand(sp, index-- * kPointerSize));
- }
-
- GenerateFastApiDirectCall(masm, optimization, argc, true);
+ // Tail call to the stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
}
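
After the rewrite, GenerateFastApiCall lays the receiver and arguments out itself before tail-calling CallApiFunctionStub: one Subu reserves argc + 1 slots, the receiver lands at sp + argc * kPointerSize, and values[j] lands at sp + j * kPointerSize. A small host-side model of that layout (plain C++, nothing V8-specific):

    #include <cstdio>
    #include <vector>

    int main() {
      const int kPointerSize = 4;
      const int argc = 3;
      const char* values[] = {"values[0]", "values[1]", "values[2]"};

      // sp -= (argc + 1) * kPointerSize, then store receiver and args.
      std::vector<const char*> slot(argc + 1);
      slot[argc] = "receiver";                      // sp + argc * 4
      for (int i = 0; i < argc; i++) {
        slot[argc - 1 - i] = values[argc - 1 - i];  // sp + (argc-1-i) * 4
      }

      for (int j = argc; j >= 0; j--) {
        std::printf("sp + %2d : %s\n", j * kPointerSize, slot[j]);
      }
    }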
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(CallStubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
- Counters* counters = masm->isolate()->counters();
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
- handle(lookup->holder()), scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(
- masm, optimization, arguments_.immediate(), false);
- } else {
- Handle<JSFunction> function = optimization.constant_function();
- __ Move(a0, receiver);
- stub_compiler_->GenerateJumpFunction(object, function);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ Branch(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
-
- // Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, interceptor_holder,
- IC::kLoadPropertyWithInterceptorForCall);
-
- // Restore the name_ register.
- __ pop(name_);
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- __ Push(receiver, holder, name_);
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, holder_obj,
- IC::kLoadPropertyWithInterceptorOnly);
- __ pop(name_);
- __ pop(holder);
- __ pop(receiver);
- }
- // If interceptor returns no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
- }
-
- CallStubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- ExtraICState extra_ic_state_;
-};
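
The deleted CallInterceptorCompiler encodes a two-phase protocol: first call the interceptor in kLoadPropertyWithInterceptorOnly mode, and only if it returns the no-interceptor-result sentinel fall back to the cached constant function. A hedged sketch of that control flow, with std::optional standing in for the sentinel:

    #include <cstdio>
    #include <optional>

    // Sketch only: std::optional models Heap::kNoInterceptorResultSentinel.
    std::optional<int> LoadWithInterceptorOnly(bool interceptor_has_result) {
      if (interceptor_has_result) return 7;  // interceptor produced a value
      return std::nullopt;                   // sentinel: fall back
    }

    int main() {
      const int kCachedConstantFunction = 42;  // illustrative stand-in
      std::optional<int> probe = LoadWithInterceptorOnly(false);
      int result = probe.has_value() ? *probe : kCachedConstantFunction;
      std::printf("%d\n", result);  // 42: the cached function wins
    }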
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1110,20 +807,16 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<Type> type,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ li(scratch1, Operand(receiver_map));
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1134,13 +827,10 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register reg = object_reg;
int depth = 0;
- typedef FunctionCallbackArguments FCA;
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
Handle<JSObject> current = Handle<JSObject>::null();
- if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ }
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder->map());
@@ -1163,7 +853,7 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
- current->property_dictionary()->FindEntry(*name) ==
+ current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1204,10 +894,6 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
}
}
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
// Go to the next object in the prototype chain.
current = prototype;
current_map = handle(current->map());
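
CheckPrototypes still walks map by map from the receiver to the holder, emitting a guard per hop; only the save_at_depth bookkeeping is gone now that GenerateFastApiCall places the holder itself. The loop shape, reduced to a standalone sketch (guards elided):

    #include <cstdio>

    struct Obj { const Obj* prototype; };  // toy stand-in for JSObject

    // One map/type guard per hop in the real stub; here the "guard" is
    // just the hop itself.
    int WalkToHolder(const Obj* current, const Obj* holder) {
      int depth = 0;
      while (current != holder) {
        current = current->prototype;
        ++depth;
      }
      return depth;
    }

    int main() {
      Obj holder{nullptr};
      Obj middle{&holder};
      Obj receiver{&middle};
      std::printf("depth=%d\n", WalkToHolder(&receiver, &holder));  // 2
    }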
@@ -1256,7 +942,7 @@ void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<Type> type,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1302,19 +988,15 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex field,
+ FieldIndex field,
Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
} else {
- KeyedLoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ KeyedLoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
}
}
@@ -1327,13 +1009,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1369,37 +1044,17 @@ void LoadStubCompiler::GenerateLoadCallback(
__ Addu(scratch2(), sp, 1 * kPointerSize);
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
- __ mov(a0, sp); // (first argument - a0) = Handle<Name>
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object** args_) as the data.
- __ sw(a2, MemOperand(sp, kPointerSize));
- // (second argument - a1) = AccessorInfo&
- __ Addu(a1, sp, kPointerSize);
+ // ABI for CallApiGetter.
+ Register getter_address_reg = a2;
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ li(getter_address_reg, Operand(ref));
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
- ExternalReference::Type thunk_type =
- ExternalReference::PROFILING_GETTER_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- isolate());
- __ CallApiFunctionAndReturn(ref,
- getter_address,
- thunk_ref,
- a2,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
}
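
Same shape as the function-call path: instead of building an exit frame and calling CallApiFunctionAndReturn per handler, each load handler now only loads the getter's address into a2 and tail-calls one shared CallApiGetterStub. Modeled as a host function pointer passed to a single trampoline (assumption: the stub's frame and GC bookkeeping are elided):

    #include <cstdio>

    using Getter = int (*)(int receiver);

    // Shared, compiled-once trampoline; every load handler jumps here with
    // the getter address in a "register" (the a2 parameter above).
    int CallApiGetterTrampoline(Getter getter_address, int receiver) {
      return getter_address(receiver);
    }

    int LengthGetter(int receiver) { return receiver * 2; }

    int main() {
      std::printf("%d\n", CallApiGetterTrampoline(&LengthGetter, 21));  // 42
    }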
@@ -1482,1049 +1137,25 @@ void LoadStubCompiler::GenerateLoadInterceptor(
this->name(), interceptor_holder);
ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
__ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Branch(miss, ne, a2, Operand(name));
- }
-}
-
-
-void CallStubCompiler::GenerateFunctionCheck(Register function,
- Register scratch,
- Label* miss) {
- __ JumpIfSmi(function, miss);
- __ GetObjectType(function, scratch, scratch);
- __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ li(a3, Operand(cell));
- __ lw(a1, FieldMemOperand(a3, Cell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- GenerateFunctionCheck(a1, a3, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
- __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Branch(miss, ne, t0, Operand(a3));
- } else {
- __ Branch(miss, ne, a1, Operand(function));
- }
-}
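
The guard being deleted here distinguishes old-space from new-space functions: a new-space closure can move (and several closures share one SharedFunctionInfo), so the stub compared the stable shared info instead of the closure pointer. As a plain-C++ sketch of the predicate:

    #include <cstdio>

    struct SharedFunctionInfo { int id; };
    struct JSFunction { const SharedFunctionInfo* shared; };

    // Sketch of the deleted check: identity for old-space functions,
    // shared-info equality when the expected closure lives in new space.
    bool CellGuardHolds(const JSFunction* seen, const JSFunction* expected,
                        bool expected_in_new_space) {
      if (expected_in_new_space) return seen->shared == expected->shared;
      return seen == expected;
    }

    int main() {
      SharedFunctionInfo sfi{1};
      JSFunction f1{&sfi}, f2{&sfi};  // two closures over the same SFI
      std::printf("%d %d\n", CellGuardHolds(&f1, &f2, true),
                  CellGuardHolds(&f1, &f2, false));  // 1 0
    }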
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state());
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(
- object, holder, name, RECEIVER_MAP_CHECK, &miss);
- GenerateFastPropertyLoad(masm(), a1, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
- GenerateJumpFunction(object, a1, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->SetElementsKind(GetInitialFastElementsKind());
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- const int argc = arguments().immediate();
- __ li(a0, Operand(argc));
- __ li(a2, Operand(site_feedback_cell));
- __ li(a1, Operand(function));
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- Register receiver = a0;
- Register scratch = a1;
-
- const int argc = arguments().immediate();
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ DropAndRet(argc + 1);
- } else {
- Label call_builtin;
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- Register elements = t2;
- Register end_elements = t1;
- // Get the elements array of the object.
- __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &check_double,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into scratch and calculate new length.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));
-
- // Check if value is a smi.
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(t0, &with_write_barrier);
-
- // Save new length.
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
-
- // Check for a smi.
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
-
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- scratch,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into scratch and calculate new length.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&call_builtin, gt, scratch, Operand(t0));
-
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(
- t0, scratch, elements, a3, t1, a2,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
-
- __ bind(&with_write_barrier);
-
- __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(a3, t3, &not_fast_object);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(a3, t3, &call_builtin);
-
- __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&call_builtin, eq, t3, Operand(at));
- // edx: receiver
- // a3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- a3,
- t3,
- &try_holey_map);
- __ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- a3,
- t3,
- &call_builtin);
- __ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(a3, a3, &call_builtin);
- }
-
- // Save new length.
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
-
- __ RecordWrite(elements,
- end_elements,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
-
- __ bind(&attempt_to_grow_elements);
- // scratch: array's length + 1.
- // t0: elements' length.
-
- if (!FLAG_inline_new) {
- __ Branch(&call_builtin);
- }
-
- __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(a2, &no_fast_elements_check);
- __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(t3, t3, &call_builtin);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top and check if it is the end of elements.
- __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
- __ li(t3, Operand(new_space_allocation_top));
- __ lw(a3, MemOperand(t3));
- __ Branch(&call_builtin, ne, end_elements, Operand(a3));
-
- __ li(t5, Operand(new_space_allocation_limit));
- __ lw(t5, MemOperand(t5));
- __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
- __ Branch(&call_builtin, hi, a3, Operand(t5));
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ sw(a3, MemOperand(t3));
- // Push the argument.
- __ sw(a2, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ sw(a3, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
- __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
- }
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
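
The deleted Array.push fast path splits into three cases: store within capacity, grow in place by kAllocationDelta slots when the backing store ends exactly at the new-space allocation top, or fall back to the builtin. The length/capacity arithmetic, stripped of the allocation-top check:

    #include <cstdio>

    // Sketch of the length math only; the real stub also verifies the
    // backing store abuts new_space_allocation_top before growing.
    int main() {
      const int kAllocationDelta = 4;  // same constant as the stub
      int length = 8, capacity = 8, argc = 1;

      int new_length = length + argc;
      bool needs_growth = new_length > capacity;
      if (needs_growth) capacity += kAllocationDelta;

      std::printf("new_length=%d capacity=%d grew=%d\n",
                  new_length, capacity, needs_growth);  // 9 12 1
    }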
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss, return_undefined, call_builtin;
- Register receiver = a0;
- Register scratch = a1;
- Register elements = a3;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- // Get the elements array of the object.
- __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into t0 and calculate new length.
- __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Subu(t0, t0, Operand(Smi::FromInt(1)));
- __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
-
- // Get the last element.
- __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(elements, elements, t1);
- __ lw(scratch, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Branch(&call_builtin, eq, scratch, Operand(t2));
-
- // Set the array's length.
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
- const int argc = arguments().immediate();
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
-
- __ bind(&return_undefined);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ DropAndRet(argc + 1);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
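
Array.pop's fast path (also deleted) reads the last slot before shrinking: a hole there means the true element may have to come from elsewhere (e.g. the prototype chain), so the stub bails to the builtin rather than returning the hole. A sketch with an int sentinel standing in for the hole value:

    #include <cstdio>

    const int kTheHole = -1;  // stand-in for the hole sentinel root

    enum class PopResult { kValue, kUndefined, kBuiltin };

    PopResult FastPop(int* elements, int* length, int* out) {
      if (*length == 0) return PopResult::kUndefined;
      int last = elements[*length - 1];
      if (last == kTheHole) return PopResult::kBuiltin;  // call_builtin
      elements[--*length] = kTheHole;  // refill the vacated slot
      *out = last;
      return PopResult::kValue;
    }

    int main() {
      int elems[] = {1, 2, 3};
      int len = 3, v = 0;
      if (FastPop(elems, &len, &v) == PopResult::kValue)
        std::printf("popped %d, new length %d\n", v, len);  // popped 3, 2
    }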
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
-
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = a0;
- Register index = t1;
- Register result = a1;
- const int argc = arguments().immediate();
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ mov(v0, result);
- __ DropAndRet(argc + 1);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kNanValueRootIndex);
- __ DropAndRet(argc + 1);
- }
-
- __ bind(&miss);
- // Restore function name in a2.
- __ li(a2, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = a0;
- Register index = t1;
- Register scratch = a3;
- Register result = a1;
- if (argc > 0) {
- __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ mov(v0, result);
- __ DropAndRet(argc + 1);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ DropAndRet(argc + 1);
- }
-
- __ bind(&miss);
- // Restore function name in a2.
- __ li(a2, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = a1;
- __ lw(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ And(code, code, Operand(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, v0);
- generator.GenerateFast(masm());
- __ DropAndRet(argc + 1);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
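
The And(code, code, Operand(Smi::FromInt(0xffff))) above works because 32-bit smis carry the value in the upper 31 bits with a zero tag bit (per the STATIC_ASSERTs): masking the tagged word with the tagged 0xffff truncates the char code to uint16 while leaving a valid smi. Modeled on int32_t:

    #include <cstdint>
    #include <cstdio>

    // 32-bit smi model: value v is encoded as v << 1, tag bit 0 is zero.
    int32_t SmiFromInt(int32_t v) { return v << 1; }
    int32_t SmiToInt(int32_t smi) { return smi >> 1; }

    int main() {
      int32_t code = SmiFromInt(0x12345);
      code &= SmiFromInt(0xffff);  // same trick as the And above
      std::printf("0x%x\n", SmiToInt(code));  // 0x2345
    }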
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss, slow;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into v0.
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTst(v0, t0);
- __ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
-
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
-
- Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
-
- // If fpu is enabled, we use the floor instruction.
-
- // Load the HeapNumber value.
- __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-
- // Backup FCSR.
- __ cfc1(a3, FCSR);
- // Clearing FCSR clears the exception mask with no side-effects.
- __ ctc1(zero_reg, FCSR);
- // Convert the argument to an integer.
- __ floor_w_d(f0, f0);
-
- // Start checking for special cases.
- // Get the argument exponent and clear the sign bit.
- __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
- __ And(t2, t1, Operand(~HeapNumber::kSignMask));
- __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
-
- // Retrieve FCSR and check for fpu errors.
- __ cfc1(t5, FCSR);
- __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
- __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
-
- // Check for NaN, Infinity, and -Infinity.
- // They are invariant through a Math.Floor call, so just
- // return the original argument.
- __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
- >> HeapNumber::kMantissaBitsInTopWord));
- __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
- // We had an overflow or underflow in the conversion. Check if we
- // have a big exponent.
- // If greater or equal, the argument is already round and in v0.
- __ Branch(&restore_fcsr_and_return, ge, t3,
- Operand(HeapNumber::kMantissaBits));
- __ Branch(&wont_fit_smi);
-
- __ bind(&no_fpu_error);
- // Move the result back to v0.
- __ mfc1(v0, f0);
- // Check if the result fits into a smi.
- __ Addu(a1, v0, Operand(0x40000000));
- __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
- // Tag the result.
- STATIC_ASSERT(kSmiTag == 0);
- __ sll(v0, v0, kSmiTagSize);
-
- // Check for -0.
- __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
- // t1 already holds the HeapNumber exponent.
- __ And(t0, t1, Operand(HeapNumber::kSignMask));
- // If our HeapNumber is negative it was -0, so load its address and return.
- // Else v0 is loaded with 0, so we can also just return.
- __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- __ bind(&restore_fcsr_and_return);
- // Restore FCSR and return.
- __ ctc1(a3, FCSR);
-
- __ DropAndRet(argc + 1);
-
- __ bind(&wont_fit_smi);
- // Restore FCSR and fall to slow case.
- __ ctc1(a3, FCSR);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
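
The overflow check in the floor stub (Addu(a1, v0, 0x40000000) followed by a branch on negative) tests that the converted integer fits a 31-bit smi payload, i.e. lies in [-2^30, 2^30). Equivalent host arithmetic, using unsigned addition to keep the wraparound well defined:

    #include <cstdint>
    #include <cstdio>

    // v + 0x40000000 stays non-negative as a 32-bit word exactly when
    // v is in [-2^30, 2^30) -- the range a 31-bit smi payload can hold.
    bool FitsSmi(int32_t v) {
      uint32_t sum = static_cast<uint32_t>(v) + 0x40000000u;
      return static_cast<int32_t>(sum) >= 0;
    }

    int main() {
      std::printf("%d %d %d\n", FitsSmi(0x3fffffff), FitsSmi(0x40000000),
                  FitsSmi(-0x40000000 - 1));  // 1 0 0
    }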
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into v0.
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(v0, &not_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ sra(t0, v0, kBitsPerInt - 1);
- __ Xor(a1, v0, t0);
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ Subu(v0, a1, t0);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ Branch(&slow, lt, v0, Operand(zero_reg));
-
- // Smi case done.
- __ DropAndRet(argc + 1);
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(&not_smi);
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ And(t0, a1, Operand(HeapNumber::kSignMask));
- __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
- __ DropAndRet(argc + 1);
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
- __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
- __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ DropAndRet(argc + 1);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
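
The smi path of the deleted Math.abs stub is the classic branchless absolute value: arithmetic-shift the sign bit across the word (sra), xor, then subtract. Only the most negative value overflows back to negative, which is exactly the Branch(&slow, lt, v0, zero_reg) case. On int32_t, with the subtraction done in unsigned arithmetic so the intentional wraparound is well defined:

    #include <cstdint>
    #include <cstdio>

    int32_t BranchlessAbs(int32_t v, bool* overflowed) {
      int32_t sign = v >> 31;  // 0 for v >= 0, -1 for v < 0
      uint32_t u = (static_cast<uint32_t>(v) ^ static_cast<uint32_t>(sign)) -
                   static_cast<uint32_t>(sign);  // xor, then +1 if negative
      int32_t result = static_cast<int32_t>(u);
      *overflowed = result < 0;  // only INT32_MIN-like inputs do this
      return result;
    }

    int main() {
      bool overflow = false;
      std::printf("%d\n", BranchlessAbs(-42, &overflow));  // 42
      BranchlessAbs(INT32_MIN, &overflow);
      std::printf("overflow=%d\n", overflow);              // 1
    }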
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
-
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(a1, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, a0, a3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
-
- ReserveSpaceForFastApiCall(masm(), a0);
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(
- IC::CurrentTypeOf(object, isolate()),
- a1, holder, a0, a3, t0, name, depth, &miss);
-
- GenerateFastApiDirectCall(masm(), optimization, argc, false);
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- HandlerFrontendFooter(&miss_before_stack_reserved);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
- Label success;
- // Check that the object is a boolean.
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(&success, eq, object, Operand(at));
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- __ Branch(miss, ne, object, Operand(at));
- __ bind(&success);
-}
-
-
-void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
- if (object->IsGlobalObject()) {
- const int argc = arguments().immediate();
- const int receiver_offset = argc * kPointerSize;
- __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, receiver_offset));
- }
-}
-
-
-Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* miss) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- GenerateNameCheck(name, miss);
-
- Register reg = a0;
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- const int receiver_offset = argc * kPointerSize;
- __ lw(a0, MemOperand(sp, receiver_offset));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(a0, miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1, a1, a3);
-
- // Check that the maps haven't changed.
- reg = CheckPrototypes(
- IC::CurrentTypeOf(object, isolate()),
- reg, holder, a1, a3, t0, name, miss);
- break;
-
- case STRING_CHECK: {
- // Check that the object is a string.
- __ GetObjectType(reg, a3, a3);
- __ Branch(miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, a1, miss);
- break;
- }
- case SYMBOL_CHECK: {
- // Check that the object is a symbol.
- __ GetObjectType(reg, a1, a3);
- __ Branch(miss, ne, a3, Operand(SYMBOL_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, a1, miss);
- break;
- }
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(reg, &fast);
- __ GetObjectType(reg, a3, a3);
- __ Branch(miss, ne, a3, Operand(HEAP_NUMBER_TYPE));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, a1, miss);
- break;
- }
- case BOOLEAN_CHECK: {
- GenerateBooleanCheck(reg, miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, a1, miss);
- break;
- }
- }
-
- if (check != RECEIVER_MAP_CHECK) {
- Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
- reg = CheckPrototypes(
- IC::CurrentTypeOf(prototype, isolate()),
- a1, holder, a1, a3, t0, name, miss);
- }
-
- return reg;
-}
-
-
-void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
- Register function,
- Label* miss) {
- ASSERT(function.is(a1));
- // Check that the function really is a function.
- GenerateFunctionCheck(function, a3, miss);
- PatchGlobalProxy(object);
- // Invoke the function.
- __ InvokeFunction(a1, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind());
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), a2, extra_state());
- compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
- &miss);
-
- // Move returned value, the function to call, to a1.
- __ mov(a1, v0);
- // Restore receiver.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
-
- GenerateJumpFunction(object, a1, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- // Potentially loads a closure that matches the shared function info of the
- // function, rather than function.
- GenerateLoadFunctionFromCell(cell, function, &miss);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
- GenerateJumpFunction(object, a1, function);
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
// Stub never generated for non-global objects that require access
// checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- __ push(receiver()); // Receiver.
+ __ Push(receiver(), holder_reg); // Receiver.
__ li(at, Operand(callback)); // Callback info.
__ push(at);
__ li(at, Operand(name));
@@ -2533,24 +1164,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -2563,27 +1177,31 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
// -- ra : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
- __ push(a0);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ push(a1);
- __ push(a0);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ lw(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2607,21 +1225,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -2629,16 +1232,12 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name) {
NonexistentHandlerFrontend(type, last, name);
@@ -2666,31 +1265,22 @@ Register* KeyedLoadStubCompiler::registers() {
}
-Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { a1, a2, a0, a3, t0, t1 };
- return registers;
+Register StoreStubCompiler::value() {
+ return a0;
}
-Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { a2, a1, a0, a3, t0, t1 };
+Register* StoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a1, a2, a3, t0, t1 };
return registers;
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Branch(miss, ne, name_reg, Operand(name));
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Branch(miss, ne, name_reg, Operand(name));
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a2, a1, a3, t0, t1 };
+ return registers;
}
@@ -2699,6 +1289,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
@@ -2711,11 +1302,17 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ lw(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2734,7 +1331,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
@@ -2753,13 +1350,13 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Branch(&miss, eq, t0, Operand(at));
}
- HandlerFrontendFooter(name, &miss);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, t0);
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
@@ -2773,8 +1370,9 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ Branch(&miss, ne, this->name(), Operand(name));
}
Label number_case;
@@ -2788,14 +1386,14 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
int number_of_handled_maps = 0;
__ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Type> type = types->at(current);
+ Handle<HeapType> type = types->at(current);
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
// Check map and tail call if there's a match.
// Separate compare from branch, to provide path for above JumpIfSmi().
__ Subu(match, map_reg, Operand(map));
- if (type->Is(Type::Number())) {
+ if (type->Is(HeapType::Number())) {
ASSERT(!number_case.is_unused());
__ bind(&number_case);
}
@@ -2815,6 +1413,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
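
CompilePolymorphicIC boils down to a linear map dispatch: load the receiver's map once, compare it against each handled map, and tail-jump to the matching handler, with a shared miss at the end (plus the extra name guard that now applies only to the keyed kinds). As a table-driven sketch:

    #include <cstdio>

    using Handler = const char* (*)();
    const char* FastHandler() { return "fast"; }
    const char* NumberHandler() { return "number"; }
    const char* Miss() { return "miss"; }

    // Linear dispatch on the receiver's map, as the generated IC does.
    const char* Dispatch(int map, const int* maps, const Handler* handlers,
                         int count) {
      for (int i = 0; i < count; ++i) {
        if (map == maps[i]) return handlers[i]();  // tail call in the IC
      }
      return Miss();
    }

    int main() {
      const int maps[] = {11, 42};
      const Handler handlers[] = {&FastHandler, &NumberHandler};
      std::printf("%s\n", Dispatch(42, maps, handlers, 2));  // number
    }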
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
diff --git a/chromium/v8/src/mirror-debugger.js b/chromium/v8/src/mirror-debugger.js
index 4277136b609..897413cfda5 100644
--- a/chromium/v8/src/mirror-debugger.js
+++ b/chromium/v8/src/mirror-debugger.js
@@ -1,29 +1,6 @@
// Copyright 2006-2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Handle id counters.
var next_handle_ = 0;
@@ -31,17 +8,29 @@ var next_transient_handle_ = -1;
// Mirror cache.
var mirror_cache_ = [];
+var mirror_cache_enabled_ = true;
-/**
- * Clear the mirror handle cache.
- */
-function ClearMirrorCache() {
+function ToggleMirrorCache(value) {
+ mirror_cache_enabled_ = value;
next_handle_ = 0;
mirror_cache_ = [];
}
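
ToggleMirrorCache both flips caching and resets the handle counter and cache array. A minimal usage sketch, assuming a debug-context client that only needs throwaway mirrors (illustrative, not part of the patch):

  ToggleMirrorCache(false);        // mirrors stop allocating handles
  var m = MakeMirror(someValue);   // someValue is hypothetical; m is not cached
  ToggleMirrorCache(true);         // caching back on; handle ids restart at 0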
+// Wrapper to check whether an object is a Promise. The call may not work
+// if promises are not enabled.
+// TODO(yangguo): remove try-catch once promises are enabled by default.
+function ObjectIsPromise(value) {
+ try {
+ return IS_SPEC_OBJECT(value) &&
+ !IS_UNDEFINED(%DebugGetProperty(value, builtins.promiseStatus));
+ } catch (e) {
+ return false;
+ }
+}
+
+
/**
* Returns the mirror for a specified value or object.
*
@@ -54,7 +43,7 @@ function MakeMirror(value, opt_transient) {
var mirror;
// Look for non transient mirrors in the mirror cache.
- if (!opt_transient) {
+ if (!opt_transient && mirror_cache_enabled_) {
for (id in mirror_cache_) {
mirror = mirror_cache_[id];
if (mirror.value() === value) {
@@ -78,6 +67,8 @@ function MakeMirror(value, opt_transient) {
mirror = new NumberMirror(value);
} else if (IS_STRING(value)) {
mirror = new StringMirror(value);
+ } else if (IS_SYMBOL(value)) {
+ mirror = new SymbolMirror(value);
} else if (IS_ARRAY(value)) {
mirror = new ArrayMirror(value);
} else if (IS_DATE(value)) {
@@ -90,11 +81,13 @@ function MakeMirror(value, opt_transient) {
mirror = new ErrorMirror(value);
} else if (IS_SCRIPT(value)) {
mirror = new ScriptMirror(value);
+ } else if (ObjectIsPromise(value)) {
+ mirror = new PromiseMirror(value);
} else {
mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
}
- mirror_cache_[mirror.handle()] = mirror;
+ if (mirror_cache_enabled_) mirror_cache_[mirror.handle()] = mirror;
return mirror;
}
@@ -107,6 +100,7 @@ function MakeMirror(value, opt_transient) {
* undefined if no mirror with the requested handle was found
*/
function LookupMirror(handle) {
+ if (!mirror_cache_enabled_) throw new Error("Mirror cache is disabled");
return mirror_cache_[handle];
}
@@ -149,6 +143,7 @@ var NULL_TYPE = 'null';
var BOOLEAN_TYPE = 'boolean';
var NUMBER_TYPE = 'number';
var STRING_TYPE = 'string';
+var SYMBOL_TYPE = 'symbol';
var OBJECT_TYPE = 'object';
var FUNCTION_TYPE = 'function';
var REGEXP_TYPE = 'regexp';
@@ -159,6 +154,7 @@ var FRAME_TYPE = 'frame';
var SCRIPT_TYPE = 'script';
var CONTEXT_TYPE = 'context';
var SCOPE_TYPE = 'scope';
+var PROMISE_TYPE = 'promise';
// Maximum length when sending strings through the JSON protocol.
var kMaxProtocolStringLength = 80;
@@ -205,6 +201,7 @@ var ScopeType = { Global: 0,
// - NullMirror
// - NumberMirror
// - StringMirror
+// - SymbolMirror
// - ObjectMirror
// - FunctionMirror
// - UnresolvedFunctionMirror
@@ -212,6 +209,7 @@ var ScopeType = { Global: 0,
// - DateMirror
// - RegExpMirror
// - ErrorMirror
+// - PromiseMirror
// - PropertyMirror
// - InternalPropertyMirror
// - FrameMirror
@@ -288,6 +286,15 @@ Mirror.prototype.isString = function() {
/**
+ * Check whether the mirror reflects a symbol.
+ * @returns {boolean} True if the mirror reflects a symbol
+ */
+Mirror.prototype.isSymbol = function() {
+ return this instanceof SymbolMirror;
+};
+
+
+/**
* Check whether the mirror reflects an object.
* @returns {boolean} True if the mirror reflects an object
*/
@@ -351,6 +358,15 @@ Mirror.prototype.isError = function() {
/**
+ * Check whether the mirror reflects a promise.
+ * @returns {boolean} True if the mirror reflects a promise
+ */
+Mirror.prototype.isPromise = function() {
+ return this instanceof PromiseMirror;
+};
+
+
+/**
* Check whether the mirror reflects a property.
* @returns {boolean} True if the mirror reflects a property
*/
@@ -408,7 +424,7 @@ Mirror.prototype.isScope = function() {
* Allocate a handle id for this object.
*/
Mirror.prototype.allocateHandle_ = function() {
- this.handle_ = next_handle_++;
+ if (mirror_cache_enabled_) this.handle_ = next_handle_++;
};
@@ -463,7 +479,8 @@ ValueMirror.prototype.isPrimitive = function() {
type === 'null' ||
type === 'boolean' ||
type === 'number' ||
- type === 'string';
+ type === 'string' ||
+ type === 'symbol';
};
@@ -538,7 +555,7 @@ inherits(NumberMirror, ValueMirror);
NumberMirror.prototype.toText = function() {
- return %NumberToString(this.value_);
+ return %_NumberToString(this.value_);
};
@@ -572,6 +589,28 @@ StringMirror.prototype.toText = function() {
/**
+ * Mirror object for a Symbol
+ * @param {Object} value The Symbol
+ * @constructor
+ * @extends Mirror
+ */
+function SymbolMirror(value) {
+ %_CallFunction(this, SYMBOL_TYPE, value, ValueMirror);
+}
+inherits(SymbolMirror, ValueMirror);
+
+
+SymbolMirror.prototype.description = function() {
+ return %SymbolDescription(%_ValueOf(this.value_));
+};
+
+
+SymbolMirror.prototype.toText = function() {
+ return %_CallFunction(this.value_, builtins.SymbolToString);
+};
+
+
+/**
* Mirror object for objects.
* @param {object} value The object reflected by this mirror
* @param {boolean} transient indicate whether this object is transient with a
@@ -637,8 +676,9 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
// Find all the named properties.
if (kind & PropertyKind.Named) {
- // Get the local property names.
- propertyNames = %GetLocalPropertyNames(this.value_, true);
+ // Get all own property names except for private symbols.
+ propertyNames =
+ %GetOwnPropertyNames(this.value_, PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL);
total += propertyNames.length;
// Get names for named interceptor properties if any.
@@ -654,8 +694,8 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
// Find all the indexed properties.
if (kind & PropertyKind.Indexed) {
- // Get the local element names.
- elementNames = %GetLocalElementNames(this.value_);
+ // Get own element names.
+ elementNames = %GetOwnElementNames(this.value_);
total += elementNames.length;
// Get names for indexed interceptor properties.
@@ -795,7 +835,8 @@ ObjectMirror.prototype.toText = function() {
/**
* Return the internal properties of the value, such as [[PrimitiveValue]] of
- * scalar wrapper objects and properties of the bound function.
+ * scalar wrapper objects, properties of the bound function and properties of
+ * the promise.
* This method is done static to be accessible from Debug API with the bare
* values without mirrors.
* @return {Array} array (possibly empty) of InternalProperty instances
@@ -819,6 +860,13 @@ ObjectMirror.GetInternalProperties = function(value) {
result.push(new InternalPropertyMirror("[[BoundArgs]]", boundArgs));
}
return result;
+ } else if (ObjectIsPromise(value)) {
+ var result = [];
+ result.push(new InternalPropertyMirror("[[PromiseStatus]]",
+ PromiseGetStatus_(value)));
+ result.push(new InternalPropertyMirror("[[PromiseValue]]",
+ PromiseGetValue_(value)));
+ return result;
}
return [];
}
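
For a promise, GetInternalProperties now surfaces the status and value as internal-property mirrors. A sketch of what a caller sees, assuming a still-pending promise p in the debug context:

  var props = ObjectMirror.GetInternalProperties(p);
  // props[0]: name "[[PromiseStatus]]", value "pending"
  // props[1]: name "[[PromiseValue]]",  value undefined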
@@ -888,9 +936,12 @@ FunctionMirror.prototype.script = function() {
// Return script if function is resolved. Otherwise just fall through
// to return undefined.
if (this.resolved()) {
+ if (this.script_) {
+ return this.script_;
+ }
var script = %FunctionGetScript(this.value_);
if (script) {
- return MakeMirror(script);
+ return this.script_ = MakeMirror(script);
}
}
};
@@ -916,9 +967,11 @@ FunctionMirror.prototype.sourcePosition_ = function() {
* @return {Location or undefined} in-script location for the function begin
*/
FunctionMirror.prototype.sourceLocation = function() {
- if (this.resolved() && this.script()) {
- return this.script().locationFromPosition(this.sourcePosition_(),
- true);
+ if (this.resolved()) {
+ var script = this.script();
+ if (script) {
+ return script.locationFromPosition(this.sourcePosition_(), true);
+ }
}
};
@@ -948,7 +1001,10 @@ FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
FunctionMirror.prototype.scopeCount = function() {
if (this.resolved()) {
- return %GetFunctionScopeCount(this.value());
+ if (IS_UNDEFINED(this.scopeCount_)) {
+ this.scopeCount_ = %GetFunctionScopeCount(this.value());
+ }
+ return this.scopeCount_;
} else {
return 0;
}
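
script() and scopeCount() memoize their result on the mirror, so only the first call crosses into the VM. Illustrative, assuming f is a resolved function:

  var fm = MakeMirror(f);
  fm.scopeCount();   // calls %GetFunctionScopeCount once
  fm.scopeCount();   // served from the cached this.scopeCount_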
@@ -1163,6 +1219,41 @@ ErrorMirror.prototype.toText = function() {
/**
+ * Mirror object for a Promise object.
+ * @param {Object} value The Promise object
+ * @constructor
+ * @extends ObjectMirror
+ */
+function PromiseMirror(value) {
+ %_CallFunction(this, value, PROMISE_TYPE, ObjectMirror);
+}
+inherits(PromiseMirror, ObjectMirror);
+
+
+function PromiseGetStatus_(value) {
+ var status = %DebugGetProperty(value, builtins.promiseStatus);
+ if (status == 0) return "pending";
+ if (status == 1) return "resolved";
+ return "rejected";
+}
+
+
+function PromiseGetValue_(value) {
+ return %DebugGetProperty(value, builtins.promiseValue);
+}
+
+
+PromiseMirror.prototype.status = function() {
+ return PromiseGetStatus_(this.value_);
+};
+
+
+PromiseMirror.prototype.promiseValue = function() {
+ return MakeMirror(PromiseGetValue_(this.value_));
+};
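
Taken together with the MakeMirror dispatch above, promises reflect like this (sketch, assuming a resolved promise p in the debug context):

  var pm = MakeMirror(p);      // ObjectIsPromise(p) selects PromiseMirror
  pm.isPromise();              // true
  pm.status();                 // "resolved"
  pm.promiseValue().value();   // the resolution value, unwrapped from its mirror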
+
+
+/**
* Base mirror object for properties.
* @param {ObjectMirror} mirror The mirror object having this property
* @param {string} name The name of the property
@@ -1505,7 +1596,10 @@ FrameDetails.prototype.returnValue = function() {
FrameDetails.prototype.scopeCount = function() {
- return %GetScopeCount(this.break_id_, this.frameId());
+ if (IS_UNDEFINED(this.scopeCount_)) {
+ this.scopeCount_ = %GetScopeCount(this.break_id_, this.frameId());
+ }
+ return this.scopeCount_;
};
@@ -1531,12 +1625,21 @@ function FrameMirror(break_id, index) {
inherits(FrameMirror, Mirror);
+FrameMirror.prototype.details = function() {
+ return this.details_;
+};
+
+
FrameMirror.prototype.index = function() {
return this.index_;
};
FrameMirror.prototype.func = function() {
+ if (this.func_) {
+ return this.func_;
+ }
+
// Get the function for this frame from the VM.
var f = this.details_.func();
@@ -1544,7 +1647,7 @@ FrameMirror.prototype.func = function() {
// value returned from the VM might be a string if the function for the
// frame is unresolved.
if (IS_FUNCTION(f)) {
- return MakeMirror(f);
+ return this.func_ = MakeMirror(f);
} else {
return new UnresolvedFunctionMirror(f);
}
@@ -1627,39 +1730,36 @@ FrameMirror.prototype.sourcePosition = function() {
FrameMirror.prototype.sourceLocation = function() {
- if (this.func().resolved() && this.func().script()) {
- return this.func().script().locationFromPosition(this.sourcePosition(),
- true);
+ var func = this.func();
+ if (func.resolved()) {
+ var script = func.script();
+ if (script) {
+ return script.locationFromPosition(this.sourcePosition(), true);
+ }
}
};
FrameMirror.prototype.sourceLine = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.line;
- }
+ var location = this.sourceLocation();
+ if (location) {
+ return location.line;
}
};
FrameMirror.prototype.sourceColumn = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.column;
- }
+ var location = this.sourceLocation();
+ if (location) {
+ return location.column;
}
};
FrameMirror.prototype.sourceLineText = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.sourceText();
- }
+ var location = this.sourceLocation();
+ if (location) {
+ return location.sourceText();
}
};
@@ -1674,6 +1774,19 @@ FrameMirror.prototype.scope = function(index) {
};
+FrameMirror.prototype.allScopes = function(opt_ignore_nested_scopes) {
+ var scopeDetails = %GetAllScopesDetails(this.break_id_,
+ this.details_.frameId(),
+ this.details_.inlinedFrameIndex(),
+ !!opt_ignore_nested_scopes);
+ var result = [];
+ for (var i = 0; i < scopeDetails.length; ++i) {
+ result.push(new ScopeMirror(this, UNDEFINED, i, scopeDetails[i]));
+ }
+ return result;
+};
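
allScopes fetches every scope's details in a single %GetAllScopesDetails call instead of one runtime call per index. Hypothetical debug-context usage, assuming frame is a FrameMirror:

  var scopes = frame.allScopes(true);   // true: ignore nested block scopes
  for (var i = 0; i < scopes.length; i++) {
    print(scopes[i].scopeType());
  }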
+
+
FrameMirror.prototype.stepInPositions = function() {
var script = this.func().script();
var funcOffset = this.func().sourcePosition_();
@@ -1792,9 +1905,10 @@ FrameMirror.prototype.sourceAndPositionText = function() {
var result = '';
var func = this.func();
if (func.resolved()) {
- if (func.script()) {
- if (func.script().name()) {
- result += func.script().name();
+ var script = func.script();
+ if (script) {
+ if (script.name()) {
+ result += script.name();
} else {
result += '[unnamed]';
}
@@ -1864,17 +1978,18 @@ FrameMirror.prototype.toText = function(opt_locals) {
var kScopeDetailsTypeIndex = 0;
var kScopeDetailsObjectIndex = 1;
-function ScopeDetails(frame, fun, index) {
+function ScopeDetails(frame, fun, index, opt_details) {
if (frame) {
this.break_id_ = frame.break_id_;
- this.details_ = %GetScopeDetails(frame.break_id_,
+ this.details_ = opt_details ||
+ %GetScopeDetails(frame.break_id_,
frame.details_.frameId(),
frame.details_.inlinedFrameIndex(),
index);
this.frame_id_ = frame.details_.frameId();
this.inlined_frame_id_ = frame.details_.inlinedFrameIndex();
} else {
- this.details_ = %GetFunctionScopeDetails(fun.value(), index);
+ this.details_ = opt_details || %GetFunctionScopeDetails(fun.value(), index);
this.fun_value_ = fun.value();
this.break_id_ = undefined;
}
@@ -1920,10 +2035,11 @@ ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
* @param {FrameMirror} frame The frame this scope is a part of
* @param {FunctionMirror} function The function this scope is a part of
* @param {number} index The scope index in the frame
+ * @param {Array=} opt_details Raw scope details data
* @constructor
* @extends Mirror
*/
-function ScopeMirror(frame, function, index) {
+function ScopeMirror(frame, function, index, opt_details) {
%_CallFunction(this, SCOPE_TYPE, Mirror);
if (frame) {
this.frame_index_ = frame.index_;
@@ -1931,11 +2047,16 @@ function ScopeMirror(frame, function, index) {
this.frame_index_ = undefined;
}
this.scope_index_ = index;
- this.details_ = new ScopeDetails(frame, function, index);
+ this.details_ = new ScopeDetails(frame, function, index, opt_details);
}
inherits(ScopeMirror, Mirror);
+ScopeMirror.prototype.details = function() {
+ return this.details_;
+};
+
+
ScopeMirror.prototype.frameIndex = function() {
return this.frame_index_;
};
@@ -2233,6 +2354,9 @@ JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
case STRING_TYPE:
o.value = mirror.getTruncatedValue(this.maxStringLength_());
break;
+ case SYMBOL_TYPE:
+ o.description = mirror.description();
+ break;
case FUNCTION_TYPE:
o.name = mirror.name();
o.inferredName = mirror.inferredName();
@@ -2307,10 +2431,15 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
content.length = mirror.length();
break;
+ case SYMBOL_TYPE:
+ content.description = mirror.description();
+ break;
+
case OBJECT_TYPE:
case FUNCTION_TYPE:
case ERROR_TYPE:
case REGEXP_TYPE:
+ case PROMISE_TYPE:
// Add object representation.
this.serializeObject_(mirror, content, details);
break;
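
Roughly, the protocol payloads for the two new types look like this (illustrative shape only; field names follow the code above):

  // symbol:  { type: 'symbol', description: 'mySymbol', ... }
  // promise: { type: 'promise', status: 'pending',
  //            promiseValue: { ref: 7 }, ... }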
@@ -2413,7 +2542,6 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
content.indexedInterceptor = true;
}
- // Add function specific properties.
if (mirror.isFunction()) {
// Add function specific properties.
content.name = mirror.name();
@@ -2441,12 +2569,17 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
}
- // Add date specific properties.
if (mirror.isDate()) {
// Add date specific properties.
content.value = mirror.value();
}
+ if (mirror.isPromise()) {
+ // Add promise specific properties.
+ content.status = mirror.status();
+ content.promiseValue = this.serializeReference(mirror.promiseValue());
+ }
+
// Add actual properties - named properties followed by indexed properties.
var propertyNames = mirror.propertyNames(PropertyKind.Named);
var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed);
@@ -2574,8 +2707,9 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
content.receiver = this.serializeReference(mirror.receiver());
var func = mirror.func();
content.func = this.serializeReference(func);
- if (func.script()) {
- content.script = this.serializeReference(func.script());
+ var script = func.script();
+ if (script) {
+ content.script = this.serializeReference(script);
}
content.constructCall = mirror.isConstructCall();
content.atReturn = mirror.isAtReturn();
diff --git a/chromium/v8/src/misc-intrinsics.h b/chromium/v8/src/misc-intrinsics.h
index 5393de2c217..5256a293a21 100644
--- a/chromium/v8/src/misc-intrinsics.h
+++ b/chromium/v8/src/misc-intrinsics.h
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MISC_INTRINSICS_H_
#define V8_MISC_INTRINSICS_H_
-#include "../include/v8.h"
-#include "globals.h"
+#include "include/v8.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/mksnapshot.cc b/chromium/v8/src/mksnapshot.cc
index 457f7b3a9a3..d4262c4cea1 100644
--- a/chromium/v8/src/mksnapshot.cc
+++ b/chromium/v8/src/mksnapshot.cc
@@ -1,29 +1,6 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <errno.h>
#include <stdio.h>
@@ -32,14 +9,16 @@
#endif
#include <signal.h>
-#include "v8.h"
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/bootstrapper.h"
+#include "src/flags.h"
+#include "src/natives.h"
+#include "src/platform.h"
+#include "src/serialize.h"
+#include "src/list.h"
-#include "bootstrapper.h"
-#include "flags.h"
-#include "natives.h"
-#include "platform.h"
-#include "serialize.h"
-#include "list.h"
using namespace v8;
@@ -52,148 +31,173 @@ class Compressor {
};
-class PartialSnapshotSink : public i::SnapshotByteSink {
+class ListSnapshotSink : public i::SnapshotByteSink {
+ public:
+ explicit ListSnapshotSink(i::List<char>* data) : data_(data) { }
+ virtual ~ListSnapshotSink() {}
+ virtual void Put(int byte, const char* description) { data_->Add(byte); }
+ virtual int Position() { return data_->length(); }
+ private:
+ i::List<char>* data_;
+};
+
+
+class SnapshotWriter {
public:
- PartialSnapshotSink() : data_(), raw_size_(-1) { }
- virtual ~PartialSnapshotSink() { data_.Free(); }
- virtual void Put(int byte, const char* description) {
- data_.Add(byte);
+ explicit SnapshotWriter(const char* snapshot_file)
+ : fp_(GetFileDescriptorOrDie(snapshot_file))
+ , raw_file_(NULL)
+ , raw_context_file_(NULL)
+ , compressor_(NULL)
+ , omit_(false) {
}
- virtual int Position() { return data_.length(); }
- void Print(FILE* fp) {
- int length = Position();
- for (int j = 0; j < length; j++) {
- if ((j & 0x1f) == 0x1f) {
- fprintf(fp, "\n");
- }
- if (j != 0) {
- fprintf(fp, ",");
- }
- fprintf(fp, "%u", static_cast<unsigned char>(at(j)));
- }
+
+ ~SnapshotWriter() {
+ fclose(fp_);
+ if (raw_file_) fclose(raw_file_);
+ if (raw_context_file_) fclose(raw_context_file_);
+ }
+
+ void SetCompressor(Compressor* compressor) {
+ compressor_ = compressor;
+ }
+
+ void SetOmit(bool omit) {
+ omit_ = omit;
+ }
+
+ void SetRawFiles(const char* raw_file, const char* raw_context_file) {
+ raw_file_ = GetFileDescriptorOrDie(raw_file);
+ raw_context_file_ = GetFileDescriptorOrDie(raw_context_file);
}
- char at(int i) { return data_[i]; }
- bool Compress(Compressor* compressor) {
- ASSERT_EQ(-1, raw_size_);
- raw_size_ = data_.length();
- if (!compressor->Compress(data_.ToVector())) return false;
- data_.Clear();
- data_.AddAll(*compressor->output());
- return true;
+
+ void WriteSnapshot(const i::List<char>& snapshot_data,
+ const i::Serializer& serializer,
+ const i::List<char>& context_snapshot_data,
+ const i::Serializer& context_serializer) const {
+ WriteFilePrefix();
+ WriteData("", snapshot_data, raw_file_);
+ WriteData("context_", context_snapshot_data, raw_context_file_);
+ WriteMeta("context_", context_serializer);
+ WriteMeta("", serializer);
+ WriteFileSuffix();
}
- int raw_size() { return raw_size_; }
private:
- i::List<char> data_;
- int raw_size_;
-};
+ void WriteFilePrefix() const {
+ fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
+ fprintf(fp_, "#include \"src/v8.h\"\n");
+ fprintf(fp_, "#include \"src/platform.h\"\n\n");
+ fprintf(fp_, "#include \"src/snapshot.h\"\n\n");
+ fprintf(fp_, "namespace v8 {\n");
+ fprintf(fp_, "namespace internal {\n\n");
+ }
+ void WriteFileSuffix() const {
+ fprintf(fp_, "} // namespace internal\n");
+ fprintf(fp_, "} // namespace v8\n");
+ }
-class CppByteSink : public PartialSnapshotSink {
- public:
- explicit CppByteSink(const char* snapshot_file) {
- fp_ = i::OS::FOpen(snapshot_file, "wb");
- if (fp_ == NULL) {
- i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ void WriteData(const char* prefix,
+ const i::List<char>& source_data,
+ FILE* raw_file) const {
+ const i::List <char>* data_to_be_written = NULL;
+ i::List<char> compressed_data;
+ if (!compressor_) {
+ data_to_be_written = &source_data;
+ } else if (compressor_->Compress(source_data.ToVector())) {
+ compressed_data.AddAll(*compressor_->output());
+ data_to_be_written = &compressed_data;
+ } else {
+ i::PrintF("Compression failed. Aborting.\n");
exit(1);
}
- fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
- fprintf(fp_, "#include \"v8.h\"\n");
- fprintf(fp_, "#include \"platform.h\"\n\n");
- fprintf(fp_, "#include \"snapshot.h\"\n\n");
- fprintf(fp_, "namespace v8 {\nnamespace internal {\n\n");
- fprintf(fp_, "const byte Snapshot::data_[] = {");
+
+ ASSERT(data_to_be_written);
+ MaybeWriteRawFile(data_to_be_written, raw_file);
+ WriteData(prefix, source_data, data_to_be_written);
}
- virtual ~CppByteSink() {
- fprintf(fp_, "const int Snapshot::size_ = %d;\n", Position());
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- fprintf(fp_, "const byte* Snapshot::raw_data_ = NULL;\n");
- fprintf(fp_,
- "const int Snapshot::raw_size_ = %d;\n\n",
- raw_size());
-#else
- fprintf(fp_,
- "const byte* Snapshot::raw_data_ = Snapshot::data_;\n");
- fprintf(fp_,
- "const int Snapshot::raw_size_ = Snapshot::size_;\n\n");
-#endif
- fprintf(fp_, "} } // namespace v8::internal\n");
- fclose(fp_);
+ void MaybeWriteRawFile(const i::List<char>* data, FILE* raw_file) const {
+ if (!data || !raw_file)
+ return;
+
+ // Sanity check that i::List iterators really return pointers into one
+ // contiguous internal array.
+ ASSERT(data->end() - data->begin() == data->length());
+
+ size_t written = fwrite(data->begin(), 1, data->length(), raw_file);
+ if (written != (size_t)data->length()) {
+ i::PrintF("Writing raw file failed.. Aborting.\n");
+ exit(1);
+ }
}
- void WriteSpaceUsed(
- const char* prefix,
- int new_space_used,
- int pointer_space_used,
- int data_space_used,
- int code_space_used,
- int map_space_used,
- int cell_space_used,
- int property_cell_space_used) {
- fprintf(fp_,
- "const int Snapshot::%snew_space_used_ = %d;\n",
- prefix,
- new_space_used);
- fprintf(fp_,
- "const int Snapshot::%spointer_space_used_ = %d;\n",
- prefix,
- pointer_space_used);
- fprintf(fp_,
- "const int Snapshot::%sdata_space_used_ = %d;\n",
- prefix,
- data_space_used);
- fprintf(fp_,
- "const int Snapshot::%scode_space_used_ = %d;\n",
- prefix,
- code_space_used);
- fprintf(fp_,
- "const int Snapshot::%smap_space_used_ = %d;\n",
- prefix,
- map_space_used);
- fprintf(fp_,
- "const int Snapshot::%scell_space_used_ = %d;\n",
- prefix,
- cell_space_used);
- fprintf(fp_,
- "const int Snapshot::%sproperty_cell_space_used_ = %d;\n",
- prefix,
- property_cell_space_used);
+ void WriteData(const char* prefix,
+ const i::List<char>& source_data,
+ const i::List<char>* data_to_be_written) const {
+ fprintf(fp_, "const byte Snapshot::%sdata_[] = {\n", prefix);
+ if (!omit_)
+ WriteSnapshotData(data_to_be_written);
+ fprintf(fp_, "};\n");
+ fprintf(fp_, "const int Snapshot::%ssize_ = %d;\n", prefix,
+ data_to_be_written->length());
+
+ if (data_to_be_written == &source_data && !omit_) {
+ fprintf(fp_, "const byte* Snapshot::%sraw_data_ = Snapshot::%sdata_;\n",
+ prefix, prefix);
+ fprintf(fp_, "const int Snapshot::%sraw_size_ = Snapshot::%ssize_;\n",
+ prefix, prefix);
+ } else {
+ fprintf(fp_, "const byte* Snapshot::%sraw_data_ = NULL;\n", prefix);
+ fprintf(fp_, "const int Snapshot::%sraw_size_ = %d;\n",
+ prefix, source_data.length());
+ }
+ fprintf(fp_, "\n");
}
- void WritePartialSnapshot() {
- int length = partial_sink_.Position();
- fprintf(fp_, "};\n\n");
- fprintf(fp_, "const int Snapshot::context_size_ = %d;\n", length);
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- fprintf(fp_,
- "const int Snapshot::context_raw_size_ = %d;\n",
- partial_sink_.raw_size());
-#else
- fprintf(fp_,
- "const int Snapshot::context_raw_size_ = "
- "Snapshot::context_size_;\n");
-#endif
- fprintf(fp_, "const byte Snapshot::context_data_[] = {\n");
- partial_sink_.Print(fp_);
- fprintf(fp_, "};\n\n");
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- fprintf(fp_, "const byte* Snapshot::context_raw_data_ = NULL;\n");
-#else
- fprintf(fp_, "const byte* Snapshot::context_raw_data_ ="
- " Snapshot::context_data_;\n");
-#endif
+ void WriteMeta(const char* prefix, const i::Serializer& ser) const {
+ WriteSizeVar(ser, prefix, "new", i::NEW_SPACE);
+ WriteSizeVar(ser, prefix, "pointer", i::OLD_POINTER_SPACE);
+ WriteSizeVar(ser, prefix, "data", i::OLD_DATA_SPACE);
+ WriteSizeVar(ser, prefix, "code", i::CODE_SPACE);
+ WriteSizeVar(ser, prefix, "map", i::MAP_SPACE);
+ WriteSizeVar(ser, prefix, "cell", i::CELL_SPACE);
+ WriteSizeVar(ser, prefix, "property_cell", i::PROPERTY_CELL_SPACE);
+ fprintf(fp_, "\n");
+ }
+
+ void WriteSizeVar(const i::Serializer& ser, const char* prefix,
+ const char* name, int space) const {
+ fprintf(fp_, "const int Snapshot::%s%s_space_used_ = %d;\n",
+ prefix, name, ser.CurrentAllocationAddress(space));
}
- void WriteSnapshot() {
- Print(fp_);
+ void WriteSnapshotData(const i::List<char>* data) const {
+ for (int i = 0; i < data->length(); i++) {
+ if ((i & 0x1f) == 0x1f)
+ fprintf(fp_, "\n");
+ if (i > 0)
+ fprintf(fp_, ",");
+ fprintf(fp_, "%u", static_cast<unsigned char>(data->at(i)));
+ }
+ fprintf(fp_, "\n");
}
- PartialSnapshotSink* partial_sink() { return &partial_sink_; }
+ FILE* GetFileDescriptorOrDie(const char* filename) {
+ FILE* fp = i::OS::FOpen(filename, "wb");
+ if (fp == NULL) {
+ i::PrintF("Unable to open file \"%s\" for writing.\n", filename);
+ exit(1);
+ }
+ return fp;
+ }
- private:
FILE* fp_;
- PartialSnapshotSink partial_sink_;
+ FILE* raw_file_;
+ FILE* raw_context_file_;
+ Compressor* compressor_;
+ bool omit_;
};
@@ -268,6 +272,7 @@ void DumpException(Handle<Message> message) {
int main(int argc, char** argv) {
V8::InitializeICU();
i::Isolate::SetCrashIfDefaultIsolateInitialized();
+ i::CpuFeatures::Probe(true);
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@@ -291,113 +296,103 @@ int main(int argc, char** argv) {
i::FLAG_logfile_per_isolate = false;
Isolate* isolate = v8::Isolate::New();
- isolate->Enter();
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Serializer::Enable(internal_isolate);
- Persistent<Context> context;
- {
- HandleScope handle_scope(isolate);
- context.Reset(isolate, Context::New(isolate));
- }
+ { Isolate::Scope isolate_scope(isolate);
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->enable_serializer();
+
+ Persistent<Context> context;
+ {
+ HandleScope handle_scope(isolate);
+ context.Reset(isolate, Context::New(isolate));
+ }
- if (context.IsEmpty()) {
- fprintf(stderr,
- "\nException thrown while compiling natives - see above.\n\n");
- exit(1);
- }
- if (i::FLAG_extra_code != NULL) {
- // Capture 100 frames if anything happens.
- V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
- HandleScope scope(isolate);
- v8::Context::Scope cscope(v8::Local<v8::Context>::New(isolate, context));
- const char* name = i::FLAG_extra_code;
- FILE* file = i::OS::FOpen(name, "rb");
- if (file == NULL) {
- fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno);
+ if (context.IsEmpty()) {
+ fprintf(stderr,
+ "\nException thrown while compiling natives - see above.\n\n");
exit(1);
}
+ if (i::FLAG_extra_code != NULL) {
+ // Capture 100 frames if anything happens.
+ V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
+ HandleScope scope(isolate);
+ v8::Context::Scope cscope(v8::Local<v8::Context>::New(isolate, context));
+ const char* name = i::FLAG_extra_code;
+ FILE* file = i::OS::FOpen(name, "rb");
+ if (file == NULL) {
+ fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno);
+ exit(1);
+ }
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
- rewind(file);
-
- char* chars = new char[size + 1];
- chars[size] = '\0';
- for (int i = 0; i < size;) {
- int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
- if (read < 0) {
- fprintf(stderr, "Failed to read '%s': errno %d\n", name, errno);
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+ rewind(file);
+
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size;) {
+ int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
+ if (read < 0) {
+ fprintf(stderr, "Failed to read '%s': errno %d\n", name, errno);
+ exit(1);
+ }
+ i += read;
+ }
+ fclose(file);
+ Local<String> source = String::NewFromUtf8(isolate, chars);
+ TryCatch try_catch;
+ Local<Script> script = Script::Compile(source);
+ if (try_catch.HasCaught()) {
+ fprintf(stderr, "Failure compiling '%s'\n", name);
+ DumpException(try_catch.Message());
+ exit(1);
+ }
+ script->Run();
+ if (try_catch.HasCaught()) {
+ fprintf(stderr, "Failure running '%s'\n", name);
+ DumpException(try_catch.Message());
exit(1);
}
- i += read;
- }
- fclose(file);
- Local<String> source = String::NewFromUtf8(isolate, chars);
- TryCatch try_catch;
- Local<Script> script = Script::Compile(source);
- if (try_catch.HasCaught()) {
- fprintf(stderr, "Failure compiling '%s'\n", name);
- DumpException(try_catch.Message());
- exit(1);
}
- script->Run();
- if (try_catch.HasCaught()) {
- fprintf(stderr, "Failure running '%s'\n", name);
- DumpException(try_catch.Message());
- exit(1);
+ // Make sure all builtin scripts are cached.
+ { HandleScope scope(isolate);
+ for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
+ internal_isolate->bootstrapper()->NativesSourceLookup(i);
+ }
}
- }
- // Make sure all builtin scripts are cached.
- { HandleScope scope(isolate);
- for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
- internal_isolate->bootstrapper()->NativesSourceLookup(i);
+ // If we don't do this then we end up with a stray root pointing at the
+ // context even after we have disposed of the context.
+ internal_isolate->heap()->CollectAllGarbage(
+ i::Heap::kNoGCFlags, "mksnapshot");
+ i::Object* raw_context = *v8::Utils::OpenPersistent(context);
+ context.Reset();
+
+ // This results in a somewhat smaller snapshot, probably because it gets
+ // rid of some things that are cached between garbage collections.
+ i::List<char> snapshot_data;
+ ListSnapshotSink snapshot_sink(&snapshot_data);
+ i::StartupSerializer ser(internal_isolate, &snapshot_sink);
+ ser.SerializeStrongReferences();
+
+ i::List<char> context_data;
+ ListSnapshotSink context_sink(&context_data);
+ i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
+ context_ser.Serialize(&raw_context);
+ ser.SerializeWeakReferences();
+
+ {
+ SnapshotWriter writer(argv[1]);
+ writer.SetOmit(i::FLAG_omit);
+ if (i::FLAG_raw_file && i::FLAG_raw_context_file)
+ writer.SetRawFiles(i::FLAG_raw_file, i::FLAG_raw_context_file);
+ #ifdef COMPRESS_STARTUP_DATA_BZ2
+ BZip2Compressor bzip2;
+ writer.SetCompressor(&bzip2);
+ #endif
+ writer.WriteSnapshot(snapshot_data, ser, context_data, context_ser);
}
}
- // If we don't do this then we end up with a stray root pointing at the
- // context even after we have disposed of the context.
- internal_isolate->heap()->CollectAllGarbage(
- i::Heap::kNoGCFlags, "mksnapshot");
- i::Object* raw_context = *v8::Utils::OpenPersistent(context);
- context.Reset();
- CppByteSink sink(argv[1]);
- // This results in a somewhat smaller snapshot, probably because it gets rid
- // of some things that are cached between garbage collections.
- i::StartupSerializer ser(internal_isolate, &sink);
- ser.SerializeStrongReferences();
-
- i::PartialSerializer partial_ser(
- internal_isolate, &ser, sink.partial_sink());
- partial_ser.Serialize(&raw_context);
-
- ser.SerializeWeakReferences();
-#ifdef COMPRESS_STARTUP_DATA_BZ2
- BZip2Compressor compressor;
- if (!sink.Compress(&compressor))
- return 1;
- if (!sink.partial_sink()->Compress(&compressor))
- return 1;
-#endif
- sink.WriteSnapshot();
- sink.WritePartialSnapshot();
-
- sink.WriteSpaceUsed(
- "context_",
- partial_ser.CurrentAllocationAddress(i::NEW_SPACE),
- partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
- partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
- partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
- partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
- partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
- partial_ser.CurrentAllocationAddress(i::PROPERTY_CELL_SPACE));
- sink.WriteSpaceUsed(
- "",
- ser.CurrentAllocationAddress(i::NEW_SPACE),
- ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
- ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
- ser.CurrentAllocationAddress(i::CODE_SPACE),
- ser.CurrentAllocationAddress(i::MAP_SPACE),
- ser.CurrentAllocationAddress(i::CELL_SPACE),
- ser.CurrentAllocationAddress(i::PROPERTY_CELL_SPACE));
+ isolate->Dispose();
+ V8::Dispose();
return 0;
}
diff --git a/chromium/v8/src/msan.h b/chromium/v8/src/msan.h
index 484c9fa3979..4130d22a652 100644
--- a/chromium/v8/src/msan.h
+++ b/chromium/v8/src/msan.h
@@ -1,35 +1,14 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// MemorySanitizer support.
#ifndef V8_MSAN_H_
#define V8_MSAN_H_
+#include "src/globals.h"
+
#ifndef __has_feature
# define __has_feature(x) 0
#endif
@@ -38,12 +17,12 @@
# define MEMORY_SANITIZER
#endif
-#ifdef MEMORY_SANITIZER
-# include <sanitizer/msan_interface.h>
+#if defined(MEMORY_SANITIZER) && !defined(USE_SIMULATOR)
+# include <sanitizer/msan_interface.h> // NOLINT
// Marks a memory range as fully initialized.
-# define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s))
+# define MSAN_MEMORY_IS_INITIALIZED_IN_JIT(p, s) __msan_unpoison((p), (s))
#else
-# define MSAN_MEMORY_IS_INITIALIZED(p, s)
+# define MSAN_MEMORY_IS_INITIALIZED_IN_JIT(p, s)
#endif
#endif // V8_MSAN_H_
diff --git a/chromium/v8/src/natives.h b/chromium/v8/src/natives.h
index 5f34420d0b2..2f930dc7bdc 100644
--- a/chromium/v8/src/natives.h
+++ b/chromium/v8/src/natives.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_NATIVES_H_
#define V8_NATIVES_H_
diff --git a/chromium/v8/src/object-observe.js b/chromium/v8/src/object-observe.js
index dfa57b83126..2dfc7522f77 100644
--- a/chromium/v8/src/object-observe.js
+++ b/chromium/v8/src/object-observe.js
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
"use strict";
@@ -56,45 +33,92 @@
// implementation of (1) and (2) have "optimized" states which represent
// common cases which can be handled more efficiently.
-var observationState = %GetObservationState();
-if (IS_UNDEFINED(observationState.callbackInfoMap)) {
- observationState.callbackInfoMap = %ObservationWeakMapCreate();
- observationState.objectInfoMap = %ObservationWeakMapCreate();
- observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
- observationState.pendingObservers = null;
- observationState.nextCallbackPriority = 0;
-}
-
-function ObservationWeakMap(map) {
- this.map_ = map;
-}
-
-ObservationWeakMap.prototype = {
- get: function(key) {
- key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
- return %WeakCollectionGet(this.map_, key);
- },
- set: function(key, value) {
- key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
- %WeakCollectionSet(this.map_, key, value);
- },
- has: function(key) {
- return !IS_UNDEFINED(this.get(key));
+var observationState;
+
+function GetObservationStateJS() {
+ if (IS_UNDEFINED(observationState))
+ observationState = %GetObservationState();
+
+ if (IS_UNDEFINED(observationState.callbackInfoMap)) {
+ observationState.callbackInfoMap = %ObservationWeakMapCreate();
+ observationState.objectInfoMap = %ObservationWeakMapCreate();
+ observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
+ observationState.pendingObservers = null;
+ observationState.nextCallbackPriority = 0;
}
-};
-var callbackInfoMap =
- new ObservationWeakMap(observationState.callbackInfoMap);
-var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
-var notifierObjectInfoMap =
- new ObservationWeakMap(observationState.notifierObjectInfoMap);
+ return observationState;
+}
-function TypeMapCreate() {
+function GetWeakMapWrapper() {
+ function MapWrapper(map) {
+ this.map_ = map;
+ }
+
+ MapWrapper.prototype = {
+ __proto__: null,
+ get: function(key) {
+ return %WeakCollectionGet(this.map_, key);
+ },
+ set: function(key, value) {
+ %WeakCollectionSet(this.map_, key, value);
+ },
+ has: function(key) {
+ return !IS_UNDEFINED(this.get(key));
+ }
+ };
+
+ return MapWrapper;
+}
+
+var contextMaps;
+
+function GetContextMaps() {
+ if (IS_UNDEFINED(contextMaps)) {
+ var map = GetWeakMapWrapper();
+ var observationState = GetObservationStateJS();
+ contextMaps = {
+ callbackInfoMap: new map(observationState.callbackInfoMap),
+ objectInfoMap: new map(observationState.objectInfoMap),
+ notifierObjectInfoMap: new map(observationState.notifierObjectInfoMap)
+ };
+ }
+
+ return contextMaps;
+}
+
+function GetCallbackInfoMap() {
+ return GetContextMaps().callbackInfoMap;
+}
+
+function GetObjectInfoMap() {
+ return GetContextMaps().objectInfoMap;
+}
+
+function GetNotifierObjectInfoMap() {
+ return GetContextMaps().notifierObjectInfoMap;
+}
+
+function GetPendingObservers() {
+ return GetObservationStateJS().pendingObservers;
+}
+
+function SetPendingObservers(pendingObservers) {
+ GetObservationStateJS().pendingObservers = pendingObservers;
+}
+
+function GetNextCallbackPriority() {
+ return GetObservationStateJS().nextCallbackPriority++;
+}
+
+function nullProtoObject() {
return { __proto__: null };
}
+function TypeMapCreate() {
+ return nullProtoObject();
+}
+
function TypeMapAddType(typeMap, type, ignoreDuplicate) {
typeMap[type] = ignoreDuplicate ? 1 : (typeMap[type] || 0) + 1;
}
@@ -103,9 +127,9 @@ function TypeMapRemoveType(typeMap, type) {
typeMap[type]--;
}
-function TypeMapCreateFromList(typeList) {
+function TypeMapCreateFromList(typeList, length) {
var typeMap = TypeMapCreate();
- for (var i = 0; i < typeList.length; i++) {
+ for (var i = 0; i < length; i++) {
TypeMapAddType(typeMap, typeList[i], true);
}
return typeMap;
@@ -127,14 +151,17 @@ function TypeMapIsDisjointFrom(typeMap1, typeMap2) {
return true;
}
-var defaultAcceptTypes = TypeMapCreateFromList([
- 'add',
- 'update',
- 'delete',
- 'setPrototype',
- 'reconfigure',
- 'preventExtensions'
-]);
+var defaultAcceptTypes = (function() {
+ var defaultTypes = [
+ 'add',
+ 'update',
+ 'delete',
+ 'setPrototype',
+ 'reconfigure',
+ 'preventExtensions'
+ ];
+ return TypeMapCreateFromList(defaultTypes, defaultTypes.length);
+})();
// An Observer is a registration to observe an object by a callback with
// a given set of accept types. If the set of accept types is the default
@@ -142,11 +169,12 @@ var defaultAcceptTypes = TypeMapCreateFromList([
// to the callback. An observer never changes its accept types and thus never
// needs to "normalize".
function ObserverCreate(callback, acceptList) {
- return IS_UNDEFINED(acceptList) ? callback : {
- __proto__: null,
- callback: callback,
- accept: TypeMapCreateFromList(acceptList)
- };
+ if (IS_UNDEFINED(acceptList))
+ return callback;
+ var observer = nullProtoObject();
+ observer.callback = callback;
+ observer.accept = acceptList;
+ return observer;
}
function ObserverGetCallback(observer) {
@@ -162,8 +190,8 @@ function ObserverIsActive(observer, objectInfo) {
ObserverGetAcceptTypes(observer));
}
-function ObjectInfoGet(object) {
- var objectInfo = objectInfoMap.get(object);
+function ObjectInfoGetOrCreate(object) {
+ var objectInfo = ObjectInfoGet(object);
if (IS_UNDEFINED(objectInfo)) {
if (!%IsJSProxy(object))
%SetIsObserved(object);
@@ -175,19 +203,23 @@ function ObjectInfoGet(object) {
performing: null,
performingCount: 0,
};
- objectInfoMap.set(object, objectInfo);
+ GetObjectInfoMap().set(object, objectInfo);
}
return objectInfo;
}
+function ObjectInfoGet(object) {
+ return GetObjectInfoMap().get(object);
+}
+
function ObjectInfoGetFromNotifier(notifier) {
- return notifierObjectInfoMap.get(notifier);
+ return GetNotifierObjectInfoMap().get(notifier);
}
function ObjectInfoGetNotifier(objectInfo) {
if (IS_NULL(objectInfo.notifier)) {
objectInfo.notifier = { __proto__: notifierPrototype };
- notifierObjectInfoMap.set(objectInfo.notifier, objectInfo);
+ GetNotifierObjectInfoMap().set(objectInfo.notifier, objectInfo);
}
return objectInfo.notifier;
@@ -212,7 +244,7 @@ function ObjectInfoNormalizeChangeObservers(objectInfo) {
var callback = ObserverGetCallback(observer);
var callbackInfo = CallbackInfoGet(callback);
var priority = CallbackInfoGetPriority(callbackInfo);
- objectInfo.changeObservers = { __proto__: null };
+ objectInfo.changeObservers = nullProtoObject();
objectInfo.changeObservers[priority] = observer;
}
}
@@ -243,7 +275,7 @@ function ObjectInfoRemoveObserver(objectInfo, callback) {
var callbackInfo = CallbackInfoGet(callback);
var priority = CallbackInfoGetPriority(callbackInfo);
- delete objectInfo.changeObservers[priority];
+ objectInfo.changeObservers[priority] = null;
}
function ObjectInfoHasActiveObservers(objectInfo) {
@@ -254,7 +286,8 @@ function ObjectInfoHasActiveObservers(objectInfo) {
return ObserverIsActive(objectInfo.changeObservers, objectInfo);
for (var priority in objectInfo.changeObservers) {
- if (ObserverIsActive(objectInfo.changeObservers[priority], objectInfo))
+ var observer = objectInfo.changeObservers[priority];
+ if (!IS_NULL(observer) && ObserverIsActive(observer, objectInfo))
return true;
}
@@ -276,32 +309,34 @@ function ObjectInfoGetPerformingTypes(objectInfo) {
return objectInfo.performingCount > 0 ? objectInfo.performing : null;
}
-function AcceptArgIsValid(arg) {
+function ConvertAcceptListToTypeMap(arg) {
+ // We use undefined as a sentinel for the default accept list.
if (IS_UNDEFINED(arg))
- return true;
+ return arg;
- if (!IS_SPEC_OBJECT(arg) ||
- !IS_NUMBER(arg.length) ||
- arg.length < 0)
- return false;
+ if (!IS_SPEC_OBJECT(arg))
+ throw MakeTypeError("observe_accept_invalid");
- return true;
+ var len = ToInteger(arg.length);
+ if (len < 0) len = 0;
+
+ return TypeMapCreateFromList(arg, len);
}
// CallbackInfo's optimized state is just a number which represents its global
// priority. When a change record must be enqueued for the callback, it
// normalizes. When delivery clears any pending change records, it re-optimizes.
function CallbackInfoGet(callback) {
- return callbackInfoMap.get(callback);
+ return GetCallbackInfoMap().get(callback);
}
function CallbackInfoGetOrCreate(callback) {
- var callbackInfo = callbackInfoMap.get(callback);
+ var callbackInfo = GetCallbackInfoMap().get(callback);
if (!IS_UNDEFINED(callbackInfo))
return callbackInfo;
- var priority = observationState.nextCallbackPriority++
- callbackInfoMap.set(callback, priority);
+ var priority = GetNextCallbackPriority();
+ GetCallbackInfoMap().set(callback, priority);
return priority;
}
@@ -313,12 +348,12 @@ function CallbackInfoGetPriority(callbackInfo) {
}
function CallbackInfoNormalize(callback) {
- var callbackInfo = callbackInfoMap.get(callback);
+ var callbackInfo = GetCallbackInfoMap().get(callback);
if (IS_NUMBER(callbackInfo)) {
var priority = callbackInfo;
callbackInfo = new InternalArray;
callbackInfo.priority = priority;
- callbackInfoMap.set(callback, callbackInfo);
+ GetCallbackInfoMap().set(callback, callbackInfo);
}
return callbackInfo;
}
@@ -326,25 +361,33 @@ function CallbackInfoNormalize(callback) {
function ObjectObserve(object, callback, acceptList) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["observe"]);
+ if (%IsJSGlobalProxy(object))
+ throw MakeTypeError("observe_global_proxy", ["observe"]);
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["observe"]);
if (ObjectIsFrozen(callback))
throw MakeTypeError("observe_callback_frozen");
- if (!AcceptArgIsValid(acceptList))
- throw MakeTypeError("observe_accept_invalid");
- var objectInfo = ObjectInfoGet(object);
- ObjectInfoAddObserver(objectInfo, callback, acceptList);
+ var objectObserveFn = %GetObjectContextObjectObserve(object);
+ return objectObserveFn(object, callback, acceptList);
+}
+
+function NativeObjectObserve(object, callback, acceptList) {
+ var objectInfo = ObjectInfoGetOrCreate(object);
+ var typeList = ConvertAcceptListToTypeMap(acceptList);
+ ObjectInfoAddObserver(objectInfo, callback, typeList);
return object;
}
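
The public entry point now routes through the object's own context via %GetObjectContextObjectObserve before the native path runs; the observable surface is unchanged. Sketch, assuming Object.observe is enabled in this build:

  var obj = {};
  Object.observe(obj, function(records) {
    print(records[0].type + ' ' + records[0].name);   // e.g. 'add x'
  }, ['add', 'update']);   // acceptList becomes a type map per context
  obj.x = 1;               // enqueues an 'add' change record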
function ObjectUnobserve(object, callback) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["unobserve"]);
+ if (%IsJSGlobalProxy(object))
+ throw MakeTypeError("observe_global_proxy", ["unobserve"]);
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["unobserve"]);
- var objectInfo = objectInfoMap.get(object);
+ var objectInfo = ObjectInfoGet(object);
if (IS_UNDEFINED(objectInfo))
return object;
@@ -363,28 +406,25 @@ function ArrayUnobserve(object, callback) {
return ObjectUnobserve(object, callback);
}
-function ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
- needsAccessCheck) {
+function ObserverEnqueueIfActive(observer, objectInfo, changeRecord) {
if (!ObserverIsActive(observer, objectInfo) ||
!TypeMapHasType(ObserverGetAcceptTypes(observer), changeRecord.type)) {
return;
}
var callback = ObserverGetCallback(observer);
- if (needsAccessCheck &&
- // Drop all splice records on the floor for access-checked objects
- (changeRecord.type == 'splice' ||
- !%IsAccessAllowedForObserver(
- callback, changeRecord.object, changeRecord.name))) {
+ if (!%ObserverObjectAndRecordHaveSameOrigin(callback, changeRecord.object,
+ changeRecord)) {
return;
}
var callbackInfo = CallbackInfoNormalize(callback);
- if (!observationState.pendingObservers)
- observationState.pendingObservers = { __proto__: null };
- observationState.pendingObservers[callbackInfo.priority] = callback;
+ if (IS_NULL(GetPendingObservers())) {
+ SetPendingObservers(nullProtoObject());
+ %EnqueueMicrotask(ObserveMicrotaskRunner);
+ }
+ GetPendingObservers()[callbackInfo.priority] = callback;
callbackInfo.push(changeRecord);
- %SetMicrotaskPending(true);
}
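
Pending observers are flushed by an explicitly enqueued microtask (%EnqueueMicrotask(ObserveMicrotaskRunner)), so callbacks still run asynchronously after the current turn completes. Illustrative timing:

  var o = {};
  Object.observe(o, function(records) { print('delivered ' + records.length); });
  o.a = 1;
  o.b = 2;
  print('mutations done');   // prints first; delivery happens in the microtask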
function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
@@ -401,48 +441,43 @@ function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
%DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
READ_ONLY + DONT_DELETE);
}
- ObjectFreeze(newRecord);
+ ObjectFreezeJS(newRecord);
- ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord,
- true /* skip access check */);
+ ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord);
}
-function ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord,
- skipAccessCheck) {
+function ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(changeRecord.name)) return;
- var needsAccessCheck = !skipAccessCheck &&
- %IsAccessCheckNeeded(changeRecord.object);
-
if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
var observer = objectInfo.changeObservers;
- ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
- needsAccessCheck);
+ ObserverEnqueueIfActive(observer, objectInfo, changeRecord);
return;
}
for (var priority in objectInfo.changeObservers) {
var observer = objectInfo.changeObservers[priority];
- ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
- needsAccessCheck);
+ if (IS_NULL(observer))
+ continue;
+ ObserverEnqueueIfActive(observer, objectInfo, changeRecord);
}
}
function BeginPerformSplice(array) {
- var objectInfo = objectInfoMap.get(array);
+ var objectInfo = ObjectInfoGet(array);
if (!IS_UNDEFINED(objectInfo))
ObjectInfoAddPerformingType(objectInfo, 'splice');
}
function EndPerformSplice(array) {
- var objectInfo = objectInfoMap.get(array);
+ var objectInfo = ObjectInfoGet(array);
if (!IS_UNDEFINED(objectInfo))
ObjectInfoRemovePerformingType(objectInfo, 'splice');
}
function EnqueueSpliceRecord(array, index, removed, addedCount) {
- var objectInfo = objectInfoMap.get(array);
+ var objectInfo = ObjectInfoGet(array);
if (!ObjectInfoHasActiveObservers(objectInfo))
return;
@@ -454,13 +489,13 @@ function EnqueueSpliceRecord(array, index, removed, addedCount) {
addedCount: addedCount
};
- ObjectFreeze(changeRecord);
- ObjectFreeze(changeRecord.removed);
+ ObjectFreezeJS(changeRecord);
+ ObjectFreezeJS(changeRecord.removed);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
function NotifyChange(type, object, name, oldValue) {
- var objectInfo = objectInfoMap.get(object);
+ var objectInfo = ObjectInfoGet(object);
if (!ObjectInfoHasActiveObservers(objectInfo))
return;
@@ -478,7 +513,7 @@ function NotifyChange(type, object, name, oldValue) {
};
}
- ObjectFreeze(changeRecord);
+ ObjectFreezeJS(changeRecord);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
@@ -502,7 +537,6 @@ function ObjectNotifierPerformChange(changeType, changeFn) {
throw MakeTypeError("called_on_non_object", ["performChange"]);
var objectInfo = ObjectInfoGetFromNotifier(this);
-
if (IS_UNDEFINED(objectInfo))
throw MakeTypeError("observe_notify_non_notifier");
if (!IS_STRING(changeType))
@@ -510,6 +544,11 @@ function ObjectNotifierPerformChange(changeType, changeFn) {
if (!IS_SPEC_FUNCTION(changeFn))
throw MakeTypeError("observe_perform_non_function");
+ var performChangeFn = %GetObjectContextNotifierPerformChange(objectInfo);
+ performChangeFn(objectInfo, changeType, changeFn);
+}
+
+function NativeObjectNotifierPerformChange(objectInfo, changeType, changeFn) {
ObjectInfoAddPerformingType(objectInfo, changeType);
var changeRecord;
@@ -526,25 +565,34 @@ function ObjectNotifierPerformChange(changeType, changeFn) {
function ObjectGetNotifier(object) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["getNotifier"]);
+ if (%IsJSGlobalProxy(object))
+ throw MakeTypeError("observe_global_proxy", ["getNotifier"]);
if (ObjectIsFrozen(object)) return null;
- var objectInfo = ObjectInfoGet(object);
+ if (!%ObjectWasCreatedInCurrentOrigin(object)) return null;
+
+ var getNotifierFn = %GetObjectContextObjectGetNotifier(object);
+ return getNotifierFn(object);
+}
+
+function NativeObjectGetNotifier(object) {
+ var objectInfo = ObjectInfoGetOrCreate(object);
return ObjectInfoGetNotifier(objectInfo);
}
function CallbackDeliverPending(callback) {
- var callbackInfo = callbackInfoMap.get(callback);
+ var callbackInfo = GetCallbackInfoMap().get(callback);
if (IS_UNDEFINED(callbackInfo) || IS_NUMBER(callbackInfo))
return false;
  // Clear the pending change records from the callback and return it to its
  // "optimized" state.
var priority = callbackInfo.priority;
- callbackInfoMap.set(callback, priority);
+ GetCallbackInfoMap().set(callback, priority);
- if (observationState.pendingObservers)
- delete observationState.pendingObservers[priority];
+ if (GetPendingObservers())
+ delete GetPendingObservers()[priority];
var delivered = [];
%MoveArrayContents(callbackInfo, delivered);
@@ -563,15 +611,14 @@ function ObjectDeliverChangeRecords(callback) {
}
function ObserveMicrotaskRunner() {
- var pendingObservers = observationState.pendingObservers;
+ var pendingObservers = GetPendingObservers();
if (pendingObservers) {
- observationState.pendingObservers = null;
+ SetPendingObservers(null);
for (var i in pendingObservers) {
CallbackDeliverPending(pendingObservers[i]);
}
}
}
-RunMicrotasks.runners.push(ObserveMicrotaskRunner);
function SetupObjectObserve() {
%CheckIsBootstrapping();
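
The object-observe.js hunks above move change-record delivery onto the microtask
queue: the first record that lands while the pending-observer set is empty calls
%EnqueueMicrotask(ObserveMicrotaskRunner), replacing the old global
%SetMicrotaskPending(true) flag, and the runner then drains the whole batch in a
single turn. Below is a minimal C++ sketch of that schedule-once batching
pattern; ObserverQueue and all of its members are hypothetical stand-ins for
illustration, not V8 API.

    #include <functional>
    #include <map>
    #include <queue>
    #include <string>
    #include <vector>

    // Hypothetical sketch: batch change records per callback priority and
    // schedule one drain task only on the empty -> non-empty transition.
    class ObserverQueue {
     public:
      using Task = std::function<void()>;
      explicit ObserverQueue(std::queue<Task>* microtasks)
          : microtasks_(microtasks) {}

      void Enqueue(int priority, std::string record) {
        bool was_empty = pending_.empty();
        pending_[priority].push_back(std::move(record));
        if (was_empty) {
          // Mirrors %EnqueueMicrotask(ObserveMicrotaskRunner): one runner
          // per turn, however many records arrive before it executes.
          microtasks_->push([this] { Drain(); });
        }
      }

     private:
      void Drain() {
        auto batch = std::move(pending_);  // Mirrors SetPendingObservers(null).
        pending_.clear();
        for (auto& entry : batch) {
          // entry.first is the priority, entry.second its records; a real
          // runner would now invoke the matching callback with this batch.
          (void)entry;
        }
      }

      std::map<int, std::vector<std::string>> pending_;
      std::queue<Task>* microtasks_;
    };

    int main() {
      std::queue<ObserverQueue::Task> microtasks;
      ObserverQueue q(&microtasks);
      q.Enqueue(0, "updated");
      q.Enqueue(0, "deleted");  // Second record: no second runner scheduled.
      while (!microtasks.empty()) { microtasks.front()(); microtasks.pop(); }
    }
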
diff --git a/chromium/v8/src/objects-debug.cc b/chromium/v8/src/objects-debug.cc
index ed93e1dc9e1..877a9d51da7 100644
--- a/chromium/v8/src/objects-debug.cc
+++ b/chromium/v8/src/objects-debug.cc
@@ -1,53 +1,25 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "disassembler.h"
-#include "disasm.h"
-#include "jsregexp.h"
-#include "macro-assembler.h"
-#include "objects-visiting.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/disassembler.h"
+#include "src/disasm.h"
+#include "src/jsregexp.h"
+#include "src/macro-assembler.h"
+#include "src/objects-visiting.h"
namespace v8 {
namespace internal {
#ifdef VERIFY_HEAP
-void MaybeObject::Verify() {
- Object* this_as_object;
- if (ToObject(&this_as_object)) {
- if (this_as_object->IsSmi()) {
- Smi::cast(this_as_object)->SmiVerify();
- } else {
- HeapObject::cast(this_as_object)->HeapObjectVerify();
- }
+void Object::ObjectVerify() {
+ if (IsSmi()) {
+ Smi::cast(this)->SmiVerify();
} else {
- Failure::cast(this)->FailureVerify();
+ HeapObject::cast(this)->HeapObjectVerify();
}
}
@@ -66,11 +38,6 @@ void Smi::SmiVerify() {
}
-void Failure::FailureVerify() {
- CHECK(IsFailure());
-}
-
-
void HeapObject::HeapObjectVerify() {
InstanceType instance_type = map()->instance_type();
@@ -104,34 +71,18 @@ void HeapObject::HeapObjectVerify() {
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpaceVerify();
break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- ExternalByteArray::cast(this)->ExternalByteArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayVerify();
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- ExternalShortArray::cast(this)->ExternalShortArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- ExternalUnsignedShortArray::cast(this)->
- ExternalUnsignedShortArrayVerify();
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- ExternalIntArray::cast(this)->ExternalIntArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayVerify();
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
- break;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- ExternalDoubleArray::cast(this)->ExternalDoubleArrayVerify();
+
+#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ External##Type##Array::cast(this)->External##Type##ArrayVerify(); \
+ break; \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ Fixed##Type##Array::cast(this)->FixedTypedArrayVerify(); \
break;
+
+ TYPED_ARRAYS(VERIFY_TYPED_ARRAY)
+#undef VERIFY_TYPED_ARRAY
+
case CODE_TYPE:
Code::cast(this)->CodeVerify();
break;
@@ -181,6 +132,12 @@ void HeapObject::HeapObjectVerify() {
case JS_MAP_TYPE:
JSMap::cast(this)->JSMapVerify();
break;
+ case JS_SET_ITERATOR_TYPE:
+ JSSetIterator::cast(this)->JSSetIteratorVerify();
+ break;
+ case JS_MAP_ITERATOR_TYPE:
+ JSMapIterator::cast(this)->JSMapIteratorVerify();
+ break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapVerify();
break;
@@ -262,54 +219,27 @@ void FreeSpace::FreeSpaceVerify() {
}
-void ExternalPixelArray::ExternalPixelArrayVerify() {
- CHECK(IsExternalPixelArray());
-}
-
-
-void ExternalByteArray::ExternalByteArrayVerify() {
- CHECK(IsExternalByteArray());
-}
-
-
-void ExternalUnsignedByteArray::ExternalUnsignedByteArrayVerify() {
- CHECK(IsExternalUnsignedByteArray());
-}
-
-
-void ExternalShortArray::ExternalShortArrayVerify() {
- CHECK(IsExternalShortArray());
-}
-
-
-void ExternalUnsignedShortArray::ExternalUnsignedShortArrayVerify() {
- CHECK(IsExternalUnsignedShortArray());
-}
-
-
-void ExternalIntArray::ExternalIntArrayVerify() {
- CHECK(IsExternalIntArray());
-}
-
-
-void ExternalUnsignedIntArray::ExternalUnsignedIntArrayVerify() {
- CHECK(IsExternalUnsignedIntArray());
-}
-
+#define EXTERNAL_ARRAY_VERIFY(Type, type, TYPE, ctype, size) \
+ void External##Type##Array::External##Type##ArrayVerify() { \
+ CHECK(IsExternal##Type##Array()); \
+ }
-void ExternalFloatArray::ExternalFloatArrayVerify() {
- CHECK(IsExternalFloatArray());
-}
+TYPED_ARRAYS(EXTERNAL_ARRAY_VERIFY)
+#undef EXTERNAL_ARRAY_VERIFY
-void ExternalDoubleArray::ExternalDoubleArrayVerify() {
- CHECK(IsExternalDoubleArray());
+template <class Traits>
+void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
+ CHECK(IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() ==
+ Traits::kInstanceType);
}
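
The verifier hunks above replace nine hand-written External*ArrayVerify bodies
with a single expansion of the TYPED_ARRAYS X-macro. A self-contained sketch of
the X-macro technique follows; SHAPES, the tags, and the generated verifiers
are invented for illustration and are not taken from V8.

    #include <cassert>

    // X-macro: the list is written once; each use site supplies its own V.
    #define SHAPES(V) \
      V(Circle, 1)    \
      V(Square, 2)

    // Expand one verifier per entry, as the diff does with TYPED_ARRAYS.
    #define DECLARE_VERIFY(Type, tag)        \
      void Type##Verify(int instance_type) { \
        assert(instance_type == tag);        \
      }
    SHAPES(DECLARE_VERIFY)
    #undef DECLARE_VERIFY

    int main() {
      CircleVerify(1);
      SquareVerify(2);
    }
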
bool JSObject::ElementsAreSafeToExamine() {
- return (FLAG_use_gvn && FLAG_use_allocation_folding) ||
- reinterpret_cast<Map*>(elements()) !=
+  // If a GC was triggered while constructing this object, the elements
+  // pointer may point to a one-pointer filler map.
+ return reinterpret_cast<Map*>(elements()) !=
GetHeap()->one_pointer_filler_map();
}
@@ -318,7 +248,7 @@ void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
- if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
+ if (GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS) {
CHECK(this->elements()->IsFixedArray());
CHECK_GE(this->elements()->length(), 2);
}
@@ -331,12 +261,18 @@ void JSObject::JSObjectVerify() {
for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
if (descriptors->GetDetails(i).type() == FIELD) {
Representation r = descriptors->GetDetails(i).representation();
- int field = descriptors->GetFieldIndex(i);
- Object* value = RawFastPropertyAt(field);
+ FieldIndex index = FieldIndex::ForDescriptor(map(), i);
+ Object* value = RawFastPropertyAt(index);
if (r.IsDouble()) ASSERT(value->IsHeapNumber());
if (value->IsUninitialized()) continue;
if (r.IsSmi()) ASSERT(value->IsSmi());
if (r.IsHeapObject()) ASSERT(value->IsHeapObject());
+ HeapType* field_type = descriptors->GetFieldType(i);
+ if (r.IsNone()) {
+ CHECK(field_type->Is(HeapType::None()));
+ } else if (!HeapType::Any()->Is(field_type)) {
+ CHECK(!field_type->NowStable() || field_type->NowContains(value));
+ }
}
}
}
@@ -411,7 +347,6 @@ void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
VerifyObjectField(kStorage1Offset);
VerifyObjectField(kStorage2Offset);
- VerifyHeapPointer(type_feedback_cells());
}
@@ -423,11 +358,7 @@ void AliasedArgumentsEntry::AliasedArgumentsEntryVerify() {
void FixedArray::FixedArrayVerify() {
for (int i = 0; i < length(); i++) {
Object* e = get(i);
- if (e->IsHeapObject()) {
- VerifyHeapPointer(e);
- } else {
- e->Verify();
- }
+ VerifyPointer(e);
}
}
@@ -447,6 +378,15 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
void ConstantPoolArray::ConstantPoolArrayVerify() {
CHECK(IsConstantPoolArray());
+ ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR);
+ while (!code_iter.is_finished()) {
+ Address code_entry = get_code_ptr_entry(code_iter.next_index());
+ VerifyPointer(Code::GetCodeFromTargetAddress(code_entry));
+ }
+ ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR);
+ while (!heap_iter.is_finished()) {
+ VerifyObjectField(OffsetOfElementAt(heap_iter.next_index()));
+ }
}
@@ -534,7 +474,6 @@ void JSMessageObject::JSMessageObjectVerify() {
VerifyObjectField(kEndPositionOffset);
VerifyObjectField(kArgumentsOffset);
VerifyObjectField(kScriptOffset);
- VerifyObjectField(kStackTraceOffset);
VerifyObjectField(kStackFramesOffset);
}
@@ -558,6 +497,7 @@ void ConsString::ConsStringVerify() {
CHECK(this->second() == GetHeap()->empty_string() ||
this->second()->IsString());
CHECK(this->length() >= ConsString::kMinLength);
+ CHECK(this->length() == this->first()->length() + this->second()->length());
if (this->IsFlat()) {
// A flat cons can only be created by String::SlowTryFlatten.
// Afterwards, the first part may be externalized.
@@ -589,6 +529,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
VerifyObjectField(kNameOffset);
VerifyObjectField(kCodeOffset);
VerifyObjectField(kOptimizedCodeMapOffset);
+ VerifyObjectField(kFeedbackVectorOffset);
VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kInstanceClassNameOffset);
VerifyObjectField(kFunctionDataOffset);
@@ -603,7 +544,7 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kNativeContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
- CHECK(HasFastObjectElements());
+ CHECK(HasFastSmiElements());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
@@ -632,18 +573,41 @@ void JSBuiltinsObject::JSBuiltinsObjectVerify() {
void Oddball::OddballVerify() {
CHECK(IsOddball());
+ Heap* heap = GetHeap();
VerifyHeapPointer(to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- CHECK(number == HeapObject::cast(number)->GetHeap()->nan_value());
+ CHECK(number == heap->nan_value());
} else {
CHECK(number->IsSmi());
int value = Smi::cast(number)->value();
// Hidden oddballs have negative smis.
- const int kLeastHiddenOddballNumber = -4;
+ const int kLeastHiddenOddballNumber = -5;
CHECK_LE(value, 1);
CHECK(value >= kLeastHiddenOddballNumber);
}
+ if (map() == heap->undefined_map()) {
+ CHECK(this == heap->undefined_value());
+ } else if (map() == heap->the_hole_map()) {
+ CHECK(this == heap->the_hole_value());
+ } else if (map() == heap->null_map()) {
+ CHECK(this == heap->null_value());
+ } else if (map() == heap->boolean_map()) {
+ CHECK(this == heap->true_value() ||
+ this == heap->false_value());
+ } else if (map() == heap->uninitialized_map()) {
+ CHECK(this == heap->uninitialized_value());
+ } else if (map() == heap->no_interceptor_result_sentinel_map()) {
+ CHECK(this == heap->no_interceptor_result_sentinel());
+ } else if (map() == heap->arguments_marker_map()) {
+ CHECK(this == heap->arguments_marker());
+ } else if (map() == heap->termination_exception_map()) {
+ CHECK(this == heap->termination_exception());
+ } else if (map() == heap->exception_map()) {
+ CHECK(this == heap->exception());
+ } else {
+ UNREACHABLE();
+ }
}
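
The extended OddballVerify above checks map identity exhaustively: every
oddball's map must be one of the known special maps, and each such map must
resolve to its single canonical heap value, otherwise UNREACHABLE() fires. A
toy sketch of that singleton check; Map, Heap, and the values here are invented
for illustration.

    #include <cassert>

    enum class Map { kUndefined, kTheHole, kNull };

    struct Heap {
      const void* undefined_value;
      const void* the_hole_value;
      const void* null_value;
    };

    // Each special map admits exactly one canonical instance; any other
    // pairing is heap corruption (the diff's UNREACHABLE() catches maps
    // outside the known set).
    void OddballVerify(const void* obj, Map map, const Heap& heap) {
      switch (map) {
        case Map::kUndefined: assert(obj == heap.undefined_value); break;
        case Map::kTheHole:   assert(obj == heap.the_hole_value);  break;
        case Map::kNull:      assert(obj == heap.null_value);      break;
      }
    }

    int main() {
      int undef = 0, hole = 0, null_v = 0;
      Heap heap{&undef, &hole, &null_v};
      OddballVerify(&undef, Map::kUndefined, heap);  // Passes.
      OddballVerify(&null_v, Map::kNull, heap);      // Passes.
    }
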
@@ -663,10 +627,11 @@ void PropertyCell::PropertyCellVerify() {
void Code::CodeVerify() {
CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
kCodeAlignment));
- relocation_info()->Verify();
+ relocation_info()->ObjectVerify();
Address last_gc_pc = NULL;
+ Isolate* isolate = GetIsolate();
for (RelocIterator it(this); !it.done(); it.next()) {
- it.rinfo()->Verify();
+ it.rinfo()->Verify(isolate);
// Ensure that GC will not iterate twice over the same pointer.
if (RelocInfo::IsGCRelocMode(it.rinfo()->rmode())) {
CHECK(it.rinfo()->pc() != last_gc_pc);
@@ -677,19 +642,25 @@ void Code::CodeVerify() {
void Code::VerifyEmbeddedObjectsDependency() {
+ if (!CanContainWeakObjects()) return;
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
Object* obj = it.rinfo()->target_object();
- if (IsWeakEmbeddedObject(kind(), obj)) {
+ if (IsWeakObject(obj)) {
if (obj->IsMap()) {
Map* map = Map::cast(obj);
- CHECK(map->dependent_code()->Contains(
- DependentCode::kWeaklyEmbeddedGroup, this));
+ DependentCode::DependencyGroup group = is_optimized_code() ?
+ DependentCode::kWeakCodeGroup : DependentCode::kWeakICGroup;
+ CHECK(map->dependent_code()->Contains(group, this));
} else if (obj->IsJSObject()) {
Object* raw_table = GetIsolate()->heap()->weak_object_to_code_table();
WeakHashTable* table = WeakHashTable::cast(raw_table);
- CHECK(DependentCode::cast(table->Lookup(obj))->Contains(
- DependentCode::kWeaklyEmbeddedGroup, this));
+ Handle<Object> key_obj(obj, isolate);
+ CHECK(DependentCode::cast(table->Lookup(key_obj))->Contains(
+ DependentCode::kWeakCodeGroup, this));
}
}
}
@@ -713,7 +684,8 @@ void JSSet::JSSetVerify() {
CHECK(IsJSSet());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
+ // TODO(arv): Verify OrderedHashTable too.
}
@@ -721,7 +693,28 @@ void JSMap::JSMapVerify() {
CHECK(IsJSMap());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
+ // TODO(arv): Verify OrderedHashTable too.
+}
+
+
+void JSSetIterator::JSSetIteratorVerify() {
+ CHECK(IsJSSetIterator());
+ JSObjectVerify();
+ VerifyHeapPointer(table());
+ CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
+ CHECK(index()->IsSmi() || index()->IsUndefined());
+ CHECK(kind()->IsSmi() || kind()->IsUndefined());
+}
+
+
+void JSMapIterator::JSMapIteratorVerify() {
+ CHECK(IsJSMapIterator());
+ JSObjectVerify();
+ VerifyHeapPointer(table());
+ CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
+ CHECK(index()->IsSmi() || index()->IsUndefined());
+ CHECK(kind()->IsSmi() || kind()->IsUndefined());
}
@@ -811,7 +804,8 @@ void JSArrayBufferView::JSArrayBufferViewVerify() {
CHECK(IsJSArrayBufferView());
JSObjectVerify();
VerifyPointer(buffer());
- CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined());
+ CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined()
+ || buffer() == Smi::FromInt(0));
VerifyPointer(byte_offset());
CHECK(byte_offset()->IsSmi() || byte_offset()->IsHeapNumber()
@@ -847,7 +841,7 @@ void Foreign::ForeignVerify() {
void Box::BoxVerify() {
CHECK(IsBox());
- value()->Verify();
+ value()->ObjectVerify();
}
@@ -975,7 +969,6 @@ void Script::ScriptVerify() {
VerifyPointer(name());
line_offset()->SmiVerify();
column_offset()->SmiVerify();
- VerifyPointer(data());
VerifyPointer(wrapper());
type()->SmiVerify();
VerifyPointer(line_ends());
@@ -984,7 +977,7 @@ void Script::ScriptVerify() {
void JSFunctionResultCache::JSFunctionResultCacheVerify() {
- JSFunction::cast(get(kFactoryIndex))->Verify();
+ JSFunction::cast(get(kFactoryIndex))->ObjectVerify();
int size = Smi::cast(get(kCacheSizeIndex))->value();
CHECK(kEntriesIndex <= size);
@@ -999,21 +992,21 @@ void JSFunctionResultCache::JSFunctionResultCacheVerify() {
if (FLAG_enable_slow_asserts) {
for (int i = kEntriesIndex; i < size; i++) {
CHECK(!get(i)->IsTheHole());
- get(i)->Verify();
+ get(i)->ObjectVerify();
}
for (int i = size; i < length(); i++) {
CHECK(get(i)->IsTheHole());
- get(i)->Verify();
+ get(i)->ObjectVerify();
}
}
}
void NormalizedMapCache::NormalizedMapCacheVerify() {
- FixedArray::cast(this)->Verify();
+ FixedArray::cast(this)->FixedArrayVerify();
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < length(); i++) {
- Object* e = get(i);
+ Object* e = FixedArray::get(i);
if (e->IsMap()) {
Map::cast(e)->SharedMapVerify();
} else {
@@ -1024,7 +1017,6 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void DebugInfo::DebugInfoVerify() {
CHECK(IsDebugInfo());
VerifyPointer(shared());
@@ -1041,7 +1033,6 @@ void BreakPointInfo::BreakPointInfoVerify() {
statement_position()->SmiVerify();
VerifyPointer(break_point_objects());
}
-#endif // ENABLE_DEBUGGER_SUPPORT
#endif // VERIFY_HEAP
#ifdef DEBUG
@@ -1079,17 +1070,15 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_unused_elements_ += holes;
break;
}
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS: {
- info->number_of_objects_with_fast_elements_++;
- ExternalPixelArray* e = ExternalPixelArray::cast(elements());
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ { info->number_of_objects_with_fast_elements_++;
+ FixedArrayBase* e = FixedArrayBase::cast(elements());
info->number_of_fast_used_elements_ += e->length();
break;
}
@@ -1100,7 +1089,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
dict->Capacity() - dict->NumberOfElements();
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
break;
}
}
diff --git a/chromium/v8/src/objects-inl.h b/chromium/v8/src/objects-inl.h
index 2db3c04f1f2..4848fad8bdd 100644
--- a/chromium/v8/src/objects-inl.h
+++ b/chromium/v8/src/objects-inl.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
//
// Review notes:
//
@@ -35,19 +12,24 @@
#ifndef V8_OBJECTS_INL_H_
#define V8_OBJECTS_INL_H_
-#include "elements.h"
-#include "objects.h"
-#include "contexts.h"
-#include "conversions-inl.h"
-#include "heap.h"
-#include "isolate.h"
-#include "property.h"
-#include "spaces.h"
-#include "store-buffer.h"
-#include "v8memory.h"
-#include "factory.h"
-#include "incremental-marking.h"
-#include "transitions-inl.h"
+#include "src/base/atomicops.h"
+#include "src/elements.h"
+#include "src/objects.h"
+#include "src/contexts.h"
+#include "src/conversions-inl.h"
+#include "src/field-index-inl.h"
+#include "src/heap.h"
+#include "src/isolate.h"
+#include "src/heap-inl.h"
+#include "src/property.h"
+#include "src/spaces.h"
+#include "src/store-buffer.h"
+#include "src/v8memory.h"
+#include "src/factory.h"
+#include "src/incremental-marking.h"
+#include "src/transitions-inl.h"
+#include "src/objects-visiting.h"
+#include "src/lookup.h"
namespace v8 {
namespace internal {
@@ -57,7 +39,7 @@ PropertyDetails::PropertyDetails(Smi* smi) {
}
-Smi* PropertyDetails::AsSmi() {
+Smi* PropertyDetails::AsSmi() const {
// Ensure the upper 2 bits have the same value by sign extending it. This is
// necessary to be able to use the 31st bit of the property details.
int value = value_ << 1;
@@ -65,7 +47,7 @@ Smi* PropertyDetails::AsSmi() {
}
-PropertyDetails PropertyDetails::AsDeleted() {
+PropertyDetails PropertyDetails::AsDeleted() const {
Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
return PropertyDetails(smi);
}
@@ -116,6 +98,23 @@ PropertyDetails PropertyDetails::AsDeleted() {
WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
+#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
+ int holder::synchronized_##name() { \
+ Object* value = ACQUIRE_READ_FIELD(this, offset); \
+ return Smi::cast(value)->value(); \
+ } \
+ void holder::synchronized_set_##name(int value) { \
+ RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+ }
+
+#define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \
+ int holder::nobarrier_##name() { \
+ Object* value = NOBARRIER_READ_FIELD(this, offset); \
+ return Smi::cast(value)->value(); \
+ } \
+ void holder::nobarrier_set_##name(int value) { \
+ NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+ }
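
The new SYNCHRONIZED_ and NOBARRIER_ accessor macros above generate
acquire-load/release-store and relaxed variants of the plain field accessors,
so a background thread can read a field without tearing. Below is a sketch of
the three memory orderings using std::atomic, the standard-library counterpart
of the base::Acquire_Load/Release_Store primitives the diff relies on; Header
and its field are made up for the example.

    #include <atomic>
    #include <cstdint>

    // One word-sized field with the three access flavours the macros
    // generate. Acquire/release pairs order surrounding reads and writes;
    // relaxed ("nobarrier") only guarantees the access itself is atomic.
    struct Header {
      std::atomic<std::intptr_t> word{0};

      std::intptr_t AcquireRead() const {
        return word.load(std::memory_order_acquire);
      }
      void ReleaseWrite(std::intptr_t v) {
        word.store(v, std::memory_order_release);
      }
      std::intptr_t NoBarrierRead() const {
        return word.load(std::memory_order_relaxed);
      }
      void NoBarrierWrite(std::intptr_t v) {
        word.store(v, std::memory_order_relaxed);
      }
    };

    int main() {
      Header h;
      h.ReleaseWrite(42);                    // Publish.
      return h.AcquireRead() == 42 ? 0 : 1;  // Observe.
    }
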
#define BOOL_GETTER(holder, field, name, offset) \
bool holder::name() { \
@@ -133,7 +132,8 @@ PropertyDetails PropertyDetails::AsDeleted() {
bool Object::IsFixedArrayBase() {
- return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray();
+ return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() ||
+ IsFixedTypedArrayBase() || IsExternalArray();
}
@@ -160,12 +160,6 @@ bool Object::IsHeapObject() {
}
-bool Object::NonFailureIsHeapObject() {
- ASSERT(!this->IsFailure());
- return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0;
-}
-
-
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
@@ -199,6 +193,11 @@ bool Object::IsSpecFunction() {
}
+bool Object::IsTemplateInfo() {
+ return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
+}
+
+
bool Object::IsInternalizedString() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
@@ -259,20 +258,25 @@ bool Object::IsExternalTwoByteString() {
String::cast(this)->IsTwoByteRepresentation();
}
+
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
- return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
+ return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray() ||
+ IsFixedTypedArrayBase();
}
-MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
- Representation representation) {
- if (!FLAG_track_double_fields) return this;
- if (!representation.IsDouble()) return this;
- if (IsUninitialized()) {
- return heap->AllocateHeapNumber(0);
+Handle<Object> Object::NewStorageFor(Isolate* isolate,
+ Handle<Object> object,
+ Representation representation) {
+ if (representation.IsSmi() && object->IsUninitialized()) {
+ return handle(Smi::FromInt(0), isolate);
+ }
+ if (!representation.IsDouble()) return object;
+ if (object->IsUninitialized()) {
+ return isolate->factory()->NewHeapNumber(0);
}
- return heap->AllocateHeapNumber(Number());
+ return isolate->factory()->NewHeapNumber(object->Number());
}
@@ -397,10 +401,10 @@ uint32_t StringShape::full_representation_tag() {
}
-STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
+STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
Internals::kFullStringRepresentationMask);
-STATIC_CHECK(static_cast<uint32_t>(kStringEncodingMask) ==
+STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
Internals::kStringEncodingMask);
@@ -419,10 +423,10 @@ bool StringShape::IsExternalAscii() {
}
-STATIC_CHECK((kExternalStringTag | kOneByteStringTag) ==
+STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
Internals::kExternalAsciiRepresentationTag);
-STATIC_CHECK(v8::String::ASCII_ENCODING == kOneByteStringTag);
+STATIC_ASSERT(v8::String::ASCII_ENCODING == kOneByteStringTag);
bool StringShape::IsExternalTwoByte() {
@@ -430,10 +434,10 @@ bool StringShape::IsExternalTwoByte() {
}
-STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) ==
+STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
Internals::kExternalTwoByteRepresentationTag);
-STATIC_CHECK(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
+STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
uc32 FlatStringReader::Get(int index) {
ASSERT(0 <= index && index <= length_);
@@ -445,6 +449,162 @@ uc32 FlatStringReader::Get(int index) {
}
+Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
+ return key->AsHandle(isolate);
+}
+
+
+Handle<Object> MapCacheShape::AsHandle(Isolate* isolate, HashTableKey* key) {
+ return key->AsHandle(isolate);
+}
+
+
+Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
+ HashTableKey* key) {
+ return key->AsHandle(isolate);
+}
+
+
+Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
+ HashTableKey* key) {
+ return key->AsHandle(isolate);
+}
+
+template <typename Char>
+class SequentialStringKey : public HashTableKey {
+ public:
+ explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
+ : string_(string), hash_field_(0), seed_(seed) { }
+
+ virtual uint32_t Hash() V8_OVERRIDE {
+ hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
+ string_.length(),
+ seed_);
+
+ uint32_t result = hash_field_ >> String::kHashShift;
+ ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ return result;
+ }
+
+
+ virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ return String::cast(other)->Hash();
+ }
+
+ Vector<const Char> string_;
+ uint32_t hash_field_;
+ uint32_t seed_;
+};
+
+
+class OneByteStringKey : public SequentialStringKey<uint8_t> {
+ public:
+ OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
+ : SequentialStringKey<uint8_t>(str, seed) { }
+
+ virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ return String::cast(string)->IsOneByteEqualTo(string_);
+ }
+
+ virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+};
+
+
+template<class Char>
+class SubStringKey : public HashTableKey {
+ public:
+ SubStringKey(Handle<String> string, int from, int length)
+ : string_(string), from_(from), length_(length) {
+ if (string_->IsSlicedString()) {
+ string_ = Handle<String>(Unslice(*string_, &from_));
+ }
+ ASSERT(string_->IsSeqString() || string->IsExternalString());
+ }
+
+ virtual uint32_t Hash() V8_OVERRIDE {
+ ASSERT(length_ >= 0);
+ ASSERT(from_ + length_ <= string_->length());
+ const Char* chars = GetChars() + from_;
+ hash_field_ = StringHasher::HashSequentialString(
+ chars, length_, string_->GetHeap()->HashSeed());
+ uint32_t result = hash_field_ >> String::kHashShift;
+ ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ return result;
+ }
+
+ virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ return String::cast(other)->Hash();
+ }
+
+ virtual bool IsMatch(Object* string) V8_OVERRIDE;
+ virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+
+ private:
+ const Char* GetChars();
+ String* Unslice(String* string, int* offset) {
+ while (string->IsSlicedString()) {
+ SlicedString* sliced = SlicedString::cast(string);
+ *offset += sliced->offset();
+ string = sliced->parent();
+ }
+ return string;
+ }
+
+ Handle<String> string_;
+ int from_;
+ int length_;
+ uint32_t hash_field_;
+};
+
+
+class TwoByteStringKey : public SequentialStringKey<uc16> {
+ public:
+ explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
+ : SequentialStringKey<uc16>(str, seed) { }
+
+ virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ return String::cast(string)->IsTwoByteEqualTo(string_);
+ }
+
+ virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+};
+
+
+// Utf8StringKey carries a vector of chars as key.
+class Utf8StringKey : public HashTableKey {
+ public:
+ explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
+ : string_(string), hash_field_(0), seed_(seed) { }
+
+ virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ return String::cast(string)->IsUtf8EqualTo(string_);
+ }
+
+ virtual uint32_t Hash() V8_OVERRIDE {
+ if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
+ hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
+ uint32_t result = hash_field_ >> String::kHashShift;
+ ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ return result;
+ }
+
+ virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ return String::cast(other)->Hash();
+ }
+
+ virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ if (hash_field_ == 0) Hash();
+ return isolate->factory()->NewInternalizedStringFromUtf8(
+ string_, chars_, hash_field_);
+ }
+
+ Vector<const char> string_;
+ uint32_t hash_field_;
+ int chars_; // Caches the number of characters when computing the hash code.
+ uint32_t seed_;
+};
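
Each key class added above implements the same HashTableKey protocol: Hash()
computes and caches the string hash, IsMatch() compares the key against an
existing table entry, and AsHandle() materializes a new string only on a miss.
A reduced sketch of that lookup-or-insert contract follows; Key,
LookupOrInsert, and the FNV hash are illustrative assumptions, and a real
table would probe buckets by Hash() rather than scan linearly.

    #include <cstdint>
    #include <string>
    #include <string_view>
    #include <vector>

    // Sketch of the HashTableKey contract: probe with Hash()/IsMatch(),
    // materialize with AsHandle() only when no existing entry matches.
    struct Key {
      std::string_view chars;
      uint32_t Hash() const {
        uint32_t h = 2166136261u;  // FNV-1a as a stand-in string hasher.
        for (char c : chars) h = (h ^ static_cast<uint8_t>(c)) * 16777619u;
        return h == 0 ? 1 : h;     // As in the diff: never hand out hash 0.
      }
      bool IsMatch(const std::string& entry) const { return entry == chars; }
      std::string AsHandle() const { return std::string(chars); }
    };

    // Linear scan keeps the sketch short; a real table indexes by Hash().
    const std::string& LookupOrInsert(std::vector<std::string>& table,
                                      const Key& key) {
      for (const auto& entry : table)
        if (key.IsMatch(entry)) return entry;  // Hit: reuse interned string.
      table.push_back(key.AsHandle());         // Miss: materialize once.
      return table.back();
    }

    int main() {
      std::vector<std::string> table;
      Key k{"foo"};
      const std::string& a = LookupOrInsert(table, k);
      const std::string& b = LookupOrInsert(table, k);  // Hit: same entry.
      return (&a == &b && table.size() == 1) ? 0 : 1;
    }
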
+
+
bool Object::IsNumber() {
return IsSmi() || IsHeapNumber();
}
@@ -461,9 +621,6 @@ bool Object::IsFiller() {
}
-TYPE_CHECKER(ExternalPixelArray, EXTERNAL_PIXEL_ARRAY_TYPE)
-
-
bool Object::IsExternalArray() {
if (!Object::IsHeapObject())
return false;
@@ -474,51 +631,21 @@ bool Object::IsExternalArray() {
}
-TYPE_CHECKER(ExternalByteArray, EXTERNAL_BYTE_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedByteArray, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
-TYPE_CHECKER(ExternalShortArray, EXTERNAL_SHORT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedShortArray, EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalIntArray, EXTERNAL_INT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedIntArray, EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalFloatArray, EXTERNAL_FLOAT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalDoubleArray, EXTERNAL_DOUBLE_ARRAY_TYPE)
+#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
+ TYPE_CHECKER(External##Type##Array, EXTERNAL_##TYPE##_ARRAY_TYPE) \
+ TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
-
-bool MaybeObject::IsFailure() {
- return HAS_FAILURE_TAG(this);
-}
+TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
+#undef TYPED_ARRAY_TYPE_CHECKER
-bool MaybeObject::IsRetryAfterGC() {
- return HAS_FAILURE_TAG(this)
- && Failure::cast(this)->type() == Failure::RETRY_AFTER_GC;
-}
-
-
-bool MaybeObject::IsOutOfMemory() {
- return HAS_FAILURE_TAG(this)
- && Failure::cast(this)->IsOutOfMemoryException();
-}
-
-
-bool MaybeObject::IsException() {
- return this == Failure::Exception();
-}
-
-
-bool MaybeObject::IsTheHole() {
- return !IsFailure() && ToObjectUnchecked()->IsTheHole();
-}
-
-
-bool MaybeObject::IsUninitialized() {
- return !IsFailure() && ToObjectUnchecked()->IsUninitialized();
-}
-
+bool Object::IsFixedTypedArrayBase() {
+ if (!Object::IsHeapObject()) return false;
-Failure* Failure::cast(MaybeObject* obj) {
- ASSERT(HAS_FAILURE_TAG(obj));
- return reinterpret_cast<Failure*>(obj);
+ InstanceType instance_type =
+ HeapObject::cast(this)->map()->instance_type();
+ return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
+ instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
}
@@ -538,14 +665,15 @@ bool Object::IsJSObject() {
bool Object::IsJSProxy() {
if (!Object::IsHeapObject()) return false;
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
+ return HeapObject::cast(this)->map()->IsJSProxyMap();
}
TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
+TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
+TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
@@ -605,16 +733,6 @@ bool Object::IsDependentCode() {
}
-bool Object::IsTypeFeedbackCells() {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a cache cells array. Since this is used for asserts we can check that
- // the length is plausible though.
- if (FixedArray::cast(this)->length() % 2 != 0) return false;
- return true;
-}
-
-
bool Object::IsContext() {
if (!Object::IsHeapObject()) return false;
Map* map = HeapObject::cast(this)->map();
@@ -710,8 +828,7 @@ bool Object::IsDictionary() {
bool Object::IsStringTable() {
- return IsHashTable() &&
- this == HeapObject::cast(this)->GetHeap()->raw_unchecked_string_table();
+ return IsHashTable();
}
@@ -735,13 +852,23 @@ bool Object::IsJSFunctionResultCache() {
bool Object::IsNormalizedMapCache() {
- if (!IsFixedArray()) return false;
- if (FixedArray::cast(this)->length() != NormalizedMapCache::kEntries) {
+ return NormalizedMapCache::IsNormalizedMapCache(this);
+}
+
+
+int NormalizedMapCache::GetIndex(Handle<Map> map) {
+ return map->Hash() % NormalizedMapCache::kEntries;
+}
+
+
+bool NormalizedMapCache::IsNormalizedMapCache(Object* obj) {
+ if (!obj->IsFixedArray()) return false;
+ if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
return false;
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
+ reinterpret_cast<NormalizedMapCache*>(obj)->NormalizedMapCacheVerify();
}
#endif
return true;
@@ -773,6 +900,13 @@ bool Object::IsObjectHashTable() {
}
+bool Object::IsOrderedHashTable() {
+ return IsHeapObject() &&
+ HeapObject::cast(this)->map() ==
+ HeapObject::cast(this)->GetHeap()->ordered_hash_table_map();
+}
+
+
bool Object::IsPrimitive() {
return IsOddball() || IsNumber() || IsString();
}
@@ -782,7 +916,8 @@ bool Object::IsJSGlobalProxy() {
bool result = IsHeapObject() &&
(HeapObject::cast(this)->map()->instance_type() ==
JS_GLOBAL_PROXY_TYPE);
- ASSERT(!result || IsAccessCheckNeeded());
+ ASSERT(!result ||
+ HeapObject::cast(this)->map()->is_access_check_needed());
return result;
}
@@ -807,8 +942,14 @@ bool Object::IsUndetectableObject() {
bool Object::IsAccessCheckNeeded() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->is_access_check_needed();
+ if (!IsHeapObject()) return false;
+ if (IsJSGlobalProxy()) {
+ JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
+ GlobalObject* global =
+ proxy->GetIsolate()->context()->global_object();
+ return proxy->IsDetachedFrom(global);
+ }
+ return HeapObject::cast(this)->map()->is_access_check_needed();
}
@@ -847,6 +988,11 @@ bool Object::IsTheHole() {
}
+bool Object::IsException() {
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kException;
+}
+
+
bool Object::IsUninitialized() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
}
@@ -880,16 +1026,23 @@ bool Object::IsNaN() {
}
-MaybeObject* Object::ToSmi() {
- if (IsSmi()) return this;
- if (IsHeapNumber()) {
- double value = HeapNumber::cast(this)->value();
+MaybeHandle<Smi> Object::ToSmi(Isolate* isolate, Handle<Object> object) {
+ if (object->IsSmi()) return Handle<Smi>::cast(object);
+ if (object->IsHeapNumber()) {
+ double value = Handle<HeapNumber>::cast(object)->value();
int int_value = FastD2I(value);
if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
+ return handle(Smi::FromInt(int_value), isolate);
}
}
- return Failure::Exception();
+ return Handle<Smi>();
+}
+
+
+MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
+ Handle<Object> object) {
+ return ToObject(
+ isolate, object, handle(isolate->context()->native_context(), isolate));
}
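
Above, the MaybeObject*-returning converters become static functions returning
MaybeHandle<T>, where an empty handle signals failure instead of a tagged
Failure pointer. The sketch below shows the caller-side pattern with a
simplified stand-in that only mirrors the shape of V8's MaybeHandle (the real
type also provides ToHandleChecked()); it is not the actual class.

    #include <cassert>
    #include <cstdio>
    #include <optional>

    // Simplified stand-in mirroring V8's MaybeHandle<T>: either a value or
    // "empty", and the caller must check before using it.
    template <typename T>
    struct MaybeHandle {
      std::optional<T> value;
      bool ToHandle(T* out) const {
        if (!value) return false;
        *out = *value;
        return true;
      }
    };

    // Mirrors the new Object::ToSmi shape: an empty handle instead of
    // Failure::Exception() for values that do not fit a Smi.
    MaybeHandle<int> ToSmi(double d) {
      int i = static_cast<int>(d);
      if (static_cast<double>(i) == d) return {i};  // Fits: success.
      return {};                                    // Empty: failure.
    }

    int main() {
      int smi = 0;
      if (ToSmi(3.0).ToHandle(&smi)) std::printf("smi: %d\n", smi);
      assert(!ToSmi(3.5).ToHandle(&smi));  // 3.5 is not a valid Smi.
    }
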
@@ -898,32 +1051,70 @@ bool Object::HasSpecificClassOf(String* name) {
}
-MaybeObject* Object::GetElement(Isolate* isolate, uint32_t index) {
+MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
+ Handle<Name> name) {
+ LookupIterator it(object, name);
+ return GetProperty(&it);
+}
+
+
+MaybeHandle<Object> Object::GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
ASSERT(AllowHeapAllocation::IsAllowed());
- return GetElementWithReceiver(isolate, this, index);
+ return Object::GetElementWithReceiver(isolate, object, object, index);
}
-Object* Object::GetElementNoExceptionThrown(Isolate* isolate, uint32_t index) {
- MaybeObject* maybe = GetElementWithReceiver(isolate, this, index);
- ASSERT(!maybe->IsFailure());
- Object* result = NULL; // Initialization to please compiler.
- maybe->ToObject(&result);
- return result;
+MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
+ Handle<Name> name) {
+ uint32_t index;
+ Isolate* isolate = name->GetIsolate();
+ if (name->AsArrayIndex(&index)) return GetElement(isolate, object, index);
+ return GetProperty(object, name);
}
-MaybeObject* Object::GetProperty(Name* key) {
- PropertyAttributes attributes;
- return GetPropertyWithReceiver(this, key, &attributes);
+MaybeHandle<Object> Object::GetProperty(Isolate* isolate,
+ Handle<Object> object,
+ const char* name) {
+ Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
+ ASSERT(!str.is_null());
+#ifdef DEBUG
+ uint32_t index; // Assert that the name is not an array index.
+ ASSERT(!str->AsArrayIndex(&index));
+#endif // DEBUG
+ return GetProperty(object, str);
}
-MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) {
- return GetPropertyWithReceiver(this, key, attributes);
+MaybeHandle<Object> JSProxy::GetElementWithHandler(Handle<JSProxy> proxy,
+ Handle<Object> receiver,
+ uint32_t index) {
+ return GetPropertyWithHandler(
+ proxy, receiver, proxy->GetIsolate()->factory()->Uint32ToString(index));
+}
+
+
+MaybeHandle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ Handle<Object> value,
+ StrictMode strict_mode) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ return SetPropertyWithHandler(
+ proxy, receiver, name, value, NONE, strict_mode);
+}
+
+
+bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ return HasPropertyWithHandler(proxy, name);
}
@@ -933,9 +1124,27 @@ MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) {
#define READ_FIELD(p, offset) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
+#define ACQUIRE_READ_FIELD(p, offset) \
+ reinterpret_cast<Object*>(base::Acquire_Load( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))
+
+#define NOBARRIER_READ_FIELD(p, offset) \
+ reinterpret_cast<Object*>(base::NoBarrier_Load( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))
+
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+#define RELEASE_WRITE_FIELD(p, offset, value) \
+ base::Release_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ reinterpret_cast<base::AtomicWord>(value));
+
+#define NOBARRIER_WRITE_FIELD(p, offset, value) \
+ base::NoBarrier_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ reinterpret_cast<base::AtomicWord>(value));
+
#define WRITE_BARRIER(heap, object, offset, value) \
heap->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
@@ -1030,9 +1239,17 @@ MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) {
#define READ_BYTE_FIELD(p, offset) \
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))
+#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
+ static_cast<byte>(base::NoBarrier_Load( \
+ reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset))))
+
#define WRITE_BYTE_FIELD(p, offset, value) \
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
+#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \
+ base::NoBarrier_Store( \
+ reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::Atomic8>(value));
Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
return &READ_FIELD(obj, byte_offset);
@@ -1057,72 +1274,6 @@ Smi* Smi::FromIntptr(intptr_t value) {
}
-Failure::Type Failure::type() const {
- return static_cast<Type>(value() & kFailureTypeTagMask);
-}
-
-
-bool Failure::IsInternalError() const {
- return type() == INTERNAL_ERROR;
-}
-
-
-bool Failure::IsOutOfMemoryException() const {
- return type() == OUT_OF_MEMORY_EXCEPTION;
-}
-
-
-AllocationSpace Failure::allocation_space() const {
- ASSERT_EQ(RETRY_AFTER_GC, type());
- return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
- & kSpaceTagMask);
-}
-
-
-Failure* Failure::InternalError() {
- return Construct(INTERNAL_ERROR);
-}
-
-
-Failure* Failure::Exception() {
- return Construct(EXCEPTION);
-}
-
-
-Failure* Failure::OutOfMemoryException(intptr_t value) {
- return Construct(OUT_OF_MEMORY_EXCEPTION, value);
-}
-
-
-intptr_t Failure::value() const {
- return static_cast<intptr_t>(
- reinterpret_cast<uintptr_t>(this) >> kFailureTagSize);
-}
-
-
-Failure* Failure::RetryAfterGC() {
- return RetryAfterGC(NEW_SPACE);
-}
-
-
-Failure* Failure::RetryAfterGC(AllocationSpace space) {
- ASSERT((space & ~kSpaceTagMask) == 0);
- return Construct(RETRY_AFTER_GC, space);
-}
-
-
-Failure* Failure::Construct(Type type, intptr_t value) {
- uintptr_t info =
- (static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type;
- ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
- // Fill the unused bits with a pattern that's easy to recognize in crash
- // dumps.
- static const int kFailureMagicPattern = 0x0BAD0000;
- return reinterpret_cast<Failure*>(
- (info << kFailureTagSize) | kFailureTag | kFailureMagicPattern);
-}
-
-
bool Smi::IsValid(intptr_t value) {
bool result = Internals::IsValidSmi(value);
ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue);
@@ -1182,7 +1333,14 @@ Isolate* HeapObject::GetIsolate() {
Map* HeapObject::map() {
+#ifdef DEBUG
+  // Clear the mark potentially added by PathTracer.
+ uintptr_t raw_value =
+ map_word().ToRawValue() & ~static_cast<uintptr_t>(PathTracer::kMarkTag);
+ return MapWord::FromRawValue(raw_value).ToMap();
+#else
return map_word().ToMap();
+#endif
}
@@ -1196,6 +1354,26 @@ void HeapObject::set_map(Map* value) {
}
+Map* HeapObject::synchronized_map() {
+ return synchronized_map_word().ToMap();
+}
+
+
+void HeapObject::synchronized_set_map(Map* value) {
+ synchronized_set_map_word(MapWord::FromMap(value));
+ if (value != NULL) {
+    // TODO(1600) We are passing NULL as a slot because maps can never be on
+    // an evacuation candidate.
+ value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+ }
+}
+
+
+void HeapObject::synchronized_set_map_no_write_barrier(Map* value) {
+ synchronized_set_map_word(MapWord::FromMap(value));
+}
+
+
// Unsafe accessor omitting write barrier.
void HeapObject::set_map_no_write_barrier(Map* value) {
set_map_word(MapWord::FromMap(value));
@@ -1203,14 +1381,26 @@ void HeapObject::set_map_no_write_barrier(Map* value) {
MapWord HeapObject::map_word() {
- return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset)));
+ return MapWord(
+ reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
}
void HeapObject::set_map_word(MapWord map_word) {
- // WRITE_FIELD does not invoke write barrier, but there is no need
- // here.
- WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
+ NOBARRIER_WRITE_FIELD(
+ this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
+}
+
+
+MapWord HeapObject::synchronized_map_word() {
+ return MapWord(
+ reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset)));
+}
+
+
+void HeapObject::synchronized_set_map_word(MapWord map_word) {
+ RELEASE_WRITE_FIELD(
+ this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
@@ -1241,6 +1431,11 @@ void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
}
+void HeapObject::IterateNextCodeLink(ObjectVisitor* v, int offset) {
+ v->VisitNextCodeLink(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
+}
+
+
double HeapNumber::value() {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
@@ -1287,36 +1482,22 @@ FixedArrayBase* JSObject::elements() {
}
-void JSObject::ValidateElements() {
+void JSObject::ValidateElements(Handle<JSObject> object) {
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
- ElementsAccessor* accessor = GetElementsAccessor();
- accessor->Validate(this);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->Validate(object);
}
#endif
}
-bool JSObject::ShouldTrackAllocationInfo() {
- if (AllocationSite::CanTrack(map()->instance_type())) {
- if (!IsJSArray()) {
- return true;
- }
-
- return AllocationSite::GetMode(GetElementsKind()) ==
- TRACK_ALLOCATION_SITE;
- }
- return false;
-}
-
-
void AllocationSite::Initialize() {
set_transition_info(Smi::FromInt(0));
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::FromInt(0));
- set_memento_create_count(Smi::FromInt(0));
- set_memento_found_count(Smi::FromInt(0));
- set_pretenure_decision(Smi::FromInt(0));
+ set_pretenure_data(Smi::FromInt(0));
+ set_pretenure_create_count(Smi::FromInt(0));
set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
SKIP_WRITE_BARRIER);
}
@@ -1324,11 +1505,8 @@ void AllocationSite::Initialize() {
void AllocationSite::MarkZombie() {
ASSERT(!IsZombie());
- set_pretenure_decision(Smi::FromInt(kZombie));
- // Clear all non-smi fields
- set_transition_info(Smi::FromInt(0));
- set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
- SKIP_WRITE_BARRIER);
+ Initialize();
+ set_pretenure_decision(kZombie);
}
@@ -1336,7 +1514,7 @@ void AllocationSite::MarkZombie() {
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
ElementsKind boilerplate_elements_kind) {
- if (FLAG_track_allocation_sites &&
+ if (FLAG_pretenuring_call_new ||
IsFastSmiElementsKind(boilerplate_elements_kind)) {
return TRACK_ALLOCATION_SITE;
}
@@ -1347,9 +1525,9 @@ AllocationSiteMode AllocationSite::GetMode(
AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
ElementsKind to) {
- if (FLAG_track_allocation_sites &&
- IsFastSmiElementsKind(from) &&
- IsMoreGeneralElementsKindTransition(from, to)) {
+ if (FLAG_pretenuring_call_new ||
+ (IsFastSmiElementsKind(from) &&
+ IsMoreGeneralElementsKindTransition(from, to))) {
return TRACK_ALLOCATION_SITE;
}
@@ -1359,7 +1537,9 @@ AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
inline bool AllocationSite::CanTrack(InstanceType type) {
if (FLAG_allocation_site_pretenuring) {
- return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE;
+ return type == JS_ARRAY_TYPE ||
+ type == JS_OBJECT_TYPE ||
+ type < FIRST_NONSTRING_TYPE;
}
return type == JS_ARRAY_TYPE;
}
@@ -1380,47 +1560,94 @@ inline DependentCode::DependencyGroup AllocationSite::ToDependencyGroup(
}
-inline void AllocationSite::IncrementMementoFoundCount() {
- int value = memento_found_count()->value();
- set_memento_found_count(Smi::FromInt(value + 1));
+inline void AllocationSite::set_memento_found_count(int count) {
+ int value = pretenure_data()->value();
+ // Verify that we can count more mementos than we can possibly find in one
+ // new space collection.
+ ASSERT((GetHeap()->MaxSemiSpaceSize() /
+ (StaticVisitorBase::kMinObjectSizeInWords * kPointerSize +
+ AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
+ ASSERT(count < MementoFoundCountBits::kMax);
+ set_pretenure_data(
+ Smi::FromInt(MementoFoundCountBits::update(value, count)),
+ SKIP_WRITE_BARRIER);
}
+inline bool AllocationSite::IncrementMementoFoundCount() {
+ if (IsZombie()) return false;
-inline void AllocationSite::IncrementMementoCreateCount() {
- ASSERT(FLAG_allocation_site_pretenuring);
- int value = memento_create_count()->value();
- set_memento_create_count(Smi::FromInt(value + 1));
+ int value = memento_found_count();
+ set_memento_found_count(value + 1);
+ return memento_found_count() == kPretenureMinimumCreated;
}
-inline bool AllocationSite::DigestPretenuringFeedback() {
- bool decision_made = false;
- if (!PretenuringDecisionMade()) {
- int create_count = memento_create_count()->value();
- if (create_count >= kPretenureMinimumCreated) {
- int found_count = memento_found_count()->value();
- double ratio = static_cast<double>(found_count) / create_count;
- if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSite: %p (created, found, ratio) (%d, %d, %f)\n",
- static_cast<void*>(this), create_count, found_count, ratio);
+inline void AllocationSite::IncrementMementoCreateCount() {
+ ASSERT(FLAG_allocation_site_pretenuring);
+ int value = memento_create_count();
+ set_memento_create_count(value + 1);
+}
+
+
+inline bool AllocationSite::MakePretenureDecision(
+ PretenureDecision current_decision,
+ double ratio,
+ bool maximum_size_scavenge) {
+ // Here we just allow state transitions from undecided or maybe tenure
+ // to don't tenure, maybe tenure, or tenure.
+ if ((current_decision == kUndecided || current_decision == kMaybeTenure)) {
+ if (ratio >= kPretenureRatio) {
+      // We only transition into the tenure state when the semi-space was at
+      // maximum capacity.
+ if (maximum_size_scavenge) {
+ set_deopt_dependent_code(true);
+ set_pretenure_decision(kTenure);
+ // Currently we just need to deopt when we make a state transition to
+ // tenure.
+ return true;
}
- int result = ratio >= kPretenureRatio ? kTenure : kDontTenure;
- set_pretenure_decision(Smi::FromInt(result));
- decision_made = true;
- // TODO(mvstanton): if the decision represents a change, any dependent
- // code registered for pretenuring changes should be deopted.
+ set_pretenure_decision(kMaybeTenure);
+ } else {
+ set_pretenure_decision(kDontTenure);
}
}
+ return false;
+}
+
+
+inline bool AllocationSite::DigestPretenuringFeedback(
+ bool maximum_size_scavenge) {
+ bool deopt = false;
+ int create_count = memento_create_count();
+ int found_count = memento_found_count();
+ bool minimum_mementos_created = create_count >= kPretenureMinimumCreated;
+ double ratio =
+ minimum_mementos_created || FLAG_trace_pretenuring_statistics ?
+ static_cast<double>(found_count) / create_count : 0.0;
+ PretenureDecision current_decision = pretenure_decision();
+
+ if (minimum_mementos_created) {
+ deopt = MakePretenureDecision(
+ current_decision, ratio, maximum_size_scavenge);
+ }
+
+ if (FLAG_trace_pretenuring_statistics) {
+ PrintF(
+ "AllocationSite(%p): (created, found, ratio) (%d, %d, %f) %s => %s\n",
+ static_cast<void*>(this), create_count, found_count, ratio,
+ PretenureDecisionName(current_decision),
+ PretenureDecisionName(pretenure_decision()));
+ }
// Clear feedback calculation fields until the next gc.
- set_memento_found_count(Smi::FromInt(0));
- set_memento_create_count(Smi::FromInt(0));
- return decision_made;
+ set_memento_found_count(0);
+ set_memento_create_count(0);
+ return deopt;
}
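
A standalone sketch of the feedback loop above: memento counts feed a survival ratio, and a site walks kUndecided -> kMaybeTenure -> kTenure (with deopt) or kDontTenure. The types are simplified here; only the 0.85 ratio mirrors V8's kPretenureRatio.

#include <cstdio>

namespace sketch {

enum PretenureDecision { kUndecided, kDontTenure, kMaybeTenure, kTenure };

const double kPretenureRatio = 0.85;

// Returns true when the transition to kTenure requires dependent code to
// be deoptimized, matching the shape of MakePretenureDecision() above.
bool MakePretenureDecision(PretenureDecision* decision, double ratio,
                           bool maximum_size_scavenge) {
  if (*decision == kUndecided || *decision == kMaybeTenure) {
    if (ratio >= kPretenureRatio) {
      // Only commit to tenuring off a full semi-space scavenge.
      if (maximum_size_scavenge) {
        *decision = kTenure;
        return true;
      }
      *decision = kMaybeTenure;
    } else {
      *decision = kDontTenure;
    }
  }
  return false;
}

}  // namespace sketch

int main() {
  sketch::PretenureDecision d = sketch::kUndecided;
  // First GC: 90% of mementos survived, but the semi-space was not full.
  sketch::MakePretenureDecision(&d, 0.9, false);  // d == kMaybeTenure
  // A later GC at maximum semi-space capacity commits: tenure and deopt.
  bool deopt = sketch::MakePretenureDecision(&d, 0.9, true);
  std::printf("decision=%d deopt=%d\n", static_cast<int>(d), deopt);
  return 0;
}
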
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
- object->ValidateElements();
+ JSObject::ValidateElements(object);
ElementsKind elements_kind = object->map()->elements_kind();
if (!IsFastObjectElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
@@ -1432,126 +1659,103 @@ void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
}
-MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
- uint32_t count,
- EnsureElementsMode mode) {
- ElementsKind current_kind = map()->elements_kind();
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Object** objects,
+ uint32_t count,
+ EnsureElementsMode mode) {
+ ElementsKind current_kind = object->map()->elements_kind();
ElementsKind target_kind = current_kind;
- ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
- bool is_holey = IsFastHoleyElementsKind(current_kind);
- if (current_kind == FAST_HOLEY_ELEMENTS) return this;
- Heap* heap = GetHeap();
- Object* the_hole = heap->the_hole_value();
- for (uint32_t i = 0; i < count; ++i) {
- Object* current = *objects++;
- if (current == the_hole) {
- is_holey = true;
- target_kind = GetHoleyElementsKind(target_kind);
- } else if (!current->IsSmi()) {
- if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
- if (IsFastSmiElementsKind(target_kind)) {
- if (is_holey) {
- target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_DOUBLE_ELEMENTS;
+ {
+ DisallowHeapAllocation no_allocation;
+ ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+ bool is_holey = IsFastHoleyElementsKind(current_kind);
+ if (current_kind == FAST_HOLEY_ELEMENTS) return;
+ Heap* heap = object->GetHeap();
+ Object* the_hole = heap->the_hole_value();
+ for (uint32_t i = 0; i < count; ++i) {
+ Object* current = *objects++;
+ if (current == the_hole) {
+ is_holey = true;
+ target_kind = GetHoleyElementsKind(target_kind);
+ } else if (!current->IsSmi()) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
+ if (IsFastSmiElementsKind(target_kind)) {
+ if (is_holey) {
+ target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ }
}
+ } else if (is_holey) {
+ target_kind = FAST_HOLEY_ELEMENTS;
+ break;
+ } else {
+ target_kind = FAST_ELEMENTS;
}
- } else if (is_holey) {
- target_kind = FAST_HOLEY_ELEMENTS;
- break;
- } else {
- target_kind = FAST_ELEMENTS;
}
}
}
-
if (target_kind != current_kind) {
- return TransitionElementsKind(target_kind);
+ TransitionElementsKind(object, target_kind);
}
- return this;
}
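
The scan above only ever widens the elements kind: a hole makes the target kind holey, a non-Smi number widens Smi elements to double, and anything else widens to generic object elements. A minimal standalone model of that lattice; the Value flags stand in for the real tagged-pointer checks.

#include <cstdio>

namespace sketch {

enum Base { SMI, DOUBLE, OBJECT };
struct Kind { Base base; bool holey; };
struct Value { bool is_hole, is_smi, is_number; };

Kind TargetKind(Kind kind, const Value* values, int count) {
  for (int i = 0; i < count; ++i) {
    const Value& v = values[i];
    if (v.is_hole) {
      kind.holey = true;                           // holes force a holey kind
    } else if (!v.is_smi) {
      if (v.is_number) {
        if (kind.base == SMI) kind.base = DOUBLE;  // Smis widen to doubles
      } else {
        kind.base = OBJECT;                        // generic elements
        if (kind.holey) break;  // nothing is wider than holey object elements
      }
    }
  }
  return kind;
}

}  // namespace sketch

int main() {
  sketch::Value vals[] = {{false, true, true},    // a Smi
                          {true, false, false},   // the hole
                          {false, false, true}};  // a heap number
  sketch::Kind start = { sketch::SMI, false };
  sketch::Kind k = sketch::TargetKind(start, vals, 3);
  std::printf("base=%d holey=%d\n", k.base, k.holey);  // DOUBLE, holey
  return 0;
}
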
-MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
- uint32_t length,
- EnsureElementsMode mode) {
- if (elements->map() != GetHeap()->fixed_double_array_map()) {
- ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
- elements->map() == GetHeap()->fixed_cow_array_map());
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Handle<FixedArrayBase> elements,
+ uint32_t length,
+ EnsureElementsMode mode) {
+ Heap* heap = object->GetHeap();
+ if (elements->map() != heap->fixed_double_array_map()) {
+ ASSERT(elements->map() == heap->fixed_array_map() ||
+ elements->map() == heap->fixed_cow_array_map());
if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
- Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
- return EnsureCanContainElements(objects, length, mode);
+ Object** objects =
+ Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
+ EnsureCanContainElements(object, objects, length, mode);
+ return;
}
ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
- } else if (GetElementsKind() == FAST_SMI_ELEMENTS) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(elements);
+ if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
+ TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
+ } else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
for (uint32_t i = 0; i < length; ++i) {
if (double_array->is_the_hole(i)) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
+ return;
}
}
- return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
}
-
- return this;
}
-MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate,
- ElementsKind to_kind) {
- Map* current_map = map();
- ElementsKind from_kind = current_map->elements_kind();
- if (from_kind == to_kind) return current_map;
-
- Context* native_context = isolate->context()->native_context();
- Object* maybe_array_maps = native_context->js_array_maps();
- if (maybe_array_maps->IsFixedArray()) {
- FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
- if (array_maps->get(from_kind) == current_map) {
- Object* maybe_transitioned_map = array_maps->get(to_kind);
- if (maybe_transitioned_map->IsMap()) {
- return Map::cast(maybe_transitioned_map);
- }
- }
- }
-
- return GetElementsTransitionMapSlow(to_kind);
+void JSObject::SetMapAndElements(Handle<JSObject> object,
+ Handle<Map> new_map,
+ Handle<FixedArrayBase> value) {
+ JSObject::MigrateToMap(object, new_map);
+ ASSERT((object->map()->has_fast_smi_or_object_elements() ||
+ (*value == object->GetHeap()->empty_fixed_array())) ==
+ (value->map() == object->GetHeap()->fixed_array_map() ||
+ value->map() == object->GetHeap()->fixed_cow_array_map()));
+ ASSERT((*value == object->GetHeap()->empty_fixed_array()) ||
+ (object->map()->has_fast_double_elements() ==
+ value->IsFixedDoubleArray()));
+ object->set_elements(*value);
}
-void JSObject::set_map_and_elements(Map* new_map,
- FixedArrayBase* value,
- WriteBarrierMode mode) {
- ASSERT(value->HasValidElements());
- if (new_map != NULL) {
- if (mode == UPDATE_WRITE_BARRIER) {
- set_map(new_map);
- } else {
- ASSERT(mode == SKIP_WRITE_BARRIER);
- set_map_no_write_barrier(new_map);
- }
- }
- ASSERT((map()->has_fast_smi_or_object_elements() ||
- (value == GetHeap()->empty_fixed_array())) ==
- (value->map() == GetHeap()->fixed_array_map() ||
- value->map() == GetHeap()->fixed_cow_array_map()));
- ASSERT((value == GetHeap()->empty_fixed_array()) ||
- (map()->has_fast_double_elements() == value->IsFixedDoubleArray()));
+void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kElementsOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
}
-void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
- set_map_and_elements(NULL, value, mode);
-}
-
-
void JSObject::initialize_properties() {
ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
@@ -1559,45 +1763,8 @@ void JSObject::initialize_properties() {
void JSObject::initialize_elements() {
- if (map()->has_fast_smi_or_object_elements() ||
- map()->has_fast_double_elements()) {
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
- } else if (map()->has_external_array_elements()) {
- ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(map());
- ASSERT(!GetHeap()->InNewSpace(empty_array));
- WRITE_FIELD(this, kElementsOffset, empty_array);
- } else {
- UNREACHABLE();
- }
-}
-
-
-MaybeObject* JSObject::ResetElements() {
- if (map()->is_observed()) {
- // Maintain invariant that observed elements are always in dictionary mode.
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe = SeededNumberDictionary::Allocate(GetHeap(), 0);
- if (!maybe->To(&dictionary)) return maybe;
- if (map() == GetHeap()->non_strict_arguments_elements_map()) {
- FixedArray::cast(elements())->set(1, dictionary);
- } else {
- set_elements(dictionary);
- }
- return this;
- }
-
- ElementsKind elements_kind = GetInitialFastElementsKind();
- if (!FLAG_smi_only_arrays) {
- elements_kind = FastSmiToObjectElementsKind(elements_kind);
- }
- MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
- Map* map;
- if (!maybe->To(&map)) return maybe;
- set_map(map);
- initialize_elements();
-
- return this;
+ FixedArrayBase* elements = map()->GetInitialElements();
+ WRITE_FIELD(this, kElementsOffset, elements);
}
@@ -1708,6 +1875,10 @@ int JSObject::GetHeaderSize() {
return JSSet::kSize;
case JS_MAP_TYPE:
return JSMap::kSize;
+ case JS_SET_ITERATOR_TYPE:
+ return JSSetIterator::kSize;
+ case JS_MAP_ITERATOR_TYPE:
+ return JSMapIterator::kSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
@@ -1772,56 +1943,36 @@ void JSObject::SetInternalField(int index, Smi* value) {
}
-MaybeObject* JSObject::FastPropertyAt(Representation representation,
- int index) {
- Object* raw_value = RawFastPropertyAt(index);
- return raw_value->AllocateNewStorageFor(GetHeap(), representation);
-}
-
-
// Access fast-case object properties at index. These routines are needed to
// correctly distinguish between properties stored in-object and properties
// stored in the properties array.
-Object* JSObject::RawFastPropertyAt(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- if (index < 0) {
- int offset = map()->instance_size() + (index * kPointerSize);
- return READ_FIELD(this, offset);
+Object* JSObject::RawFastPropertyAt(FieldIndex index) {
+ if (index.is_inobject()) {
+ return READ_FIELD(this, index.offset());
} else {
- ASSERT(index < properties()->length());
- return properties()->get(index);
+ return properties()->get(index.outobject_array_index());
}
}
-void JSObject::FastPropertyAtPut(int index, Object* value) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- if (index < 0) {
- int offset = map()->instance_size() + (index * kPointerSize);
+void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
+ if (index.is_inobject()) {
+ int offset = index.offset();
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
} else {
- ASSERT(index < properties()->length());
- properties()->set(index, value);
+ properties()->set(index.outobject_array_index(), value);
}
}
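
The FieldIndex type used above replaces the old raw-integer adjustment: a property is addressed either by byte offset inside the object or by index into the out-of-object properties array. A minimal standalone sketch of that split; the factory helpers are illustrative, not V8's API.

#include <cassert>
#include <cstdio>

namespace sketch {

class FieldIndex {
 public:
  static FieldIndex ForInObject(int offset) {
    return FieldIndex(true, offset);
  }
  static FieldIndex ForBackingStore(int index) {
    return FieldIndex(false, index);
  }
  bool is_inobject() const { return is_inobject_; }
  int offset() const { assert(is_inobject_); return value_; }
  int outobject_array_index() const { assert(!is_inobject_); return value_; }

 private:
  FieldIndex(bool inobject, int value)
      : is_inobject_(inobject), value_(value) {}
  bool is_inobject_;
  int value_;  // byte offset if in-object, array index otherwise
};

}  // namespace sketch

int main() {
  sketch::FieldIndex a = sketch::FieldIndex::ForInObject(24);
  sketch::FieldIndex b = sketch::FieldIndex::ForBackingStore(2);
  std::printf("%d %d\n", a.offset(), b.outobject_array_index());  // 24 2
  return 0;
}
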
int JSObject::GetInObjectPropertyOffset(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- return map()->instance_size() + (index * kPointerSize);
+ return map()->GetInObjectPropertyOffset(index);
}
Object* JSObject::InObjectPropertyAt(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- int offset = map()->instance_size() + (index * kPointerSize);
+ int offset = GetInObjectPropertyOffset(index);
return READ_FIELD(this, offset);
}
@@ -1830,9 +1981,7 @@ Object* JSObject::InObjectPropertyAtPut(int index,
Object* value,
WriteBarrierMode mode) {
// Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- int offset = map()->instance_size() + (index * kPointerSize);
+ int offset = GetInObjectPropertyOffset(index);
WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
return value;
@@ -1930,11 +2079,11 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
}
-
void Object::VerifyApiCallResultType() {
#if ENABLE_EXTRA_CHECKS
if (!(IsSmi() ||
IsString() ||
+ IsSymbol() ||
IsSpecObject() ||
IsHeapNumber() ||
IsUndefined() ||
@@ -1948,8 +2097,7 @@ void Object::VerifyApiCallResultType() {
FixedArrayBase* FixedArrayBase::cast(Object* object) {
- ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray() ||
- object->IsConstantPoolArray());
+ ASSERT(object->IsFixedArrayBase());
return reinterpret_cast<FixedArrayBase*>(object);
}
@@ -1960,6 +2108,11 @@ Object* FixedArray::get(int index) {
}
+Handle<Object> FixedArray::get(Handle<FixedArray> array, int index) {
+ return handle(array->get(index), array->GetIsolate());
+}
+
+
bool FixedArray::is_the_hole(int index) {
return get(index) == GetHeap()->the_hole_value();
}
@@ -2016,11 +2169,13 @@ int64_t FixedDoubleArray::get_representation(int index) {
return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
}
-MaybeObject* FixedDoubleArray::get(int index) {
- if (is_the_hole(index)) {
- return GetHeap()->the_hole_value();
+
+Handle<Object> FixedDoubleArray::get(Handle<FixedDoubleArray> array,
+ int index) {
+ if (array->is_the_hole(index)) {
+ return array->GetIsolate()->factory()->the_hole_value();
} else {
- return GetHeap()->NumberFromDouble(get_scalar(index));
+ return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
}
}
@@ -2048,79 +2203,173 @@ bool FixedDoubleArray::is_the_hole(int index) {
}
-SMI_ACCESSORS(ConstantPoolArray, first_ptr_index, kFirstPointerIndexOffset)
-SMI_ACCESSORS(ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
+double* FixedDoubleArray::data_start() {
+ return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
+}
+
+
+void FixedDoubleArray::FillWithHoles(int from, int to) {
+ for (int i = from; i < to; i++) {
+ set_the_hole(i);
+ }
+}
-int ConstantPoolArray::first_int64_index() {
- return 0;
+bool ConstantPoolArray::is_extended_layout() {
+ uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
+ return IsExtendedField::decode(small_layout_1);
+}
+
+
+ConstantPoolArray::LayoutSection ConstantPoolArray::final_section() {
+ return is_extended_layout() ? EXTENDED_SECTION : SMALL_SECTION;
+}
+
+
+int ConstantPoolArray::first_extended_section_index() {
+ ASSERT(is_extended_layout());
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ return TotalCountField::decode(small_layout_2);
}
-int ConstantPoolArray::count_of_int64_entries() {
- return first_ptr_index();
+int ConstantPoolArray::get_extended_section_header_offset() {
+ return RoundUp(SizeFor(NumberOfEntries(this, SMALL_SECTION)), kInt64Size);
+}
+
+
+ConstantPoolArray::WeakObjectState ConstantPoolArray::get_weak_object_state() {
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ return WeakObjectStateField::decode(small_layout_2);
+}
+
+
+void ConstantPoolArray::set_weak_object_state(
+ ConstantPoolArray::WeakObjectState state) {
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ small_layout_2 = WeakObjectStateField::update(small_layout_2, state);
+ WRITE_INT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
+}
+
+
+int ConstantPoolArray::first_index(Type type, LayoutSection section) {
+ int index = 0;
+ if (section == EXTENDED_SECTION) {
+ ASSERT(is_extended_layout());
+ index += first_extended_section_index();
+ }
+
+ for (Type type_iter = FIRST_TYPE; type_iter < type;
+ type_iter = next_type(type_iter)) {
+ index += number_of_entries(type_iter, section);
+ }
+
+ return index;
}
-int ConstantPoolArray::count_of_ptr_entries() {
- return first_int32_index() - first_ptr_index();
+int ConstantPoolArray::last_index(Type type, LayoutSection section) {
+ return first_index(type, section) + number_of_entries(type, section) - 1;
}
-int ConstantPoolArray::count_of_int32_entries() {
- return length() - first_int32_index();
+int ConstantPoolArray::number_of_entries(Type type, LayoutSection section) {
+ if (section == SMALL_SECTION) {
+ uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ switch (type) {
+ case INT64:
+ return Int64CountField::decode(small_layout_1);
+ case CODE_PTR:
+ return CodePtrCountField::decode(small_layout_1);
+ case HEAP_PTR:
+ return HeapPtrCountField::decode(small_layout_1);
+ case INT32:
+ return Int32CountField::decode(small_layout_2);
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ } else {
+ ASSERT(section == EXTENDED_SECTION && is_extended_layout());
+ int offset = get_extended_section_header_offset();
+ switch (type) {
+ case INT64:
+ offset += kExtendedInt64CountOffset;
+ break;
+ case CODE_PTR:
+ offset += kExtendedCodePtrCountOffset;
+ break;
+ case HEAP_PTR:
+ offset += kExtendedHeapPtrCountOffset;
+ break;
+ case INT32:
+ offset += kExtendedInt32CountOffset;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return READ_INT_FIELD(this, offset);
+ }
}
-void ConstantPoolArray::SetEntryCounts(int number_of_int64_entries,
- int number_of_ptr_entries,
- int number_of_int32_entries) {
- set_first_ptr_index(number_of_int64_entries);
- set_first_int32_index(number_of_int64_entries + number_of_ptr_entries);
- set_length(number_of_int64_entries + number_of_ptr_entries +
- number_of_int32_entries);
+ConstantPoolArray::Type ConstantPoolArray::get_type(int index) {
+ LayoutSection section;
+ if (is_extended_layout() && index >= first_extended_section_index()) {
+ section = EXTENDED_SECTION;
+ } else {
+ section = SMALL_SECTION;
+ }
+
+ Type type = FIRST_TYPE;
+ while (index > last_index(type, section)) {
+ type = next_type(type);
+ }
+ ASSERT(type <= LAST_TYPE);
+ return type;
}
int64_t ConstantPoolArray::get_int64_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= 0 && index < first_ptr_index());
+ ASSERT(get_type(index) == INT64);
return READ_INT64_FIELD(this, OffsetOfElementAt(index));
}
+
double ConstantPoolArray::get_int64_entry_as_double(int index) {
STATIC_ASSERT(kDoubleSize == kInt64Size);
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= 0 && index < first_ptr_index());
+ ASSERT(get_type(index) == INT64);
return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
}
-Object* ConstantPoolArray::get_ptr_entry(int index) {
+Address ConstantPoolArray::get_code_ptr_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_ptr_index() && index < first_int32_index());
- return READ_FIELD(this, OffsetOfElementAt(index));
+ ASSERT(get_type(index) == CODE_PTR);
+ return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index)));
}
-int32_t ConstantPoolArray::get_int32_entry(int index) {
+Object* ConstantPoolArray::get_heap_ptr_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int32_index() && index < length());
- return READ_INT32_FIELD(this, OffsetOfElementAt(index));
+ ASSERT(get_type(index) == HEAP_PTR);
+ return READ_FIELD(this, OffsetOfElementAt(index));
}
-void ConstantPoolArray::set(int index, Object* value) {
+int32_t ConstantPoolArray::get_int32_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_ptr_index() && index < first_int32_index());
- WRITE_FIELD(this, OffsetOfElementAt(index), value);
- WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
+ ASSERT(get_type(index) == INT32);
+ return READ_INT32_FIELD(this, OffsetOfElementAt(index));
}
void ConstantPoolArray::set(int index, int64_t value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ ASSERT(get_type(index) == INT64);
WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
}
@@ -2128,18 +2377,122 @@ void ConstantPoolArray::set(int index, int64_t value) {
void ConstantPoolArray::set(int index, double value) {
STATIC_ASSERT(kDoubleSize == kInt64Size);
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ ASSERT(get_type(index) == INT64);
WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
}
+void ConstantPoolArray::set(int index, Address value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(get_type(index) == CODE_PTR);
+ WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value));
+}
+
+
+void ConstantPoolArray::set(int index, Object* value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(get_type(index) == HEAP_PTR);
+ WRITE_FIELD(this, OffsetOfElementAt(index), value);
+ WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
+}
+
+
void ConstantPoolArray::set(int index, int32_t value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= this->first_int32_index() && index < length());
+ ASSERT(get_type(index) == INT32);
WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
}
+void ConstantPoolArray::Init(const NumberOfEntries& small) {
+ uint32_t small_layout_1 =
+ Int64CountField::encode(small.count_of(INT64)) |
+ CodePtrCountField::encode(small.count_of(CODE_PTR)) |
+ HeapPtrCountField::encode(small.count_of(HEAP_PTR)) |
+ IsExtendedField::encode(false);
+ uint32_t small_layout_2 =
+ Int32CountField::encode(small.count_of(INT32)) |
+ TotalCountField::encode(small.total_count()) |
+ WeakObjectStateField::encode(NO_WEAK_OBJECTS);
+ WRITE_UINT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
+ WRITE_UINT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
+ if (kHeaderSize != kFirstEntryOffset) {
+ ASSERT(kFirstEntryOffset - kHeaderSize == kInt32Size);
+ WRITE_UINT32_FIELD(this, kHeaderSize, 0); // Zero out header padding.
+ }
+}
+
+
+void ConstantPoolArray::InitExtended(const NumberOfEntries& small,
+ const NumberOfEntries& extended) {
+ // Initialize small layout fields first.
+ Init(small);
+
+ // Set is_extended_layout field.
+ uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
+ small_layout_1 = IsExtendedField::update(small_layout_1, true);
+ WRITE_INT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
+
+ // Initialize the extended layout fields.
+ int extended_header_offset = get_extended_section_header_offset();
+ WRITE_INT_FIELD(this, extended_header_offset + kExtendedInt64CountOffset,
+ extended.count_of(INT64));
+ WRITE_INT_FIELD(this, extended_header_offset + kExtendedCodePtrCountOffset,
+ extended.count_of(CODE_PTR));
+ WRITE_INT_FIELD(this, extended_header_offset + kExtendedHeapPtrCountOffset,
+ extended.count_of(HEAP_PTR));
+ WRITE_INT_FIELD(this, extended_header_offset + kExtendedInt32CountOffset,
+ extended.count_of(INT32));
+}
+
+
+int ConstantPoolArray::size() {
+ NumberOfEntries small(this, SMALL_SECTION);
+ if (!is_extended_layout()) {
+ return SizeFor(small);
+ } else {
+ NumberOfEntries extended(this, EXTENDED_SECTION);
+ return SizeForExtended(small, extended);
+ }
+}
+
+
+int ConstantPoolArray::length() {
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ int length = TotalCountField::decode(small_layout_2);
+ if (is_extended_layout()) {
+ length += number_of_entries(INT64, EXTENDED_SECTION) +
+ number_of_entries(CODE_PTR, EXTENDED_SECTION) +
+ number_of_entries(HEAP_PTR, EXTENDED_SECTION) +
+ number_of_entries(INT32, EXTENDED_SECTION);
+ }
+ return length;
+}
+
+
+int ConstantPoolArray::Iterator::next_index() {
+ ASSERT(!is_finished());
+ int ret = next_index_++;
+ update_section();
+ return ret;
+}
+
+
+bool ConstantPoolArray::Iterator::is_finished() {
+ return next_index_ > array_->last_index(type_, final_section_);
+}
+
+
+void ConstantPoolArray::Iterator::update_section() {
+ if (next_index_ > array_->last_index(type_, current_section_) &&
+ current_section_ != final_section_) {
+ ASSERT(final_section_ == EXTENDED_SECTION);
+ current_section_ = EXTENDED_SECTION;
+ next_index_ = array_->first_index(type_, EXTENDED_SECTION);
+ }
+}
+
+
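
first_index()/last_index() above rely on entries being grouped by type, in type order, within each layout section, so the first index of a type is just the sum of the counts of the types before it. A standalone sketch of that index arithmetic with assumed per-type counts.

#include <cstdio>

namespace sketch {

enum Type { INT64, CODE_PTR, HEAP_PTR, INT32, NUM_TYPES };

int first_index(const int counts[NUM_TYPES], Type type, int section_base) {
  int index = section_base;
  for (int t = INT64; t < type; ++t) index += counts[t];
  return index;
}

int last_index(const int counts[NUM_TYPES], Type type, int section_base) {
  return first_index(counts, type, section_base) + counts[type] - 1;
}

}  // namespace sketch

int main() {
  int counts[sketch::NUM_TYPES] = {2, 1, 4, 3};  // per-type entry counts
  std::printf("HEAP_PTR occupies [%d, %d]\n",
              sketch::first_index(counts, sketch::HEAP_PTR, 0),
              sketch::last_index(counts, sketch::HEAP_PTR, 0));  // [3, 6]
  return 0;
}
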
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
@@ -2213,8 +2566,10 @@ void FixedArray::set_the_hole(int index) {
}
-double* FixedDoubleArray::data_start() {
- return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
+void FixedArray::FillWithHoles(int from, int to) {
+ for (int i = from; i < to; i++) {
+ set_the_hole(i);
+ }
}
@@ -2363,23 +2718,39 @@ void Map::LookupDescriptor(JSObject* holder,
void Map::LookupTransition(JSObject* holder,
Name* name,
LookupResult* result) {
- if (HasTransitionArray()) {
- TransitionArray* transition_array = transitions();
- int number = transition_array->Search(name);
- if (number != TransitionArray::kNotFound) {
- return result->TransitionResult(
- holder, transition_array->GetTarget(number));
- }
+ int transition_index = this->SearchTransition(name);
+ if (transition_index == TransitionArray::kNotFound) return result->NotFound();
+ result->TransitionResult(holder, this->GetTransition(transition_index));
+}
+
+
+FixedArrayBase* Map::GetInitialElements() {
+ if (has_fast_smi_or_object_elements() ||
+ has_fast_double_elements()) {
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ return GetHeap()->empty_fixed_array();
+ } else if (has_external_array_elements()) {
+ ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(this);
+ ASSERT(!GetHeap()->InNewSpace(empty_array));
+ return empty_array;
+ } else if (has_fixed_typed_array_elements()) {
+ FixedTypedArrayBase* empty_array =
+ GetHeap()->EmptyFixedTypedArrayForMap(this);
+ ASSERT(!GetHeap()->InNewSpace(empty_array));
+ return empty_array;
+ } else if (has_dictionary_elements()) {
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_slow_element_dictionary()));
+ return GetHeap()->empty_slow_element_dictionary();
+ } else {
+ UNREACHABLE();
}
- result->NotFound();
+ return NULL;
}
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToKeyIndex(descriptor_number)));
+ return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
}
@@ -2424,19 +2795,9 @@ void DescriptorArray::SetRepresentation(int descriptor_index,
}
-void DescriptorArray::InitializeRepresentations(Representation representation) {
- int length = number_of_descriptors();
- for (int i = 0; i < length; i++) {
- SetRepresentation(i, representation);
- }
-}
-
-
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToValueIndex(descriptor_number)));
+ return RawFieldOfElementAt(ToValueIndex(descriptor_number));
}
@@ -2446,6 +2807,11 @@ Object* DescriptorArray::GetValue(int descriptor_number) {
}
+void DescriptorArray::SetValue(int descriptor_index, Object* value) {
+ set(ToValueIndex(descriptor_index), value);
+}
+
+
PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
Object* details = get(ToDetailsIndex(descriptor_number));
@@ -2464,6 +2830,12 @@ int DescriptorArray::GetFieldIndex(int descriptor_number) {
}
+HeapType* DescriptorArray::GetFieldType(int descriptor_number) {
+ ASSERT(GetDetails(descriptor_number).type() == FIELD);
+ return HeapType::cast(GetValue(descriptor_number));
+}
+
+
Object* DescriptorArray::GetConstant(int descriptor_number) {
return GetValue(descriptor_number);
}
@@ -2483,8 +2855,8 @@ AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
- desc->Init(GetKey(descriptor_number),
- GetValue(descriptor_number),
+ desc->Init(handle(GetKey(descriptor_number), GetIsolate()),
+ handle(GetValue(descriptor_number), GetIsolate()),
GetDetails(descriptor_number));
}
@@ -2497,10 +2869,10 @@ void DescriptorArray::Set(int descriptor_number,
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
- desc->GetKey());
+ *desc->GetKey());
NoIncrementalWriteBarrierSet(this,
ToValueIndex(descriptor_number),
- desc->GetValue());
+ *desc->GetValue());
NoIncrementalWriteBarrierSet(this,
ToDetailsIndex(descriptor_number),
desc->GetDetails().AsSmi());
@@ -2511,14 +2883,15 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- set(ToKeyIndex(descriptor_number), desc->GetKey());
- set(ToValueIndex(descriptor_number), desc->GetValue());
+ set(ToKeyIndex(descriptor_number), *desc->GetKey());
+ set(ToValueIndex(descriptor_number), *desc->GetValue());
set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
}
void DescriptorArray::Append(Descriptor* desc,
const WhitenessWitness& witness) {
+ DisallowHeapAllocation no_gc;
int descriptor_number = number_of_descriptors();
SetNumberOfDescriptors(descriptor_number + 1);
Set(descriptor_number, desc, witness);
@@ -2538,6 +2911,7 @@ void DescriptorArray::Append(Descriptor* desc,
void DescriptorArray::Append(Descriptor* desc) {
+ DisallowHeapAllocation no_gc;
int descriptor_number = number_of_descriptors();
SetNumberOfDescriptors(descriptor_number + 1);
Set(descriptor_number, desc);
@@ -2563,10 +2937,11 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
}
-DescriptorArray::WhitenessWitness::WhitenessWitness(FixedArray* array)
+DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
: marking_(array->GetHeap()->incremental_marking()) {
marking_->EnterNoMarkingScope();
- ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
+ ASSERT(!marking_->IsMarking() ||
+ Marking::Color(array) == Marking::WHITE_OBJECT);
}
@@ -2575,8 +2950,8 @@ DescriptorArray::WhitenessWitness::~WhitenessWitness() {
}
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::ComputeCapacity(int at_least_space_for) {
+template<typename Derived, typename Shape, typename Key>
+int HashTable<Derived, Shape, Key>::ComputeCapacity(int at_least_space_for) {
const int kMinCapacity = 32;
int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
if (capacity < kMinCapacity) {
@@ -2586,17 +2961,17 @@ int HashTable<Shape, Key>::ComputeCapacity(int at_least_space_for) {
}
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::FindEntry(Key key) {
+template<typename Derived, typename Shape, typename Key>
+int HashTable<Derived, Shape, Key>::FindEntry(Key key) {
return FindEntry(GetIsolate(), key);
}
// Find entry for key otherwise return kNotFound.
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
+template<typename Derived, typename Shape, typename Key>
+int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key) {
uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(HashTable<Shape, Key>::Hash(key), capacity);
+ uint32_t entry = FirstProbe(HashTable::Hash(key), capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
while (true) {
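
FindEntry() above probes an open-addressed table whose capacity is a power of two, so the probe helpers can mask instead of taking a modulus; the probe offset grows by the triangular numbers, which visits every slot exactly once. A standalone sketch mirroring V8's FirstProbe()/NextProbe() helpers, which this hunk does not show.

#include <cstdint>
#include <cstdio>

namespace sketch {

inline uint32_t FirstProbe(uint32_t hash, uint32_t capacity) {
  return hash & (capacity - 1);  // capacity is always a power of two
}

inline uint32_t NextProbe(uint32_t last, uint32_t number, uint32_t capacity) {
  return (last + number) & (capacity - 1);  // quadratic (triangular) step
}

}  // namespace sketch

int main() {
  const uint32_t capacity = 8;
  uint32_t entry = sketch::FirstProbe(0x9e3779b9u, capacity);
  for (uint32_t count = 1; count <= 4; ++count) {
    std::printf("probe %u -> slot %u\n", count - 1, entry);
    entry = sketch::NextProbe(entry, count, capacity);
  }
  return 0;
}
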
@@ -2638,12 +3013,12 @@ void SeededNumberDictionary::set_requires_slow_elements() {
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(ConstantPoolArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
-CAST_ACCESSOR(TypeFeedbackCells)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
@@ -2689,41 +3064,56 @@ CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSFunctionProxy)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSMap)
+CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(ExternalArray)
-CAST_ACCESSOR(ExternalByteArray)
-CAST_ACCESSOR(ExternalUnsignedByteArray)
-CAST_ACCESSOR(ExternalShortArray)
-CAST_ACCESSOR(ExternalUnsignedShortArray)
-CAST_ACCESSOR(ExternalIntArray)
-CAST_ACCESSOR(ExternalUnsignedIntArray)
-CAST_ACCESSOR(ExternalFloatArray)
-CAST_ACCESSOR(ExternalDoubleArray)
-CAST_ACCESSOR(ExternalPixelArray)
+CAST_ACCESSOR(ExternalInt8Array)
+CAST_ACCESSOR(ExternalUint8Array)
+CAST_ACCESSOR(ExternalInt16Array)
+CAST_ACCESSOR(ExternalUint16Array)
+CAST_ACCESSOR(ExternalInt32Array)
+CAST_ACCESSOR(ExternalUint32Array)
+CAST_ACCESSOR(ExternalFloat32Array)
+CAST_ACCESSOR(ExternalFloat64Array)
+CAST_ACCESSOR(ExternalUint8ClampedArray)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(AccessorInfo)
+template <class Traits>
+FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
+ SLOW_ASSERT(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
+ return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+}
+
#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
STRUCT_LIST(MAKE_STRUCT_CAST)
#undef MAKE_STRUCT_CAST
-template <typename Shape, typename Key>
-HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
+template <typename Derived, typename Shape, typename Key>
+HashTable<Derived, Shape, Key>*
+HashTable<Derived, Shape, Key>::cast(Object* obj) {
ASSERT(obj->IsHashTable());
return reinterpret_cast<HashTable*>(obj);
}
SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+
SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
+NOBARRIER_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
uint32_t Name::hash_field() {
@@ -2749,6 +3139,17 @@ bool Name::Equals(Name* other) {
}
+bool Name::Equals(Handle<Name> one, Handle<Name> two) {
+ if (one.is_identical_to(two)) return true;
+ if ((one->IsInternalizedString() && two->IsInternalizedString()) ||
+ one->IsSymbol() || two->IsSymbol()) {
+ return false;
+ }
+ return String::SlowEquals(Handle<String>::cast(one),
+ Handle<String>::cast(two));
+}
+
+
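
Name::Equals() above is built on two fast paths: identical handles are trivially equal, and two distinct internalized strings (or any symbol) can never be equal, so only plain strings fall through to a character-wise comparison. A standalone model with ordinary C strings standing in for heap strings.

#include <cstdio>
#include <cstring>

namespace sketch {

struct Name {
  const char* chars;  // NULL stands in for a symbol
  bool internalized;  // deduplicated: pointer identity implies equality
};

bool Equals(const Name* one, const Name* two) {
  if (one == two) return true;  // identical objects
  bool symbol = (one->chars == NULL) || (two->chars == NULL);
  if ((one->internalized && two->internalized) || symbol) {
    return false;  // distinct internalized strings/symbols are never equal
  }
  return std::strcmp(one->chars, two->chars) == 0;  // slow path
}

}  // namespace sketch

int main() {
  sketch::Name a = {"foo", false};
  sketch::Name b = {"foo", false};
  std::printf("%d %d\n", sketch::Equals(&a, &a), sketch::Equals(&a, &b));
  return 0;
}
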
ACCESSORS(Symbol, name, Object, kNameOffset)
ACCESSORS(Symbol, flags, Smi, kFlagsOffset)
BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
@@ -2763,19 +3164,20 @@ bool String::Equals(String* other) {
}
-MaybeObject* String::TryFlatten(PretenureFlag pretenure) {
- if (!StringShape(this).IsCons()) return this;
- ConsString* cons = ConsString::cast(this);
- if (cons->IsFlat()) return cons->first();
- return SlowTryFlatten(pretenure);
+bool String::Equals(Handle<String> one, Handle<String> two) {
+ if (one.is_identical_to(two)) return true;
+ if (one->IsInternalizedString() && two->IsInternalizedString()) {
+ return false;
+ }
+ return SlowEquals(one, two);
}
-String* String::TryFlattenGetString(PretenureFlag pretenure) {
- MaybeObject* flat = TryFlatten(pretenure);
- Object* successfully_flattened;
- if (!flat->ToObject(&successfully_flattened)) return this;
- return String::cast(successfully_flattened);
+Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) {
+ if (!string->IsConsString()) return string;
+ Handle<ConsString> cons = Handle<ConsString>::cast(string);
+ if (cons->IsFlat()) return handle(cons->first());
+ return SlowFlatten(cons, pretenure);
}
@@ -2832,96 +3234,60 @@ String* String::GetUnderlying() {
}
-template<class Visitor, class ConsOp>
-void String::Visit(
- String* string,
- unsigned offset,
- Visitor& visitor,
- ConsOp& cons_op,
- int32_t type,
- unsigned length) {
- ASSERT(length == static_cast<unsigned>(string->length()));
+template<class Visitor>
+ConsString* String::VisitFlat(Visitor* visitor,
+ String* string,
+ const int offset) {
+ int slice_offset = offset;
+ const int length = string->length();
ASSERT(offset <= length);
- unsigned slice_offset = offset;
while (true) {
- ASSERT(type == string->map()->instance_type());
-
+ int32_t type = string->map()->instance_type();
switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
case kSeqStringTag | kOneByteStringTag:
- visitor.VisitOneByteString(
+ visitor->VisitOneByteString(
SeqOneByteString::cast(string)->GetChars() + slice_offset,
length - offset);
- return;
+ return NULL;
case kSeqStringTag | kTwoByteStringTag:
- visitor.VisitTwoByteString(
+ visitor->VisitTwoByteString(
SeqTwoByteString::cast(string)->GetChars() + slice_offset,
length - offset);
- return;
+ return NULL;
case kExternalStringTag | kOneByteStringTag:
- visitor.VisitOneByteString(
+ visitor->VisitOneByteString(
ExternalAsciiString::cast(string)->GetChars() + slice_offset,
length - offset);
- return;
+ return NULL;
case kExternalStringTag | kTwoByteStringTag:
- visitor.VisitTwoByteString(
+ visitor->VisitTwoByteString(
ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
length - offset);
- return;
+ return NULL;
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag: {
SlicedString* slicedString = SlicedString::cast(string);
slice_offset += slicedString->offset();
string = slicedString->parent();
- type = string->map()->instance_type();
continue;
}
case kConsStringTag | kOneByteStringTag:
case kConsStringTag | kTwoByteStringTag:
- string = cons_op.Operate(string, &offset, &type, &length);
- if (string == NULL) return;
- slice_offset = offset;
- ASSERT(length == static_cast<unsigned>(string->length()));
- continue;
+ return ConsString::cast(string);
default:
UNREACHABLE();
- return;
+ return NULL;
}
}
}
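
VisitFlat() above feeds the flat character payload straight to the visitor and returns non-NULL only when it hits an unflattened ConsString, which the caller then hands to a ConsStringIteratorOp (see StringCharacterStream::Reset below). A minimal visitor satisfying that contract; the byte counting is purely illustrative.

#include <cstdio>

namespace sketch {

struct PayloadSizeVisitor {
  int bytes;
  void VisitOneByteString(const unsigned char* chars, int length) {
    (void)chars;
    bytes += length;  // one byte per character
  }
  void VisitTwoByteString(const unsigned short* chars, int length) {
    (void)chars;
    bytes += 2 * length;  // two bytes per character
  }
};

}  // namespace sketch

int main() {
  sketch::PayloadSizeVisitor visitor = {0};
  const unsigned char ascii[] = "hello";
  visitor.VisitOneByteString(ascii, 5);
  std::printf("payload bytes: %d\n", visitor.bytes);  // 5
  return 0;
}
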
-// TODO(dcarney): Remove this class after conversion to VisitFlat.
-class ConsStringCaptureOp {
- public:
- inline ConsStringCaptureOp() : cons_string_(NULL) {}
- inline String* Operate(String* string, unsigned*, int32_t*, unsigned*) {
- cons_string_ = ConsString::cast(string);
- return NULL;
- }
- ConsString* cons_string_;
-};
-
-
-template<class Visitor>
-ConsString* String::VisitFlat(Visitor* visitor,
- String* string,
- int offset,
- int length,
- int32_t type) {
- ASSERT(length >= 0 && length == string->length());
- ASSERT(offset >= 0 && offset <= length);
- ConsStringCaptureOp op;
- Visit(string, offset, *visitor, op, type, static_cast<unsigned>(length));
- return op.cons_string_;
-}
-
-
uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
ASSERT(index >= 0 && index < length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -3045,6 +3411,7 @@ void ExternalAsciiString::update_data_cache() {
void ExternalAsciiString::set_resource(
const ExternalAsciiString::Resource* resource) {
+ ASSERT(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
*reinterpret_cast<const Resource**>(
FIELD_ADDR(this, kResourceOffset)) = resource;
if (resource != NULL) update_data_cache();
@@ -3100,12 +3467,7 @@ const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
}
-String* ConsStringNullOp::Operate(String*, unsigned*, int32_t*, unsigned*) {
- return NULL;
-}
-
-
-unsigned ConsStringIteratorOp::OffsetForDepth(unsigned depth) {
+int ConsStringIteratorOp::OffsetForDepth(int depth) {
return depth & kDepthMask;
}
@@ -3133,45 +3495,9 @@ void ConsStringIteratorOp::Pop() {
}
-bool ConsStringIteratorOp::HasMore() {
- return depth_ != 0;
-}
-
-
-void ConsStringIteratorOp::Reset() {
- depth_ = 0;
-}
-
-
-String* ConsStringIteratorOp::ContinueOperation(int32_t* type_out,
- unsigned* length_out) {
- bool blew_stack = false;
- String* string = NextLeaf(&blew_stack, type_out, length_out);
- // String found.
- if (string != NULL) {
- // Verify output.
- ASSERT(*length_out == static_cast<unsigned>(string->length()));
- ASSERT(*type_out == string->map()->instance_type());
- return string;
- }
- // Traversal complete.
- if (!blew_stack) return NULL;
- // Restart search from root.
- unsigned offset_out;
- string = Search(&offset_out, type_out, length_out);
- // Verify output.
- ASSERT(string == NULL || offset_out == 0);
- ASSERT(string == NULL ||
- *length_out == static_cast<unsigned>(string->length()));
- ASSERT(string == NULL || *type_out == string->map()->instance_type());
- return string;
-}
-
-
uint16_t StringCharacterStream::GetNext() {
ASSERT(buffer8_ != NULL && end_ != NULL);
// Advance cursor if needed.
- // TODO(dcarney): Ensure uses of the api call HasMore first and avoid this.
if (buffer8_ == end_) HasMore();
ASSERT(buffer8_ < end_);
return is_one_byte_ ? *buffer8_++ : *buffer16_++;
@@ -3180,41 +3506,39 @@ uint16_t StringCharacterStream::GetNext() {
StringCharacterStream::StringCharacterStream(String* string,
ConsStringIteratorOp* op,
- unsigned offset)
+ int offset)
: is_one_byte_(false),
op_(op) {
Reset(string, offset);
}
-void StringCharacterStream::Reset(String* string, unsigned offset) {
- op_->Reset();
+void StringCharacterStream::Reset(String* string, int offset) {
buffer8_ = NULL;
end_ = NULL;
- int32_t type = string->map()->instance_type();
- unsigned length = string->length();
- String::Visit(string, offset, *this, *op_, type, length);
+ ConsString* cons_string = String::VisitFlat(this, string, offset);
+ op_->Reset(cons_string, offset);
+ if (cons_string != NULL) {
+ string = op_->Next(&offset);
+ if (string != NULL) String::VisitFlat(this, string, offset);
+ }
}
bool StringCharacterStream::HasMore() {
if (buffer8_ != end_) return true;
- if (!op_->HasMore()) return false;
- unsigned length;
- int32_t type;
- String* string = op_->ContinueOperation(&type, &length);
+ int offset;
+ String* string = op_->Next(&offset);
+ ASSERT_EQ(offset, 0);
if (string == NULL) return false;
- ASSERT(!string->IsConsString());
- ASSERT(string->length() != 0);
- ConsStringNullOp null_op;
- String::Visit(string, 0, *this, null_op, type, length);
+ String::VisitFlat(this, string);
ASSERT(buffer8_ != end_);
return true;
}
void StringCharacterStream::VisitOneByteString(
- const uint8_t* chars, unsigned length) {
+ const uint8_t* chars, int length) {
is_one_byte_ = true;
buffer8_ = chars;
end_ = chars + length;
@@ -3222,7 +3546,7 @@ void StringCharacterStream::VisitOneByteString(
void StringCharacterStream::VisitTwoByteString(
- const uint16_t* chars, unsigned length) {
+ const uint16_t* chars, int length) {
is_one_byte_ = false;
buffer16_ = chars;
end_ = reinterpret_cast<const uint8_t*>(chars + length);
@@ -3237,7 +3561,7 @@ void JSFunctionResultCache::MakeZeroSize() {
void JSFunctionResultCache::Clear() {
int cache_size = size();
- Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
+ Object** entries_start = RawFieldOfElementAt(kEntriesIndex);
MemsetPointer(entries_start,
GetHeap()->the_hole_value(),
cache_size - kEntriesIndex);
@@ -3294,26 +3618,29 @@ Address ByteArray::GetDataStartAddress() {
}
-uint8_t* ExternalPixelArray::external_pixel_pointer() {
+uint8_t* ExternalUint8ClampedArray::external_uint8_clamped_pointer() {
return reinterpret_cast<uint8_t*>(external_pointer());
}
-uint8_t ExternalPixelArray::get_scalar(int index) {
+uint8_t ExternalUint8ClampedArray::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pixel_pointer();
+ uint8_t* ptr = external_uint8_clamped_pointer();
return ptr[index];
}
-MaybeObject* ExternalPixelArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
+Handle<Object> ExternalUint8ClampedArray::get(
+ Handle<ExternalUint8ClampedArray> array,
+ int index) {
+ return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
+ array->GetIsolate());
}
-void ExternalPixelArray::set(int index, uint8_t value) {
+void ExternalUint8ClampedArray::set(int index, uint8_t value) {
ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pixel_pointer();
+ uint8_t* ptr = external_uint8_clamped_pointer();
ptr[index] = value;
}
@@ -3330,158 +3657,392 @@ void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) {
}
-int8_t ExternalByteArray::get_scalar(int index) {
+int8_t ExternalInt8Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
int8_t* ptr = static_cast<int8_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalByteArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
+Handle<Object> ExternalInt8Array::get(Handle<ExternalInt8Array> array,
+ int index) {
+ return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
+ array->GetIsolate());
}
-void ExternalByteArray::set(int index, int8_t value) {
+void ExternalInt8Array::set(int index, int8_t value) {
ASSERT((index >= 0) && (index < this->length()));
int8_t* ptr = static_cast<int8_t*>(external_pointer());
ptr[index] = value;
}
-uint8_t ExternalUnsignedByteArray::get_scalar(int index) {
+uint8_t ExternalUint8Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalUnsignedByteArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
+Handle<Object> ExternalUint8Array::get(Handle<ExternalUint8Array> array,
+ int index) {
+ return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
+ array->GetIsolate());
}
-void ExternalUnsignedByteArray::set(int index, uint8_t value) {
+void ExternalUint8Array::set(int index, uint8_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
ptr[index] = value;
}
-int16_t ExternalShortArray::get_scalar(int index) {
+int16_t ExternalInt16Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
int16_t* ptr = static_cast<int16_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalShortArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
+Handle<Object> ExternalInt16Array::get(Handle<ExternalInt16Array> array,
+ int index) {
+ return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
+ array->GetIsolate());
}
-void ExternalShortArray::set(int index, int16_t value) {
+void ExternalInt16Array::set(int index, int16_t value) {
ASSERT((index >= 0) && (index < this->length()));
int16_t* ptr = static_cast<int16_t*>(external_pointer());
ptr[index] = value;
}
-uint16_t ExternalUnsignedShortArray::get_scalar(int index) {
+uint16_t ExternalUint16Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalUnsignedShortArray::get(int index) {
- return Smi::FromInt(static_cast<int>(get_scalar(index)));
+Handle<Object> ExternalUint16Array::get(Handle<ExternalUint16Array> array,
+ int index) {
+ return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
+ array->GetIsolate());
}
-void ExternalUnsignedShortArray::set(int index, uint16_t value) {
+void ExternalUint16Array::set(int index, uint16_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
ptr[index] = value;
}
-int32_t ExternalIntArray::get_scalar(int index) {
+int32_t ExternalInt32Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
int32_t* ptr = static_cast<int32_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalIntArray::get(int index) {
- return GetHeap()->NumberFromInt32(get_scalar(index));
+Handle<Object> ExternalInt32Array::get(Handle<ExternalInt32Array> array,
+ int index) {
+ return array->GetIsolate()->factory()->
+ NewNumberFromInt(array->get_scalar(index));
}
-void ExternalIntArray::set(int index, int32_t value) {
+void ExternalInt32Array::set(int index, int32_t value) {
ASSERT((index >= 0) && (index < this->length()));
int32_t* ptr = static_cast<int32_t*>(external_pointer());
ptr[index] = value;
}
-uint32_t ExternalUnsignedIntArray::get_scalar(int index) {
+uint32_t ExternalUint32Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalUnsignedIntArray::get(int index) {
- return GetHeap()->NumberFromUint32(get_scalar(index));
+Handle<Object> ExternalUint32Array::get(Handle<ExternalUint32Array> array,
+ int index) {
+ return array->GetIsolate()->factory()->
+ NewNumberFromUint(array->get_scalar(index));
}
-void ExternalUnsignedIntArray::set(int index, uint32_t value) {
+void ExternalUint32Array::set(int index, uint32_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
ptr[index] = value;
}
-float ExternalFloatArray::get_scalar(int index) {
+float ExternalFloat32Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
float* ptr = static_cast<float*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalFloatArray::get(int index) {
- return GetHeap()->NumberFromDouble(get_scalar(index));
+Handle<Object> ExternalFloat32Array::get(Handle<ExternalFloat32Array> array,
+ int index) {
+ return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
}
-void ExternalFloatArray::set(int index, float value) {
+void ExternalFloat32Array::set(int index, float value) {
ASSERT((index >= 0) && (index < this->length()));
float* ptr = static_cast<float*>(external_pointer());
ptr[index] = value;
}
-double ExternalDoubleArray::get_scalar(int index) {
+double ExternalFloat64Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
double* ptr = static_cast<double*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalDoubleArray::get(int index) {
- return GetHeap()->NumberFromDouble(get_scalar(index));
+Handle<Object> ExternalFloat64Array::get(Handle<ExternalFloat64Array> array,
+ int index) {
+ return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
}
-void ExternalDoubleArray::set(int index, double value) {
+void ExternalFloat64Array::set(int index, double value) {
ASSERT((index >= 0) && (index < this->length()));
double* ptr = static_cast<double*>(external_pointer());
ptr[index] = value;
}
+void* FixedTypedArrayBase::DataPtr() {
+ return FIELD_ADDR(this, kDataOffset);
+}
+
+
+int FixedTypedArrayBase::DataSize(InstanceType type) {
+ int element_size;
+ switch (type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ element_size = size; \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ return length() * element_size;
+}
+
+
+int FixedTypedArrayBase::DataSize() {
+ return DataSize(map()->instance_type());
+}
+
+
+int FixedTypedArrayBase::size() {
+ return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
+}
+
+
+int FixedTypedArrayBase::TypedArraySize(InstanceType type) {
+ return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type));
+}
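
TypedArraySize() above is header-plus-payload rounded up to pointer alignment. A standalone sketch with assumed 64-bit constants; kDataOffset in V8 depends on the real object header, so the value here is illustrative.

#include <cstdio>

namespace sketch {

const int kPointerSize = 8;  // assumes a 64-bit target
const int kDataOffset = 16;  // assumed header: map pointer + length field

int ObjectPointerAlign(int value) {
  return (value + kPointerSize - 1) & ~(kPointerSize - 1);
}

int TypedArraySize(int length, int element_size) {
  return ObjectPointerAlign(kDataOffset + length * element_size);
}

}  // namespace sketch

int main() {
  // 10 x float32 = 40 data bytes; 16 + 40 = 56, already 8-byte aligned.
  std::printf("%d\n", sketch::TypedArraySize(10, 4));  // 56
  return 0;
}
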
+
+
+uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
+
+
+uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; }
+
+
+int8_t Int8ArrayTraits::defaultValue() { return 0; }
+
+
+uint16_t Uint16ArrayTraits::defaultValue() { return 0; }
+
+
+int16_t Int16ArrayTraits::defaultValue() { return 0; }
+
+
+uint32_t Uint32ArrayTraits::defaultValue() { return 0; }
+
+
+int32_t Int32ArrayTraits::defaultValue() { return 0; }
+
+
+float Float32ArrayTraits::defaultValue() {
+ return static_cast<float>(OS::nan_value());
+}
+
+
+double Float64ArrayTraits::defaultValue() { return OS::nan_value(); }
+
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
+ ASSERT((index >= 0) && (index < this->length()));
+ ElementType* ptr = reinterpret_cast<ElementType*>(
+ FIELD_ADDR(this, kDataOffset));
+ return ptr[index];
+}
+
+
+template<> inline
+FixedTypedArray<Float64ArrayTraits>::ElementType
+ FixedTypedArray<Float64ArrayTraits>::get_scalar(int index) {
+ ASSERT((index >= 0) && (index < this->length()));
+ return READ_DOUBLE_FIELD(this, ElementOffset(index));
+}
+
+
+template <class Traits>
+void FixedTypedArray<Traits>::set(int index, ElementType value) {
+ ASSERT((index >= 0) && (index < this->length()));
+ ElementType* ptr = reinterpret_cast<ElementType*>(
+ FIELD_ADDR(this, kDataOffset));
+ ptr[index] = value;
+}
+
+
+template<> inline
+void FixedTypedArray<Float64ArrayTraits>::set(
+ int index, Float64ArrayTraits::ElementType value) {
+ ASSERT((index >= 0) && (index < this->length()));
+ WRITE_DOUBLE_FIELD(this, ElementOffset(index), value);
+}
+
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from_int(int value) {
+ return static_cast<ElementType>(value);
+}
+
+
+template <> inline
+uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from_int(int value) {
+ if (value < 0) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from_double(
+ double value) {
+ return static_cast<ElementType>(DoubleToInt32(value));
+}
+
+
+template<> inline
+uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from_double(double value) {
+ if (value < 0) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(lrint(value));
+}
+
+
+template<> inline
+float FixedTypedArray<Float32ArrayTraits>::from_double(double value) {
+ return static_cast<float>(value);
+}
+
+
+template<> inline
+double FixedTypedArray<Float64ArrayTraits>::from_double(double value) {
+ return value;
+}
+
+
+template <class Traits>
+Handle<Object> FixedTypedArray<Traits>::get(
+ Handle<FixedTypedArray<Traits> > array,
+ int index) {
+ return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
+}
+
+
+template <class Traits>
+Handle<Object> FixedTypedArray<Traits>::SetValue(
+ Handle<FixedTypedArray<Traits> > array,
+ uint32_t index,
+ Handle<Object> value) {
+ ElementType cast_value = Traits::defaultValue();
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsSmi()) {
+ int int_value = Handle<Smi>::cast(value)->value();
+ cast_value = from_int(int_value);
+ } else if (value->IsHeapNumber()) {
+ double double_value = Handle<HeapNumber>::cast(value)->value();
+ cast_value = from_double(double_value);
+ } else {
+ // Clamp undefined to the default value. All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return Traits::ToHandle(array->GetIsolate(), cast_value);
+}
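
The Uint8Clamped specializations above differ from the default conversions in two ways: both ints and doubles clamp to [0, 255], and doubles round to nearest via lrint() rather than truncating. A standalone sketch of just those two conversions.

#include <cmath>
#include <cstdio>
#include <stdint.h>

namespace sketch {

uint8_t ClampedFromInt(int value) {
  if (value < 0) return 0;
  if (value > 0xFF) return 0xFF;
  return static_cast<uint8_t>(value);
}

uint8_t ClampedFromDouble(double value) {
  if (value < 0) return 0;
  if (value > 0xFF) return 0xFF;
  return static_cast<uint8_t>(lrint(value));  // round to nearest, not truncate
}

}  // namespace sketch

int main() {
  std::printf("%u %u %u\n",
              static_cast<unsigned>(sketch::ClampedFromInt(300)),       // 255
              static_cast<unsigned>(sketch::ClampedFromDouble(-4.2)),   // 0
              static_cast<unsigned>(sketch::ClampedFromDouble(127.6))); // 128
  return 0;
}
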
+
+
+Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+
+Handle<Object> Uint8ClampedArrayTraits::ToHandle(Isolate* isolate,
+ uint8_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+
+Handle<Object> Int8ArrayTraits::ToHandle(Isolate* isolate, int8_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+
+Handle<Object> Uint16ArrayTraits::ToHandle(Isolate* isolate, uint16_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+
+Handle<Object> Int16ArrayTraits::ToHandle(Isolate* isolate, int16_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+
+Handle<Object> Uint32ArrayTraits::ToHandle(Isolate* isolate, uint32_t scalar) {
+ return isolate->factory()->NewNumberFromUint(scalar);
+}
+
+
+Handle<Object> Int32ArrayTraits::ToHandle(Isolate* isolate, int32_t scalar) {
+ return isolate->factory()->NewNumberFromInt(scalar);
+}
+
+
+Handle<Object> Float32ArrayTraits::ToHandle(Isolate* isolate, float scalar) {
+ return isolate->factory()->NewNumber(scalar);
+}
+
+
+Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
+ return isolate->factory()->NewNumber(scalar);
+}
+
+
int Map::visitor_id() {
return READ_BYTE_FIELD(this, kVisitorIdOffset);
}
@@ -3494,7 +4055,8 @@ void Map::set_visitor_id(int id) {
int Map::instance_size() {
- return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
+ return NOBARRIER_READ_BYTE_FIELD(
+ this, kInstanceSizeOffset) << kPointerSizeLog2;
}
@@ -3508,11 +4070,19 @@ int Map::pre_allocated_property_fields() {
}
+int Map::GetInObjectPropertyOffset(int index) {
+ // Adjust for the number of properties stored in the object.
+ index -= inobject_properties();
+ ASSERT(index <= 0);
+ return instance_size() + (index * kPointerSize);
+}
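
GetInObjectPropertyOffset() above rebases the property index against inobject_properties(), yielding a non-positive word index counted back from the end of the instance. A standalone sketch with an assumed 64-bit pointer size and illustrative layout numbers.

#include <cassert>
#include <cstdio>

namespace sketch {

const int kPointerSize = 8;  // assumes a 64-bit target

int GetInObjectPropertyOffset(int instance_size, int inobject_properties,
                              int index) {
  index -= inobject_properties;  // rebase: index is now <= 0
  assert(index <= 0);
  return instance_size + index * kPointerSize;
}

}  // namespace sketch

int main() {
  // A 64-byte instance whose last 4 words hold in-object properties:
  // property 0 lives at byte offset 64 - 4 * 8 = 32.
  std::printf("%d\n", sketch::GetInObjectPropertyOffset(64, 4, 0));  // 32
  return 0;
}
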
+
+
int HeapObject::SizeFromMap(Map* map) {
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
// Only inline the most frequent cases.
- int instance_type = static_cast<int>(map->instance_type());
+ InstanceType instance_type = map->instance_type();
if (instance_type == FIXED_ARRAY_TYPE) {
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
@@ -3525,7 +4095,7 @@ int HeapObject::SizeFromMap(Map* map) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
}
if (instance_type == FREE_SPACE_TYPE) {
- return reinterpret_cast<FreeSpace*>(this)->size();
+ return reinterpret_cast<FreeSpace*>(this)->nobarrier_size();
}
if (instance_type == STRING_TYPE ||
instance_type == INTERNALIZED_STRING_TYPE) {
@@ -3537,10 +4107,12 @@ int HeapObject::SizeFromMap(Map* map) {
reinterpret_cast<FixedDoubleArray*>(this)->length());
}
if (instance_type == CONSTANT_POOL_ARRAY_TYPE) {
- return ConstantPoolArray::SizeFor(
- reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(),
- reinterpret_cast<ConstantPoolArray*>(this)->count_of_ptr_entries(),
- reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries());
+ return reinterpret_cast<ConstantPoolArray*>(this)->size();
+ }
+ if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
+ instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
+ return reinterpret_cast<FixedTypedArrayBase*>(
+ this)->TypedArraySize(instance_type);
}
ASSERT(instance_type == CODE_TYPE);
return reinterpret_cast<Code*>(this)->CodeSize();
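
The new FIRST_/LAST_FIXED_TYPED_ARRAY_TYPE branch in SizeFromMap relies on
the fixed typed-array instance types being declared contiguously, which
turns nine equality tests into one range check. The idiom, with made-up
enum values (the real ones live in objects.h):

    enum InstanceTypeSketch {
      FIXED_INT8_ARRAY_TYPE_SKETCH = 40,    // FIRST_FIXED_TYPED_ARRAY_TYPE
      FIXED_UINT8_ARRAY_TYPE_SKETCH,        // element types are contiguous
      FIXED_INT16_ARRAY_TYPE_SKETCH,
      FIXED_FLOAT64_ARRAY_TYPE_SKETCH = 48  // LAST_FIXED_TYPED_ARRAY_TYPE
    };

    static bool IsFixedTypedArraySketch(InstanceTypeSketch t) {
      return t >= FIXED_INT8_ARRAY_TYPE_SKETCH &&
             t <= FIXED_FLOAT64_ARRAY_TYPE_SKETCH;
    }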
@@ -3551,7 +4123,8 @@ void Map::set_instance_size(int value) {
ASSERT_EQ(0, value & (kPointerSize - 1));
value >>= kPointerSizeLog2;
ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
+ NOBARRIER_WRITE_BYTE_FIELD(
+ this, kInstanceSizeOffset, static_cast<byte>(value));
}
@@ -3624,12 +4197,12 @@ bool Map::has_non_instance_prototype() {
void Map::set_function_with_prototype(bool value) {
- set_bit_field3(FunctionWithPrototype::update(bit_field3(), value));
+ set_bit_field(FunctionWithPrototype::update(bit_field(), value));
}
bool Map::function_with_prototype() {
- return FunctionWithPrototype::decode(bit_field3());
+ return FunctionWithPrototype::decode(bit_field());
}
@@ -3660,32 +4233,19 @@ bool Map::is_extensible() {
}
-void Map::set_attached_to_shared_function_info(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kAttachedToSharedFunctionInfo));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kAttachedToSharedFunctionInfo));
- }
-}
-
-bool Map::attached_to_shared_function_info() {
- return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0;
-}
-
-
void Map::set_is_shared(bool value) {
set_bit_field3(IsShared::update(bit_field3(), value));
}
bool Map::is_shared() {
  return IsShared::decode(bit_field3());
}
void Map::set_dictionary_map(bool value) {
- if (value) mark_unstable();
- set_bit_field3(DictionaryMap::update(bit_field3(), value));
+ uint32_t new_bit_field3 = DictionaryMap::update(bit_field3(), value);
+ new_bit_field3 = IsUnstable::update(new_bit_field3, value);
+ set_bit_field3(new_bit_field3);
}
@@ -3725,7 +4285,6 @@ void Map::deprecate() {
bool Map::is_deprecated() {
- if (!FLAG_track_fields) return false;
return Deprecated::decode(bit_field3());
}
@@ -3736,11 +4295,30 @@ void Map::set_migration_target(bool value) {
bool Map::is_migration_target() {
- if (!FLAG_track_fields) return false;
return IsMigrationTarget::decode(bit_field3());
}
+void Map::set_done_inobject_slack_tracking(bool value) {
+ set_bit_field3(DoneInobjectSlackTracking::update(bit_field3(), value));
+}
+
+
+bool Map::done_inobject_slack_tracking() {
+ return DoneInobjectSlackTracking::decode(bit_field3());
+}
+
+
+void Map::set_construction_count(int value) {
+ set_bit_field3(ConstructionCount::update(bit_field3(), value));
+}
+
+
+int Map::construction_count() {
+ return ConstructionCount::decode(bit_field3());
+}
+
+
void Map::freeze() {
set_bit_field3(IsFrozen::update(bit_field3(), true));
}
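
Every bit_field3 accessor in this hunk (DictionaryMap, IsUnstable,
DoneInobjectSlackTracking, ConstructionCount, IsFrozen) uses V8's BitField
pattern: a compile-time <shift, size> window into a single uint32_t. A
minimal re-creation of the idea, assuming the shape of the real template
in utils.h:

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      // Returns |previous| with only this field replaced by |value|.
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | (static_cast<uint32_t>(value) << shift);
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    // E.g. a 3-bit construction count at bit 8 (layout assumed, not V8's):
    typedef BitFieldSketch<int, 8, 3> ConstructionCountSketch;
    // uint32_t bits = ConstructionCountSketch::update(bits, 5);
    // int count     = ConstructionCountSketch::decode(bits);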
@@ -3770,22 +4348,11 @@ bool Map::CanBeDeprecated() {
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
PropertyDetails details = instance_descriptors()->GetDetails(i);
- if (FLAG_track_fields && details.representation().IsNone()) {
- return true;
- }
- if (FLAG_track_fields && details.representation().IsSmi()) {
- return true;
- }
- if (FLAG_track_double_fields && details.representation().IsDouble()) {
- return true;
- }
- if (FLAG_track_heap_object_fields &&
- details.representation().IsHeapObject()) {
- return true;
- }
- if (FLAG_track_fields && details.type() == CONSTANT) {
- return true;
- }
+ if (details.representation().IsNone()) return true;
+ if (details.representation().IsSmi()) return true;
+ if (details.representation().IsDouble()) return true;
+ if (details.representation().IsHeapObject()) return true;
+ if (details.type() == CONSTANT) return true;
}
return false;
}
@@ -3843,8 +4410,7 @@ Object* DependentCode::object_at(int i) {
Object** DependentCode::slot_at(int i) {
- return HeapObject::RawField(
- this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
+ return RawFieldOfElementAt(kCodesStartIndex + i);
}
@@ -3870,10 +4436,6 @@ void DependentCode::ExtendGroup(DependencyGroup group) {
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
- // Make sure that all call stubs have an arguments count.
- ASSERT((ExtractKindFromFlags(flags) != CALL_IC &&
- ExtractKindFromFlags(flags) != KEYED_CALL_IC) ||
- ExtractArgumentsCountFromFlags(flags) >= 0);
WRITE_INT_FIELD(this, kFlagsOffset, flags);
}
@@ -3896,16 +4458,8 @@ InlineCacheState Code::ic_state() {
ExtraICState Code::extra_ic_state() {
- ASSERT((is_inline_cache_stub() && !needs_extended_extra_ic_state(kind()))
- || ic_state() == DEBUG_STUB);
- return ExtractExtraICStateFromFlags(flags());
-}
-
-
-ExtraICState Code::extended_extra_ic_state() {
ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
- ASSERT(needs_extended_extra_ic_state(kind()));
- return ExtractExtendedExtraICStateFromFlags(flags());
+ return ExtractExtraICStateFromFlags(flags());
}
@@ -3914,13 +4468,6 @@ Code::StubType Code::type() {
}
-int Code::arguments_count() {
- ASSERT(is_call_stub() || is_keyed_call_stub() ||
- kind() == STUB || is_handler());
- return ExtractArgumentsCountFromFlags(flags());
-}
-
-
// For initialization.
void Code::set_raw_kind_specific_flags1(int value) {
WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
@@ -3970,8 +4517,8 @@ bool Code::has_major_key() {
kind() == LOAD_IC ||
kind() == KEYED_LOAD_IC ||
kind() == STORE_IC ||
+ kind() == CALL_IC ||
kind() == KEYED_STORE_IC ||
- kind() == KEYED_CALL_IC ||
kind() == TO_BOOLEAN_IC;
}
@@ -4124,21 +4671,8 @@ void Code::set_back_edges_patched_for_osr(bool value) {
-CheckType Code::check_type() {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- byte type = READ_BYTE_FIELD(this, kCheckTypeOffset);
- return static_cast<CheckType>(type);
-}
-
-
-void Code::set_check_type(CheckType value) {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- WRITE_BYTE_FIELD(this, kCheckTypeOffset, value);
-}
-
-
byte Code::to_boolean_state() {
- return extended_extra_ic_state();
+ return extra_ic_state();
}
@@ -4166,12 +4700,41 @@ bool Code::marked_for_deoptimization() {
void Code::set_marked_for_deoptimization(bool flag) {
ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(!flag || AllowDeoptimization::IsAllowed(GetIsolate()));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = MarkedForDeoptimizationField::update(previous, flag);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
+bool Code::is_weak_stub() {
+ return CanBeWeakStub() && WeakStubField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+void Code::mark_as_weak_stub() {
+ ASSERT(CanBeWeakStub());
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = WeakStubField::update(previous, true);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
+bool Code::is_invalidated_weak_stub() {
+ return is_weak_stub() && InvalidatedWeakStubField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+void Code::mark_as_invalidated_weak_stub() {
+ ASSERT(is_inline_cache_stub());
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = InvalidatedWeakStubField::update(previous, true);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
bool Code::is_inline_cache_stub() {
Kind kind = this->kind();
switch (kind) {
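
The weak-stub markers added above all share one read-modify-write shape
over kKindSpecificFlags1: load the 32-bit word, flip a single-bit field,
store it back. In miniature, with an assumed bit position:

    #include <cstdint>

    static const uint32_t kWeakStubBitSketch = 1u << 5;  // assumed position

    static uint32_t MarkAsWeak(uint32_t flags1) {
      return flags1 | kWeakStubBitSketch;
    }
    static bool IsWeak(uint32_t flags1) {
      return (flags1 & kWeakStubBitSketch) != 0;
    }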
@@ -4184,7 +4747,7 @@ bool Code::is_inline_cache_stub() {
bool Code::is_keyed_stub() {
- return is_keyed_load_stub() || is_keyed_store_stub() || is_keyed_call_stub();
+ return is_keyed_load_stub() || is_keyed_store_stub();
}
@@ -4193,27 +4756,29 @@ bool Code::is_debug_stub() {
}
+ConstantPoolArray* Code::constant_pool() {
+ return ConstantPoolArray::cast(READ_FIELD(this, kConstantPoolOffset));
+}
+
+
+void Code::set_constant_pool(Object* value) {
+ ASSERT(value->IsConstantPoolArray());
+ WRITE_FIELD(this, kConstantPoolOffset, value);
+ WRITE_BARRIER(GetHeap(), this, kConstantPoolOffset, value);
+}
+
+
Code::Flags Code::ComputeFlags(Kind kind,
InlineCacheState ic_state,
ExtraICState extra_ic_state,
StubType type,
- int argc,
InlineCacheHolderFlag holder) {
- ASSERT(argc <= Code::kMaxArguments);
- // Since the extended extra ic state overlaps with the argument count
-  // for CALL_ICs, do some checks to make sure that they don't interfere.
- ASSERT((kind != Code::CALL_IC &&
- kind != Code::KEYED_CALL_IC) ||
- (ExtraICStateField::encode(extra_ic_state) | true));
// Compute the bit mask.
unsigned int bits = KindField::encode(kind)
| ICStateField::encode(ic_state)
| TypeField::encode(type)
- | ExtendedExtraICStateField::encode(extra_ic_state)
+ | ExtraICStateField::encode(extra_ic_state)
| CacheHolderField::encode(holder);
- if (!Code::needs_extended_extra_ic_state(kind)) {
- bits |= (argc << kArgumentsCountShift);
- }
return static_cast<Flags>(bits);
}
@@ -4221,9 +4786,15 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
ExtraICState extra_ic_state,
InlineCacheHolderFlag holder,
- StubType type,
- int argc) {
- return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
+ StubType type) {
+ return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder);
+}
+
+
+Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
+ StubType type,
+ InlineCacheHolderFlag holder) {
+ return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder);
}
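
Note how ComputeHandlerFlags reuses the extra-IC-state slot to carry the
wrapped handler kind, so a HANDLER's flags encode both what the code
object is and what it handles. A sketch of that packing with made-up field
positions, not the real layout from objects.h:

    #include <cstdint>

    // Assumed layout: kind in bits 0-3, extra state (here: the handler's
    // wrapped kind) in bits 4-13.
    static uint32_t EncodeKindSketch(uint32_t kind) { return kind & 0xF; }
    static uint32_t EncodeExtraSketch(uint32_t extra) {
      return (extra & 0x3FF) << 4;
    }

    static uint32_t ComputeHandlerFlagsSketch(uint32_t handler_kind) {
      const uint32_t kHandler = 15;  // stand-in for Code::HANDLER
      return EncodeKindSketch(kHandler) | EncodeExtraSketch(handler_kind);
    }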
@@ -4242,22 +4813,11 @@ ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
}
-ExtraICState Code::ExtractExtendedExtraICStateFromFlags(
- Flags flags) {
- return ExtendedExtraICStateField::decode(flags);
-}
-
-
Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
return TypeField::decode(flags);
}
-int Code::ExtractArgumentsCountFromFlags(Flags flags) {
- return (flags & kArgumentsCountMask) >> kArgumentsCountShift;
-}
-
-
InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
return CacheHolderField::decode(flags);
}
@@ -4286,6 +4846,45 @@ Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
}
+bool Code::IsWeakObjectInOptimizedCode(Object* object) {
+ if (!FLAG_collect_maps) return false;
+ if (object->IsMap()) {
+ return Map::cast(object)->CanTransition() &&
+ FLAG_weak_embedded_maps_in_optimized_code;
+ }
+ if (object->IsJSObject() ||
+ (object->IsCell() && Cell::cast(object)->value()->IsJSObject())) {
+ return FLAG_weak_embedded_objects_in_optimized_code;
+ }
+ return false;
+}
+
+
+class Code::FindAndReplacePattern {
+ public:
+ FindAndReplacePattern() : count_(0) { }
+ void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) {
+ ASSERT(count_ < kMaxCount);
+ find_[count_] = map_to_find;
+ replace_[count_] = obj_to_replace;
+ ++count_;
+ }
+ private:
+ static const int kMaxCount = 4;
+ int count_;
+ Handle<Map> find_[kMaxCount];
+ Handle<Object> replace_[kMaxCount];
+ friend class Code;
+};
+
+
+bool Code::IsWeakObjectInIC(Object* object) {
+ return object->IsMap() && Map::cast(object)->CanTransition() &&
+ FLAG_collect_maps &&
+ FLAG_weak_embedded_maps_in_ic;
+}
+
+
Object* Map::prototype() {
return READ_FIELD(this, kPrototypeOffset);
}
@@ -4300,21 +4899,17 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {
// If the descriptor is using the empty transition array, install a new empty
// transition array that will have place for an element transition.
-static MaybeObject* EnsureHasTransitionArray(Map* map) {
- TransitionArray* transitions;
- MaybeObject* maybe_transitions;
+static void EnsureHasTransitionArray(Handle<Map> map) {
+ Handle<TransitionArray> transitions;
if (!map->HasTransitionArray()) {
- maybe_transitions = TransitionArray::Allocate(map->GetIsolate(), 0);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+ transitions = TransitionArray::Allocate(map->GetIsolate(), 0);
transitions->set_back_pointer_storage(map->GetBackPointer());
} else if (!map->transitions()->IsFullTransitionArray()) {
- maybe_transitions = map->transitions()->ExtendToFullTransitionArray();
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+ transitions = TransitionArray::ExtendToFullTransitionArray(map);
} else {
- return map;
+ return;
}
- map->set_transitions(transitions);
- return transitions;
+ map->set_transitions(*transitions);
}
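
EnsureHasTransitionArray keeps its three-way "ensure" logic (allocate when
missing, upgrade when the wrong shape, otherwise no-op), but the
handle-based rewrite drops the MaybeObject failure plumbing: a handle
allocation can trigger GC instead of returning a failure for every caller
to propagate. The idiom in miniature, in plain C++ rather than V8 types:

    #include <memory>
    #include <vector>

    struct NodeSketch {
      std::unique_ptr<std::vector<int>> transitions;  // lazily allocated
    };

    static void EnsureHasTransitionsSketch(NodeSketch* node) {
      if (!node->transitions) {
        node->transitions.reset(new std::vector<int>());  // allocate if missing
      }
      // The real code has a middle case as well: widen a simple transition
      // store into a full TransitionArray when more room is needed.
    }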
@@ -4329,38 +4924,23 @@ ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
void Map::set_bit_field3(uint32_t bits) {
- // Ensure the upper 2 bits have the same value by sign extending it. This is
- // necessary to be able to use the 31st bit.
- int value = bits << 1;
- WRITE_FIELD(this, kBitField3Offset, Smi::FromInt(value >> 1));
+ if (kInt32Size != kPointerSize) {
+ WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
+ }
+ WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
}
uint32_t Map::bit_field3() {
- Object* value = READ_FIELD(this, kBitField3Offset);
- return Smi::cast(value)->value();
+ return READ_UINT32_FIELD(this, kBitField3Offset);
}
-void Map::ClearTransitions(Heap* heap, WriteBarrierMode mode) {
- Object* back_pointer = GetBackPointer();
-
- if (Heap::ShouldZapGarbage() && HasTransitionArray()) {
- ZapTransitions();
- }
-
- WRITE_FIELD(this, kTransitionsOrBackPointerOffset, back_pointer);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kTransitionsOrBackPointerOffset, back_pointer, mode);
-}
-
-
-void Map::AppendDescriptor(Descriptor* desc,
- const DescriptorArray::WhitenessWitness& witness) {
+void Map::AppendDescriptor(Descriptor* desc) {
DescriptorArray* descriptors = instance_descriptors();
int number_of_own_descriptors = NumberOfOwnDescriptors();
ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
- descriptors->Append(desc, witness);
+ descriptors->Append(desc);
SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
}
@@ -4397,20 +4977,7 @@ bool Map::CanHaveMoreTransitions() {
if (!HasTransitionArray()) return true;
return FixedArray::SizeFor(transitions()->length() +
TransitionArray::kTransitionSize)
- <= Page::kMaxNonCodeHeapObjectSize;
-}
-
-
-MaybeObject* Map::AddTransition(Name* key,
- Map* target,
- SimpleTransitionFlag flag) {
- if (HasTransitionArray()) return transitions()->CopyInsert(key, target);
- return TransitionArray::NewWith(flag, key, target, GetBackPointer());
-}
-
-
-void Map::SetTransition(int transition_index, Map* target) {
- transitions()->SetTarget(transition_index, target);
+ <= Page::kMaxRegularHeapObjectSize;
}
@@ -4419,15 +4986,9 @@ Map* Map::GetTransition(int transition_index) {
}
-MaybeObject* Map::set_elements_transition_map(Map* transitioned_map) {
- TransitionArray* transitions;
- MaybeObject* maybe_transitions = AddTransition(
- GetHeap()->elements_transition_symbol(),
- transitioned_map,
- FULL_TRANSITION);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
- set_transitions(transitions);
- return transitions;
+int Map::SearchTransition(Name* name) {
+ if (HasTransitionArray()) return transitions()->Search(name);
+ return TransitionArray::kNotFound;
}
@@ -4440,19 +5001,18 @@ FixedArray* Map::GetPrototypeTransitions() {
}
-MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
- MaybeObject* allow_prototype = EnsureHasTransitionArray(this);
- if (allow_prototype->IsFailure()) return allow_prototype;
- int old_number_of_transitions = NumberOfProtoTransitions();
+void Map::SetPrototypeTransitions(
+ Handle<Map> map, Handle<FixedArray> proto_transitions) {
+ EnsureHasTransitionArray(map);
+ int old_number_of_transitions = map->NumberOfProtoTransitions();
#ifdef DEBUG
- if (HasPrototypeTransitions()) {
- ASSERT(GetPrototypeTransitions() != proto_transitions);
- ZapPrototypeTransitions();
+ if (map->HasPrototypeTransitions()) {
+ ASSERT(map->GetPrototypeTransitions() != *proto_transitions);
+ map->ZapPrototypeTransitions();
}
#endif
- transitions()->SetPrototypeTransitions(proto_transitions);
- SetNumberOfProtoTransitions(old_number_of_transitions);
- return this;
+ map->transitions()->SetPrototypeTransitions(*proto_transitions);
+ map->SetNumberOfProtoTransitions(old_number_of_transitions);
}
@@ -4517,23 +5077,6 @@ void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
}
-// Can either be Smi (no transitions), normal transition array, or a transition
-// array with the header overwritten as a Smi (thus iterating).
-TransitionArray* Map::unchecked_transition_array() {
- Object* object = *HeapObject::RawField(this,
- Map::kTransitionsOrBackPointerOffset);
- TransitionArray* transition_array = static_cast<TransitionArray*>(object);
- return transition_array;
-}
-
-
-HeapObject* Map::UncheckedPrototypeTransitions() {
- ASSERT(HasTransitionArray());
- ASSERT(unchecked_transition_array()->HasPrototypeTransitions());
- return unchecked_transition_array()->UncheckedPrototypeTransitions();
-}
-
-
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
@@ -4548,6 +5091,7 @@ ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
+ACCESSORS(JSGlobalProxy, hash, Object, kHashOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
@@ -4618,10 +5162,9 @@ ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
-ACCESSORS_TO_SMI(AllocationSite, memento_found_count, kMementoFoundCountOffset)
-ACCESSORS_TO_SMI(AllocationSite, memento_create_count,
- kMementoCreateCountOffset)
-ACCESSORS_TO_SMI(AllocationSite, pretenure_decision, kPretenureDecisionOffset)
+ACCESSORS_TO_SMI(AllocationSite, pretenure_data, kPretenureDataOffset)
+ACCESSORS_TO_SMI(AllocationSite, pretenure_create_count,
+ kPretenureCreateCountOffset)
ACCESSORS(AllocationSite, dependent_code, DependentCode,
kDependentCodeOffset)
ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
@@ -4632,7 +5175,6 @@ ACCESSORS(Script, name, Object, kNameOffset)
ACCESSORS(Script, id, Smi, kIdOffset)
ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
-ACCESSORS(Script, data, Object, kDataOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
ACCESSORS_TO_SMI(Script, type, kTypeOffset)
@@ -4661,7 +5203,6 @@ void Script::set_compilation_state(CompilationState state) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
ACCESSORS(DebugInfo, original_code, Code, kOriginalCodeIndex)
ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
@@ -4671,20 +5212,19 @@ ACCESSORS_TO_SMI(BreakPointInfo, code_position, kCodePositionIndex)
ACCESSORS_TO_SMI(BreakPointInfo, source_position, kSourcePositionIndex)
ACCESSORS_TO_SMI(BreakPointInfo, statement_position, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
-#endif
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
+ACCESSORS(SharedFunctionInfo, feedback_vector, FixedArray,
+ kFeedbackVectorOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
@@ -4739,6 +5279,8 @@ SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
SMI_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason,
kOptCountAndBailoutReasonOffset)
SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
+SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
+SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
#else
@@ -4753,7 +5295,7 @@ SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
void holder::set_##name(int value) { \
ASSERT(kHeapObjectTag == 1); \
ASSERT((value & 0xC0000000) == 0xC0000000 || \
- (value & 0xC0000000) == 0x000000000); \
+ (value & 0xC0000000) == 0x0); \
WRITE_INT_FIELD(this, \
offset, \
(value << 1) & ~kHeapObjectTag); \
@@ -4789,32 +5331,16 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
opt_count_and_bailout_reason,
kOptCountAndBailoutReasonOffset)
-
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, counters, kCountersOffset)
-#endif
-
-
-int SharedFunctionInfo::construction_count() {
- return READ_BYTE_FIELD(this, kConstructionCountOffset);
-}
-
-
-void SharedFunctionInfo::set_construction_count(int value) {
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kConstructionCountOffset, static_cast<byte>(value));
-}
-
-
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- live_objects_may_exist,
- kLiveObjectsMayExist)
-
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
+ ast_node_count,
+ kAstNodeCountOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
+ profiler_ticks,
+ kProfilerTicksOffset)
-bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
- return initial_map() != GetHeap()->undefined_value();
-}
+#endif
BOOL_GETTER(SharedFunctionInfo,
@@ -4835,45 +5361,21 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
}
-int SharedFunctionInfo::profiler_ticks() {
- if (code()->kind() != Code::FUNCTION) return 0;
- return code()->profiler_ticks();
+StrictMode SharedFunctionInfo::strict_mode() {
+ return BooleanBit::get(compiler_hints(), kStrictModeFunction)
+ ? STRICT : SLOPPY;
}
-LanguageMode SharedFunctionInfo::language_mode() {
+void SharedFunctionInfo::set_strict_mode(StrictMode strict_mode) {
+ // We only allow mode transitions from sloppy to strict.
+ ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
int hints = compiler_hints();
- if (BooleanBit::get(hints, kExtendedModeFunction)) {
- ASSERT(BooleanBit::get(hints, kStrictModeFunction));
- return EXTENDED_MODE;
- }
- return BooleanBit::get(hints, kStrictModeFunction)
- ? STRICT_MODE : CLASSIC_MODE;
-}
-
-
-void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
- // We only allow language mode transitions that go set the same language mode
- // again or go up in the chain:
- // CLASSIC_MODE -> STRICT_MODE -> EXTENDED_MODE.
- ASSERT(this->language_mode() == CLASSIC_MODE ||
- this->language_mode() == language_mode ||
- language_mode == EXTENDED_MODE);
- int hints = compiler_hints();
- hints = BooleanBit::set(
- hints, kStrictModeFunction, language_mode != CLASSIC_MODE);
- hints = BooleanBit::set(
- hints, kExtendedModeFunction, language_mode == EXTENDED_MODE);
+ hints = BooleanBit::set(hints, kStrictModeFunction, strict_mode == STRICT);
set_compiler_hints(hints);
}
-bool SharedFunctionInfo::is_classic_mode() {
- return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
-}
-
-BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
- kExtendedModeFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
kInlineBuiltin)
@@ -4890,11 +5392,6 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
-void SharedFunctionInfo::BeforeVisitingPointers() {
- if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
-}
-
-
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -4952,6 +5449,7 @@ void SharedFunctionInfo::ReplaceCode(Code* value) {
}
ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
+
set_code(value);
}
@@ -4974,7 +5472,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
bool SharedFunctionInfo::is_compiled() {
return code() !=
- GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
+ GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
}
@@ -5081,6 +5579,15 @@ bool JSFunction::IsBuiltin() {
}
+bool JSFunction::IsNative() {
+ Object* script = shared()->script();
+ bool native = script->IsScript() &&
+ Script::cast(script)->type()->value() == Script::TYPE_NATIVE;
+ ASSERT(!IsBuiltin() || native); // All builtins are also native.
+ return native;
+}
+
+
bool JSFunction::NeedsArgumentsAdaption() {
return shared()->formal_parameter_count() !=
SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -5097,20 +5604,27 @@ bool JSFunction::IsOptimizable() {
}
-bool JSFunction::IsMarkedForLazyRecompilation() {
- return code() == GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile);
+bool JSFunction::IsMarkedForOptimization() {
+ return code() == GetIsolate()->builtins()->builtin(
+ Builtins::kCompileOptimized);
}
-bool JSFunction::IsMarkedForConcurrentRecompilation() {
+bool JSFunction::IsMarkedForConcurrentOptimization() {
return code() == GetIsolate()->builtins()->builtin(
- Builtins::kConcurrentRecompile);
+ Builtins::kCompileOptimizedConcurrent);
}
-bool JSFunction::IsInRecompileQueue() {
+bool JSFunction::IsInOptimizationQueue() {
return code() == GetIsolate()->builtins()->builtin(
- Builtins::kInRecompileQueue);
+ Builtins::kInOptimizationQueue);
+}
+
+
+bool JSFunction::IsInobjectSlackTrackingInProgress() {
+ return has_initial_map() &&
+ initial_map()->construction_count() != JSFunction::kNoSlackTracking;
}
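
This is the other half of the slack-tracking move visible earlier in the
diff: the countdown now lives on the initial map's construction_count
rather than on the SharedFunctionInfo. A toy model of the countdown,
assuming kNoSlackTracking is 0:

    struct ToyMap {
      int construction_count;  // starts at some small budget, e.g. 8
    };

    static const int kNoSlackTrackingSketch = 0;  // assumed sentinel

    static bool TrackingInProgress(const ToyMap& m) {
      return m.construction_count != kNoSlackTrackingSketch;
    }

    static void OnConstruct(ToyMap* m) {
      if (TrackingInProgress(*m)) --m->construction_count;
      // Once the sentinel is reached, instance size can be shrunk to fit.
    }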
@@ -5143,8 +5657,8 @@ void JSFunction::ReplaceCode(Code* code) {
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
if (was_optimized && is_optimized) {
- shared()->EvictFromOptimizedCodeMap(
- this->code(), "Replacing with another optimized code");
+ shared()->EvictFromOptimizedCodeMap(this->code(),
+ "Replacing with another optimized code");
}
set_code(code);
@@ -5225,7 +5739,8 @@ bool JSFunction::should_have_prototype() {
bool JSFunction::is_compiled() {
- return code() != GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
+ return code() !=
+ GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
}
@@ -5307,6 +5822,27 @@ void JSProxy::InitializeBody(int object_size, Object* value) {
ACCESSORS(JSSet, table, Object, kTableOffset)
ACCESSORS(JSMap, table, Object, kTableOffset)
+
+
+#define ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(name, type, offset) \
+ template<class Derived, class TableType> \
+ type* OrderedHashTableIterator<Derived, TableType>::name() { \
+ return type::cast(READ_FIELD(this, offset)); \
+ } \
+ template<class Derived, class TableType> \
+ void OrderedHashTableIterator<Derived, TableType>::set_##name( \
+ type* value, WriteBarrierMode mode) { \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+ }
+
+ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(table, Object, kTableOffset)
+ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(index, Smi, kIndexOffset)
+ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(kind, Smi, kKindOffset)
+
+#undef ORDERED_HASH_TABLE_ITERATOR_ACCESSORS
+
+
ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
ACCESSORS(JSWeakCollection, next, Object, kNextOffset)
@@ -5328,6 +5864,19 @@ SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
SMI_ACCESSORS(JSGeneratorObject, stack_handler_index, kStackHandlerIndexOffset)
+bool JSGeneratorObject::is_suspended() {
+ ASSERT_LT(kGeneratorExecuting, kGeneratorClosed);
+ ASSERT_EQ(kGeneratorClosed, 0);
+ return continuation() > 0;
+}
+
+bool JSGeneratorObject::is_closed() {
+ return continuation() == kGeneratorClosed;
+}
+
+bool JSGeneratorObject::is_executing() {
+ return continuation() == kGeneratorExecuting;
+}
JSGeneratorObject* JSGeneratorObject::cast(Object* obj) {
ASSERT(obj->IsJSGeneratorObject());
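
The three generator predicates decode a single integer, and the two
ASSERTs pin the encoding: kGeneratorClosed is 0 and kGeneratorExecuting is
negative, so any positive continuation is a suspend point. The whole state
machine fits in a few lines (sentinel values assumed):

    static const int kExecutingSketch = -1;  // assumed; only must be < 0
    static const int kClosedSketch = 0;      // asserted equal to 0 above

    static bool IsSuspended(int continuation) { return continuation > 0; }
    static bool IsClosed(int continuation) {
      return continuation == kClosedSketch;
    }
    static bool IsExecuting(int continuation) {
      return continuation == kExecutingSketch;
    }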
@@ -5378,7 +5927,6 @@ JSDate* JSDate::cast(Object* obj) {
ACCESSORS(JSMessageObject, type, String, kTypeOffset)
ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_trace, Object, kStackTraceOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
@@ -5397,12 +5945,14 @@ ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
ACCESSORS(Code, raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
+ACCESSORS(Code, next_code_link, Object, kNextCodeLinkOffset)
void Code::WipeOutHeader() {
WRITE_FIELD(this, kRelocationInfoOffset, NULL);
WRITE_FIELD(this, kHandlerTableOffset, NULL);
WRITE_FIELD(this, kDeoptimizationDataOffset, NULL);
+ WRITE_FIELD(this, kConstantPoolOffset, NULL);
// Do not wipe out e.g. a minor key.
if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
@@ -5424,23 +5974,9 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
}
-Object* Code::next_code_link() {
- CHECK(kind() == OPTIMIZED_FUNCTION);
- return raw_type_feedback_info();
-}
-
-
-void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
- CHECK(kind() == OPTIMIZED_FUNCTION);
- set_raw_type_feedback_info(value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
- value, mode);
-}
-
-
int Code::stub_info() {
ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
- kind() == BINARY_OP_IC || kind() == LOAD_IC);
+ kind() == BINARY_OP_IC || kind() == LOAD_IC || kind() == CALL_IC);
return Smi::cast(raw_type_feedback_info())->value();
}
@@ -5451,6 +5987,7 @@ void Code::set_stub_info(int value) {
kind() == BINARY_OP_IC ||
kind() == STUB ||
kind() == LOAD_IC ||
+ kind() == CALL_IC ||
kind() == KEYED_LOAD_IC ||
kind() == STORE_IC ||
kind() == KEYED_STORE_IC);
@@ -5624,7 +6161,7 @@ ElementsKind JSObject::GetElementsKind() {
fixed_array->IsFixedArray() &&
fixed_array->IsDictionary()) ||
(kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+ ASSERT((kind != SLOPPY_ARGUMENTS_ELEMENTS) ||
(elements()->IsFixedArray() && elements()->length() >= 2));
}
#endif
@@ -5672,8 +6209,8 @@ bool JSObject::HasDictionaryElements() {
}
-bool JSObject::HasNonStrictArgumentsElements() {
- return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+bool JSObject::HasSloppyArgumentsElements() {
+ return GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS;
}
@@ -5684,29 +6221,39 @@ bool JSObject::HasExternalArrayElements() {
}
-#define EXTERNAL_ELEMENTS_CHECK(name, type) \
-bool JSObject::HasExternal##name##Elements() { \
- HeapObject* array = elements(); \
- ASSERT(array != NULL); \
- if (!array->IsHeapObject()) \
- return false; \
- return array->map()->instance_type() == type; \
+#define EXTERNAL_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
+bool JSObject::HasExternal##Type##Elements() { \
+ HeapObject* array = elements(); \
+ ASSERT(array != NULL); \
+ if (!array->IsHeapObject()) \
+ return false; \
+ return array->map()->instance_type() == EXTERNAL_##TYPE##_ARRAY_TYPE; \
}
+TYPED_ARRAYS(EXTERNAL_ELEMENTS_CHECK)
+
+#undef EXTERNAL_ELEMENTS_CHECK
-EXTERNAL_ELEMENTS_CHECK(Byte, EXTERNAL_BYTE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedByte, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Short, EXTERNAL_SHORT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedShort,
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Int, EXTERNAL_INT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedInt,
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Float,
- EXTERNAL_FLOAT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Double,
- EXTERNAL_DOUBLE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Pixel, EXTERNAL_PIXEL_ARRAY_TYPE)
+
+bool JSObject::HasFixedTypedArrayElements() {
+ HeapObject* array = elements();
+ ASSERT(array != NULL);
+ return array->IsFixedTypedArrayBase();
+}
+
+
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
+bool JSObject::HasFixed##Type##Elements() { \
+ HeapObject* array = elements(); \
+ ASSERT(array != NULL); \
+ if (!array->IsHeapObject()) \
+ return false; \
+ return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
+}
+
+TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
+
+#undef FIXED_TYPED_ELEMENTS_CHECK
bool JSObject::HasNamedInterceptor() {
@@ -5719,24 +6266,6 @@ bool JSObject::HasIndexedInterceptor() {
}
-MaybeObject* JSObject::EnsureWritableFastElements() {
- ASSERT(HasFastSmiOrObjectElements());
- FixedArray* elems = FixedArray::cast(elements());
- Isolate* isolate = GetIsolate();
- if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
- Object* writable_elems;
- { MaybeObject* maybe_writable_elems = isolate->heap()->CopyFixedArrayWithMap(
- elems, isolate->heap()->fixed_array_map());
- if (!maybe_writable_elems->ToObject(&writable_elems)) {
- return maybe_writable_elems;
- }
- }
- set_elements(FixedArray::cast(writable_elems));
- isolate->counters()->cow_arrays_converted()->Increment();
- return writable_elems;
-}
-
-
NameDictionary* JSObject::property_dictionary() {
ASSERT(!HasFastProperties());
return NameDictionary::cast(properties());
@@ -5892,35 +6421,38 @@ bool JSReceiver::HasProperty(Handle<JSReceiver> object,
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return object->GetPropertyAttribute(*name) != ABSENT;
+ return GetPropertyAttributes(object, name) != ABSENT;
}
-bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object,
- Handle<Name> name) {
+bool JSReceiver::HasOwnProperty(Handle<JSReceiver> object, Handle<Name> name) {
if (object->IsJSProxy()) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return object->GetLocalPropertyAttribute(*name) != ABSENT;
+ return GetOwnPropertyAttributes(object, name) != ABSENT;
}
-PropertyAttributes JSReceiver::GetPropertyAttribute(Name* key) {
+PropertyAttributes JSReceiver::GetPropertyAttributes(Handle<JSReceiver> object,
+ Handle<Name> key) {
uint32_t index;
- if (IsJSObject() && key->AsArrayIndex(&index)) {
- return GetElementAttribute(index);
+ if (object->IsJSObject() && key->AsArrayIndex(&index)) {
+ return GetElementAttribute(object, index);
}
- return GetPropertyAttributeWithReceiver(this, key);
+ LookupIterator it(object, key);
+ return GetPropertyAttributes(&it);
}
-PropertyAttributes JSReceiver::GetElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
+PropertyAttributes JSReceiver::GetElementAttribute(Handle<JSReceiver> object,
+ uint32_t index) {
+ if (object->IsJSProxy()) {
+ return JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy>::cast(object), object, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true);
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, true);
}
@@ -5934,7 +6466,7 @@ bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) {
}
-Handle<Object> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) {
+Handle<Smi> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) {
return object->IsJSProxy()
? JSProxy::GetOrCreateIdentityHash(Handle<JSProxy>::cast(object))
: JSObject::GetOrCreateIdentityHash(Handle<JSObject>::cast(object));
@@ -5953,27 +6485,29 @@ bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasElementWithHandler(proxy, index);
}
- return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
- *object, index, true) != ABSENT;
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, true) != ABSENT;
}
-bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) {
+bool JSReceiver::HasOwnElement(Handle<JSReceiver> object, uint32_t index) {
if (object->IsJSProxy()) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasElementWithHandler(proxy, index);
}
- return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
- *object, index, false) != ABSENT;
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, false) != ABSENT;
}
-PropertyAttributes JSReceiver::GetLocalElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
+PropertyAttributes JSReceiver::GetOwnElementAttribute(
+ Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ return JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy>::cast(object), object, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false);
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, false);
}
@@ -5997,16 +6531,6 @@ void AccessorInfo::set_all_can_write(bool value) {
}
-bool AccessorInfo::prohibits_overwriting() {
- return BooleanBit::get(flag(), kProhibitsOverwritingBit);
-}
-
-
-void AccessorInfo::set_prohibits_overwriting(bool value) {
- set_flag(BooleanBit::set(flag(), kProhibitsOverwritingBit, value));
-}
-
-
PropertyAttributes AccessorInfo::property_attributes() {
return AttributesField::decode(static_cast<uint32_t>(flag()->value()));
}
@@ -6024,12 +6548,14 @@ bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
}
+void ExecutableAccessorInfo::clear_setter() {
+ set_setter(GetIsolate()->heap()->undefined_value(), SKIP_WRITE_BARRIER);
+}
+
+
void AccessorPair::set_access_flags(v8::AccessControl access_control) {
int current = access_flags()->value();
current = BooleanBit::set(current,
- kProhibitsOverwritingBit,
- access_control & PROHIBITS_OVERWRITING);
- current = BooleanBit::set(current,
kAllCanReadBit,
access_control & ALL_CAN_READ);
current = BooleanBit::set(current,
@@ -6049,32 +6575,27 @@ bool AccessorPair::all_can_write() {
}
-bool AccessorPair::prohibits_overwriting() {
- return BooleanBit::get(access_flags(), kProhibitsOverwritingBit);
-}
-
-
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::SetEntry(int entry,
- Object* key,
- Object* value) {
+template<typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
+ Handle<Object> key,
+ Handle<Object> value) {
SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0)));
}
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::SetEntry(int entry,
- Object* key,
- Object* value,
- PropertyDetails details) {
+template<typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyDetails details) {
ASSERT(!key->IsName() ||
details.IsDeleted() ||
details.dictionary_index() > 0);
- int index = HashTable<Shape, Key>::EntryToIndex(entry);
+ int index = DerivedHashTable::EntryToIndex(entry);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
- FixedArray::set(index, key, mode);
- FixedArray::set(index+1, value, mode);
+ FixedArray::set(index, *key, mode);
+ FixedArray::set(index+1, *value, mode);
FixedArray::set(index+2, details.AsSmi());
}
@@ -6096,10 +6617,12 @@ uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0);
}
+
uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) {
return ComputeIntegerHash(key, seed);
}
+
uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
uint32_t seed,
Object* other) {
@@ -6107,12 +6630,13 @@ uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed);
}
-MaybeObject* NumberDictionaryShape::AsObject(Heap* heap, uint32_t key) {
- return heap->NumberFromUint32(key);
+
+Handle<Object> NumberDictionaryShape::AsHandle(Isolate* isolate, uint32_t key) {
+ return isolate->factory()->NewNumberFromUint(key);
}
-bool NameDictionaryShape::IsMatch(Name* key, Object* other) {
+bool NameDictionaryShape::IsMatch(Handle<Name> key, Object* other) {
// We know that all entries in a hash table had their hash keys created.
// Use that knowledge to have fast failure.
if (key->Hash() != Name::cast(other)->Hash()) return false;
@@ -6120,63 +6644,72 @@ bool NameDictionaryShape::IsMatch(Name* key, Object* other) {
}
-uint32_t NameDictionaryShape::Hash(Name* key) {
+uint32_t NameDictionaryShape::Hash(Handle<Name> key) {
return key->Hash();
}
-uint32_t NameDictionaryShape::HashForObject(Name* key, Object* other) {
+uint32_t NameDictionaryShape::HashForObject(Handle<Name> key, Object* other) {
return Name::cast(other)->Hash();
}
-MaybeObject* NameDictionaryShape::AsObject(Heap* heap, Name* key) {
+Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
+ Handle<Name> key) {
ASSERT(key->IsUniqueName());
return key;
}
-template <int entrysize>
-bool ObjectHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
+void NameDictionary::DoGenerateNewEnumerationIndices(
+ Handle<NameDictionary> dictionary) {
+ DerivedDictionary::GenerateNewEnumerationIndices(dictionary);
+}
+
+
+bool ObjectHashTableShape::IsMatch(Handle<Object> key, Object* other) {
return key->SameValue(other);
}
-template <int entrysize>
-uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) {
+uint32_t ObjectHashTableShape::Hash(Handle<Object> key) {
return Smi::cast(key->GetHash())->value();
}
-template <int entrysize>
-uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
- Object* other) {
+uint32_t ObjectHashTableShape::HashForObject(Handle<Object> key,
+ Object* other) {
return Smi::cast(other->GetHash())->value();
}
-template <int entrysize>
-MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Heap* heap,
- Object* key) {
+Handle<Object> ObjectHashTableShape::AsHandle(Isolate* isolate,
+ Handle<Object> key) {
return key;
}
+Handle<ObjectHashTable> ObjectHashTable::Shrink(
+ Handle<ObjectHashTable> table, Handle<Object> key) {
+ return DerivedHashTable::Shrink(table, key);
+}
+
+
template <int entrysize>
-bool WeakHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
+bool WeakHashTableShape<entrysize>::IsMatch(Handle<Object> key, Object* other) {
return key->SameValue(other);
}
template <int entrysize>
-uint32_t WeakHashTableShape<entrysize>::Hash(Object* key) {
- intptr_t hash = reinterpret_cast<intptr_t>(key);
+uint32_t WeakHashTableShape<entrysize>::Hash(Handle<Object> key) {
+ intptr_t hash = reinterpret_cast<intptr_t>(*key);
return (uint32_t)(hash & 0xFFFFFFFF);
}
template <int entrysize>
-uint32_t WeakHashTableShape<entrysize>::HashForObject(Object* key,
+uint32_t WeakHashTableShape<entrysize>::HashForObject(Handle<Object> key,
Object* other) {
intptr_t hash = reinterpret_cast<intptr_t>(other);
return (uint32_t)(hash & 0xFFFFFFFF);
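
WeakHashTableShape hashes by object identity, truncating the key's address
to 32 bits. The same computation stand-alone:

    #include <cstdint>

    static uint32_t IdentityHashSketch(const void* object) {
      intptr_t hash = reinterpret_cast<intptr_t>(object);
      return static_cast<uint32_t>(hash & 0xFFFFFFFF);
    }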
@@ -6184,8 +6717,8 @@ uint32_t WeakHashTableShape<entrysize>::HashForObject(Object* key,
template <int entrysize>
-MaybeObject* WeakHashTableShape<entrysize>::AsObject(Heap* heap,
- Object* key) {
+Handle<Object> WeakHashTableShape<entrysize>::AsHandle(Isolate* isolate,
+ Handle<Object> key) {
return key;
}
@@ -6200,20 +6733,20 @@ void Map::ClearCodeCache(Heap* heap) {
}
-void JSArray::EnsureSize(int required_size) {
- ASSERT(HasFastSmiOrObjectElements());
- FixedArray* elts = FixedArray::cast(elements());
+void JSArray::EnsureSize(Handle<JSArray> array, int required_size) {
+ ASSERT(array->HasFastSmiOrObjectElements());
+ Handle<FixedArray> elts = handle(FixedArray::cast(array->elements()));
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
// Doubling in size would be overkill, but leave some slack to avoid
// constantly growing.
- Expand(required_size + (required_size >> 3));
+ Expand(array, required_size + (required_size >> 3));
// It's a performance benefit to keep a frequently used array in new-space.
- } else if (!GetHeap()->new_space()->Contains(elts) &&
+ } else if (!array->GetHeap()->new_space()->Contains(*elts) &&
required_size < kArraySizeThatFitsComfortablyInNewSpace) {
// Expand will allocate a new backing store in new space even if the size
// we asked for isn't larger than what we had before.
- Expand(required_size);
+ Expand(array, required_size);
}
}
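
EnsureSize's growth policy is deliberately gentler than doubling: it asks
Expand for the required size plus one-eighth slack, so required_size = 128
allocates 128 + 16 = 144 elements. As a pure function:

    static int GrownCapacitySketch(int required_size) {
      // required_size + required_size/8, matching Expand's argument above.
      return required_size + (required_size >> 3);
    }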
@@ -6231,78 +6764,40 @@ bool JSArray::AllowsSetElementsLength() {
}
-MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
- MaybeObject* maybe_result = EnsureCanContainElements(
- storage, storage->length(), ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (maybe_result->IsFailure()) return maybe_result;
- ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
- IsFastDoubleElementsKind(GetElementsKind())) ||
- ((storage->map() != GetHeap()->fixed_double_array_map()) &&
- (IsFastObjectElementsKind(GetElementsKind()) ||
- (IsFastSmiElementsKind(GetElementsKind()) &&
- FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
- set_elements(storage);
- set_length(Smi::FromInt(storage->length()));
- return this;
-}
-
-
-MaybeObject* FixedArray::Copy() {
- if (length() == 0) return this;
- return GetHeap()->CopyFixedArray(this);
-}
-
-
-MaybeObject* FixedDoubleArray::Copy() {
- if (length() == 0) return this;
- return GetHeap()->CopyFixedDoubleArray(this);
-}
-
-
-MaybeObject* ConstantPoolArray::Copy() {
- if (length() == 0) return this;
- return GetHeap()->CopyConstantPoolArray(this);
-}
-
+void JSArray::SetContent(Handle<JSArray> array,
+ Handle<FixedArrayBase> storage) {
+ EnsureCanContainElements(array, storage, storage->length(),
+ ALLOW_COPIED_DOUBLE_ELEMENTS);
-void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
- set(1 + index * 2, Smi::FromInt(id.ToInt()));
+ ASSERT((storage->map() == array->GetHeap()->fixed_double_array_map() &&
+ IsFastDoubleElementsKind(array->GetElementsKind())) ||
+ ((storage->map() != array->GetHeap()->fixed_double_array_map()) &&
+ (IsFastObjectElementsKind(array->GetElementsKind()) ||
+ (IsFastSmiElementsKind(array->GetElementsKind()) &&
+ Handle<FixedArray>::cast(storage)->ContainsOnlySmisOrHoles()))));
+ array->set_elements(*storage);
+ array->set_length(Smi::FromInt(storage->length()));
}
-TypeFeedbackId TypeFeedbackCells::AstId(int index) {
- return TypeFeedbackId(Smi::cast(get(1 + index * 2))->value());
+Handle<Object> TypeFeedbackInfo::UninitializedSentinel(Isolate* isolate) {
+ return isolate->factory()->uninitialized_symbol();
}
-void TypeFeedbackCells::SetCell(int index, Cell* cell) {
- set(index * 2, cell);
+Handle<Object> TypeFeedbackInfo::MegamorphicSentinel(Isolate* isolate) {
+ return isolate->factory()->megamorphic_symbol();
}
-Cell* TypeFeedbackCells::GetCell(int index) {
- return Cell::cast(get(index * 2));
-}
-
-
-Handle<Object> TypeFeedbackCells::UninitializedSentinel(Isolate* isolate) {
- return isolate->factory()->the_hole_value();
-}
-
-
-Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
- return isolate->factory()->undefined_value();
-}
-
-
-Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate,
+Handle<Object> TypeFeedbackInfo::MonomorphicArraySentinel(Isolate* isolate,
ElementsKind elements_kind) {
return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
}
-Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
- return heap->the_hole_value();
+Object* TypeFeedbackInfo::RawUninitializedSentinel(Heap* heap) {
+ return heap->uninitialized_symbol();
}
@@ -6384,10 +6879,6 @@ bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
}
-ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
- kTypeFeedbackCellsOffset)
-
-
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
@@ -6476,11 +6967,15 @@ void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
#undef ACCESSORS
#undef ACCESSORS_TO_SMI
#undef SMI_ACCESSORS
+#undef SYNCHRONIZED_SMI_ACCESSORS
+#undef NOBARRIER_SMI_ACCESSORS
#undef BOOL_GETTER
#undef BOOL_ACCESSORS
#undef FIELD_ADDR
#undef READ_FIELD
+#undef NOBARRIER_READ_FIELD
#undef WRITE_FIELD
+#undef NOBARRIER_WRITE_FIELD
#undef WRITE_BARRIER
#undef CONDITIONAL_WRITE_BARRIER
#undef READ_DOUBLE_FIELD
@@ -6495,7 +6990,8 @@ void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
#undef WRITE_SHORT_FIELD
#undef READ_BYTE_FIELD
#undef WRITE_BYTE_FIELD
-
+#undef NOBARRIER_READ_BYTE_FIELD
+#undef NOBARRIER_WRITE_BYTE_FIELD
} } // namespace v8::internal
diff --git a/chromium/v8/src/objects-printer.cc b/chromium/v8/src/objects-printer.cc
index 381c9aa55ad..54a7b5532bb 100644
--- a/chromium/v8/src/objects-printer.cc
+++ b/chromium/v8/src/objects-printer.cc
@@ -1,68 +1,40 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "disassembler.h"
-#include "disasm.h"
-#include "jsregexp.h"
-#include "objects-visiting.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/disassembler.h"
+#include "src/disasm.h"
+#include "src/jsregexp.h"
+#include "src/objects-visiting.h"
namespace v8 {
namespace internal {
#ifdef OBJECT_PRINT
-void MaybeObject::Print() {
+void Object::Print() {
Print(stdout);
}
-void MaybeObject::Print(FILE* out) {
- Object* this_as_object;
- if (ToObject(&this_as_object)) {
- if (this_as_object->IsSmi()) {
- Smi::cast(this_as_object)->SmiPrint(out);
- } else {
- HeapObject::cast(this_as_object)->HeapObjectPrint(out);
- }
+void Object::Print(FILE* out) {
+ if (IsSmi()) {
+ Smi::cast(this)->SmiPrint(out);
} else {
- Failure::cast(this)->FailurePrint(out);
+ HeapObject::cast(this)->HeapObjectPrint(out);
}
Flush(out);
}
-void MaybeObject::PrintLn() {
+void Object::PrintLn() {
PrintLn(stdout);
}
-void MaybeObject::PrintLn(FILE* out) {
+void Object::PrintLn(FILE* out) {
Print(out);
PrintF(out, "\n");
}
@@ -107,35 +79,23 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpacePrint(out);
break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- ExternalByteArray::cast(this)->ExternalByteArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- ExternalUnsignedByteArray::cast(this)
- ->ExternalUnsignedByteArrayPrint(out);
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- ExternalShortArray::cast(this)->ExternalShortArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- ExternalUnsignedShortArray::cast(this)
- ->ExternalUnsignedShortArrayPrint(out);
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- ExternalIntArray::cast(this)->ExternalIntArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayPrint(out);
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- ExternalFloatArray::cast(this)->ExternalFloatArrayPrint(out);
+
+#define PRINT_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ External##Type##Array::cast(this)->External##Type##ArrayPrint(out); \
break;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- ExternalDoubleArray::cast(this)->ExternalDoubleArrayPrint(out);
+
+ TYPED_ARRAYS(PRINT_EXTERNAL_ARRAY)
+#undef PRINT_EXTERNAL_ARRAY
+
+#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ case Fixed##Type##Array::kInstanceType: \
+ Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(out); \
break;
+
+ TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
+#undef PRINT_FIXED_TYPED_ARRAY
+
case FILLER_TYPE:
PrintF(out, "filler");
break;
@@ -186,6 +146,12 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case JS_MAP_TYPE:
JSMap::cast(this)->JSMapPrint(out);
break;
+ case JS_SET_ITERATOR_TYPE:
+ JSSetIterator::cast(this)->JSSetIteratorPrint(out);
+ break;
+ case JS_MAP_ITERATOR_TYPE:
+ JSMapIterator::cast(this)->JSMapIteratorPrint(out);
+ break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapPrint(out);
break;
@@ -241,48 +207,19 @@ void FreeSpace::FreeSpacePrint(FILE* out) {
}
-void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
- PrintF(out, "external pixel array");
-}
-
-
-void ExternalByteArray::ExternalByteArrayPrint(FILE* out) {
- PrintF(out, "external byte array");
-}
-
-
-void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint(FILE* out) {
- PrintF(out, "external unsigned byte array");
-}
-
-
-void ExternalShortArray::ExternalShortArrayPrint(FILE* out) {
- PrintF(out, "external short array");
-}
-
-
-void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint(FILE* out) {
- PrintF(out, "external unsigned short array");
-}
-
-
-void ExternalIntArray::ExternalIntArrayPrint(FILE* out) {
- PrintF(out, "external int array");
-}
-
-
-void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint(FILE* out) {
- PrintF(out, "external unsigned int array");
-}
+#define EXTERNAL_ARRAY_PRINTER(Type, type, TYPE, ctype, size) \
+ void External##Type##Array::External##Type##ArrayPrint(FILE* out) { \
+ PrintF(out, "external " #type " array"); \
+ }
+TYPED_ARRAYS(EXTERNAL_ARRAY_PRINTER)
-void ExternalFloatArray::ExternalFloatArrayPrint(FILE* out) {
- PrintF(out, "external float array");
-}
+#undef EXTERNAL_ARRAY_PRINTER
-void ExternalDoubleArray::ExternalDoubleArrayPrint(FILE* out) {
- PrintF(out, "external double array");
+template <class Traits>
+void FixedTypedArray<Traits>::FixedTypedArrayPrint(FILE* out) {
+ PrintF(out, "fixed %s", Traits::Designator());
}
@@ -295,9 +232,9 @@ void JSObject::PrintProperties(FILE* out) {
PrintF(out, ": ");
switch (descs->GetType(i)) {
case FIELD: {
- int index = descs->GetFieldIndex(i);
+ FieldIndex index = FieldIndex::ForDescriptor(map(), i);
RawFastPropertyAt(index)->ShortPrint(out);
- PrintF(out, " (field at offset %d)\n", index);
+ PrintF(out, " (field at offset %d)\n", index.property_index());
break;
}
case CONSTANT:
@@ -312,7 +249,6 @@ void JSObject::PrintProperties(FILE* out) {
case HANDLER: // only in lookup results, not in descriptors
case INTERCEPTOR: // only in lookup results, not in descriptors
// There are no transitions in the descriptor array.
- case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
break;
@@ -324,6 +260,24 @@ void JSObject::PrintProperties(FILE* out) {
}
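+// Helpers for JSObject::PrintElements below: DoPrintElements covers the
+// integral element kinds (printed with %d) and DoPrintDoubleElements the
+// Float32/Float64 kinds (printed with %f).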
+template<class T>
+static void DoPrintElements(FILE* out, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ PrintF(out, " %d: %d\n", i, p->get_scalar(i));
+ }
+}
+
+
+template<class T>
+static void DoPrintDoubleElements(FILE* out, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ PrintF(out, " %d: %f\n", i, p->get_scalar(i));
+ }
+}
+
+
void JSObject::PrintElements(FILE* out) {
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
@@ -357,76 +311,51 @@ void JSObject::PrintElements(FILE* out) {
}
break;
}
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* p = ExternalPixelArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, p->get_scalar(i));
- }
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- ExternalByteArray* p = ExternalByteArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- ExternalUnsignedByteArray* p =
- ExternalUnsignedByteArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- ExternalShortArray* p = ExternalShortArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- ExternalUnsignedShortArray* p =
- ExternalUnsignedShortArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_INT_ELEMENTS: {
- ExternalIntArray* p = ExternalIntArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- ExternalUnsignedIntArray* p =
- ExternalUnsignedIntArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalFloatArray* p = ExternalFloatArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %f\n", i, p->get_scalar(i));
- }
- break;
+
+
+#define PRINT_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintElements<Type>(out, elements()); \
+ break; \
}
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %f\n", i, p->get_scalar(i));
- }
- break;
+
+#define PRINT_DOUBLE_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintDoubleElements<Type>(out, elements()); \
+ break; \
}
+
+ PRINT_ELEMENTS(EXTERNAL_UINT8_CLAMPED_ELEMENTS, ExternalUint8ClampedArray)
+ PRINT_ELEMENTS(EXTERNAL_INT8_ELEMENTS, ExternalInt8Array)
+    PRINT_ELEMENTS(EXTERNAL_UINT8_ELEMENTS, ExternalUint8Array)
+    PRINT_ELEMENTS(EXTERNAL_INT16_ELEMENTS, ExternalInt16Array)
+    PRINT_ELEMENTS(EXTERNAL_UINT16_ELEMENTS, ExternalUint16Array)
+    PRINT_ELEMENTS(EXTERNAL_INT32_ELEMENTS, ExternalInt32Array)
+    PRINT_ELEMENTS(EXTERNAL_UINT32_ELEMENTS, ExternalUint32Array)
+ PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array)
+ PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array)
+
+
+ PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
+ PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray)
+ PRINT_ELEMENTS(INT8_ELEMENTS, FixedInt8Array)
+ PRINT_ELEMENTS(UINT16_ELEMENTS, FixedUint16Array)
+ PRINT_ELEMENTS(INT16_ELEMENTS, FixedInt16Array)
+ PRINT_ELEMENTS(UINT32_ELEMENTS, FixedUint32Array)
+ PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array)
+ PRINT_DOUBLE_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
+ PRINT_DOUBLE_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
+
+#undef PRINT_DOUBLE_ELEMENTS
+#undef PRINT_ELEMENTS
+
case DICTIONARY_ELEMENTS:
elements()->Print(out);
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
PrintF(out, " parameter map:");
for (int i = 2; i < p->length(); i++) {
@@ -448,28 +377,38 @@ void JSObject::PrintTransitions(FILE* out) {
if (!map()->HasTransitionArray()) return;
TransitionArray* transitions = map()->transitions();
for (int i = 0; i < transitions->number_of_transitions(); i++) {
+ Name* key = transitions->GetKey(i);
PrintF(out, " ");
- transitions->GetKey(i)->NamePrint(out);
+ key->NamePrint(out);
PrintF(out, ": ");
- switch (transitions->GetTargetDetails(i).type()) {
- case FIELD: {
- PrintF(out, " (transition to field)\n");
- break;
+ if (key == GetHeap()->frozen_symbol()) {
+ PrintF(out, " (transition to frozen)\n");
+ } else if (key == GetHeap()->elements_transition_symbol()) {
+ PrintF(out, " (transition to ");
+ PrintElementsKind(out, transitions->GetTarget(i)->elements_kind());
+ PrintF(out, ")\n");
+ } else if (key == GetHeap()->observed_symbol()) {
+ PrintF(out, " (transition to Object.observe)\n");
+ } else {
+ switch (transitions->GetTargetDetails(i).type()) {
+ case FIELD: {
+ PrintF(out, " (transition to field)\n");
+ break;
+ }
+ case CONSTANT:
+ PrintF(out, " (transition to constant)\n");
+ break;
+ case CALLBACKS:
+ PrintF(out, " (transition to callback)\n");
+ break;
+ // Values below are never in the target descriptor array.
+ case NORMAL:
+ case HANDLER:
+ case INTERCEPTOR:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
}
- case CONSTANT:
- PrintF(out, " (transition to constant)\n");
- break;
- case CALLBACKS:
- PrintF(out, " (transition to callback)\n");
- break;
- // Values below are never in the target descriptor array.
- case NORMAL:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
}
}
}
@@ -603,8 +542,6 @@ void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
PrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n",
ic_total_count(), ic_with_type_info_count());
- PrintF(out, " - type_feedback_cells: ");
- type_feedback_cells()->FixedArrayPrint(out);
}
@@ -642,16 +579,36 @@ void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
void ConstantPoolArray::ConstantPoolArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "ConstantPoolArray");
PrintF(out, " - length: %d", length());
- for (int i = 0; i < length(); i++) {
- if (i < first_ptr_index()) {
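+  // Entries are grouped per section in the order INT64, CODE_PTR, HEAP_PTR,
+  // INT32, which is why the index ranges are tested in that order below.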
+ for (int i = 0; i <= last_index(INT32, SMALL_SECTION); i++) {
+ if (i < last_index(INT64, SMALL_SECTION)) {
PrintF(out, "\n [%d]: double: %g", i, get_int64_entry_as_double(i));
- } else if (i < first_int32_index()) {
- PrintF(out, "\n [%d]: pointer: %p", i,
- reinterpret_cast<void*>(get_ptr_entry(i)));
- } else {
+ } else if (i <= last_index(CODE_PTR, SMALL_SECTION)) {
+ PrintF(out, "\n [%d]: code target pointer: %p", i,
+ reinterpret_cast<void*>(get_code_ptr_entry(i)));
+ } else if (i <= last_index(HEAP_PTR, SMALL_SECTION)) {
+ PrintF(out, "\n [%d]: heap pointer: %p", i,
+ reinterpret_cast<void*>(get_heap_ptr_entry(i)));
+ } else if (i <= last_index(INT32, SMALL_SECTION)) {
+ PrintF(out, "\n [%d]: int32: %d", i, get_int32_entry(i));
+ }
+ }
+ if (is_extended_layout()) {
+ PrintF(out, "\n Extended section:");
+ for (int i = first_extended_section_index();
+ i <= last_index(INT32, EXTENDED_SECTION); i++) {
+ if (i < last_index(INT64, EXTENDED_SECTION)) {
+ PrintF(out, "\n [%d]: double: %g", i, get_int64_entry_as_double(i));
+ } else if (i <= last_index(CODE_PTR, EXTENDED_SECTION)) {
+ PrintF(out, "\n [%d]: code target pointer: %p", i,
+ reinterpret_cast<void*>(get_code_ptr_entry(i)));
+ } else if (i <= last_index(HEAP_PTR, EXTENDED_SECTION)) {
+ PrintF(out, "\n [%d]: heap pointer: %p", i,
+ reinterpret_cast<void*>(get_heap_ptr_entry(i)));
+ } else if (i <= last_index(INT32, EXTENDED_SECTION)) {
PrintF(out, "\n [%d]: int32: %d", i, get_int32_entry(i));
}
}
+ }
PrintF(out, "\n");
}
@@ -672,8 +629,6 @@ void JSMessageObject::JSMessageObjectPrint(FILE* out) {
PrintF(out, "\n - end_position: %d", end_position());
PrintF(out, "\n - script: ");
script()->ShortPrint(out);
- PrintF(out, "\n - stack_trace: ");
- stack_trace()->ShortPrint(out);
PrintF(out, "\n - stack_frames: ");
stack_frames()->ShortPrint(out);
PrintF(out, "\n");
@@ -760,7 +715,7 @@ void JSProxy::JSProxyPrint(FILE* out) {
PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - handler = ");
handler()->Print(out);
- PrintF(out, " - hash = ");
+ PrintF(out, "\n - hash = ");
hash()->Print(out);
PrintF(out, "\n");
}
@@ -771,9 +726,9 @@ void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) {
PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - handler = ");
handler()->Print(out);
- PrintF(out, " - call_trap = ");
+ PrintF(out, "\n - call_trap = ");
call_trap()->Print(out);
- PrintF(out, " - construct_trap = ");
+ PrintF(out, "\n - construct_trap = ");
construct_trap()->Print(out);
PrintF(out, "\n");
}
@@ -797,6 +752,42 @@ void JSMap::JSMapPrint(FILE* out) {
}
+template<class Derived, class TableType>
+void OrderedHashTableIterator<Derived, TableType>::
+ OrderedHashTableIteratorPrint(FILE* out) {
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - table = ");
+ table()->ShortPrint(out);
+ PrintF(out, "\n - index = ");
+ index()->ShortPrint(out);
+ PrintF(out, "\n - kind = ");
+ kind()->ShortPrint(out);
+ PrintF(out, "\n");
+}
+
+
+template void
+OrderedHashTableIterator<JSSetIterator,
+ OrderedHashSet>::OrderedHashTableIteratorPrint(FILE* out);
+
+
+template void
+OrderedHashTableIterator<JSMapIterator,
+ OrderedHashMap>::OrderedHashTableIteratorPrint(FILE* out);
+
+
+void JSSetIterator::JSSetIteratorPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSSetIterator");
+ OrderedHashTableIteratorPrint(out);
+}
+
+
+void JSMapIterator::JSMapIteratorPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSMapIterator");
+ OrderedHashTableIteratorPrint(out);
+}
+
+
void JSWeakMap::JSWeakMapPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSWeakMap");
PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
@@ -890,6 +881,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
PrintF(out, " - name: ");
name()->ShortPrint(out);
PrintF(out, "\n - expected_nof_properties: %d", expected_nof_properties());
+ PrintF(out, "\n - ast_node_count: %d", ast_node_count());
PrintF(out, "\n - instance class name = ");
instance_class_name()->Print(out);
PrintF(out, "\n - code = ");
@@ -903,7 +895,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
source->ToCString(DISALLOW_NULLS,
FAST_STRING_TRAVERSAL,
start, length, NULL);
- PrintF(out, "%s", *source_string);
+ PrintF(out, "%s", source_string.get());
}
// Script files are often large, hard to read.
// PrintF(out, "\n - script =");
@@ -917,6 +909,8 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
PrintF(out, "\n - length = %d", length());
PrintF(out, "\n - optimized_code_map = ");
optimized_code_map()->ShortPrint(out);
+ PrintF(out, "\n - feedback_vector = ");
+ feedback_vector()->FixedArrayPrint(out);
PrintF(out, "\n");
}
@@ -1136,11 +1130,11 @@ void AllocationSite::AllocationSitePrint(FILE* out) {
PrintF(out, "\n - nested site: ");
nested_site()->ShortPrint(out);
PrintF(out, "\n - memento found count: ");
- memento_found_count()->ShortPrint(out);
+ Smi::FromInt(memento_found_count())->ShortPrint(out);
PrintF(out, "\n - memento create count: ");
- memento_create_count()->ShortPrint(out);
+ Smi::FromInt(memento_create_count())->ShortPrint(out);
PrintF(out, "\n - pretenure decision: ");
- pretenure_decision()->ShortPrint(out);
+ Smi::FromInt(pretenure_decision())->ShortPrint(out);
PrintF(out, "\n - transition_info: ");
if (transition_info()->IsSmi()) {
ElementsKind kind = GetElementsKind();
@@ -1186,8 +1180,6 @@ void Script::ScriptPrint(FILE* out) {
type()->ShortPrint(out);
PrintF(out, "\n - id: ");
id()->ShortPrint(out);
- PrintF(out, "\n - data: ");
- data()->ShortPrint(out);
PrintF(out, "\n - context data: ");
context_data()->ShortPrint(out);
PrintF(out, "\n - wrapper: ");
@@ -1203,7 +1195,6 @@ void Script::ScriptPrint(FILE* out) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void DebugInfo::DebugInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "DebugInfo");
PrintF(out, "\n - shared: ");
@@ -1225,7 +1216,6 @@ void BreakPointInfo::BreakPointInfoPrint(FILE* out) {
PrintF(out, "\n - break_point_objects: ");
break_point_objects()->ShortPrint(out);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
void DescriptorArray::PrintDescriptors(FILE* out) {
@@ -1261,7 +1251,6 @@ void TransitionArray::PrintTransitions(FILE* out) {
case NORMAL:
case HANDLER:
case INTERCEPTOR:
- case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
break;
diff --git a/chromium/v8/src/objects-visiting-inl.h b/chromium/v8/src/objects-visiting-inl.h
index 1a68344b26a..887a3de868f 100644
--- a/chromium/v8/src/objects-visiting-inl.h
+++ b/chromium/v8/src/objects-visiting-inl.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_OBJECTS_VISITING_INL_H_
#define V8_OBJECTS_VISITING_INL_H_
@@ -60,6 +37,8 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
int>::Visit);
table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+ table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray);
+ table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray);
table_.Register(kVisitNativeContext,
&FixedBodyVisitor<StaticVisitor,
@@ -87,9 +66,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
- table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
-
- table_.Register(kVisitJSWeakSet, &JSObjectVisitor::Visit);
+ table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
@@ -185,6 +162,10 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
+ table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit);
+
table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
table_.Register(kVisitNativeContext, &VisitNativeContext);
@@ -199,9 +180,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
- table_.Register(kVisitJSWeakMap, &StaticVisitor::VisitWeakCollection);
-
- table_.Register(kVisitJSWeakSet, &StaticVisitor::VisitWeakCollection);
+ table_.Register(kVisitJSWeakCollection, &VisitWeakCollection);
table_.Register(kVisitOddball,
&FixedBodyVisitor<StaticVisitor,
@@ -261,7 +240,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
- if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), object)) {
+  // TODO(ulan): It could be better to record slots only for strongly embedded
+  // objects here and record slots for weakly embedded objects during clearing
+  // of non-live references in mark-compact.
+ if (!rinfo->host()->IsWeakObject(object)) {
StaticVisitor::MarkObject(heap, object);
}
}
@@ -272,7 +254,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCell(
Heap* heap, RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::CELL);
Cell* cell = rinfo->target_cell();
- StaticVisitor::MarkObject(heap, cell);
+ // No need to record slots because the cell space is not compacted during GC.
+ if (!rinfo->host()->IsWeakObject(cell)) {
+ StaticVisitor::MarkObject(heap, cell);
+ }
}
@@ -300,8 +285,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
&& (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
- Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
- IC::Clear(target->GetIsolate(), rinfo->pc());
+ heap->isolate()->serializer_enabled() ||
+ target->ic_age() != heap->global_ic_age() ||
+ target->is_invalidated_weak_stub())) {
+ IC::Clear(heap->isolate(), rinfo->pc(), rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
@@ -331,8 +318,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
for (int idx = Context::FIRST_WEAK_SLOT;
idx < Context::NATIVE_CONTEXT_SLOTS;
++idx) {
- Object** slot =
- HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+ Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
collector->RecordSlot(slot, slot, *slot);
}
}
@@ -411,14 +397,45 @@ void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ JSWeakCollection* weak_collection =
+ reinterpret_cast<JSWeakCollection*>(object);
+
+ // Enqueue weak collection in linked list of encountered weak collections.
+ if (weak_collection->next() == heap->undefined_value()) {
+ weak_collection->set_next(heap->encountered_weak_collections());
+ heap->set_encountered_weak_collections(weak_collection);
+ }
+
+  // Skip visiting the backing hash table containing the mappings and the
+  // pointer to the other enqueued weak collections; both are post-processed.
+ StaticVisitor::VisitPointers(heap,
+ HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset),
+ HeapObject::RawField(object, JSWeakCollection::kTableOffset));
+ STATIC_ASSERT(JSWeakCollection::kTableOffset + kPointerSize ==
+ JSWeakCollection::kNextOffset);
+ STATIC_ASSERT(JSWeakCollection::kNextOffset + kPointerSize ==
+ JSWeakCollection::kSize);
+
+  // A partially initialized weak collection is enqueued, but its table is
+  // ignored.
+ if (!weak_collection->table()->IsHashTable()) return;
+
+ // Mark the backing hash table without pushing it on the marking stack.
+ Object** slot = HeapObject::RawField(object, JSWeakCollection::kTableOffset);
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCode(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
Code* code = Code::cast(object);
- if (FLAG_cleanup_code_caches_at_gc) {
- code->ClearTypeFeedbackCells(heap);
- }
- if (FLAG_age_code && !Serializer::enabled()) {
+ if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
code->MakeOlder(heap->mark_compact_collector()->marking_parity());
}
code->CodeIterateBody<StaticVisitor>(heap);
@@ -433,6 +450,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (shared->ic_age() != heap->global_ic_age()) {
shared->ResetForNewContext(heap->global_ic_age());
}
+ if (FLAG_cleanup_code_caches_at_gc) {
+ shared->ClearTypeFeedbackInfo();
+ }
if (FLAG_cache_optimized_code &&
FLAG_flush_optimized_code_cache &&
!shared->optimized_code_map()->IsSmi()) {
@@ -477,15 +497,30 @@ template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
- ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
- int first_ptr_offset = constant_pool->OffsetOfElementAt(
- constant_pool->first_ptr_index());
- int last_ptr_offset = constant_pool->OffsetOfElementAt(
- constant_pool->first_ptr_index() + constant_pool->count_of_ptr_entries());
- StaticVisitor::VisitPointers(
- heap,
- HeapObject::RawField(object, first_ptr_offset),
- HeapObject::RawField(object, last_ptr_offset));
+ ConstantPoolArray* array = ConstantPoolArray::cast(object);
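+  // Code-pointer entries are visited as code entries; heap-pointer entries
+  // are recorded as slots and marked unless the pool holds them weakly.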
+ ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
+ while (!code_iter.is_finished()) {
+ Address code_entry = reinterpret_cast<Address>(
+ array->RawFieldOfElementAt(code_iter.next_index()));
+ StaticVisitor::VisitCodeEntry(heap, code_entry);
+ }
+
+ ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
+ while (!heap_iter.is_finished()) {
+ Object** slot = array->RawFieldOfElementAt(heap_iter.next_index());
+ HeapObject* object = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, object);
+ bool is_weak_object =
+ (array->get_weak_object_state() ==
+ ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE &&
+ Code::IsWeakObjectInOptimizedCode(object)) ||
+ (array->get_weak_object_state() ==
+ ConstantPoolArray::WEAK_OBJECTS_IN_IC &&
+ Code::IsWeakObjectInIC(object));
+ if (!is_weak_object) {
+ StaticVisitor::MarkObject(heap, object);
+ }
+ }
}
@@ -600,12 +635,9 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
// array to prevent visiting it later. Skip recording the transition
// array slot, since it will be implicitly recorded when the pointer
// fields of this map are visited.
- TransitionArray* transitions = map->unchecked_transition_array();
- if (transitions->IsTransitionArray()) {
+ if (map->HasTransitionArray()) {
+ TransitionArray* transitions = map->transitions();
MarkTransitionArray(heap, transitions);
- } else {
- // Already marked by marking map->GetBackPointer() above.
- ASSERT(transitions->IsMap() || transitions->IsUndefined());
}
// Since descriptor arrays are potentially shared, ensure that only the
@@ -792,7 +824,6 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
Heap* heap, HeapObject* object) {
- StaticVisitor::BeforeVisitingSharedFunctionInfo(object);
Object** start_slot =
HeapObject::RawField(object,
SharedFunctionInfo::BodyDescriptor::kStartOffset);
@@ -806,7 +837,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
Heap* heap, HeapObject* object) {
- StaticVisitor::BeforeVisitingSharedFunctionInfo(object);
Object** name_slot =
HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
StaticVisitor::VisitPointer(heap, name_slot);
@@ -884,6 +914,8 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
IteratePointer(v, kHandlerTableOffset);
IteratePointer(v, kDeoptimizationDataOffset);
IteratePointer(v, kTypeFeedbackInfoOffset);
+ IterateNextCodeLink(v, kNextCodeLinkOffset);
+ IteratePointer(v, kConstantPoolOffset);
RelocIterator it(this, mode_mask);
Isolate* isolate = this->GetIsolate();
@@ -917,6 +949,13 @@ void Code::CodeIterateBody(Heap* heap) {
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
+ StaticVisitor::VisitNextCodeLink(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
+ StaticVisitor::VisitPointer(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kConstantPoolOffset));
+
RelocIterator it(this, mode_mask);
for (; !it.done(); it.next()) {
diff --git a/chromium/v8/src/objects-visiting.cc b/chromium/v8/src/objects-visiting.cc
index 5ced2cf7a35..f2f47b0f88f 100644
--- a/chromium/v8/src/objects-visiting.cc
+++ b/chromium/v8/src/objects-visiting.cc
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ic-inl.h"
-#include "objects-visiting.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic-inl.h"
+#include "src/objects-visiting.h"
namespace v8 {
namespace internal {
@@ -111,10 +88,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
JSMap::kSize);
case JS_WEAK_MAP_TYPE:
- return kVisitJSWeakMap;
-
case JS_WEAK_SET_TYPE:
- return kVisitJSWeakSet;
+ return kVisitJSWeakCollection;
case JS_REGEXP_TYPE:
return kVisitJSRegExp;
@@ -163,6 +138,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
return GetVisitorIdForSize(kVisitJSObject,
kVisitJSObjectGeneric,
instance_size);
@@ -171,18 +148,27 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
return kVisitJSFunction;
case HEAP_NUMBER_TYPE:
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- case EXTERNAL_BYTE_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- case EXTERNAL_SHORT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- case EXTERNAL_INT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
+#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE:
+
+ TYPED_ARRAYS(EXTERNAL_ARRAY_CASE)
return GetVisitorIdForSize(kVisitDataObject,
kVisitDataObjectGeneric,
instance_size);
+#undef EXTERNAL_ARRAY_CASE
+
+ case FIXED_UINT8_ARRAY_TYPE:
+ case FIXED_INT8_ARRAY_TYPE:
+ case FIXED_UINT16_ARRAY_TYPE:
+ case FIXED_INT16_ARRAY_TYPE:
+ case FIXED_UINT32_ARRAY_TYPE:
+ case FIXED_INT32_ARRAY_TYPE:
+ case FIXED_FLOAT32_ARRAY_TYPE:
+ case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
+ return kVisitFixedTypedArray;
+
+ case FIXED_FLOAT64_ARRAY_TYPE:
+ return kVisitFixedFloat64Array;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE:
@@ -202,4 +188,272 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
}
}
+
+// We don't record weak slots during marking or scavenges. Instead we do it
+// once when we complete the mark-compact cycle. Note that the write barrier
+// has no effect if we are already in the middle of a compacting mark-sweep
+// cycle and we have to record slots manually.
+static bool MustRecordSlots(Heap* heap) {
+ return heap->gc_state() == Heap::MARK_COMPACT &&
+ heap->mark_compact_collector()->is_compacting();
+}
+
+
+template <class T>
+struct WeakListVisitor;
+
+
+template <class T>
+Object* VisitWeakList(Heap* heap,
+ Object* list,
+ WeakObjectRetainer* retainer) {
+ Object* undefined = heap->undefined_value();
+ Object* head = undefined;
+ T* tail = NULL;
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ bool record_slots = MustRecordSlots(heap);
+ while (list != undefined) {
+ // Check whether to keep the candidate in the list.
+ T* candidate = reinterpret_cast<T*>(list);
+ Object* retained = retainer->RetainAs(list);
+ if (retained != NULL) {
+ if (head == undefined) {
+ // First element in the list.
+ head = retained;
+ } else {
+ // Subsequent elements in the list.
+ ASSERT(tail != NULL);
+ WeakListVisitor<T>::SetWeakNext(tail, retained);
+ if (record_slots) {
+ Object** next_slot =
+ HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
+ collector->RecordSlot(next_slot, next_slot, retained);
+ }
+ }
+ // Retained object is new tail.
+ ASSERT(!retained->IsUndefined());
+ candidate = reinterpret_cast<T*>(retained);
+ tail = candidate;
+
+      // tail is a live object, visit it.
+ WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer);
+ } else {
+ WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
+ }
+
+ // Move to next element in the list.
+ list = WeakListVisitor<T>::WeakNext(candidate);
+ }
+
+  // Terminate the list if there are one or more elements.
+ if (tail != NULL) {
+ WeakListVisitor<T>::SetWeakNext(tail, undefined);
+ }
+ return head;
+}
+
+
+template <class T>
+static void ClearWeakList(Heap* heap,
+ Object* list) {
+ Object* undefined = heap->undefined_value();
+ while (list != undefined) {
+ T* candidate = reinterpret_cast<T*>(list);
+ list = WeakListVisitor<T>::WeakNext(candidate);
+ WeakListVisitor<T>::SetWeakNext(candidate, undefined);
+ }
+}
+
+
+template<>
+struct WeakListVisitor<JSFunction> {
+ static void SetWeakNext(JSFunction* function, Object* next) {
+ function->set_next_function_link(next);
+ }
+
+ static Object* WeakNext(JSFunction* function) {
+ return function->next_function_link();
+ }
+
+ static int WeakNextOffset() {
+ return JSFunction::kNextFunctionLinkOffset;
+ }
+
+ static void VisitLiveObject(Heap*, JSFunction*, WeakObjectRetainer*) {}
+
+ static void VisitPhantomObject(Heap*, JSFunction*) {}
+};
+
+
+template<>
+struct WeakListVisitor<Code> {
+ static void SetWeakNext(Code* code, Object* next) {
+ code->set_next_code_link(next);
+ }
+
+ static Object* WeakNext(Code* code) {
+ return code->next_code_link();
+ }
+
+ static int WeakNextOffset() {
+ return Code::kNextCodeLinkOffset;
+ }
+
+ static void VisitLiveObject(Heap*, Code*, WeakObjectRetainer*) {}
+
+ static void VisitPhantomObject(Heap*, Code*) {}
+};
+
+
+template<>
+struct WeakListVisitor<Context> {
+ static void SetWeakNext(Context* context, Object* next) {
+ context->set(Context::NEXT_CONTEXT_LINK,
+ next,
+ UPDATE_WRITE_BARRIER);
+ }
+
+ static Object* WeakNext(Context* context) {
+ return context->get(Context::NEXT_CONTEXT_LINK);
+ }
+
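+  // The next-context link is stored in a context slot, so its byte offset
+  // is that of element NEXT_CONTEXT_LINK in the underlying FixedArray.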
+ static int WeakNextOffset() {
+ return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
+ }
+
+ static void VisitLiveObject(Heap* heap,
+ Context* context,
+ WeakObjectRetainer* retainer) {
+ // Process the three weak lists linked off the context.
+ DoWeakList<JSFunction>(heap, context, retainer,
+ Context::OPTIMIZED_FUNCTIONS_LIST);
+ DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
+ DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
+ }
+
+ template<class T>
+ static void DoWeakList(Heap* heap,
+ Context* context,
+ WeakObjectRetainer* retainer,
+ int index) {
+ // Visit the weak list, removing dead intermediate elements.
+ Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer);
+
+ // Update the list head.
+ context->set(index, list_head, UPDATE_WRITE_BARRIER);
+
+ if (MustRecordSlots(heap)) {
+ // Record the updated slot if necessary.
+ Object** head_slot = HeapObject::RawField(
+ context, FixedArray::SizeFor(index));
+ heap->mark_compact_collector()->RecordSlot(
+ head_slot, head_slot, list_head);
+ }
+ }
+
+ static void VisitPhantomObject(Heap* heap, Context* context) {
+ ClearWeakList<JSFunction>(heap,
+ context->get(Context::OPTIMIZED_FUNCTIONS_LIST));
+ ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
+ ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
+ }
+};
+
+
+template<>
+struct WeakListVisitor<JSArrayBufferView> {
+ static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* WeakNext(JSArrayBufferView* obj) {
+ return obj->weak_next();
+ }
+
+ static int WeakNextOffset() {
+ return JSArrayBufferView::kWeakNextOffset;
+ }
+
+ static void VisitLiveObject(Heap*, JSArrayBufferView*, WeakObjectRetainer*) {}
+
+ static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
+};
+
+
+template<>
+struct WeakListVisitor<JSArrayBuffer> {
+ static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* WeakNext(JSArrayBuffer* obj) {
+ return obj->weak_next();
+ }
+
+ static int WeakNextOffset() {
+ return JSArrayBuffer::kWeakNextOffset;
+ }
+
+ static void VisitLiveObject(Heap* heap,
+ JSArrayBuffer* array_buffer,
+ WeakObjectRetainer* retainer) {
+ Object* typed_array_obj =
+ VisitWeakList<JSArrayBufferView>(
+ heap,
+ array_buffer->weak_first_view(),
+ retainer);
+ array_buffer->set_weak_first_view(typed_array_obj);
+ if (typed_array_obj != heap->undefined_value() && MustRecordSlots(heap)) {
+ Object** slot = HeapObject::RawField(
+ array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
+ }
+ }
+
+ static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
+ Runtime::FreeArrayBuffer(heap->isolate(), phantom);
+ }
+};
+
+
+template<>
+struct WeakListVisitor<AllocationSite> {
+ static void SetWeakNext(AllocationSite* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* WeakNext(AllocationSite* obj) {
+ return obj->weak_next();
+ }
+
+ static int WeakNextOffset() {
+ return AllocationSite::kWeakNextOffset;
+ }
+
+ static void VisitLiveObject(Heap*, AllocationSite*, WeakObjectRetainer*) {}
+
+ static void VisitPhantomObject(Heap*, AllocationSite*) {}
+};
+
+
+template Object* VisitWeakList<Code>(
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<JSFunction>(
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<Context>(
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<JSArrayBuffer>(
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<AllocationSite>(
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/objects-visiting.h b/chromium/v8/src/objects-visiting.h
index f7758fdf4fc..f6fda9df0a2 100644
--- a/chromium/v8/src/objects-visiting.h
+++ b/chromium/v8/src/objects-visiting.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_OBJECTS_VISITING_H_
#define V8_OBJECTS_VISITING_H_
-#include "allocation.h"
+#include "src/allocation.h"
// This file provides base classes and auxiliary methods for defining
// static object visitors used during GC.
@@ -47,13 +24,15 @@ namespace internal {
class StaticVisitorBase : public AllStatic {
public:
#define VISITOR_ID_LIST(V) \
- V(SeqOneByteString) \
+ V(SeqOneByteString) \
V(SeqTwoByteString) \
V(ShortcutCandidate) \
V(ByteArray) \
V(FreeSpace) \
V(FixedArray) \
V(FixedDoubleArray) \
+ V(FixedTypedArray) \
+ V(FixedFloat64Array) \
V(ConstantPoolArray) \
V(NativeContext) \
V(AllocationSite) \
@@ -94,8 +73,7 @@ class StaticVisitorBase : public AllStatic {
V(PropertyCell) \
V(SharedFunctionInfo) \
V(JSFunction) \
- V(JSWeakMap) \
- V(JSWeakSet) \
+ V(JSWeakCollection) \
V(JSArrayBuffer) \
V(JSTypedArray) \
V(JSDataView) \
@@ -142,7 +120,7 @@ class StaticVisitorBase : public AllStatic {
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
@@ -160,7 +138,7 @@ class VisitorDispatchTable {
// every element of callbacks_ array will remain correct
// pointer (memcpy might be implemented as a byte copying loop).
for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
- NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
+ base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
}
}
@@ -174,7 +152,7 @@ class VisitorDispatchTable {
void Register(StaticVisitorBase::VisitorId id, Callback callback) {
ASSERT(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned.
- callbacks_[id] = reinterpret_cast<AtomicWord>(callback);
+ callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
}
template<typename Visitor,
@@ -206,7 +184,7 @@ class VisitorDispatchTable {
}
private:
- AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
+ base::AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
};
@@ -322,6 +300,10 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return FixedDoubleArray::SizeFor(length);
}
+ INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) {
+ return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+ }
+
INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
return JSObjectVisitor::Visit(map, object);
}
@@ -399,7 +381,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
- INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
@@ -408,6 +389,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { }
INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { }
+ // Skip the weak next code link in a code object.
+ INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) { }
// TODO(mstarzinger): This should be made protected once refactoring is done.
// Mark non-optimize code for functions inlined into the given optimized
@@ -419,6 +402,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitCode(Map* map, HeapObject* object));
INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object));
+ INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
+ INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
@@ -475,6 +460,17 @@ VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
StaticMarkingVisitor<StaticVisitor>::table_;
+class WeakObjectRetainer;
+
+
+// A weak list is a singly linked list in which each element has a weak
+// pointer to the next element. Given the head of the list, this function
+// removes dead elements from the list and, if requested, records slots for
+// the next-element pointers. The template parameter T is a WeakListVisitor
+// that defines how to access the next-element pointers.
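+// A specialization supplies SetWeakNext(T*, Object*), WeakNext(T*),
+// WeakNextOffset(), VisitLiveObject(Heap*, T*, WeakObjectRetainer*) and
+// VisitPhantomObject(Heap*, T*); see the specializations in
+// objects-visiting.cc.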
+template <class T>
+Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
+
} } // namespace v8::internal
#endif // V8_OBJECTS_VISITING_H_
diff --git a/chromium/v8/src/objects.cc b/chromium/v8/src/objects.cc
index e9788786c57..5b5d79174bd 100644
--- a/chromium/v8/src/objects.cc
+++ b/chromium/v8/src/objects.cc
@@ -1,111 +1,83 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "allocation-site-scopes.h"
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "code-stubs.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "date.h"
-#include "elements.h"
-#include "execution.h"
-#include "full-codegen.h"
-#include "hydrogen.h"
-#include "isolate-inl.h"
-#include "log.h"
-#include "objects-inl.h"
-#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
-#include "macro-assembler.h"
-#include "mark-compact.h"
-#include "safepoint-table.h"
-#include "string-stream.h"
-#include "utils.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/allocation-site-scopes.h"
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/date.h"
+#include "src/elements.h"
+#include "src/execution.h"
+#include "src/field-index.h"
+#include "src/field-index-inl.h"
+#include "src/full-codegen.h"
+#include "src/hydrogen.h"
+#include "src/isolate-inl.h"
+#include "src/log.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
+#include "src/objects-visiting-inl.h"
+#include "src/macro-assembler.h"
+#include "src/mark-compact.h"
+#include "src/safepoint-table.h"
+#include "src/string-search.h"
+#include "src/string-stream.h"
+#include "src/utils.h"
#ifdef ENABLE_DISASSEMBLER
-#include "disasm.h"
-#include "disassembler.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
#endif
namespace v8 {
namespace internal {
-
-MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
- Object* value) {
- Object* result;
- { MaybeObject* maybe_result =
- constructor->GetHeap()->AllocateJSObject(constructor);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSValue::cast(result)->set_value(value);
- return result;
-}
-
-
-MaybeObject* Object::ToObject(Context* native_context) {
- if (IsNumber()) {
- return CreateJSValue(native_context->number_function(), this);
- } else if (IsBoolean()) {
- return CreateJSValue(native_context->boolean_function(), this);
- } else if (IsString()) {
- return CreateJSValue(native_context->string_function(), this);
+Handle<HeapType> Object::OptimalType(Isolate* isolate,
+ Representation representation) {
+ if (representation.IsNone()) return HeapType::None(isolate);
+ if (FLAG_track_field_types) {
+ if (representation.IsHeapObject() && IsHeapObject()) {
+ // We can track only JavaScript objects with stable maps.
+ Handle<Map> map(HeapObject::cast(this)->map(), isolate);
+ if (map->is_stable() &&
+ map->instance_type() >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
+ map->instance_type() <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE) {
+ return HeapType::Class(map, isolate);
+ }
+ }
}
- ASSERT(IsJSObject());
- return this;
+ return HeapType::Any(isolate);
}
-MaybeObject* Object::ToObject(Isolate* isolate) {
- if (IsJSReceiver()) {
- return this;
- } else if (IsNumber()) {
- Context* native_context = isolate->context()->native_context();
- return CreateJSValue(native_context->number_function(), this);
- } else if (IsBoolean()) {
- Context* native_context = isolate->context()->native_context();
- return CreateJSValue(native_context->boolean_function(), this);
- } else if (IsString()) {
- Context* native_context = isolate->context()->native_context();
- return CreateJSValue(native_context->string_function(), this);
- } else if (IsSymbol()) {
- Context* native_context = isolate->context()->native_context();
- return CreateJSValue(native_context->symbol_function(), this);
+MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Context> native_context) {
+ if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
+ Handle<JSFunction> constructor;
+ if (object->IsNumber()) {
+ constructor = handle(native_context->number_function(), isolate);
+ } else if (object->IsBoolean()) {
+ constructor = handle(native_context->boolean_function(), isolate);
+ } else if (object->IsString()) {
+ constructor = handle(native_context->string_function(), isolate);
+ } else if (object->IsSymbol()) {
+ constructor = handle(native_context->symbol_function(), isolate);
+ } else {
+ return MaybeHandle<JSReceiver>();
}
-
- // Throw a type error.
- return Failure::InternalError();
+ Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
+ Handle<JSValue>::cast(result)->set_value(*object);
+ return result;
}
@@ -131,7 +103,8 @@ bool Object::IsCallable() {
}
-void Object::Lookup(Name* name, LookupResult* result) {
+void Object::Lookup(Handle<Name> name, LookupResult* result) {
+ DisallowHeapAllocation no_gc;
Object* holder = NULL;
if (IsJSReceiver()) {
holder = this;
@@ -155,28 +128,39 @@ void Object::Lookup(Name* name, LookupResult* result) {
}
-Handle<Object> Object::GetPropertyWithReceiver(
- Handle<Object> object,
- Handle<Object> receiver,
- Handle<Name> name,
- PropertyAttributes* attributes) {
- LookupResult lookup(name->GetIsolate());
- object->Lookup(*name, &lookup);
- Handle<Object> result =
- GetProperty(object, receiver, &lookup, name, attributes);
- ASSERT(*attributes <= ABSENT);
- return result;
-}
-
-
-MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
- Name* name,
- PropertyAttributes* attributes) {
- LookupResult result(name->GetIsolate());
- Lookup(name, &result);
- MaybeObject* value = GetProperty(receiver, &result, name, attributes);
- ASSERT(*attributes <= ABSENT);
- return value;
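+// Walks the lookup chain via the LookupIterator and dispatches on its state:
+// proxies defer to their handler, interceptors may produce a result, access
+// checks can reject the lookup, and ordinary properties are read through an
+// accessor or returned as a data value.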
+MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ return JSProxy::GetPropertyWithHandler(
+ it->GetJSProxy(), it->GetReceiver(), it->name());
+ case LookupIterator::INTERCEPTOR: {
+ MaybeHandle<Object> maybe_result = JSObject::GetPropertyWithInterceptor(
+ it->GetHolder(), it->GetReceiver(), it->name());
+ if (!maybe_result.is_null()) return maybe_result;
+ if (it->isolate()->has_pending_exception()) return maybe_result;
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK:
+ if (it->HasAccess(v8::ACCESS_GET)) break;
+ return JSObject::GetPropertyWithFailedAccessCheck(it);
+ case LookupIterator::PROPERTY:
+ if (it->HasProperty()) {
+ switch (it->property_kind()) {
+ case LookupIterator::ACCESSOR:
+ return GetPropertyWithAccessor(
+ it->GetReceiver(), it->name(),
+ it->GetHolder(), it->GetAccessors());
+ case LookupIterator::DATA:
+ return it->GetDataValue();
+ }
+ }
+ break;
+ }
+ }
+ return it->factory()->undefined_value();
}
@@ -248,9 +232,9 @@ static inline To* CheckedCast(void *from) {
}
-static MaybeObject* PerformCompare(const BitmaskCompareDescriptor& descriptor,
- char* ptr,
- Heap* heap) {
+static Handle<Object> PerformCompare(const BitmaskCompareDescriptor& descriptor,
+ char* ptr,
+ Isolate* isolate) {
uint32_t bitmask = descriptor.bitmask;
uint32_t compare_value = descriptor.compare_value;
uint32_t value;
@@ -270,26 +254,27 @@ static MaybeObject* PerformCompare(const BitmaskCompareDescriptor& descriptor,
break;
default:
UNREACHABLE();
- return NULL;
+ return isolate->factory()->undefined_value();
}
- return heap->ToBoolean((bitmask & value) == (bitmask & compare_value));
+ return isolate->factory()->ToBoolean(
+ (bitmask & value) == (bitmask & compare_value));
}
-static MaybeObject* PerformCompare(const PointerCompareDescriptor& descriptor,
- char* ptr,
- Heap* heap) {
+static Handle<Object> PerformCompare(const PointerCompareDescriptor& descriptor,
+ char* ptr,
+ Isolate* isolate) {
uintptr_t compare_value =
reinterpret_cast<uintptr_t>(descriptor.compare_value);
uintptr_t value = *CheckedCast<uintptr_t>(ptr);
- return heap->ToBoolean(compare_value == value);
+ return isolate->factory()->ToBoolean(compare_value == value);
}
-static MaybeObject* GetPrimitiveValue(
+static Handle<Object> GetPrimitiveValue(
const PrimitiveValueDescriptor& descriptor,
char* ptr,
- Heap* heap) {
+ Isolate* isolate) {
int32_t int32_value = 0;
switch (descriptor.data_type) {
case kDescriptorInt8Type:
@@ -309,29 +294,36 @@ static MaybeObject* GetPrimitiveValue(
break;
case kDescriptorUint32Type: {
uint32_t value = *CheckedCast<uint32_t>(ptr);
- return heap->NumberFromUint32(value);
+ AllowHeapAllocation allow_gc;
+ return isolate->factory()->NewNumberFromUint(value);
}
case kDescriptorBoolType: {
uint8_t byte = *CheckedCast<uint8_t>(ptr);
- return heap->ToBoolean(byte & (0x1 << descriptor.bool_offset));
+ return isolate->factory()->ToBoolean(
+ byte & (0x1 << descriptor.bool_offset));
}
case kDescriptorFloatType: {
float value = *CheckedCast<float>(ptr);
- return heap->NumberFromDouble(value);
+ AllowHeapAllocation allow_gc;
+ return isolate->factory()->NewNumber(value);
}
case kDescriptorDoubleType: {
double value = *CheckedCast<double>(ptr);
- return heap->NumberFromDouble(value);
+ AllowHeapAllocation allow_gc;
+ return isolate->factory()->NewNumber(value);
}
}
- return heap->NumberFromInt32(int32_value);
+ AllowHeapAllocation allow_gc;
+ return isolate->factory()->NewNumberFromInt(int32_value);
}
-static MaybeObject* GetDeclaredAccessorProperty(Object* receiver,
- DeclaredAccessorInfo* info,
- Isolate* isolate) {
- char* current = reinterpret_cast<char*>(receiver);
+static Handle<Object> GetDeclaredAccessorProperty(
+ Handle<Object> receiver,
+ Handle<DeclaredAccessorInfo> info,
+ Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ char* current = reinterpret_cast<char*>(*receiver);
DeclaredAccessorDescriptorIterator iterator(info->descriptor());
while (true) {
const DeclaredAccessorDescriptorData* data = iterator.Next();
@@ -339,7 +331,7 @@ static MaybeObject* GetDeclaredAccessorProperty(Object* receiver,
case kDescriptorReturnObject: {
ASSERT(iterator.Complete());
current = *CheckedCast<char*>(current);
- return *CheckedCast<Object*>(current);
+ return handle(*CheckedCast<Object*>(current), isolate);
}
case kDescriptorPointerDereference:
ASSERT(!iterator.Complete());
@@ -362,49 +354,58 @@ static MaybeObject* GetDeclaredAccessorProperty(Object* receiver,
ASSERT(iterator.Complete());
return PerformCompare(data->bitmask_compare_descriptor,
current,
- isolate->heap());
+ isolate);
case kDescriptorPointerCompare:
ASSERT(iterator.Complete());
return PerformCompare(data->pointer_compare_descriptor,
current,
- isolate->heap());
+ isolate);
case kDescriptorPrimitiveValue:
ASSERT(iterator.Complete());
return GetPrimitiveValue(data->primitive_value_descriptor,
current,
- isolate->heap());
+ isolate);
}
}
UNREACHABLE();
- return NULL;
+ return isolate->factory()->undefined_value();
}
Handle<FixedArray> JSObject::EnsureWritableFastElements(
Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->EnsureWritableFastElements(),
- FixedArray);
+ ASSERT(object->HasFastSmiOrObjectElements());
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArray> elems(FixedArray::cast(object->elements()), isolate);
+ if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
+ Handle<FixedArray> writable_elems = isolate->factory()->CopyFixedArrayWithMap(
+ elems, isolate->factory()->fixed_array_map());
+ object->set_elements(*writable_elems);
+ isolate->counters()->cow_arrays_converted()->Increment();
+ return writable_elems;
}
-Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
- Handle<Object> receiver,
- Handle<Object> structure,
- Handle<Name> name) {
- Isolate* isolate = name->GetIsolate();
- // To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually foreign
- // callbacks should be phased out.
- if (structure->IsForeign()) {
- AccessorDescriptor* callback =
- reinterpret_cast<AccessorDescriptor*>(
- Handle<Foreign>::cast(structure)->foreign_address());
- CALL_HEAP_FUNCTION(isolate,
- (callback->getter)(isolate, *receiver, callback->data),
- Object);
- }
+MaybeHandle<Object> JSProxy::GetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<Object> receiver,
+ Handle<Name> name) {
+ Isolate* isolate = proxy->GetIsolate();
+ // TODO(rossberg): adjust once there is a story for symbols vs proxies.
+ if (name->IsSymbol()) return isolate->factory()->undefined_value();
+
+ Handle<Object> args[] = { receiver, name };
+ return CallTrap(
+ proxy, "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
+}
+
+
+MaybeHandle<Object> Object::GetPropertyWithAccessor(Handle<Object> receiver,
+ Handle<Name> name,
+ Handle<JSObject> holder,
+ Handle<Object> structure) {
+ Isolate* isolate = name->GetIsolate();
+ ASSERT(!structure->IsForeign());
// api style callbacks.
if (structure->IsAccessorInfo()) {
Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure);
@@ -414,19 +415,16 @@ Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
isolate->factory()->NewTypeError("incompatible_method_receiver",
HandleVector(args,
ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>::null();
+ return isolate->Throw<Object>(error);
}
// TODO(rossberg): Handling symbols in the API requires changing the API,
// so we do not support it for now.
if (name->IsSymbol()) return isolate->factory()->undefined_value();
if (structure->IsDeclaredAccessorInfo()) {
- CALL_HEAP_FUNCTION(
- isolate,
- GetDeclaredAccessorProperty(*receiver,
- DeclaredAccessorInfo::cast(*structure),
- isolate),
- Object);
+ return GetDeclaredAccessorProperty(
+ receiver,
+ Handle<DeclaredAccessorInfo>::cast(structure),
+ isolate);
}
Handle<ExecutableAccessorInfo> data =
@@ -435,20 +433,19 @@ Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
v8::ToCData<v8::AccessorGetterCallback>(data->getter());
if (call_fun == NULL) return isolate->factory()->undefined_value();
- HandleScope scope(isolate);
- Handle<JSObject> self = Handle<JSObject>::cast(receiver);
Handle<String> key = Handle<String>::cast(name);
- LOG(isolate, ApiNamedPropertyAccess("load", *self, *name));
- PropertyCallbackArguments args(isolate, data->data(), *self, *object);
+ LOG(isolate, ApiNamedPropertyAccess("load", *holder, *name));
+ PropertyCallbackArguments args(isolate, data->data(), *receiver, *holder);
v8::Handle<v8::Value> result =
args.Call(call_fun, v8::Utils::ToLocal(key));
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.IsEmpty()) {
return isolate->factory()->undefined_value();
}
Handle<Object> return_value = v8::Utils::OpenHandle(*result);
return_value->VerifyApiCallResultType();
- return scope.CloseAndEscape(return_value);
+ // Rebox handle before return.
+ return handle(*return_value, isolate);
}
// __defineGetter__ callback
@@ -456,239 +453,209 @@ Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
isolate);
if (getter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- CALL_HEAP_FUNCTION(
- isolate,
- object->GetPropertyWithDefinedGetter(*receiver,
- JSReceiver::cast(*getter)),
- Object);
+ return Object::GetPropertyWithDefinedGetter(
+ receiver, Handle<JSReceiver>::cast(getter));
}
// Getter is not a function.
return isolate->factory()->undefined_value();
}
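// Aside: the "rebox" step above replaces the old scope.CloseAndEscape().
// Rather than escaping an inner HandleScope, the raw object is re-wrapped in
// a handle owned by the caller's scope; a minimal sketch of the idiom:
//
//   Handle<Object> Rebox(Isolate* isolate, Handle<Object> value) {
//     return handle(*value, isolate);  // fresh handle in the current scope
//   }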
-MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
- Name* name_raw) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> receiver(receiver_raw, isolate);
- Handle<Object> name(name_raw, isolate);
-
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return isolate->heap()->undefined_value();
-
- Handle<Object> args[] = { receiver, name };
- Handle<Object> result = CallTrap(
- "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
-
- return *result;
-}
-
-
-Handle<Object> Object::GetProperty(Handle<Object> object,
- Handle<Name> name) {
- // TODO(rossberg): The index test should not be here but in the GetProperty
- // method (or somewhere else entirely). Needs more global clean-up.
- uint32_t index;
+MaybeHandle<Object> Object::SetPropertyWithCallback(Handle<Object> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ Handle<Object> structure,
+ StrictMode strict_mode) {
Isolate* isolate = name->GetIsolate();
- if (name->AsArrayIndex(&index))
- return GetElement(isolate, object, index);
- CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
-}
+ // We should never get here to initialize a const with the hole
+ // value since a const declaration would conflict with the setter.
+ ASSERT(!value->IsTheHole());
+ ASSERT(!structure->IsForeign());
+ if (structure->IsExecutableAccessorInfo()) {
+ // api style callbacks
+ ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(*structure);
+ if (!data->IsCompatibleReceiver(*receiver)) {
+ Handle<Object> args[2] = { name, receiver };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("incompatible_method_receiver",
+ HandleVector(args,
+ ARRAY_SIZE(args)));
+ return isolate->Throw<Object>(error);
+ }
+ // TODO(rossberg): Support symbols in the API.
+ if (name->IsSymbol()) return value;
+ Object* call_obj = data->setter();
+ v8::AccessorSetterCallback call_fun =
+ v8::ToCData<v8::AccessorSetterCallback>(call_obj);
+ if (call_fun == NULL) return value;
+ Handle<String> key = Handle<String>::cast(name);
+ LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
+ PropertyCallbackArguments args(isolate, data->data(), *receiver, *holder);
+ args.Call(call_fun,
+ v8::Utils::ToLocal(key),
+ v8::Utils::ToLocal(value));
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
+ }
-Handle<Object> Object::GetElement(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
- CALL_HEAP_FUNCTION(isolate, object->GetElement(isolate, index), Object);
-}
+ if (structure->IsAccessorPair()) {
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
+ if (setter->IsSpecFunction()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ return SetPropertyWithDefinedSetter(
+ receiver, Handle<JSReceiver>::cast(setter), value);
+ } else {
+ if (strict_mode == SLOPPY) return value;
+ Handle<Object> args[2] = { name, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2));
+ return isolate->Throw<Object>(error);
+ }
+ }
+ // TODO(dcarney): Handle correctly.
+ if (structure->IsDeclaredAccessorInfo()) {
+ return value;
+ }
-MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
- uint32_t index) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return GetPropertyWithHandler(receiver, name);
+ UNREACHABLE();
+ return MaybeHandle<Object>();
}
-Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode) {
- Isolate* isolate = proxy->GetIsolate();
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return SetPropertyWithHandler(
- proxy, receiver, name, value, NONE, strict_mode);
-}
-
+MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
+ Handle<Object> receiver,
+ Handle<JSReceiver> getter) {
+ Isolate* isolate = getter->GetIsolate();
+ Debug* debug = isolate->debug();
+ // Handle stepping into a getter if step into is active.
+ // TODO(rossberg): should this apply to getters that are function proxies?
+ if (debug->StepInActive() && getter->IsJSFunction()) {
+ debug->HandleStepIn(
+ Handle<JSFunction>::cast(getter), Handle<Object>::null(), 0, false);
+ }
-bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
- Isolate* isolate = proxy->GetIsolate();
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return HasPropertyWithHandler(proxy, name);
+ return Execution::Call(isolate, getter, receiver, 0, NULL, true);
}
-MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
- JSReceiver* getter) {
- Isolate* isolate = getter->GetIsolate();
- HandleScope scope(isolate);
- Handle<JSReceiver> fun(getter);
- Handle<Object> self(receiver, isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
+MaybeHandle<Object> Object::SetPropertyWithDefinedSetter(
+ Handle<Object> receiver,
+ Handle<JSReceiver> setter,
+ Handle<Object> value) {
+ Isolate* isolate = setter->GetIsolate();
+
Debug* debug = isolate->debug();
- // Handle stepping into a getter if step into is active.
+ // Handle stepping into a setter if step into is active.
  // TODO(rossberg): should this apply to setters that are function proxies?
- if (debug->StepInActive() && fun->IsJSFunction()) {
+ if (debug->StepInActive() && setter->IsJSFunction()) {
debug->HandleStepIn(
- Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
+ Handle<JSFunction>::cast(setter), Handle<Object>::null(), 0, false);
}
-#endif
- bool has_pending_exception;
- Handle<Object> result = Execution::Call(
- isolate, fun, self, 0, NULL, &has_pending_exception, true);
- // Check for pending exception and return the result.
- if (has_pending_exception) return Failure::Exception();
- return *result;
+ Handle<Object> argv[] = { value };
+ RETURN_ON_EXCEPTION(
+ isolate,
+ Execution::Call(isolate, setter, receiver, ARRAY_SIZE(argv), argv),
+ Object);
+ return value;
}
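// Sketch of the exception plumbing used above: with MaybeHandle results the
// old `if (has_pending_exception) return Failure::Exception();` dance
// becomes macro-based early returns, roughly:
//
//   Handle<Object> result;
//   ASSIGN_RETURN_ON_EXCEPTION(
//       isolate, result,
//       Execution::Call(isolate, fun, receiver, 0, NULL),
//       Object);  // returns an empty MaybeHandle<Object> if the call threw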
-// Only deal with CALLBACKS and INTERCEPTOR
-Handle<Object> JSObject::GetPropertyWithFailedAccessCheck(
- Handle<JSObject> object,
- Handle<Object> receiver,
- LookupResult* result,
- Handle<Name> name,
- PropertyAttributes* attributes) {
- Isolate* isolate = name->GetIsolate();
- if (result->IsProperty()) {
- switch (result->type()) {
- case CALLBACKS: {
- // Only allow API accessors.
- Handle<Object> callback_obj(result->GetCallbackObject(), isolate);
- if (callback_obj->IsAccessorInfo()) {
- if (!AccessorInfo::cast(*callback_obj)->all_can_read()) break;
- *attributes = result->GetAttributes();
- // Fall through to GetPropertyWithCallback.
- } else if (callback_obj->IsAccessorPair()) {
- if (!AccessorPair::cast(*callback_obj)->all_can_read()) break;
- // Fall through to GetPropertyWithCallback.
- } else {
- break;
- }
- Handle<JSObject> holder(result->holder(), isolate);
- return GetPropertyWithCallback(holder, receiver, callback_obj, name);
+static bool FindAllCanReadHolder(LookupIterator* it) {
+ it->skip_interceptor();
+ it->skip_access_check();
+ for (; it->IsFound(); it->Next()) {
+ if (it->state() == LookupIterator::PROPERTY &&
+ it->HasProperty() &&
+ it->property_kind() == LookupIterator::ACCESSOR) {
+ Handle<Object> accessors = it->GetAccessors();
+ if (accessors->IsAccessorInfo()) {
+ if (AccessorInfo::cast(*accessors)->all_can_read()) return true;
+ } else if (accessors->IsAccessorPair()) {
+ if (AccessorPair::cast(*accessors)->all_can_read()) return true;
}
- case NORMAL:
- case FIELD:
- case CONSTANT: {
- // Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r(isolate);
- result->holder()->LookupRealNamedPropertyInPrototypes(*name, &r);
- if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(
- object, receiver, &r, name, attributes);
- }
- break;
- }
- case INTERCEPTOR: {
- // If the object has an interceptor, try real named properties.
- // No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r(isolate);
- result->holder()->LookupRealNamedProperty(*name, &r);
- if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(
- object, receiver, &r, name, attributes);
- }
- break;
- }
- default:
- UNREACHABLE();
}
}
+ return false;
+}
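// Note on the walk above: accessors flagged all_can_read stay readable even
// when the embedder's access check fails, so the iterator is told to skip
// interceptors and access checks and simply scans the chain for an
// AccessorInfo or AccessorPair carrying that flag.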
- // No accessible property found.
- *attributes = ABSENT;
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_GET);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
+
+MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
+ LookupIterator* it) {
+ Handle<JSObject> checked = Handle<JSObject>::cast(it->GetHolder());
+ if (FindAllCanReadHolder(it)) {
+ return GetPropertyWithAccessor(
+ it->GetReceiver(), it->name(), it->GetHolder(), it->GetAccessors());
+ }
+ it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_GET);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
+ return it->factory()->undefined_value();
}
-PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- Name* name,
- bool continue_search) {
- if (result->IsProperty()) {
- switch (result->type()) {
- case CALLBACKS: {
- // Only allow API accessors.
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_read()) {
- return result->GetAttributes();
- }
- } else if (obj->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(obj);
- if (pair->all_can_read()) {
- return result->GetAttributes();
- }
- }
- break;
- }
+PropertyAttributes JSObject::GetPropertyAttributesWithFailedAccessCheck(
+ LookupIterator* it) {
+ Handle<JSObject> checked = Handle<JSObject>::cast(it->GetHolder());
+ if (FindAllCanReadHolder(it)) return it->property_details().attributes();
+ it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_HAS);
+  // TODO(yangguo): Issue 3269, is a check for a scheduled exception missing?
+ return ABSENT;
+}
- case NORMAL:
- case FIELD:
- case CONSTANT: {
- if (!continue_search) break;
- // Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
- if (r.IsProperty()) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
- }
- break;
- }
- case INTERCEPTOR: {
- // If the object has an interceptor, try real named properties.
- // No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r(GetIsolate());
- if (continue_search) {
- result->holder()->LookupRealNamedProperty(name, &r);
- } else {
- result->holder()->LocalLookupRealNamedProperty(name, &r);
- }
- if (!r.IsFound()) break;
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
+static bool FindAllCanWriteHolder(LookupResult* result,
+ Handle<Name> name,
+ bool check_prototype) {
+ if (result->IsInterceptor()) {
+ result->holder()->LookupOwnRealNamedProperty(name, result);
+ }
+
+ while (result->IsProperty()) {
+ if (result->type() == CALLBACKS) {
+ Object* callback_obj = result->GetCallbackObject();
+ if (callback_obj->IsAccessorInfo()) {
+ if (AccessorInfo::cast(callback_obj)->all_can_write()) return true;
+ } else if (callback_obj->IsAccessorPair()) {
+ if (AccessorPair::cast(callback_obj)->all_can_write()) return true;
}
-
- case HANDLER:
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
}
+ if (!check_prototype) break;
+ result->holder()->LookupRealNamedPropertyInPrototypes(name, result);
}
+ return false;
+}
- GetIsolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return ABSENT;
+
+MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Name> name,
+ Handle<Object> value,
+ bool check_prototype,
+ StrictMode strict_mode) {
+ if (check_prototype && !result->IsProperty()) {
+ object->LookupRealNamedPropertyInPrototypes(name, result);
+ }
+
+ if (FindAllCanWriteHolder(result, name, check_prototype)) {
+ Handle<JSObject> holder(result->holder());
+ Handle<Object> callbacks(result->GetCallbackObject(), result->isolate());
+ return SetPropertyWithCallback(
+ object, name, value, holder, callbacks, strict_mode);
+ }
+
+ Isolate* isolate = object->GetIsolate();
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
-Object* JSObject::GetNormalizedProperty(LookupResult* result) {
+Object* JSObject::GetNormalizedProperty(const LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
if (IsGlobalObject()) {
@@ -699,8 +666,22 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
}
+Handle<Object> JSObject::GetNormalizedProperty(Handle<JSObject> object,
+ const LookupResult* result) {
+ ASSERT(!object->HasFastProperties());
+ Isolate* isolate = object->GetIsolate();
+ Handle<Object> value(object->property_dictionary()->ValueAt(
+ result->GetDictionaryEntry()), isolate);
+ if (object->IsGlobalObject()) {
+ value = Handle<Object>(Handle<PropertyCell>::cast(value)->value(), isolate);
+ }
+ ASSERT(!value->IsPropertyCell() && !value->IsCell());
+ return value;
+}
+
+
void JSObject::SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
+ const LookupResult* result,
Handle<Object> value) {
ASSERT(!object->HasFastProperties());
NameDictionary* property_dictionary = object->property_dictionary();
@@ -714,17 +695,6 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
}
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
- Handle<Name> name,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(dict->GetIsolate(),
- dict->Add(*name, *value, details),
- NameDictionary);
-}
-
-
void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
@@ -733,19 +703,19 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<NameDictionary> property_dictionary(object->property_dictionary());
if (!name->IsUniqueName()) {
- name = object->GetIsolate()->factory()->InternalizedStringFromString(
+ name = object->GetIsolate()->factory()->InternalizeString(
Handle<String>::cast(name));
}
- int entry = property_dictionary->FindEntry(*name);
+ int entry = property_dictionary->FindEntry(name);
if (entry == NameDictionary::kNotFound) {
Handle<Object> store_value = value;
if (object->IsGlobalObject()) {
store_value = object->GetIsolate()->factory()->NewPropertyCell(value);
}
- property_dictionary =
- NameDictionaryAdd(property_dictionary, name, store_value, details);
+ property_dictionary = NameDictionary::Add(
+ property_dictionary, name, store_value, details);
object->set_properties(*property_dictionary);
return;
}
@@ -771,25 +741,18 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
// Please note we have to update the property details.
property_dictionary->DetailsAtPut(entry, details);
} else {
- property_dictionary->SetEntry(entry, *name, *value, details);
+ property_dictionary->SetEntry(entry, name, value, details);
}
}
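// Aside: global objects indirect dictionary values through PropertyCells.
// The two store paths, condensed (names as in this file; AddSlowProperty
// below shows the update path):
//
//   // first store of a name on a global object:
//   store_value = isolate->factory()->NewPropertyCell(value);
//   NameDictionary::Add(property_dictionary, name, store_value, details);
//   // later stores to the same name update the existing cell in place:
//   PropertyCell::SetValueInferType(cell, value);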
-// TODO(mstarzinger): Temporary wrapper until target is handlified.
-Handle<NameDictionary> NameDictionaryShrink(Handle<NameDictionary> dict,
- Handle<Name> name) {
- CALL_HEAP_FUNCTION(dict->GetIsolate(), dict->Shrink(*name), NameDictionary);
-}
-
-
Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
Handle<Name> name,
DeleteMode mode) {
ASSERT(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
Handle<NameDictionary> dictionary(object->property_dictionary());
- int entry = dictionary->FindEntry(*name);
+ int entry = dictionary->FindEntry(name);
if (entry != NameDictionary::kNotFound) {
// If we have a global object set the cell to the hole.
if (object->IsGlobalObject()) {
@@ -809,10 +772,11 @@ Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
PropertyCell::SetValueInferType(cell, value);
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
- Handle<Object> deleted(dictionary->DeleteProperty(entry, mode), isolate);
+ Handle<Object> deleted(
+ NameDictionary::DeleteProperty(dictionary, entry, mode));
if (*deleted == isolate->heap()->true_value()) {
Handle<NameDictionary> new_properties =
- NameDictionaryShrink(dictionary, name);
+ NameDictionary::Shrink(dictionary, name);
object->set_properties(*new_properties);
}
return deleted;
@@ -837,203 +801,75 @@ bool JSObject::IsDirty() {
}
-Handle<Object> Object::GetProperty(Handle<Object> object,
- Handle<Object> receiver,
- LookupResult* result,
- Handle<Name> key,
- PropertyAttributes* attributes) {
- Isolate* isolate = result->isolate();
- CALL_HEAP_FUNCTION(
- isolate,
- object->GetProperty(*receiver, result, *key, attributes),
- Object);
-}
-
-
-MaybeObject* Object::GetPropertyOrFail(Handle<Object> object,
- Handle<Object> receiver,
- LookupResult* result,
- Handle<Name> key,
- PropertyAttributes* attributes) {
- Isolate* isolate = result->isolate();
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(
- isolate,
- object->GetProperty(*receiver, result, *key, attributes));
-}
-
-
-// TODO(yangguo): handlify this and get rid of.
-MaybeObject* Object::GetProperty(Object* receiver,
- LookupResult* result,
- Name* name,
- PropertyAttributes* attributes) {
- Isolate* isolate = name->GetIsolate();
- Heap* heap = isolate->heap();
-
-#ifdef DEBUG
- // TODO(mstarzinger): Only because of the AssertNoContextChange, drop as soon
- // as this method has been fully handlified.
- HandleScope scope(isolate);
-#endif
-
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- // Traverse the prototype chain from the current object (this) to
- // the holder and check for access rights. This avoids traversing the
- // objects more than once in case of interceptors, because the
- // holder will always be the interceptor holder and the search may
- // only continue with a current object just after the interceptor
- // holder in the prototype chain.
- // Proxy handlers do not use the proxy's prototype, so we can skip this.
- if (!result->IsHandler()) {
- Object* last = result->IsProperty()
- ? result->holder()
- : Object::cast(heap->null_value());
- ASSERT(this != this->GetPrototype(isolate));
- for (Object* current = this;
- true;
- current = current->GetPrototype(isolate)) {
- if (current->IsAccessCheckNeeded()) {
- // Check if we're allowed to read from the current object. Note
- // that even though we may not actually end up loading the named
- // property from the current object, we still check that we have
- // access to it.
- JSObject* checked = JSObject::cast(current);
- if (!isolate->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
- HandleScope scope(isolate);
- Handle<Object> value = JSObject::GetPropertyWithFailedAccessCheck(
- handle(checked, isolate),
- handle(receiver, isolate),
- result,
- handle(name, isolate),
- attributes);
- RETURN_IF_EMPTY_HANDLE(isolate, value);
- return *value;
- }
- }
- // Stop traversing the chain once we reach the last object in the
- // chain; either the holder of the result or null in case of an
- // absent property.
- if (current == last) break;
- }
- }
-
- if (!result->IsProperty()) {
- *attributes = ABSENT;
- return heap->undefined_value();
- }
- *attributes = result->GetAttributes();
- Object* value;
- switch (result->type()) {
- case NORMAL:
- value = result->holder()->GetNormalizedProperty(result);
- ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? heap->undefined_value() : value;
- case FIELD: {
- MaybeObject* maybe_result = result->holder()->FastPropertyAt(
- result->representation(),
- result->GetFieldIndex().field_index());
- if (!maybe_result->To(&value)) return maybe_result;
- ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? heap->undefined_value() : value;
- }
- case CONSTANT:
- return result->GetConstant();
- case CALLBACKS: {
- HandleScope scope(isolate);
- Handle<Object> value = JSObject::GetPropertyWithCallback(
- handle(result->holder(), isolate),
- handle(receiver, isolate),
- handle(result->GetCallbackObject(), isolate),
- handle(name, isolate));
- RETURN_IF_EMPTY_HANDLE(isolate, value);
- return *value;
- }
- case HANDLER:
- return result->proxy()->GetPropertyWithHandler(receiver, name);
- case INTERCEPTOR: {
- HandleScope scope(isolate);
- Handle<Object> value = JSObject::GetPropertyWithInterceptor(
- handle(result->holder(), isolate),
- handle(receiver, isolate),
- handle(name, isolate),
- attributes);
- RETURN_IF_EMPTY_HANDLE(isolate, value);
- return *value;
- }
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-MaybeObject* Object::GetElementWithReceiver(Isolate* isolate,
- Object* receiver,
- uint32_t index) {
- Heap* heap = isolate->heap();
- Object* holder = this;
+MaybeHandle<Object> Object::GetElementWithReceiver(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> receiver,
+ uint32_t index) {
+ Handle<Object> holder;
// Iterate up the prototype chain until an element is found or the null
// prototype is encountered.
- for (holder = this;
- holder != heap->null_value();
- holder = holder->GetPrototype(isolate)) {
+ for (holder = object;
+ !holder->IsNull();
+ holder = Handle<Object>(holder->GetPrototype(isolate), isolate)) {
if (!holder->IsJSObject()) {
Context* native_context = isolate->context()->native_context();
if (holder->IsNumber()) {
- holder = native_context->number_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->number_function()->instance_prototype(), isolate);
} else if (holder->IsString()) {
- holder = native_context->string_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->string_function()->instance_prototype(), isolate);
} else if (holder->IsSymbol()) {
- holder = native_context->symbol_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->symbol_function()->instance_prototype(), isolate);
} else if (holder->IsBoolean()) {
- holder = native_context->boolean_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->boolean_function()->instance_prototype(), isolate);
} else if (holder->IsJSProxy()) {
- return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
+ return JSProxy::GetElementWithHandler(
+ Handle<JSProxy>::cast(holder), receiver, index);
} else {
// Undefined and null have no indexed properties.
ASSERT(holder->IsUndefined() || holder->IsNull());
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
}
// Inline the case for JSObjects. Doing so significantly improves the
// performance of fetching elements where checking the prototype chain is
// necessary.
- JSObject* js_object = JSObject::cast(holder);
+ Handle<JSObject> js_object = Handle<JSObject>::cast(holder);
// Check access rights if needed.
if (js_object->IsAccessCheckNeeded()) {
- Isolate* isolate = heap->isolate();
if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->undefined_value();
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
}
if (js_object->HasIndexedInterceptor()) {
- return js_object->GetElementWithInterceptor(receiver, index);
+ return JSObject::GetElementWithInterceptor(js_object, receiver, index);
}
- if (js_object->elements() != heap->empty_fixed_array()) {
- MaybeObject* result = js_object->GetElementsAccessor()->Get(
- receiver, js_object, index);
- if (result != heap->the_hole_value()) return result;
+ if (js_object->elements() != isolate->heap()->empty_fixed_array()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ js_object->GetElementsAccessor()->Get(receiver, js_object, index),
+ Object);
+ if (!result->IsTheHole()) return result;
}
}
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
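// Aside: receiver routing above, by example. Indexed reads on primitives
// consult the wrapper's prototype chain, so roughly:
//
//   (42)[0]    // element lookup proceeds on Number.prototype
//   "ab"[5]    // continues on String.prototype after the string itself
//   null/undefined as holder -> no indexed properties; undefined is returned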
Object* Object::GetPrototype(Isolate* isolate) {
+ DisallowHeapAllocation no_alloc;
if (IsSmi()) {
Context* context = isolate->context()->native_context();
return context->number_function()->instance_prototype();
@@ -1065,9 +901,9 @@ Object* Object::GetPrototype(Isolate* isolate) {
}
-Map* Object::GetMarkerMap(Isolate* isolate) {
- if (IsSmi()) return isolate->heap()->heap_number_map();
- return HeapObject::cast(this)->map();
+Handle<Object> Object::GetPrototype(Isolate* isolate,
+ Handle<Object> object) {
+ return handle(object->GetPrototype(isolate), isolate);
}
@@ -1092,11 +928,9 @@ Object* Object::GetHash() {
}
-Handle<Object> Object::GetOrCreateHash(Handle<Object> object,
- Isolate* isolate) {
+Handle<Smi> Object::GetOrCreateHash(Isolate* isolate, Handle<Object> object) {
Handle<Object> hash(object->GetHash(), isolate);
- if (hash->IsSmi())
- return hash;
+ if (hash->IsSmi()) return Handle<Smi>::cast(hash);
ASSERT(object->IsJSReceiver());
return JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver>::cast(object));
@@ -1124,6 +958,25 @@ bool Object::SameValue(Object* other) {
}
+bool Object::SameValueZero(Object* other) {
+ if (other == this) return true;
+
+ // The object is either a number, a name, an odd-ball,
+ // a real JS object, or a Harmony proxy.
+ if (IsNumber() && other->IsNumber()) {
+ double this_value = Number();
+ double other_value = other->Number();
+    // +0 and -0 compare equal, and NaN compares equal to NaN.
+ return this_value == other_value
+ || (std::isnan(this_value) && std::isnan(other_value));
+ }
+ if (IsString() && other->IsString()) {
+ return String::cast(this)->Equals(String::cast(other));
+ }
+ return false;
+}
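// SameValueZero at a glance (worked cases matching the code above):
//
//   SameValueZero(NaN, NaN)   -> true   (plain == would give false)
//   SameValueZero(+0.0, -0.0) -> true   (SameValue distinguishes the zeros)
//   SameValueZero("a", "a")   -> true   (string content equality)
//   anything else             -> pointer identity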
+
+
void Object::ShortPrint(FILE* out) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
@@ -1135,8 +988,6 @@ void Object::ShortPrint(FILE* out) {
void Object::ShortPrint(StringStream* accumulator) {
if (IsSmi()) {
Smi::cast(this)->SmiPrint(accumulator);
- } else if (IsFailure()) {
- Failure::cast(this)->FailurePrint(accumulator);
} else {
HeapObject::cast(this)->HeapObjectShortPrint(accumulator);
}
@@ -1153,16 +1004,6 @@ void Smi::SmiPrint(StringStream* accumulator) {
}
-void Failure::FailurePrint(StringStream* accumulator) {
- accumulator->Add("Failure(%p)", reinterpret_cast<void*>(value()));
-}
-
-
-void Failure::FailurePrint(FILE* out) {
- PrintF(out, "Failure(%p)", reinterpret_cast<void*>(value()));
-}
-
-
// Should a word be prefixed by 'a' or 'an' in order to read naturally in
// English? Returns false for non-ASCII or words that don't start with
// a capital letter. The a/an rule follows pronunciation in English.
@@ -1187,70 +1028,36 @@ static bool AnWord(String* str) {
}
-MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
-#ifdef DEBUG
- // Do not attempt to flatten in debug mode when allocation is not
- // allowed. This is to avoid an assertion failure when allocating.
- // Flattening strings is the only case where we always allow
- // allocation because no GC is performed if the allocation fails.
- if (!AllowHeapAllocation::IsAllowed()) return this;
-#endif
-
- Heap* heap = GetHeap();
- switch (StringShape(this).representation_tag()) {
- case kConsStringTag: {
- ConsString* cs = ConsString::cast(this);
- if (cs->second()->length() == 0) {
- return cs->first();
- }
- // There's little point in putting the flat string in new space if the
- // cons string is in old space. It can never get GCed until there is
- // an old space GC.
- PretenureFlag tenure = heap->InNewSpace(this) ? pretenure : TENURED;
- int len = length();
- Object* object;
- String* result;
- if (IsOneByteRepresentation()) {
- { MaybeObject* maybe_object =
- heap->AllocateRawOneByteString(len, tenure);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- result = String::cast(object);
- String* first = cs->first();
- int first_length = first->length();
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- WriteToFlat(first, dest, 0, first_length);
- String* second = cs->second();
- WriteToFlat(second,
- dest + first_length,
- 0,
- len - first_length);
- } else {
- { MaybeObject* maybe_object =
- heap->AllocateRawTwoByteString(len, tenure);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- result = String::cast(object);
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- String* first = cs->first();
- int first_length = first->length();
- WriteToFlat(first, dest, 0, first_length);
- String* second = cs->second();
- WriteToFlat(second,
- dest + first_length,
- 0,
- len - first_length);
- }
- cs->set_first(result);
- cs->set_second(heap->empty_string(), SKIP_WRITE_BARRIER);
- return result;
- }
- default:
- return this;
+Handle<String> String::SlowFlatten(Handle<ConsString> cons,
+ PretenureFlag pretenure) {
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ ASSERT(cons->second()->length() != 0);
+ Isolate* isolate = cons->GetIsolate();
+ int length = cons->length();
+ PretenureFlag tenure = isolate->heap()->InNewSpace(*cons) ? pretenure
+ : TENURED;
+ Handle<SeqString> result;
+ if (cons->IsOneByteRepresentation()) {
+ Handle<SeqOneByteString> flat = isolate->factory()->NewRawOneByteString(
+ length, tenure).ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ WriteToFlat(*cons, flat->GetChars(), 0, length);
+ result = flat;
+ } else {
+ Handle<SeqTwoByteString> flat = isolate->factory()->NewRawTwoByteString(
+ length, tenure).ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ WriteToFlat(*cons, flat->GetChars(), 0, length);
+ result = flat;
}
+ cons->set_first(*result);
+ cons->set_second(isolate->heap()->empty_string());
+ ASSERT(result->IsFlat());
+ return result;
}
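// Flattening, conceptually (state before/after SlowFlatten):
//
//   before: ConsString{ first: "foo", second: "bar" }
//   after:  ConsString{ first: "foobar", second: "" }  // returns "foobar"
//
// A cons whose second() is empty is treated as flat via first(), so the
// string flattens in place without changing object identity.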
+
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
@@ -1274,39 +1081,50 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool is_ascii = this->IsOneByteRepresentation();
bool is_internalized = this->IsInternalizedString();
- // Morph the object to an external string by adjusting the map and
- // reinitializing the fields.
- if (size >= ExternalString::kSize) {
- this->set_map_no_write_barrier(
- is_internalized
- ? (is_ascii
- ? heap->external_internalized_string_with_one_byte_data_map()
- : heap->external_internalized_string_map())
- : (is_ascii
- ? heap->external_string_with_one_byte_data_map()
- : heap->external_string_map()));
+ // Morph the string to an external string by replacing the map and
+ // reinitializing the fields. This won't work if
+ // - the space the existing string occupies is too small for a regular
+ // external string.
+ // - the existing string is in old pointer space and the backing store of
+ // the external string is not aligned. The GC cannot deal with a field
+ // containing a possibly unaligned address to outside of V8's heap.
+ // In either case we resort to a short external string instead, omitting
+ // the field caching the address of the backing store. When we encounter
+  // short external strings in generated code, we need to bail out to runtime.
+ Map* new_map;
+ if (size < ExternalString::kSize ||
+ heap->old_pointer_space()->Contains(this)) {
+ new_map = is_internalized
+ ? (is_ascii
+ ? heap->
+ short_external_internalized_string_with_one_byte_data_map()
+ : heap->short_external_internalized_string_map())
+ : (is_ascii
+ ? heap->short_external_string_with_one_byte_data_map()
+ : heap->short_external_string_map());
} else {
- this->set_map_no_write_barrier(
- is_internalized
- ? (is_ascii
- ? heap->
- short_external_internalized_string_with_one_byte_data_map()
- : heap->short_external_internalized_string_map())
- : (is_ascii
- ? heap->short_external_string_with_one_byte_data_map()
- : heap->short_external_string_map()));
+ new_map = is_internalized
+ ? (is_ascii
+ ? heap->external_internalized_string_with_one_byte_data_map()
+ : heap->external_internalized_string_map())
+ : (is_ascii
+ ? heap->external_string_with_one_byte_data_map()
+ : heap->external_string_map());
}
+
+ // Byte size of the external String object.
+ int new_size = this->SizeFromMap(new_map);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+
+ // We are storing the new map using release store after creating a filler for
+ // the left-over space to avoid races with the sweeper thread.
+ this->synchronized_set_map(new_map);
+
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_resource(resource);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
- // Fill the remainder of the string with dead wood.
- int new_size = this->Size(); // Byte size of the external String object.
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
- new_size - size);
- }
+ heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR);
return true;
}
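// The map selection above, condensed (the ASCII overload below applies the
// same rule):
//
//   size < ExternalString::kSize   -> short external map (no room for the
//                                     cached resource pointer)
//   string in old pointer space    -> short external map (the GC cannot scan
//                                     a possibly unaligned external pointer)
//   otherwise                      -> regular external map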
@@ -1335,28 +1153,41 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
}
bool is_internalized = this->IsInternalizedString();
- // Morph the object to an external string by adjusting the map and
- // reinitializing the fields. Use short version if space is limited.
- if (size >= ExternalString::kSize) {
- this->set_map_no_write_barrier(
- is_internalized ? heap->external_ascii_internalized_string_map()
- : heap->external_ascii_string_map());
+ // Morph the string to an external string by replacing the map and
+ // reinitializing the fields. This won't work if
+ // - the space the existing string occupies is too small for a regular
+ // external string.
+ // - the existing string is in old pointer space and the backing store of
+ // the external string is not aligned. The GC cannot deal with a field
+ // containing a possibly unaligned address to outside of V8's heap.
+ // In either case we resort to a short external string instead, omitting
+ // the field caching the address of the backing store. When we encounter
+  // short external strings in generated code, we need to bail out to runtime.
+ Map* new_map;
+ if (size < ExternalString::kSize ||
+ heap->old_pointer_space()->Contains(this)) {
+ new_map = is_internalized
+ ? heap->short_external_ascii_internalized_string_map()
+ : heap->short_external_ascii_string_map();
} else {
- this->set_map_no_write_barrier(
- is_internalized ? heap->short_external_ascii_internalized_string_map()
- : heap->short_external_ascii_string_map());
+ new_map = is_internalized
+ ? heap->external_ascii_internalized_string_map()
+ : heap->external_ascii_string_map();
}
+
+ // Byte size of the external String object.
+ int new_size = this->SizeFromMap(new_map);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+
+ // We are storing the new map using release store after creating a filler for
+ // the left-over space to avoid races with the sweeper thread.
+ this->synchronized_set_map(new_map);
+
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_resource(resource);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
- // Fill the remainder of the string with dead wood.
- int new_size = this->Size(); // Byte size of the external String object.
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
- new_size - size);
- }
+ heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR);
return true;
}
@@ -1524,17 +1355,18 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
void JSObject::PrintElementsTransition(
- FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
- ElementsKind to_kind, FixedArrayBase* to_elements) {
+ FILE* file, Handle<JSObject> object,
+ ElementsKind from_kind, Handle<FixedArrayBase> from_elements,
+ ElementsKind to_kind, Handle<FixedArrayBase> to_elements) {
if (from_kind != to_kind) {
PrintF(file, "elements transition [");
PrintElementsKind(file, from_kind);
PrintF(file, " -> ");
PrintElementsKind(file, to_kind);
PrintF(file, "] in ");
- JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
+ JavaScriptFrame::PrintTop(object->GetIsolate(), file, false, true);
PrintF(file, " for ");
- ShortPrint(file);
+ object->ShortPrint(file);
PrintF(file, " from ");
from_elements->ShortPrint(file);
PrintF(file, " to ");
@@ -1551,18 +1383,31 @@ void Map::PrintGeneralization(FILE* file,
int descriptors,
bool constant_to_field,
Representation old_representation,
- Representation new_representation) {
+ Representation new_representation,
+ HeapType* old_field_type,
+ HeapType* new_field_type) {
PrintF(file, "[generalizing ");
constructor_name()->PrintOn(file);
PrintF(file, "] ");
- String::cast(instance_descriptors()->GetKey(modify_index))->PrintOn(file);
- if (constant_to_field) {
- PrintF(file, ":c->f");
+ Name* name = instance_descriptors()->GetKey(modify_index);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
} else {
- PrintF(file, ":%s->%s",
- old_representation.Mnemonic(),
- new_representation.Mnemonic());
+ PrintF(file, "{symbol %p}", static_cast<void*>(name));
}
+ PrintF(file, ":");
+ if (constant_to_field) {
+ PrintF(file, "c");
+ } else {
+ PrintF(file, "%s", old_representation.Mnemonic());
+ PrintF(file, "{");
+ old_field_type->TypePrint(file, HeapType::SEMANTIC_DIM);
+ PrintF(file, "}");
+ }
+ PrintF(file, "->%s", new_representation.Mnemonic());
+ PrintF(file, "{");
+ new_field_type->TypePrint(file, HeapType::SEMANTIC_DIM);
+ PrintF(file, "}");
PrintF(file, " (");
if (strlen(reason) > 0) {
PrintF(file, "%s", reason);
@@ -1595,7 +1440,7 @@ void JSObject::PrintInstanceMigration(FILE* file,
if (name->IsString()) {
String::cast(name)->PrintOn(file);
} else {
- PrintF(file, "???");
+ PrintF(file, "{symbol %p}", static_cast<void*>(name));
}
PrintF(file, " ");
}
@@ -1642,48 +1487,25 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
case FREE_SPACE_TYPE:
accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- accumulator->Add("<ExternalPixelArray[%u]>",
- ExternalPixelArray::cast(this)->length());
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- accumulator->Add("<ExternalByteArray[%u]>",
- ExternalByteArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedByteArray[%u]>",
- ExternalUnsignedByteArray::cast(this)->length());
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- accumulator->Add("<ExternalShortArray[%u]>",
- ExternalShortArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedShortArray[%u]>",
- ExternalUnsignedShortArray::cast(this)->length());
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- accumulator->Add("<ExternalIntArray[%u]>",
- ExternalIntArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedIntArray[%u]>",
- ExternalUnsignedIntArray::cast(this)->length());
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- accumulator->Add("<ExternalFloatArray[%u]>",
- ExternalFloatArray::cast(this)->length());
- break;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- accumulator->Add("<ExternalDoubleArray[%u]>",
- ExternalDoubleArray::cast(this)->length());
+#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ accumulator->Add("<External" #Type "Array[%u]>", \
+ External##Type##Array::cast(this)->length()); \
+ break; \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ accumulator->Add("<Fixed" #Type "Array[%u]>", \
+ Fixed##Type##Array::cast(this)->length()); \
break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_SHORT_PRINT)
+#undef TYPED_ARRAY_SHORT_PRINT
+
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(this);
SmartArrayPointer<char> debug_name =
shared->DebugName()->ToCString();
if (debug_name[0] != 0) {
- accumulator->Add("<SharedFunctionInfo %s>", *debug_name);
+ accumulator->Add("<SharedFunctionInfo %s>", debug_name.get());
} else {
accumulator->Add("<SharedFunctionInfo>");
}
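// Aside: TYPED_ARRAYS(V) invokes V(Type, type, TYPE, ctype, size) once per
// element type, so for Uint8 the macro above expands to roughly:
//
//   case EXTERNAL_UINT8_ARRAY_TYPE:
//     accumulator->Add("<ExternalUint8Array[%u]>",
//                      ExternalUint8Array::cast(this)->length());
//     break;
//   case FIXED_UINT8_ARRAY_TYPE:
//     accumulator->Add("<FixedUint8Array[%u]>",
//                      FixedUint8Array::cast(this)->length());
//     break;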
@@ -1814,6 +1636,8 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_REGEXP_TYPE:
@@ -1854,20 +1678,21 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case SYMBOL_TYPE:
Symbol::BodyDescriptor::IterateBody(this, v);
break;
+
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
case FREE_SPACE_TYPE:
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- case EXTERNAL_BYTE_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- case EXTERNAL_SHORT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- case EXTERNAL_INT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
break;
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
break;
@@ -1923,7 +1748,7 @@ void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
// print that using vsnprintf (which may truncate but never allocate if
// there is no more space in the buffer).
EmbeddedVector<char, 100> buffer;
- OS::SNPrintF(buffer, "%.16g", Number());
+ SNPrintF(buffer, "%.16g", Number());
accumulator->Add("%s", buffer.start());
}
@@ -1962,53 +1787,34 @@ String* JSReceiver::constructor_name() {
}
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<Object> NewStorageFor(Isolate* isolate,
- Handle<Object> object,
- Representation representation) {
- Heap* heap = isolate->heap();
- CALL_HEAP_FUNCTION(isolate,
- object->AllocateNewStorageFor(heap, representation),
- Object);
-}
+MaybeHandle<Map> Map::CopyWithField(Handle<Map> map,
+ Handle<Name> name,
+ Handle<HeapType> type,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag) {
+ ASSERT(DescriptorArray::kNotFound ==
+ map->instance_descriptors()->Search(
+ *name, map->NumberOfOwnDescriptors()));
+ // Ensure the descriptor array does not get too big.
+ if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) {
+ return MaybeHandle<Map>();
+ }
-void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> new_map,
- Handle<Name> name,
- Handle<Object> value,
- int field_index,
- Representation representation) {
- Isolate* isolate = object->GetIsolate();
+ Isolate* isolate = map->GetIsolate();
- // This method is used to transition to a field. If we are transitioning to a
- // double field, allocate new storage.
- Handle<Object> storage = NewStorageFor(isolate, value, representation);
+ // Compute the new index for new field.
+ int index = map->NextFreePropertyIndex();
- if (object->map()->unused_property_fields() == 0) {
- int new_unused = new_map->unused_property_fields();
- Handle<FixedArray> properties(object->properties());
- Handle<FixedArray> values = isolate->factory()->CopySizeFixedArray(
- properties, properties->length() + new_unused + 1);
- object->set_properties(*values);
+ if (map->instance_type() == JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
+ representation = Representation::Tagged();
+ type = HeapType::Any(isolate);
}
- object->set_map(*new_map);
- object->FastPropertyAtPut(field_index, *storage);
-}
-
-
-static MaybeObject* CopyAddFieldDescriptor(Map* map,
- Name* name,
- int index,
- PropertyAttributes attributes,
- Representation representation,
- TransitionFlag flag) {
- Map* new_map;
- FieldDescriptor new_field_desc(name, index, attributes, representation);
- MaybeObject* maybe_map = map->CopyAddDescriptor(&new_field_desc, flag);
- if (!maybe_map->To(&new_map)) return maybe_map;
- int unused_property_fields = map->unused_property_fields() - 1;
+ FieldDescriptor new_field_desc(name, index, type, attributes, representation);
+ Handle<Map> new_map = Map::CopyAddDescriptor(map, &new_field_desc, flag);
+ int unused_property_fields = new_map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
unused_property_fields += JSObject::kFieldsAdded;
}
@@ -2017,16 +1823,19 @@ static MaybeObject* CopyAddFieldDescriptor(Map* map,
}
-static Handle<Map> CopyAddFieldDescriptor(Handle<Map> map,
- Handle<Name> name,
- int index,
- PropertyAttributes attributes,
- Representation representation,
- TransitionFlag flag) {
- CALL_HEAP_FUNCTION(map->GetIsolate(),
- CopyAddFieldDescriptor(
- *map, *name, index, attributes, representation, flag),
- Map);
+MaybeHandle<Map> Map::CopyWithConstant(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ // Ensure the descriptor array does not get too big.
+ if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) {
+ return MaybeHandle<Map>();
+ }
+
+ // Allocate new instance descriptors with (name, constant) added.
+ ConstantDescriptor new_constant_desc(name, constant, attributes);
+ return Map::CopyAddDescriptor(map, &new_constant_desc, flag);
}
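// Note: an empty MaybeHandle<Map> doubles as the "too many descriptors"
// signal; the caller-side pattern (as AddFastProperty below uses it):
//
//   Handle<Map> new_map;
//   if (!Map::CopyWithField(map, name, type, attributes, representation,
//                           flag).ToHandle(&new_map)) {
//     // fall back to slow (dictionary) properties
//   }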
@@ -2038,75 +1847,27 @@ void JSObject::AddFastProperty(Handle<JSObject> object,
ValueType value_type,
TransitionFlag flag) {
ASSERT(!object->IsJSGlobalProxy());
- ASSERT(DescriptorArray::kNotFound ==
- object->map()->instance_descriptors()->Search(
- *name, object->map()->NumberOfOwnDescriptors()));
- // Normalize the object if the name is an actual name (not the
- // hidden strings) and is not a real identifier.
- // Normalize the object if it will have too many fast properties.
- Isolate* isolate = object->GetIsolate();
- if (!name->IsCacheable(isolate) ||
- object->TooManyFastProperties(store_mode)) {
+ MaybeHandle<Map> maybe_map;
+ if (value->IsJSFunction()) {
+ maybe_map = Map::CopyWithConstant(
+ handle(object->map()), name, value, attributes, flag);
+ } else if (!object->TooManyFastProperties(store_mode)) {
+ Isolate* isolate = object->GetIsolate();
+ Representation representation = value->OptimalRepresentation(value_type);
+ maybe_map = Map::CopyWithField(
+ handle(object->map(), isolate), name,
+ value->OptimalType(isolate, representation),
+ attributes, representation, flag);
+ }
+
+ Handle<Map> new_map;
+ if (!maybe_map.ToHandle(&new_map)) {
NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
- AddSlowProperty(object, name, value, attributes);
return;
}
- // Compute the new index for new field.
- int index = object->map()->NextFreePropertyIndex();
-
- // Allocate new instance descriptors with (name, index) added
- if (object->IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
- Representation representation = value->OptimalRepresentation(value_type);
- Handle<Map> new_map = CopyAddFieldDescriptor(
- handle(object->map()), name, index, attributes, representation, flag);
-
- AddFastPropertyUsingMap(object, new_map, name, value, index, representation);
-}
-
-
-static MaybeObject* CopyAddConstantDescriptor(Map* map,
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- TransitionFlag flag) {
- ConstantDescriptor new_constant_desc(name, value, attributes);
- return map->CopyAddDescriptor(&new_constant_desc, flag);
-}
-
-
-static Handle<Map> CopyAddConstantDescriptor(Handle<Map> map,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- TransitionFlag flag) {
- CALL_HEAP_FUNCTION(map->GetIsolate(),
- CopyAddConstantDescriptor(
- *map, *name, *value, attributes, flag),
- Map);
-}
-
-
-void JSObject::AddConstantProperty(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> constant,
- PropertyAttributes attributes,
- TransitionFlag initial_flag) {
- TransitionFlag flag =
- // Do not add transitions to global objects.
- (object->IsGlobalObject() ||
- // Don't add transitions to special properties with non-trivial
- // attributes.
- attributes != NONE)
- ? OMIT_TRANSITION
- : initial_flag;
-
- // Allocate new instance descriptors with (name, constant) added.
- Handle<Map> new_map = CopyAddConstantDescriptor(
- handle(object->map()), name, constant, attributes, flag);
-
- object->set_map(*new_map);
+ JSObject::MigrateToNewProperty(object, new_map, value);
}
@@ -2119,7 +1880,7 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
Handle<NameDictionary> dict(object->property_dictionary());
if (object->IsGlobalObject()) {
// In case name is an orphaned property reuse the cell.
- int entry = dict->FindEntry(*name);
+ int entry = dict->FindEntry(name);
if (entry != NameDictionary::kNotFound) {
Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(entry)));
PropertyCell::SetValueInferType(cell, value);
@@ -2128,7 +1889,7 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
int index = dict->NextEnumerationIndex();
PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
dict->SetNextEnumerationIndex(index + 1);
- dict->SetEntry(entry, *name, *cell, details);
+ dict->SetEntry(entry, name, cell, details);
return;
}
Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(value);
@@ -2136,67 +1897,53 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
value = cell;
}
PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
- Handle<NameDictionary> result = NameDictionaryAdd(dict, name, value, details);
+ Handle<NameDictionary> result =
+ NameDictionary::Add(dict, name, value, details);
if (*dict != *result) object->set_properties(*result);
}
-Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check,
- ValueType value_type,
- StoreMode mode,
- TransitionFlag transition_flag) {
+MaybeHandle<Object> JSObject::AddProperty(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode,
+ JSReceiver::StoreFromKeyed store_mode,
+ ExtensibilityCheck extensibility_check,
+ ValueType value_type,
+ StoreMode mode,
+ TransitionFlag transition_flag) {
ASSERT(!object->IsJSGlobalProxy());
Isolate* isolate = object->GetIsolate();
if (!name->IsUniqueName()) {
- name = isolate->factory()->InternalizedStringFromString(
+ name = isolate->factory()->InternalizeString(
Handle<String>::cast(name));
}
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
!object->map()->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
+ if (strict_mode == SLOPPY) {
return value;
} else {
Handle<Object> args[1] = { name };
Handle<Object> error = isolate->factory()->NewTypeError(
"object_not_extensible", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
}
if (object->HasFastProperties()) {
- // Ensure the descriptor array does not get too big.
- if (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors) {
- // TODO(verwaest): Support other constants.
- // if (mode == ALLOW_AS_CONSTANT &&
- // !value->IsTheHole() &&
- // !value->IsConsString()) {
- if (value->IsJSFunction()) {
- AddConstantProperty(object, name, value, attributes, transition_flag);
- } else {
- AddFastProperty(object, name, value, attributes, store_mode,
- value_type, transition_flag);
- }
- } else {
- // Normalize the object to prevent very large instance descriptors.
- // This eliminates unwanted N^2 allocation and lookup behavior.
- NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
- AddSlowProperty(object, name, value, attributes);
- }
- } else {
+ AddFastProperty(object, name, value, attributes, store_mode,
+ value_type, transition_flag);
+ }
+
+ if (!object->HasFastProperties()) {
AddSlowProperty(object, name, value, attributes);
}
- if (FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ if (object->map()->is_observed() &&
*name != isolate->heap()->hidden_string()) {
Handle<Object> old_value = isolate->factory()->the_hole_value();
EnqueueChangeRecord(object, "add", name, old_value);
@@ -2206,53 +1953,55 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
}
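// Observable effect of the extensibility check above, at the JS level:
//
//   var o = Object.preventExtensions({});
//   o.x = 1;                                    // SLOPPY: silently a no-op
//   (function() { 'use strict'; o.x = 1; })();  // STRICT: TypeError
//                                               //   "object_not_extensible"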
+Context* JSObject::GetCreationContext() {
+ Object* constructor = this->map()->constructor();
+ JSFunction* function;
+ if (!constructor->IsJSFunction()) {
+ // Functions have null as a constructor,
+ // but any JSFunction knows its context immediately.
+ function = JSFunction::cast(this);
+ } else {
+ function = JSFunction::cast(constructor);
+ }
+
+ return function->context()->native_context();
+}
+
+
void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
const char* type_str,
Handle<Name> name,
Handle<Object> old_value) {
+ ASSERT(!object->IsJSGlobalProxy());
+ ASSERT(!object->IsJSGlobalObject());
Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
Handle<String> type = isolate->factory()->InternalizeUtf8String(type_str);
- if (object->IsJSGlobalObject()) {
- object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate);
- }
Handle<Object> args[] = { type, object, name, old_value };
int argc = name.is_null() ? 2 : old_value->IsTheHole() ? 3 : 4;
- bool threw;
Execution::Call(isolate,
Handle<JSFunction>(isolate->observers_notify_change()),
isolate->factory()->undefined_value(),
- argc, args,
- &threw);
- ASSERT(!threw);
+ argc, args).Assert();
}
-Handle<Object> JSObject::SetPropertyPostInterceptor(
+MaybeHandle<Object> JSObject::SetPropertyPostInterceptor(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- // Check local property, ignore interceptor.
- LookupResult result(object->GetIsolate());
- object->LocalLookupRealNamedProperty(*name, &result);
+ StrictMode strict_mode) {
+ // Check own property, ignore interceptor.
+ Isolate* isolate = object->GetIsolate();
+ LookupResult result(isolate);
+ object->LookupOwnRealNamedProperty(name, &result);
if (!result.IsFound()) {
object->map()->LookupTransition(*object, *name, &result);
}
- if (result.IsFound()) {
- // An existing property or a map transition was found. Use set property to
- // handle all these cases.
- return SetPropertyForResult(object, &result, name, value, attributes,
- strict_mode, MAY_BE_STORE_FROM_KEYED);
- }
- bool done = false;
- Handle<Object> result_object = SetPropertyViaPrototypes(
- object, name, value, attributes, strict_mode, &done);
- if (done) return result_object;
- // Add a new real property.
- return AddProperty(object, name, value, attributes, strict_mode);
+ return SetPropertyForResult(object, &result, name, value, attributes,
+ strict_mode, MAY_BE_STORE_FROM_KEYED);
}
@@ -2261,7 +2010,7 @@ static void ReplaceSlowProperty(Handle<JSObject> object,
Handle<Object> value,
PropertyAttributes attributes) {
NameDictionary* dictionary = object->property_dictionary();
- int old_index = dictionary->FindEntry(*name);
+ int old_index = dictionary->FindEntry(name);
int new_enumeration_index = 0; // 0 means "Use the next available index."
if (old_index != -1) {
// All calls to ReplaceSlowProperty have had all transitions removed.
@@ -2289,9 +2038,6 @@ const char* Representation::Mnemonic() const {
}
-enum RightTrimMode { FROM_GC, FROM_MUTATOR };
-
-
static void ZapEndOfFixedArray(Address new_end, int to_trim) {
// If we are doing a big trim in old space then we zap the space.
Object** zap = reinterpret_cast<Object**>(new_end);
@@ -2302,7 +2048,7 @@ static void ZapEndOfFixedArray(Address new_end, int to_trim) {
}
-template<RightTrimMode trim_mode>
+template<Heap::InvocationMode mode>
static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
@@ -2314,7 +2060,7 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
- if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
+ if (mode != Heap::FROM_GC || Heap::ShouldZapGarbage()) {
ZapEndOfFixedArray(new_end, to_trim);
}
@@ -2325,16 +2071,11 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
// we still do it.
heap->CreateFillerObjectAt(new_end, size_delta);
- elms->set_length(len - to_trim);
+  // We are storing the new length using a release store after creating a
+  // filler for the left-over space to avoid races with the sweeper thread.
+ elms->synchronized_set_length(len - to_trim);
- // Maintain marking consistency for IncrementalMarking.
- if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
- if (trim_mode == FROM_GC) {
- MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
- }
+ heap->AdjustLiveBytes(elms->address(), -size_delta, mode);
// The array may not be moved during GC,
// and size has to be adjusted nevertheless.
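The hunk above swaps the plain length store for synchronized_set_length: the filler must be written before the shrunken length becomes visible, or a concurrent sweeper could read stale slots. A minimal standalone sketch of that ordering, using std::atomic in place of V8's synchronized accessors; all names here are illustrative, not V8 API:

    #include <atomic>
    #include <cassert>

    // Hypothetical stand-in for a heap array whose length a concurrent
    // sweeper may read while the mutator trims it.
    struct TrimmableArray {
      std::atomic<int> length;
      int slots[16];
      static const int kFiller = -1;  // Models the filler object.
    };

    // Trim |to_trim| slots off the end: first overwrite the freed tail
    // with filler, then publish the new length with a release store so a
    // reader using an acquire load also observes the filler writes.
    void RightTrim(TrimmableArray* a, int to_trim) {
      int len = a->length.load(std::memory_order_relaxed);
      assert(to_trim >= 0 && to_trim <= len);
      for (int i = len - to_trim; i < len; ++i) {
        a->slots[i] = TrimmableArray::kFiller;
      }
      a->length.store(len - to_trim, std::memory_order_release);
    }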
@@ -2354,16 +2095,14 @@ bool Map::InstancesNeedRewriting(Map* target,
ASSERT(target_number_of_fields >= number_of_fields);
if (target_number_of_fields != number_of_fields) return true;
- if (FLAG_track_double_fields) {
- // If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray* old_desc = instance_descriptors();
- DescriptorArray* new_desc = target->instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if (new_desc->GetDetails(i).representation().IsDouble() &&
- !old_desc->GetDetails(i).representation().IsDouble()) {
- return true;
- }
+ // If smi descriptors were replaced by double descriptors, rewrite.
+ DescriptorArray* old_desc = instance_descriptors();
+ DescriptorArray* new_desc = target->instance_descriptors();
+ int limit = NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if (new_desc->GetDetails(i).representation().IsDouble() &&
+ !old_desc->GetDetails(i).representation().IsDouble()) {
+ return true;
}
}
@@ -2383,6 +2122,18 @@ bool Map::InstancesNeedRewriting(Map* target,
}
+Handle<TransitionArray> Map::SetElementsTransitionMap(
+ Handle<Map> map, Handle<Map> transitioned_map) {
+ Handle<TransitionArray> transitions = TransitionArray::CopyInsert(
+ map,
+ map->GetIsolate()->factory()->elements_transition_symbol(),
+ transitioned_map,
+ FULL_TRANSITION);
+ map->set_transitions(*transitions);
+ return transitions;
+}
+
+
// To migrate an instance to a map:
// - First check whether the instance needs to be rewritten. If not, simply
// change the map.
@@ -2409,7 +2160,9 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
// converted to doubles.
if (!old_map->InstancesNeedRewriting(
*new_map, number_of_fields, inobject, unused)) {
- object->set_map(*new_map);
+ // Writing the new map here does not require synchronization since it does
+ // not change the actual object size.
+ object->synchronized_set_map(*new_map);
return;
}
@@ -2419,9 +2172,14 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors());
- int descriptors = new_map->NumberOfOwnDescriptors();
+ int old_nof = old_map->NumberOfOwnDescriptors();
+ int new_nof = new_map->NumberOfOwnDescriptors();
- for (int i = 0; i < descriptors; i++) {
+ // This method only supports generalizing instances to at least the same
+ // number of properties.
+ ASSERT(old_nof <= new_nof);
+
+ for (int i = 0; i < old_nof; i++) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
PropertyDetails old_details = old_descriptors->GetDetails(i);
@@ -2433,19 +2191,30 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
old_details.type() == FIELD);
Object* raw_value = old_details.type() == CONSTANT
? old_descriptors->GetValue(i)
- : object->RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ : object->RawFastPropertyAt(FieldIndex::ForDescriptor(*old_map, i));
Handle<Object> value(raw_value, isolate);
- if (FLAG_track_double_fields &&
- !old_details.representation().IsDouble() &&
+ if (!old_details.representation().IsDouble() &&
details.representation().IsDouble()) {
if (old_details.representation().IsNone()) {
value = handle(Smi::FromInt(0), isolate);
}
- value = NewStorageFor(isolate, value, details.representation());
+ value = Object::NewStorageFor(isolate, value, details.representation());
+ }
+ ASSERT(!(details.representation().IsDouble() && value->IsSmi()));
+ int target_index = new_descriptors->GetFieldIndex(i) - inobject;
+ if (target_index < 0) target_index += total_size;
+ array->set(target_index, *value);
+ }
+
+ for (int i = old_nof; i < new_nof; i++) {
+ PropertyDetails details = new_descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ Handle<Object> value;
+ if (details.representation().IsDouble()) {
+ value = isolate->factory()->NewHeapNumber(0);
+ } else {
+ value = isolate->factory()->uninitialized_value();
}
- ASSERT(!(FLAG_track_double_fields &&
- details.representation().IsDouble() &&
- value->IsSmi()));
int target_index = new_descriptors->GetFieldIndex(i) - inobject;
if (target_index < 0) target_index += total_size;
array->set(target_index, *value);
@@ -2458,7 +2227,8 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
// avoid overwriting |one_pointer_filler_map|.
int limit = Min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
- object->FastPropertyAtPut(i, array->get(external + i));
+ FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
+ object->FastPropertyAtPut(index, array->get(external + i));
}
// Create filler object past the new instance size.
@@ -2466,35 +2236,36 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
int instance_size_delta = old_map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
Address address = object->address() + new_instance_size;
+
+  // The trimming is performed on a newly allocated object, which is on a
+  // freshly allocated page or on an already swept page. Hence, the sweeper
+  // thread cannot be confused by the filler creation. No synchronization
+  // needed.
isolate->heap()->CreateFillerObjectAt(address, instance_size_delta);
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
if (external > 0) {
- RightTrimFixedArray<FROM_MUTATOR>(isolate->heap(), *array, inobject);
+ RightTrimFixedArray<Heap::FROM_MUTATOR>(isolate->heap(), *array, inobject);
object->set_properties(*array);
}
+  // The trimming is performed on a newly allocated object, which is on a
+  // freshly allocated page or on an already swept page. Hence, the sweeper
+  // thread cannot be confused by the filler creation. No synchronization
+  // needed.
object->set_map(*new_map);
}
-Handle<TransitionArray> Map::AddTransition(Handle<Map> map,
- Handle<Name> key,
- Handle<Map> target,
- SimpleTransitionFlag flag) {
- CALL_HEAP_FUNCTION(map->GetIsolate(),
- map->AddTransition(*key, *target, flag),
- TransitionArray);
-}
-
-
void JSObject::GeneralizeFieldRepresentation(Handle<JSObject> object,
int modify_index,
Representation new_representation,
+ Handle<HeapType> new_field_type,
StoreMode store_mode) {
Handle<Map> new_map = Map::GeneralizeRepresentation(
- handle(object->map()), modify_index, new_representation, store_mode);
+ handle(object->map()), modify_index, new_representation,
+ new_field_type, store_mode);
if (object->map() == *new_map) return;
return MigrateToMap(object, new_map);
}
@@ -2515,20 +2286,26 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
StoreMode store_mode,
PropertyAttributes attributes,
const char* reason) {
+ Isolate* isolate = map->GetIsolate();
Handle<Map> new_map = Copy(map);
DescriptorArray* descriptors = new_map->instance_descriptors();
- descriptors->InitializeRepresentations(Representation::Tagged());
+ int length = descriptors->number_of_descriptors();
+ for (int i = 0; i < length; i++) {
+ descriptors->SetRepresentation(i, Representation::Tagged());
+ if (descriptors->GetDetails(i).type() == FIELD) {
+ descriptors->SetValue(i, HeapType::Any());
+ }
+ }
// Unless the instance is being migrated, ensure that modify_index is a field.
PropertyDetails details = descriptors->GetDetails(modify_index);
if (store_mode == FORCE_FIELD && details.type() != FIELD) {
- FieldDescriptor d(descriptors->GetKey(modify_index),
+ FieldDescriptor d(handle(descriptors->GetKey(modify_index), isolate),
new_map->NumberOfFields(),
attributes,
Representation::Tagged());
- d.SetSortedKeyIndex(details.pointer());
- descriptors->Set(modify_index, &d);
+ descriptors->Replace(modify_index, &d);
int unused_property_fields = new_map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
unused_property_fields += JSObject::kFieldsAdded;
@@ -2537,18 +2314,33 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
}
if (FLAG_trace_generalization) {
+ HeapType* field_type = (details.type() == FIELD)
+ ? map->instance_descriptors()->GetFieldType(modify_index)
+ : NULL;
map->PrintGeneralization(stdout, reason, modify_index,
new_map->NumberOfOwnDescriptors(),
new_map->NumberOfOwnDescriptors(),
details.type() == CONSTANT && store_mode == FORCE_FIELD,
- Representation::Tagged(), Representation::Tagged());
+ details.representation(), Representation::Tagged(),
+ field_type, HeapType::Any());
}
return new_map;
}
+// static
+Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
+ int modify_index,
+ StoreMode store_mode,
+ const char* reason) {
+ PropertyDetails details =
+ map->instance_descriptors()->GetDetails(modify_index);
+ return CopyGeneralizeAllRepresentations(map, modify_index, store_mode,
+ details.attributes(), reason);
+}
+
+
void Map::DeprecateTransitionTree() {
- if (!FLAG_track_fields) return;
if (is_deprecated()) return;
if (HasTransitionArray()) {
TransitionArray* transitions = this->transitions();
@@ -2580,6 +2372,7 @@ void Map::DeprecateTarget(Name* key, DescriptorArray* new_descriptors) {
DescriptorArray* to_replace = instance_descriptors();
Map* current = this;
+ GetHeap()->incremental_marking()->RecordWrites(to_replace);
while (current->instance_descriptors() == to_replace) {
current->SetEnumLength(kInvalidEnumCacheSentinel);
current->set_instance_descriptors(new_descriptors);
@@ -2602,42 +2395,11 @@ Map* Map::FindRootMap() {
}
-// Returns NULL if the updated map is incompatible.
-Map* Map::FindUpdatedMap(int verbatim,
- int length,
- DescriptorArray* descriptors) {
- // This can only be called on roots of transition trees.
- ASSERT(GetBackPointer()->IsUndefined());
-
- Map* current = this;
-
- for (int i = verbatim; i < length; i++) {
- if (!current->HasTransitionArray()) break;
- Name* name = descriptors->GetKey(i);
- TransitionArray* transitions = current->transitions();
- int transition = transitions->Search(name);
- if (transition == TransitionArray::kNotFound) break;
- current = transitions->GetTarget(transition);
- PropertyDetails details = descriptors->GetDetails(i);
- PropertyDetails target_details =
- current->instance_descriptors()->GetDetails(i);
- if (details.attributes() != target_details.attributes()) return NULL;
- if (details.type() == CALLBACKS) {
- if (target_details.type() != CALLBACKS) return NULL;
- if (descriptors->GetValue(i) !=
- current->instance_descriptors()->GetValue(i)) {
- return NULL;
- }
- }
- }
-
- return current;
-}
-
-
Map* Map::FindLastMatchMap(int verbatim,
int length,
DescriptorArray* descriptors) {
+ DisallowHeapAllocation no_allocation;
+
// This can only be called on roots of transition trees.
ASSERT(GetBackPointer()->IsUndefined());
@@ -2653,13 +2415,17 @@ Map* Map::FindLastMatchMap(int verbatim,
Map* next = transitions->GetTarget(transition);
DescriptorArray* next_descriptors = next->instance_descriptors();
- if (next_descriptors->GetValue(i) != descriptors->GetValue(i)) break;
-
PropertyDetails details = descriptors->GetDetails(i);
PropertyDetails next_details = next_descriptors->GetDetails(i);
if (details.type() != next_details.type()) break;
if (details.attributes() != next_details.attributes()) break;
if (!details.representation().Equals(next_details.representation())) break;
+ if (next_details.type() == FIELD) {
+ if (!descriptors->GetFieldType(i)->NowIs(
+ next_descriptors->GetFieldType(i))) break;
+ } else {
+ if (descriptors->GetValue(i) != next_descriptors->GetValue(i)) break;
+ }
current = next;
}
@@ -2667,6 +2433,100 @@ Map* Map::FindLastMatchMap(int verbatim,
}
+Map* Map::FindFieldOwner(int descriptor) {
+ DisallowHeapAllocation no_allocation;
+ ASSERT_EQ(FIELD, instance_descriptors()->GetDetails(descriptor).type());
+ Map* result = this;
+ while (true) {
+ Object* back = result->GetBackPointer();
+ if (back->IsUndefined()) break;
+ Map* parent = Map::cast(back);
+ if (parent->NumberOfOwnDescriptors() <= descriptor) break;
+ result = parent;
+ }
+ return result;
+}
+
+
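FindFieldOwner above climbs back pointers until the parent map no longer owns the descriptor index, i.e. until it reaches the map that introduced the field. A toy model of that walk with plain structs (names hypothetical):

    #include <cstddef>

    // Hypothetical map node: |parent| models the back pointer and
    // |num_own_descriptors| counts descriptors present up to this map.
    struct MapNode {
      MapNode* parent;  // NULL at the root of the transition tree.
      int num_own_descriptors;
    };

    // Returns the oldest ancestor that still describes |descriptor|,
    // i.e. the map on which that field was introduced.
    MapNode* FindFieldOwner(MapNode* map, int descriptor) {
      MapNode* result = map;
      while (result->parent != NULL &&
             result->parent->num_own_descriptors > descriptor) {
        result = result->parent;
      }
      return result;
    }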
+void Map::UpdateDescriptor(int descriptor_number, Descriptor* desc) {
+ DisallowHeapAllocation no_allocation;
+ if (HasTransitionArray()) {
+ TransitionArray* transitions = this->transitions();
+ for (int i = 0; i < transitions->number_of_transitions(); ++i) {
+ transitions->GetTarget(i)->UpdateDescriptor(descriptor_number, desc);
+ }
+ }
+  instance_descriptors()->Replace(descriptor_number, desc);
+}
+
+
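UpdateDescriptor above recurses through every transition target before replacing the descriptor in the current map, so the whole subtree stays consistent. A sketch of the same depth-first propagation over a toy tree (all types invented for illustration):

    #include <vector>

    // Hypothetical map node whose transition targets reuse the parent's
    // descriptor slots at the same indices.
    struct TreeMap {
      std::vector<TreeMap*> transitions;
      std::vector<int> descriptors;  // Stand-in for descriptor entries.
    };

    // Replace descriptor |index| in |map| and, depth-first, in every
    // transition target, mirroring the recursion above.
    void UpdateDescriptor(TreeMap* map, size_t index, int value) {
      for (size_t i = 0; i < map->transitions.size(); ++i) {
        UpdateDescriptor(map->transitions[i], index, value);
      }
      map->descriptors[index] = value;
    }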
+// static
+Handle<HeapType> Map::GeneralizeFieldType(Handle<HeapType> type1,
+ Handle<HeapType> type2,
+ Isolate* isolate) {
+ static const int kMaxClassesPerFieldType = 5;
+ if (type1->NowIs(type2)) return type2;
+ if (type2->NowIs(type1)) return type1;
+ if (type1->NowStable() && type2->NowStable()) {
+ Handle<HeapType> type = HeapType::Union(type1, type2, isolate);
+ if (type->NumClasses() <= kMaxClassesPerFieldType) {
+ ASSERT(type->NowStable());
+ ASSERT(type1->NowIs(type));
+ ASSERT(type2->NowIs(type));
+ return type;
+ }
+ }
+ return HeapType::Any(isolate);
+}
+
+
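The static GeneralizeFieldType below is a join on the field-type lattice: keep the more general of the two types if one subsumes the other, union them while the class count stays at or below kMaxClassesPerFieldType, and widen to Any otherwise. A self-contained sketch of that policy over plain string sets; the stability checks are omitted and every name besides the 5-class cap is hypothetical:

    #include <set>
    #include <string>

    // Hypothetical field type: either "any" or a finite set of classes.
    struct FieldType {
      bool is_any;
      std::set<std::string> classes;
      FieldType() : is_any(false) {}
      bool NowIs(const FieldType& other) const {
        if (other.is_any) return true;
        if (is_any) return false;
        for (std::set<std::string>::const_iterator it = classes.begin();
             it != classes.end(); ++it) {
          if (other.classes.count(*it) == 0) return false;
        }
        return true;
      }
    };

    FieldType Generalize(const FieldType& t1, const FieldType& t2) {
      static const size_t kMaxClassesPerFieldType = 5;
      if (t1.NowIs(t2)) return t2;  // t2 already subsumes t1.
      if (t2.NowIs(t1)) return t1;  // ...and vice versa.
      FieldType u = t1;             // Both finite here: take the union...
      u.classes.insert(t2.classes.begin(), t2.classes.end());
      if (u.classes.size() <= kMaxClassesPerFieldType) return u;
      FieldType any;                // ...or widen to Any when too large.
      any.is_any = true;
      return any;
    }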
+// static
+void Map::GeneralizeFieldType(Handle<Map> map,
+ int modify_index,
+ Handle<HeapType> new_field_type) {
+ Isolate* isolate = map->GetIsolate();
+
+ // Check if we actually need to generalize the field type at all.
+ Handle<HeapType> old_field_type(
+ map->instance_descriptors()->GetFieldType(modify_index), isolate);
+ if (new_field_type->NowIs(old_field_type)) {
+ ASSERT(Map::GeneralizeFieldType(old_field_type,
+ new_field_type,
+ isolate)->NowIs(old_field_type));
+ return;
+ }
+
+ // Determine the field owner.
+ Handle<Map> field_owner(map->FindFieldOwner(modify_index), isolate);
+ Handle<DescriptorArray> descriptors(
+ field_owner->instance_descriptors(), isolate);
+ ASSERT_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
+
+ // Determine the generalized new field type.
+ new_field_type = Map::GeneralizeFieldType(
+ old_field_type, new_field_type, isolate);
+
+ PropertyDetails details = descriptors->GetDetails(modify_index);
+ FieldDescriptor d(handle(descriptors->GetKey(modify_index), isolate),
+ descriptors->GetFieldIndex(modify_index),
+ new_field_type,
+ details.attributes(),
+ details.representation());
+ field_owner->UpdateDescriptor(modify_index, &d);
+ field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kFieldTypeGroup);
+
+ if (FLAG_trace_generalization) {
+ map->PrintGeneralization(
+ stdout, "field type generalization",
+ modify_index, map->NumberOfOwnDescriptors(),
+ map->NumberOfOwnDescriptors(), false,
+ details.representation(), details.representation(),
+ *old_field_type, *new_field_type);
+ }
+}
+
+
// Generalize the representation of the descriptor at |modify_index|.
// This method rewrites the transition tree to reflect the new change. To avoid
// high degrees of polymorphism, and to stabilize quickly, on every rewrite
@@ -2674,22 +2534,28 @@ Map* Map::FindLastMatchMap(int verbatim,
// (partial) version of the type in the transition tree.
// To do this, on each rewrite:
// - Search the root of the transition tree using FindRootMap.
-// - Find |updated|, the newest matching version of this map using
-// FindUpdatedMap. This uses the keys in the own map's descriptor array to
-// walk the transition tree.
-// - Merge/generalize the descriptor array of the current map and |updated|.
-// - Generalize the |modify_index| descriptor using |new_representation|.
-// - Walk the tree again starting from the root towards |updated|. Stop at
+// - Find |target_map|, the newest matching version of this map using the keys
+// in the |old_map|'s descriptor array to walk the transition tree.
+// - Merge/generalize the descriptor array of the |old_map| and |target_map|.
+// - Generalize the |modify_index| descriptor using |new_representation| and
+// |new_field_type|.
+// - Walk the tree again starting from the root towards |target_map|. Stop at
// |split_map|, the first map whose descriptor array does not match the merged
// descriptor array.
-// - If |updated| == |split_map|, |updated| is in the expected state. Return it.
-// - Otherwise, invalidate the outdated transition target from |updated|, and
+// - If |target_map| == |split_map|, |target_map| is in the expected state.
+// Return it.
+// - Otherwise, invalidate the outdated transition target from |target_map|, and
// replace its transition tree with a new branch for the updated descriptors.
Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
int modify_index,
Representation new_representation,
+ Handle<HeapType> new_field_type,
StoreMode store_mode) {
- Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
+ Isolate* isolate = old_map->GetIsolate();
+
+ Handle<DescriptorArray> old_descriptors(
+ old_map->instance_descriptors(), isolate);
+ int old_nof = old_map->NumberOfOwnDescriptors();
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
Representation old_representation = old_details.representation();
@@ -2700,89 +2566,278 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
if (old_representation.IsNone() &&
!new_representation.IsNone() &&
!new_representation.IsDouble()) {
+ ASSERT(old_details.type() == FIELD);
+ ASSERT(old_descriptors->GetFieldType(modify_index)->NowIs(
+ HeapType::None()));
+ if (FLAG_trace_generalization) {
+ old_map->PrintGeneralization(
+ stdout, "uninitialized field",
+ modify_index, old_map->NumberOfOwnDescriptors(),
+ old_map->NumberOfOwnDescriptors(), false,
+ old_representation, new_representation,
+ old_descriptors->GetFieldType(modify_index), *new_field_type);
+ }
old_descriptors->SetRepresentation(modify_index, new_representation);
+ old_descriptors->SetValue(modify_index, *new_field_type);
return old_map;
}
- int descriptors = old_map->NumberOfOwnDescriptors();
- Handle<Map> root_map(old_map->FindRootMap());
-
// Check the state of the root map.
+ Handle<Map> root_map(old_map->FindRootMap(), isolate);
if (!old_map->EquivalentToForTransition(*root_map)) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- old_details.attributes(), "not equivalent");
+ return CopyGeneralizeAllRepresentations(
+ old_map, modify_index, store_mode, "not equivalent");
+ }
+ int root_nof = root_map->NumberOfOwnDescriptors();
+ if (modify_index < root_nof) {
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ if ((old_details.type() != FIELD && store_mode == FORCE_FIELD) ||
+ (old_details.type() == FIELD &&
+ (!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
+ !new_representation.fits_into(old_details.representation())))) {
+ return CopyGeneralizeAllRepresentations(
+ old_map, modify_index, store_mode, "root modification");
+ }
+ }
+
+ Handle<Map> target_map = root_map;
+ for (int i = root_nof; i < old_nof; ++i) {
+ int j = target_map->SearchTransition(old_descriptors->GetKey(i));
+ if (j == TransitionArray::kNotFound) break;
+ Handle<Map> tmp_map(target_map->GetTransition(j), isolate);
+ Handle<DescriptorArray> tmp_descriptors = handle(
+ tmp_map->instance_descriptors(), isolate);
+
+ // Check if target map is incompatible.
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+ PropertyType old_type = old_details.type();
+ PropertyType tmp_type = tmp_details.type();
+ if (tmp_details.attributes() != old_details.attributes() ||
+ ((tmp_type == CALLBACKS || old_type == CALLBACKS) &&
+ (tmp_type != old_type ||
+ tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i)))) {
+ return CopyGeneralizeAllRepresentations(
+ old_map, modify_index, store_mode, "incompatible");
+ }
+ Representation old_representation = old_details.representation();
+ Representation tmp_representation = tmp_details.representation();
+ if (!old_representation.fits_into(tmp_representation) ||
+ (!new_representation.fits_into(tmp_representation) &&
+ modify_index == i)) {
+ break;
+ }
+ if (tmp_type == FIELD) {
+ // Generalize the field type as necessary.
+ Handle<HeapType> old_field_type = (old_type == FIELD)
+ ? handle(old_descriptors->GetFieldType(i), isolate)
+ : old_descriptors->GetValue(i)->OptimalType(
+ isolate, tmp_representation);
+ if (modify_index == i) {
+ old_field_type = GeneralizeFieldType(
+ new_field_type, old_field_type, isolate);
+ }
+ GeneralizeFieldType(tmp_map, i, old_field_type);
+ } else if (tmp_type == CONSTANT) {
+ if (old_type != CONSTANT ||
+ old_descriptors->GetConstant(i) != tmp_descriptors->GetConstant(i)) {
+ break;
+ }
+ } else {
+ ASSERT_EQ(tmp_type, old_type);
+ ASSERT_EQ(tmp_descriptors->GetValue(i), old_descriptors->GetValue(i));
+ }
+ target_map = tmp_map;
+ }
+
+ // Directly change the map if the target map is more general.
+ Handle<DescriptorArray> target_descriptors(
+ target_map->instance_descriptors(), isolate);
+ int target_nof = target_map->NumberOfOwnDescriptors();
+ if (target_nof == old_nof &&
+ (store_mode != FORCE_FIELD ||
+ target_descriptors->GetDetails(modify_index).type() == FIELD)) {
+ ASSERT(modify_index < target_nof);
+ ASSERT(new_representation.fits_into(
+ target_descriptors->GetDetails(modify_index).representation()));
+ ASSERT(target_descriptors->GetDetails(modify_index).type() != FIELD ||
+ new_field_type->NowIs(
+ target_descriptors->GetFieldType(modify_index)));
+ return target_map;
+ }
+
+ // Find the last compatible target map in the transition tree.
+ for (int i = target_nof; i < old_nof; ++i) {
+ int j = target_map->SearchTransition(old_descriptors->GetKey(i));
+ if (j == TransitionArray::kNotFound) break;
+ Handle<Map> tmp_map(target_map->GetTransition(j), isolate);
+ Handle<DescriptorArray> tmp_descriptors(
+ tmp_map->instance_descriptors(), isolate);
+
+ // Check if target map is compatible.
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+ if (tmp_details.attributes() != old_details.attributes() ||
+ ((tmp_details.type() == CALLBACKS || old_details.type() == CALLBACKS) &&
+ (tmp_details.type() != old_details.type() ||
+ tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i)))) {
+ return CopyGeneralizeAllRepresentations(
+ old_map, modify_index, store_mode, "incompatible");
+ }
+ target_map = tmp_map;
}
+ target_nof = target_map->NumberOfOwnDescriptors();
+ target_descriptors = handle(target_map->instance_descriptors(), isolate);
- int verbatim = root_map->NumberOfOwnDescriptors();
-
- if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- old_details.attributes(), "root modification");
+ // Allocate a new descriptor array large enough to hold the required
+  // descriptors, and at least as large as the old descriptor array.
+ int new_slack = Max(
+ old_nof, old_descriptors->number_of_descriptors()) - old_nof;
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::Allocate(
+ isolate, old_nof, new_slack);
+ ASSERT(new_descriptors->length() > target_descriptors->length() ||
+ new_descriptors->NumberOfSlackDescriptors() > 0 ||
+ new_descriptors->number_of_descriptors() ==
+ old_descriptors->number_of_descriptors());
+ ASSERT(new_descriptors->number_of_descriptors() == old_nof);
+
+ // 0 -> |root_nof|
+ int current_offset = 0;
+ for (int i = 0; i < root_nof; ++i) {
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ if (old_details.type() == FIELD) current_offset++;
+ Descriptor d(handle(old_descriptors->GetKey(i), isolate),
+ handle(old_descriptors->GetValue(i), isolate),
+ old_details);
+ new_descriptors->Set(i, &d);
}
- Map* raw_updated = root_map->FindUpdatedMap(
- verbatim, descriptors, *old_descriptors);
- if (raw_updated == NULL) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- old_details.attributes(), "incompatible");
+ // |root_nof| -> |target_nof|
+ for (int i = root_nof; i < target_nof; ++i) {
+ Handle<Name> target_key(target_descriptors->GetKey(i), isolate);
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ PropertyDetails target_details = target_descriptors->GetDetails(i);
+ target_details = target_details.CopyWithRepresentation(
+ old_details.representation().generalize(
+ target_details.representation()));
+ if (modify_index == i) {
+ target_details = target_details.CopyWithRepresentation(
+ new_representation.generalize(target_details.representation()));
+ }
+ ASSERT_EQ(old_details.attributes(), target_details.attributes());
+ if (old_details.type() == FIELD ||
+ target_details.type() == FIELD ||
+ (modify_index == i && store_mode == FORCE_FIELD) ||
+ (target_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
+ Handle<HeapType> old_field_type = (old_details.type() == FIELD)
+ ? handle(old_descriptors->GetFieldType(i), isolate)
+ : old_descriptors->GetValue(i)->OptimalType(
+ isolate, target_details.representation());
+ Handle<HeapType> target_field_type = (target_details.type() == FIELD)
+ ? handle(target_descriptors->GetFieldType(i), isolate)
+ : target_descriptors->GetValue(i)->OptimalType(
+ isolate, target_details.representation());
+ target_field_type = GeneralizeFieldType(
+ target_field_type, old_field_type, isolate);
+ if (modify_index == i) {
+ target_field_type = GeneralizeFieldType(
+ target_field_type, new_field_type, isolate);
+ }
+ FieldDescriptor d(target_key,
+ current_offset++,
+ target_field_type,
+ target_details.attributes(),
+ target_details.representation());
+ new_descriptors->Set(i, &d);
+ } else {
+ ASSERT_NE(FIELD, target_details.type());
+ Descriptor d(target_key,
+ handle(target_descriptors->GetValue(i), isolate),
+ target_details);
+ new_descriptors->Set(i, &d);
+ }
}
- Handle<Map> updated(raw_updated);
- Handle<DescriptorArray> updated_descriptors(updated->instance_descriptors());
-
- int valid = updated->NumberOfOwnDescriptors();
-
- // Directly change the map if the target map is more general. Ensure that the
- // target type of the modify_index is a FIELD, unless we are migrating.
- if (updated_descriptors->IsMoreGeneralThan(
- verbatim, valid, descriptors, *old_descriptors) &&
- (store_mode == ALLOW_AS_CONSTANT ||
- updated_descriptors->GetDetails(modify_index).type() == FIELD)) {
- Representation updated_representation =
- updated_descriptors->GetDetails(modify_index).representation();
- if (new_representation.fits_into(updated_representation)) return updated;
+ // |target_nof| -> |old_nof|
+ for (int i = target_nof; i < old_nof; ++i) {
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ Handle<Name> old_key(old_descriptors->GetKey(i), isolate);
+ if (modify_index == i) {
+ old_details = old_details.CopyWithRepresentation(
+ new_representation.generalize(old_details.representation()));
+ }
+ if (old_details.type() == FIELD) {
+ Handle<HeapType> old_field_type(
+ old_descriptors->GetFieldType(i), isolate);
+ if (modify_index == i) {
+ old_field_type = GeneralizeFieldType(
+ old_field_type, new_field_type, isolate);
+ }
+ FieldDescriptor d(old_key,
+ current_offset++,
+ old_field_type,
+ old_details.attributes(),
+ old_details.representation());
+ new_descriptors->Set(i, &d);
+ } else {
+ ASSERT(old_details.type() == CONSTANT || old_details.type() == CALLBACKS);
+ if (modify_index == i && store_mode == FORCE_FIELD) {
+ FieldDescriptor d(old_key,
+ current_offset++,
+ GeneralizeFieldType(
+ old_descriptors->GetValue(i)->OptimalType(
+ isolate, old_details.representation()),
+ new_field_type, isolate),
+ old_details.attributes(),
+ old_details.representation());
+ new_descriptors->Set(i, &d);
+ } else {
+ ASSERT_NE(FIELD, old_details.type());
+ Descriptor d(old_key,
+ handle(old_descriptors->GetValue(i), isolate),
+ old_details);
+ new_descriptors->Set(i, &d);
+ }
+ }
}
- Handle<DescriptorArray> new_descriptors = DescriptorArray::Merge(
- updated_descriptors, verbatim, valid, descriptors, modify_index,
- store_mode, old_descriptors);
- ASSERT(store_mode == ALLOW_AS_CONSTANT ||
- new_descriptors->GetDetails(modify_index).type() == FIELD);
+ new_descriptors->Sort();
- old_representation =
- new_descriptors->GetDetails(modify_index).representation();
- Representation updated_representation =
- new_representation.generalize(old_representation);
- if (!updated_representation.Equals(old_representation)) {
- new_descriptors->SetRepresentation(modify_index, updated_representation);
- }
+ ASSERT(store_mode != FORCE_FIELD ||
+ new_descriptors->GetDetails(modify_index).type() == FIELD);
Handle<Map> split_map(root_map->FindLastMatchMap(
- verbatim, descriptors, *new_descriptors));
+ root_nof, old_nof, *new_descriptors), isolate);
+ int split_nof = split_map->NumberOfOwnDescriptors();
+ ASSERT_NE(old_nof, split_nof);
- int split_descriptors = split_map->NumberOfOwnDescriptors();
- // This is shadowed by |updated_descriptors| being more general than
- // |old_descriptors|.
- ASSERT(descriptors != split_descriptors);
-
- int descriptor = split_descriptors;
split_map->DeprecateTarget(
- old_descriptors->GetKey(descriptor), *new_descriptors);
+ old_descriptors->GetKey(split_nof), *new_descriptors);
if (FLAG_trace_generalization) {
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ PropertyDetails new_details = new_descriptors->GetDetails(modify_index);
+ Handle<HeapType> old_field_type = (old_details.type() == FIELD)
+ ? handle(old_descriptors->GetFieldType(modify_index), isolate)
+ : HeapType::Constant(handle(old_descriptors->GetValue(modify_index),
+ isolate), isolate);
+ Handle<HeapType> new_field_type = (new_details.type() == FIELD)
+ ? handle(new_descriptors->GetFieldType(modify_index), isolate)
+ : HeapType::Constant(handle(new_descriptors->GetValue(modify_index),
+ isolate), isolate);
old_map->PrintGeneralization(
- stdout, "", modify_index, descriptor, descriptors,
- old_descriptors->GetDetails(modify_index).type() == CONSTANT &&
- store_mode == FORCE_FIELD,
- old_representation, updated_representation);
+ stdout, "", modify_index, split_nof, old_nof,
+ old_details.type() == CONSTANT && store_mode == FORCE_FIELD,
+ old_details.representation(), new_details.representation(),
+ *old_field_type, *new_field_type);
}
// Add missing transitions.
Handle<Map> new_map = split_map;
- for (; descriptor < descriptors; descriptor++) {
- new_map = Map::CopyInstallDescriptors(new_map, descriptor, new_descriptors);
+ for (int i = split_nof; i < old_nof; ++i) {
+ new_map = CopyInstallDescriptors(new_map, i, new_descriptors);
}
-
new_map->set_owns_descriptors(true);
return new_map;
}
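The rewritten GeneralizeRepresentation above leans on Representation::fits_into and generalize at every step. A minimal model of such a lattice, assuming the simplified ordering None < Smi < Double < Tagged with HeapObject incomparable to Smi and Double; V8's real lattice has more points, so this is a sketch, not its definition:

    #include <cassert>

    // Hypothetical simplified representation lattice:
    //
    //        Tagged
    //       /      \
    //   Double   HeapObject
    //      |
    //     Smi
    //      |
    //     None
    enum Rep { kNone, kSmi, kDouble, kHeapObject, kTagged };

    bool FitsInto(Rep a, Rep b) {
      if (a == b || a == kNone || b == kTagged) return true;
      return a == kSmi && b == kDouble;  // Smis widen to doubles.
    }

    // Least upper bound: the most specific representation both fit into.
    Rep Generalize(Rep a, Rep b) {
      if (FitsInto(a, b)) return b;
      if (FitsInto(b, a)) return a;
      return kTagged;  // Incomparable kinds meet only at Tagged.
    }

    int main() {
      assert(Generalize(kSmi, kDouble) == kDouble);
      assert(Generalize(kDouble, kHeapObject) == kTagged);
      assert(FitsInto(kNone, kSmi));
      return 0;
    }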
@@ -2790,66 +2845,102 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
// Generalize the representation of all FIELD descriptors.
Handle<Map> Map::GeneralizeAllFieldRepresentations(
- Handle<Map> map,
- Representation new_representation) {
+ Handle<Map> map) {
Handle<DescriptorArray> descriptors(map->instance_descriptors());
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() == FIELD) {
- map = GeneralizeRepresentation(map, i, new_representation, FORCE_FIELD);
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); ++i) {
+ if (descriptors->GetDetails(i).type() == FIELD) {
+ map = GeneralizeRepresentation(map, i, Representation::Tagged(),
+ HeapType::Any(map->GetIsolate()),
+ FORCE_FIELD);
}
}
return map;
}
-Handle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
+// static
+MaybeHandle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
Handle<Map> proto_map(map);
while (proto_map->prototype()->IsJSObject()) {
Handle<JSObject> holder(JSObject::cast(proto_map->prototype()));
- if (holder->map()->is_deprecated()) {
- JSObject::TryMigrateInstance(holder);
- }
proto_map = Handle<Map>(holder->map());
+ if (proto_map->is_deprecated() && JSObject::TryMigrateInstance(holder)) {
+ proto_map = Handle<Map>(holder->map());
+ }
}
return CurrentMapForDeprecatedInternal(map);
}
-Handle<Map> Map::CurrentMapForDeprecatedInternal(Handle<Map> map) {
- if (!map->is_deprecated()) return map;
-
+// static
+MaybeHandle<Map> Map::CurrentMapForDeprecatedInternal(Handle<Map> old_map) {
DisallowHeapAllocation no_allocation;
- DescriptorArray* old_descriptors = map->instance_descriptors();
+ DisallowDeoptimization no_deoptimization(old_map->GetIsolate());
- int descriptors = map->NumberOfOwnDescriptors();
- Map* root_map = map->FindRootMap();
+ if (!old_map->is_deprecated()) return old_map;
// Check the state of the root map.
- if (!map->EquivalentToForTransition(root_map)) return Handle<Map>();
- int verbatim = root_map->NumberOfOwnDescriptors();
+ Map* root_map = old_map->FindRootMap();
+ if (!old_map->EquivalentToForTransition(root_map)) return MaybeHandle<Map>();
+ int root_nof = root_map->NumberOfOwnDescriptors();
- Map* updated = root_map->FindUpdatedMap(
- verbatim, descriptors, old_descriptors);
- if (updated == NULL) return Handle<Map>();
+ int old_nof = old_map->NumberOfOwnDescriptors();
+ DescriptorArray* old_descriptors = old_map->instance_descriptors();
- DescriptorArray* updated_descriptors = updated->instance_descriptors();
- int valid = updated->NumberOfOwnDescriptors();
- if (!updated_descriptors->IsMoreGeneralThan(
- verbatim, valid, descriptors, old_descriptors)) {
- return Handle<Map>();
- }
+ Map* new_map = root_map;
+ for (int i = root_nof; i < old_nof; ++i) {
+ int j = new_map->SearchTransition(old_descriptors->GetKey(i));
+ if (j == TransitionArray::kNotFound) return MaybeHandle<Map>();
+ new_map = new_map->GetTransition(j);
+ DescriptorArray* new_descriptors = new_map->instance_descriptors();
+
+ PropertyDetails new_details = new_descriptors->GetDetails(i);
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ if (old_details.attributes() != new_details.attributes() ||
+ !old_details.representation().fits_into(new_details.representation())) {
+ return MaybeHandle<Map>();
+ }
+ PropertyType new_type = new_details.type();
+ PropertyType old_type = old_details.type();
+ Object* new_value = new_descriptors->GetValue(i);
+ Object* old_value = old_descriptors->GetValue(i);
+ switch (new_type) {
+ case FIELD:
+ if ((old_type == FIELD &&
+ !HeapType::cast(old_value)->NowIs(HeapType::cast(new_value))) ||
+ (old_type == CONSTANT &&
+ !HeapType::cast(new_value)->NowContains(old_value)) ||
+ (old_type == CALLBACKS &&
+ !HeapType::Any()->Is(HeapType::cast(new_value)))) {
+ return MaybeHandle<Map>();
+ }
+ break;
+
+ case CONSTANT:
+ case CALLBACKS:
+ if (old_type != new_type || old_value != new_value) {
+ return MaybeHandle<Map>();
+ }
+ break;
- return handle(updated);
+ case NORMAL:
+ case HANDLER:
+ case INTERCEPTOR:
+ case NONEXISTENT:
+ UNREACHABLE();
+ }
+ }
+ if (new_map->NumberOfOwnDescriptors() != old_nof) return MaybeHandle<Map>();
+ return handle(new_map);
}
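CurrentMapForDeprecatedInternal above replays the deprecated map's keys over the root's transition tree and returns an empty handle the moment a transition is missing or incompatible. A toy replay over string keys, with the attribute/representation/type checks collapsed into a comment (all names hypothetical):

    #include <cstddef>
    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical map node: descriptor keys accumulated so far plus
    // outgoing transitions keyed by property name.
    struct MigrMap {
      std::vector<std::string> keys;
      std::map<std::string, MigrMap*> transitions;
    };

    // Walks from |root| along |old_map|'s keys; NULL models the empty
    // MaybeHandle<Map>() returned when some step does not match.
    MigrMap* FindUpdatedMap(MigrMap* root, const MigrMap* old_map) {
      MigrMap* current = root;
      for (size_t i = root->keys.size(); i < old_map->keys.size(); ++i) {
        std::map<std::string, MigrMap*>::iterator it =
            current->transitions.find(old_map->keys[i]);
        if (it == current->transitions.end()) return NULL;  // kNotFound.
        current = it->second;
        // The real code additionally rejects mismatched attributes,
        // representations and field types here before continuing.
      }
      if (current->keys.size() != old_map->keys.size()) return NULL;
      return current;
    }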
-Handle<Object> JSObject::SetPropertyWithInterceptor(
+MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
Isolate* isolate = object->GetIsolate();
@@ -2867,24 +2958,22 @@ Handle<Object> JSObject::SetPropertyWithInterceptor(
v8::Handle<v8::Value> result = args.Call(setter,
v8::Utils::ToLocal(name_string),
v8::Utils::ToLocal(value_unhole));
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) return value;
}
- Handle<Object> result =
- SetPropertyPostInterceptor(object, name, value, attributes, strict_mode);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return result;
+ return SetPropertyPostInterceptor(
+ object, name, value, attributes, strict_mode);
}
-Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode) {
+MaybeHandle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode,
+ StoreFromKeyed store_mode) {
LookupResult result(object->GetIsolate());
- object->LocalLookup(*name, &result, true);
+ object->LookupOwn(name, &result, true);
if (!result.IsFound()) {
object->map()->LookupTransition(JSObject::cast(*object), *name, &result);
}
@@ -2893,124 +2982,12 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
}
-Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object,
- Handle<Object> structure,
- Handle<Name> name,
- Handle<Object> value,
- Handle<JSObject> holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = object->GetIsolate();
-
- // We should never get here to initialize a const with the hole
- // value since a const declaration would conflict with the setter.
- ASSERT(!value->IsTheHole());
-
- // To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually foreign
- // callbacks should be phased out.
- if (structure->IsForeign()) {
- AccessorDescriptor* callback =
- reinterpret_cast<AccessorDescriptor*>(
- Handle<Foreign>::cast(structure)->foreign_address());
- CALL_AND_RETRY_OR_DIE(isolate,
- (callback->setter)(
- isolate, *object, *value, callback->data),
- break,
- return Handle<Object>());
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return value;
- }
-
- if (structure->IsExecutableAccessorInfo()) {
- // api style callbacks
- ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(*structure);
- if (!data->IsCompatibleReceiver(*object)) {
- Handle<Object> args[2] = { name, object };
- Handle<Object> error =
- isolate->factory()->NewTypeError("incompatible_method_receiver",
- HandleVector(args,
- ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
- }
- // TODO(rossberg): Support symbols in the API.
- if (name->IsSymbol()) return value;
- Object* call_obj = data->setter();
- v8::AccessorSetterCallback call_fun =
- v8::ToCData<v8::AccessorSetterCallback>(call_obj);
- if (call_fun == NULL) return value;
- Handle<String> key = Handle<String>::cast(name);
- LOG(isolate, ApiNamedPropertyAccess("store", *object, *name));
- PropertyCallbackArguments args(
- isolate, data->data(), *object, JSObject::cast(*holder));
- args.Call(call_fun,
- v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value));
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return value;
- }
-
- if (structure->IsAccessorPair()) {
- Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
- if (setter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(
- object, Handle<JSReceiver>::cast(setter), value);
- } else {
- if (strict_mode == kNonStrictMode) {
- return value;
- }
- Handle<Object> args[2] = { name, holder };
- Handle<Object> error =
- isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2));
- isolate->Throw(*error);
- return Handle<Object>();
- }
- }
-
- // TODO(dcarney): Handle correctly.
- if (structure->IsDeclaredAccessorInfo()) {
- return value;
- }
-
- UNREACHABLE();
- return Handle<Object>();
-}
-
-
-Handle<Object> JSReceiver::SetPropertyWithDefinedSetter(
- Handle<JSReceiver> object,
- Handle<JSReceiver> setter,
- Handle<Object> value) {
- Isolate* isolate = object->GetIsolate();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = isolate->debug();
- // Handle stepping into a setter if step into is active.
- // TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->StepInActive() && setter->IsJSFunction()) {
- debug->HandleStepIn(
- Handle<JSFunction>::cast(setter), Handle<Object>::null(), 0, false);
- }
-#endif
-
- bool has_pending_exception;
- Handle<Object> argv[] = { value };
- Execution::Call(
- isolate, setter, object, ARRAY_SIZE(argv), argv, &has_pending_exception);
- // Check for pending exception and return the result.
- if (has_pending_exception) return Handle<Object>();
- return value;
-}
-
-
-Handle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
+MaybeHandle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
bool* found,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate *isolate = object->GetIsolate();
for (Handle<Object> proto = handle(object->GetPrototype(), isolate);
!proto->IsNull();
@@ -3046,20 +3023,21 @@ Handle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
}
-Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* done) {
+MaybeHandle<Object> JSObject::SetPropertyViaPrototypes(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode,
+ bool* done) {
Isolate* isolate = object->GetIsolate();
*done = false;
- // We could not find a local property so let's check whether there is an
+ // We could not find an own property, so let's check whether there is an
// accessor that wants to handle the property, or whether the property is
// read-only on the prototype chain.
LookupResult result(isolate);
- object->LookupRealNamedPropertyInPrototypes(*name, &result);
+ object->LookupRealNamedPropertyInPrototypes(name, &result);
if (result.IsFound()) {
switch (result.type()) {
case NORMAL:
@@ -3068,25 +3046,26 @@ Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
*done = result.IsReadOnly();
break;
case INTERCEPTOR: {
- PropertyAttributes attr =
- result.holder()->GetPropertyAttributeWithInterceptor(
- *object, *name, true);
+ LookupIterator it(object, name, handle(result.holder()));
+ PropertyAttributes attr = GetPropertyAttributes(&it);
*done = !!(attr & READ_ONLY);
break;
}
case CALLBACKS: {
- if (!FLAG_es5_readonly && result.IsReadOnly()) break;
*done = true;
- Handle<Object> callback_object(result.GetCallbackObject(), isolate);
- return SetPropertyWithCallback(object, callback_object, name, value,
- handle(result.holder()), strict_mode);
+ if (!result.IsReadOnly()) {
+ Handle<Object> callback_object(result.GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, name, value,
+ handle(result.holder()),
+ callback_object, strict_mode);
+ }
+ break;
}
case HANDLER: {
Handle<JSProxy> proxy(result.proxy());
return JSProxy::SetPropertyViaPrototypesWithHandler(
proxy, object, name, value, attributes, strict_mode, done);
}
- case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
break;
@@ -3094,30 +3073,52 @@ Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
}
// If we get here with *done true, we have encountered a read-only property.
- if (!FLAG_es5_readonly) *done = false;
if (*done) {
- if (strict_mode == kNonStrictMode) return value;
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args[] = { name, object };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
return isolate->factory()->the_hole_value();
}
void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
+ // Only supports adding slack to owned descriptors.
+ ASSERT(map->owns_descriptors());
+
Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ int old_size = map->NumberOfOwnDescriptors();
if (slack <= descriptors->NumberOfSlackDescriptors()) return;
- int number_of_descriptors = descriptors->number_of_descriptors();
- Isolate* isolate = map->GetIsolate();
- Handle<DescriptorArray> new_descriptors =
- isolate->factory()->NewDescriptorArray(number_of_descriptors, slack);
- DescriptorArray::WhitenessWitness witness(*new_descriptors);
- for (int i = 0; i < number_of_descriptors; ++i) {
- new_descriptors->CopyFrom(i, *descriptors, i, witness);
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
+ descriptors, old_size, slack);
+
+ if (old_size == 0) {
+ map->set_instance_descriptors(*new_descriptors);
+ return;
+ }
+
+  // If the source descriptors have an enum cache, we copy it. This ensures
+ // that the maps to which we push the new descriptor array back can rely
+ // on a cache always being available once it is set. If the map has more
+ // enumerated descriptors than available in the original cache, the cache
+ // will be lazily replaced by the extended cache when needed.
+ if (descriptors->HasEnumCache()) {
+ new_descriptors->CopyEnumCacheFrom(*descriptors);
+ }
+
+ // Replace descriptors by new_descriptors in all maps that share it.
+ map->GetHeap()->incremental_marking()->RecordWrites(*descriptors);
+
+ Map* walk_map;
+ for (Object* current = map->GetBackPointer();
+ !current->IsUndefined();
+ current = walk_map->GetBackPointer()) {
+ walk_map = Map::cast(current);
+ if (walk_map->instance_descriptors() != *descriptors) break;
+ walk_map->set_instance_descriptors(*new_descriptors);
}
map->set_instance_descriptors(*new_descriptors);
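The tail of EnsureDescriptorSlack walks the back-pointer chain and repoints every map that still shares the old descriptor array. A small model of that sharing walk (plain pointers, invented names):

    // Hypothetical map node sharing one descriptor array with ancestors.
    struct DescArray { int capacity; };

    struct SharingMap {
      SharingMap* back_pointer;  // 0 above the root.
      DescArray* descriptors;
    };

    // Repoint every ancestor that still shares |old_descs| to the new
    // array, stopping at the first non-sharing map, then fix |map|.
    void ReplaceSharedDescriptors(SharingMap* map,
                                  DescArray* old_descs,
                                  DescArray* new_descs) {
      for (SharingMap* walk = map->back_pointer; walk != 0;
           walk = walk->back_pointer) {
        if (walk->descriptors != old_descs) break;
        walk->descriptors = new_descs;
      }
      map->descriptors = new_descs;
    }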
@@ -3138,7 +3139,7 @@ static int AppendUniqueCallbacks(NeanderArray* callbacks,
Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
if (entry->name()->IsUniqueName()) continue;
Handle<String> key =
- isolate->factory()->InternalizedStringFromString(
+ isolate->factory()->InternalizeString(
Handle<String>(String::cast(entry->name())));
entry->set_name(*key);
}
@@ -3147,8 +3148,8 @@ static int AppendUniqueCallbacks(NeanderArray* callbacks,
// back to front so that the last callback with a given name takes
// precedence over previously added callbacks with that name.
for (int i = nof_callbacks - 1; i >= 0; i--) {
- AccessorInfo* entry = AccessorInfo::cast(callbacks->get(i));
- Name* key = Name::cast(entry->name());
+ Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
+ Handle<Name> key(Name::cast(entry->name()));
// Check if a descriptor with this name already exists before writing.
if (!T::Contains(key, entry, valid_descriptors, array)) {
T::Insert(key, entry, valid_descriptors, array);
@@ -3161,16 +3162,18 @@ static int AppendUniqueCallbacks(NeanderArray* callbacks,
struct DescriptorArrayAppender {
typedef DescriptorArray Array;
- static bool Contains(Name* key,
- AccessorInfo* entry,
+ static bool Contains(Handle<Name> key,
+ Handle<AccessorInfo> entry,
int valid_descriptors,
Handle<DescriptorArray> array) {
- return array->Search(key, valid_descriptors) != DescriptorArray::kNotFound;
+ DisallowHeapAllocation no_gc;
+ return array->Search(*key, valid_descriptors) != DescriptorArray::kNotFound;
}
- static void Insert(Name* key,
- AccessorInfo* entry,
+ static void Insert(Handle<Name> key,
+ Handle<AccessorInfo> entry,
int valid_descriptors,
Handle<DescriptorArray> array) {
+ DisallowHeapAllocation no_gc;
CallbacksDescriptor desc(key, entry, entry->property_attributes());
array->Append(&desc);
}
@@ -3179,20 +3182,21 @@ struct DescriptorArrayAppender {
struct FixedArrayAppender {
typedef FixedArray Array;
- static bool Contains(Name* key,
- AccessorInfo* entry,
+ static bool Contains(Handle<Name> key,
+ Handle<AccessorInfo> entry,
int valid_descriptors,
Handle<FixedArray> array) {
for (int i = 0; i < valid_descriptors; i++) {
- if (key == AccessorInfo::cast(array->get(i))->name()) return true;
+ if (*key == AccessorInfo::cast(array->get(i))->name()) return true;
}
return false;
}
- static void Insert(Name* key,
- AccessorInfo* entry,
+ static void Insert(Handle<Name> key,
+ Handle<AccessorInfo> entry,
int valid_descriptors,
Handle<FixedArray> array) {
- array->set(valid_descriptors, entry);
+ DisallowHeapAllocation no_gc;
+ array->set(valid_descriptors, *entry);
}
};
@@ -3260,24 +3264,38 @@ Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
Map* current_map = map;
- int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
- int to_index = IsFastElementsKind(to_kind)
- ? GetSequenceIndexFromFastElementsKind(to_kind)
- : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
-
- ASSERT(index <= to_index);
-
- for (; index < to_index; ++index) {
+ int target_kind =
+ IsFastElementsKind(to_kind) || IsExternalArrayElementsKind(to_kind)
+ ? to_kind
+ : TERMINAL_FAST_ELEMENTS_KIND;
+
+ // Support for legacy API: SetIndexedPropertiesTo{External,Pixel}Data
+  // allows changing elements from an arbitrary kind to any ExternalArray
+  // elements kind. Satisfy its requirements by checking whether we already
+ // have the cached transition.
+ if (IsExternalArrayElementsKind(to_kind) &&
+ !IsFixedTypedArrayElementsKind(map->elements_kind())) {
+ if (map->HasElementsTransition()) {
+ Map* next_map = map->elements_transition_map();
+ if (next_map->elements_kind() == to_kind) return next_map;
+ }
+ return map;
+ }
+
+ ElementsKind kind = map->elements_kind();
+ while (kind != target_kind) {
+ kind = GetNextTransitionElementsKind(kind);
if (!current_map->HasElementsTransition()) return current_map;
current_map = current_map->elements_transition_map();
}
- if (!IsFastElementsKind(to_kind) && current_map->HasElementsTransition()) {
+
+ if (to_kind != kind && current_map->HasElementsTransition()) {
+ ASSERT(to_kind == DICTIONARY_ELEMENTS);
Map* next_map = current_map->elements_transition_map();
if (next_map->elements_kind() == to_kind) return next_map;
}
- ASSERT(IsFastElementsKind(to_kind)
- ? current_map->elements_kind() == to_kind
- : current_map->elements_kind() == TERMINAL_FAST_ELEMENTS_KIND);
+
+ ASSERT(current_map->elements_kind() == target_kind);
return current_map;
}
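FindClosestElementsTransition now steps kind by kind via GetNextTransitionElementsKind until either the target kind or the end of the cached transition chain is reached. A standalone sketch over an integer kind ladder; the numeric ordering stands in for V8's elements-kind sequence:

    // Hypothetical elements-kind ladder: each map caches at most one
    // transition to the next more general kind.
    struct ElementsMap {
      int kind;                          // Position in the sequence.
      ElementsMap* elements_transition;  // 0 if none cached yet.
    };

    // Follow cached transitions toward |target_kind|; stop early at the
    // last existing map, which the caller then extends with new maps.
    ElementsMap* FindClosest(ElementsMap* map, int target_kind) {
      ElementsMap* current = map;
      int kind = map->kind;
      while (kind != target_kind) {
        ++kind;  // Stand-in for GetNextTransitionElementsKind().
        if (current->elements_transition == 0) return current;
        current = current->elements_transition;
      }
      return current;
    }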
@@ -3303,31 +3321,24 @@ bool Map::IsMapInArrayPrototypeChain() {
}
-static MaybeObject* AddMissingElementsTransitions(Map* map,
- ElementsKind to_kind) {
- ASSERT(IsFastElementsKind(map->elements_kind()));
- int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
- int to_index = IsFastElementsKind(to_kind)
- ? GetSequenceIndexFromFastElementsKind(to_kind)
- : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+static Handle<Map> AddMissingElementsTransitions(Handle<Map> map,
+ ElementsKind to_kind) {
+ ASSERT(IsTransitionElementsKind(map->elements_kind()));
- ASSERT(index <= to_index);
+ Handle<Map> current_map = map;
- Map* current_map = map;
-
- for (; index < to_index; ++index) {
- ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(index + 1);
- MaybeObject* maybe_next_map =
- current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
- if (!maybe_next_map->To(&current_map)) return maybe_next_map;
+ ElementsKind kind = map->elements_kind();
+ while (kind != to_kind && !IsTerminalElementsKind(kind)) {
+ kind = GetNextTransitionElementsKind(kind);
+ current_map = Map::CopyAsElementsKind(
+ current_map, kind, INSERT_TRANSITION);
}
// In case we are exiting the fast elements kind system, just add the map at
// the end.
- if (!IsFastElementsKind(to_kind)) {
- MaybeObject* maybe_next_map =
- current_map->CopyAsElementsKind(to_kind, INSERT_TRANSITION);
- if (!maybe_next_map->To(&current_map)) return maybe_next_map;
+ if (kind != to_kind) {
+ current_map = Map::CopyAsElementsKind(
+ current_map, to_kind, INSERT_TRANSITION);
}
ASSERT(current_map->elements_kind() == to_kind);
@@ -3335,28 +3346,42 @@ static MaybeObject* AddMissingElementsTransitions(Map* map,
}
-Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind to_kind) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- object->GetElementsTransitionMap(isolate, to_kind),
- Map);
+Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
+ ElementsKind to_kind) {
+ ElementsKind from_kind = map->elements_kind();
+ if (from_kind == to_kind) return map;
+
+ Isolate* isolate = map->GetIsolate();
+ Context* native_context = isolate->context()->native_context();
+ Object* maybe_array_maps = native_context->js_array_maps();
+ if (maybe_array_maps->IsFixedArray()) {
+ DisallowHeapAllocation no_gc;
+ FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
+ if (array_maps->get(from_kind) == *map) {
+ Object* maybe_transitioned_map = array_maps->get(to_kind);
+ if (maybe_transitioned_map->IsMap()) {
+ return handle(Map::cast(maybe_transitioned_map));
+ }
+ }
+ }
+
+ return TransitionElementsToSlow(map, to_kind);
}
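TransitionElementsTo above first consults the native context's js_array_maps cache, an array of canonical JSArray maps indexed by elements kind, and only falls back to the slow transition walk on a miss. A sketch of that fast path, assuming a simple fixed-size cache and invented type names:

    #include <cstddef>

    struct CachedMap { int elements_kind; };

    // Hypothetical per-context cache: one canonical array map per kind.
    static const int kNumKinds = 8;
    struct NativeContextModel {
      CachedMap* js_array_maps[kNumKinds];  // Entries may be NULL.
    };

    // Fast path: if |map| is the canonical map for its kind, the target
    // kind's canonical map is returned without walking transitions.
    CachedMap* TransitionElementsFast(NativeContextModel* ctx,
                                      CachedMap* map, int to_kind) {
      int from_kind = map->elements_kind;
      if (from_kind == to_kind) return map;
      if (ctx->js_array_maps[from_kind] == map) {
        CachedMap* transitioned = ctx->js_array_maps[to_kind];
        if (transitioned != NULL) return transitioned;
      }
      return NULL;  // Caller falls back to TransitionElementsToSlow().
    }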
-MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
- Map* start_map = map();
- ElementsKind from_kind = start_map->elements_kind();
+Handle<Map> Map::TransitionElementsToSlow(Handle<Map> map,
+ ElementsKind to_kind) {
+ ElementsKind from_kind = map->elements_kind();
if (from_kind == to_kind) {
- return start_map;
+ return map;
}
bool allow_store_transition =
// Only remember the map transition if there is not an already existing
// non-matching element transition.
- !start_map->IsUndefined() && !start_map->is_shared() &&
- IsFastElementsKind(from_kind);
+ !map->IsUndefined() && !map->is_shared() &&
+ IsTransitionElementsKind(from_kind);
// Only store fast element maps in ascending generality.
if (IsFastElementsKind(to_kind)) {
@@ -3366,15 +3391,16 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
}
if (!allow_store_transition) {
- return start_map->CopyAsElementsKind(to_kind, OMIT_TRANSITION);
+ return Map::CopyAsElementsKind(map, to_kind, OMIT_TRANSITION);
}
- return start_map->AsElementsKind(to_kind);
+ return Map::AsElementsKind(map, to_kind);
}
-MaybeObject* Map::AsElementsKind(ElementsKind kind) {
- Map* closest_map = FindClosestElementsTransition(this, kind);
+// static
+Handle<Map> Map::AsElementsKind(Handle<Map> map, ElementsKind kind) {
+ Handle<Map> closest_map(FindClosestElementsTransition(*map, kind));
if (closest_map->elements_kind() == kind) {
return closest_map;
@@ -3384,18 +3410,27 @@ MaybeObject* Map::AsElementsKind(ElementsKind kind) {
}
-void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) {
+Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ Handle<Map> map(object->map());
+ return Map::TransitionElementsTo(map, to_kind);
+}
+
+
+void JSObject::LookupOwnRealNamedProperty(Handle<Name> name,
+ LookupResult* result) {
+ DisallowHeapAllocation no_gc;
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
+ return JSObject::cast(proto)->LookupOwnRealNamedProperty(name, result);
}
if (HasFastProperties()) {
- map()->LookupDescriptor(this, name, result);
+ map()->LookupDescriptor(this, *name, result);
// A property or a map transition was found. We return all of these result
- // types because LocalLookupRealNamedProperty is used when setting
+ // types because LookupOwnRealNamedProperty is used when setting
// properties where map transitions are handled.
ASSERT(!result->IsFound() ||
(result->holder() == this && result->IsFastPropertyType()));
@@ -3403,7 +3438,7 @@ void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) {
// occur as fields.
if (result->IsField() &&
result->IsReadOnly() &&
- RawFastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) {
+ RawFastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
result->DisallowCaching();
}
return;
@@ -3431,16 +3466,19 @@ void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) {
}
-void JSObject::LookupRealNamedProperty(Name* name, LookupResult* result) {
- LocalLookupRealNamedProperty(name, result);
+void JSObject::LookupRealNamedProperty(Handle<Name> name,
+ LookupResult* result) {
+ DisallowHeapAllocation no_gc;
+ LookupOwnRealNamedProperty(name, result);
if (result->IsFound()) return;
LookupRealNamedPropertyInPrototypes(name, result);
}
-void JSObject::LookupRealNamedPropertyInPrototypes(Name* name,
+void JSObject::LookupRealNamedPropertyInPrototypes(Handle<Name> name,
LookupResult* result) {
+ DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
Heap* heap = isolate->heap();
for (Object* pt = GetPrototype();
@@ -3449,7 +3487,7 @@ void JSObject::LookupRealNamedPropertyInPrototypes(Name* name,
if (pt->IsJSProxy()) {
return result->HandlerResult(JSProxy::cast(pt));
}
- JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
+ JSObject::cast(pt)->LookupOwnRealNamedProperty(name, result);
ASSERT(!(result->IsFound() && result->type() == INTERCEPTOR));
if (result->IsFound()) return;
}
@@ -3457,82 +3495,13 @@ void JSObject::LookupRealNamedPropertyInPrototypes(Name* name,
}
-// We only need to deal with CALLBACKS and INTERCEPTORS
-Handle<Object> JSObject::SetPropertyWithFailedAccessCheck(
- Handle<JSObject> object,
- LookupResult* result,
- Handle<Name> name,
- Handle<Object> value,
- bool check_prototype,
- StrictModeFlag strict_mode) {
- if (check_prototype && !result->IsProperty()) {
- object->LookupRealNamedPropertyInPrototypes(*name, result);
- }
-
- if (result->IsProperty()) {
- if (!result->IsReadOnly()) {
- switch (result->type()) {
- case CALLBACKS: {
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- Handle<AccessorInfo> info(AccessorInfo::cast(obj));
- if (info->all_can_write()) {
- return SetPropertyWithCallback(object,
- info,
- name,
- value,
- handle(result->holder()),
- strict_mode);
- }
- } else if (obj->IsAccessorPair()) {
- Handle<AccessorPair> pair(AccessorPair::cast(obj));
- if (pair->all_can_read()) {
- return SetPropertyWithCallback(object,
- pair,
- name,
- value,
- handle(result->holder()),
- strict_mode);
- }
- }
- break;
- }
- case INTERCEPTOR: {
- // Try to look up real named properties. Note that the only properties
- // that can be set are callbacks marked as ALL_CAN_WRITE on the
- // prototype chain.
- LookupResult r(object->GetIsolate());
- object->LookupRealNamedProperty(*name, &r);
- if (r.IsProperty()) {
- return SetPropertyWithFailedAccessCheck(object,
- &r,
- name,
- value,
- check_prototype,
- strict_mode);
- }
- break;
- }
- default: {
- break;
- }
- }
- }
- }
-
- Isolate* isolate = object->GetIsolate();
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return value;
-}
-
-
-Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
- LookupResult* result,
- Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode) {
+MaybeHandle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode,
+ StoreFromKeyed store_mode) {
if (result->IsHandler()) {
return JSProxy::SetPropertyWithHandler(handle(result->proxy()),
object, key, value, attributes, strict_mode);
@@ -3550,40 +3519,53 @@ bool JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name) {
if (name->IsSymbol()) return false;
Handle<Object> args[] = { name };
- Handle<Object> result = proxy->CallTrap(
- "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return false;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, result,
+ CallTrap(proxy,
+ "has",
+ isolate->derived_has_trap(),
+ ARRAY_SIZE(args),
+ args),
+ false);
return result->BooleanValue();
}
-Handle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+MaybeHandle<Object> JSProxy::SetPropertyWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode) {
Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) return value;
Handle<Object> args[] = { receiver, name, value };
- proxy->CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Handle<Object>();
+ RETURN_ON_EXCEPTION(
+ isolate,
+ CallTrap(proxy,
+ "set",
+ isolate->derived_set_trap(),
+ ARRAY_SIZE(args),
+ args),
+ Object);
return value;
}
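
Across these hunks the old convention of returning an empty Handle and having callers poll has_pending_exception() is replaced by MaybeHandle<T> plus the RETURN_ON_EXCEPTION and ASSIGN_RETURN_ON_EXCEPTION macros, which bail out as soon as a callee has thrown. A compilable sketch of the pattern's shape, modeling MaybeHandle with std::optional; the real MaybeHandle is a tagged pointer, and the real macros also take the isolate and a return-type argument:

#include <cassert>
#include <optional>
#include <string>

template <typename T>
using MaybeHandle = std::optional<T>;  // empty == pending exception

// Simplified analogue of ASSIGN_RETURN_ON_EXCEPTION.
#define ASSIGN_RETURN_ON_EXCEPTION(dst, call) \
  do {                                        \
    auto maybe_ = (call);                     \
    if (!maybe_) return {};                   \
    (dst) = *maybe_;                          \
  } while (false)

MaybeHandle<std::string> CallTrap(bool throws) {
  if (throws) return std::nullopt;
  return std::string("trap result");
}

MaybeHandle<std::string> SetWithHandler(bool trap_throws,
                                        const std::string& value) {
  std::string result;
  ASSIGN_RETURN_ON_EXCEPTION(result, CallTrap(trap_throws));
  return value;  // as above: on success the stored value is returned
}

int main() {
  assert(SetWithHandler(false, "v").has_value());
  assert(!SetWithHandler(true, "v").has_value());  // exception propagated
  return 0;
}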
-Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
+MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done) {
Isolate* isolate = proxy->GetIsolate();
Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
@@ -3596,9 +3578,15 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
*done = true; // except where redefined...
Handle<Object> args[] = { name };
- Handle<Object> result = proxy->CallTrap(
- "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Handle<Object>();
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ CallTrap(proxy,
+ "getPropertyDescriptor",
+ Handle<Object>(),
+ ARRAY_SIZE(args),
+ args),
+ Object);
if (result->IsUndefined()) {
*done = false;
@@ -3606,21 +3594,24 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
}
// Emulate [[GetProperty]] semantics for proxies.
- bool has_pending_exception;
Handle<Object> argv[] = { result };
- Handle<Object> desc = Execution::Call(
- isolate, isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return Handle<Object>();
+ Handle<Object> desc;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, desc,
+ Execution::Call(isolate,
+ isolate->to_complete_property_descriptor(),
+ result,
+ ARRAY_SIZE(argv),
+ argv),
+ Object);
// [[GetProperty]] requires to check that all properties are configurable.
Handle<String> configurable_name =
isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("configurable_"));
- Handle<Object> configurable(
- v8::internal::GetProperty(isolate, desc, configurable_name));
- ASSERT(!isolate->has_pending_exception());
- ASSERT(configurable->IsTrue() || configurable->IsFalse());
+ Handle<Object> configurable =
+ Object::GetProperty(desc, configurable_name).ToHandleChecked();
+ ASSERT(configurable->IsBoolean());
if (configurable->IsFalse()) {
Handle<String> trap =
isolate->factory()->InternalizeOneByteString(
@@ -3628,8 +3619,7 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<Object> args[] = { handler, trap, name };
Handle<Object> error = isolate->factory()->NewTypeError(
"proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
ASSERT(configurable->IsTrue());
@@ -3637,49 +3627,44 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<String> hasWritable_name =
isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("hasWritable_"));
- Handle<Object> hasWritable(
- v8::internal::GetProperty(isolate, desc, hasWritable_name));
- ASSERT(!isolate->has_pending_exception());
- ASSERT(hasWritable->IsTrue() || hasWritable->IsFalse());
+ Handle<Object> hasWritable =
+ Object::GetProperty(desc, hasWritable_name).ToHandleChecked();
+ ASSERT(hasWritable->IsBoolean());
if (hasWritable->IsTrue()) {
Handle<String> writable_name =
isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("writable_"));
- Handle<Object> writable(
- v8::internal::GetProperty(isolate, desc, writable_name));
- ASSERT(!isolate->has_pending_exception());
- ASSERT(writable->IsTrue() || writable->IsFalse());
+ Handle<Object> writable =
+ Object::GetProperty(desc, writable_name).ToHandleChecked();
+ ASSERT(writable->IsBoolean());
*done = writable->IsFalse();
if (!*done) return isolate->factory()->the_hole_value();
- if (strict_mode == kNonStrictMode) return value;
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args[] = { name, receiver };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
// We have an AccessorDescriptor.
Handle<String> set_name = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("set_"));
- Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_name));
- ASSERT(!isolate->has_pending_exception());
+ Handle<Object> setter = Object::GetProperty(desc, set_name).ToHandleChecked();
if (!setter->IsUndefined()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(
receiver, Handle<JSReceiver>::cast(setter), value);
}
- if (strict_mode == kNonStrictMode) return value;
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args2[] = { name, proxy };
Handle<Object> error = isolate->factory()->NewTypeError(
"no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
-Handle<Object> JSProxy::DeletePropertyWithHandler(
+MaybeHandle<Object> JSProxy::DeletePropertyWithHandler(
Handle<JSProxy> proxy, Handle<Name> name, DeleteMode mode) {
Isolate* isolate = proxy->GetIsolate();
@@ -3687,9 +3672,15 @@ Handle<Object> JSProxy::DeletePropertyWithHandler(
if (name->IsSymbol()) return isolate->factory()->false_value();
Handle<Object> args[] = { name };
- Handle<Object> result = proxy->CallTrap(
- "delete", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Handle<Object>();
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ CallTrap(proxy,
+ "delete",
+ Handle<Object>(),
+ ARRAY_SIZE(args),
+ args),
+ Object);
bool result_bool = result->BooleanValue();
if (mode == STRICT_DELETION && !result_bool) {
@@ -3699,14 +3690,13 @@ Handle<Object> JSProxy::DeletePropertyWithHandler(
Handle<Object> args[] = { handler, trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
"handler_failed", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
return isolate->factory()->ToBoolean(result_bool);
}
-Handle<Object> JSProxy::DeleteElementWithHandler(
+MaybeHandle<Object> JSProxy::DeleteElementWithHandler(
Handle<JSProxy> proxy, uint32_t index, DeleteMode mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
@@ -3714,55 +3704,67 @@ Handle<Object> JSProxy::DeleteElementWithHandler(
}
-MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
- JSReceiver* receiver_raw,
- Name* name_raw) {
- Isolate* isolate = GetIsolate();
+PropertyAttributes JSProxy::GetPropertyAttributesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<Object> receiver,
+ Handle<Name> name) {
+ Isolate* isolate = proxy->GetIsolate();
HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
- Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw, isolate);
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) return ABSENT;
Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
- "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return NONE;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, result,
+ proxy->CallTrap(proxy,
+ "getPropertyDescriptor",
+ Handle<Object>(),
+ ARRAY_SIZE(args),
+ args),
+ NONE);
if (result->IsUndefined()) return ABSENT;
- bool has_pending_exception;
Handle<Object> argv[] = { result };
- Handle<Object> desc = Execution::Call(
- isolate, isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return NONE;
+ Handle<Object> desc;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, desc,
+ Execution::Call(isolate,
+ isolate->to_complete_property_descriptor(),
+ result,
+ ARRAY_SIZE(argv),
+ argv),
+ NONE);
// Convert result to PropertyAttributes.
Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("enumerable_"));
- Handle<Object> enumerable(v8::internal::GetProperty(isolate, desc, enum_n));
- if (isolate->has_pending_exception()) return NONE;
+ Handle<Object> enumerable;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, enumerable, Object::GetProperty(desc, enum_n), NONE);
Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("configurable_"));
- Handle<Object> configurable(v8::internal::GetProperty(isolate, desc, conf_n));
- if (isolate->has_pending_exception()) return NONE;
+ Handle<Object> configurable;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, configurable, Object::GetProperty(desc, conf_n), NONE);
Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("writable_"));
- Handle<Object> writable(v8::internal::GetProperty(isolate, desc, writ_n));
- if (isolate->has_pending_exception()) return NONE;
+ Handle<Object> writable;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, writable, Object::GetProperty(desc, writ_n), NONE);
if (!writable->BooleanValue()) {
Handle<String> set_n = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("set_"));
- Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_n));
- if (isolate->has_pending_exception()) return NONE;
+ Handle<Object> setter;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, setter, Object::GetProperty(desc, set_n), NONE);
writable = isolate->factory()->ToBoolean(!setter->IsUndefined());
}
if (configurable->IsFalse()) {
+ Handle<Object> handler(proxy->handler(), isolate);
Handle<String> trap = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
@@ -3780,15 +3782,13 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
}
-MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
- JSReceiver* receiver_raw,
+PropertyAttributes JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
uint32_t index) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
+ Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return proxy->GetPropertyAttributeWithHandler(*receiver, *name);
+ return GetPropertyAttributesWithHandler(proxy, receiver, name);
}
@@ -3814,36 +3814,32 @@ void JSProxy::Fix(Handle<JSProxy> proxy) {
}
-MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
- Handle<Object> derived,
- int argc,
- Handle<Object> argv[]) {
- Isolate* isolate = GetIsolate();
- Handle<Object> handler(this->handler(), isolate);
+MaybeHandle<Object> JSProxy::CallTrap(Handle<JSProxy> proxy,
+ const char* name,
+ Handle<Object> derived,
+ int argc,
+ Handle<Object> argv[]) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<Object> handler(proxy->handler(), isolate);
Handle<String> trap_name = isolate->factory()->InternalizeUtf8String(name);
- Handle<Object> trap(v8::internal::GetProperty(isolate, handler, trap_name));
- if (isolate->has_pending_exception()) return trap;
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, trap,
+ Object::GetPropertyOrElement(handler, trap_name),
+ Object);
if (trap->IsUndefined()) {
if (derived.is_null()) {
Handle<Object> args[] = { handler, trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
"handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
trap = Handle<Object>(derived);
}
- bool threw;
- return Execution::Call(isolate, trap, handler, argc, argv, &threw);
-}
-
-
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<Map> MapAsElementsKind(Handle<Map> map, ElementsKind kind) {
- CALL_HEAP_FUNCTION(map->GetIsolate(), map->AsElementsKind(kind), Map);
+ return Execution::Call(isolate, trap, handler, argc, argv);
}
@@ -3862,18 +3858,9 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
} else {
TransitionElementsKind(object, to_kind);
}
- map = MapAsElementsKind(map, to_kind);
+ map = Map::AsElementsKind(map, to_kind);
}
- int total_size =
- map->NumberOfOwnDescriptors() + map->unused_property_fields();
- int out_of_object = total_size - map->inobject_properties();
- if (out_of_object != object->properties()->length()) {
- Isolate* isolate = object->GetIsolate();
- Handle<FixedArray> new_properties = isolate->factory()->CopySizeFixedArray(
- handle(object->properties()), out_of_object);
- object->set_properties(*new_properties);
- }
- object->set_map(*map);
+ JSObject::MigrateToMap(object, map);
}
@@ -3883,7 +3870,9 @@ void JSObject::MigrateInstance(Handle<JSObject> object) {
// transition that matches the object. This achieves what is needed.
Handle<Map> original_map(object->map());
GeneralizeFieldRepresentation(
- object, 0, Representation::None(), ALLOW_AS_CONSTANT);
+ object, 0, Representation::None(),
+ HeapType::None(object->GetIsolate()),
+ ALLOW_AS_CONSTANT);
object->map()->set_migration_target(true);
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, object->map());
@@ -3891,19 +3880,24 @@ void JSObject::MigrateInstance(Handle<JSObject> object) {
}
-Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
- Handle<Map> original_map(object->map());
- Handle<Map> new_map = Map::CurrentMapForDeprecatedInternal(original_map);
- if (new_map.is_null()) return Handle<Object>();
+// static
+bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ DisallowDeoptimization no_deoptimization(isolate);
+ Handle<Map> original_map(object->map(), isolate);
+ Handle<Map> new_map;
+ if (!Map::CurrentMapForDeprecatedInternal(original_map).ToHandle(&new_map)) {
+ return false;
+ }
JSObject::MigrateToMap(object, new_map);
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, object->map());
}
- return object;
+ return true;
}
-Handle<Object> JSObject::SetPropertyUsingTransition(
+MaybeHandle<Object> JSObject::SetPropertyUsingTransition(
Handle<JSObject> object,
LookupResult* lookup,
Handle<Name> name,
@@ -3912,7 +3906,7 @@ Handle<Object> JSObject::SetPropertyUsingTransition(
Handle<Map> transition_map(lookup->GetTransitionTarget());
int descriptor = transition_map->LastAdded();
- DescriptorArray* descriptors = transition_map->instance_descriptors();
+ Handle<DescriptorArray> descriptors(transition_map->instance_descriptors());
PropertyDetails details = descriptors->GetDetails(descriptor);
if (details.type() == CALLBACKS || attributes != details.attributes()) {
@@ -3920,7 +3914,7 @@ Handle<Object> JSObject::SetPropertyUsingTransition(
// of the map. If we get a fast copy of the map, all field representations
// will be tagged since the transition is omitted.
return JSObject::AddProperty(
- object, name, value, attributes, kNonStrictMode,
+ object, name, value, attributes, SLOPPY,
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
JSReceiver::OMIT_EXTENSIBILITY_CHECK,
JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
@@ -3929,64 +3923,68 @@ Handle<Object> JSObject::SetPropertyUsingTransition(
// Keep the target CONSTANT if the same value is stored.
// TODO(verwaest): Also support keeping the placeholder
// (value->IsUninitialized) as constant.
- if (details.type() == CONSTANT &&
- descriptors->GetValue(descriptor) == *value) {
- object->set_map(*transition_map);
- return value;
+ if (!lookup->CanHoldValue(value)) {
+ Representation field_representation = value->OptimalRepresentation();
+ Handle<HeapType> field_type = value->OptimalType(
+ lookup->isolate(), field_representation);
+ transition_map = Map::GeneralizeRepresentation(
+ transition_map, descriptor,
+ field_representation, field_type, FORCE_FIELD);
}
- Representation representation = details.representation();
+ JSObject::MigrateToNewProperty(object, transition_map, value);
+ return value;
+}
- if (!value->FitsRepresentation(representation) ||
- details.type() == CONSTANT) {
- transition_map = Map::GeneralizeRepresentation(transition_map,
- descriptor, value->OptimalRepresentation(), FORCE_FIELD);
- Object* back = transition_map->GetBackPointer();
- if (back->IsMap()) {
- MigrateToMap(object, handle(Map::cast(back)));
- }
- descriptors = transition_map->instance_descriptors();
- representation = descriptors->GetDetails(descriptor).representation();
- }
- int field_index = descriptors->GetFieldIndex(descriptor);
- AddFastPropertyUsingMap(
- object, transition_map, name, value, field_index, representation);
- return value;
+void JSObject::MigrateToNewProperty(Handle<JSObject> object,
+ Handle<Map> map,
+ Handle<Object> value) {
+ JSObject::MigrateToMap(object, map);
+ if (map->GetLastDescriptorDetails().type() != FIELD) return;
+ object->WriteToField(map->LastAdded(), *value);
+}
+
+
+void JSObject::WriteToField(int descriptor, Object* value) {
+ DisallowHeapAllocation no_gc;
+
+ DescriptorArray* desc = map()->instance_descriptors();
+ PropertyDetails details = desc->GetDetails(descriptor);
+
+ ASSERT(details.type() == FIELD);
+
+ FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
+ if (details.representation().IsDouble()) {
+ // Nothing more to be done.
+ if (value->IsUninitialized()) return;
+ HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
+ box->set_value(value->Number());
+ } else {
+ FastPropertyAtPut(index, value);
+ }
}
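
WriteToField above funnels every descriptor store through one helper: a double field is updated by mutating the HeapNumber box in place, so the slot's identity never changes, while any other field takes an ordinary tagged store. A self-contained sketch of that split, with a deliberately simplified layout (in real V8 the box lives in the heap and the slot holds arbitrary tagged Objects):

#include <cassert>
#include <string>

struct HeapNumber { double value; };

struct Slot {
  bool is_double_field;
  HeapNumber* box;     // used when is_double_field
  std::string tagged;  // stand-in for an ordinary tagged value
};

void WriteToField(Slot* slot, double number, const std::string& tagged) {
  if (slot->is_double_field) {
    slot->box->value = number;  // mutate the box; slot identity is stable
  } else {
    slot->tagged = tagged;      // ordinary store
  }
}

int main() {
  HeapNumber box{0.0};
  Slot d{true, &box, {}};
  WriteToField(&d, 1.5, {});
  assert(box.value == 1.5);
  Slot t{false, nullptr, {}};
  WriteToField(&t, 0.0, "str");
  assert(t.tagged == "str");
  return 0;
}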
static void SetPropertyToField(LookupResult* lookup,
- Handle<Name> name,
Handle<Object> value) {
- Representation representation = lookup->representation();
- if (!value->FitsRepresentation(representation) ||
- lookup->type() == CONSTANT) {
+ if (lookup->type() == CONSTANT || !lookup->CanHoldValue(value)) {
+ Representation field_representation = value->OptimalRepresentation();
+ Handle<HeapType> field_type = value->OptimalType(
+ lookup->isolate(), field_representation);
JSObject::GeneralizeFieldRepresentation(handle(lookup->holder()),
lookup->GetDescriptorIndex(),
- value->OptimalRepresentation(),
+ field_representation, field_type,
FORCE_FIELD);
- DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
- int descriptor = lookup->GetDescriptorIndex();
- representation = desc->GetDetails(descriptor).representation();
}
-
- if (FLAG_track_double_fields && representation.IsDouble()) {
- HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
- lookup->GetFieldIndex().field_index()));
- storage->set_value(value->Number());
- return;
- }
-
- lookup->holder()->FastPropertyAtPut(
- lookup->GetFieldIndex().field_index(), *value);
+ lookup->holder()->WriteToField(lookup->GetDescriptorIndex(), *value);
}
-static void ConvertAndSetLocalProperty(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
+static void ConvertAndSetOwnProperty(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
Handle<JSObject> object(lookup->holder());
if (object->TooManyFastProperties()) {
JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
@@ -4000,7 +3998,8 @@ static void ConvertAndSetLocalProperty(LookupResult* lookup,
int descriptor_index = lookup->GetDescriptorIndex();
if (lookup->GetAttributes() == attributes) {
JSObject::GeneralizeFieldRepresentation(
- object, descriptor_index, Representation::Tagged(), FORCE_FIELD);
+ object, descriptor_index, Representation::Tagged(),
+ HeapType::Any(lookup->isolate()), FORCE_FIELD);
} else {
Handle<Map> old_map(object->map());
Handle<Map> new_map = Map::CopyGeneralizeAllRepresentations(old_map,
@@ -4008,9 +4007,7 @@ static void ConvertAndSetLocalProperty(LookupResult* lookup,
JSObject::MigrateToMap(object, new_map);
}
- DescriptorArray* descriptors = object->map()->instance_descriptors();
- int index = descriptors->GetDetails(descriptor_index).field_index();
- object->FastPropertyAtPut(index, *value);
+ object->WriteToField(descriptor_index, *value);
}
@@ -4020,20 +4017,21 @@ static void SetPropertyToFieldWithAttributes(LookupResult* lookup,
PropertyAttributes attributes) {
if (lookup->GetAttributes() == attributes) {
if (value->IsUninitialized()) return;
- SetPropertyToField(lookup, name, value);
+ SetPropertyToField(lookup, value);
} else {
- ConvertAndSetLocalProperty(lookup, name, value, attributes);
+ ConvertAndSetOwnProperty(lookup, name, value, attributes);
}
}
-Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode) {
+MaybeHandle<Object> JSObject::SetPropertyForResult(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode,
+ StoreFromKeyed store_mode) {
Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing callbacks or
@@ -4050,7 +4048,7 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ if (!isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(object, lookup, name, value,
true, strict_mode);
}
@@ -4069,8 +4067,12 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
if (!lookup->IsProperty() && !object->IsJSContextExtensionObject()) {
bool done = false;
- Handle<Object> result_object = SetPropertyViaPrototypes(
- object, name, value, attributes, strict_mode, &done);
+ Handle<Object> result_object;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result_object,
+ SetPropertyViaPrototypes(
+ object, name, value, attributes, strict_mode, &done),
+ Object);
if (done) return result_object;
}
@@ -4081,68 +4083,70 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
}
if (lookup->IsProperty() && lookup->IsReadOnly()) {
- if (strict_mode == kStrictMode) {
+ if (strict_mode == STRICT) {
Handle<Object> args[] = { name, object };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
} else {
return value;
}
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
if (is_observed && lookup->IsDataProperty()) {
- old_value = Object::GetProperty(object, name);
+ old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked();
}
// This is a real property that is not read-only, or it is a
// transition or null descriptor and there are no setters in the prototypes.
- Handle<Object> result = value;
- switch (lookup->type()) {
- case NORMAL:
- SetNormalizedProperty(handle(lookup->holder()), lookup, value);
- break;
- case FIELD:
- SetPropertyToField(lookup, name, value);
- break;
- case CONSTANT:
- // Only replace the constant if necessary.
- if (*value == lookup->GetConstant()) return value;
- SetPropertyToField(lookup, name, value);
- break;
- case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject(), isolate);
- return SetPropertyWithCallback(object, callback_object, name, value,
- handle(lookup->holder()), strict_mode);
- }
- case INTERCEPTOR:
- result = SetPropertyWithInterceptor(handle(lookup->holder()), name, value,
- attributes, strict_mode);
- break;
- case TRANSITION:
- result = SetPropertyUsingTransition(handle(lookup->holder()), lookup,
- name, value, attributes);
- break;
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
+ MaybeHandle<Object> maybe_result = value;
+ if (lookup->IsTransition()) {
+ maybe_result = SetPropertyUsingTransition(handle(lookup->holder()), lookup,
+ name, value, attributes);
+ } else {
+ switch (lookup->type()) {
+ case NORMAL:
+ SetNormalizedProperty(handle(lookup->holder()), lookup, value);
+ break;
+ case FIELD:
+ SetPropertyToField(lookup, value);
+ break;
+ case CONSTANT:
+ // Only replace the constant if necessary.
+ if (*value == lookup->GetConstant()) return value;
+ SetPropertyToField(lookup, value);
+ break;
+ case CALLBACKS: {
+ Handle<Object> callback_object(lookup->GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, name, value,
+ handle(lookup->holder()),
+ callback_object, strict_mode);
+ }
+ case INTERCEPTOR:
+ maybe_result = SetPropertyWithInterceptor(
+ handle(lookup->holder()), name, value, attributes, strict_mode);
+ break;
+ case HANDLER:
+ case NONEXISTENT:
+ UNREACHABLE();
+ }
}
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, maybe_result, Object);
if (is_observed) {
if (lookup->IsTransition()) {
EnqueueChangeRecord(object, "add", name, old_value);
} else {
LookupResult new_lookup(isolate);
- object->LocalLookup(*name, &new_lookup, true);
+ object->LookupOwn(name, &new_lookup, true);
if (new_lookup.IsDataProperty()) {
- Handle<Object> new_value = Object::GetProperty(object, name);
+ Handle<Object> new_value =
+ Object::GetPropertyOrElement(object, name).ToHandleChecked();
if (!new_value->SameValue(*old_value)) {
EnqueueChangeRecord(object, "update", name, old_value);
}
@@ -4154,23 +4158,22 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
}
-// Set a real local property, even if it is READ_ONLY. If the property is not
+// Set a real own property, even if it is READ_ONLY. If the property is not
// present, add it with attributes NONE. This code is an exact clone of
// SetProperty, with the check for IsReadOnly and the check for a
// callback setter removed. The two lines that look up the LookupResult
// are also added. If one of the functions is changed, the other
// should be.
-// Note that this method cannot be used to set the prototype of a function
-// because ConvertDescriptorToField() which is called in "case CALLBACKS:"
-// doesn't handle function prototypes correctly.
-Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
+MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
ValueType value_type,
StoreMode mode,
- ExtensibilityCheck extensibility_check) {
+ ExtensibilityCheck extensibility_check,
+ StoreFromKeyed store_from_keyed,
+ ExecutableAccessorInfoHandling handling) {
Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing callbacks or
@@ -4178,16 +4181,16 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
AssertNoContextChange ncc(isolate);
LookupResult lookup(isolate);
- object->LocalLookup(*name, &lookup, true);
+ object->LookupOwn(name, &lookup, true);
if (!lookup.IsFound()) {
object->map()->LookupTransition(*object, *name, &lookup);
}
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ if (!isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(object, &lookup, name, value,
- false, kNonStrictMode);
+ false, SLOPPY);
}
}
@@ -4195,13 +4198,13 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return SetLocalPropertyIgnoreAttributes(Handle<JSObject>::cast(proto),
+ return SetOwnPropertyIgnoreAttributes(Handle<JSObject>::cast(proto),
name, value, attributes, value_type, mode, extensibility_check);
}
- if (lookup.IsFound() &&
- (lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) {
- object->LocalLookupRealNamedProperty(*name, &lookup);
+ if (lookup.IsInterceptor() ||
+ (lookup.IsDescriptorOrDictionary() && lookup.type() == CALLBACKS)) {
+ object->LookupOwnRealNamedProperty(name, &lookup);
}
// Check for accessor in prototype chain removed here in clone.
@@ -4210,62 +4213,108 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
TransitionFlag flag = lookup.IsFound()
? OMIT_TRANSITION : INSERT_TRANSITION;
// Neither properties nor transitions found.
- return AddProperty(object, name, value, attributes, kNonStrictMode,
- MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode, flag);
+ return AddProperty(object, name, value, attributes, SLOPPY,
+ store_from_keyed, extensibility_check, value_type, mode, flag);
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
PropertyAttributes old_attributes = ABSENT;
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsProperty()) {
- if (lookup.IsDataProperty()) old_value =
- Object::GetProperty(object, name);
+ if (lookup.IsDataProperty()) {
+ old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked();
+ }
old_attributes = lookup.GetAttributes();
}
+ bool executed_set_prototype = false;
+
// Check of IsReadOnly removed from here in clone.
- switch (lookup.type()) {
- case NORMAL:
- ReplaceSlowProperty(object, name, value, attributes);
- break;
- case FIELD:
- SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
- break;
- case CONSTANT:
- // Only replace the constant if necessary.
- if (lookup.GetAttributes() != attributes ||
- *value != lookup.GetConstant()) {
+ if (lookup.IsTransition()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ SetPropertyUsingTransition(
+ handle(lookup.holder()), &lookup, name, value, attributes),
+ Object);
+ } else {
+ switch (lookup.type()) {
+ case NORMAL:
+ ReplaceSlowProperty(object, name, value, attributes);
+ break;
+ case FIELD:
SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
+ break;
+ case CONSTANT:
+ // Only replace the constant if necessary.
+ if (lookup.GetAttributes() != attributes ||
+ *value != lookup.GetConstant()) {
+ SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
+ }
+ break;
+ case CALLBACKS:
+ {
+ Handle<Object> callback(lookup.GetCallbackObject(), isolate);
+ if (callback->IsExecutableAccessorInfo() &&
+ handling == DONT_FORCE_FIELD) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ JSObject::SetPropertyWithCallback(object,
+ name,
+ value,
+ handle(lookup.holder()),
+ callback,
+ STRICT),
+ Object);
+
+ if (attributes != lookup.GetAttributes()) {
+ Handle<ExecutableAccessorInfo> new_data =
+ Accessors::CloneAccessor(
+ isolate, Handle<ExecutableAccessorInfo>::cast(callback));
+ new_data->set_property_attributes(attributes);
+ if (attributes & READ_ONLY) {
+ // This way we don't have to introduce a lookup to the setter; we
+ // simply make it unavailable to reflect the attributes.
+ new_data->clear_setter();
+ }
+
+ SetPropertyCallback(object, name, new_data, attributes);
+ }
+ if (is_observed) {
+ // If we are setting the prototype of a function and are observed,
+ // don't send change records because the prototype handles that
+ // itself.
+ executed_set_prototype = object->IsJSFunction() &&
+ String::Equals(isolate->factory()->prototype_string(),
+ Handle<String>::cast(name)) &&
+ Handle<JSFunction>::cast(object)->should_have_prototype();
+ }
+ } else {
+ ConvertAndSetOwnProperty(&lookup, name, value, attributes);
+ }
+ break;
}
- break;
- case CALLBACKS:
- ConvertAndSetLocalProperty(&lookup, name, value, attributes);
- break;
- case TRANSITION: {
- Handle<Object> result = SetPropertyUsingTransition(
- handle(lookup.holder()), &lookup, name, value, attributes);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
- break;
+ case NONEXISTENT:
+ case HANDLER:
+ case INTERCEPTOR:
+ UNREACHABLE();
}
- case NONEXISTENT:
- case HANDLER:
- case INTERCEPTOR:
- UNREACHABLE();
}
- if (is_observed) {
+ if (is_observed && !executed_set_prototype) {
if (lookup.IsTransition()) {
EnqueueChangeRecord(object, "add", name, old_value);
} else if (old_value->IsTheHole()) {
EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else {
LookupResult new_lookup(isolate);
- object->LocalLookup(*name, &new_lookup, true);
+ object->LookupOwn(name, &new_lookup, true);
bool value_changed = false;
if (new_lookup.IsDataProperty()) {
- Handle<Object> new_value = Object::GetProperty(object, name);
+ Handle<Object> new_value =
+ Object::GetPropertyOrElement(object, name).ToHandleChecked();
value_changed = !old_value->SameValue(*new_value);
}
if (new_lookup.GetAttributes() != old_attributes) {
@@ -4281,185 +4330,142 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
}
-PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
- JSObject* receiver,
- Name* name,
- bool continue_search) {
- // Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound()) return result.GetAttributes();
-
- if (continue_search) {
- // Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- if (!pt->IsNull()) {
- return JSObject::cast(pt)->
- GetPropertyAttributeWithReceiver(receiver, name);
- }
- }
- return ABSENT;
-}
-
-
-PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
- JSObject* receiver,
- Name* name,
- bool continue_search) {
+Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
+ Handle<JSObject> holder,
+ Handle<Object> receiver,
+ Handle<Name> name) {
// TODO(rossberg): Support symbols in the API.
- if (name->IsSymbol()) return ABSENT;
+ if (name->IsSymbol()) return Maybe<PropertyAttributes>(ABSENT);
- Isolate* isolate = GetIsolate();
+ Isolate* isolate = holder->GetIsolate();
HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- Handle<JSObject> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(String::cast(name));
- PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
+ Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *receiver, *holder);
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQueryCallback query =
v8::ToCData<v8::NamedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
+ ApiNamedPropertyAccess("interceptor-named-has", *holder, *name));
v8::Handle<v8::Integer> result =
- args.Call(query, v8::Utils::ToLocal(name_handle));
+ args.Call(query, v8::Utils::ToLocal(Handle<String>::cast(name)));
if (!result.IsEmpty()) {
ASSERT(result->IsInt32());
- return static_cast<PropertyAttributes>(result->Int32Value());
+ return Maybe<PropertyAttributes>(
+ static_cast<PropertyAttributes>(result->Int32Value()));
}
} else if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetterCallback getter =
v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
+ ApiNamedPropertyAccess("interceptor-named-get-has", *holder, *name));
v8::Handle<v8::Value> result =
- args.Call(getter, v8::Utils::ToLocal(name_handle));
- if (!result.IsEmpty()) return DONT_ENUM;
+ args.Call(getter, v8::Utils::ToLocal(Handle<String>::cast(name)));
+ if (!result.IsEmpty()) return Maybe<PropertyAttributes>(DONT_ENUM);
}
- return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
- *name_handle,
- continue_search);
+ return Maybe<PropertyAttributes>();
}
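
Returning Maybe<PropertyAttributes> lets the interceptor distinguish "no answer" from every legitimate attribute value; the caller checks has_value and keeps looking when the interceptor abstains. A minimal Maybe with the same has_value/value shape the use site later in this patch consumes (the attribute values here are illustrative):

#include <cassert>

template <typename T>
struct Maybe {
  Maybe() : has_value(false), value() {}
  explicit Maybe(T t) : has_value(true), value(t) {}
  bool has_value;
  T value;
};

enum PropertyAttributes { NONE, DONT_ENUM, ABSENT };

Maybe<PropertyAttributes> QueryInterceptor(bool answered) {
  if (!answered) return Maybe<PropertyAttributes>();  // abstain
  return Maybe<PropertyAttributes>(DONT_ENUM);
}

int main() {
  assert(!QueryInterceptor(false).has_value);
  assert(QueryInterceptor(true).value == DONT_ENUM);
  return 0;
}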
-PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
- JSReceiver* receiver,
- Name* key) {
+PropertyAttributes JSReceiver::GetOwnPropertyAttributes(
+ Handle<JSReceiver> object, Handle<Name> name) {
+ // Check whether the name is an array index.
uint32_t index = 0;
- if (IsJSObject() && key->AsArrayIndex(&index)) {
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- receiver, index, true);
+ if (object->IsJSObject() && name->AsArrayIndex(&index)) {
+ return GetOwnElementAttribute(object, index);
}
- // Named property.
- LookupResult lookup(GetIsolate());
- Lookup(key, &lookup);
- return GetPropertyAttributeForResult(receiver, &lookup, key, true);
+ LookupIterator it(object, name, LookupIterator::CHECK_OWN);
+ return GetPropertyAttributes(&it);
}
-PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
- JSReceiver* receiver,
- LookupResult* lookup,
- Name* name,
- bool continue_search) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- JSObject* this_obj = JSObject::cast(this);
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
- return this_obj->GetPropertyAttributeWithFailedAccessCheck(
- receiver, lookup, name, continue_search);
- }
- }
- if (lookup->IsFound()) {
- switch (lookup->type()) {
- case NORMAL: // fall through
- case FIELD:
- case CONSTANT:
- case CALLBACKS:
- return lookup->GetAttributes();
- case HANDLER: {
- return JSProxy::cast(lookup->proxy())->GetPropertyAttributeWithHandler(
- receiver, name);
- }
- case INTERCEPTOR:
- return lookup->holder()->GetPropertyAttributeWithInterceptor(
- JSObject::cast(receiver), name, continue_search);
- case TRANSITION:
- case NONEXISTENT:
+PropertyAttributes JSReceiver::GetPropertyAttributes(LookupIterator* it) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ return JSProxy::GetPropertyAttributesWithHandler(
+ it->GetJSProxy(), it->GetReceiver(), it->name());
+ case LookupIterator::INTERCEPTOR: {
+ Maybe<PropertyAttributes> result =
+ JSObject::GetPropertyAttributesWithInterceptor(
+ it->GetHolder(), it->GetReceiver(), it->name());
+ if (result.has_value) return result.value;
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK:
+ if (it->HasAccess(v8::ACCESS_HAS)) break;
+ return JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
+ case LookupIterator::PROPERTY:
+ if (it->HasProperty()) return it->property_details().attributes();
+ break;
}
}
return ABSENT;
}
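
GetPropertyAttributes is now a flat loop over LookupIterator states instead of a recursive LookupResult walk: each state either yields a definitive answer or defers to the next holder on the chain. A toy version of the same control flow, with invented states and integer return codes standing in for attributes:

#include <cassert>
#include <vector>

enum State { JSPROXY, INTERCEPTOR, ACCESS_CHECK, PROPERTY };

// -1 plays the role of ABSENT; other values stand in for attributes.
int GetAttributes(const std::vector<State>& chain, bool interceptor_answers,
                  bool access_ok) {
  for (State s : chain) {
    switch (s) {
      case JSPROXY:
        return 1;                           // delegate to the handler
      case INTERCEPTOR:
        if (interceptor_answers) return 2;  // Maybe<> had a value
        break;                              // abstained: keep walking
      case ACCESS_CHECK:
        if (access_ok) break;               // allowed: keep walking
        return -1;                          // failed access check
      case PROPERTY:
        return 0;                           // found a real property
    }
  }
  return -1;  // ABSENT
}

int main() {
  assert(GetAttributes({ACCESS_CHECK, PROPERTY}, false, true) == 0);
  assert(GetAttributes({INTERCEPTOR}, false, true) == -1);
  return 0;
}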
-PropertyAttributes JSReceiver::GetLocalPropertyAttribute(Name* name) {
- // Check whether the name is an array index.
- uint32_t index = 0;
- if (IsJSObject() && name->AsArrayIndex(&index)) {
- return GetLocalElementAttribute(index);
- }
- // Named property.
- LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup, true);
- return GetPropertyAttributeForResult(this, &lookup, name, false);
-}
-
-
PropertyAttributes JSObject::GetElementAttributeWithReceiver(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool check_prototype) {
+ Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
+ // TODO(yangguo): Issue 3269, check for scheduled exception missing?
return ABSENT;
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsNull()) return ABSENT;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetElementAttributeWithReceiver(
- receiver, index, continue_search);
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(proto), receiver, index, check_prototype);
}
// Check for lookup interceptor except when bootstrapping.
- if (HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
- return GetElementAttributeWithInterceptor(receiver, index, continue_search);
+ if (object->HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
+ return JSObject::GetElementAttributeWithInterceptor(
+ object, receiver, index, check_prototype);
}
return GetElementAttributeWithoutInterceptor(
- receiver, index, continue_search);
+ object, receiver, index, check_prototype);
}
PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool check_prototype) {
+ Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSReceiver> hreceiver(receiver);
- Handle<JSObject> holder(this);
- PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *receiver, *object);
if (!interceptor->query()->IsUndefined()) {
v8::IndexedPropertyQueryCallback query =
v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
+ ApiIndexedPropertyAccess("interceptor-indexed-has", *object, index));
v8::Handle<v8::Integer> result = args.Call(query, index);
if (!result.IsEmpty())
return static_cast<PropertyAttributes>(result->Int32Value());
@@ -4467,83 +4473,69 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
+ ApiIndexedPropertyAccess(
+ "interceptor-indexed-get-has", *object, index));
v8::Handle<v8::Value> result = args.Call(getter, index);
if (!result.IsEmpty()) return NONE;
}
- return holder->GetElementAttributeWithoutInterceptor(
- *hreceiver, index, continue_search);
+ return GetElementAttributeWithoutInterceptor(
+ object, receiver, index, check_prototype);
}
PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- PropertyAttributes attr = GetElementsAccessor()->GetAttributes(
- receiver, this, index);
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool check_prototype) {
+ PropertyAttributes attr = object->GetElementsAccessor()->GetAttributes(
+ receiver, object, index);
if (attr != ABSENT) return attr;
// Handle [] on String objects.
- if (IsStringObjectWithCharacterAt(index)) {
+ if (object->IsStringObjectWithCharacterAt(index)) {
return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
}
- if (!continue_search) return ABSENT;
+ if (!check_prototype) return ABSENT;
- Object* pt = GetPrototype();
- if (pt->IsJSProxy()) {
+ Handle<Object> proto(object->GetPrototype(), object->GetIsolate());
+ if (proto->IsJSProxy()) {
// We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(receiver, index);
+ return JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy>::cast(proto), receiver, index);
}
- if (pt->IsNull()) return ABSENT;
- return JSObject::cast(pt)->GetElementAttributeWithReceiver(
- receiver, index, true);
+ if (proto->IsNull()) return ABSENT;
+ return GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(proto), receiver, index, true);
}
-Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache,
- Handle<JSObject> obj,
- PropertyNormalizationMode mode) {
- int index = obj->map()->Hash() % kEntries;
- Handle<Object> result = handle(cache->get(index), cache->GetIsolate());
- if (result->IsMap() &&
- Handle<Map>::cast(result)->EquivalentToForNormalization(obj->map(),
- mode)) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Handle<Map>::cast(result)->SharedMapVerify();
- }
-#endif
-#ifdef ENABLE_SLOW_ASSERTS
- if (FLAG_enable_slow_asserts) {
- // The cached map should match newly created normalized map bit-by-bit,
- // except for the code cache, which can contain some ics which can be
- // applied to the shared map.
- Handle<Map> fresh = Map::CopyNormalized(handle(obj->map()), mode,
- SHARED_NORMALIZED_MAP);
+Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
+ Handle<FixedArray> array(
+ isolate->factory()->NewFixedArray(kEntries, TENURED));
+ return Handle<NormalizedMapCache>::cast(array);
+}
- ASSERT(memcmp(fresh->address(),
- Handle<Map>::cast(result)->address(),
- Map::kCodeCacheOffset) == 0);
- STATIC_ASSERT(Map::kDependentCodeOffset ==
- Map::kCodeCacheOffset + kPointerSize);
- int offset = Map::kDependentCodeOffset + kPointerSize;
- ASSERT(memcmp(fresh->address() + offset,
- Handle<Map>::cast(result)->address() + offset,
- Map::kSize - offset) == 0);
- }
-#endif
- return Handle<Map>::cast(result);
+
+MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
+ PropertyNormalizationMode mode) {
+ DisallowHeapAllocation no_gc;
+ Object* value = FixedArray::get(GetIndex(fast_map));
+ if (!value->IsMap() ||
+ !Map::cast(value)->EquivalentToForNormalization(*fast_map, mode)) {
+ return MaybeHandle<Map>();
}
+ return handle(Map::cast(value));
+}
- Isolate* isolate = cache->GetIsolate();
- Handle<Map> map = Map::CopyNormalized(handle(obj->map()), mode,
- SHARED_NORMALIZED_MAP);
- ASSERT(map->is_dictionary_map());
- cache->set(index, *map);
- isolate->counters()->normalized_maps()->Increment();
- return map;
+void NormalizedMapCache::Set(Handle<Map> fast_map,
+ Handle<Map> normalized_map) {
+ DisallowHeapAllocation no_gc;
+ ASSERT(normalized_map->is_dictionary_map());
+ FixedArray::set(GetIndex(fast_map), *normalized_map);
}
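
The cache is now split into a probing Get, which may simply miss, and an unconditional Set, instead of a Get that allocated a normalized map on miss. A sketch of the underlying direct-mapped structure, assuming a simplified hash and omitting the EquivalentToForNormalization check that the real Get performs:

#include <array>
#include <cassert>

struct Map {
  unsigned hash;
  bool is_dictionary_map;
};

constexpr int kEntries = 64;

struct NormalizedMapCache {
  std::array<const Map*, kEntries> slots{};  // value-initialized to null

  // Direct-mapped probe: hit only if the slot holds a plausible entry.
  const Map* Get(const Map& fast_map) const {
    const Map* m = slots[fast_map.hash % kEntries];
    if (m == nullptr || !m->is_dictionary_map) return nullptr;  // miss
    return m;
  }

  void Set(const Map& fast_map, const Map* normalized) {
    slots[fast_map.hash % kEntries] = normalized;  // overwrite on collision
  }
};

int main() {
  NormalizedMapCache cache;
  Map fast{7, false};
  Map slow{7, true};
  assert(cache.Get(fast) == nullptr);
  cache.Set(fast, &slow);
  assert(cache.Get(fast) == &slow);
  return 0;
}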
@@ -4576,6 +4568,7 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
Handle<Map> map(object->map());
+ Handle<Map> new_map = Map::Normalize(map, mode);
// Allocate new content.
int real_size = map->NumberOfOwnDescriptors();
@@ -4586,7 +4579,7 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
property_count += 2; // Make space for two more properties.
}
Handle<NameDictionary> dictionary =
- isolate->factory()->NewNameDictionary(property_count);
+ NameDictionary::New(isolate, property_count);
Handle<DescriptorArray> descs(map->instance_descriptors());
for (int i = 0; i < real_size; i++) {
@@ -4597,16 +4590,17 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
Handle<Object> value(descs->GetConstant(i), isolate);
PropertyDetails d = PropertyDetails(
details.attributes(), NORMAL, i + 1);
- dictionary = NameDictionaryAdd(dictionary, key, value, d);
+ dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
case FIELD: {
Handle<Name> key(descs->GetKey(i));
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
Handle<Object> value(
- object->RawFastPropertyAt(descs->GetFieldIndex(i)), isolate);
+ object->RawFastPropertyAt(index), isolate);
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, i + 1);
- dictionary = NameDictionaryAdd(dictionary, key, value, d);
+ dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
case CALLBACKS: {
@@ -4614,14 +4608,13 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
Handle<Object> value(descs->GetCallbacksObject(i), isolate);
PropertyDetails d = PropertyDetails(
details.attributes(), CALLBACKS, i + 1);
- dictionary = NameDictionaryAdd(dictionary, key, value, d);
+ dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
case INTERCEPTOR:
break;
case HANDLER:
case NORMAL:
- case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
break;
@@ -4631,11 +4624,6 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
// Copy the next enumeration index from instance descriptor.
dictionary->SetNextEnumerationIndex(real_size + 1);
- Handle<NormalizedMapCache> cache(
- isolate->context()->native_context()->normalized_map_cache());
- Handle<Map> new_map = NormalizedMapCache::Get(cache, object, mode);
- ASSERT(new_map->is_dictionary_map());
-
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
@@ -4643,15 +4631,16 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
int new_instance_size = new_map->instance_size();
int instance_size_delta = map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- isolate->heap()->CreateFillerObjectAt(object->address() + new_instance_size,
- instance_size_delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*object))) {
- MemoryChunk::IncrementLiveBytesFromMutator(object->address(),
- -instance_size_delta);
- }
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta);
+ heap->AdjustLiveBytes(object->address(),
+ -instance_size_delta,
+ Heap::FROM_MUTATOR);
- object->set_map(*new_map);
- map->NotifyLeafMapLayoutChange();
+  // We store the new map using a release store after creating a filler for
+  // the left-over space, to avoid races with the sweeper thread.
+ object->synchronized_set_map(*new_map);
object->set_properties(*dictionary);
@@ -4670,133 +4659,236 @@ void JSObject::TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields) {
if (object->HasFastProperties()) return;
ASSERT(!object->IsGlobalObject());
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->property_dictionary()->TransformPropertiesToFastFor(
- *object, unused_property_fields));
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<NameDictionary> dictionary(object->property_dictionary());
+
+ // Make sure we preserve dictionary representation if there are too many
+ // descriptors.
+ int number_of_elements = dictionary->NumberOfElements();
+ if (number_of_elements > kMaxNumberOfDescriptors) return;
+
+ if (number_of_elements != dictionary->NextEnumerationIndex()) {
+ NameDictionary::DoGenerateNewEnumerationIndices(dictionary);
+ }
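+  // The enumeration indices must form a dense 1..n sequence at this point,
+  // because each dictionary_index is used below as the descriptor slot in
+  // descriptors->Set(enumeration_index - 1, &d).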
+
+ int instance_descriptor_length = 0;
+ int number_of_fields = 0;
+
+ // Compute the length of the instance descriptor.
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ Object* value = dictionary->ValueAt(i);
+ PropertyType type = dictionary->DetailsAt(i).type();
+ ASSERT(type != FIELD);
+ instance_descriptor_length++;
+ if (type == NORMAL && !value->IsJSFunction()) {
+ number_of_fields += 1;
+ }
+ }
+ }
+
+ int inobject_props = object->map()->inobject_properties();
+
+ // Allocate new map.
+ Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
+ new_map->set_dictionary_map(false);
+
+ if (instance_descriptor_length == 0) {
+ DisallowHeapAllocation no_gc;
+ ASSERT_LE(unused_property_fields, inobject_props);
+ // Transform the object.
+ new_map->set_unused_property_fields(inobject_props);
+ object->set_map(*new_map);
+ object->set_properties(isolate->heap()->empty_fixed_array());
+ // Check that it really works.
+ ASSERT(object->HasFastProperties());
+ return;
+ }
+
+ // Allocate the instance descriptor.
+ Handle<DescriptorArray> descriptors = DescriptorArray::Allocate(
+ isolate, instance_descriptor_length);
+
+ int number_of_allocated_fields =
+ number_of_fields + unused_property_fields - inobject_props;
+ if (number_of_allocated_fields < 0) {
+ // There is enough inobject space for all fields (including unused).
+ number_of_allocated_fields = 0;
+ unused_property_fields = inobject_props - number_of_fields;
+ }
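+  // Worked example (illustrative numbers): with number_of_fields == 3,
+  // unused_property_fields == 1 and inobject_props == 6, we get
+  // 3 + 1 - 6 == -2 < 0, so no out-of-object storage is allocated and
+  // unused_property_fields becomes 6 - 3 == 3.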
+
+ // Allocate the fixed array for the fields.
+ Handle<FixedArray> fields = factory->NewFixedArray(
+ number_of_allocated_fields);
+
+ // Fill in the instance descriptor and the fields.
+ int current_offset = 0;
+ for (int i = 0; i < capacity; i++) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ Object* value = dictionary->ValueAt(i);
+ Handle<Name> key;
+ if (k->IsSymbol()) {
+ key = handle(Symbol::cast(k));
+ } else {
+ // Ensure the key is a unique name before writing into the
+ // instance descriptor.
+ key = factory->InternalizeString(handle(String::cast(k)));
+ }
+
+ PropertyDetails details = dictionary->DetailsAt(i);
+ int enumeration_index = details.dictionary_index();
+ PropertyType type = details.type();
+
+ if (value->IsJSFunction()) {
+ ConstantDescriptor d(key,
+ handle(value, isolate),
+ details.attributes());
+ descriptors->Set(enumeration_index - 1, &d);
+ } else if (type == NORMAL) {
+ if (current_offset < inobject_props) {
+ object->InObjectPropertyAtPut(current_offset,
+ value,
+ UPDATE_WRITE_BARRIER);
+ } else {
+ int offset = current_offset - inobject_props;
+ fields->set(offset, value);
+ }
+ FieldDescriptor d(key,
+ current_offset++,
+ details.attributes(),
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged());
+ descriptors->Set(enumeration_index - 1, &d);
+ } else if (type == CALLBACKS) {
+ CallbacksDescriptor d(key,
+ handle(value, isolate),
+ details.attributes());
+ descriptors->Set(enumeration_index - 1, &d);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ }
+ ASSERT(current_offset == number_of_fields);
+
+ descriptors->Sort();
+
+ DisallowHeapAllocation no_gc;
+ new_map->InitializeDescriptors(*descriptors);
+ new_map->set_unused_property_fields(unused_property_fields);
+
+ // Transform the object.
+ object->set_map(*new_map);
+
+ object->set_properties(*fields);
+ ASSERT(object->IsJSObject());
+
+ // Check that it really works.
+ ASSERT(object->HasFastProperties());
}
-static MUST_USE_RESULT MaybeObject* CopyFastElementsToDictionary(
- Isolate* isolate,
- FixedArrayBase* array,
+void JSObject::ResetElements(Handle<JSObject> object) {
+ Heap* heap = object->GetIsolate()->heap();
+ CHECK(object->map() != heap->sloppy_arguments_elements_map());
+ object->set_elements(object->map()->GetInitialElements());
+}
+
+
+static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
+ Handle<FixedArrayBase> array,
int length,
- SeededNumberDictionary* dictionary) {
- Heap* heap = isolate->heap();
+ Handle<SeededNumberDictionary> dictionary) {
+ Isolate* isolate = array->GetIsolate();
+ Factory* factory = isolate->factory();
bool has_double_elements = array->IsFixedDoubleArray();
for (int i = 0; i < length; i++) {
- Object* value = NULL;
+ Handle<Object> value;
if (has_double_elements) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(array);
if (double_array->is_the_hole(i)) {
- value = isolate->heap()->the_hole_value();
+ value = factory->the_hole_value();
} else {
- // Objects must be allocated in the old object space, since the
- // overall number of HeapNumbers needed for the conversion might
- // exceed the capacity of new space, and we would fail repeatedly
- // trying to convert the FixedDoubleArray.
- MaybeObject* maybe_value_object =
- heap->AllocateHeapNumber(double_array->get_scalar(i), TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
+ value = factory->NewHeapNumber(double_array->get_scalar(i));
}
} else {
- value = FixedArray::cast(array)->get(i);
+ value = handle(Handle<FixedArray>::cast(array)->get(i), isolate);
}
if (!value->IsTheHole()) {
PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
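+      // AddNumberEntry may grow the backing store, so capture the (possibly
+      // new) dictionary handle it returns.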
- MaybeObject* maybe_result =
- dictionary->AddNumberEntry(i, value, details);
- if (!maybe_result->To(&dictionary)) return maybe_result;
+ dictionary =
+ SeededNumberDictionary::AddNumberEntry(dictionary, i, value, details);
}
}
return dictionary;
}
-static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
- Handle<FixedArrayBase> array,
- int length,
- Handle<SeededNumberDictionary> dict) {
- Isolate* isolate = array->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- CopyFastElementsToDictionary(
- isolate, *array, length, *dict),
- SeededNumberDictionary);
-}
-
-
Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->NormalizeElements(),
- SeededNumberDictionary);
-}
-
-
-MaybeObject* JSObject::NormalizeElements() {
- ASSERT(!HasExternalArrayElements());
+ ASSERT(!object->HasExternalArrayElements() &&
+ !object->HasFixedTypedArrayElements());
+ Isolate* isolate = object->GetIsolate();
// Find the backing store.
- FixedArrayBase* array = FixedArrayBase::cast(elements());
- Map* old_map = array->map();
+ Handle<FixedArrayBase> array(FixedArrayBase::cast(object->elements()));
bool is_arguments =
- (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
+ (array->map() == isolate->heap()->sloppy_arguments_elements_map());
if (is_arguments) {
- array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
+ array = handle(FixedArrayBase::cast(
+ Handle<FixedArray>::cast(array)->get(1)));
}
- if (array->IsDictionary()) return array;
+ if (array->IsDictionary()) return Handle<SeededNumberDictionary>::cast(array);
- ASSERT(HasFastSmiOrObjectElements() ||
- HasFastDoubleElements() ||
- HasFastArgumentsElements());
+ ASSERT(object->HasFastSmiOrObjectElements() ||
+ object->HasFastDoubleElements() ||
+ object->HasFastArgumentsElements());
// Compute the effective length and allocate a new backing store.
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
+ int length = object->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
: array->length();
int old_capacity = 0;
int used_elements = 0;
- GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- SeededNumberDictionary::Allocate(GetHeap(), used_elements);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ object->GetElementsCapacityAndUsage(&old_capacity, &used_elements);
+ Handle<SeededNumberDictionary> dictionary =
+ SeededNumberDictionary::New(isolate, used_elements);
- maybe_dictionary = CopyFastElementsToDictionary(
- GetIsolate(), array, length, dictionary);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = CopyFastElementsToDictionary(array, length, dictionary);
// Switch to using the dictionary as the backing storage for elements.
if (is_arguments) {
- FixedArray::cast(elements())->set(1, dictionary);
+ FixedArray::cast(object->elements())->set(1, *dictionary);
} else {
    // Set the new map first to satisfy the elements type assert in
// set_elements().
- Map* new_map;
- MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(),
- DICTIONARY_ELEMENTS);
- if (!maybe->To(&new_map)) return maybe;
- set_map(new_map);
- set_elements(dictionary);
+ Handle<Map> new_map =
+ JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
+
+ JSObject::MigrateToMap(object, new_map);
+ object->set_elements(*dictionary);
}
- old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
- Increment();
+ isolate->counters()->elements_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object elements have been normalized:\n");
- Print();
+ object->Print();
}
#endif
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
return dictionary;
}
-Smi* JSReceiver::GenerateIdentityHash() {
- Isolate* isolate = GetIsolate();
-
+static Smi* GenerateIdentityHash(Isolate* isolate) {
int hash_value;
int attempts = 0;
do {
@@ -4812,33 +4904,51 @@ Smi* JSReceiver::GenerateIdentityHash() {
void JSObject::SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash) {
+ ASSERT(!object->IsJSGlobalProxy());
Isolate* isolate = object->GetIsolate();
SetHiddenProperty(object, isolate->factory()->identity_hash_string(), hash);
}
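+// ProxyType only needs to provide hash() and set_hash(); both JSGlobalProxy
+// and JSProxy do, which lets this single helper back the
+// GetOrCreateIdentityHash implementations for both proxy kinds below.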
+template<typename ProxyType>
+static Handle<Smi> GetOrCreateIdentityHashHelper(Handle<ProxyType> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+
+ Handle<Object> maybe_hash(proxy->hash(), isolate);
+ if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
+
+ Handle<Smi> hash(GenerateIdentityHash(isolate), isolate);
+ proxy->set_hash(*hash);
+ return hash;
+}
+
+
Object* JSObject::GetIdentityHash() {
- Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string());
- return stored_value->IsSmi() ? stored_value : GetHeap()->undefined_value();
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = GetIsolate();
+ if (IsJSGlobalProxy()) {
+ return JSGlobalProxy::cast(this)->hash();
+ }
+ Object* stored_value =
+ GetHiddenProperty(isolate->factory()->identity_hash_string());
+ return stored_value->IsSmi()
+ ? stored_value
+ : isolate->heap()->undefined_value();
}
-Handle<Object> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
- Handle<Object> hash(object->GetIdentityHash(), object->GetIsolate());
- if (hash->IsSmi())
- return hash;
+Handle<Smi> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
+ if (object->IsJSGlobalProxy()) {
+ return GetOrCreateIdentityHashHelper(Handle<JSGlobalProxy>::cast(object));
+ }
Isolate* isolate = object->GetIsolate();
- hash = handle(object->GenerateIdentityHash(), isolate);
- Handle<Object> result = SetHiddenProperty(object,
- isolate->factory()->identity_hash_string(), hash);
-
- if (result->IsUndefined()) {
- // Trying to get hash of detached proxy.
- return handle(Smi::FromInt(0), isolate);
- }
+ Handle<Object> maybe_hash(object->GetIdentityHash(), isolate);
+ if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
+ Handle<Smi> hash(GenerateIdentityHash(isolate), isolate);
+ SetHiddenProperty(object, isolate->factory()->identity_hash_string(), hash);
return hash;
}
@@ -4848,22 +4958,17 @@ Object* JSProxy::GetIdentityHash() {
}
-Handle<Object> JSProxy::GetOrCreateIdentityHash(Handle<JSProxy> proxy) {
- Isolate* isolate = proxy->GetIsolate();
-
- Handle<Object> hash(proxy->GetIdentityHash(), isolate);
- if (hash->IsSmi())
- return hash;
-
- hash = handle(proxy->GenerateIdentityHash(), isolate);
- proxy->set_hash(*hash);
- return hash;
+Handle<Smi> JSProxy::GetOrCreateIdentityHash(Handle<JSProxy> proxy) {
+ return GetOrCreateIdentityHashHelper(proxy);
}
-Object* JSObject::GetHiddenProperty(Name* key) {
+Object* JSObject::GetHiddenProperty(Handle<Name> key) {
+ DisallowHeapAllocation no_gc;
ASSERT(key->IsUniqueName());
if (IsJSGlobalProxy()) {
+ // JSGlobalProxies store their hash internally.
+ ASSERT(*key != GetHeap()->identity_hash_string());
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
// If the proxy is detached, return undefined.
@@ -4876,7 +4981,7 @@ Object* JSObject::GetHiddenProperty(Name* key) {
if (inline_value->IsSmi()) {
// Handle inline-stored identity hash.
- if (key == GetHeap()->identity_hash_string()) {
+ if (*key == GetHeap()->identity_hash_string()) {
return inline_value;
} else {
return GetHeap()->the_hole_value();
@@ -4898,6 +5003,8 @@ Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> object,
ASSERT(key->IsUniqueName());
if (object->IsJSGlobalProxy()) {
+ // JSGlobalProxies store their hash internally.
+ ASSERT(*key != *isolate->factory()->identity_hash_string());
// For a proxy, use the prototype as target object.
Handle<Object> proxy_parent(object->GetPrototype(), isolate);
// If the proxy is detached, return undefined.
@@ -4951,14 +5058,15 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
- ObjectHashTable::Put(hashtable, key, isolate->factory()->the_hole_value());
+ bool was_present = false;
+ ObjectHashTable::Remove(hashtable, key, &was_present);
}
-bool JSObject::HasHiddenProperties() {
- return GetPropertyAttributePostInterceptor(this,
- GetHeap()->hidden_string(),
- false) != ABSENT;
+bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
+ Handle<Name> hidden = object->GetIsolate()->factory()->hidden_string();
+ LookupIterator it(object, hidden, LookupIterator::CHECK_OWN_REAL);
+ return GetPropertyAttributes(&it) != ABSENT;
}
@@ -4977,8 +5085,9 @@ Object* JSObject::GetHiddenPropertiesHashTable() {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
ASSERT(descriptors->GetDetails(sorted_index).representation().
IsCompatibleForLoad(Representation::Tagged()));
- return this->RawFastPropertyAt(
- descriptors->GetFieldIndex(sorted_index));
+ FieldIndex index = FieldIndex::ForDescriptor(this->map(),
+ sorted_index);
+ return this->RawFastPropertyAt(index);
} else {
return GetHeap()->undefined_value();
}
@@ -4986,13 +5095,16 @@ Object* JSObject::GetHiddenPropertiesHashTable() {
return GetHeap()->undefined_value();
}
} else {
- PropertyAttributes attributes;
- // You can't install a getter on a property indexed by the hidden string,
- // so we can be sure that GetLocalPropertyPostInterceptor returns a real
- // object.
- return GetLocalPropertyPostInterceptor(this,
- GetHeap()->hidden_string(),
- &attributes)->ToObjectUnchecked();
+ Isolate* isolate = GetIsolate();
+ LookupResult result(isolate);
+ LookupOwnRealNamedProperty(isolate->factory()->hidden_string(), &result);
+ if (result.IsFound()) {
+ ASSERT(result.IsNormal());
+ ASSERT(result.holder() == this);
+ Object* value = GetNormalizedProperty(&result);
+ if (!value->IsTheHole()) return value;
+ }
+ return GetHeap()->undefined_value();
}
}
@@ -5006,9 +5118,8 @@ Handle<ObjectHashTable> JSObject::GetOrCreateHiddenPropertiesHashtable(
return Handle<ObjectHashTable>::cast(inline_value);
}
- Handle<ObjectHashTable> hashtable = isolate->factory()->NewObjectHashTable(
- kInitialCapacity,
- USE_CUSTOM_MINIMUM_CAPACITY);
+ Handle<ObjectHashTable> hashtable = ObjectHashTable::New(
+ isolate, kInitialCapacity, USE_CUSTOM_MINIMUM_CAPACITY);
if (inline_value->IsSmi()) {
// We were storing the identity hash inline and now allocated an actual
@@ -5018,14 +5129,14 @@ Handle<ObjectHashTable> JSObject::GetOrCreateHiddenPropertiesHashtable(
inline_value);
}
- JSObject::SetLocalPropertyIgnoreAttributes(
+ JSObject::SetOwnPropertyIgnoreAttributes(
object,
isolate->factory()->hidden_string(),
hashtable,
DONT_ENUM,
OPTIMAL_REPRESENTATION,
ALLOW_AS_CONSTANT,
- OMIT_EXTENSIBILITY_CHECK);
+ OMIT_EXTENSIBILITY_CHECK).Assert();
return hashtable;
}
@@ -5039,7 +5150,7 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
// We can store the identity hash inline iff there is no backing store
// for hidden properties yet.
- ASSERT(object->HasHiddenProperties() != value->IsSmi());
+ ASSERT(JSObject::HasHiddenProperties(object) != value->IsSmi());
if (object->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden string. Since the
@@ -5050,21 +5161,19 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
int sorted_index = descriptors->GetSortedKeyIndex(0);
if (descriptors->GetKey(sorted_index) == isolate->heap()->hidden_string()
&& sorted_index < object->map()->NumberOfOwnDescriptors()) {
- ASSERT(descriptors->GetType(sorted_index) == FIELD);
- object->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
- *value);
+ object->WriteToField(sorted_index, *value);
return object;
}
}
}
- SetLocalPropertyIgnoreAttributes(object,
- isolate->factory()->hidden_string(),
- value,
- DONT_ENUM,
- OPTIMAL_REPRESENTATION,
- ALLOW_AS_CONSTANT,
- OMIT_EXTENSIBILITY_CHECK);
+ SetOwnPropertyIgnoreAttributes(object,
+ isolate->factory()->hidden_string(),
+ value,
+ DONT_ENUM,
+ OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ OMIT_EXTENSIBILITY_CHECK).Assert();
return object;
}
@@ -5072,10 +5181,10 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
Handle<Object> JSObject::DeletePropertyPostInterceptor(Handle<JSObject> object,
Handle<Name> name,
DeleteMode mode) {
- // Check local property, ignore interceptor.
+ // Check own property, ignore interceptor.
Isolate* isolate = object->GetIsolate();
LookupResult result(isolate);
- object->LocalLookupRealNamedProperty(*name, &result);
+ object->LookupOwnRealNamedProperty(name, &result);
if (!result.IsFound()) return isolate->factory()->true_value();
// Normalize object if needed.
@@ -5085,8 +5194,8 @@ Handle<Object> JSObject::DeletePropertyPostInterceptor(Handle<JSObject> object,
}
-Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
- Handle<Name> name) {
+MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
+ Handle<JSObject> object, Handle<Name> name) {
Isolate* isolate = object->GetIsolate();
// TODO(rossberg): Support symbols in the API.
@@ -5102,7 +5211,7 @@ Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
isolate, interceptor->data(), *object, *object);
v8::Handle<v8::Boolean> result =
args.Call(deleter, v8::Utils::ToLocal(Handle<String>::cast(name)));
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
@@ -5113,25 +5222,13 @@ Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
}
Handle<Object> result =
DeletePropertyPostInterceptor(object, name, NORMAL_DELETION);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return result;
}
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<Object> AccessorDelete(Handle<JSObject> object,
- uint32_t index,
- JSObject::DeleteMode mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->GetElementsAccessor()->Delete(*object,
- index,
- mode),
- Object);
-}
-
-
-Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
- uint32_t index) {
+MaybeHandle<Object> JSObject::DeleteElementWithInterceptor(
+ Handle<JSObject> object,
+ uint32_t index) {
Isolate* isolate = object->GetIsolate();
Factory* factory = isolate->factory();
@@ -5148,7 +5245,7 @@ Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
PropertyCallbackArguments args(
isolate, interceptor->data(), *object, *object);
v8::Handle<v8::Boolean> result = args.Call(deleter, index);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
@@ -5156,23 +5253,23 @@ Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
// Rebox CustomArguments::kReturnValueOffset before returning.
return handle(*result_internal, isolate);
}
- Handle<Object> delete_result = AccessorDelete(object, index, NORMAL_DELETION);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ MaybeHandle<Object> delete_result = object->GetElementsAccessor()->Delete(
+ object, index, NORMAL_DELETION);
return delete_result;
}
-Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
- uint32_t index,
- DeleteMode mode) {
+MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
+ uint32_t index,
+ DeleteMode mode) {
Isolate* isolate = object->GetIsolate();
Factory* factory = isolate->factory();
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(*object, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ !isolate->MayIndexedAccess(object, index, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_DELETE);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->false_value();
}
@@ -5199,24 +5296,29 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
Handle<Object> old_value;
bool should_enqueue_change_record = false;
- if (FLAG_harmony_observation && object->map()->is_observed()) {
- should_enqueue_change_record = HasLocalElement(object, index);
+ if (object->map()->is_observed()) {
+ should_enqueue_change_record = HasOwnElement(object, index);
if (should_enqueue_change_record) {
- old_value = object->GetLocalElementAccessorPair(index) != NULL
- ? Handle<Object>::cast(factory->the_hole_value())
- : Object::GetElement(isolate, object, index);
+ if (!GetOwnElementAccessorPair(object, index).is_null()) {
+ old_value = Handle<Object>::cast(factory->the_hole_value());
+ } else {
+ old_value = Object::GetElement(
+ isolate, object, index).ToHandleChecked();
+ }
}
}
// Skip interceptor if forcing deletion.
- Handle<Object> result;
+ MaybeHandle<Object> maybe_result;
if (object->HasIndexedInterceptor() && mode != FORCE_DELETION) {
- result = DeleteElementWithInterceptor(object, index);
+ maybe_result = DeleteElementWithInterceptor(object, index);
} else {
- result = AccessorDelete(object, index, mode);
+ maybe_result = object->GetElementsAccessor()->Delete(object, index, mode);
}
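+  // ASSIGN_RETURN_ON_EXCEPTION unwraps |maybe_result| into |result|, or
+  // returns early with an empty MaybeHandle<Object> if an exception is
+  // pending.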
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, maybe_result, Object);
- if (should_enqueue_change_record && !HasLocalElement(object, index)) {
+ if (should_enqueue_change_record && !HasOwnElement(object, index)) {
Handle<String> name = factory->Uint32ToString(index);
EnqueueChangeRecord(object, "delete", name, old_value);
}
@@ -5225,18 +5327,18 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
}
-Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
- Handle<Name> name,
- DeleteMode mode) {
+MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode) {
Isolate* isolate = object->GetIsolate();
// ECMA-262, 3rd, 8.6.2.5
ASSERT(name->IsName());
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ !isolate->MayNamedAccess(object, name, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_DELETE);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5254,7 +5356,7 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
}
LookupResult lookup(isolate);
- object->LocalLookup(*name, &lookup, true);
+ object->LookupOwn(name, &lookup, true);
if (!lookup.IsFound()) return isolate->factory()->true_value();
// Ignore attributes if forcing a deletion.
if (lookup.IsDontDelete() && mode != FORCE_DELETION) {
@@ -5270,11 +5372,10 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsDataProperty()) {
- old_value = Object::GetProperty(object, name);
+ old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked();
}
Handle<Object> result;
@@ -5284,7 +5385,10 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
if (mode == FORCE_DELETION) {
result = DeletePropertyPostInterceptor(object, name, mode);
} else {
- result = DeletePropertyWithInterceptor(object, name);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ DeletePropertyWithInterceptor(object, name),
+ Object);
}
} else {
// Normalize object if needed.
@@ -5293,7 +5397,7 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
result = DeleteNormalizedProperty(object, name, mode);
}
- if (is_observed && !HasLocalProperty(object, name)) {
+ if (is_observed && !HasOwnProperty(object, name)) {
EnqueueChangeRecord(object, "delete", name, old_value);
}
@@ -5301,9 +5405,9 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
}
-Handle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object,
- uint32_t index,
- DeleteMode mode) {
+MaybeHandle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object,
+ uint32_t index,
+ DeleteMode mode) {
if (object->IsJSProxy()) {
return JSProxy::DeleteElementWithHandler(
Handle<JSProxy>::cast(object), index, mode);
@@ -5312,9 +5416,9 @@ Handle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object,
}
-Handle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
- Handle<Name> name,
- DeleteMode mode) {
+MaybeHandle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
+ Handle<Name> name,
+ DeleteMode mode) {
if (object->IsJSProxy()) {
return JSProxy::DeletePropertyWithHandler(
Handle<JSProxy>::cast(object), name, mode);
@@ -5370,19 +5474,18 @@ bool JSObject::ReferencesObject(Object* obj) {
// Check if the object is among the indexed properties.
ElementsKind kind = GetElementsKind();
switch (kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ // Raw pixels and external arrays do not reference other
+ // objects.
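+  // (TYPED_ARRAYS invokes TYPED_ARRAY_CASE once per typed array element
+  // type, e.g. Uint8 and Int8, expanding to the matching
+  // EXTERNAL_*_ELEMENTS and *_ELEMENTS case labels.)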
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- // Raw pixels and external arrays do not reference other
- // objects.
break;
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -5394,7 +5497,7 @@ bool JSObject::ReferencesObject(Object* obj) {
if (ReferencesObjectFromElements(elements, kind, obj)) return true;
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
// Check the mapped parameters.
int length = parameter_map->length();
@@ -5416,7 +5519,7 @@ bool JSObject::ReferencesObject(Object* obj) {
// Get the constructor function for arguments array.
JSObject* arguments_boilerplate =
heap->isolate()->context()->native_context()->
- arguments_boilerplate();
+ sloppy_arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
@@ -5445,6 +5548,12 @@ bool JSObject::ReferencesObject(Object* obj) {
// Check the context extension (if any) if it can have references.
if (context->has_extension() && !context->IsCatchContext()) {
+ // With harmony scoping, a JSFunction may have a global context.
+ // TODO(mvstanton): walk into the ScopeInfo.
+ if (FLAG_harmony_scoping && context->IsGlobalContext()) {
+ return false;
+ }
+
return JSObject::cast(context->extension())->ReferencesObject(obj);
}
}
@@ -5454,17 +5563,16 @@ bool JSObject::ReferencesObject(Object* obj) {
}
-Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
+MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
if (!object->map()->is_extensible()) return object;
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ !isolate->MayNamedAccess(
+ object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5476,13 +5584,13 @@ Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
}
// It's not possible to seal objects with external array elements
- if (object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
HandleVector(&object, 1));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
// If there are fast elements we normalize.
@@ -5499,10 +5607,10 @@ Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->set_is_extensible(false);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
ASSERT(!object->map()->is_extensible());
- if (FLAG_harmony_observation && object->map()->is_observed()) {
+ if (object->map()->is_observed()) {
EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
isolate->factory()->the_hole_value());
}
@@ -5519,8 +5627,11 @@ static void FreezeDictionary(Dictionary* dictionary) {
PropertyDetails details = dictionary->DetailsAt(i);
int attrs = DONT_DELETE;
// READ_ONLY is an invalid attribute for JS setters/getters.
- if (details.type() != CALLBACKS ||
- !dictionary->ValueAt(i)->IsAccessorPair()) {
+ if (details.type() == CALLBACKS) {
+ Object* v = dictionary->ValueAt(i);
+ if (v->IsPropertyCell()) v = PropertyCell::cast(v)->value();
+ if (!v->IsAccessorPair()) attrs |= READ_ONLY;
+ } else {
attrs |= READ_ONLY;
}
details = details.CopyAddAttributes(
@@ -5531,20 +5642,19 @@ static void FreezeDictionary(Dictionary* dictionary) {
}
-Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
- // Freezing non-strict arguments should be handled elsewhere.
- ASSERT(!object->HasNonStrictArgumentsElements());
+MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
+ // Freezing sloppy arguments should be handled elsewhere.
+ ASSERT(!object->HasSloppyArgumentsElements());
ASSERT(!object->map()->is_observed());
if (object->map()->is_frozen()) return object;
Isolate* isolate = object->GetIsolate();
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ !isolate->MayNamedAccess(
+ object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5556,13 +5666,13 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
}
// It's not possible to freeze objects with external array elements
- if (object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
HandleVector(&object, 1));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
Handle<SeededNumberDictionary> new_element_dictionary;
@@ -5574,8 +5684,7 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
int capacity = 0;
int used = 0;
object->GetElementsCapacityAndUsage(&capacity, &used);
- new_element_dictionary =
- isolate->factory()->NewSeededNumberDictionary(used);
+ new_element_dictionary = SeededNumberDictionary::New(isolate, used);
// Move elements to a dictionary; avoid calling NormalizeElements to avoid
// unnecessary transitions.
@@ -5588,28 +5697,19 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
}
}
- LookupResult result(isolate);
- Handle<Map> old_map(object->map());
- old_map->LookupTransition(*object, isolate->heap()->frozen_symbol(), &result);
- if (result.IsTransition()) {
- Map* transition_map = result.GetTransitionTarget();
+ Handle<Map> old_map(object->map(), isolate);
+ int transition_index = old_map->SearchTransition(
+ isolate->heap()->frozen_symbol());
+ if (transition_index != TransitionArray::kNotFound) {
+ Handle<Map> transition_map(old_map->GetTransition(transition_index));
ASSERT(transition_map->has_dictionary_elements());
ASSERT(transition_map->is_frozen());
ASSERT(!transition_map->is_extensible());
- object->set_map(transition_map);
+ JSObject::MigrateToMap(object, transition_map);
} else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
// Create a new descriptor array with fully-frozen properties
- int num_descriptors = old_map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> new_descriptors =
- DescriptorArray::CopyUpToAddAttributes(
- handle(old_map->instance_descriptors()), num_descriptors, FROZEN);
- Handle<Map> new_map = Map::CopyReplaceDescriptors(
- old_map, new_descriptors, INSERT_TRANSITION,
- isolate->factory()->frozen_symbol());
- new_map->freeze();
- new_map->set_is_extensible(false);
- new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- object->set_map(*new_map);
+ Handle<Map> new_map = Map::CopyForFreeze(old_map);
+ JSObject::MigrateToMap(object, new_map);
} else {
// Slow path: need to normalize properties for safety
NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
@@ -5620,7 +5720,7 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
// Freeze dictionary-mode properties
FreezeDictionary(object->property_dictionary());
@@ -5644,34 +5744,33 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
void JSObject::SetObserved(Handle<JSObject> object) {
+ ASSERT(!object->IsJSGlobalProxy());
+ ASSERT(!object->IsJSGlobalObject());
Isolate* isolate = object->GetIsolate();
-
- if (object->map()->is_observed())
- return;
-
- LookupResult result(isolate);
- object->map()->LookupTransition(*object,
- isolate->heap()->observed_symbol(),
- &result);
-
Handle<Map> new_map;
- if (result.IsTransition()) {
- new_map = handle(result.GetTransitionTarget());
+ Handle<Map> old_map(object->map(), isolate);
+ ASSERT(!old_map->is_observed());
+ int transition_index = old_map->SearchTransition(
+ isolate->heap()->observed_symbol());
+ if (transition_index != TransitionArray::kNotFound) {
+ new_map = handle(old_map->GetTransition(transition_index), isolate);
ASSERT(new_map->is_observed());
- } else if (object->map()->CanHaveMoreTransitions()) {
- new_map = Map::CopyForObserved(handle(object->map()));
+ } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
+ new_map = Map::CopyForObserved(old_map);
} else {
- new_map = Map::Copy(handle(object->map()));
+ new_map = Map::Copy(old_map);
new_map->set_is_observed();
}
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
}
-Handle<JSObject> JSObject::Copy(Handle<JSObject> object) {
+Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
+ Representation representation,
+ FieldIndex index) {
Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- isolate->heap()->CopyJSObject(*object), JSObject);
+ Handle<Object> raw_value(object->RawFastPropertyAt(index), isolate);
+ return Object::NewStorageFor(isolate, raw_value, representation);
}
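+
+// Illustrative use, mirroring GetDataProperty below: once a LookupResult
+// reports a FIELD property, its value can be materialized with
+//   FastPropertyAt(holder, lookup.representation(), lookup.GetFieldIndex());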
@@ -5684,13 +5783,14 @@ class JSObjectWalkVisitor {
copying_(copying),
hints_(hints) {}
- Handle<JSObject> StructureWalk(Handle<JSObject> object);
+ MUST_USE_RESULT MaybeHandle<JSObject> StructureWalk(Handle<JSObject> object);
protected:
- inline Handle<JSObject> VisitElementOrProperty(Handle<JSObject> object,
- Handle<JSObject> value) {
+ MUST_USE_RESULT inline MaybeHandle<JSObject> VisitElementOrProperty(
+ Handle<JSObject> object,
+ Handle<JSObject> value) {
Handle<AllocationSite> current_site = site_context()->EnterNewScope();
- Handle<JSObject> copy_of_value = StructureWalk(value);
+ MaybeHandle<JSObject> copy_of_value = StructureWalk(value);
site_context()->ExitScope(current_site, value);
return copy_of_value;
}
@@ -5708,7 +5808,7 @@ class JSObjectWalkVisitor {
template <class ContextObject>
-Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
+MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
Handle<JSObject> object) {
Isolate* isolate = this->isolate();
bool copying = this->copying();
@@ -5719,7 +5819,7 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (check.HasOverflowed()) {
isolate->StackOverflow();
- return Handle<JSObject>::null();
+ return MaybeHandle<JSObject>();
}
}
@@ -5733,14 +5833,8 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (site_context()->ShouldCreateMemento(object)) {
site_to_pass = site_context()->current();
}
- CALL_AND_RETRY_OR_DIE(isolate,
- isolate->heap()->CopyJSObject(*object,
- site_to_pass.is_null() ? NULL : *site_to_pass),
- { copy = Handle<JSObject>(JSObject::cast(__object__),
- isolate);
- break;
- },
- return Handle<JSObject>());
+ copy = isolate->factory()->CopyJSObjectWithAllocationSite(
+ object, site_to_pass);
} else {
copy = object;
}
@@ -5757,21 +5851,23 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (!shallow) {
HandleScope scope(isolate);
- // Deep copy local properties.
+ // Deep copy own properties.
if (copy->HasFastProperties()) {
Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
int limit = copy->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
- int index = descriptors->GetFieldIndex(i);
+ FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
Handle<Object> value(object->RawFastPropertyAt(index), isolate);
if (value->IsJSObject()) {
- value = VisitElementOrProperty(copy, Handle<JSObject>::cast(value));
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>());
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value,
+ VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
+ JSObject);
} else {
Representation representation = details.representation();
- value = NewStorageFor(isolate, value, representation);
+ value = Object::NewStorageFor(isolate, value, representation);
}
if (copying) {
copy->FastPropertyAtPut(index, *value);
@@ -5779,34 +5875,35 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
} else {
Handle<FixedArray> names =
- isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties());
- copy->GetLocalPropertyNames(*names, 0);
+ isolate->factory()->NewFixedArray(copy->NumberOfOwnProperties());
+ copy->GetOwnPropertyNames(*names, 0);
for (int i = 0; i < names->length(); i++) {
ASSERT(names->get(i)->IsString());
Handle<String> key_string(String::cast(names->get(i)));
PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(*key_string);
+ JSReceiver::GetOwnPropertyAttributes(copy, key_string);
// Only deep copy fields from the object literal expression.
// In particular, don't try to copy the length attribute of
// an array.
if (attributes != NONE) continue;
- Handle<Object> value(
- copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(),
- isolate);
+ Handle<Object> value =
+ Object::GetProperty(copy, key_string).ToHandleChecked();
if (value->IsJSObject()) {
- Handle<JSObject> result = VisitElementOrProperty(
- copy, Handle<JSObject>::cast(value));
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ Handle<JSObject> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
+ JSObject);
if (copying) {
// Creating object copy for literals. No strict mode needed.
- CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty(
- copy, key_string, result, NONE, kNonStrictMode));
+ JSObject::SetProperty(
+ copy, key_string, result, NONE, SLOPPY).Assert();
}
}
}
}
- // Deep copy local elements.
+ // Deep copy own elements.
// Pixel elements cannot be created using an object literal.
ASSERT(!copy->HasExternalArrayElements());
switch (kind) {
@@ -5828,9 +5925,11 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
value->IsTheHole() ||
(IsFastObjectElementsKind(copy->GetElementsKind())));
if (value->IsJSObject()) {
- Handle<JSObject> result = VisitElementOrProperty(
- copy, Handle<JSObject>::cast(value));
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ Handle<JSObject> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
+ JSObject);
if (copying) {
elements->set(i, *result);
}
@@ -5848,9 +5947,11 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (element_dictionary->IsKey(k)) {
Handle<Object> value(element_dictionary->ValueAt(i), isolate);
if (value->IsJSObject()) {
- Handle<JSObject> result = VisitElementOrProperty(
- copy, Handle<JSObject>::cast(value));
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ Handle<JSObject> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
+ JSObject);
if (copying) {
element_dictionary->ValueAtPut(i, *result);
}
@@ -5859,18 +5960,18 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNIMPLEMENTED();
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
// No contained objects, nothing to do.
@@ -5882,27 +5983,65 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
-Handle<JSObject> JSObject::DeepWalk(
+MaybeHandle<JSObject> JSObject::DeepWalk(
Handle<JSObject> object,
AllocationSiteCreationContext* site_context) {
JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, false,
kNoHints);
- Handle<JSObject> result = v.StructureWalk(object);
- ASSERT(result.is_null() || result.is_identical_to(object));
+ MaybeHandle<JSObject> result = v.StructureWalk(object);
+ Handle<JSObject> for_assert;
+ ASSERT(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
return result;
}
-Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object,
- AllocationSiteUsageContext* site_context,
- DeepCopyHints hints) {
+MaybeHandle<JSObject> JSObject::DeepCopy(
+ Handle<JSObject> object,
+ AllocationSiteUsageContext* site_context,
+ DeepCopyHints hints) {
JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints);
- Handle<JSObject> copy = v.StructureWalk(object);
- ASSERT(!copy.is_identical_to(object));
+ MaybeHandle<JSObject> copy = v.StructureWalk(object);
+ Handle<JSObject> for_assert;
+ ASSERT(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
return copy;
}
+Handle<Object> JSObject::GetDataProperty(Handle<JSObject> object,
+ Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
+ LookupResult lookup(isolate);
+ {
+ DisallowHeapAllocation no_allocation;
+ object->LookupRealNamedProperty(key, &lookup);
+ }
+ Handle<Object> result = isolate->factory()->undefined_value();
+ if (lookup.IsFound() && !lookup.IsTransition()) {
+ switch (lookup.type()) {
+ case NORMAL:
+ result = GetNormalizedProperty(
+ Handle<JSObject>(lookup.holder(), isolate), &lookup);
+ break;
+ case FIELD:
+ result = FastPropertyAt(Handle<JSObject>(lookup.holder(), isolate),
+ lookup.representation(),
+ lookup.GetFieldIndex());
+ break;
+ case CONSTANT:
+ result = Handle<Object>(lookup.GetConstant(), isolate);
+ break;
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ break;
+ case NONEXISTENT:
+ UNREACHABLE();
+ }
+ }
+ return result;
+}
+
+
// Tests for the fast common case for property enumeration:
// - This object and all prototypes has an enum cache (which means that
// it is no proxy, has no interceptors and needs no access checks).
@@ -5917,9 +6056,9 @@ bool JSReceiver::IsSimpleEnum() {
JSObject* curr = JSObject::cast(o);
int enum_length = curr->map()->EnumLength();
if (enum_length == kInvalidEnumCacheSentinel) return false;
+ if (curr->IsAccessCheckNeeded()) return false;
ASSERT(!curr->HasNamedInterceptor());
ASSERT(!curr->HasIndexedInterceptor());
- ASSERT(!curr->IsAccessCheckNeeded());
if (curr->NumberOfEnumElements() > 0) return false;
if (curr != this && enum_length != 0) return false;
}
@@ -5927,6 +6066,24 @@ bool JSReceiver::IsSimpleEnum() {
}
+static bool FilterKey(Object* key, PropertyAttributes filter) {
+ if ((filter & SYMBOLIC) && key->IsSymbol()) {
+ return true;
+ }
+
+ if ((filter & PRIVATE_SYMBOL) &&
+ key->IsSymbol() && Symbol::cast(key)->is_private()) {
+ return true;
+ }
+
+ if ((filter & STRING) && !key->IsSymbol()) {
+ return true;
+ }
+
+ return false;
+}
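+// For example, NumberOfDescribedProperties below relies on FilterKey so that
+// a filter containing SYMBOLIC excludes symbol-named properties from its
+// count.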
+
+
int Map::NumberOfDescribedProperties(DescriptorFlag which,
PropertyAttributes filter) {
int result = 0;
@@ -5936,7 +6093,7 @@ int Map::NumberOfDescribedProperties(DescriptorFlag which,
: NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- ((filter & SYMBOLIC) == 0 || !descs->GetKey(i)->IsSymbol())) {
+ !FilterKey(descs->GetKey(i), filter)) {
result++;
}
}
@@ -5958,29 +6115,16 @@ int Map::NextFreePropertyIndex() {
}
-AccessorDescriptor* Map::FindAccessor(Name* name) {
- DescriptorArray* descs = instance_descriptors();
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- for (int i = 0; i < number_of_own_descriptors; i++) {
- if (descs->GetType(i) == CALLBACKS && name->Equals(descs->GetKey(i))) {
- return descs->GetCallbacks(i);
- }
- }
- return NULL;
-}
-
-
-void JSReceiver::LocalLookup(
- Name* name, LookupResult* result, bool search_hidden_prototypes) {
+void JSReceiver::LookupOwn(
+ Handle<Name> name, LookupResult* result, bool search_hidden_prototypes) {
+ DisallowHeapAllocation no_gc;
ASSERT(name->IsName());
- Heap* heap = GetHeap();
-
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- return JSReceiver::cast(proto)->LocalLookup(
+ return JSReceiver::cast(proto)->LookupOwn(
name, result, search_hidden_prototypes);
}
@@ -5999,46 +6143,269 @@ void JSReceiver::LocalLookup(
// Check for lookup interceptor except when bootstrapping.
if (js_object->HasNamedInterceptor() &&
- !heap->isolate()->bootstrapper()->IsActive()) {
+ !GetIsolate()->bootstrapper()->IsActive()) {
result->InterceptorResult(js_object);
return;
}
- js_object->LocalLookupRealNamedProperty(name, result);
+ js_object->LookupOwnRealNamedProperty(name, result);
if (result->IsFound() || !search_hidden_prototypes) return;
Object* proto = js_object->GetPrototype();
if (!proto->IsJSReceiver()) return;
JSReceiver* receiver = JSReceiver::cast(proto);
if (receiver->map()->is_hidden_prototype()) {
- receiver->LocalLookup(name, result, search_hidden_prototypes);
+ receiver->LookupOwn(name, result, search_hidden_prototypes);
}
}
-void JSReceiver::Lookup(Name* name, LookupResult* result) {
+void JSReceiver::Lookup(Handle<Name> name, LookupResult* result) {
+ DisallowHeapAllocation no_gc;
// Ecma-262 3rd 8.6.2.4
- Heap* heap = GetHeap();
+ Handle<Object> null_value = GetIsolate()->factory()->null_value();
for (Object* current = this;
- current != heap->null_value();
+ current != *null_value;
current = JSObject::cast(current)->GetPrototype()) {
- JSReceiver::cast(current)->LocalLookup(name, result, false);
+ JSReceiver::cast(current)->LookupOwn(name, result, false);
if (result->IsFound()) return;
}
result->NotFound();
}
-// Search object and its prototype chain for callback properties.
-void JSObject::LookupCallbackProperty(Name* name, LookupResult* result) {
- Heap* heap = GetHeap();
- for (Object* current = this;
- current != heap->null_value() && current->IsJSObject();
- current = JSObject::cast(current)->GetPrototype()) {
- JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
- if (result->IsPropertyCallbacks()) return;
+static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
+ int len = array->length();
+ for (int i = 0; i < len; i++) {
+ Object* e = array->get(i);
+ if (!(e->IsString() || e->IsNumber())) return false;
}
- result->NotFound();
+ return true;
+}
+
+
+static Handle<FixedArray> ReduceFixedArrayTo(
+ Handle<FixedArray> array, int length) {
+ ASSERT(array->length() >= length);
+ if (array->length() == length) return array;
+
+ Handle<FixedArray> new_array =
+ array->GetIsolate()->factory()->NewFixedArray(length);
+ for (int i = 0; i < length; ++i) new_array->set(i, array->get(i));
+ return new_array;
+}
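+// ReduceFixedArrayTo lets GetEnumPropertyKeys below reuse an oversized enum
+// cache, e.g. trimming a cached array of 5 keys down to the 3 that the
+// current map actually owns.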
+
+
+static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
+ bool cache_result) {
+ Isolate* isolate = object->GetIsolate();
+ if (object->HasFastProperties()) {
+ int own_property_count = object->map()->EnumLength();
+    // If the enum length of the given map is set to
+    // kInvalidEnumCacheSentinel, the map itself has never used the present
+    // enum cache. The first step to using the cache is to set the enum
+    // length of the map by counting the number of own descriptors that are
+    // neither DONT_ENUM nor SYMBOLIC.
+ if (own_property_count == kInvalidEnumCacheSentinel) {
+ own_property_count = object->map()->NumberOfDescribedProperties(
+ OWN_DESCRIPTORS, DONT_SHOW);
+ } else {
+ ASSERT(own_property_count == object->map()->NumberOfDescribedProperties(
+ OWN_DESCRIPTORS, DONT_SHOW));
+ }
+
+ if (object->map()->instance_descriptors()->HasEnumCache()) {
+ DescriptorArray* desc = object->map()->instance_descriptors();
+ Handle<FixedArray> keys(desc->GetEnumCache(), isolate);
+
+      // If the cache already holds at least the required number of
+      // properties, reuse it. Otherwise the enum cache was generated for a
+      // previous (smaller) version of the descriptor array, and must be
+      // regenerated.
+ if (own_property_count <= keys->length()) {
+ if (cache_result) object->map()->SetEnumLength(own_property_count);
+ isolate->counters()->enum_cache_hits()->Increment();
+ return ReduceFixedArrayTo(keys, own_property_count);
+ }
+ }
+
+ Handle<Map> map(object->map());
+
+ if (map->instance_descriptors()->IsEmpty()) {
+ isolate->counters()->enum_cache_hits()->Increment();
+ if (cache_result) map->SetEnumLength(0);
+ return isolate->factory()->empty_fixed_array();
+ }
+
+ isolate->counters()->enum_cache_misses()->Increment();
+
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(
+ own_property_count);
+ Handle<FixedArray> indices = isolate->factory()->NewFixedArray(
+ own_property_count);
+
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
+
+ int size = map->NumberOfOwnDescriptors();
+ int index = 0;
+
+ for (int i = 0; i < size; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ Object* key = descs->GetKey(i);
+ if (!(details.IsDontEnum() || key->IsSymbol())) {
+ storage->set(index, key);
+ if (!indices.is_null()) {
+ if (details.type() != FIELD) {
+ indices = Handle<FixedArray>();
+ } else {
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ int load_by_field_index = field_index.GetLoadByFieldIndex();
+ indices->set(index, Smi::FromInt(load_by_field_index));
+ }
+ }
+ index++;
+ }
+ }
+ ASSERT(index == storage->length());
+
+ Handle<FixedArray> bridge_storage =
+ isolate->factory()->NewFixedArray(
+ DescriptorArray::kEnumCacheBridgeLength);
+ DescriptorArray* desc = object->map()->instance_descriptors();
+ desc->SetEnumCache(*bridge_storage,
+ *storage,
+ indices.is_null() ? Object::cast(Smi::FromInt(0))
+ : Object::cast(*indices));
+ if (cache_result) {
+ object->map()->SetEnumLength(own_property_count);
+ }
+ return storage;
+ } else {
+ Handle<NameDictionary> dictionary(object->property_dictionary());
+ int length = dictionary->NumberOfEnumElements();
+ if (length == 0) {
+ return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
+ }
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
+ dictionary->CopyEnumKeysTo(*storage);
+ return storage;
+ }
+}
+
+
+MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
+ KeyCollectionType type) {
+ USE(ContainsOnlyValidKeys);
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
+ Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
+ isolate->context()->native_context()->sloppy_arguments_boilerplate(),
+ isolate);
+ Handle<JSFunction> arguments_function = Handle<JSFunction>(
+ JSFunction::cast(arguments_boilerplate->map()->constructor()),
+ isolate);
+
+ // Only collect keys if access is permitted.
+ for (Handle<Object> p = object;
+ *p != isolate->heap()->null_value();
+ p = Handle<Object>(p->GetPrototype(isolate), isolate)) {
+ if (p->IsJSProxy()) {
+ Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
+ Handle<Object> args[] = { proxy };
+ Handle<Object> names;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, names,
+ Execution::Call(isolate,
+ isolate->proxy_enumerate(),
+ object,
+ ARRAY_SIZE(args),
+ args),
+ FixedArray);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, content,
+ FixedArray::AddKeysFromArrayLike(
+ content, Handle<JSObject>::cast(names)),
+ FixedArray);
+ break;
+ }
+
+ Handle<JSObject> current(JSObject::cast(*p), isolate);
+
+ // Check access rights if required.
+ if (current->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(
+ current, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(current, v8::ACCESS_KEYS);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
+ break;
+ }
+
+ // Compute the element keys.
+ Handle<FixedArray> element_keys =
+ isolate->factory()->NewFixedArray(current->NumberOfEnumElements());
+ current->GetEnumElementKeys(*element_keys);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, content,
+ FixedArray::UnionOfKeys(content, element_keys),
+ FixedArray);
+ ASSERT(ContainsOnlyValidKeys(content));
+
+ // Add the element keys from the interceptor.
+ if (current->HasIndexedInterceptor()) {
+ Handle<JSObject> result;
+ if (JSObject::GetKeysForIndexedInterceptor(
+ current, object).ToHandle(&result)) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, content,
+ FixedArray::AddKeysFromArrayLike(content, result),
+ FixedArray);
+ }
+ ASSERT(ContainsOnlyValidKeys(content));
+ }
+
+    // We can cache the computed property keys if access checks are
+    // not needed and no interceptors are involved.
+    //
+    // We do not use the cache if the object has elements, so caching
+    // the property names of arguments objects, which always have
+    // elements, would be pointless.
+    // Wrapped strings also have elements, but no elements array or
+    // dictionary backing them; the fast inline test for whether to use
+    // the cache would therefore say yes, so we must not create a cache
+    // for them.
+ bool cache_enum_keys =
+ ((current->map()->constructor() != *arguments_function) &&
+ !current->IsJSValue() &&
+ !current->IsAccessCheckNeeded() &&
+ !current->HasNamedInterceptor() &&
+ !current->HasIndexedInterceptor());
+ // Compute the property keys and cache them if possible.
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, content,
+ FixedArray::UnionOfKeys(
+ content, GetEnumPropertyKeys(current, cache_enum_keys)),
+ FixedArray);
+ ASSERT(ContainsOnlyValidKeys(content));
+
+ // Add the property keys from the interceptor.
+ if (current->HasNamedInterceptor()) {
+ Handle<JSObject> result;
+ if (JSObject::GetKeysForNamedInterceptor(
+ current, object).ToHandle(&result)) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, content,
+ FixedArray::AddKeysFromArrayLike(content, result),
+ FixedArray);
+ }
+ ASSERT(ContainsOnlyValidKeys(content));
+ }
+
+ // If we only want own properties we bail out after the first
+ // iteration.
+ if (type == OWN_ONLY) break;
+ }
+ return content;
}
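
// A minimal standalone sketch (illustrative only; Node, own_keys and
// prototype are hypothetical stand-ins for JSReceiver and its prototype
// link) of the collection loop above: keys are accumulated object-first
// along the prototype chain, duplicates are dropped union-style, and the
// walk stops after the first step for OWN_ONLY.
#include <set>
#include <string>
#include <vector>

struct Node {
  std::vector<std::string> own_keys;
  const Node* prototype;  // NULL terminates the chain.
};

enum SketchKeyCollectionType { SKETCH_OWN_ONLY, SKETCH_INCLUDE_PROTOS };

std::vector<std::string> CollectKeys(const Node* object,
                                     SketchKeyCollectionType type) {
  std::vector<std::string> content;
  std::set<std::string> seen;
  for (const Node* p = object; p != NULL; p = p->prototype) {
    for (size_t i = 0; i < p->own_keys.size(); ++i) {
      // Union semantics: the first occurrence along the chain wins.
      if (seen.insert(p->own_keys[i]).second) {
        content.push_back(p->own_keys[i]);
      }
    }
    if (type == SKETCH_OWN_ONLY) break;
  }
  return content;
}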
@@ -6083,17 +6450,16 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
// Ignore getters and setters on pixel and external array elements.
return;
+
case DICTIONARY_ELEMENTS:
if (UpdateGetterSetterInDictionary(object->element_dictionary(),
index,
@@ -6103,7 +6469,7 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
return;
}
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
// Ascertain whether we have read-only properties or an existing
// getter/setter pair in an arguments elements dictionary backing
// store.
@@ -6142,7 +6508,7 @@ Handle<AccessorPair> JSObject::CreateAccessorPairFor(Handle<JSObject> object,
Handle<Name> name) {
Isolate* isolate = object->GetIsolate();
LookupResult result(isolate);
- object->LocalLookupRealNamedProperty(*name, &result);
+ object->LookupOwnRealNamedProperty(name, &result);
if (result.IsPropertyCallbacks()) {
// Note that the result can actually have IsDontDelete() == true when we
// e.g. have to fall back to the slow case while adding a setter after
@@ -6186,31 +6552,6 @@ void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
}
-bool JSObject::CanSetCallback(Name* name) {
- ASSERT(!IsAccessCheckNeeded() ||
- GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
-
- // Check if there is an API defined callback object which prohibits
- // callback overwriting in this object or its prototype chain.
- // This mechanism is needed for instance in a browser setting, where
- // certain accessors such as window.location should not be allowed
- // to be overwritten because allowing overwriting could potentially
- // cause security problems.
- LookupResult callback_result(GetIsolate());
- LookupCallbackProperty(name, &callback_result);
- if (callback_result.IsFound()) {
- Object* obj = callback_result.GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- return !AccessorInfo::cast(obj)->prohibits_overwriting();
- }
- if (obj->IsAccessorPair()) {
- return !AccessorPair::cast(obj)->prohibits_overwriting();
- }
- }
- return true;
-}
-
-
bool Map::DictionaryElementsInPrototypeChainOnly() {
Heap* heap = GetHeap();
@@ -6254,7 +6595,7 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
dictionary->set_requires_slow_elements();
// Update the dictionary backing store on the object.
- if (object->elements()->map() == heap->non_strict_arguments_elements_map()) {
+ if (object->elements()->map() == heap->sloppy_arguments_elements_map()) {
// Also delete any parameter alias.
//
// TODO(kmillikin): when deleting the last parameter alias we could
@@ -6311,8 +6652,9 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ !isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET);
+ // TODO(yangguo): Issue 3269, check for scheduled exception missing?
return;
}
@@ -6334,30 +6676,29 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
AssertNoContextChange ncc(isolate);
// Try to flatten before operating on the string.
- if (name->IsString()) String::cast(*name)->TryFlatten();
-
- if (!object->CanSetCallback(*name)) return;
+ if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
bool preexists = false;
if (is_observed) {
if (is_element) {
- preexists = HasLocalElement(object, index);
- if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
- old_value = Object::GetElement(isolate, object, index);
+ preexists = HasOwnElement(object, index);
+ if (preexists && GetOwnElementAccessorPair(object, index).is_null()) {
+ old_value =
+ Object::GetElement(isolate, object, index).ToHandleChecked();
}
} else {
LookupResult lookup(isolate);
- object->LocalLookup(*name, &lookup, true);
+ object->LookupOwn(name, &lookup, true);
preexists = lookup.IsProperty();
if (preexists && lookup.IsDataProperty()) {
- old_value = Object::GetProperty(object, name);
+ old_value =
+ Object::GetPropertyOrElement(object, name).ToHandleChecked();
}
}
}
@@ -6377,11 +6718,11 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
}
-static bool TryAccessorTransition(JSObject* self,
- Map* transitioned_map,
+static bool TryAccessorTransition(Handle<JSObject> self,
+ Handle<Map> transitioned_map,
int target_descriptor,
AccessorComponent component,
- Object* accessor,
+ Handle<Object> accessor,
PropertyAttributes attributes) {
DescriptorArray* descs = transitioned_map->instance_descriptors();
PropertyDetails details = descs->GetDetails(target_descriptor);
@@ -6395,8 +6736,8 @@ static bool TryAccessorTransition(JSObject* self,
PropertyAttributes target_attributes = details.attributes();
// Reuse transition if adding same accessor with same attributes.
- if (target_accessor == accessor && target_attributes == attributes) {
- self->set_map(transitioned_map);
+ if (target_accessor == *accessor && target_attributes == attributes) {
+ JSObject::MigrateToMap(self, transitioned_map);
return true;
}
@@ -6406,25 +6747,6 @@ static bool TryAccessorTransition(JSObject* self,
}
-static MaybeObject* CopyInsertDescriptor(Map* map,
- Name* name,
- AccessorPair* accessors,
- PropertyAttributes attributes) {
- CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
- return map->CopyInsertDescriptor(&new_accessors_desc, INSERT_TRANSITION);
-}
-
-
-static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
- Handle<Name> name,
- Handle<AccessorPair> accessors,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(map->GetIsolate(),
- CopyInsertDescriptor(*map, *name, *accessors, attributes),
- Map);
-}
-
-
bool JSObject::DefineFastAccessor(Handle<JSObject> object,
Handle<Name> name,
AccessorComponent component,
@@ -6433,7 +6755,7 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
ASSERT(accessor->IsSpecFunction() || accessor->IsUndefined());
Isolate* isolate = object->GetIsolate();
LookupResult result(isolate);
- object->LocalLookup(*name, &result);
+ object->LookupOwn(name, &result);
if (result.IsFound() && !result.IsPropertyCallbacks()) {
return false;
@@ -6458,14 +6780,14 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
object->map()->LookupTransition(*object, *name, &result);
if (result.IsFound()) {
- Map* target = result.GetTransitionTarget();
+ Handle<Map> target(result.GetTransitionTarget());
ASSERT(target->NumberOfOwnDescriptors() ==
object->map()->NumberOfOwnDescriptors());
// This works since descriptors are sorted in order of addition.
ASSERT(object->map()->instance_descriptors()->
GetKey(descriptor_number) == *name);
- return TryAccessorTransition(*object, target, descriptor_number,
- component, *accessor, attributes);
+ return TryAccessorTransition(object, target, descriptor_number,
+ component, accessor, attributes);
}
} else {
// If not, lookup a transition.
@@ -6473,12 +6795,12 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
// If there is a transition, try to follow it.
if (result.IsFound()) {
- Map* target = result.GetTransitionTarget();
+ Handle<Map> target(result.GetTransitionTarget());
int descriptor_number = target->LastAdded();
- ASSERT(target->instance_descriptors()->GetKey(descriptor_number)
- ->Equals(*name));
- return TryAccessorTransition(*object, target, descriptor_number,
- component, *accessor, attributes);
+ ASSERT(Name::Equals(name,
+ handle(target->instance_descriptors()->GetKey(descriptor_number))));
+ return TryAccessorTransition(object, target, descriptor_number,
+ component, accessor, attributes);
}
}
@@ -6489,24 +6811,27 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
? AccessorPair::Copy(Handle<AccessorPair>(source_accessors))
: isolate->factory()->NewAccessorPair();
accessors->set(component, *accessor);
- Handle<Map> new_map = CopyInsertDescriptor(Handle<Map>(object->map()),
- name, accessors, attributes);
- object->set_map(*new_map);
+
+ CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
+ Handle<Map> new_map = Map::CopyInsertDescriptor(
+ handle(object->map()), &new_accessors_desc, INSERT_TRANSITION);
+
+ JSObject::MigrateToMap(object, new_map);
return true;
}
-Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
- Handle<AccessorInfo> info) {
+MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
+ Handle<AccessorInfo> info) {
Isolate* isolate = object->GetIsolate();
Factory* factory = isolate->factory();
Handle<Name> name(Name::cast(info->name()));
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ !isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->undefined_value();
}
@@ -6522,9 +6847,7 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
AssertNoContextChange ncc(isolate);
// Try to flatten before operating on the string.
- if (name->IsString()) FlattenString(Handle<String>::cast(name));
-
- if (!object->CanSetCallback(*name)) return factory->undefined_value();
+ if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
@@ -6541,21 +6864,20 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
// Ignore getters and setters on pixel and external array
// elements.
return factory->undefined_value();
+
case DICTIONARY_ELEMENTS:
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNIMPLEMENTED();
break;
}
@@ -6564,7 +6886,7 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
} else {
// Lookup the name.
LookupResult result(isolate);
- object->LocalLookup(*name, &result, true);
+ object->LookupOwn(name, &result, true);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
@@ -6578,9 +6900,9 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
}
-Handle<Object> JSObject::GetAccessor(Handle<JSObject> object,
- Handle<Name> name,
- AccessorComponent component) {
+MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component) {
Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing callbacks or
@@ -6589,9 +6911,9 @@ Handle<Object> JSObject::GetAccessor(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ !isolate->MayNamedAccess(object, name, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
@@ -6620,7 +6942,7 @@ Handle<Object> JSObject::GetAccessor(Handle<JSObject> object,
!obj->IsNull();
obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
LookupResult result(isolate);
- JSReceiver::cast(*obj)->LocalLookup(*name, &result);
+ JSReceiver::cast(*obj)->LookupOwn(name, &result);
if (result.IsFound()) {
if (result.IsReadOnly()) return isolate->factory()->undefined_value();
if (result.IsPropertyCallbacks()) {
@@ -6643,9 +6965,9 @@ Object* JSObject::SlowReverseLookup(Object* value) {
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
if (descs->GetType(i) == FIELD) {
- Object* property = RawFastPropertyAt(descs->GetFieldIndex(i));
- if (FLAG_track_double_fields &&
- descs->GetDetails(i).representation().IsDouble()) {
+ Object* property =
+ RawFastPropertyAt(FieldIndex::ForDescriptor(map(), i));
+ if (descs->GetDetails(i).representation().IsDouble()) {
ASSERT(property->IsHeapNumber());
if (value->IsNumber() && property->Number() == value->Number()) {
return descs->GetKey(i);
@@ -6666,36 +6988,73 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
-Handle<Map> Map::RawCopy(Handle<Map> map,
- int instance_size) {
- CALL_HEAP_FUNCTION(map->GetIsolate(),
- map->RawCopy(instance_size),
- Map);
-}
-
-
-MaybeObject* Map::RawCopy(int instance_size) {
- Map* result;
- MaybeObject* maybe_result =
- GetHeap()->AllocateMap(instance_type(), instance_size);
- if (!maybe_result->To(&result)) return maybe_result;
-
- result->set_prototype(prototype());
- result->set_constructor(constructor());
- result->set_bit_field(bit_field());
- result->set_bit_field2(bit_field2());
- int new_bit_field3 = bit_field3();
+Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) {
+ Handle<Map> result = map->GetIsolate()->factory()->NewMap(
+ map->instance_type(), instance_size);
+ result->set_prototype(map->prototype());
+ result->set_constructor(map->constructor());
+ result->set_bit_field(map->bit_field());
+ result->set_bit_field2(map->bit_field2());
+ int new_bit_field3 = map->bit_field3();
new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
new_bit_field3 = EnumLengthBits::update(new_bit_field3,
kInvalidEnumCacheSentinel);
new_bit_field3 = Deprecated::update(new_bit_field3, false);
- new_bit_field3 = IsUnstable::update(new_bit_field3, false);
+ if (!map->is_dictionary_map()) {
+ new_bit_field3 = IsUnstable::update(new_bit_field3, false);
+ }
+ new_bit_field3 = ConstructionCount::update(new_bit_field3,
+ JSFunction::kNoSlackTracking);
result->set_bit_field3(new_bit_field3);
return result;
}
+Handle<Map> Map::Normalize(Handle<Map> fast_map,
+ PropertyNormalizationMode mode) {
+ ASSERT(!fast_map->is_dictionary_map());
+
+ Isolate* isolate = fast_map->GetIsolate();
+ Handle<NormalizedMapCache> cache(
+ isolate->context()->native_context()->normalized_map_cache());
+
+ Handle<Map> new_map;
+ if (cache->Get(fast_map, mode).ToHandle(&new_map)) {
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ new_map->SharedMapVerify();
+ }
+#endif
+#ifdef ENABLE_SLOW_ASSERTS
+ if (FLAG_enable_slow_asserts) {
+ // The cached map should match newly created normalized map bit-by-bit,
+ // except for the code cache, which can contain some ics which can be
+ // applied to the shared map.
+ Handle<Map> fresh = Map::CopyNormalized(
+ fast_map, mode, SHARED_NORMALIZED_MAP);
+
+ ASSERT(memcmp(fresh->address(),
+ new_map->address(),
+ Map::kCodeCacheOffset) == 0);
+ STATIC_ASSERT(Map::kDependentCodeOffset ==
+ Map::kCodeCacheOffset + kPointerSize);
+ int offset = Map::kDependentCodeOffset + kPointerSize;
+ ASSERT(memcmp(fresh->address() + offset,
+ new_map->address() + offset,
+ Map::kSize - offset) == 0);
+ }
+#endif
+ } else {
+ new_map = Map::CopyNormalized(fast_map, mode, SHARED_NORMALIZED_MAP);
+ cache->Set(fast_map, new_map);
+ isolate->counters()->normalized_maps()->Increment();
+ }
+ fast_map->NotifyLeafMapLayoutChange();
+ return new_map;
+}
+
+
Handle<Map> Map::CopyNormalized(Handle<Map> map,
PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing) {
@@ -6704,7 +7063,7 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
new_instance_size -= map->inobject_properties() * kPointerSize;
}
- Handle<Map> result = Map::RawCopy(map, new_instance_size);
+ Handle<Map> result = RawCopy(map, new_instance_size);
if (mode != CLEAR_INOBJECT_PROPERTIES) {
result->set_inobject_properties(map->inobject_properties());
@@ -6725,97 +7084,57 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
- CALL_HEAP_FUNCTION(map->GetIsolate(), map->CopyDropDescriptors(), Map);
-}
-
-
-MaybeObject* Map::CopyDropDescriptors() {
- Map* result;
- MaybeObject* maybe_result = RawCopy(instance_size());
- if (!maybe_result->To(&result)) return maybe_result;
+ Handle<Map> result = RawCopy(map, map->instance_size());
// Please note instance_type and instance_size are set when allocated.
- result->set_inobject_properties(inobject_properties());
- result->set_unused_property_fields(unused_property_fields());
+ result->set_inobject_properties(map->inobject_properties());
+ result->set_unused_property_fields(map->unused_property_fields());
- result->set_pre_allocated_property_fields(pre_allocated_property_fields());
+ result->set_pre_allocated_property_fields(
+ map->pre_allocated_property_fields());
result->set_is_shared(false);
- result->ClearCodeCache(GetHeap());
- NotifyLeafMapLayoutChange();
+ result->ClearCodeCache(map->GetHeap());
+ map->NotifyLeafMapLayoutChange();
return result;
}
-MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
- Descriptor* descriptor) {
+Handle<Map> Map::ShareDescriptor(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ Descriptor* descriptor) {
// Sanity check. This path is only to be taken if the map owns its descriptor
// array, implying that its NumberOfOwnDescriptors equals the number of
// descriptors in the descriptor array.
- ASSERT(NumberOfOwnDescriptors() ==
- instance_descriptors()->number_of_descriptors());
- Map* result;
- MaybeObject* maybe_result = CopyDropDescriptors();
- if (!maybe_result->To(&result)) return maybe_result;
-
- Name* name = descriptor->GetKey();
-
- TransitionArray* transitions;
- MaybeObject* maybe_transitions =
- AddTransition(name, result, SIMPLE_TRANSITION);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
-
- int old_size = descriptors->number_of_descriptors();
-
- DescriptorArray* new_descriptors;
+ ASSERT(map->NumberOfOwnDescriptors() ==
+ map->instance_descriptors()->number_of_descriptors());
- if (descriptors->NumberOfSlackDescriptors() > 0) {
- new_descriptors = descriptors;
- new_descriptors->Append(descriptor);
- } else {
- // Descriptor arrays grow by 50%.
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
- GetIsolate(), old_size, old_size < 4 ? 1 : old_size / 2);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- DescriptorArray::WhitenessWitness witness(new_descriptors);
+ Handle<Map> result = CopyDropDescriptors(map);
+ Handle<Name> name = descriptor->GetKey();
+ Handle<TransitionArray> transitions =
+ TransitionArray::CopyInsert(map, name, result, SIMPLE_TRANSITION);
- // Copy the descriptors, inserting a descriptor.
- for (int i = 0; i < old_size; ++i) {
- new_descriptors->CopyFrom(i, descriptors, i, witness);
+ // Ensure there's space for the new descriptor in the shared descriptor array.
+ if (descriptors->NumberOfSlackDescriptors() == 0) {
+ int old_size = descriptors->number_of_descriptors();
+ if (old_size == 0) {
+ descriptors = DescriptorArray::Allocate(map->GetIsolate(), 0, 1);
+ } else {
+ EnsureDescriptorSlack(map, old_size < 4 ? 1 : old_size / 2);
+ descriptors = handle(map->instance_descriptors());
}
+ }
- new_descriptors->Append(descriptor, witness);
-
- if (old_size > 0) {
- // If the source descriptors had an enum cache we copy it. This ensures
- // that the maps to which we push the new descriptor array back can rely
- // on a cache always being available once it is set. If the map has more
- // enumerated descriptors than available in the original cache, the cache
- // will be lazily replaced by the extended cache when needed.
- if (descriptors->HasEnumCache()) {
- new_descriptors->CopyEnumCacheFrom(descriptors);
- }
-
- Map* map;
- // Replace descriptors by new_descriptors in all maps that share it.
- for (Object* current = GetBackPointer();
- !current->IsUndefined();
- current = map->GetBackPointer()) {
- map = Map::cast(current);
- if (map->instance_descriptors() != descriptors) break;
- map->set_instance_descriptors(new_descriptors);
- }
+ // Commit the state atomically.
+ DisallowHeapAllocation no_gc;
- set_instance_descriptors(new_descriptors);
- }
- }
+ descriptors->Append(descriptor);
+ result->SetBackPointer(*map);
+ result->InitializeDescriptors(*descriptors);
- result->SetBackPointer(this);
- result->InitializeDescriptors(new_descriptors);
- ASSERT(result->NumberOfOwnDescriptors() == NumberOfOwnDescriptors() + 1);
+ ASSERT(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1);
- set_transitions(transitions);
- set_owns_descriptors(false);
+ map->set_transitions(*transitions);
+ map->set_owns_descriptors(false);
return result;
}
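
// A standalone sketch (illustrative only; DescriptorSlack is a hypothetical
// helper) of the growth policy used above: a full shared descriptor array
// gains a single slot while it is small and roughly 50% extra capacity
// afterwards.
static inline int DescriptorSlack(int old_size) {
  return old_size < 4 ? 1 : old_size / 2;
}
// E.g. arrays of size 1..3 gain one slot each; size 8 gains four.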
@@ -6824,33 +7143,28 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map,
Handle<DescriptorArray> descriptors,
TransitionFlag flag,
- Handle<Name> name) {
- CALL_HEAP_FUNCTION(map->GetIsolate(),
- map->CopyReplaceDescriptors(*descriptors, flag, *name),
- Map);
-}
-
-
-MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
- TransitionFlag flag,
- Name* name,
- SimpleTransitionFlag simple_flag) {
+ MaybeHandle<Name> maybe_name,
+ SimpleTransitionFlag simple_flag) {
ASSERT(descriptors->IsSortedNoDuplicates());
- Map* result;
- MaybeObject* maybe_result = CopyDropDescriptors();
- if (!maybe_result->To(&result)) return maybe_result;
-
- result->InitializeDescriptors(descriptors);
+ Handle<Map> result = CopyDropDescriptors(map);
+ result->InitializeDescriptors(*descriptors);
- if (flag == INSERT_TRANSITION && CanHaveMoreTransitions()) {
- TransitionArray* transitions;
- MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
- set_transitions(transitions);
- result->SetBackPointer(this);
+ if (flag == INSERT_TRANSITION && map->CanHaveMoreTransitions()) {
+ Handle<Name> name;
+ CHECK(maybe_name.ToHandle(&name));
+ Handle<TransitionArray> transitions = TransitionArray::CopyInsert(
+ map, name, result, simple_flag);
+ map->set_transitions(*transitions);
+ result->SetBackPointer(*map);
} else {
- descriptors->InitializeRepresentations(Representation::Tagged());
+ int length = descriptors->number_of_descriptors();
+ for (int i = 0; i < length; i++) {
+ descriptors->SetRepresentation(i, Representation::Tagged());
+ if (descriptors->GetDetails(i).type() == FIELD) {
+ descriptors->SetValue(i, HeapType::Any());
+ }
+ }
}
return result;
@@ -6864,7 +7178,7 @@ Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
Handle<DescriptorArray> descriptors) {
ASSERT(descriptors->IsSortedNoDuplicates());
- Handle<Map> result = Map::CopyDropDescriptors(map);
+ Handle<Map> result = CopyDropDescriptors(map);
result->InitializeDescriptors(*descriptors);
result->SetNumberOfOwnDescriptors(new_descriptor + 1);
@@ -6881,8 +7195,8 @@ Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
result->set_owns_descriptors(false);
Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
- Handle<TransitionArray> transitions = Map::AddTransition(map, name, result,
- SIMPLE_TRANSITION);
+ Handle<TransitionArray> transitions = TransitionArray::CopyInsert(
+ map, name, result, SIMPLE_TRANSITION);
map->set_transitions(*transitions);
result->SetBackPointer(*map);
@@ -6891,52 +7205,48 @@ Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
}
-MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
+Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
+ TransitionFlag flag) {
if (flag == INSERT_TRANSITION) {
- ASSERT(!HasElementsTransition() ||
- ((elements_transition_map()->elements_kind() == DICTIONARY_ELEMENTS ||
+ ASSERT(!map->HasElementsTransition() ||
+ ((map->elements_transition_map()->elements_kind() ==
+ DICTIONARY_ELEMENTS ||
IsExternalArrayElementsKind(
- elements_transition_map()->elements_kind())) &&
+ map->elements_transition_map()->elements_kind())) &&
(kind == DICTIONARY_ELEMENTS ||
IsExternalArrayElementsKind(kind))));
ASSERT(!IsFastElementsKind(kind) ||
- IsMoreGeneralElementsKindTransition(elements_kind(), kind));
- ASSERT(kind != elements_kind());
+ IsMoreGeneralElementsKindTransition(map->elements_kind(), kind));
+ ASSERT(kind != map->elements_kind());
}
bool insert_transition =
- flag == INSERT_TRANSITION && !HasElementsTransition();
+ flag == INSERT_TRANSITION && !map->HasElementsTransition();
- if (insert_transition && owns_descriptors()) {
+ if (insert_transition && map->owns_descriptors()) {
// In case the map owned its own descriptors, share the descriptors and
// transfer ownership to the new map.
- Map* new_map;
- MaybeObject* maybe_new_map = CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ Handle<Map> new_map = CopyDropDescriptors(map);
- MaybeObject* added_elements = set_elements_transition_map(new_map);
- if (added_elements->IsFailure()) return added_elements;
+ SetElementsTransitionMap(map, new_map);
new_map->set_elements_kind(kind);
- new_map->InitializeDescriptors(instance_descriptors());
- new_map->SetBackPointer(this);
- set_owns_descriptors(false);
+ new_map->InitializeDescriptors(map->instance_descriptors());
+ new_map->SetBackPointer(*map);
+ map->set_owns_descriptors(false);
return new_map;
}
// In case the map did not own its own descriptors, a split is forced by
// copying the map; creating a new descriptor array cell.
// Create a new free-floating map only if we are not allowed to store it.
- Map* new_map;
- MaybeObject* maybe_new_map = Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ Handle<Map> new_map = Copy(map);
new_map->set_elements_kind(kind);
if (insert_transition) {
- MaybeObject* added_elements = set_elements_transition_map(new_map);
- if (added_elements->IsFailure()) return added_elements;
- new_map->SetBackPointer(this);
+ SetElementsTransitionMap(map, new_map);
+ new_map->SetBackPointer(*map);
}
return new_map;
@@ -6952,14 +7262,13 @@ Handle<Map> Map::CopyForObserved(Handle<Map> map) {
// transfer ownership to the new map.
Handle<Map> new_map;
if (map->owns_descriptors()) {
- new_map = Map::CopyDropDescriptors(map);
+ new_map = CopyDropDescriptors(map);
} else {
- new_map = Map::Copy(map);
+ new_map = Copy(map);
}
- Handle<TransitionArray> transitions =
- Map::AddTransition(map, isolate->factory()->observed_symbol(), new_map,
- FULL_TRANSITION);
+ Handle<TransitionArray> transitions = TransitionArray::CopyInsert(
+ map, isolate->factory()->observed_symbol(), new_map, FULL_TRANSITION);
map->set_transitions(*transitions);
@@ -6975,127 +7284,127 @@ Handle<Map> Map::CopyForObserved(Handle<Map> map) {
}
-MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
- if (pre_allocated_property_fields() == 0) return CopyDropDescriptors();
+Handle<Map> Map::Copy(Handle<Map> map) {
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpTo(descriptors, number_of_own_descriptors);
+ return CopyReplaceDescriptors(
+ map, new_descriptors, OMIT_TRANSITION, MaybeHandle<Name>());
+}
- // If the map has pre-allocated properties always start out with a descriptor
- // array describing these properties.
- ASSERT(constructor()->IsJSFunction());
- JSFunction* ctor = JSFunction::cast(constructor());
- Map* map = ctor->initial_map();
- DescriptorArray* descriptors = map->instance_descriptors();
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors =
- descriptors->CopyUpTo(number_of_own_descriptors);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+Handle<Map> Map::Create(Handle<JSFunction> constructor,
+ int extra_inobject_properties) {
+ Handle<Map> copy = Copy(handle(constructor->initial_map()));
- return CopyReplaceDescriptors(new_descriptors, OMIT_TRANSITION);
-}
+ // Check that we do not overflow the instance size when adding the
+ // extra inobject properties.
+ int instance_size_delta = extra_inobject_properties * kPointerSize;
+ int max_instance_size_delta =
+ JSObject::kMaxInstanceSize - copy->instance_size();
+ int max_extra_properties = max_instance_size_delta >> kPointerSizeLog2;
+ // If the instance size overflows, we allocate as many properties as we can as
+ // inobject properties.
+ if (extra_inobject_properties > max_extra_properties) {
+ instance_size_delta = max_instance_size_delta;
+ extra_inobject_properties = max_extra_properties;
+ }
-Handle<Map> Map::Copy(Handle<Map> map) {
- CALL_HEAP_FUNCTION(map->GetIsolate(), map->Copy(), Map);
+ // Adjust the map with the extra inobject properties.
+ int inobject_properties =
+ copy->inobject_properties() + extra_inobject_properties;
+ copy->set_inobject_properties(inobject_properties);
+ copy->set_unused_property_fields(inobject_properties);
+ copy->set_instance_size(copy->instance_size() + instance_size_delta);
+ copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
+ return copy;
}
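
// A standalone sketch (illustrative only; all parameters are hypothetical
// stand-ins for copy->instance_size(), JSObject::kMaxInstanceSize and
// kPointerSize) of the clamp above: the requested number of extra in-object
// properties is capped so the instance size cannot overflow the maximum.
static inline int ClampExtraInobjectProperties(int requested,
                                               int instance_size,
                                               int max_instance_size,
                                               int pointer_size) {
  int max_extra = (max_instance_size - instance_size) / pointer_size;
  return requested > max_extra ? max_extra : requested;
}
// E.g. with 16 bytes of headroom and 4-byte pointers, a request for 10 extra
// properties is clamped to 4.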
-MaybeObject* Map::Copy() {
- DescriptorArray* descriptors = instance_descriptors();
- DescriptorArray* new_descriptors;
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- MaybeObject* maybe_descriptors =
- descriptors->CopyUpTo(number_of_own_descriptors);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- return CopyReplaceDescriptors(new_descriptors, OMIT_TRANSITION);
+Handle<Map> Map::CopyForFreeze(Handle<Map> map) {
+ int num_descriptors = map->NumberOfOwnDescriptors();
+ Isolate* isolate = map->GetIsolate();
+ Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
+ handle(map->instance_descriptors(), isolate), num_descriptors, FROZEN);
+ Handle<Map> new_map = CopyReplaceDescriptors(
+ map, new_desc, INSERT_TRANSITION, isolate->factory()->frozen_symbol());
+ new_map->freeze();
+ new_map->set_is_extensible(false);
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ return new_map;
}
-MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
- TransitionFlag flag) {
- DescriptorArray* descriptors = instance_descriptors();
+Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
+ Descriptor* descriptor,
+ TransitionFlag flag) {
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
// Ensure the key is unique.
- MaybeObject* maybe_failure = descriptor->KeyToUniqueName();
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- int old_size = NumberOfOwnDescriptors();
- int new_size = old_size + 1;
+ descriptor->KeyToUniqueName();
if (flag == INSERT_TRANSITION &&
- owns_descriptors() &&
- CanHaveMoreTransitions()) {
- return ShareDescriptor(descriptors, descriptor);
- }
-
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(GetIsolate(), old_size, 1);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- DescriptorArray::WhitenessWitness witness(new_descriptors);
-
- // Copy the descriptors, inserting a descriptor.
- for (int i = 0; i < old_size; ++i) {
- new_descriptors->CopyFrom(i, descriptors, i, witness);
+ map->owns_descriptors() &&
+ map->CanHaveMoreTransitions()) {
+ return ShareDescriptor(map, descriptors, descriptor);
}
- if (old_size != descriptors->number_of_descriptors()) {
- new_descriptors->SetNumberOfDescriptors(new_size);
- new_descriptors->Set(old_size, descriptor, witness);
- new_descriptors->Sort();
- } else {
- new_descriptors->Append(descriptor, witness);
- }
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
+ descriptors, map->NumberOfOwnDescriptors(), 1);
+ new_descriptors->Append(descriptor);
- Name* key = descriptor->GetKey();
- return CopyReplaceDescriptors(new_descriptors, flag, key, SIMPLE_TRANSITION);
+ return CopyReplaceDescriptors(
+ map, new_descriptors, flag, descriptor->GetKey(), SIMPLE_TRANSITION);
}
-MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
- TransitionFlag flag) {
- DescriptorArray* old_descriptors = instance_descriptors();
+Handle<Map> Map::CopyInsertDescriptor(Handle<Map> map,
+ Descriptor* descriptor,
+ TransitionFlag flag) {
+ Handle<DescriptorArray> old_descriptors(map->instance_descriptors());
// Ensure the key is unique.
- MaybeObject* maybe_result = descriptor->KeyToUniqueName();
- if (maybe_result->IsFailure()) return maybe_result;
+ descriptor->KeyToUniqueName();
// We replace the key if it is already present.
- int index = old_descriptors->SearchWithCache(descriptor->GetKey(), this);
+ int index = old_descriptors->SearchWithCache(*descriptor->GetKey(), *map);
if (index != DescriptorArray::kNotFound) {
- return CopyReplaceDescriptor(old_descriptors, descriptor, index, flag);
+ return CopyReplaceDescriptor(map, old_descriptors, descriptor, index, flag);
}
- return CopyAddDescriptor(descriptor, flag);
+ return CopyAddDescriptor(map, descriptor, flag);
}
-Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
+Handle<DescriptorArray> DescriptorArray::CopyUpTo(
Handle<DescriptorArray> desc,
int enumeration_index,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(desc->GetIsolate(),
- desc->CopyUpToAddAttributes(enumeration_index, attributes),
- DescriptorArray);
+ int slack) {
+ return DescriptorArray::CopyUpToAddAttributes(
+ desc, enumeration_index, NONE, slack);
}
-MaybeObject* DescriptorArray::CopyUpToAddAttributes(
- int enumeration_index, PropertyAttributes attributes) {
- if (enumeration_index == 0) return GetHeap()->empty_descriptor_array();
+Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ PropertyAttributes attributes,
+ int slack) {
+ if (enumeration_index + slack == 0) {
+ return desc->GetIsolate()->factory()->empty_descriptor_array();
+ }
int size = enumeration_index;
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = Allocate(GetIsolate(), size);
- if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
- DescriptorArray::WhitenessWitness witness(descriptors);
+ Handle<DescriptorArray> descriptors =
+ DescriptorArray::Allocate(desc->GetIsolate(), size, slack);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
if (attributes != NONE) {
for (int i = 0; i < size; ++i) {
- Object* value = GetValue(i);
- PropertyDetails details = GetDetails(i);
+ Object* value = desc->GetValue(i);
+ PropertyDetails details = desc->GetDetails(i);
int mask = DONT_DELETE | DONT_ENUM;
// READ_ONLY is an invalid attribute for JS setters/getters.
if (details.type() != CALLBACKS || !value->IsAccessorPair()) {
@@ -7103,59 +7412,44 @@ MaybeObject* DescriptorArray::CopyUpToAddAttributes(
}
details = details.CopyAddAttributes(
static_cast<PropertyAttributes>(attributes & mask));
- Descriptor desc(GetKey(i), value, details);
- descriptors->Set(i, &desc, witness);
+ Descriptor inner_desc(handle(desc->GetKey(i)),
+ handle(value, desc->GetIsolate()),
+ details);
+ descriptors->Set(i, &inner_desc, witness);
}
} else {
for (int i = 0; i < size; ++i) {
- descriptors->CopyFrom(i, this, i, witness);
+ descriptors->CopyFrom(i, *desc, witness);
}
}
- if (number_of_descriptors() != enumeration_index) descriptors->Sort();
+ if (desc->number_of_descriptors() != enumeration_index) descriptors->Sort();
return descriptors;
}
-MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
- Descriptor* descriptor,
- int insertion_index,
- TransitionFlag flag) {
+Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ Descriptor* descriptor,
+ int insertion_index,
+ TransitionFlag flag) {
// Ensure the key is unique.
- MaybeObject* maybe_failure = descriptor->KeyToUniqueName();
- if (maybe_failure->IsFailure()) return maybe_failure;
+ descriptor->KeyToUniqueName();
- Name* key = descriptor->GetKey();
- ASSERT(key == descriptors->GetKey(insertion_index));
+ Handle<Name> key = descriptor->GetKey();
+ ASSERT(*key == descriptors->GetKey(insertion_index));
- int new_size = NumberOfOwnDescriptors();
- ASSERT(0 <= insertion_index && insertion_index < new_size);
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
+ descriptors, map->NumberOfOwnDescriptors());
- ASSERT_LT(insertion_index, new_size);
-
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(GetIsolate(), new_size);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
- DescriptorArray::WhitenessWitness witness(new_descriptors);
-
- for (int i = 0; i < new_size; ++i) {
- if (i == insertion_index) {
- new_descriptors->Set(i, descriptor, witness);
- } else {
- new_descriptors->CopyFrom(i, descriptors, i, witness);
- }
- }
-
- // Re-sort if descriptors were removed.
- if (new_size != descriptors->length()) new_descriptors->Sort();
+ new_descriptors->Replace(insertion_index, descriptor);
SimpleTransitionFlag simple_flag =
(insertion_index == descriptors->number_of_descriptors() - 1)
? SIMPLE_TRANSITION
: FULL_TRANSITION;
- return CopyReplaceDescriptors(new_descriptors, flag, key, simple_flag);
+ return CopyReplaceDescriptors(map, new_descriptors, flag, key, simple_flag);
}
@@ -7163,23 +7457,16 @@ void Map::UpdateCodeCache(Handle<Map> map,
Handle<Name> name,
Handle<Code> code) {
Isolate* isolate = map->GetIsolate();
- CALL_HEAP_FUNCTION_VOID(isolate,
- map->UpdateCodeCache(*name, *code));
-}
-
-
-MaybeObject* Map::UpdateCodeCache(Name* name, Code* code) {
+ HandleScope scope(isolate);
// Allocate the code cache if not present.
- if (code_cache()->IsFixedArray()) {
- Object* result;
- { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- set_code_cache(result);
+ if (map->code_cache()->IsFixedArray()) {
+ Handle<Object> result = isolate->factory()->NewCodeCache();
+ map->set_code_cache(*result);
}
// Update the code cache.
- return CodeCache::cast(code_cache())->Update(name, code);
+ Handle<CodeCache> code_cache(CodeCache::cast(map->code_cache()), isolate);
+ CodeCache::Update(code_cache, name, code);
}
@@ -7210,74 +7497,92 @@ void Map::RemoveFromCodeCache(Name* name, Code* code, int index) {
}
-// An iterator over all map transitions in an descriptor array, reusing the map
-// field of the contens array while it is running.
+// An iterator over all map transitions in a descriptor array, reusing the
+// constructor field of the map while it is running. Negative values in
+// the constructor field indicate an active map transition iteration. The
+// original constructor is restored after iterating over all entries.
class IntrusiveMapTransitionIterator {
public:
- explicit IntrusiveMapTransitionIterator(TransitionArray* transition_array)
- : transition_array_(transition_array) { }
+ IntrusiveMapTransitionIterator(
+ Map* map, TransitionArray* transition_array, Object* constructor)
+ : map_(map),
+ transition_array_(transition_array),
+ constructor_(constructor) { }
- void Start() {
- ASSERT(!IsIterating());
- *TransitionArrayHeader() = Smi::FromInt(0);
+ void StartIfNotStarted() {
+ ASSERT(!(*IteratorField())->IsSmi() || IsIterating());
+ if (!(*IteratorField())->IsSmi()) {
+ ASSERT(*IteratorField() == constructor_);
+ *IteratorField() = Smi::FromInt(-1);
+ }
}
bool IsIterating() {
- return (*TransitionArrayHeader())->IsSmi();
+ return (*IteratorField())->IsSmi() &&
+ Smi::cast(*IteratorField())->value() < 0;
}
Map* Next() {
ASSERT(IsIterating());
- int index = Smi::cast(*TransitionArrayHeader())->value();
+ int value = Smi::cast(*IteratorField())->value();
+ int index = -value - 1;
int number_of_transitions = transition_array_->number_of_transitions();
while (index < number_of_transitions) {
- *TransitionArrayHeader() = Smi::FromInt(index + 1);
+ *IteratorField() = Smi::FromInt(value - 1);
return transition_array_->GetTarget(index);
}
- *TransitionArrayHeader() = transition_array_->GetHeap()->fixed_array_map();
+ *IteratorField() = constructor_;
return NULL;
}
private:
- Object** TransitionArrayHeader() {
- return HeapObject::RawField(transition_array_, TransitionArray::kMapOffset);
+ Object** IteratorField() {
+ return HeapObject::RawField(map_, Map::kConstructorOffset);
}
+ Map* map_;
TransitionArray* transition_array_;
+ Object* constructor_;
};
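
// A standalone sketch (illustrative only, using plain ints for Smi values)
// of the encoding used above: the map-transition iterator stores index i as
// the negative value -(i + 1), which cannot collide with the non-negative
// values used by the prototype-transition iterator below.
static inline int EncodeMapTransitionIndex(int index) { return -index - 1; }
static inline int DecodeMapTransitionIndex(int value) { return -value - 1; }
// EncodeMapTransitionIndex(0) == -1, and decoding -1 yields index 0 again.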
-// An iterator over all prototype transitions, reusing the map field of the
-// underlying array while it is running.
+// An iterator over all prototype transitions, reusing the constructor field
+// of the map while it is running. Positive values in the constructor field
+// indicate an active prototype transition iteration. The original constructor
+// is restored after iterating over all entries.
class IntrusivePrototypeTransitionIterator {
public:
- explicit IntrusivePrototypeTransitionIterator(HeapObject* proto_trans)
- : proto_trans_(proto_trans) { }
+ IntrusivePrototypeTransitionIterator(
+ Map* map, HeapObject* proto_trans, Object* constructor)
+ : map_(map), proto_trans_(proto_trans), constructor_(constructor) { }
- void Start() {
- ASSERT(!IsIterating());
- *Header() = Smi::FromInt(0);
+ void StartIfNotStarted() {
+ if (!(*IteratorField())->IsSmi()) {
+ ASSERT(*IteratorField() == constructor_);
+ *IteratorField() = Smi::FromInt(0);
+ }
}
bool IsIterating() {
- return (*Header())->IsSmi();
+ return (*IteratorField())->IsSmi() &&
+ Smi::cast(*IteratorField())->value() >= 0;
}
Map* Next() {
ASSERT(IsIterating());
- int transitionNumber = Smi::cast(*Header())->value();
+ int transitionNumber = Smi::cast(*IteratorField())->value();
if (transitionNumber < NumberOfTransitions()) {
- *Header() = Smi::FromInt(transitionNumber + 1);
+ *IteratorField() = Smi::FromInt(transitionNumber + 1);
return GetTransition(transitionNumber);
}
- *Header() = proto_trans_->GetHeap()->fixed_array_map();
+ *IteratorField() = constructor_;
return NULL;
}
private:
- Object** Header() {
- return HeapObject::RawField(proto_trans_, FixedArray::kMapOffset);
+ Object** IteratorField() {
+ return HeapObject::RawField(map_, Map::kConstructorOffset);
}
int NumberOfTransitions() {
@@ -7297,29 +7602,33 @@ class IntrusivePrototypeTransitionIterator {
transitionNumber * Map::kProtoTransitionElementsPerEntry;
}
+ Map* map_;
HeapObject* proto_trans_;
+ Object* constructor_;
};
// To traverse the transition tree iteratively, we have to store two kinds of
// information in a map: The parent map in the traversal and which children of a
// node have already been visited. To do this without additional memory, we
-// temporarily reuse two maps with known values:
+// temporarily reuse two fields with known values:
//
// (1) The map of the map temporarily holds the parent, and is restored to the
// meta map afterwards.
//
// (2) The information about which children have already been visited depends on which part
-// of the map we currently iterate:
+// of the map we currently iterate. We use the constructor field of the
+// map to store the current index. We can do that because the constructor
+// is the same for all involved maps.
//
// (a) If we currently follow normal map transitions, we temporarily store
-// the current index in the map of the FixedArray of the desciptor
-// array's contents, and restore it to the fixed array map afterwards.
-// Note that a single descriptor can have 0, 1, or 2 transitions.
+// the current index in the constructor field, and restore it to the
+// original constructor afterwards. Note that a single descriptor can
+// have 0, 1, or 2 transitions.
//
// (b) If we currently follow prototype transitions, we temporarily store
-// the current index in the map of the FixedArray holding the prototype
-// transitions, and restore it to the fixed array map afterwards.
+// the current index in the constructor field, and restore it to the
+// original constructor afterwards.
//
// Note that the child iterator is just a concatenation of two iterators: One
// iterating over map transitions and one iterating over prototype transitions.
@@ -7336,38 +7645,29 @@ class TraversableMap : public Map {
return old_parent;
}
- // Start iterating over this map's children, possibly destroying a FixedArray
- // map (see explanation above).
- void ChildIteratorStart() {
- if (HasTransitionArray()) {
- if (HasPrototypeTransitions()) {
- IntrusivePrototypeTransitionIterator(GetPrototypeTransitions()).Start();
- }
-
- IntrusiveMapTransitionIterator(transitions()).Start();
- }
- }
-
// If we have an unvisited child map, return that one and advance. If we have
- // none, return NULL and reset any destroyed FixedArray maps.
- TraversableMap* ChildIteratorNext() {
- TransitionArray* transition_array = unchecked_transition_array();
- if (!transition_array->map()->IsSmi() &&
- !transition_array->IsTransitionArray()) {
- return NULL;
- }
+ // none, return NULL and restore the overwritten constructor field.
+ TraversableMap* ChildIteratorNext(Object* constructor) {
+ if (!HasTransitionArray()) return NULL;
+ TransitionArray* transition_array = transitions();
if (transition_array->HasPrototypeTransitions()) {
HeapObject* proto_transitions =
- transition_array->UncheckedPrototypeTransitions();
- IntrusivePrototypeTransitionIterator proto_iterator(proto_transitions);
+ transition_array->GetPrototypeTransitions();
+ IntrusivePrototypeTransitionIterator proto_iterator(this,
+ proto_transitions,
+ constructor);
+ proto_iterator.StartIfNotStarted();
if (proto_iterator.IsIterating()) {
Map* next = proto_iterator.Next();
if (next != NULL) return static_cast<TraversableMap*>(next);
}
}
- IntrusiveMapTransitionIterator transition_iterator(transition_array);
+ IntrusiveMapTransitionIterator transition_iterator(this,
+ transition_array,
+ constructor);
+ transition_iterator.StartIfNotStarted();
if (transition_iterator.IsIterating()) {
Map* next = transition_iterator.Next();
if (next != NULL) return static_cast<TraversableMap*>(next);
@@ -7381,12 +7681,16 @@ class TraversableMap : public Map {
// Traverse the transition tree in postorder without using the C++ stack by
// doing pointer reversal.
void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
+ // Make sure that we do not allocate in the callback.
+ DisallowHeapAllocation no_allocation;
+
TraversableMap* current = static_cast<TraversableMap*>(this);
- current->ChildIteratorStart();
+ // Get the root constructor here to restore it later when finished iterating
+ // over maps.
+ Object* root_constructor = constructor();
while (true) {
- TraversableMap* child = current->ChildIteratorNext();
+ TraversableMap* child = current->ChildIteratorNext(root_constructor);
if (child != NULL) {
- child->ChildIteratorStart();
child->SetParent(current);
current = child;
} else {
@@ -7399,30 +7703,29 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
}
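
// A standalone sketch (illustrative only; SketchNode and its fields are
// hypothetical stand-ins for TraversableMap and its intrusive state) of the
// pointer-reversal scheme above: instead of a C++ stack, each child briefly
// stores its parent so the walk can climb back up, and per-node iterator
// state is reset once the node has been visited.
struct SketchNode {
  SketchNode* children[4];
  int child_count;
  int next_child;      // Intrusive iterator state; must start at 0.
  SketchNode* parent;  // Temporarily holds the traversal parent.
};

typedef void (*SketchVisit)(SketchNode* node);

static void TraversePostorder(SketchNode* root, SketchVisit visit) {
  SketchNode* current = root;
  while (true) {
    if (current->next_child < current->child_count) {
      SketchNode* child = current->children[current->next_child++];
      child->parent = current;  // Pointer reversal: remember the way back.
      current = child;
    } else {
      current->next_child = 0;  // Restore the node for future traversals.
      SketchNode* parent = current->parent;
      visit(current);           // Postorder: children first, then the node.
      if (current == root) break;
      current = parent;
    }
  }
}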
-MaybeObject* CodeCache::Update(Name* name, Code* code) {
+void CodeCache::Update(
+ Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
  // The number of monomorphic stubs for normal load/store/call ICs can grow
  // large, so they need to go into a hash table. They are used to load global
  // properties from cells.
if (code->type() == Code::NORMAL) {
// Make sure that a hash table is allocated for the normal load code cache.
- if (normal_type_cache()->IsUndefined()) {
- Object* result;
- { MaybeObject* maybe_result =
- CodeCacheHashTable::Allocate(GetHeap(),
- CodeCacheHashTable::kInitialSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- set_normal_type_cache(result);
+ if (code_cache->normal_type_cache()->IsUndefined()) {
+ Handle<Object> result =
+ CodeCacheHashTable::New(code_cache->GetIsolate(),
+ CodeCacheHashTable::kInitialSize);
+ code_cache->set_normal_type_cache(*result);
}
- return UpdateNormalTypeCache(name, code);
+ UpdateNormalTypeCache(code_cache, name, code);
} else {
- ASSERT(default_cache()->IsFixedArray());
- return UpdateDefaultCache(name, code);
+ ASSERT(code_cache->default_cache()->IsFixedArray());
+ UpdateDefaultCache(code_cache, name, code);
}
}
-MaybeObject* CodeCache::UpdateDefaultCache(Name* name, Code* code) {
+void CodeCache::UpdateDefaultCache(
+ Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
// When updating the default code cache we disregard the type encoded in the
// flags. This allows call constant stubs to overwrite call field
// stubs, etc.
@@ -7430,37 +7733,40 @@ MaybeObject* CodeCache::UpdateDefaultCache(Name* name, Code* code) {
// First check whether we can update existing code cache without
// extending it.
- FixedArray* cache = default_cache();
+ Handle<FixedArray> cache = handle(code_cache->default_cache());
int length = cache->length();
- int deleted_index = -1;
- for (int i = 0; i < length; i += kCodeCacheEntrySize) {
- Object* key = cache->get(i);
- if (key->IsNull()) {
- if (deleted_index < 0) deleted_index = i;
- continue;
- }
- if (key->IsUndefined()) {
- if (deleted_index >= 0) i = deleted_index;
- cache->set(i + kCodeCacheEntryNameOffset, name);
- cache->set(i + kCodeCacheEntryCodeOffset, code);
- return this;
- }
- if (name->Equals(Name::cast(key))) {
- Code::Flags found =
- Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
- if (Code::RemoveTypeFromFlags(found) == flags) {
- cache->set(i + kCodeCacheEntryCodeOffset, code);
- return this;
+ {
+ DisallowHeapAllocation no_alloc;
+ int deleted_index = -1;
+ for (int i = 0; i < length; i += kCodeCacheEntrySize) {
+ Object* key = cache->get(i);
+ if (key->IsNull()) {
+ if (deleted_index < 0) deleted_index = i;
+ continue;
+ }
+ if (key->IsUndefined()) {
+ if (deleted_index >= 0) i = deleted_index;
+ cache->set(i + kCodeCacheEntryNameOffset, *name);
+ cache->set(i + kCodeCacheEntryCodeOffset, *code);
+ return;
+ }
+ if (name->Equals(Name::cast(key))) {
+ Code::Flags found =
+ Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
+ if (Code::RemoveTypeFromFlags(found) == flags) {
+ cache->set(i + kCodeCacheEntryCodeOffset, *code);
+ return;
+ }
}
}
- }
- // Reached the end of the code cache. If there were deleted
- // elements, reuse the space for the first of them.
- if (deleted_index >= 0) {
- cache->set(deleted_index + kCodeCacheEntryNameOffset, name);
- cache->set(deleted_index + kCodeCacheEntryCodeOffset, code);
- return this;
+ // Reached the end of the code cache. If there were deleted
+ // elements, reuse the space for the first of them.
+ if (deleted_index >= 0) {
+ cache->set(deleted_index + kCodeCacheEntryNameOffset, *name);
+ cache->set(deleted_index + kCodeCacheEntryCodeOffset, *code);
+ return;
+ }
}
// Extend the code cache with some new entries (at least one). Must be a
@@ -7468,36 +7774,31 @@ MaybeObject* CodeCache::UpdateDefaultCache(Name* name, Code* code) {
int new_length = length + ((length >> 1)) + kCodeCacheEntrySize;
new_length = new_length - new_length % kCodeCacheEntrySize;
ASSERT((new_length % kCodeCacheEntrySize) == 0);
- Object* result;
- { MaybeObject* maybe_result = cache->CopySize(new_length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ cache = FixedArray::CopySize(cache, new_length);
// Add the (name, code) pair to the new cache.
- cache = FixedArray::cast(result);
- cache->set(length + kCodeCacheEntryNameOffset, name);
- cache->set(length + kCodeCacheEntryCodeOffset, code);
- set_default_cache(cache);
- return this;
+ cache->set(length + kCodeCacheEntryNameOffset, *name);
+ cache->set(length + kCodeCacheEntryCodeOffset, *code);
+ code_cache->set_default_cache(*cache);
}
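
// A standalone sketch (illustrative only; GrowDefaultCacheLength is a
// hypothetical helper) of the growth step above: the default cache grows by
// about 50% plus one entry, rounded down to a multiple of the entry size.
static inline int GrowDefaultCacheLength(int length, int entry_size) {
  int new_length = length + (length >> 1) + entry_size;
  return new_length - new_length % entry_size;
}
// E.g. with entry_size == 2: a length of 10 grows to 16, and 16 grows to 26.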
-MaybeObject* CodeCache::UpdateNormalTypeCache(Name* name, Code* code) {
+void CodeCache::UpdateNormalTypeCache(
+ Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
// Adding a new entry can cause a new cache to be allocated.
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- Object* new_cache;
- { MaybeObject* maybe_new_cache = cache->Put(name, code);
- if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache;
- }
- set_normal_type_cache(new_cache);
- return this;
+ Handle<CodeCacheHashTable> cache(
+ CodeCacheHashTable::cast(code_cache->normal_type_cache()));
+ Handle<Object> new_cache = CodeCacheHashTable::Put(cache, name, code);
+ code_cache->set_normal_type_cache(*new_cache);
}
Object* CodeCache::Lookup(Name* name, Code::Flags flags) {
- flags = Code::RemoveTypeFromFlags(flags);
- Object* result = LookupDefaultCache(name, flags);
- if (result->IsCode()) return result;
+ Object* result = LookupDefaultCache(name, Code::RemoveTypeFromFlags(flags));
+ if (result->IsCode()) {
+ if (Code::cast(result)->flags() == flags) return result;
+ return GetHeap()->undefined_value();
+ }
return LookupNormalTypeCache(name, flags);
}
@@ -7572,14 +7873,13 @@ void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
// lookup not to create a new entry.
class CodeCacheHashTableKey : public HashTableKey {
public:
- CodeCacheHashTableKey(Name* name, Code::Flags flags)
- : name_(name), flags_(flags), code_(NULL) { }
+ CodeCacheHashTableKey(Handle<Name> name, Code::Flags flags)
+ : name_(name), flags_(flags), code_() { }
- CodeCacheHashTableKey(Name* name, Code* code)
+ CodeCacheHashTableKey(Handle<Name> name, Handle<Code> code)
: name_(name), flags_(code->flags()), code_(code) { }
-
- bool IsMatch(Object* other) {
+ bool IsMatch(Object* other) V8_OVERRIDE {
if (!other->IsFixedArray()) return false;
FixedArray* pair = FixedArray::cast(other);
Name* name = Name::cast(pair->get(0));
@@ -7594,68 +7894,59 @@ class CodeCacheHashTableKey : public HashTableKey {
return name->Hash() ^ flags;
}
- uint32_t Hash() { return NameFlagsHashHelper(name_, flags_); }
+ uint32_t Hash() V8_OVERRIDE { return NameFlagsHashHelper(*name_, flags_); }
- uint32_t HashForObject(Object* obj) {
+ uint32_t HashForObject(Object* obj) V8_OVERRIDE {
FixedArray* pair = FixedArray::cast(obj);
Name* name = Name::cast(pair->get(0));
Code* code = Code::cast(pair->get(1));
return NameFlagsHashHelper(name, code->flags());
}
- MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) {
- ASSERT(code_ != NULL);
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(2);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* pair = FixedArray::cast(obj);
- pair->set(0, name_);
- pair->set(1, code_);
+ MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ Handle<Code> code = code_.ToHandleChecked();
+ Handle<FixedArray> pair = isolate->factory()->NewFixedArray(2);
+ pair->set(0, *name_);
+ pair->set(1, *code);
return pair;
}
private:
- Name* name_;
+ Handle<Name> name_;
Code::Flags flags_;
// TODO(jkummerow): We should be able to get by without this.
- Code* code_;
+ MaybeHandle<Code> code_;
};
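// CodeCacheHashTableKey implements the HashTableKey protocol used by
// FindEntry()/FindInsertionEntry(): IsMatch() compares the key against a
// stored entry, Hash() and HashForObject() must agree for matching keys,
// and AsHandle() materializes the key as the on-heap (name, code) pair the
// table actually stores. Since AsHandle() unwraps code_ with
// ToHandleChecked(), a lookup-only key built from (name, flags) must never
// be inserted. Sketch (variable names illustrative):
//
//   CodeCacheHashTableKey query(handle(name), flags);           // lookup only
//   CodeCacheHashTableKey insertable(name_handle, code_handle);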
Object* CodeCacheHashTable::Lookup(Name* name, Code::Flags flags) {
- CodeCacheHashTableKey key(name, flags);
+ DisallowHeapAllocation no_alloc;
+ CodeCacheHashTableKey key(handle(name), flags);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
-MaybeObject* CodeCacheHashTable::Put(Name* name, Code* code) {
+Handle<CodeCacheHashTable> CodeCacheHashTable::Put(
+ Handle<CodeCacheHashTable> cache, Handle<Name> name, Handle<Code> code) {
CodeCacheHashTableKey key(name, code);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- // Don't use |this|, as the table might have grown.
- CodeCacheHashTable* cache = reinterpret_cast<CodeCacheHashTable*>(obj);
+ Handle<CodeCacheHashTable> new_cache = EnsureCapacity(cache, 1, &key);
- int entry = cache->FindInsertionEntry(key.Hash());
- Object* k;
- { MaybeObject* maybe_k = key.AsObject(GetHeap());
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
+ int entry = new_cache->FindInsertionEntry(key.Hash());
+ Handle<Object> k = key.AsHandle(cache->GetIsolate());
- cache->set(EntryToIndex(entry), k);
- cache->set(EntryToIndex(entry) + 1, code);
- cache->ElementAdded();
- return cache;
+ new_cache->set(EntryToIndex(entry), *k);
+ new_cache->set(EntryToIndex(entry) + 1, *code);
+ new_cache->ElementAdded();
+ return new_cache;
}
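// EnsureCapacity() may allocate a larger backing store, so the insertion
// above deliberately goes through |new_cache| rather than |cache|; this is
// the handlified equivalent of the deleted "Don't use |this|, as the table
// might have grown" comment.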
int CodeCacheHashTable::GetIndex(Name* name, Code::Flags flags) {
- CodeCacheHashTableKey key(name, flags);
+ DisallowHeapAllocation no_alloc;
+ CodeCacheHashTableKey key(handle(name), flags);
int entry = FindEntry(&key);
return (entry == kNotFound) ? -1 : entry;
}
@@ -7670,41 +7961,27 @@ void CodeCacheHashTable::RemoveByIndex(int index) {
}
-void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> cache,
+void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> code_cache,
MapHandleList* maps,
Code::Flags flags,
Handle<Code> code) {
- Isolate* isolate = cache->GetIsolate();
- CALL_HEAP_FUNCTION_VOID(isolate, cache->Update(maps, flags, *code));
-}
-
-
-MaybeObject* PolymorphicCodeCache::Update(MapHandleList* maps,
- Code::Flags flags,
- Code* code) {
- // Initialize cache if necessary.
- if (cache()->IsUndefined()) {
- Object* result;
- { MaybeObject* maybe_result =
- PolymorphicCodeCacheHashTable::Allocate(
- GetHeap(),
- PolymorphicCodeCacheHashTable::kInitialSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- set_cache(result);
+ Isolate* isolate = code_cache->GetIsolate();
+ if (code_cache->cache()->IsUndefined()) {
+ Handle<PolymorphicCodeCacheHashTable> result =
+ PolymorphicCodeCacheHashTable::New(
+ isolate,
+ PolymorphicCodeCacheHashTable::kInitialSize);
+ code_cache->set_cache(*result);
} else {
// This entry shouldn't be contained in the cache yet.
- ASSERT(PolymorphicCodeCacheHashTable::cast(cache())
+ ASSERT(PolymorphicCodeCacheHashTable::cast(code_cache->cache())
->Lookup(maps, flags)->IsUndefined());
}
- PolymorphicCodeCacheHashTable* hash_table =
- PolymorphicCodeCacheHashTable::cast(cache());
- Object* new_cache;
- { MaybeObject* maybe_new_cache = hash_table->Put(maps, flags, code);
- if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache;
- }
- set_cache(new_cache);
- return this;
+ Handle<PolymorphicCodeCacheHashTable> hash_table =
+ handle(PolymorphicCodeCacheHashTable::cast(code_cache->cache()));
+ Handle<PolymorphicCodeCacheHashTable> new_cache =
+ PolymorphicCodeCacheHashTable::Put(hash_table, maps, flags, code);
+ code_cache->set_cache(*new_cache);
}
@@ -7730,7 +8007,7 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
: maps_(maps),
code_flags_(code_flags) {}
- bool IsMatch(Object* other) {
+ bool IsMatch(Object* other) V8_OVERRIDE {
MapHandleList other_maps(kDefaultListAllocationSize);
int other_flags;
FromObject(other, &other_flags, &other_maps);
@@ -7765,27 +8042,23 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
return hash;
}
- uint32_t Hash() {
+ uint32_t Hash() V8_OVERRIDE {
return MapsHashHelper(maps_, code_flags_);
}
- uint32_t HashForObject(Object* obj) {
+ uint32_t HashForObject(Object* obj) V8_OVERRIDE {
MapHandleList other_maps(kDefaultListAllocationSize);
int other_flags;
FromObject(obj, &other_flags, &other_maps);
return MapsHashHelper(&other_maps, other_flags);
}
- MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) {
- Object* obj;
+ MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
// The maps in |maps_| must be copied to a newly allocated FixedArray,
// both because the referenced MapHandleList is short-lived, and because C++
// objects can't be stored in the heap anyway.
- { MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedArray(maps_->length() + 1);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* list = FixedArray::cast(obj);
+ Handle<FixedArray> list =
+ isolate->factory()->NewUninitializedFixedArray(maps_->length() + 1);
list->set(0, Smi::FromInt(code_flags_));
for (int i = 0; i < maps_->length(); ++i) {
list->set(i + 1, *maps_->at(i));
@@ -7813,43 +8086,56 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
Object* PolymorphicCodeCacheHashTable::Lookup(MapHandleList* maps,
- int code_flags) {
- PolymorphicCodeCacheHashTableKey key(maps, code_flags);
+ int code_kind) {
+ DisallowHeapAllocation no_alloc;
+ PolymorphicCodeCacheHashTableKey key(maps, code_kind);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
-MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
- int code_flags,
- Code* code) {
- PolymorphicCodeCacheHashTableKey key(maps, code_flags);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- PolymorphicCodeCacheHashTable* cache =
- reinterpret_cast<PolymorphicCodeCacheHashTable*>(obj);
+Handle<PolymorphicCodeCacheHashTable> PolymorphicCodeCacheHashTable::Put(
+ Handle<PolymorphicCodeCacheHashTable> hash_table,
+ MapHandleList* maps,
+ int code_kind,
+ Handle<Code> code) {
+ PolymorphicCodeCacheHashTableKey key(maps, code_kind);
+ Handle<PolymorphicCodeCacheHashTable> cache =
+ EnsureCapacity(hash_table, 1, &key);
int entry = cache->FindInsertionEntry(key.Hash());
- { MaybeObject* maybe_obj = key.AsObject(GetHeap());
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- cache->set(EntryToIndex(entry), obj);
- cache->set(EntryToIndex(entry) + 1, code);
+
+ Handle<Object> obj = key.AsHandle(hash_table->GetIsolate());
+ cache->set(EntryToIndex(entry), *obj);
+ cache->set(EntryToIndex(entry) + 1, *code);
cache->ElementAdded();
return cache;
}
-MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
+void FixedArray::Shrink(int new_length) {
+ ASSERT(0 <= new_length && new_length <= length());
+ if (new_length < length()) {
+ RightTrimFixedArray<Heap::FROM_MUTATOR>(
+ GetHeap(), this, length() - new_length);
+ }
+}
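// Shrink() trims the tail in place via RightTrimFixedArray instead of
// copying; Heap::FROM_MUTATOR signals that the trim happens outside GC so
// live-byte accounting goes through the mutator path. Typical use is to
// allocate generously, fill, then trim (illustrative name):
//
//   keys->Shrink(number_of_live_entries);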
+
+
+MaybeHandle<FixedArray> FixedArray::AddKeysFromArrayLike(
+ Handle<FixedArray> content,
+ Handle<JSObject> array) {
+ ASSERT(array->IsJSArray() || array->HasSloppyArgumentsElements());
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_result =
- accessor->AddElementsToFixedArray(array, array, this);
- FixedArray* result;
- if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
+ Handle<FixedArray> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ array->GetIsolate(), result,
+ accessor->AddElementsToFixedArray(array, array, content),
+ FixedArray);
+
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
+ DisallowHeapAllocation no_allocation;
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
ASSERT(current->IsNumber() || current->IsName());
@@ -7860,14 +8146,22 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
}
-MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
- ElementsAccessor* accessor = ElementsAccessor::ForArray(other);
- MaybeObject* maybe_result =
- accessor->AddElementsToFixedArray(NULL, NULL, this, other);
- FixedArray* result;
- if (!maybe_result->To(&result)) return maybe_result;
+MaybeHandle<FixedArray> FixedArray::UnionOfKeys(Handle<FixedArray> first,
+ Handle<FixedArray> second) {
+ ElementsAccessor* accessor = ElementsAccessor::ForArray(second);
+ Handle<FixedArray> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ first->GetIsolate(), result,
+ accessor->AddElementsToFixedArray(
+ Handle<Object>::null(), // receiver
+ Handle<JSObject>::null(), // holder
+ first,
+ Handle<FixedArrayBase>::cast(second)),
+ FixedArray);
+
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
+ DisallowHeapAllocation no_allocation;
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
ASSERT(current->IsNumber() || current->IsName());
@@ -7878,24 +8172,22 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
}
-MaybeObject* FixedArray::CopySize(int new_length, PretenureFlag pretenure) {
- Heap* heap = GetHeap();
- if (new_length == 0) return heap->empty_fixed_array();
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length, pretenure);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* result = FixedArray::cast(obj);
+Handle<FixedArray> FixedArray::CopySize(
+ Handle<FixedArray> array, int new_length, PretenureFlag pretenure) {
+ Isolate* isolate = array->GetIsolate();
+ if (new_length == 0) return isolate->factory()->empty_fixed_array();
+ Handle<FixedArray> result =
+ isolate->factory()->NewFixedArray(new_length, pretenure);
// Copy the content
DisallowHeapAllocation no_gc;
- int len = length();
+ int len = array->length();
if (new_length < len) len = new_length;
// We are taking the map from the old fixed array so the map is sure to
// be an immortal immutable object.
- result->set_map_no_write_barrier(map());
+ result->set_map_no_write_barrier(array->map());
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) {
- result->set(i, get(i), mode);
+ result->set(i, array->get(i), mode);
}
return result;
}
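// CopySize() is the handlified replacement for the old MaybeObject
// version: allocation is handled inside the factory, so callers no longer
// thread a retry protocol through. The growth pattern in
// UpdateDefaultCache above is typical:
//
//   cache = FixedArray::CopySize(cache, new_length);  // may trigger GC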
@@ -7921,21 +8213,20 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
#endif
-MaybeObject* DescriptorArray::Allocate(Isolate* isolate,
- int number_of_descriptors,
- int slack) {
- Heap* heap = isolate->heap();
+Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
+ int number_of_descriptors,
+ int slack) {
+ ASSERT(0 <= number_of_descriptors);
+ Factory* factory = isolate->factory();
// Do not use DescriptorArray::cast on incomplete object.
int size = number_of_descriptors + slack;
- if (size == 0) return heap->empty_descriptor_array();
- FixedArray* result;
+ if (size == 0) return factory->empty_descriptor_array();
// Allocate the array of keys.
- MaybeObject* maybe_array = heap->AllocateFixedArray(LengthFor(size));
- if (!maybe_array->To(&result)) return maybe_array;
+ Handle<FixedArray> result = factory->NewFixedArray(LengthFor(size));
result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
result->set(kEnumCacheIndex, Smi::FromInt(0));
- return result;
+ return Handle<DescriptorArray>::cast(result);
}
@@ -7944,6 +8235,12 @@ void DescriptorArray::ClearEnumCache() {
}
+void DescriptorArray::Replace(int index, Descriptor* descriptor) {
+ descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index));
+ Set(index, descriptor);
+}
+
+
void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
FixedArray* new_cache,
Object* new_index_cache) {
@@ -7959,138 +8256,15 @@ void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
}
-void DescriptorArray::CopyFrom(int dst_index,
+void DescriptorArray::CopyFrom(int index,
DescriptorArray* src,
- int src_index,
const WhitenessWitness& witness) {
- Object* value = src->GetValue(src_index);
- PropertyDetails details = src->GetDetails(src_index);
- Descriptor desc(src->GetKey(src_index), value, details);
- Set(dst_index, &desc, witness);
-}
-
-
-Handle<DescriptorArray> DescriptorArray::Merge(Handle<DescriptorArray> desc,
- int verbatim,
- int valid,
- int new_size,
- int modify_index,
- StoreMode store_mode,
- Handle<DescriptorArray> other) {
- CALL_HEAP_FUNCTION(desc->GetIsolate(),
- desc->Merge(verbatim, valid, new_size, modify_index,
- store_mode, *other),
- DescriptorArray);
-}
-
-
-// Generalize the |other| descriptor array by merging it into the (at least
-// partly) updated |this| descriptor array.
-// The method merges two descriptor array in three parts. Both descriptor arrays
-// are identical up to |verbatim|. They also overlap in keys up to |valid|.
-// Between |verbatim| and |valid|, the resulting descriptor type as well as the
-// representation are generalized from both |this| and |other|. Beyond |valid|,
-// the descriptors are copied verbatim from |other| up to |new_size|.
-// In case of incompatible types, the type and representation of |other| is
-// used.
-MaybeObject* DescriptorArray::Merge(int verbatim,
- int valid,
- int new_size,
- int modify_index,
- StoreMode store_mode,
- DescriptorArray* other) {
- ASSERT(verbatim <= valid);
- ASSERT(valid <= new_size);
-
- DescriptorArray* result;
- // Allocate a new descriptor array large enough to hold the required
- // descriptors, with minimally the exact same size as this descriptor array.
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
- GetIsolate(), new_size,
- Max(new_size, other->number_of_descriptors()) - new_size);
- if (!maybe_descriptors->To(&result)) return maybe_descriptors;
- ASSERT(result->length() > length() ||
- result->NumberOfSlackDescriptors() > 0 ||
- result->number_of_descriptors() == other->number_of_descriptors());
- ASSERT(result->number_of_descriptors() == new_size);
-
- DescriptorArray::WhitenessWitness witness(result);
-
- int descriptor;
-
- // 0 -> |verbatim|
- int current_offset = 0;
- for (descriptor = 0; descriptor < verbatim; descriptor++) {
- if (GetDetails(descriptor).type() == FIELD) current_offset++;
- result->CopyFrom(descriptor, other, descriptor, witness);
- }
-
- // |verbatim| -> |valid|
- for (; descriptor < valid; descriptor++) {
- Name* key = GetKey(descriptor);
- PropertyDetails details = GetDetails(descriptor);
- PropertyDetails other_details = other->GetDetails(descriptor);
-
- if (details.type() == FIELD || other_details.type() == FIELD ||
- (store_mode == FORCE_FIELD && descriptor == modify_index) ||
- (details.type() == CONSTANT &&
- other_details.type() == CONSTANT &&
- GetValue(descriptor) != other->GetValue(descriptor))) {
- Representation representation =
- details.representation().generalize(other_details.representation());
- FieldDescriptor d(key,
- current_offset++,
- other_details.attributes(),
- representation);
- result->Set(descriptor, &d, witness);
- } else {
- result->CopyFrom(descriptor, other, descriptor, witness);
- }
- }
-
- // |valid| -> |new_size|
- for (; descriptor < new_size; descriptor++) {
- PropertyDetails details = other->GetDetails(descriptor);
- if (details.type() == FIELD ||
- (store_mode == FORCE_FIELD && descriptor == modify_index)) {
- Name* key = other->GetKey(descriptor);
- FieldDescriptor d(key,
- current_offset++,
- details.attributes(),
- details.representation());
- result->Set(descriptor, &d, witness);
- } else {
- result->CopyFrom(descriptor, other, descriptor, witness);
- }
- }
-
- result->Sort();
- return result;
-}
-
-
-// Checks whether a merge of |other| into |this| would return a copy of |this|.
-bool DescriptorArray::IsMoreGeneralThan(int verbatim,
- int valid,
- int new_size,
- DescriptorArray* other) {
- ASSERT(verbatim <= valid);
- ASSERT(valid <= new_size);
- if (valid != new_size) return false;
-
- for (int descriptor = verbatim; descriptor < valid; descriptor++) {
- PropertyDetails details = GetDetails(descriptor);
- PropertyDetails other_details = other->GetDetails(descriptor);
- if (!other_details.representation().fits_into(details.representation())) {
- return false;
- }
- if (details.type() == CONSTANT) {
- if (other_details.type() != CONSTANT) return false;
- if (GetValue(descriptor) != other->GetValue(descriptor)) return false;
- }
- }
-
- return true;
+ Object* value = src->GetValue(index);
+ PropertyDetails details = src->GetDetails(index);
+ Descriptor desc(handle(src->GetKey(index)),
+ handle(value, src->GetIsolate()),
+ details);
+ Set(index, &desc, witness);
}
@@ -8167,21 +8341,29 @@ Object* AccessorPair::GetComponent(AccessorComponent component) {
}
-MaybeObject* DeoptimizationInputData::Allocate(Isolate* isolate,
- int deopt_entry_count,
- PretenureFlag pretenure) {
+Handle<DeoptimizationInputData> DeoptimizationInputData::New(
+ Isolate* isolate,
+ int deopt_entry_count,
+ PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- return isolate->heap()->AllocateFixedArray(LengthFor(deopt_entry_count),
- pretenure);
+ return Handle<DeoptimizationInputData>::cast(
+ isolate->factory()->NewFixedArray(
+ LengthFor(deopt_entry_count), pretenure));
}
-MaybeObject* DeoptimizationOutputData::Allocate(Isolate* isolate,
- int number_of_deopt_points,
- PretenureFlag pretenure) {
- if (number_of_deopt_points == 0) return isolate->heap()->empty_fixed_array();
- return isolate->heap()->AllocateFixedArray(
- LengthOfFixedArray(number_of_deopt_points), pretenure);
+Handle<DeoptimizationOutputData> DeoptimizationOutputData::New(
+ Isolate* isolate,
+ int number_of_deopt_points,
+ PretenureFlag pretenure) {
+ Handle<FixedArray> result;
+ if (number_of_deopt_points == 0) {
+ result = isolate->factory()->empty_fixed_array();
+ } else {
+ result = isolate->factory()->NewFixedArray(
+ LengthOfFixedArray(number_of_deopt_points), pretenure);
+ }
+ return Handle<DeoptimizationOutputData>::cast(result);
}
@@ -8198,32 +8380,6 @@ bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
#endif
-static bool IsIdentifier(UnicodeCache* cache, Name* name) {
- // Checks whether the buffer contains an identifier (no escape).
- if (!name->IsString()) return false;
- String* string = String::cast(name);
- if (string->length() == 0) return false;
- ConsStringIteratorOp op;
- StringCharacterStream stream(string, &op);
- if (!cache->IsIdentifierStart(stream.GetNext())) {
- return false;
- }
- while (stream.HasMore()) {
- if (!cache->IsIdentifierPart(stream.GetNext())) {
- return false;
- }
- }
- return true;
-}
-
-
-bool Name::IsCacheable(Isolate* isolate) {
- return IsSymbol() ||
- IsIdentifier(isolate->unicode_cache(), this) ||
- this == isolate->heap()->hidden_string();
-}
-
-
bool String::LooksValid() {
if (!GetIsolate()->heap()->Contains(this)) return false;
return true;
@@ -8259,7 +8415,7 @@ String::FlatContent String::GetFlatContent() {
} else {
start = ExternalAsciiString::cast(string)->GetChars();
}
- return FlatContent(Vector<const uint8_t>(start + offset, length));
+ return FlatContent(start + offset, length);
} else {
ASSERT(shape.encoding_tag() == kTwoByteStringTag);
const uc16* start;
@@ -8268,7 +8424,7 @@ String::FlatContent String::GetFlatContent() {
} else {
start = ExternalTwoByteString::cast(string)->GetChars();
}
- return FlatContent(Vector<const uc16>(start + offset, length));
+ return FlatContent(start + offset, length);
}
}
@@ -8395,7 +8551,7 @@ int Relocatable::ArchiveSpacePerThread() {
}
-// Archive statics that are thread local.
+// Archive statics that are thread-local.
char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
*reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
isolate->set_relocatable_top(NULL);
@@ -8403,7 +8559,7 @@ char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
}
-// Restore statics that are thread local.
+// Restore statics that are thread-local.
char* Relocatable::RestoreState(Isolate* isolate, char* from) {
isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from));
return from + ArchiveSpacePerThread();
@@ -8464,34 +8620,47 @@ void FlatStringReader::PostGarbageCollection() {
}
-String* ConsStringIteratorOp::Operate(String* string,
- unsigned* offset_out,
- int32_t* type_out,
- unsigned* length_out) {
- ASSERT(string->IsConsString());
- ConsString* cons_string = ConsString::cast(string);
- // Set up search data.
+void ConsStringIteratorOp::Initialize(ConsString* cons_string, int offset) {
+ ASSERT(cons_string != NULL);
root_ = cons_string;
- consumed_ = *offset_out;
- // Now search.
- return Search(offset_out, type_out, length_out);
+ consumed_ = offset;
+ // Force stack blown condition to trigger restart.
+ depth_ = 1;
+ maximum_depth_ = kStackSize + depth_;
+ ASSERT(StackBlown());
}
-String* ConsStringIteratorOp::Search(unsigned* offset_out,
- int32_t* type_out,
- unsigned* length_out) {
+String* ConsStringIteratorOp::Continue(int* offset_out) {
+ ASSERT(depth_ != 0);
+ ASSERT_EQ(0, *offset_out);
+ bool blew_stack = StackBlown();
+ String* string = NULL;
+ // Get the next leaf if there is one.
+ if (!blew_stack) string = NextLeaf(&blew_stack);
+ // Restart search from root.
+ if (blew_stack) {
+ ASSERT(string == NULL);
+ string = Search(offset_out);
+ }
+ // Ensure future calls return null immediately.
+ if (string == NULL) Reset(NULL);
+ return string;
+}
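// Initialize() above deliberately forces the stack-blown condition
// (depth_ = 1, maximum_depth_ = kStackSize + depth_), so the first call to
// Continue() skips NextLeaf() and restarts from the root via Search(),
// which locates the leaf containing the requested offset.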
+
+
+String* ConsStringIteratorOp::Search(int* offset_out) {
ConsString* cons_string = root_;
// Reset the stack, pushing the root string.
depth_ = 1;
maximum_depth_ = 1;
frames_[0] = cons_string;
- const unsigned consumed = consumed_;
- unsigned offset = 0;
+ const int consumed = consumed_;
+ int offset = 0;
while (true) {
// Loop until the string is found which contains the target offset.
String* string = cons_string->first();
- unsigned length = string->length();
+ int length = string->length();
int32_t type;
if (consumed < offset + length) {
// Target offset is in the left branch.
@@ -8502,7 +8671,7 @@ String* ConsStringIteratorOp::Search(unsigned* offset_out,
PushLeft(cons_string);
continue;
}
- // Tell the stack we're done decending.
+ // Tell the stack we're done descending.
AdjustMaximumDepth();
} else {
// Descend right.
@@ -8514,7 +8683,6 @@ String* ConsStringIteratorOp::Search(unsigned* offset_out,
if ((type & kStringRepresentationMask) == kConsStringTag) {
cons_string = ConsString::cast(string);
PushRight(cons_string);
- // TODO(dcarney) Add back root optimization.
continue;
}
// Need this to be updated for the current string.
@@ -8522,11 +8690,11 @@ String* ConsStringIteratorOp::Search(unsigned* offset_out,
// Account for the possibility of an empty right leaf.
// This happens only if we have asked for an offset outside the string.
if (length == 0) {
- // Reset depth so future operations will return null immediately.
- Reset();
+ // Reset so future operations will return null immediately.
+ Reset(NULL);
return NULL;
}
- // Tell the stack we're done decending.
+ // Tell the stack we're done descending.
AdjustMaximumDepth();
// Pop stack so next iteration is in correct place.
Pop();
@@ -8535,8 +8703,6 @@ String* ConsStringIteratorOp::Search(unsigned* offset_out,
// Adjust return values and exit.
consumed_ = offset + length;
*offset_out = consumed - offset;
- *type_out = type;
- *length_out = length;
return string;
}
UNREACHABLE();
@@ -8544,9 +8710,7 @@ String* ConsStringIteratorOp::Search(unsigned* offset_out,
}
-String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
- int32_t* type_out,
- unsigned* length_out) {
+String* ConsStringIteratorOp::NextLeaf(bool* blew_stack) {
while (true) {
// Tree traversal complete.
if (depth_ == 0) {
@@ -8554,7 +8718,7 @@ String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
return NULL;
}
// We've lost track of higher nodes.
- if (maximum_depth_ - depth_ == kStackSize) {
+ if (StackBlown()) {
*blew_stack = true;
return NULL;
}
@@ -8565,16 +8729,13 @@ String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
if ((type & kStringRepresentationMask) != kConsStringTag) {
// Pop stack so next iteration is in correct place.
Pop();
- unsigned length = static_cast<unsigned>(string->length());
+ int length = string->length();
// Could be a flattened ConsString.
if (length == 0) continue;
- *length_out = length;
- *type_out = type;
consumed_ += length;
return string;
}
cons_string = ConsString::cast(string);
- // TODO(dcarney) Add back root optimization.
PushRight(cons_string);
// Need to traverse all the way left.
while (true) {
@@ -8583,10 +8744,8 @@ String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
type = string->map()->instance_type();
if ((type & kStringRepresentationMask) != kConsStringTag) {
AdjustMaximumDepth();
- unsigned length = static_cast<unsigned>(string->length());
+ int length = string->length();
ASSERT(length != 0);
- *length_out = length;
- *type_out = type;
consumed_ += length;
return string;
}
@@ -8725,6 +8884,64 @@ void String::WriteToFlat(String* src,
}
+
+template <typename SourceChar>
+static void CalculateLineEndsImpl(Isolate* isolate,
+ List<int>* line_ends,
+ Vector<const SourceChar> src,
+ bool include_ending_line) {
+ const int src_len = src.length();
+ StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n"));
+
+ // Find and record line ends.
+ int position = 0;
+ while (position != -1 && position < src_len) {
+ position = search.Search(src, position);
+ if (position != -1) {
+ line_ends->Add(position);
+ position++;
+ } else if (include_ending_line) {
+ // Even if the last line lacks a line end, it is still counted.
+ line_ends->Add(src_len);
+ return;
+ }
+ }
+}
+
+
+Handle<FixedArray> String::CalculateLineEnds(Handle<String> src,
+ bool include_ending_line) {
+ src = Flatten(src);
+ // Rough estimate of the line count, assuming an average line length of
+ // 16 characters (hence the shift right by 4 below).
+ int line_count_estimate = src->length() >> 4;
+ List<int> line_ends(line_count_estimate);
+ Isolate* isolate = src->GetIsolate();
+ { DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
+ // Dispatch on type of strings.
+ String::FlatContent content = src->GetFlatContent();
+ ASSERT(content.IsFlat());
+ if (content.IsAscii()) {
+ CalculateLineEndsImpl(isolate,
+ &line_ends,
+ content.ToOneByteVector(),
+ include_ending_line);
+ } else {
+ CalculateLineEndsImpl(isolate,
+ &line_ends,
+ content.ToUC16Vector(),
+ include_ending_line);
+ }
+ }
+ int line_count = line_ends.length();
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count);
+ for (int i = 0; i < line_count; i++) {
+ array->set(i, Smi::FromInt(line_ends[i]));
+ }
+ return array;
+}
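// A small worked example of CalculateLineEnds (illustrative): for
// src = "ab\ncd" with include_ending_line == true, the search records the
// '\n' at position 2, then appends src_len == 5 for the unterminated last
// line, so the returned array holds the Smis [2, 5].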
+
+
// Compares the contents of two strings by reading and comparing
// int-sized blocks of characters.
template <typename Char>
@@ -8803,25 +9020,29 @@ class StringComparator {
explicit inline State(ConsStringIteratorOp* op)
: op_(op), is_one_byte_(true), length_(0), buffer8_(NULL) {}
- inline void Init(String* string, unsigned len) {
- op_->Reset();
- int32_t type = string->map()->instance_type();
- String::Visit(string, 0, *this, *op_, type, len);
+ inline void Init(String* string) {
+ ConsString* cons_string = String::VisitFlat(this, string);
+ op_->Reset(cons_string);
+ if (cons_string != NULL) {
+ int offset;
+ string = op_->Next(&offset);
+ String::VisitFlat(this, string, offset);
+ }
}
- inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+ inline void VisitOneByteString(const uint8_t* chars, int length) {
is_one_byte_ = true;
buffer8_ = chars;
length_ = length;
}
- inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+ inline void VisitTwoByteString(const uint16_t* chars, int length) {
is_one_byte_ = false;
buffer16_ = chars;
length_ = length;
}
- void Advance(unsigned consumed) {
+ void Advance(int consumed) {
ASSERT(consumed <= length_);
// Still in buffer.
if (length_ != consumed) {
@@ -8834,18 +9055,16 @@ class StringComparator {
return;
}
// Advance state.
- ASSERT(op_->HasMore());
- int32_t type = 0;
- unsigned length = 0;
- String* next = op_->ContinueOperation(&type, &length);
+ int offset;
+ String* next = op_->Next(&offset);
+ ASSERT_EQ(0, offset);
ASSERT(next != NULL);
- ConsStringNullOp null_op;
- String::Visit(next, 0, *this, null_op, type, length);
+ String::VisitFlat(this, next);
}
ConsStringIteratorOp* const op_;
bool is_one_byte_;
- unsigned length_;
+ int length_;
union {
const uint8_t* buffer8_;
const uint16_t* buffer16_;
@@ -8863,18 +9082,18 @@ class StringComparator {
}
template<typename Chars1, typename Chars2>
- static inline bool Equals(State* state_1, State* state_2, unsigned to_check) {
+ static inline bool Equals(State* state_1, State* state_2, int to_check) {
const Chars1* a = reinterpret_cast<const Chars1*>(state_1->buffer8_);
const Chars2* b = reinterpret_cast<const Chars2*>(state_2->buffer8_);
return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check);
}
- bool Equals(unsigned length, String* string_1, String* string_2) {
- ASSERT(length != 0);
- state_1_.Init(string_1, length);
- state_2_.Init(string_2, length);
+ bool Equals(String* string_1, String* string_2) {
+ int length = string_1->length();
+ state_1_.Init(string_1);
+ state_2_.Init(string_2);
while (true) {
- unsigned to_check = Min(state_1_.length_, state_2_.length_);
+ int to_check = Min(state_1_.length_, state_2_.length_);
ASSERT(to_check > 0 && to_check <= length);
bool is_equal;
if (state_1_.is_one_byte_) {
@@ -8908,6 +9127,7 @@ class StringComparator {
bool String::SlowEquals(String* other) {
+ DisallowHeapAllocation no_gc;
// Fast check: negative check with lengths.
int len = length();
if (len != other->length()) return false;
@@ -8937,14 +9157,9 @@ bool String::SlowEquals(String* other) {
// before we try to flatten the strings.
if (this->Get(0) != other->Get(0)) return false;
- String* lhs = this->TryFlattenGetString();
- String* rhs = other->TryFlattenGetString();
-
- // TODO(dcarney): Compare all types of flat strings with a Visitor.
- if (StringShape(lhs).IsSequentialAscii() &&
- StringShape(rhs).IsSequentialAscii()) {
- const uint8_t* str1 = SeqOneByteString::cast(lhs)->GetChars();
- const uint8_t* str2 = SeqOneByteString::cast(rhs)->GetChars();
+ if (IsSeqOneByteString() && other->IsSeqOneByteString()) {
+ const uint8_t* str1 = SeqOneByteString::cast(this)->GetChars();
+ const uint8_t* str2 = SeqOneByteString::cast(other)->GetChars();
return CompareRawStringContents(str1, str2, len);
}
@@ -8952,7 +9167,57 @@ bool String::SlowEquals(String* other) {
StringComparator comparator(isolate->objects_string_compare_iterator_a(),
isolate->objects_string_compare_iterator_b());
- return comparator.Equals(static_cast<unsigned>(len), lhs, rhs);
+ return comparator.Equals(this, other);
+}
+
+
+bool String::SlowEquals(Handle<String> one, Handle<String> two) {
+ // Fast check: negative check with lengths.
+ int one_length = one->length();
+ if (one_length != two->length()) return false;
+ if (one_length == 0) return true;
+
+ // Fast check: if hash code is computed for both strings
+ // a fast negative check can be performed.
+ if (one->HasHashCode() && two->HasHashCode()) {
+#ifdef ENABLE_SLOW_ASSERTS
+ if (FLAG_enable_slow_asserts) {
+ if (one->Hash() != two->Hash()) {
+ bool found_difference = false;
+ for (int i = 0; i < one_length; i++) {
+ if (one->Get(i) != two->Get(i)) {
+ found_difference = true;
+ break;
+ }
+ }
+ ASSERT(found_difference);
+ }
+ }
+#endif
+ if (one->Hash() != two->Hash()) return false;
+ }
+
+ // We know the strings are both non-empty. Compare the first chars
+ // before we try to flatten the strings.
+ if (one->Get(0) != two->Get(0)) return false;
+
+ one = String::Flatten(one);
+ two = String::Flatten(two);
+
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat1 = one->GetFlatContent();
+ String::FlatContent flat2 = two->GetFlatContent();
+
+ if (flat1.IsAscii() && flat2.IsAscii()) {
+ return CompareRawStringContents(flat1.ToOneByteVector().start(),
+ flat2.ToOneByteVector().start(),
+ one_length);
+ } else {
+ for (int i = 0; i < one_length; i++) {
+ if (flat1.Get(i) != flat2.Get(i)) return false;
+ }
+ return true;
+ }
}
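// Two SlowEquals overloads now coexist: the raw-pointer version above runs
// under DisallowHeapAllocation and can only compare strings as they are,
// while this handle-based version may call String::Flatten() (which can
// allocate) before comparing flat content, taking the memcmp-like
// CompareRawStringContents path when both sides are ASCII.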
@@ -9037,49 +9302,31 @@ bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
class IteratingStringHasher: public StringHasher {
public:
static inline uint32_t Hash(String* string, uint32_t seed) {
- const unsigned len = static_cast<unsigned>(string->length());
- IteratingStringHasher hasher(len, seed);
- if (hasher.has_trivial_hash()) {
- return hasher.GetHashField();
- }
- int32_t type = string->map()->instance_type();
- ConsStringNullOp null_op;
- String::Visit(string, 0, hasher, null_op, type, len);
- // Flat strings terminate immediately.
- if (hasher.consumed_ == len) {
- ASSERT(!string->IsConsString());
- return hasher.GetHashField();
- }
- ASSERT(string->IsConsString());
+ IteratingStringHasher hasher(string->length(), seed);
+ // Nothing to do.
+ if (hasher.has_trivial_hash()) return hasher.GetHashField();
+ ConsString* cons_string = String::VisitFlat(&hasher, string);
+ // The string was flat.
+ if (cons_string == NULL) return hasher.GetHashField();
// This is a ConsString, iterate across it.
- ConsStringIteratorOp op;
- unsigned offset = 0;
- unsigned leaf_length = len;
- string = op.Operate(string, &offset, &type, &leaf_length);
- while (true) {
- ASSERT(hasher.consumed_ < len);
- String::Visit(string, 0, hasher, null_op, type, leaf_length);
- if (hasher.consumed_ == len) break;
- string = op.ContinueOperation(&type, &leaf_length);
- // This should be taken care of by the length check.
- ASSERT(string != NULL);
+ ConsStringIteratorOp op(cons_string);
+ int offset;
+ while (NULL != (string = op.Next(&offset))) {
+ String::VisitFlat(&hasher, string, offset);
}
return hasher.GetHashField();
}
- inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
- AddCharacters(chars, static_cast<int>(length));
- consumed_ += length;
+ inline void VisitOneByteString(const uint8_t* chars, int length) {
+ AddCharacters(chars, length);
}
- inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
- AddCharacters(chars, static_cast<int>(length));
- consumed_ += length;
+ inline void VisitTwoByteString(const uint16_t* chars, int length) {
+ AddCharacters(chars, length);
}
private:
inline IteratingStringHasher(int len, uint32_t seed)
- : StringHasher(len, seed),
- consumed_(0) {}
- unsigned consumed_;
+ : StringHasher(len, seed) {
+ }
DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
};
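// Hashing now shares the traversal protocol with string comparison:
// String::VisitFlat() consumes a flat string in one shot and returns the
// underlying ConsString (or NULL) for the iterator to walk leaf by leaf.
// A sketch of the pattern used by Hash() above (visitor name illustrative):
//
//   ConsString* cons = String::VisitFlat(&visitor, string);
//   if (cons != NULL) {
//     ConsStringIteratorOp op(cons);
//     int offset;
//     String* leaf;
//     while (NULL != (leaf = op.Next(&offset))) {
//       String::VisitFlat(&visitor, leaf, offset);
//     }
//   }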
@@ -9137,7 +9384,7 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
uint32_t field = hash_field();
if ((field & kIsNotArrayIndexMask) != 0) return false;
// Isolate the array index from the full hash field.
- *index = (kArrayIndexHashMask & field) >> kHashShift;
+ *index = ArrayIndexValueBits::decode(field);
return true;
} else {
return ComputeArrayIndex(index);
@@ -9160,7 +9407,6 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
}
int delta = old_size - new_size;
- string->set_length(new_length);
Address start_of_string = string->address();
ASSERT_OBJECT_ALIGNED(start_of_string);
@@ -9177,50 +9423,17 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
// that are a multiple of pointer size.
heap->CreateFillerObjectAt(start_of_string + new_size, delta);
}
- if (Marking::IsBlack(Marking::MarkBitFrom(start_of_string))) {
- MemoryChunk::IncrementLiveBytesFromMutator(start_of_string, -delta);
- }
+ heap->AdjustLiveBytes(start_of_string, -delta, Heap::FROM_MUTATOR);
+ // We are storing the new length using release store after creating a filler
+ // for the left-over space to avoid races with the sweeper thread.
+ string->synchronized_set_length(new_length);
if (new_length == 0) return heap->isolate()->factory()->empty_string();
return string;
}
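// The ordering in Truncate() is deliberate: the filler object and the
// live-byte adjustment come first, and only then is the new length
// published with a release store (synchronized_set_length), so a
// concurrent sweeper can never observe the shortened length while the
// trimmed tail is still unformatted heap.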
-AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object,
- bool in_GC) {
- // Currently, AllocationMemento objects are only allocated immediately
- // after JSArrays and some JSObjects in NewSpace. Detecting whether a
- // memento is present involves carefully checking the object immediately
- // after the current object (if there is one) to see if it's an
- // AllocationMemento.
- if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
- Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
- object->Size();
- Address top;
- if (in_GC) {
- top = object->GetHeap()->new_space()->FromSpacePageHigh();
- } else {
- top = object->GetHeap()->NewSpaceTop();
- }
- if ((ptr_end + AllocationMemento::kSize) <= top) {
- // There is room in newspace for allocation info. Do we have some?
- Map** possible_allocation_memento_map =
- reinterpret_cast<Map**>(ptr_end);
- if (*possible_allocation_memento_map ==
- object->GetHeap()->allocation_memento_map()) {
- AllocationMemento* memento = AllocationMemento::cast(
- reinterpret_cast<Object*>(ptr_end + kHeapObjectTag));
- if (memento->IsValid()) {
- return memento;
- }
- }
- }
- }
- return NULL;
-}
-
-
uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
// For array indexes, mix the length into the hash, as an array index could
// be zero.
@@ -9229,8 +9442,8 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- value <<= String::kHashShift;
- value |= length << String::kArrayIndexHashLengthShift;
+ value <<= String::ArrayIndexValueBits::kShift;
+ value |= length << String::ArrayIndexLengthBits::kShift;
ASSERT((value & String::kIsNotArrayIndexMask) == 0);
ASSERT((length > String::kMaxCachedArrayIndexLength) ||
@@ -9301,14 +9514,6 @@ uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
}
-MaybeObject* String::SubString(int start, int end, PretenureFlag pretenure) {
- Heap* heap = GetHeap();
- if (start == 0 && end == length()) return this;
- MaybeObject* result = heap->AllocateSubString(this, start, end, pretenure);
- return result;
-}
-
-
void String::PrintOn(FILE* file) {
int length = this->length();
for (int i = 0; i < length; i++) {
@@ -9328,11 +9533,12 @@ static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) {
int to_trim = enum_cache->length() - live_enum;
if (to_trim <= 0) return;
- RightTrimFixedArray<FROM_GC>(heap, descriptors->GetEnumCache(), to_trim);
+ RightTrimFixedArray<Heap::FROM_GC>(
+ heap, descriptors->GetEnumCache(), to_trim);
if (!descriptors->HasEnumIndicesCache()) return;
FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
- RightTrimFixedArray<FROM_GC>(heap, enum_indices_cache, to_trim);
+ RightTrimFixedArray<Heap::FROM_GC>(heap, enum_indices_cache, to_trim);
}
@@ -9344,7 +9550,7 @@ static void TrimDescriptorArray(Heap* heap,
int to_trim = number_of_descriptors - number_of_own_descriptors;
if (to_trim == 0) return;
- RightTrimFixedArray<FROM_GC>(
+ RightTrimFixedArray<Heap::FROM_GC>(
heap, descriptors, to_trim * DescriptorArray::kDescriptorSize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
@@ -9416,11 +9622,16 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
}
}
+ // Note that we never eliminate a transition array, though we might right-trim
+ // such that number_of_transitions() == 0. If this assumption changes,
+ // TransitionArray::CopyInsert() will need to deal with the case that a
+ // transition array disappeared during GC.
int trim = t->number_of_transitions() - transition_index;
if (trim > 0) {
- RightTrimFixedArray<FROM_GC>(heap, t, t->IsSimpleTransition()
+ RightTrimFixedArray<Heap::FROM_GC>(heap, t, t->IsSimpleTransition()
? trim : trim * TransitionArray::kTransitionSize);
}
+ ASSERT(HasTransitionArray());
}
@@ -9449,8 +9660,8 @@ static bool CheckEquivalent(Map* first, Map* second) {
first->instance_type() == second->instance_type() &&
first->bit_field() == second->bit_field() &&
first->bit_field2() == second->bit_field2() &&
- first->is_observed() == second->is_observed() &&
- first->function_with_prototype() == second->function_with_prototype();
+ first->is_frozen() == second->is_frozen() &&
+ first->has_instance_call_handler() == second->has_instance_call_handler();
}
@@ -9468,12 +9679,37 @@ bool Map::EquivalentToForNormalization(Map* other,
void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
- int first_ptr_offset = OffsetOfElementAt(first_ptr_index());
- int last_ptr_offset =
- OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries());
- v->VisitPointers(
- HeapObject::RawField(this, first_ptr_offset),
- HeapObject::RawField(this, last_ptr_offset));
+ ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR);
+ while (!code_iter.is_finished()) {
+ v->VisitCodeEntry(reinterpret_cast<Address>(
+ RawFieldOfElementAt(code_iter.next_index())));
+ }
+
+ ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR);
+ while (!heap_iter.is_finished()) {
+ v->VisitPointer(RawFieldOfElementAt(heap_iter.next_index()));
+ }
+}
+
+
+void ConstantPoolArray::ClearPtrEntries(Isolate* isolate) {
+ Type type[] = { CODE_PTR, HEAP_PTR };
+ Address default_value[] = {
+ isolate->builtins()->builtin(Builtins::kIllegal)->entry(),
+ reinterpret_cast<Address>(isolate->heap()->undefined_value()) };
+
+ for (int i = 0; i < 2; ++i) {
+ for (int s = 0; s <= final_section(); ++s) {
+ LayoutSection section = static_cast<LayoutSection>(s);
+ if (number_of_entries(type[i], section) > 0) {
+ int offset = OffsetOfElementAt(first_index(type[i], section));
+ MemsetPointer(
+ reinterpret_cast<Address*>(HeapObject::RawField(this, offset)),
+ default_value[i],
+ number_of_entries(type[i], section));
+ }
+ }
+ }
}
@@ -9486,19 +9722,19 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
}
-void JSFunction::MarkForLazyRecompilation() {
+void JSFunction::MarkForOptimization() {
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
ASSERT(!shared()->is_generator());
set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
+ GetIsolate()->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
}
-void JSFunction::MarkForConcurrentRecompilation() {
+void JSFunction::MarkForConcurrentOptimization() {
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
@@ -9510,16 +9746,16 @@ void JSFunction::MarkForConcurrentRecompilation() {
PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kConcurrentRecompile));
+ GetIsolate()->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
// No write barrier required, since the builtin is part of the root set.
}
-void JSFunction::MarkInRecompileQueue() {
+void JSFunction::MarkInOptimizationQueue() {
// We can only arrive here via the concurrent-recompilation builtin. If
// break points were set, the code would point to the lazy-compile builtin.
ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
- ASSERT(IsMarkedForConcurrentRecompilation() && !IsOptimized());
+ ASSERT(IsMarkedForConcurrentOptimization() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
@@ -9528,106 +9764,81 @@ void JSFunction::MarkInRecompileQueue() {
PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kInRecompileQueue));
+ GetIsolate()->builtins()->builtin(Builtins::kInOptimizationQueue));
// No write barrier required, since the builtin is part of the root set.
}
-static bool CompileLazyHelper(CompilationInfo* info,
- ClearExceptionFlag flag) {
- // Compile the source information to a code object.
- ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
- ASSERT(!info->isolate()->has_pending_exception());
- bool result = Compiler::CompileLazy(info);
- ASSERT(result != info->isolate()->has_pending_exception());
- if (!result && flag == CLEAR_EXCEPTION) {
- info->isolate()->clear_pending_exception();
- }
- return result;
-}
-
-
-bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag) {
- ASSERT(shared->allows_lazy_compilation_without_context());
- CompilationInfoWithZone info(shared);
- return CompileLazyHelper(&info, flag);
-}
-
-
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
- Handle<FixedArray> literals) {
- CALL_HEAP_FUNCTION_VOID(
- shared->GetIsolate(),
- shared->AddToOptimizedCodeMap(*native_context, *code, *literals));
-}
-
-
-MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
- Code* code,
- FixedArray* literals) {
+ Handle<FixedArray> literals,
+ BailoutId osr_ast_id) {
+ Isolate* isolate = shared->GetIsolate();
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(native_context->IsNativeContext());
- STATIC_ASSERT(kEntryLength == 3);
- Heap* heap = GetHeap();
- FixedArray* new_code_map;
- Object* value = optimized_code_map();
+ STATIC_ASSERT(kEntryLength == 4);
+ Handle<FixedArray> new_code_map;
+ Handle<Object> value(shared->optimized_code_map(), isolate);
+ int old_length;
if (value->IsSmi()) {
// No optimized code map.
- ASSERT_EQ(0, Smi::cast(value)->value());
+ ASSERT_EQ(0, Smi::cast(*value)->value());
// Create 3 entries per context {context, code, literals}.
- MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
- if (!maybe->To(&new_code_map)) return maybe;
- new_code_map->set(kEntriesStart + 0, native_context);
- new_code_map->set(kEntriesStart + 1, code);
- new_code_map->set(kEntriesStart + 2, literals);
+ new_code_map = isolate->factory()->NewFixedArray(kInitialLength);
+ old_length = kEntriesStart;
} else {
// Copy old map and append one new entry.
- FixedArray* old_code_map = FixedArray::cast(value);
- ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context));
- int old_length = old_code_map->length();
- int new_length = old_length + kEntryLength;
- MaybeObject* maybe = old_code_map->CopySize(new_length);
- if (!maybe->To(&new_code_map)) return maybe;
- new_code_map->set(old_length + 0, native_context);
- new_code_map->set(old_length + 1, code);
- new_code_map->set(old_length + 2, literals);
+ Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value);
+ ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*native_context, osr_ast_id));
+ old_length = old_code_map->length();
+ new_code_map = FixedArray::CopySize(
+ old_code_map, old_length + kEntryLength);
// Zap the old map for the sake of the heap verifier.
if (Heap::ShouldZapGarbage()) {
Object** data = old_code_map->data_start();
- MemsetPointer(data, heap->the_hole_value(), old_length);
+ MemsetPointer(data, isolate->heap()->the_hole_value(), old_length);
}
}
+ new_code_map->set(old_length + kContextOffset, *native_context);
+ new_code_map->set(old_length + kCachedCodeOffset, *code);
+ new_code_map->set(old_length + kLiteralsOffset, *literals);
+ new_code_map->set(old_length + kOsrAstIdOffset,
+ Smi::FromInt(osr_ast_id.ToInt()));
+
#ifdef DEBUG
for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
- ASSERT(new_code_map->get(i)->IsNativeContext());
- ASSERT(new_code_map->get(i + 1)->IsCode());
- ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
+ ASSERT(new_code_map->get(i + kContextOffset)->IsNativeContext());
+ ASSERT(new_code_map->get(i + kCachedCodeOffset)->IsCode());
+ ASSERT(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() ==
Code::OPTIMIZED_FUNCTION);
- ASSERT(new_code_map->get(i + 2)->IsFixedArray());
+ ASSERT(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
+ ASSERT(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
}
#endif
- set_optimized_code_map(new_code_map);
- return new_code_map;
+ shared->set_optimized_code_map(*new_code_map);
}
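// Resulting optimized code map layout, one kEntryLength == 4 group per
// cached entry (sketch):
//
//   [header up to kEntriesStart,
//    context_0, code_0, literals_0, osr_ast_id_0,
//    context_1, code_1, literals_1, osr_ast_id_1, ...]
//
// The osr ast id is stored as a Smi so lookups can distinguish the regular
// entry point (BailoutId::None()) from OSR-compiled code.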
-void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
- int index) {
+FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) {
ASSERT(index > kEntriesStart);
FixedArray* code_map = FixedArray::cast(optimized_code_map());
if (!bound()) {
FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
- ASSERT(cached_literals != NULL);
- function->set_literals(cached_literals);
+ ASSERT_NE(NULL, cached_literals);
+ return cached_literals;
}
+ return NULL;
+}
+
+
+Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) {
+ ASSERT(index > kEntriesStart);
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
Code* code = Code::cast(code_map->get(index));
- ASSERT(code != NULL);
- ASSERT(function->context()->native_context() == code_map->get(index - 1));
- function->ReplaceCode(code);
+ ASSERT_NE(NULL, code);
+ return code;
}
@@ -9648,35 +9859,45 @@ void SharedFunctionInfo::ClearOptimizedCodeMap() {
void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
const char* reason) {
+ DisallowHeapAllocation no_gc;
if (optimized_code_map()->IsSmi()) return;
- int i;
- bool removed_entry = false;
FixedArray* code_map = FixedArray::cast(optimized_code_map());
- for (i = kEntriesStart; i < code_map->length(); i += kEntryLength) {
- ASSERT(code_map->get(i)->IsNativeContext());
- if (Code::cast(code_map->get(i + 1)) == optimized_code) {
+ int dst = kEntriesStart;
+ int length = code_map->length();
+ for (int src = kEntriesStart; src < length; src += kEntryLength) {
+ ASSERT(code_map->get(src)->IsNativeContext());
+ if (Code::cast(code_map->get(src + kCachedCodeOffset)) == optimized_code) {
+ // Evict the src entry by not copying it to the dst entry.
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
ShortPrint();
- PrintF("]\n");
+ BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
+ if (osr.IsNone()) {
+ PrintF("]\n");
+ } else {
+ PrintF(" (osr ast id %d)]\n", osr.ToInt());
+ }
}
- removed_entry = true;
- break;
+ } else {
+ // Keep the src entry by copying it to the dst entry.
+ if (dst != src) {
+ code_map->set(dst + kContextOffset,
+ code_map->get(src + kContextOffset));
+ code_map->set(dst + kCachedCodeOffset,
+ code_map->get(src + kCachedCodeOffset));
+ code_map->set(dst + kLiteralsOffset,
+ code_map->get(src + kLiteralsOffset));
+ code_map->set(dst + kOsrAstIdOffset,
+ code_map->get(src + kOsrAstIdOffset));
+ }
+ dst += kEntryLength;
}
}
- while (i < (code_map->length() - kEntryLength)) {
- code_map->set(i, code_map->get(i + kEntryLength));
- code_map->set(i + 1, code_map->get(i + 1 + kEntryLength));
- code_map->set(i + 2, code_map->get(i + 2 + kEntryLength));
- i += kEntryLength;
- }
- if (removed_entry) {
+ if (dst != length) {
// Always trim even when array is cleared because of heap verifier.
- RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), code_map, kEntryLength);
- if (code_map->length() == kEntriesStart) {
- ClearOptimizedCodeMap();
- }
+ RightTrimFixedArray<Heap::FROM_MUTATOR>(GetHeap(), code_map, length - dst);
+ if (code_map->length() == kEntriesStart) ClearOptimizedCodeMap();
}
}
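// Eviction is now a single compaction pass: |src| visits every entry,
// |dst| trails behind and advances only past entries that are kept, and
// the leftover tail (length - dst slots) is right-trimmed. For example,
// evicting B from entries [A, B, C] copies C's four slots over B's and
// trims one kEntryLength group, leaving [A, C].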
@@ -9686,57 +9907,13 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
ASSERT(shrink_by % kEntryLength == 0);
ASSERT(shrink_by <= code_map->length() - kEntriesStart);
// Always trim even when array is cleared because of heap verifier.
- RightTrimFixedArray<FROM_GC>(GetHeap(), code_map, shrink_by);
+ RightTrimFixedArray<Heap::FROM_GC>(GetHeap(), code_map, shrink_by);
if (code_map->length() == kEntriesStart) {
ClearOptimizedCodeMap();
}
}
-bool JSFunction::CompileLazy(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
- bool result = true;
- if (function->shared()->is_compiled()) {
- function->ReplaceCode(function->shared()->code());
- } else {
- ASSERT(function->shared()->allows_lazy_compilation());
- CompilationInfoWithZone info(function);
- result = CompileLazyHelper(&info, flag);
- ASSERT(!result || function->is_compiled());
- }
- return result;
-}
-
-
-Handle<Code> JSFunction::CompileOsr(Handle<JSFunction> function,
- BailoutId osr_ast_id,
- ClearExceptionFlag flag) {
- CompilationInfoWithZone info(function);
- info.SetOptimizing(osr_ast_id);
- if (CompileLazyHelper(&info, flag)) {
- // TODO(titzer): don't install the OSR code.
- // ASSERT(function->code() != *info.code());
- return info.code();
- } else {
- return Handle<Code>::null();
- }
-}
-
-
-bool JSFunction::CompileOptimized(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
- CompilationInfoWithZone info(function);
- info.SetOptimizing(BailoutId::None());
- return CompileLazyHelper(&info, flag);
-}
-
-
-bool JSFunction::EnsureCompiled(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
- return function->is_compiled() || CompileLazy(function, flag);
-}
-
-
void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
if (object->IsGlobalObject()) return;
@@ -9748,50 +9925,41 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
}
-static MUST_USE_RESULT MaybeObject* CacheInitialJSArrayMaps(
- Context* native_context, Map* initial_map) {
+Handle<Object> CacheInitialJSArrayMaps(
+ Handle<Context> native_context, Handle<Map> initial_map) {
// Replace all of the cached initial array maps in the native context with
// the appropriate transitioned elements kind maps.
- Heap* heap = native_context->GetHeap();
- MaybeObject* maybe_maps =
- heap->AllocateFixedArrayWithHoles(kElementsKindCount, TENURED);
- FixedArray* maps;
- if (!maybe_maps->To(&maps)) return maybe_maps;
+ Factory* factory = native_context->GetIsolate()->factory();
+ Handle<FixedArray> maps = factory->NewFixedArrayWithHoles(
+ kElementsKindCount, TENURED);
- Map* current_map = initial_map;
+ Handle<Map> current_map = initial_map;
ElementsKind kind = current_map->elements_kind();
ASSERT(kind == GetInitialFastElementsKind());
- maps->set(kind, current_map);
+ maps->set(kind, *current_map);
for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
i < kFastElementsKindCount; ++i) {
- Map* new_map;
+ Handle<Map> new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
if (current_map->HasElementsTransition()) {
- new_map = current_map->elements_transition_map();
+ new_map = handle(current_map->elements_transition_map());
ASSERT(new_map->elements_kind() == next_kind);
} else {
- MaybeObject* maybe_new_map =
- current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ new_map = Map::CopyAsElementsKind(
+ current_map, next_kind, INSERT_TRANSITION);
}
- maps->set(next_kind, new_map);
+ maps->set(next_kind, *new_map);
current_map = new_map;
}
- native_context->set_js_array_maps(maps);
+ native_context->set_js_array_maps(*maps);
return initial_map;
}
-Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
- Handle<Map> initial_map) {
- CALL_HEAP_FUNCTION(native_context->GetIsolate(),
- CacheInitialJSArrayMaps(*native_context, *initial_map),
- Object);
-}
-
-
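The hunk above is part of the broader handlification of this file: the raw MaybeObject* allocation protocol, in which every allocation may return a retry failure that each caller must propagate by hand, gives way to Handle-based factory calls that perform GC-and-retry internally. A minimal side-by-side sketch of the two protocols, with types simplified and drawn only from the code above:

    // Old protocol: allocation yields a MaybeObject*; unwrap or bail out.
    MaybeObject* maybe_maps =
        heap->AllocateFixedArrayWithHoles(kElementsKindCount, TENURED);
    FixedArray* maps;
    if (!maybe_maps->To(&maps)) return maybe_maps;  // propagate the failure

    // New protocol: the factory retries the allocation (running GC if
    // needed) and returns a Handle, so no failure plumbing is required.
    Handle<FixedArray> maps = factory->NewFixedArrayWithHoles(
        kElementsKindCount, TENURED);

This is also why the CALL_HEAP_FUNCTION adapter deleted just above, which wrapped the raw version into a handle-returning one, is no longer needed.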
void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
Handle<Object> value) {
+ Isolate* isolate = function->GetIsolate();
+
ASSERT(value->IsJSReceiver());
// First some logic for the map of the prototype to make sure it is in fast
@@ -9807,10 +9975,11 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
// copy containing the new prototype. Also complete any in-object
// slack tracking that is in progress at this point because it is
// still tracking the old copy.
- if (function->shared()->IsInobjectSlackTrackingInProgress()) {
- function->shared()->CompleteInobjectSlackTracking();
+ if (function->IsInobjectSlackTrackingInProgress()) {
+ function->CompleteInobjectSlackTracking();
}
- Handle<Map> new_map = Map::Copy(handle(function->initial_map()));
+ Handle<Map> initial_map(function->initial_map(), isolate);
+ Handle<Map> new_map = Map::Copy(initial_map);
new_map->set_prototype(*value);
// If the function is used as the global Array function, cache the
@@ -9819,17 +9988,21 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
Object* array_function = native_context->get(Context::ARRAY_FUNCTION_INDEX);
if (array_function->IsJSFunction() &&
*function == JSFunction::cast(array_function)) {
- CacheInitialJSArrayMaps(handle(native_context), new_map);
+ CacheInitialJSArrayMaps(handle(native_context, isolate), new_map);
}
function->set_initial_map(*new_map);
+
+ // Deoptimize all code that embeds the previous initial map.
+ initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kInitialMapChangedGroup);
} else {
// Put the value in the initial map field until an initial map is
// needed. At that point, a new initial map is created and the
// prototype is put into the initial map where it belongs.
function->set_prototype_or_initial_map(*value);
}
- function->GetHeap()->ClearInstanceofCache();
+ isolate->heap()->ClearInstanceofCache();
}
@@ -9848,7 +10021,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
// different prototype.
Handle<Map> new_map = Map::Copy(handle(function->map()));
- function->set_map(*new_map);
+ JSObject::MigrateToMap(function, new_map);
new_map->set_constructor(*value);
new_map->set_non_instance_prototype(true);
Isolate* isolate = new_map->GetIsolate();
@@ -9863,20 +10036,25 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
}
-void JSFunction::RemovePrototype() {
+bool JSFunction::RemovePrototype() {
Context* native_context = context()->native_context();
- Map* no_prototype_map = shared()->is_classic_mode()
- ? native_context->function_without_prototype_map()
- : native_context->strict_mode_function_without_prototype_map();
+ Map* no_prototype_map = shared()->strict_mode() == SLOPPY
+ ? native_context->sloppy_function_without_prototype_map()
+ : native_context->strict_function_without_prototype_map();
- if (map() == no_prototype_map) return;
+ if (map() == no_prototype_map) return true;
- ASSERT(map() == (shared()->is_classic_mode()
- ? native_context->function_map()
- : native_context->strict_mode_function_map()));
+#ifdef DEBUG
+ if (map() != (shared()->strict_mode() == SLOPPY
+ ? native_context->sloppy_function_map()
+ : native_context->strict_function_map())) {
+ return false;
+ }
+#endif
set_map(no_prototype_map);
set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
+ return true;
}
@@ -9904,6 +10082,10 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
Handle<Object> prototype;
if (function->has_instance_prototype()) {
prototype = handle(function->instance_prototype(), isolate);
+ for (Handle<Object> p = prototype; !p->IsNull() && !p->IsJSProxy();
+ p = Object::GetPrototype(isolate, p)) {
+ JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(p));
+ }
} else {
prototype = isolate->factory()->NewFunctionPrototype(function);
}
@@ -9912,13 +10094,13 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
map->set_prototype(*prototype);
ASSERT(map->has_fast_object_elements());
- if (!function->shared()->is_generator()) {
- function->shared()->StartInobjectSlackTracking(*map);
- }
-
// Finally link initial map and constructor function.
function->set_initial_map(*map);
map->set_constructor(*function);
+
+ if (!function->shared()->is_generator()) {
+ function->StartInobjectSlackTracking();
+ }
}
@@ -9929,7 +10111,7 @@ void JSFunction::SetInstanceClassName(String* name) {
void JSFunction::PrintName(FILE* out) {
SmartArrayPointer<char> name = shared()->DebugName()->ToCString();
- PrintF(out, "%s", *name);
+ PrintF(out, "%s", name.get());
}
@@ -9951,11 +10133,18 @@ bool JSFunction::PassesFilter(const char* raw_filter) {
Vector<const char> filter = CStrVector(raw_filter);
if (filter.length() == 0) return name->length() == 0;
if (filter[0] == '-') {
+ // Negative filter.
if (filter.length() == 1) {
return (name->length() != 0);
- } else if (!name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
- return true;
+ } else if (name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ return false;
+ }
+ if (filter[filter.length() - 1] == '*' &&
+ name->IsUtf8EqualTo(filter.SubVector(1, filter.length() - 1), true)) {
+ return false;
}
+ return true;
+
} else if (name->IsUtf8EqualTo(filter)) {
return true;
}
@@ -9967,20 +10156,171 @@ bool JSFunction::PassesFilter(const char* raw_filter) {
}
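Worked examples of the filter grammar implemented above, covering the branches visible in this hunk (the remaining positive-match cases fall outside the hunk; the boolean argument to IsUtf8EqualTo is assumed to request a prefix match):

    // filter ""       -> only functions with an empty name pass
    // filter "-"      -> only functions with a non-empty name pass
    // filter "-foo"   -> every function except "foo" passes
    // filter "-foo*"  -> every function whose name does not start with
    //                    "foo" passes
    // filter "foo"    -> "foo" passes (exact match)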
-MaybeObject* Oddball::Initialize(Heap* heap,
- const char* to_string,
- Object* to_number,
- byte kind) {
- String* internalized_to_string;
- { MaybeObject* maybe_string =
- heap->InternalizeUtf8String(
- CStrVector(to_string));
- if (!maybe_string->To(&internalized_to_string)) return maybe_string;
+void Oddball::Initialize(Isolate* isolate,
+ Handle<Oddball> oddball,
+ const char* to_string,
+ Handle<Object> to_number,
+ byte kind) {
+ Handle<String> internalized_to_string =
+ isolate->factory()->InternalizeUtf8String(to_string);
+ oddball->set_to_string(*internalized_to_string);
+ oddball->set_to_number(*to_number);
+ oddball->set_kind(kind);
+}
+
+
+void Script::InitLineEnds(Handle<Script> script) {
+ if (!script->line_ends()->IsUndefined()) return;
+
+ Isolate* isolate = script->GetIsolate();
+
+ if (!script->source()->IsString()) {
+ ASSERT(script->source()->IsUndefined());
+ Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
+ script->set_line_ends(*empty);
+ ASSERT(script->line_ends()->IsFixedArray());
+ return;
+ }
+
+ Handle<String> src(String::cast(script->source()), isolate);
+
+ Handle<FixedArray> array = String::CalculateLineEnds(src, true);
+
+ if (*array != isolate->heap()->empty_fixed_array()) {
+ array->set_map(isolate->heap()->fixed_cow_array_map());
+ }
+
+ script->set_line_ends(*array);
+ ASSERT(script->line_ends()->IsFixedArray());
+}
+
+
+int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
+ int line_number = GetLineNumber(script, code_pos);
+ if (line_number == -1) return -1;
+
+ DisallowHeapAllocation no_allocation;
+ FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
+ line_number = line_number - script->line_offset()->value();
+ if (line_number == 0) return code_pos + script->column_offset()->value();
+ int prev_line_end_pos =
+ Smi::cast(line_ends_array->get(line_number - 1))->value();
+ return code_pos - (prev_line_end_pos + 1);
+}
+
+
+int Script::GetLineNumberWithArray(int code_pos) {
+ DisallowHeapAllocation no_allocation;
+ ASSERT(line_ends()->IsFixedArray());
+ FixedArray* line_ends_array = FixedArray::cast(line_ends());
+ int line_ends_len = line_ends_array->length();
+ if (line_ends_len == 0) return -1;
+
+ if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) {
+ return line_offset()->value();
}
- set_to_string(internalized_to_string);
- set_to_number(to_number);
- set_kind(kind);
- return this;
+
+ int left = 0;
+ int right = line_ends_len;
+ while (int half = (right - left) / 2) {
+ if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
+ right -= half;
+ } else {
+ left += half;
+ }
+ }
+ return right + line_offset()->value();
+}
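The loop above is a compact binary search over the sorted array of line-end positions: `right` converges on the first line whose end position lies beyond code_pos. A self-contained sketch of the same search, assuming a plain std::vector of end offsets (a hypothetical helper, not V8 API, with the line_offset adjustment omitted):

    #include <vector>

    // Returns the 0-based line containing `pos`, mirroring the search in
    // Script::GetLineNumberWithArray.
    int LineForPosition(const std::vector<int>& line_ends, int pos) {
      if (line_ends.empty()) return -1;
      if (line_ends[0] >= pos) return 0;
      int left = 0;
      int right = static_cast<int>(line_ends.size());
      while (int half = (right - left) / 2) {
        if (line_ends[left + half] > pos) {
          right -= half;  // the answer lies in the left half
        } else {
          left += half;   // the answer lies beyond left + half
        }
      }
      return right;  // first index whose line end is past pos
    }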
+
+
+int Script::GetLineNumber(Handle<Script> script, int code_pos) {
+ InitLineEnds(script);
+ return script->GetLineNumberWithArray(code_pos);
+}
+
+
+int Script::GetLineNumber(int code_pos) {
+ DisallowHeapAllocation no_allocation;
+ if (!line_ends()->IsUndefined()) return GetLineNumberWithArray(code_pos);
+
+  // Slow mode: we do not have line_ends, so we have to iterate through
+  // the source.
+ if (!source()->IsString()) return -1;
+
+ String* source_string = String::cast(source());
+ int line = 0;
+ int len = source_string->length();
+ for (int pos = 0; pos < len; pos++) {
+ if (pos == code_pos) break;
+ if (source_string->Get(pos) == '\n') line++;
+ }
+ return line;
+}
+
+
+Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
+ Isolate* isolate = script->GetIsolate();
+ Handle<String> name_or_source_url_key =
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("nameOrSourceURL"));
+ Handle<JSObject> script_wrapper = Script::GetWrapper(script);
+ Handle<Object> property = Object::GetProperty(
+ script_wrapper, name_or_source_url_key).ToHandleChecked();
+ ASSERT(property->IsJSFunction());
+ Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+ Handle<Object> result;
+ // Do not check against pending exception, since this function may be called
+  // when an exception is already pending.
+ if (!Execution::TryCall(method, script_wrapper, 0, NULL).ToHandle(&result)) {
+ return isolate->factory()->undefined_value();
+ }
+ return result;
+}
+
+
+// Wrappers for scripts are kept alive and cached in weak global
+// handles referenced from foreign objects held by the scripts as long
+// as they are used. Once they are no longer used, the garbage
+// collector will call the weak callback on the global handle
+// associated with the wrapper and get rid of both the wrapper and the
+// handle.
+static void ClearWrapperCache(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ Object** location = reinterpret_cast<Object**>(data.GetParameter());
+ JSValue* wrapper = JSValue::cast(*location);
+ Foreign* foreign = Script::cast(wrapper->value())->wrapper();
+ ASSERT_EQ(foreign->foreign_address(), reinterpret_cast<Address>(location));
+ foreign->set_foreign_address(0);
+ GlobalHandles::Destroy(location);
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ isolate->counters()->script_wrappers()->Decrement();
+}
+
+
+Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
+ if (script->wrapper()->foreign_address() != NULL) {
+ // Return a handle for the existing script wrapper from the cache.
+ return Handle<JSValue>(
+ *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
+ }
+ Isolate* isolate = script->GetIsolate();
+ // Construct a new script wrapper.
+ isolate->counters()->script_wrappers()->Increment();
+ Handle<JSFunction> constructor = isolate->script_function();
+ Handle<JSValue> result =
+ Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
+
+ result->set_value(*script);
+
+ // Create a new weak global handle and use it to cache the wrapper
+ // for future use. The cache will automatically be cleared by the
+ // garbage collector when it is not used anymore.
+ Handle<Object> handle = isolate->global_handles()->Create(*result);
+ GlobalHandles::MakeWeak(handle.location(),
+ reinterpret_cast<void*>(handle.location()),
+ &ClearWrapperCache);
+ script->wrapper()->set_foreign_address(
+ reinterpret_cast<Address>(handle.location()));
+ return result;
}
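A lifecycle sketch of the cache maintained above, using only names from this hunk:

    // 1. First GetWrapper(script): create the JSValue wrapper, register a
    //    weak global handle for it, and stash the handle's slot address in
    //    the script's Foreign 'wrapper' field.
    // 2. Later GetWrapper(script) calls: foreign_address() is non-NULL, so
    //    the cached wrapper is reloaded straight from that slot.
    // 3. Wrapper becomes unreachable: the GC runs ClearWrapperCache, which
    //    zeroes foreign_address() and destroys the global handle, so the
    //    next request starts again at step 1.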
@@ -10125,9 +10465,7 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
if (code()->kind() == Code::FUNCTION) {
code()->set_optimizable(false);
}
- PROFILE(GetIsolate(),
- LogExistingFunction(Handle<SharedFunctionInfo>(this),
- Handle<Code>(code())));
+ PROFILE(GetIsolate(), CodeDisableOptEvent(code(), this));
if (FLAG_trace_opt) {
PrintF("[disabled optimization for ");
ShortPrint();
@@ -10147,75 +10485,31 @@ bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
}
-void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
- ASSERT(!IsInobjectSlackTrackingInProgress());
+void JSFunction::StartInobjectSlackTracking() {
+ ASSERT(has_initial_map() && !IsInobjectSlackTrackingInProgress());
if (!FLAG_clever_optimizations) return;
+ Map* map = initial_map();
  // Only initiate tracking the first time.
- if (live_objects_may_exist()) return;
- set_live_objects_may_exist(true);
+ if (map->done_inobject_slack_tracking()) return;
+ map->set_done_inobject_slack_tracking(true);
// No tracking during the snapshot construction phase.
- if (Serializer::enabled()) return;
+ Isolate* isolate = GetIsolate();
+ if (isolate->serializer_enabled()) return;
if (map->unused_property_fields() == 0) return;
- // Nonzero counter is a leftover from the previous attempt interrupted
- // by GC, keep it.
- if (construction_count() == 0) {
- set_construction_count(kGenerousAllocationCount);
- }
- set_initial_map(map);
- Builtins* builtins = map->GetHeap()->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
- construct_stub());
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
-}
-
-
-// Called from GC, hence reinterpret_cast and unchecked accessors.
-void SharedFunctionInfo::DetachInitialMap() {
- Map* map = reinterpret_cast<Map*>(initial_map());
-
- // Make the map remember to restore the link if it survives the GC.
- map->set_bit_field2(
- map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo));
-
- // Undo state changes made by StartInobjectTracking (except the
- // construction_count). This way if the initial map does not survive the GC
- // then StartInobjectTracking will be called again the next time the
- // constructor is called. The countdown will continue and (possibly after
- // several more GCs) CompleteInobjectSlackTracking will eventually be called.
- Heap* heap = map->GetHeap();
- set_initial_map(heap->undefined_value());
- Builtins* builtins = heap->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
- *RawField(this, kConstructStubOffset));
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
- // It is safe to clear the flag: it will be set again if the map is live.
- set_live_objects_may_exist(false);
-}
-
-
-// Called from GC, hence reinterpret_cast and unchecked accessors.
-void SharedFunctionInfo::AttachInitialMap(Map* map) {
- map->set_bit_field2(
- map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo));
-
- // Resume inobject slack tracking.
- set_initial_map(map);
- Builtins* builtins = map->GetHeap()->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
- *RawField(this, kConstructStubOffset));
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
- // The map survived the gc, so there may be objects referencing it.
- set_live_objects_may_exist(true);
+ map->set_construction_count(kGenerousAllocationCount);
}
void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
code()->ClearInlineCaches();
+ // If we clear ICs, we need to clear the type feedback vector too, since
+ // CallICs are synced with a feedback vector slot.
+ ClearTypeFeedbackInfo();
set_ic_age(new_ic_age);
if (code()->kind() == Code::FUNCTION) {
code()->set_profiler_ticks(0);
@@ -10250,40 +10544,36 @@ static void ShrinkInstanceSize(Map* map, void* data) {
}
-void SharedFunctionInfo::CompleteInobjectSlackTracking() {
- ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
- Map* map = Map::cast(initial_map());
+void JSFunction::CompleteInobjectSlackTracking() {
+ ASSERT(has_initial_map());
+ Map* map = initial_map();
- Heap* heap = map->GetHeap();
- set_initial_map(heap->undefined_value());
- Builtins* builtins = heap->isolate()->builtins();
- ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
- construct_stub());
- set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
+ ASSERT(map->done_inobject_slack_tracking());
+ map->set_construction_count(kNoSlackTracking);
int slack = map->unused_property_fields();
map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
-
- // Give the correct expected_nof_properties to initial maps created later.
- ASSERT(expected_nof_properties() >= slack);
- set_expected_nof_properties(expected_nof_properties() - slack);
}
}
-int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
+int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
+ BailoutId osr_ast_id) {
+ DisallowHeapAllocation no_gc;
ASSERT(native_context->IsNativeContext());
if (!FLAG_cache_optimized_code) return -1;
Object* value = optimized_code_map();
if (!value->IsSmi()) {
FixedArray* optimized_code_map = FixedArray::cast(value);
int length = optimized_code_map->length();
+ Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
for (int i = kEntriesStart; i < length; i += kEntryLength) {
- if (optimized_code_map->get(i) == native_context) {
- return i + 1;
+ if (optimized_code_map->get(i + kContextOffset) == native_context &&
+ optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+ return i + kCachedCodeOffset;
}
}
if (FLAG_trace_opt) {
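Since entries are now addressed through symbolic offsets rather than a fixed context/code pair, a sketch of the assumed per-entry layout of the optimized code map (the offset constants themselves are defined in objects.h, outside this diff):

    // for (int i = kEntriesStart; i < length; i += kEntryLength):
    //   get(i + kContextOffset)    native context the code was optimized for
    //   get(i + kCachedCodeOffset) the cached optimized Code object
    //   get(i + kOsrAstIdOffset)   Smi-encoded BailoutId (None unless OSR)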
@@ -10383,11 +10673,15 @@ void Code::InvalidateRelocation() {
void Code::InvalidateEmbeddedObjects() {
Object* undefined = GetHeap()->undefined_value();
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ Cell* undefined_cell = GetHeap()->undefined_cell();
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+ } else if (mode == RelocInfo::CELL) {
+ it.rinfo()->set_target_cell(undefined_cell, SKIP_WRITE_BARRIER);
}
}
}
@@ -10395,7 +10689,7 @@ void Code::InvalidateEmbeddedObjects() {
void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
- it.rinfo()->apply(delta);
+ it.rinfo()->apply(delta, SKIP_ICACHE_FLUSH);
}
CPU::FlushICache(instruction_start(), instruction_size());
}
@@ -10427,26 +10721,28 @@ void Code::CopyFrom(const CodeDesc& desc) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Handle<Object> p = it.rinfo()->target_object_handle(origin);
- it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER);
+ it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (mode == RelocInfo::CELL) {
Handle<Cell> cell = it.rinfo()->target_cell_handle();
- it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER);
+ it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsCodeTarget(mode)) {
      // Rewrite code handles in inline cache targets to direct
      // pointers to the first instruction in the code object.
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
it.rinfo()->set_target_address(code->instruction_start(),
- SKIP_WRITE_BARRIER);
+ SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
- it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER);
+ it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} else if (mode == RelocInfo::CODE_AGE_SEQUENCE) {
Handle<Object> p = it.rinfo()->code_age_stub_handle(origin);
Code* code = Code::cast(*p);
- it.rinfo()->set_code_age_stub(code);
+ it.rinfo()->set_code_age_stub(code, SKIP_ICACHE_FLUSH);
} else {
- it.rinfo()->apply(delta);
+ it.rinfo()->apply(delta, SKIP_ICACHE_FLUSH);
}
}
CPU::FlushICache(instruction_start(), instruction_size());
@@ -10532,27 +10828,32 @@ Object* Code::FindNthObject(int n, Map* match_map) {
}
+AllocationSite* Code::FindFirstAllocationSite() {
+ Object* result = FindNthObject(1, GetHeap()->allocation_site_map());
+ return (result != NULL) ? AllocationSite::cast(result) : NULL;
+}
+
+
Map* Code::FindFirstMap() {
Object* result = FindNthObject(1, GetHeap()->meta_map());
return (result != NULL) ? Map::cast(result) : NULL;
}
-void Code::ReplaceNthObject(int n,
- Map* match_map,
- Object* replace_with) {
+void Code::FindAndReplace(const FindAndReplacePattern& pattern) {
ASSERT(is_inline_cache_stub() || is_handler());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ STATIC_ASSERT(FindAndReplacePattern::kMaxCount < 32);
+ int current_pattern = 0;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Object* object = info->target_object();
if (object->IsHeapObject()) {
- if (HeapObject::cast(object)->map() == match_map) {
- if (--n == 0) {
- info->set_target_object(replace_with);
- return;
- }
+ Map* map = HeapObject::cast(object)->map();
+ if (map == *pattern.find_[current_pattern]) {
+ info->set_target_object(*pattern.replace_[current_pattern]);
+ if (++current_pattern == pattern.count_) return;
}
}
}
@@ -10572,27 +10873,6 @@ void Code::FindAllMaps(MapHandleList* maps) {
}
-void Code::FindAllTypes(TypeHandleList* types) {
- ASSERT(is_inline_cache_stub());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- Isolate* isolate = GetIsolate();
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
- if (object->IsMap()) {
- Handle<Map> map(Map::cast(object));
- types->Add(handle(IC::MapToType(map), isolate));
- }
- }
-}
-
-
-void Code::ReplaceFirstMap(Map* replace_with) {
- ReplaceNthObject(1, GetHeap()->meta_map(), replace_with);
-}
-
-
Code* Code::FindFirstHandler() {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
@@ -10638,21 +10918,6 @@ Name* Code::FindFirstName() {
}
-void Code::ReplaceNthCell(int n, Cell* replace_with) {
- ASSERT(is_inline_cache_stub());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::CELL);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (--n == 0) {
- info->set_target_cell(replace_with);
- return;
- }
- }
- UNREACHABLE();
-}
-
-
void Code::ClearInlineCaches() {
ClearInlineCaches(NULL);
}
@@ -10666,32 +10931,39 @@ void Code::ClearInlineCaches(Code::Kind kind) {
void Code::ClearInlineCaches(Code::Kind* kind) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET_CONTEXT);
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
if (kind == NULL || *kind == target->kind()) {
- IC::Clear(this->GetIsolate(), info->pc());
+ IC::Clear(this->GetIsolate(), info->pc(),
+ info->host()->constant_pool());
}
}
}
}
-void Code::ClearTypeFeedbackCells(Heap* heap) {
- if (kind() != FUNCTION) return;
- Object* raw_info = type_feedback_info();
- if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackCells* type_feedback_cells =
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
- for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- Cell* cell = type_feedback_cells->GetCell(i);
- // Don't clear AllocationSites
- Object* value = cell->value();
- if (value == NULL || !value->IsAllocationSite()) {
- cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+void SharedFunctionInfo::ClearTypeFeedbackInfo() {
+ FixedArray* vector = feedback_vector();
+ Heap* heap = GetHeap();
+ int length = vector->length();
+
+ for (int i = 0; i < length; i++) {
+ Object* obj = vector->get(i);
+ if (obj->IsHeapObject()) {
+ InstanceType instance_type =
+ HeapObject::cast(obj)->map()->instance_type();
+ switch (instance_type) {
+ case ALLOCATION_SITE_TYPE:
+ // AllocationSites are not cleared because they do not store
+ // information that leaks.
+ break;
+ default:
+ vector->set(i, TypeFeedbackInfo::RawUninitializedSentinel(heap),
+ SKIP_WRITE_BARRIER);
}
}
}
@@ -10709,6 +10981,18 @@ BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
}
+uint32_t Code::TranslateAstIdToPcOffset(BailoutId ast_id) {
+ DisallowHeapAllocation no_gc;
+ ASSERT(kind() == FUNCTION);
+ BackEdgeTable back_edges(this, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (back_edges.ast_id(i) == ast_id) return back_edges.pc_offset(i);
+ }
+ UNREACHABLE(); // We expect to find the back edge.
+ return 0;
+}
+
+
void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY);
}
@@ -10737,10 +11021,11 @@ void Code::MakeOlder(MarkingParity current_parity) {
if (sequence != NULL) {
Age age;
MarkingParity code_parity;
- GetCodeAgeAndParity(sequence, &age, &code_parity);
+ Isolate* isolate = GetIsolate();
+ GetCodeAgeAndParity(isolate, sequence, &age, &code_parity);
age = EffectiveAge(age);
if (age != kLastCodeAge && code_parity != current_parity) {
- PatchPlatformCodeAge(GetIsolate(),
+ PatchPlatformCodeAge(isolate,
sequence,
static_cast<Age>(age + 1),
current_parity);
@@ -10776,7 +11061,7 @@ Code::Age Code::GetRawAge() {
}
Age age;
MarkingParity parity;
- GetCodeAgeAndParity(sequence, &age, &parity);
+ GetCodeAgeAndParity(GetIsolate(), sequence, &age, &parity);
return age;
}
@@ -11103,16 +11388,9 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
PrintF(out, "extra_ic_state = ");
const char* name = NULL;
switch (kind) {
- case CALL_IC:
- if (extra == STRING_INDEX_OUT_OF_BOUNDS) {
- name = "STRING_INDEX_OUT_OF_BOUNDS";
- }
- break;
case STORE_IC:
case KEYED_STORE_IC:
- if (extra == kStrictMode) {
- name = "STRICT";
- }
+ if (extra == STRICT) name = "STRICT";
break;
default:
break;
@@ -11133,14 +11411,10 @@ void Code::Disassemble(const char* name, FILE* out) {
}
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
- PrintExtraICState(out, kind(), needs_extended_extra_ic_state(kind()) ?
- extended_extra_ic_state() : extra_ic_state());
+ PrintExtraICState(out, kind(), extra_ic_state());
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", StubType2String(type()));
}
- if (is_call_stub() || is_keyed_call_stub()) {
- PrintF(out, "argc = %d\n", arguments_count());
- }
if (is_compare_ic_stub()) {
ASSERT(major_key() == CodeStub::CompareIC);
CompareIC::State left_state, right_state, handler_state;
@@ -11237,33 +11511,20 @@ Handle<FixedArray> JSObject::SetFastElementsCapacityAndLength(
int capacity,
int length,
SetFastElementsCapacitySmiMode smi_mode) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetFastElementsCapacityAndLength(capacity, length, smi_mode),
- FixedArray);
-}
-
-
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(
- int capacity,
- int length,
- SetFastElementsCapacitySmiMode smi_mode) {
- Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
+ ASSERT(!object->HasExternalArrayElements());
// Allocate a new fast elements backing store.
- FixedArray* new_elements;
- MaybeObject* maybe = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe->To(&new_elements)) return maybe;
+ Handle<FixedArray> new_elements =
+ object->GetIsolate()->factory()->NewUninitializedFixedArray(capacity);
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
ElementsKind new_elements_kind;
// The resized array has FAST_*_SMI_ELEMENTS if the capacity mode forces it,
// or if it's allowed and the old elements array contained only SMIs.
bool has_fast_smi_elements =
(smi_mode == kForceSmiElements) ||
- ((smi_mode == kAllowSmiElements) && HasFastSmiElements());
+ ((smi_mode == kAllowSmiElements) && object->HasFastSmiElements());
if (has_fast_smi_elements) {
if (IsHoleyElementsKind(elements_kind)) {
new_elements_kind = FAST_HOLEY_SMI_ELEMENTS;
@@ -11277,82 +11538,47 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
new_elements_kind = FAST_ELEMENTS;
}
}
- FixedArrayBase* old_elements = elements();
+ Handle<FixedArrayBase> old_elements(object->elements());
ElementsAccessor* accessor = ElementsAccessor::ForKind(new_elements_kind);
- MaybeObject* maybe_obj =
- accessor->CopyElements(this, new_elements, elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ accessor->CopyElements(object, new_elements, elements_kind);
- if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- Map* new_map = map();
- if (new_elements_kind != elements_kind) {
- MaybeObject* maybe =
- GetElementsTransitionMap(GetIsolate(), new_elements_kind);
- if (!maybe->To(&new_map)) return maybe;
- }
- ValidateElements();
- set_map_and_elements(new_map, new_elements);
+ if (elements_kind != SLOPPY_ARGUMENTS_ELEMENTS) {
+ Handle<Map> new_map = (new_elements_kind != elements_kind)
+ ? GetElementsTransitionMap(object, new_elements_kind)
+ : handle(object->map());
+ JSObject::ValidateElements(object);
+ JSObject::SetMapAndElements(object, new_map, new_elements);
// Transition through the allocation site as well if present.
- maybe_obj = UpdateAllocationSite(new_elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ JSObject::UpdateAllocationSite(object, new_elements_kind);
} else {
- FixedArray* parameter_map = FixedArray::cast(old_elements);
- parameter_map->set(1, new_elements);
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(old_elements);
+ parameter_map->set(1, *new_elements);
}
if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements,
- GetElementsKind(), new_elements);
+ PrintElementsTransition(stdout, object, elements_kind, old_elements,
+ object->GetElementsKind(), new_elements);
}
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(length));
+ if (object->IsJSArray()) {
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(length));
}
return new_elements;
}
-bool Code::IsWeakEmbeddedObject(Kind kind, Object* object) {
- if (kind != Code::OPTIMIZED_FUNCTION) return false;
-
- if (object->IsMap()) {
- return Map::cast(object)->CanTransition() &&
- FLAG_collect_maps &&
- FLAG_weak_embedded_maps_in_optimized_code;
- }
-
- if (object->IsJSObject()) {
- return FLAG_weak_embedded_objects_in_optimized_code;
- }
-
- return false;
-}
-
-
void JSObject::SetFastDoubleElementsCapacityAndLength(Handle<JSObject> object,
int capacity,
int length) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->SetFastDoubleElementsCapacityAndLength(capacity, length));
-}
-
-
-MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
- int capacity,
- int length) {
- Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
+ ASSERT(!object->HasExternalArrayElements());
- FixedArrayBase* elems;
- { MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedDoubleArray(capacity);
- if (!maybe_obj->To(&elems)) return maybe_obj;
- }
+ Handle<FixedArrayBase> elems =
+ object->GetIsolate()->factory()->NewFixedDoubleArray(capacity);
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
+ CHECK(elements_kind != SLOPPY_ARGUMENTS_ELEMENTS);
ElementsKind new_elements_kind = elements_kind;
if (IsHoleyElementsKind(elements_kind)) {
new_elements_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
@@ -11360,49 +11586,37 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
new_elements_kind = FAST_DOUBLE_ELEMENTS;
}
- Map* new_map;
- { MaybeObject* maybe_obj =
- GetElementsTransitionMap(heap->isolate(), new_elements_kind);
- if (!maybe_obj->To(&new_map)) return maybe_obj;
- }
+ Handle<Map> new_map = GetElementsTransitionMap(object, new_elements_kind);
- FixedArrayBase* old_elements = elements();
+ Handle<FixedArrayBase> old_elements(object->elements());
ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS);
- { MaybeObject* maybe_obj =
- accessor->CopyElements(this, elems, elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
- if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- ValidateElements();
- set_map_and_elements(new_map, elems);
- } else {
- FixedArray* parameter_map = FixedArray::cast(old_elements);
- parameter_map->set(1, elems);
- }
+ accessor->CopyElements(object, elems, elements_kind);
+
+ JSObject::ValidateElements(object);
+ JSObject::SetMapAndElements(object, new_map, elems);
if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements,
- GetElementsKind(), elems);
+ PrintElementsTransition(stdout, object, elements_kind, old_elements,
+ object->GetElementsKind(), elems);
}
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(length));
+ if (object->IsJSArray()) {
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(length));
}
-
- return this;
}
-MaybeObject* JSArray::Initialize(int capacity, int length) {
+// static
+void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
ASSERT(capacity >= 0);
- return GetHeap()->AllocateJSArrayStorage(this, length, capacity,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ array->GetIsolate()->factory()->NewJSArrayStorage(
+ array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}
-void JSArray::Expand(int required_size) {
- GetIsolate()->factory()->SetElementsCapacityAndLength(
- Handle<JSArray>(this), required_size, required_size);
+void JSArray::Expand(Handle<JSArray> array, int required_size) {
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ accessor->SetCapacityAndLength(array, required_size, required_size);
}
@@ -11414,12 +11628,17 @@ static bool GetOldValue(Isolate* isolate,
uint32_t index,
List<Handle<Object> >* old_values,
List<uint32_t>* indices) {
- PropertyAttributes attributes = object->GetLocalElementAttribute(index);
+ PropertyAttributes attributes =
+ JSReceiver::GetOwnElementAttribute(object, index);
ASSERT(attributes != ABSENT);
if (attributes == DONT_DELETE) return false;
- old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
- ? Object::GetElement(isolate, object, index)
- : Handle<Object>::cast(isolate->factory()->the_hole_value()));
+ Handle<Object> value;
+ if (!JSObject::GetOwnElementAccessorPair(object, index).is_null()) {
+ value = Handle<Object>::cast(isolate->factory()->the_hole_value());
+ } else {
+ value = Object::GetElement(isolate, object, index).ToHandleChecked();
+ }
+ old_values->Add(value);
indices->Add(index);
return true;
}
@@ -11437,12 +11656,11 @@ static void EnqueueSpliceRecord(Handle<JSArray> object,
Handle<Object> args[] =
{ object, index_object, deleted, add_count_object };
- bool threw;
Execution::Call(isolate,
Handle<JSFunction>(isolate->observers_enqueue_splice()),
- isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
- &threw);
- ASSERT(!threw);
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(args),
+ args).Assert();
}
@@ -11451,12 +11669,11 @@ static void BeginPerformSplice(Handle<JSArray> object) {
HandleScope scope(isolate);
Handle<Object> args[] = { object };
- bool threw;
Execution::Call(isolate,
Handle<JSFunction>(isolate->observers_begin_perform_splice()),
- isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
- &threw);
- ASSERT(!threw);
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(args),
+ args).Assert();
}
@@ -11465,76 +11682,78 @@ static void EndPerformSplice(Handle<JSArray> object) {
HandleScope scope(isolate);
Handle<Object> args[] = { object };
- bool threw;
Execution::Call(isolate,
Handle<JSFunction>(isolate->observers_end_perform_splice()),
- isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
- &threw);
- ASSERT(!threw);
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(args),
+ args).Assert();
}
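The three observer helpers above illustrate the new MaybeHandle call protocol that replaces the old bool* threw out-parameter. A sketch of the two ways call sites in this file consume it:

    // When a throw is impossible (as in the observer plumbing above),
    // assert success and discard the result:
    Execution::Call(isolate, fun, receiver, ARRAY_SIZE(args), args).Assert();

    // When a throw must propagate, unwrap explicitly:
    Handle<Object> result;
    if (!Execution::Call(isolate, fun, receiver, argc, args)
             .ToHandle(&result)) {
      // An exception is pending on the isolate; bail out here.
    }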
-MaybeObject* JSArray::SetElementsLength(Object* len) {
+MaybeHandle<Object> JSArray::SetElementsLength(
+ Handle<JSArray> array,
+ Handle<Object> new_length_handle) {
// We should never end in here with a pixel or external array.
- ASSERT(AllowsSetElementsLength());
- if (!(FLAG_harmony_observation && map()->is_observed()))
- return GetElementsAccessor()->SetLength(this, len);
+ ASSERT(array->AllowsSetElementsLength());
+ if (!array->map()->is_observed()) {
+ return array->GetElementsAccessor()->SetLength(array, new_length_handle);
+ }
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSArray> self(this);
+ Isolate* isolate = array->GetIsolate();
List<uint32_t> indices;
List<Handle<Object> > old_values;
- Handle<Object> old_length_handle(self->length(), isolate);
- Handle<Object> new_length_handle(len, isolate);
+ Handle<Object> old_length_handle(array->length(), isolate);
uint32_t old_length = 0;
CHECK(old_length_handle->ToArrayIndex(&old_length));
uint32_t new_length = 0;
- if (!new_length_handle->ToArrayIndex(&new_length))
- return Failure::InternalError();
+ CHECK(new_length_handle->ToArrayIndex(&new_length));
static const PropertyAttributes kNoAttrFilter = NONE;
- int num_elements = self->NumberOfLocalElements(kNoAttrFilter);
+ int num_elements = array->NumberOfOwnElements(kNoAttrFilter);
if (num_elements > 0) {
if (old_length == static_cast<uint32_t>(num_elements)) {
// Simple case for arrays without holes.
for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
- if (!GetOldValue(isolate, self, i, &old_values, &indices)) break;
+ if (!GetOldValue(isolate, array, i, &old_values, &indices)) break;
}
} else {
// For sparse arrays, only iterate over existing elements.
// TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over
// the to-be-removed indices twice.
Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
- self->GetLocalElementKeys(*keys, kNoAttrFilter);
+ array->GetOwnElementKeys(*keys, kNoAttrFilter);
while (num_elements-- > 0) {
uint32_t index = NumberToUint32(keys->get(num_elements));
if (index < new_length) break;
- if (!GetOldValue(isolate, self, index, &old_values, &indices)) break;
+ if (!GetOldValue(isolate, array, index, &old_values, &indices)) break;
}
}
}
- MaybeObject* result =
- self->GetElementsAccessor()->SetLength(*self, *new_length_handle);
Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, hresult,
+ array->GetElementsAccessor()->SetLength(array, new_length_handle),
+ Object);
- CHECK(self->length()->ToArrayIndex(&new_length));
- if (old_length == new_length) return *hresult;
+ CHECK(array->length()->ToArrayIndex(&new_length));
+ if (old_length == new_length) return hresult;
- BeginPerformSplice(self);
+ BeginPerformSplice(array);
for (int i = 0; i < indices.length(); ++i) {
+ // For deletions where the property was an accessor, old_values[i]
+ // will be the hole, which instructs EnqueueChangeRecord to elide
+ // the "oldValue" property.
JSObject::EnqueueChangeRecord(
- self, "delete", isolate->factory()->Uint32ToString(indices[i]),
+ array, "delete", isolate->factory()->Uint32ToString(indices[i]),
old_values[i]);
}
JSObject::EnqueueChangeRecord(
- self, "update", isolate->factory()->length_string(),
+ array, "update", isolate->factory()->length_string(),
old_length_handle);
- EndPerformSplice(self);
+ EndPerformSplice(array);
uint32_t index = Min(old_length, new_length);
uint32_t add_count = new_length > old_length ? new_length - old_length : 0;
@@ -11542,18 +11761,21 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
if (delete_count > 0) {
for (int i = indices.length() - 1; i >= 0; i--) {
- JSObject::SetElement(deleted, indices[i] - index, old_values[i], NONE,
- kNonStrictMode);
+ // Skip deletions where the property was an accessor, leaving holes
+ // in the array of old values.
+ if (old_values[i]->IsTheHole()) continue;
+ JSObject::SetElement(
+ deleted, indices[i] - index, old_values[i], NONE, SLOPPY).Assert();
}
SetProperty(deleted, isolate->factory()->length_string(),
isolate->factory()->NewNumberFromUint(delete_count),
- NONE, kNonStrictMode);
+ NONE, SLOPPY).Assert();
}
- EnqueueSpliceRecord(self, index, deleted, add_count);
+ EnqueueSpliceRecord(array, index, deleted, add_count);
- return *hresult;
+ return hresult;
}
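For observed arrays, the function above emits change records in a fixed order; a sketch of the resulting sequence, using the names from the code:

    // BeginPerformSplice(array)
    //   one "delete" record per removed own element (oldValue elided when
    //   the recorded value is the hole, i.e. the property was an accessor)
    //   one "update" record for the "length" property
    // EndPerformSplice(array)
    // EnqueueSpliceRecord(array, index, deleted, add_count)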
@@ -11594,13 +11816,9 @@ Handle<Map> Map::PutPrototypeTransition(Handle<Map> map,
if (capacity > kMaxCachedPrototypeTransitions) return map;
// Grow array by factor 2 over and above what we need.
- Factory* factory = map->GetIsolate()->factory();
- cache = factory->CopySizeFixedArray(cache, transitions * 2 * step + header);
+ cache = FixedArray::CopySize(cache, transitions * 2 * step + header);
- CALL_AND_RETRY_OR_DIE(map->GetIsolate(),
- map->SetPrototypeTransitions(*cache),
- break,
- return Handle<Map>());
+ SetPrototypeTransitions(map, cache);
}
// Reload number of transitions as GC might shrink them.
@@ -11636,21 +11854,42 @@ void Map::ZapPrototypeTransitions() {
}
-void Map::AddDependentCompilationInfo(DependentCode::DependencyGroup group,
+// static
+void Map::AddDependentCompilationInfo(Handle<Map> map,
+ DependentCode::DependencyGroup group,
CompilationInfo* info) {
- Handle<DependentCode> dep(dependent_code());
Handle<DependentCode> codes =
- DependentCode::Insert(dep, group, info->object_wrapper());
- if (*codes != dependent_code()) set_dependent_code(*codes);
- info->dependencies(group)->Add(Handle<HeapObject>(this), info->zone());
+ DependentCode::Insert(handle(map->dependent_code(), info->isolate()),
+ group, info->object_wrapper());
+ if (*codes != map->dependent_code()) map->set_dependent_code(*codes);
+ info->dependencies(group)->Add(map, info->zone());
}
-void Map::AddDependentCode(DependentCode::DependencyGroup group,
+// static
+void Map::AddDependentCode(Handle<Map> map,
+ DependentCode::DependencyGroup group,
Handle<Code> code) {
Handle<DependentCode> codes = DependentCode::Insert(
- Handle<DependentCode>(dependent_code()), group, code);
- if (*codes != dependent_code()) set_dependent_code(*codes);
+ Handle<DependentCode>(map->dependent_code()), group, code);
+ if (*codes != map->dependent_code()) map->set_dependent_code(*codes);
+}
+
+
+// static
+void Map::AddDependentIC(Handle<Map> map,
+ Handle<Code> stub) {
+ ASSERT(stub->next_code_link()->IsUndefined());
+ int n = map->dependent_code()->number_of_entries(DependentCode::kWeakICGroup);
+ if (n == 0) {
+    // Slow path: insert the head of the list with a possible heap allocation.
+ Map::AddDependentCode(map, DependentCode::kWeakICGroup, stub);
+ } else {
+ // Fast path: link the stub to the existing head of the list without any
+ // heap allocation.
+ ASSERT(n == 1);
+ map->dependent_code()->AddToDependentICList(stub);
+ }
}
@@ -11693,11 +11932,10 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
if (entries->object_at(i) == *object) return entries;
}
if (entries->length() < kCodesStartIndex + number_of_entries + 1) {
- Factory* factory = entries->GetIsolate()->factory();
int capacity = kCodesStartIndex + number_of_entries + 1;
if (capacity > 5) capacity = capacity * 5 / 4;
Handle<DependentCode> new_entries = Handle<DependentCode>::cast(
- factory->CopySizeFixedArray(entries, capacity, TENURED));
+ FixedArray::CopySize(entries, capacity, TENURED));
// The number of codes can change after GC.
starts.Recompute(*entries);
start = starts.at(group);
@@ -11784,10 +12022,22 @@ void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
}
+static bool CodeListContains(Object* head, Code* code) {
+ while (!head->IsUndefined()) {
+ if (head == code) return true;
+ head = Code::cast(head)->next_code_link();
+ }
+ return false;
+}
+
+
bool DependentCode::Contains(DependencyGroup group, Code* code) {
GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
+ if (group == kWeakICGroup) {
+ return CodeListContains(object_at(start), code);
+ }
for (int i = start; i < end; i++) {
if (object_at(i) == code) return true;
}
@@ -11795,16 +12045,15 @@ bool DependentCode::Contains(DependencyGroup group, Code* code) {
}
-void DependentCode::DeoptimizeDependentCodeGroup(
+bool DependentCode::MarkCodeForDeoptimization(
Isolate* isolate,
DependentCode::DependencyGroup group) {
- ASSERT(AllowCodeDependencyChange::IsAllowed());
DisallowHeapAllocation no_allocation_scope;
DependentCode::GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
int code_entries = starts.number_of_entries();
- if (start == end) return;
+ if (start == end) return false;
// Mark all the code that needs to be deoptimized.
bool marked = false;
@@ -11830,14 +12079,54 @@ void DependentCode::DeoptimizeDependentCodeGroup(
clear_at(i);
}
set_number_of_entries(group, 0);
+ return marked;
+}
+
+
+void DependentCode::DeoptimizeDependentCodeGroup(
+ Isolate* isolate,
+ DependentCode::DependencyGroup group) {
+ ASSERT(AllowCodeDependencyChange::IsAllowed());
+ DisallowHeapAllocation no_allocation_scope;
+ bool marked = MarkCodeForDeoptimization(isolate, group);
if (marked) Deoptimizer::DeoptimizeMarkedCode(isolate);
}
-Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
- Handle<Object> value,
- bool skip_hidden_prototypes) {
+void DependentCode::AddToDependentICList(Handle<Code> stub) {
+ DisallowHeapAllocation no_heap_allocation;
+ GroupStartIndexes starts(this);
+ int i = starts.at(kWeakICGroup);
+ Object* head = object_at(i);
+  // Try to insert the stub after the head of the list to minimize the number
+  // of writes to the DependentCode array, since a write to the array can make
+  // it strong if it was already marked by the incremental marker.
+ if (head->IsCode()) {
+ stub->set_next_code_link(Code::cast(head)->next_code_link());
+ Code::cast(head)->set_next_code_link(*stub);
+ } else {
+ stub->set_next_code_link(head);
+ set_object_at(i, *stub);
+ }
+}
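AddDependentIC (above) and AddToDependentICList together keep the kWeakICGroup as a single array slot heading an intrusive list; a sketch of the resulting shape, matching what CodeListContains walks:

    // dependent_code[start of kWeakICGroup]
    //     -> head stub -> next_code_link() -> stub -> ... -> undefined
    //
    // New stubs are spliced in right after the head, so only Code objects
    // are written and the (possibly weakly marked) array stays untouched.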
+
+
+Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
+ Handle<Object> prototype) {
+ Handle<Map> new_map = GetPrototypeTransition(map, prototype);
+ if (new_map.is_null()) {
+ new_map = Copy(map);
+ PutPrototypeTransition(map, prototype, new_map);
+ new_map->set_prototype(*prototype);
+ }
+ return new_map;
+}
+
+
+MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
+ Handle<Object> value,
+ bool skip_hidden_prototypes) {
#ifdef DEBUG
int size = object->Size();
#endif
@@ -11860,8 +12149,7 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
Handle<Object> args[] = { object };
Handle<Object> error = isolate->factory()->NewTypeError(
"non_extensible_proto", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
// Before we can set the prototype we need to be sure
@@ -11875,8 +12163,7 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
// Cycle detected.
Handle<Object> error = isolate->factory()->NewError(
"cyclic_proto", HandleVector<Object>(NULL, 0));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
}
@@ -11905,14 +12192,9 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
}
- Handle<Map> new_map = Map::GetPrototypeTransition(map, value);
- if (new_map.is_null()) {
- new_map = Map::Copy(map);
- Map::PutPrototypeTransition(map, value, new_map);
- new_map->set_prototype(*value);
- }
+ Handle<Map> new_map = Map::TransitionToPrototype(map, value);
ASSERT(new_map->prototype() == *value);
- real_receiver->set_map(*new_map);
+ JSObject::MigrateToMap(real_receiver, new_map);
if (!dictionary_elements_in_chain &&
new_map->DictionaryElementsInPrototypeChainOnly()) {
@@ -11928,57 +12210,62 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
}
-MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
- uint32_t first_arg,
- uint32_t arg_count,
- EnsureElementsMode mode) {
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Arguments* args,
+ uint32_t first_arg,
+ uint32_t arg_count,
+ EnsureElementsMode mode) {
// Elements in |Arguments| are ordered backwards (because they're on the
// stack), but the method that's called here iterates over them in forward
// direction.
return EnsureCanContainElements(
- args->arguments() - first_arg - (arg_count - 1),
- arg_count, mode);
+ object, args->arguments() - first_arg - (arg_count - 1), arg_count, mode);
}
-AccessorPair* JSObject::GetLocalPropertyAccessorPair(Name* name) {
+MaybeHandle<AccessorPair> JSObject::GetOwnPropertyAccessorPair(
+ Handle<JSObject> object,
+ Handle<Name> name) {
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- return GetLocalElementAccessorPair(index);
+ return GetOwnElementAccessorPair(object, index);
}
- LookupResult lookup(GetIsolate());
- LocalLookupRealNamedProperty(name, &lookup);
+ Isolate* isolate = object->GetIsolate();
+ LookupResult lookup(isolate);
+ object->LookupOwnRealNamedProperty(name, &lookup);
if (lookup.IsPropertyCallbacks() &&
lookup.GetCallbackObject()->IsAccessorPair()) {
- return AccessorPair::cast(lookup.GetCallbackObject());
+ return handle(AccessorPair::cast(lookup.GetCallbackObject()), isolate);
}
- return NULL;
+ return MaybeHandle<AccessorPair>();
}
-AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) {
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return NULL;
+MaybeHandle<AccessorPair> JSObject::GetOwnElementAccessorPair(
+ Handle<JSObject> object,
+ uint32_t index) {
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), object->GetIsolate());
+ if (proto->IsNull()) return MaybeHandle<AccessorPair>();
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetLocalElementAccessorPair(index);
+ return GetOwnElementAccessorPair(Handle<JSObject>::cast(proto), index);
}
// Check for lookup interceptor.
- if (HasIndexedInterceptor()) return NULL;
+ if (object->HasIndexedInterceptor()) return MaybeHandle<AccessorPair>();
- return GetElementsAccessor()->GetAccessorPair(this, this, index);
+ return object->GetElementsAccessor()->GetAccessorPair(object, object, index);
}
-Handle<Object> JSObject::SetElementWithInterceptor(
+MaybeHandle<Object> JSObject::SetElementWithInterceptor(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = object->GetIsolate();
@@ -11997,7 +12284,7 @@ Handle<Object> JSObject::SetElementWithInterceptor(
*object);
v8::Handle<v8::Value> result =
args.Call(setter, index, v8::Utils::ToLocal(value));
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) return value;
}
@@ -12008,76 +12295,72 @@ Handle<Object> JSObject::SetElementWithInterceptor(
}
-MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
- Object* structure,
- uint32_t index,
- Object* holder) {
- Isolate* isolate = GetIsolate();
+MaybeHandle<Object> JSObject::GetElementWithCallback(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Object> structure,
+ uint32_t index,
+ Handle<Object> holder) {
+ Isolate* isolate = object->GetIsolate();
ASSERT(!structure->IsForeign());
-
  // API-style callbacks.
if (structure->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> data(
- ExecutableAccessorInfo::cast(structure));
+ Handle<ExecutableAccessorInfo> data =
+ Handle<ExecutableAccessorInfo>::cast(structure);
Object* fun_obj = data->getter();
v8::AccessorGetterCallback call_fun =
v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
- if (call_fun == NULL) return isolate->heap()->undefined_value();
- HandleScope scope(isolate);
- Handle<JSObject> self(JSObject::cast(receiver));
- Handle<JSObject> holder_handle(JSObject::cast(holder));
+ if (call_fun == NULL) return isolate->factory()->undefined_value();
+ Handle<JSObject> holder_handle = Handle<JSObject>::cast(holder);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key = isolate->factory()->NumberToString(number);
- LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
+ LOG(isolate, ApiNamedPropertyAccess("load", *holder_handle, *key));
PropertyCallbackArguments
- args(isolate, data->data(), *self, *holder_handle);
+ args(isolate, data->data(), *receiver, *holder_handle);
v8::Handle<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(key));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.IsEmpty()) return isolate->heap()->undefined_value();
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (result.IsEmpty()) return isolate->factory()->undefined_value();
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox handle before return.
+ return handle(*result_internal, isolate);
}
// __defineGetter__ callback
if (structure->IsAccessorPair()) {
- Object* getter = AccessorPair::cast(structure)->getter();
+ Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(),
+ isolate);
if (getter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
+ return GetPropertyWithDefinedGetter(
+ receiver, Handle<JSReceiver>::cast(getter));
}
// Getter is not a function.
- return isolate->heap()->undefined_value();
+ return isolate->factory()->undefined_value();
}
if (structure->IsDeclaredAccessorInfo()) {
- return GetDeclaredAccessorProperty(receiver,
- DeclaredAccessorInfo::cast(structure),
- isolate);
+ return GetDeclaredAccessorProperty(
+ receiver, Handle<DeclaredAccessorInfo>::cast(structure), isolate);
}
UNREACHABLE();
- return NULL;
+ return MaybeHandle<Object>();
}
-Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
- Handle<Object> structure,
- uint32_t index,
- Handle<Object> value,
- Handle<JSObject> holder,
- StrictModeFlag strict_mode) {
+MaybeHandle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
+ Handle<Object> structure,
+ uint32_t index,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ StrictMode strict_mode) {
Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
-
- // To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually foreign
- // callbacks should be phased out.
ASSERT(!structure->IsForeign());
-
if (structure->IsExecutableAccessorInfo()) {
    // API-style callbacks.
Handle<ExecutableAccessorInfo> data =
@@ -12094,7 +12377,7 @@ Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
args.Call(call_fun,
v8::Utils::ToLocal(key),
v8::Utils::ToLocal(value));
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return value;
}
@@ -12105,15 +12388,12 @@ Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
return SetPropertyWithDefinedSetter(
object, Handle<JSReceiver>::cast(setter), value);
} else {
- if (strict_mode == kNonStrictMode) {
- return value;
- }
+ if (strict_mode == SLOPPY) return value;
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
Handle<Object> args[2] = { key, holder };
Handle<Object> error = isolate->factory()->NewTypeError(
"no_setter_in_callback", HandleVector(args, 2));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
}
@@ -12121,7 +12401,7 @@ Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
if (structure->IsDeclaredAccessorInfo()) return value;
UNREACHABLE();
- return Handle<Object>();
+ return MaybeHandle<Object>();
}
@@ -12129,7 +12409,7 @@ bool JSObject::HasFastArgumentsElements() {
Heap* heap = GetHeap();
if (!elements()->IsFixedArray()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ if (elements->map() != heap->sloppy_arguments_elements_map()) {
return false;
}
FixedArray* arguments = FixedArray::cast(elements->get(1));
@@ -12141,7 +12421,7 @@ bool JSObject::HasDictionaryArgumentsElements() {
Heap* heap = GetHeap();
if (!elements()->IsFixedArray()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ if (elements->map() != heap->sloppy_arguments_elements_map()) {
return false;
}
FixedArray* arguments = FixedArray::cast(elements->get(1));
@@ -12152,11 +12432,11 @@ bool JSObject::HasDictionaryArgumentsElements() {
// Adding n elements in the fast case is O(n*n).
// Note: revisit design to have dual undefined values to capture absent
// elements.
-Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode,
- bool check_prototype) {
+MaybeHandle<Object> JSObject::SetFastElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictMode strict_mode,
+ bool check_prototype) {
ASSERT(object->HasFastSmiOrObjectElements() ||
object->HasFastArgumentsElements());
@@ -12173,7 +12453,7 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
Handle<FixedArray> backing_store(FixedArray::cast(object->elements()));
if (backing_store->map() ==
- isolate->heap()->non_strict_arguments_elements_map()) {
+ isolate->heap()->sloppy_arguments_elements_map()) {
backing_store = handle(FixedArray::cast(backing_store->get(1)));
} else {
backing_store = EnsureWritableFastElements(object);
@@ -12183,7 +12463,7 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
if (check_prototype &&
(index >= capacity || backing_store->get(index)->IsTheHole())) {
bool found;
- Handle<Object> result = SetElementWithCallbackSetterInPrototypes(
+ MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes(
object, index, value, &found, strict_mode);
if (found) return result;
}
@@ -12242,7 +12522,7 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
SetFastDoubleElementsCapacityAndLength(object, new_capacity, array_length);
FixedDoubleArray::cast(object->elements())->set(index, value->Number());
- object->ValidateElements();
+ JSObject::ValidateElements(object);
return value;
}
// Change elements kind from Smi-only to generic FAST if necessary.
@@ -12253,7 +12533,7 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
UpdateAllocationSite(object, kind);
Handle<Map> new_map = GetElementsTransitionMap(object, kind);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
ASSERT(IsFastObjectElementsKind(object->GetElementsKind()));
}
// Increase backing store capacity if that's been decided previously.
@@ -12266,7 +12546,7 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
SetFastElementsCapacityAndLength(object, new_capacity, array_length,
smi_mode);
new_elements->set(index, *value);
- object->ValidateElements();
+ JSObject::ValidateElements(object);
return value;
}
@@ -12280,13 +12560,14 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
}
-Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
+MaybeHandle<Object> JSObject::SetDictionaryElement(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode) {
ASSERT(object->HasDictionaryElements() ||
object->HasDictionaryArgumentsElements());
Isolate* isolate = object->GetIsolate();
@@ -12294,7 +12575,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
// Insert element in the dictionary.
Handle<FixedArray> elements(FixedArray::cast(object->elements()));
bool is_arguments =
- (elements->map() == isolate->heap()->non_strict_arguments_elements_map());
+ (elements->map() == isolate->heap()->sloppy_arguments_elements_map());
Handle<SeededNumberDictionary> dictionary(is_arguments
? SeededNumberDictionary::cast(elements->get(1))
: SeededNumberDictionary::cast(*elements));
@@ -12316,7 +12597,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
attributes, NORMAL, details.dictionary_index());
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
- if (strict_mode == kNonStrictMode) {
+ if (strict_mode == SLOPPY) {
return isolate->factory()->undefined_value();
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
@@ -12324,8 +12605,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
Handle<Object> error =
isolate->factory()->NewTypeError("strict_read_only_property",
HandleVector(args, 2));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
}
// Elements of the arguments object in slow mode might be slow aliases.
@@ -12346,15 +12626,15 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
// Can cause GC!
if (check_prototype) {
bool found;
- Handle<Object> result = SetElementWithCallbackSetterInPrototypes(object,
- index, value, &found, strict_mode);
+ MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes(
+ object, index, value, &found, strict_mode);
if (found) return result;
}
// When we set the is_extensible flag to false we always force the
// elements into dictionary mode (and force them to stay there).
if (!object->map()->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
+ if (strict_mode == SLOPPY) {
return isolate->factory()->undefined_value();
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
@@ -12363,8 +12643,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
Handle<Object> error =
isolate->factory()->NewTypeError("object_not_extensible",
HandleVector(args, 1));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
}
@@ -12412,7 +12691,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
SetFastElementsCapacityAndLength(object, new_length, new_length,
smi_mode);
}
- object->ValidateElements();
+ JSObject::ValidateElements(object);
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object elements are fast case again:\n");
@@ -12423,11 +12702,11 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
return value;
}
-Handle<Object> JSObject::SetFastDoubleElement(
+MaybeHandle<Object> JSObject::SetFastDoubleElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype) {
ASSERT(object->HasFastDoubleElements());
@@ -12440,8 +12719,8 @@ Handle<Object> JSObject::SetFastDoubleElement(
(index >= elms_length ||
Handle<FixedDoubleArray>::cast(base_elms)->is_the_hole(index))) {
bool found;
- Handle<Object> result = SetElementWithCallbackSetterInPrototypes(object,
- index, value, &found, strict_mode);
+ MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes(
+ object, index, value, &found, strict_mode);
if (found) return result;
}
@@ -12460,11 +12739,12 @@ Handle<Object> JSObject::SetFastDoubleElement(
if (!value->IsNumber()) {
SetFastElementsCapacityAndLength(object, elms_length, length,
kDontAllowSmiElements);
- Handle<Object> result = SetFastElement(object, index, value, strict_mode,
- check_prototype);
- RETURN_IF_EMPTY_HANDLE_VALUE(object->GetIsolate(), result,
- Handle<Object>());
- object->ValidateElements();
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ object->GetIsolate(), result,
+ SetFastElement(object, index, value, strict_mode, check_prototype),
+ Object);
+ JSObject::ValidateElements(object);
return result;
}
@@ -12504,7 +12784,7 @@ Handle<Object> JSObject::SetFastDoubleElement(
ASSERT(static_cast<uint32_t>(new_capacity) > index);
SetFastDoubleElementsCapacityAndLength(object, new_capacity, index + 1);
FixedDoubleArray::cast(object->elements())->set(index, double_value);
- object->ValidateElements();
+ JSObject::ValidateElements(object);
return value;
}
}
@@ -12512,7 +12792,8 @@ Handle<Object> JSObject::SetFastDoubleElement(
// Otherwise default to slow case.
ASSERT(object->HasFastDoubleElements());
ASSERT(object->map()->has_fast_double_elements());
- ASSERT(object->elements()->IsFixedDoubleArray());
+ ASSERT(object->elements()->IsFixedDoubleArray() ||
+ object->elements()->length() == 0);
NormalizeElements(object);
ASSERT(object->HasDictionaryElements());
@@ -12520,11 +12801,11 @@ Handle<Object> JSObject::SetFastDoubleElement(
}
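// [Editor's sketch, not part of the diff: a plausible shape for the
// ASSIGN_RETURN_ON_EXCEPTION pattern used in SetFastDoubleElement above --
// evaluate a maybe-returning call, return early from the enclosing function
// if it is empty, otherwise bind the value. The macro and helpers here are
// hypothetical approximations using std::optional, not V8's definitions.]
#include <cstdio>
#include <optional>

#define MOCK_ASSIGN_RETURN_ON_EXCEPTION(dst, call)       \
  do {                                                   \
    auto maybe_result_ = (call);                         \
    if (!maybe_result_.has_value()) return std::nullopt; \
    (dst) = *maybe_result_;                              \
  } while (false)

std::optional<int> ParseDigit(char c) {
  if (c < '0' || c > '9') return std::nullopt;  // the "thrown exception"
  return c - '0';
}

std::optional<int> SumTwoDigits(const char* s) {
  int a = 0, b = 0;
  MOCK_ASSIGN_RETURN_ON_EXCEPTION(a, ParseDigit(s[0]));  // early-out on failure
  MOCK_ASSIGN_RETURN_ON_EXCEPTION(b, ParseDigit(s[1]));
  return a + b;
}

int main() {
  std::printf("%d\n", SumTwoDigits("42").value_or(-1));  // 6
  std::printf("%d\n", SumTwoDigits("4x").value_or(-1));  // -1 (propagated)
}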
-Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+MaybeHandle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode) {
if (object->IsJSProxy()) {
return JSProxy::SetElementWithHandler(
Handle<JSProxy>::cast(object), object, index, value, strict_mode);
@@ -12534,39 +12815,38 @@ Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
}
-Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode) {
+MaybeHandle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictMode strict_mode) {
ASSERT(!object->HasExternalArrayElements());
return JSObject::SetElement(object, index, value, NONE, strict_mode, false);
}
-Handle<Object> JSObject::SetElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
+MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode) {
Isolate* isolate = object->GetIsolate();
- if (object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
- bool has_exception;
- Handle<Object> number =
- Execution::ToNumber(isolate, value, &has_exception);
- if (has_exception) return Handle<Object>();
- value = number;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value,
+ Execution::ToNumber(isolate, value), Object);
}
}
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return value;
}
}
@@ -12582,13 +12862,14 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
}
// Don't allow element properties to be redefined for external arrays.
- if (object->HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
+ if ((object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) &&
+ set_mode == DEFINE_PROPERTY) {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[] = { object, number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
// Normalize the elements to enable attributes on the property.
@@ -12598,25 +12879,24 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
dictionary->set_requires_slow_elements();
}
- if (!(FLAG_harmony_observation && object->map()->is_observed())) {
+ if (!object->map()->is_observed()) {
return object->HasIndexedInterceptor()
- ? SetElementWithInterceptor(object, index, value, attributes, strict_mode,
- check_prototype,
- set_mode)
+ ? SetElementWithInterceptor(object, index, value, attributes,
+ strict_mode, check_prototype, set_mode)
: SetElementWithoutInterceptor(object, index, value, attributes,
- strict_mode,
- check_prototype,
- set_mode);
+ strict_mode, check_prototype, set_mode);
}
- PropertyAttributes old_attributes = object->GetLocalElementAttribute(index);
+ PropertyAttributes old_attributes =
+ JSReceiver::GetOwnElementAttribute(object, index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
Handle<Object> old_length_handle;
Handle<Object> new_length_handle;
if (old_attributes != ABSENT) {
- if (object->GetLocalElementAccessorPair(index) == NULL)
- old_value = Object::GetElement(isolate, object, index);
+ if (GetOwnElementAccessorPair(object, index).is_null()) {
+ old_value = Object::GetElement(isolate, object, index).ToHandleChecked();
+ }
} else if (object->IsJSArray()) {
// Store old array length in case adding an element grows the array.
old_length_handle = handle(Handle<JSArray>::cast(object)->length(),
@@ -12624,18 +12904,20 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
}
// Check for lookup interceptor
- Handle<Object> result = object->HasIndexedInterceptor()
- ? SetElementWithInterceptor(object, index, value, attributes, strict_mode,
- check_prototype,
- set_mode)
- : SetElementWithoutInterceptor(object, index, value, attributes,
- strict_mode,
- check_prototype,
- set_mode);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ object->HasIndexedInterceptor()
+ ? SetElementWithInterceptor(
+ object, index, value, attributes,
+ strict_mode, check_prototype, set_mode)
+ : SetElementWithoutInterceptor(
+ object, index, value, attributes,
+ strict_mode, check_prototype, set_mode),
+ Object);
Handle<String> name = isolate->factory()->Uint32ToString(index);
- PropertyAttributes new_attributes = object->GetLocalElementAttribute(index);
+ PropertyAttributes new_attributes = GetOwnElementAttribute(object, index);
if (old_attributes == ABSENT) {
if (object->IsJSArray() &&
!old_length_handle->SameValue(
@@ -12661,7 +12943,8 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
} else if (old_value->IsTheHole()) {
EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else {
- Handle<Object> new_value = Object::GetElement(isolate, object, index);
+ Handle<Object> new_value =
+ Object::GetElement(isolate, object, index).ToHandleChecked();
bool value_changed = !old_value->SameValue(*new_value);
if (old_attributes != new_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
@@ -12675,12 +12958,12 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
}
-Handle<Object> JSObject::SetElementWithoutInterceptor(
+MaybeHandle<Object> JSObject::SetElementWithoutInterceptor(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
ASSERT(object->HasDictionaryElements() ||
@@ -12689,12 +12972,20 @@ Handle<Object> JSObject::SetElementWithoutInterceptor(
Isolate* isolate = object->GetIsolate();
if (FLAG_trace_external_array_abuse &&
IsExternalArrayElementsKind(object->GetElementsKind())) {
- CheckArrayAbuse(*object, "external elements write", index);
+ CheckArrayAbuse(object, "external elements write", index);
}
if (FLAG_trace_js_array_abuse &&
!IsExternalArrayElementsKind(object->GetElementsKind())) {
if (object->IsJSArray()) {
- CheckArrayAbuse(*object, "elements write", index, true);
+ CheckArrayAbuse(object, "elements write", index, true);
+ }
+ }
+ if (object->IsJSArray() && JSArray::WouldChangeReadOnlyLength(
+ Handle<JSArray>::cast(object), index)) {
+ if (strict_mode == SLOPPY) {
+ return value;
+ } else {
+ return JSArray::ReadOnlyLengthError(Handle<JSArray>::cast(object));
}
}
switch (object->GetElementsKind()) {
@@ -12707,55 +12998,28 @@ Handle<Object> JSObject::SetElementWithoutInterceptor(
case FAST_HOLEY_DOUBLE_ELEMENTS:
return SetFastDoubleElement(object, index, value, strict_mode,
check_prototype);
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(object->elements());
- return handle(pixels->SetValue(index, *value), isolate);
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- Handle<ExternalByteArray> array(
- ExternalByteArray::cast(object->elements()));
- return ExternalByteArray::SetValue(array, index, value);
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- Handle<ExternalUnsignedByteArray> array(
- ExternalUnsignedByteArray::cast(object->elements()));
- return ExternalUnsignedByteArray::SetValue(array, index, value);
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- Handle<ExternalShortArray> array(ExternalShortArray::cast(
- object->elements()));
- return ExternalShortArray::SetValue(array, index, value);
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- Handle<ExternalUnsignedShortArray> array(
- ExternalUnsignedShortArray::cast(object->elements()));
- return ExternalUnsignedShortArray::SetValue(array, index, value);
- }
- case EXTERNAL_INT_ELEMENTS: {
- Handle<ExternalIntArray> array(
- ExternalIntArray::cast(object->elements()));
- return ExternalIntArray::SetValue(array, index, value);
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- Handle<ExternalUnsignedIntArray> array(
- ExternalUnsignedIntArray::cast(object->elements()));
- return ExternalUnsignedIntArray::SetValue(array, index, value);
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- Handle<ExternalFloatArray> array(
- ExternalFloatArray::cast(object->elements()));
- return ExternalFloatArray::SetValue(array, index, value);
- }
- case EXTERNAL_DOUBLE_ELEMENTS: {
- Handle<ExternalDoubleArray> array(
- ExternalDoubleArray::cast(object->elements()));
- return ExternalDoubleArray::SetValue(array, index, value);
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: { \
+ Handle<External##Type##Array> array( \
+ External##Type##Array::cast(object->elements())); \
+ return External##Type##Array::SetValue(array, index, value); \
+ } \
+ case TYPE##_ELEMENTS: { \
+ Handle<Fixed##Type##Array> array( \
+ Fixed##Type##Array::cast(object->elements())); \
+ return Fixed##Type##Array::SetValue(array, index, value); \
}
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+
+#undef TYPED_ARRAY_CASE
+
case DICTIONARY_ELEMENTS:
return SetDictionaryElement(object, index, value, attributes, strict_mode,
check_prototype,
set_mode);
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
uint32_t length = parameter_map->length();
Handle<Object> probe = index < length - 2 ?
@@ -12794,20 +13058,27 @@ Handle<Object> JSObject::SetElementWithoutInterceptor(
}
-void JSObject::TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->TransitionElementsKind(to_kind));
+const double AllocationSite::kPretenureRatio = 0.85;
+
+
+void AllocationSite::ResetPretenureDecision() {
+ set_pretenure_decision(kUndecided);
+ set_memento_found_count(0);
+ set_memento_create_count(0);
}
-const double AllocationSite::kPretenureRatio = 0.60;
+PretenureFlag AllocationSite::GetPretenureMode() {
+ PretenureDecision mode = pretenure_decision();
+ // Zombie objects "decide" to be untenured.
+ return mode == kTenure ? TENURED : NOT_TENURED;
+}
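// [Editor's sketch, not part of the diff: kPretenureRatio (raised to 0.85
// above) feeds a found/created memento ratio test that is made elsewhere in
// the heap code, not in the lines shown here. The model below is a
// hypothetical stand-alone rendering of that decision, reusing the
// enumerator names from PretenureDecisionName further down.]
#include <cstdio>

enum MockPretenureDecision { kUndecided, kDontTenure, kMaybeTenure, kTenure, kZombie };
const double kMockPretenureRatio = 0.85;

MockPretenureDecision Decide(int mementos_found, int mementos_created) {
  if (mementos_created == 0) return kUndecided;
  double ratio = static_cast<double>(mementos_found) / mementos_created;
  return ratio >= kMockPretenureRatio ? kTenure : kDontTenure;
}

int main() {
  std::printf("%d\n", Decide(90, 100));  // 0.90 >= 0.85 -> kTenure (3)
  std::printf("%d\n", Decide(10, 100));  // 0.10 <  0.85 -> kDontTenure (1)
}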
bool AllocationSite::IsNestedSite() {
ASSERT(FLAG_trace_track_allocation_sites);
Object* current = GetHeap()->allocation_sites_list();
- while (current != NULL && current->IsAllocationSite()) {
+ while (current->IsAllocationSite()) {
AllocationSite* current_site = AllocationSite::cast(current);
if (current_site->nested_site() == this) {
return true;
@@ -12818,11 +13089,13 @@ bool AllocationSite::IsNestedSite() {
}
-MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
- Isolate* isolate = GetIsolate();
+void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
+ ElementsKind to_kind) {
+ Isolate* isolate = site->GetIsolate();
- if (SitePointsToLiteral() && transition_info()->IsJSArray()) {
- JSArray* transition_info = JSArray::cast(this->transition_info());
+ if (site->SitePointsToLiteral() && site->transition_info()->IsJSArray()) {
+ Handle<JSArray> transition_info =
+ handle(JSArray::cast(site->transition_info()));
ElementsKind kind = transition_info->GetElementsKind();
// If kind is holey, ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
@@ -12835,22 +13108,21 @@ MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
CHECK(transition_info->length()->ToArrayIndex(&length));
if (length <= kMaximumArrayBytesToPretransition) {
if (FLAG_trace_track_allocation_sites) {
- bool is_nested = IsNestedSite();
+ bool is_nested = site->IsNestedSite();
PrintF(
"AllocationSite: JSArray %p boilerplate %s updated %s->%s\n",
- reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(*site),
is_nested ? "(nested)" : "",
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- MaybeObject* result = transition_info->TransitionElementsKind(to_kind);
- if (result->IsFailure()) return result;
- dependent_code()->DeoptimizeDependentCodeGroup(
+ JSObject::TransitionElementsKind(transition_info, to_kind);
+ site->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
} else {
- ElementsKind kind = GetElementsKind();
+ ElementsKind kind = site->GetElementsKind();
// If kind is holey, ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
@@ -12858,77 +13130,81 @@ MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
- reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(*site),
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- SetElementsKind(to_kind);
- dependent_code()->DeoptimizeDependentCodeGroup(
+ site->SetElementsKind(to_kind);
+ site->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
- return this;
}
-void AllocationSite::AddDependentCompilationInfo(Reason reason,
+// static
+void AllocationSite::AddDependentCompilationInfo(Handle<AllocationSite> site,
+ Reason reason,
CompilationInfo* info) {
- DependentCode::DependencyGroup group = ToDependencyGroup(reason);
- Handle<DependentCode> dep(dependent_code());
+ DependentCode::DependencyGroup group = site->ToDependencyGroup(reason);
+ Handle<DependentCode> dep(site->dependent_code());
Handle<DependentCode> codes =
DependentCode::Insert(dep, group, info->object_wrapper());
- if (*codes != dependent_code()) set_dependent_code(*codes);
- info->dependencies(group)->Add(Handle<HeapObject>(this), info->zone());
+ if (*codes != site->dependent_code()) site->set_dependent_code(*codes);
+ info->dependencies(group)->Add(Handle<HeapObject>(*site), info->zone());
}
-void AllocationSite::AddDependentCode(Reason reason, Handle<Code> code) {
- DependentCode::DependencyGroup group = ToDependencyGroup(reason);
- Handle<DependentCode> codes = DependentCode::Insert(
- Handle<DependentCode>(dependent_code()), group, code);
- if (*codes != dependent_code()) set_dependent_code(*codes);
+const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
+ switch (decision) {
+ case kUndecided: return "undecided";
+ case kDontTenure: return "don't tenure";
+ case kMaybeTenure: return "maybe tenure";
+ case kTenure: return "tenure";
+ case kZombie: return "zombie";
+ default: UNREACHABLE();
+ }
+ return NULL;
}
void JSObject::UpdateAllocationSite(Handle<JSObject> object,
ElementsKind to_kind) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->UpdateAllocationSite(to_kind));
-}
+ if (!object->IsJSArray()) return;
+ Heap* heap = object->GetHeap();
+ if (!heap->InNewSpace(*object)) return;
-MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
- if (!FLAG_track_allocation_sites || !IsJSArray()) {
- return this;
- }
+ Handle<AllocationSite> site;
+ {
+ DisallowHeapAllocation no_allocation;
- AllocationMemento* memento = AllocationMemento::FindForJSObject(this);
- if (memento == NULL || !memento->IsValid()) {
- return this;
- }
+ AllocationMemento* memento = heap->FindAllocationMemento(*object);
+ if (memento == NULL) return;
- // Walk through to the Allocation Site
- AllocationSite* site = memento->GetAllocationSite();
- return site->DigestTransitionFeedback(to_kind);
+ // Walk through to the Allocation Site
+ site = handle(memento->GetAllocationSite());
+ }
+ AllocationSite::DigestTransitionFeedback(site, to_kind);
}
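// [Editor's sketch, not part of the diff: UpdateAllocationSite above touches
// a raw AllocationMemento* only inside a DisallowHeapAllocation scope, then
// reboxes the site into a handle before GC can move anything. GcGuard below
// is a hypothetical RAII stand-in illustrating the same discipline.]
#include <cassert>

struct GcGuard {
  static int depth;           // >0 means allocation/GC is forbidden
  GcGuard() { ++depth; }
  ~GcGuard() { --depth; }
};
int GcGuard::depth = 0;

void MaybeCollectGarbage() {
  assert(GcGuard::depth == 0 && "GC triggered inside a no-allocation scope");
}

int main() {
  {
    GcGuard no_allocation;    // raw-pointer work is only safe in here
    // MaybeCollectGarbage(); // would assert if called inside this scope
  }
  MaybeCollectGarbage();      // legal again once the scope has closed
}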
-MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
- ElementsKind from_kind = map()->elements_kind();
+void JSObject::TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ ElementsKind from_kind = object->map()->elements_kind();
if (IsFastHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (from_kind == to_kind) return this;
+ if (from_kind == to_kind) return;
// Don't update the site if to_kind isn't fast
if (IsFastElementsKind(to_kind)) {
- MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ UpdateAllocationSite(object, to_kind);
}
- Isolate* isolate = GetIsolate();
- if (elements() == isolate->heap()->empty_fixed_array() ||
+ Isolate* isolate = object->GetIsolate();
+ if (object->elements() == isolate->heap()->empty_fixed_array() ||
(IsFastSmiOrObjectElementsKind(from_kind) &&
IsFastSmiOrObjectElementsKind(to_kind)) ||
(from_kind == FAST_DOUBLE_ELEMENTS &&
@@ -12936,54 +13212,48 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
// No change is needed to the elements() buffer; the transition
// only requires a map change.
- MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
- Map* new_map;
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- set_map(new_map);
+ Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
+ MigrateToMap(object, new_map);
if (FLAG_trace_elements_transitions) {
- FixedArrayBase* elms = FixedArrayBase::cast(elements());
- PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
+ Handle<FixedArrayBase> elms(object->elements());
+ PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
}
- return this;
+ return;
}
- FixedArrayBase* elms = FixedArrayBase::cast(elements());
+ Handle<FixedArrayBase> elms(object->elements());
uint32_t capacity = static_cast<uint32_t>(elms->length());
uint32_t length = capacity;
- if (IsJSArray()) {
- Object* raw_length = JSArray::cast(this)->length();
+ if (object->IsJSArray()) {
+ Object* raw_length = Handle<JSArray>::cast(object)->length();
if (raw_length->IsUndefined()) {
// If length is undefined, then JSArray is being initialized and has no
// elements; assume a length of zero.
length = 0;
} else {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ CHECK(raw_length->ToArrayIndex(&length));
}
}
if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
- MaybeObject* maybe_result =
- SetFastDoubleElementsCapacityAndLength(capacity, length);
- if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
- return this;
+ SetFastDoubleElementsCapacityAndLength(object, capacity, length);
+ JSObject::ValidateElements(object);
+ return;
}
if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
- MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
- capacity, length, kDontAllowSmiElements);
- if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
- return this;
+ SetFastElementsCapacityAndLength(object, capacity, length,
+ kDontAllowSmiElements);
+ JSObject::ValidateElements(object);
+ return;
}
// This method should never be called for any other case than the ones
// handled above.
UNREACHABLE();
- return GetIsolate()->heap()->null_value();
}
@@ -13004,69 +13274,91 @@ bool Map::IsValidElementsTransition(ElementsKind from_kind,
void JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray> array,
uint32_t index,
Handle<Object> value) {
- CALL_HEAP_FUNCTION_VOID(array->GetIsolate(),
- array->JSArrayUpdateLengthFromIndex(index, *value));
-}
-
-
-MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
- Object* value) {
uint32_t old_len = 0;
- CHECK(length()->ToArrayIndex(&old_len));
+ CHECK(array->length()->ToArrayIndex(&old_len));
// Check to see if we need to update the length. For now, we make
// sure that the length stays within 32 bits (unsigned).
if (index >= old_len && index != 0xffffffff) {
- Object* len;
- { MaybeObject* maybe_len =
- GetHeap()->NumberFromDouble(static_cast<double>(index) + 1);
- if (!maybe_len->ToObject(&len)) return maybe_len;
- }
- set_length(len);
+ Handle<Object> len = array->GetIsolate()->factory()->NewNumber(
+ static_cast<double>(index) + 1);
+ array->set_length(*len);
}
- return value;
}
-MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
- uint32_t index) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+bool JSArray::IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
+ Isolate* isolate = jsarray_map->GetIsolate();
+ ASSERT(!jsarray_map->is_dictionary_map());
+ LookupResult lookup(isolate);
+ Handle<Name> length_string = isolate->factory()->length_string();
+ jsarray_map->LookupDescriptor(NULL, *length_string, &lookup);
+ return lookup.IsReadOnly();
+}
+
+
+bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
+ uint32_t index) {
+ uint32_t length = 0;
+ CHECK(array->length()->ToArrayIndex(&length));
+ if (length <= index) {
+ Isolate* isolate = array->GetIsolate();
+ LookupResult lookup(isolate);
+ Handle<Name> length_string = isolate->factory()->length_string();
+ array->LookupOwnRealNamedProperty(length_string, &lookup);
+ return lookup.IsReadOnly();
+ }
+ return false;
+}
+
+
+MaybeHandle<Object> JSArray::ReadOnlyLengthError(Handle<JSArray> array) {
+ Isolate* isolate = array->GetIsolate();
+ Handle<Name> length = isolate->factory()->length_string();
+ Handle<Object> args[2] = { length, array };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ return isolate->Throw<Object>(error);
+}
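// [Editor's sketch, not part of the diff: the WouldChangeReadOnlyLength /
// ReadOnlyLengthError pair above means a write that would grow an array past
// a read-only "length" throws in strict mode and is silently dropped in
// sloppy mode. MockArray and TrySet are hypothetical stand-ins.]
#include <cassert>

struct MockArray {
  unsigned length;
  bool length_read_only;
};
enum MockStrictMode { MOCK_SLOPPY, MOCK_STRICT };

bool WouldChangeReadOnlyLength(const MockArray& a, unsigned index) {
  return a.length <= index && a.length_read_only;
}

// Returns true when no exception results (the write may still be a no-op).
bool TrySet(MockArray& a, unsigned index, MockStrictMode mode) {
  if (WouldChangeReadOnlyLength(a, index)) {
    return mode == MOCK_SLOPPY;        // sloppy: ignored; strict: TypeError
  }
  if (index >= a.length) a.length = index + 1;
  return true;
}

int main() {
  MockArray a{3, true};
  assert(TrySet(a, 1, MOCK_STRICT));   // within bounds: fine
  assert(!TrySet(a, 5, MOCK_STRICT));  // would grow read-only length: throws
  assert(TrySet(a, 5, MOCK_SLOPPY));   // sloppy: silently dropped
  assert(a.length == 3);               // length never changed
}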
+
+
+MaybeHandle<Object> JSObject::GetElementWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
- Handle<Object> this_handle(receiver, isolate);
- Handle<JSObject> holder_handle(this, isolate);
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor(), isolate);
if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
+ ApiIndexedPropertyAccess("interceptor-indexed-get", *object, index));
PropertyCallbackArguments
- args(isolate, interceptor->data(), receiver, this);
+ args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Value> result = args.Call(getter, index);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox handle before return.
+ return handle(*result_internal, isolate);
}
}
- Heap* heap = holder_handle->GetHeap();
- ElementsAccessor* handler = holder_handle->GetElementsAccessor();
- MaybeObject* raw_result = handler->Get(*this_handle,
- *holder_handle,
- index);
- if (raw_result != heap->the_hole_value()) return raw_result;
-
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ ElementsAccessor* handler = object->GetElementsAccessor();
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, handler->Get(receiver, object, index),
+ Object);
+ if (!result->IsTheHole()) return result;
- Object* pt = holder_handle->GetPrototype();
- if (pt == heap->null_value()) return heap->undefined_value();
- return pt->GetElementWithReceiver(isolate, *this_handle, index);
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return isolate->factory()->undefined_value();
+ return Object::GetElementWithReceiver(isolate, proto, receiver, index);
}
@@ -13085,7 +13377,7 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
FixedArray* backing_store = NULL;
switch (GetElementsKind()) {
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
backing_store_base =
FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
backing_store = FixedArray::cast(backing_store_base);
@@ -13127,31 +13419,47 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
}
// Fall through if packing is not guaranteed.
case FAST_HOLEY_DOUBLE_ELEMENTS: {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
- *capacity = elms->length();
+ *capacity = elements()->length();
+ if (*capacity == 0) break;
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
for (int i = 0; i < *capacity; i++) {
if (!elms->is_the_hole(i)) ++(*used);
}
break;
}
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ {
// External arrays are considered 100% used.
- ExternalArray* external_array = ExternalArray::cast(elements());
+ FixedArrayBase* external_array = FixedArrayBase::cast(elements());
*capacity = external_array->length();
*used = external_array->length();
break;
+ }
}
}
+bool JSObject::WouldConvertToSlowElements(Handle<Object> key) {
+ uint32_t index;
+ if (HasFastElements() && key->ToArrayIndex(&index)) {
+ Handle<FixedArrayBase> backing_store(FixedArrayBase::cast(elements()));
+ uint32_t capacity = static_cast<uint32_t>(backing_store->length());
+ if (index >= capacity) {
+ if ((index - capacity) >= kMaxGap) return true;
+ uint32_t new_capacity = NewElementsCapacity(index + 1);
+ return ShouldConvertToSlowElements(new_capacity);
+ }
+ }
+ return false;
+}
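// [Editor's sketch, not part of the diff: WouldConvertToSlowElements above
// flags a write far beyond the current capacity. kMaxGap's value is defined
// in objects.h and not visible in this hunk; 1024 below is an assumed
// placeholder, and the doubling check is omitted for brevity.]
#include <cstdio>

const unsigned kMockMaxGap = 1024;  // placeholder, not necessarily V8's value

bool WouldGoSlow(unsigned capacity, unsigned index) {
  return index >= capacity && (index - capacity) >= kMockMaxGap;
}

int main() {
  std::printf("%d\n", WouldGoSlow(16, 20));    // 0: small gap stays fast
  std::printf("%d\n", WouldGoSlow(16, 5000));  // 1: huge gap goes to dictionary
}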
+
+
bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
STATIC_ASSERT(kMaxUncheckedOldFastElementsLength <=
kMaxUncheckedFastElementsLength);
@@ -13181,11 +13489,11 @@ bool JSObject::ShouldConvertToFastElements() {
if (IsAccessCheckNeeded()) return false;
// Observed objects may not go to fast mode because they rely on map checks,
// and for fast element accesses we sometimes check element kinds only.
- if (FLAG_harmony_observation && map()->is_observed()) return false;
+ if (map()->is_observed()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
SeededNumberDictionary* dictionary = NULL;
- if (elements->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ if (elements->map() == GetHeap()->sloppy_arguments_elements_map()) {
dictionary = SeededNumberDictionary::cast(elements->get(1));
} else {
dictionary = SeededNumberDictionary::cast(elements);
@@ -13211,6 +13519,7 @@ bool JSObject::ShouldConvertToFastElements() {
bool JSObject::ShouldConvertToFastDoubleElements(
bool* has_smi_only_elements) {
*has_smi_only_elements = false;
+ if (HasSloppyArgumentsElements()) return false;
if (FLAG_unbox_double_arrays) {
ASSERT(HasDictionaryElements());
SeededNumberDictionary* dictionary = element_dictionary();
@@ -13239,12 +13548,12 @@ bool JSObject::ShouldConvertToFastDoubleElements(
// together, so even though this function belongs in objects-debug.cc,
// we keep it here instead to satisfy certain compilers.
#ifdef OBJECT_PRINT
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::Print(FILE* out) {
- int capacity = HashTable<Shape, Key>::Capacity();
+template<typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::Print(FILE* out) {
+ int capacity = DerivedHashTable::Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
+ Object* k = DerivedHashTable::KeyAt(i);
+ if (DerivedHashTable::IsKey(k)) {
PrintF(out, " ");
if (k->IsString()) {
String::cast(k)->StringPrint(out);
@@ -13260,15 +13569,15 @@ void Dictionary<Shape, Key>::Print(FILE* out) {
#endif
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
+template<typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::CopyValuesTo(FixedArray* elements) {
int pos = 0;
- int capacity = HashTable<Shape, Key>::Capacity();
+ int capacity = DerivedHashTable::Capacity();
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
- Object* k = Dictionary<Shape, Key>::KeyAt(i);
- if (Dictionary<Shape, Key>::IsKey(k)) {
+ Object* k = Dictionary::KeyAt(i);
+ if (Dictionary::IsKey(k)) {
elements->set(pos++, ValueAt(i), mode);
}
}
@@ -13296,76 +13605,86 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() {
}
-Handle<Object> JSObject::GetPropertyPostInterceptor(
- Handle<JSObject> object,
- Handle<Object> receiver,
- Handle<Name> name,
- PropertyAttributes* attributes) {
- // Check local property in holder, ignore interceptor.
- Isolate* isolate = object->GetIsolate();
- LookupResult lookup(isolate);
- object->LocalLookupRealNamedProperty(*name, &lookup);
- Handle<Object> result;
- if (lookup.IsFound()) {
- result = GetProperty(object, receiver, &lookup, name, attributes);
- } else {
- // Continue searching via the prototype chain.
- Handle<Object> prototype(object->GetPrototype(), isolate);
- *attributes = ABSENT;
- if (prototype->IsNull()) return isolate->factory()->undefined_value();
- result = GetPropertyWithReceiver(prototype, receiver, name, attributes);
- }
- return result;
-}
-
-
-MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
- Object* receiver,
- Name* name,
- PropertyAttributes* attributes) {
- // Check local property in holder, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound()) {
- return GetProperty(receiver, &result, name, attributes);
- }
- return GetHeap()->undefined_value();
-}
-
-
-Handle<Object> JSObject::GetPropertyWithInterceptor(
- Handle<JSObject> object,
+MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(
+ Handle<JSObject> holder,
Handle<Object> receiver,
- Handle<Name> name,
- PropertyAttributes* attributes) {
- Isolate* isolate = object->GetIsolate();
+ Handle<Name> name) {
+ Isolate* isolate = holder->GetIsolate();
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return isolate->factory()->undefined_value();
- Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor(), isolate);
+ Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor(), isolate);
Handle<String> name_string = Handle<String>::cast(name);
- if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetterCallback getter =
- v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get", *object, *name));
- PropertyCallbackArguments
- args(isolate, interceptor->data(), *receiver, *object);
- v8::Handle<v8::Value> result =
- args.Call(getter, v8::Utils::ToLocal(name_string));
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (!result.IsEmpty()) {
- *attributes = NONE;
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- // Rebox handle to escape this scope.
- return handle(*result_internal, isolate);
- }
- }
+ if (interceptor->getter()->IsUndefined()) return MaybeHandle<Object>();
- return GetPropertyPostInterceptor(object, receiver, name, attributes);
+ v8::NamedPropertyGetterCallback getter =
+ v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-get", *holder, *name));
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), *receiver, *holder);
+ v8::Handle<v8::Value> result =
+ args.Call(getter, v8::Utils::ToLocal(name_string));
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (result.IsEmpty()) return MaybeHandle<Object>();
+
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ // Rebox handle before return.
+ return handle(*result_internal, isolate);
+}
+
+
+// Compute the property keys from the interceptor.
+// TODO(rossberg): support symbols in API, and filter here if needed.
+MaybeHandle<JSObject> JSObject::GetKeysForNamedInterceptor(
+ Handle<JSObject> object, Handle<JSReceiver> receiver) {
+ Isolate* isolate = receiver->GetIsolate();
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), *receiver, *object);
+ v8::Handle<v8::Object> result;
+ if (!interceptor->enumerator()->IsUndefined()) {
+ v8::NamedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::NamedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
+ LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
+ result = args.Call(enum_fun);
+ }
+ if (result.IsEmpty()) return MaybeHandle<JSObject>();
+#if ENABLE_EXTRA_CHECKS
+ CHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
+ v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
+#endif
+ // Rebox before returning.
+ return handle(*v8::Utils::OpenHandle(*result), isolate);
+}
+
+
+// Compute the element keys from the interceptor.
+MaybeHandle<JSObject> JSObject::GetKeysForIndexedInterceptor(
+ Handle<JSObject> object, Handle<JSReceiver> receiver) {
+ Isolate* isolate = receiver->GetIsolate();
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), *receiver, *object);
+ v8::Handle<v8::Object> result;
+ if (!interceptor->enumerator()->IsUndefined()) {
+ v8::IndexedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
+ LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
+ result = args.Call(enum_fun);
+ }
+ if (result.IsEmpty()) return MaybeHandle<JSObject>();
+#if ENABLE_EXTRA_CHECKS
+ CHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
+ v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
+#endif
+ // Rebox before returning.
+ return handle(*v8::Utils::OpenHandle(*result), isolate);
}
@@ -13375,25 +13694,27 @@ bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
SealHandleScope shs(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ if (!isolate->MayNamedAccess(object, key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
+ // TODO(yangguo): Issue 3269, check for scheduled exception missing?
return false;
}
}
LookupResult result(isolate);
- object->LocalLookupRealNamedProperty(*key, &result);
+ object->LookupOwnRealNamedProperty(key, &result);
return result.IsFound() && !result.IsInterceptor();
}
bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
Isolate* isolate = object->GetIsolate();
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
+ // TODO(yangguo): Issue 3269, check for scheduled exception missing?
return false;
}
}
@@ -13406,8 +13727,8 @@ bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
return HasRealElementProperty(Handle<JSObject>::cast(proto), index);
}
- return object->GetElementAttributeWithoutInterceptor(
- *object, index, false) != ABSENT;
+ return GetElementAttributeWithoutInterceptor(
+ object, object, index, false) != ABSENT;
}
@@ -13417,19 +13738,20 @@ bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
SealHandleScope shs(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ if (!isolate->MayNamedAccess(object, key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
+ // TODO(yangguo): Issue 3269, check for scheduled exception missing?
return false;
}
}
LookupResult result(isolate);
- object->LocalLookupRealNamedProperty(*key, &result);
+ object->LookupOwnRealNamedProperty(key, &result);
return result.IsPropertyCallbacks();
}
-int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
+int JSObject::NumberOfOwnProperties(PropertyAttributes filter) {
if (HasFastProperties()) {
Map* map = this->map();
if (filter == NONE) return map->NumberOfOwnDescriptors();
@@ -13556,18 +13878,18 @@ void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
}
-// Fill in the names of local properties into the supplied storage. The main
+// Fill in the names of own properties into the supplied storage. The main
// purpose of this function is to provide reflection information for the object
// mirrors.
-void JSObject::GetLocalPropertyNames(
+void JSObject::GetOwnPropertyNames(
FixedArray* storage, int index, PropertyAttributes filter) {
- ASSERT(storage->length() >= (NumberOfLocalProperties(filter) - index));
+ ASSERT(storage->length() >= (NumberOfOwnProperties(filter) - index));
if (HasFastProperties()) {
int real_size = map()->NumberOfOwnDescriptors();
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < real_size; i++) {
if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- ((filter & SYMBOLIC) == 0 || !descs->GetKey(i)->IsSymbol())) {
+ !FilterKey(descs->GetKey(i), filter)) {
storage->set(index++, descs->GetKey(i));
}
}
@@ -13580,8 +13902,8 @@ void JSObject::GetLocalPropertyNames(
}
-int JSObject::NumberOfLocalElements(PropertyAttributes filter) {
- return GetLocalElementKeys(NULL, filter);
+int JSObject::NumberOfOwnElements(PropertyAttributes filter) {
+ return GetOwnElementKeys(NULL, filter);
}
@@ -13595,12 +13917,12 @@ int JSObject::NumberOfEnumElements() {
if (length == 0) return 0;
}
// Compute the number of enumerable elements.
- return NumberOfLocalElements(static_cast<PropertyAttributes>(DONT_ENUM));
+ return NumberOfOwnElements(static_cast<PropertyAttributes>(DONT_ENUM));
}
-int JSObject::GetLocalElementKeys(FixedArray* storage,
- PropertyAttributes filter) {
+int JSObject::GetOwnElementKeys(FixedArray* storage,
+ PropertyAttributes filter) {
int counter = 0;
switch (GetElementsKind()) {
case FAST_SMI_ELEMENTS:
@@ -13625,7 +13947,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
case FAST_HOLEY_DOUBLE_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
- FixedDoubleArray::cast(elements())->length();
+ FixedArrayBase::cast(elements())->length();
for (int i = 0; i < length; i++) {
if (!FixedDoubleArray::cast(elements())->is_the_hole(i)) {
if (storage != NULL) {
@@ -13637,26 +13959,15 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
- case EXTERNAL_PIXEL_ELEMENTS: {
- int length = ExternalPixelArray::cast(elements())->length();
- while (counter < length) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter));
- }
- counter++;
- }
- ASSERT(!storage || storage->length() >= counter);
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- int length = ExternalArray::cast(elements())->length();
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ {
+ int length = FixedArrayBase::cast(elements())->length();
while (counter < length) {
if (storage != NULL) {
storage->set(counter, Smi::FromInt(counter));
@@ -13666,6 +13977,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
+
case DICTIONARY_ELEMENTS: {
if (storage != NULL) {
element_dictionary()->CopyKeysTo(storage,
@@ -13675,7 +13987,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
int mapped_length = parameter_map->length() - 2;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@@ -13736,8 +14048,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
int JSObject::GetEnumElementKeys(FixedArray* storage) {
- return GetLocalElementKeys(storage,
- static_cast<PropertyAttributes>(DONT_ENUM));
+ return GetOwnElementKeys(storage, static_cast<PropertyAttributes>(DONT_ENUM));
}
@@ -13771,35 +14082,34 @@ class StringKey : public HashTableKey {
// StringSharedKeys are used as keys in the eval cache.
class StringSharedKey : public HashTableKey {
public:
- StringSharedKey(String* source,
- SharedFunctionInfo* shared,
- LanguageMode language_mode,
+ StringSharedKey(Handle<String> source,
+ Handle<SharedFunctionInfo> shared,
+ StrictMode strict_mode,
int scope_position)
: source_(source),
shared_(shared),
- language_mode_(language_mode),
+ strict_mode_(strict_mode),
scope_position_(scope_position) { }
- bool IsMatch(Object* other) {
+ bool IsMatch(Object* other) V8_OVERRIDE {
+ DisallowHeapAllocation no_allocation;
if (!other->IsFixedArray()) return false;
FixedArray* other_array = FixedArray::cast(other);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
- if (shared != shared_) return false;
- int language_unchecked = Smi::cast(other_array->get(2))->value();
- ASSERT(language_unchecked == CLASSIC_MODE ||
- language_unchecked == STRICT_MODE ||
- language_unchecked == EXTENDED_MODE);
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- if (language_mode != language_mode_) return false;
+ if (shared != *shared_) return false;
+ int strict_unchecked = Smi::cast(other_array->get(2))->value();
+ ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
+ if (strict_mode != strict_mode_) return false;
int scope_position = Smi::cast(other_array->get(3))->value();
if (scope_position != scope_position_) return false;
String* source = String::cast(other_array->get(1));
- return source->Equals(source_);
+ return source->Equals(*source_);
}
static uint32_t StringSharedHashHelper(String* source,
SharedFunctionInfo* shared,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
uint32_t hash = source->Hash();
if (shared->HasSourceCode()) {
@@ -13808,51 +14118,46 @@ class StringSharedKey : public HashTableKey {
// script source code and the start position of the calling scope.
// We do this to ensure that the cache entries can survive garbage
// collection.
- Script* script = Script::cast(shared->script());
+ Script* script(Script::cast(shared->script()));
hash ^= String::cast(script->source())->Hash();
- if (language_mode == STRICT_MODE) hash ^= 0x8000;
- if (language_mode == EXTENDED_MODE) hash ^= 0x0080;
+ if (strict_mode == STRICT) hash ^= 0x8000;
hash += scope_position;
}
return hash;
}
- uint32_t Hash() {
- return StringSharedHashHelper(
- source_, shared_, language_mode_, scope_position_);
+ uint32_t Hash() V8_OVERRIDE {
+ return StringSharedHashHelper(*source_, *shared_, strict_mode_,
+ scope_position_);
}
- uint32_t HashForObject(Object* obj) {
+ uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+ DisallowHeapAllocation no_allocation;
FixedArray* other_array = FixedArray::cast(obj);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
String* source = String::cast(other_array->get(1));
- int language_unchecked = Smi::cast(other_array->get(2))->value();
- ASSERT(language_unchecked == CLASSIC_MODE ||
- language_unchecked == STRICT_MODE ||
- language_unchecked == EXTENDED_MODE);
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ int strict_unchecked = Smi::cast(other_array->get(2))->value();
+ ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
int scope_position = Smi::cast(other_array->get(3))->value();
return StringSharedHashHelper(
- source, shared, language_mode, scope_position);
+ source, shared, strict_mode, scope_position);
}
- MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) {
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(4);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* other_array = FixedArray::cast(obj);
- other_array->set(0, shared_);
- other_array->set(1, source_);
- other_array->set(2, Smi::FromInt(language_mode_));
- other_array->set(3, Smi::FromInt(scope_position_));
- return other_array;
+
+ Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
+ array->set(0, *shared_);
+ array->set(1, *source_);
+ array->set(2, Smi::FromInt(strict_mode_));
+ array->set(3, Smi::FromInt(scope_position_));
+ return array;
}
private:
- String* source_;
- SharedFunctionInfo* shared_;
- LanguageMode language_mode_;
+ Handle<String> source_;
+ Handle<SharedFunctionInfo> shared_;
+ StrictMode strict_mode_;
int scope_position_;
};
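// [Editor's sketch, not part of the diff: a self-contained model of
// StringSharedHashHelper as shown above -- mix the source hash with the
// outer script's source hash, flip bit 0x8000 for strict mode, then add the
// scope position. MockHash is an arbitrary FNV-1a stand-in for
// String::Hash(), used only to make the snippet runnable.]
#include <cstdint>
#include <cstdio>
#include <string>

uint32_t MockHash(const std::string& s) {
  uint32_t h = 2166136261u;  // FNV-1a offset basis
  for (char c : s) { h ^= static_cast<uint8_t>(c); h *= 16777619u; }
  return h;
}

uint32_t EvalCacheHash(const std::string& source, const std::string& script_source,
                       bool strict, int scope_position) {
  uint32_t hash = MockHash(source);
  hash ^= MockHash(script_source);  // tie the entry to the calling script
  if (strict) hash ^= 0x8000;       // strict and sloppy entries never collide
  hash += scope_position;           // and to the calling scope's position
  return hash;
}

int main() {
  std::printf("%u\n", EvalCacheHash("x + 1", "function f() {}", true, 4));
}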
@@ -13860,7 +14165,7 @@ class StringSharedKey : public HashTableKey {
// RegExpKey carries the source and flags of a regular expression as key.
class RegExpKey : public HashTableKey {
public:
- RegExpKey(String* string, JSRegExp::Flags flags)
+ RegExpKey(Handle<String> string, JSRegExp::Flags flags)
: string_(string),
flags_(Smi::FromInt(flags.value())) { }
@@ -13868,22 +14173,22 @@ class RegExpKey : public HashTableKey {
// stored value is stored where the key should be. IsMatch then
// compares the search key to the found object, rather than comparing
// a key to a key.
- bool IsMatch(Object* obj) {
+ bool IsMatch(Object* obj) V8_OVERRIDE {
FixedArray* val = FixedArray::cast(obj);
return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
&& (flags_ == val->get(JSRegExp::kFlagsIndex));
}
- uint32_t Hash() { return RegExpHash(string_, flags_); }
+ uint32_t Hash() V8_OVERRIDE { return RegExpHash(*string_, flags_); }
- Object* AsObject(Heap* heap) {
+ Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
// Plain hash maps, which is where regexp keys are used, don't
// use this function.
UNREACHABLE();
- return NULL;
+ return MaybeHandle<Object>().ToHandleChecked();
}
- uint32_t HashForObject(Object* obj) {
+ uint32_t HashForObject(Object* obj) V8_OVERRIDE {
FixedArray* val = FixedArray::cast(obj);
return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
Smi::cast(val->get(JSRegExp::kFlagsIndex)));
@@ -13893,178 +14198,101 @@ class RegExpKey : public HashTableKey {
return string->Hash() + flags->value();
}
- String* string_;
+ Handle<String> string_;
Smi* flags_;
};
-// Utf8StringKey carries a vector of chars as key.
-class Utf8StringKey : public HashTableKey {
- public:
- explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->IsUtf8EqualTo(string_);
- }
-
- uint32_t Hash() {
- if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
- hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- MaybeObject* AsObject(Heap* heap) {
- if (hash_field_ == 0) Hash();
- return heap->AllocateInternalizedStringFromUtf8(string_,
- chars_,
- hash_field_);
- }
-
- Vector<const char> string_;
- uint32_t hash_field_;
- int chars_; // Caches the number of characters when computing the hash code.
- uint32_t seed_;
-};
-
-
-template <typename Char>
-class SequentialStringKey : public HashTableKey {
- public:
- explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) { }
-
- uint32_t Hash() {
- hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
- string_.length(),
- seed_);
-
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- Vector<const Char> string_;
- uint32_t hash_field_;
- uint32_t seed_;
-};
+Handle<Object> OneByteStringKey::AsHandle(Isolate* isolate) {
+ if (hash_field_ == 0) Hash();
+ return isolate->factory()->NewOneByteInternalizedString(string_, hash_field_);
+}
+Handle<Object> TwoByteStringKey::AsHandle(Isolate* isolate) {
+ if (hash_field_ == 0) Hash();
+ return isolate->factory()->NewTwoByteInternalizedString(string_, hash_field_);
+}
-class OneByteStringKey : public SequentialStringKey<uint8_t> {
- public:
- OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
- : SequentialStringKey<uint8_t>(str, seed) { }
- bool IsMatch(Object* string) {
- return String::cast(string)->IsOneByteEqualTo(string_);
- }
+template<>
+const uint8_t* SubStringKey<uint8_t>::GetChars() {
+ return string_->IsSeqOneByteString()
+ ? SeqOneByteString::cast(*string_)->GetChars()
+ : ExternalAsciiString::cast(*string_)->GetChars();
+}
- MaybeObject* AsObject(Heap* heap) {
- if (hash_field_ == 0) Hash();
- return heap->AllocateOneByteInternalizedString(string_, hash_field_);
- }
-};
+template<>
+const uint16_t* SubStringKey<uint16_t>::GetChars() {
+ return string_->IsSeqTwoByteString()
+ ? SeqTwoByteString::cast(*string_)->GetChars()
+ : ExternalTwoByteString::cast(*string_)->GetChars();
+}
-class SubStringOneByteStringKey : public HashTableKey {
- public:
- explicit SubStringOneByteStringKey(Handle<SeqOneByteString> string,
- int from,
- int length)
- : string_(string), from_(from), length_(length) { }
-
- uint32_t Hash() {
- ASSERT(length_ >= 0);
- ASSERT(from_ + length_ <= string_->length());
- uint8_t* chars = string_->GetChars() + from_;
- hash_field_ = StringHasher::HashSequentialString(
- chars, length_, string_->GetHeap()->HashSeed());
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
+template<>
+Handle<Object> SubStringKey<uint8_t>::AsHandle(Isolate* isolate) {
+ if (hash_field_ == 0) Hash();
+ Vector<const uint8_t> chars(GetChars() + from_, length_);
+ return isolate->factory()->NewOneByteInternalizedString(chars, hash_field_);
+}
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
- bool IsMatch(Object* string) {
- Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
- return String::cast(string)->IsOneByteEqualTo(chars);
- }
+template<>
+Handle<Object> SubStringKey<uint16_t>::AsHandle(Isolate* isolate) {
+ if (hash_field_ == 0) Hash();
+ Vector<const uint16_t> chars(GetChars() + from_, length_);
+ return isolate->factory()->NewTwoByteInternalizedString(chars, hash_field_);
+}
- MaybeObject* AsObject(Heap* heap) {
- if (hash_field_ == 0) Hash();
- Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
- return heap->AllocateOneByteInternalizedString(chars, hash_field_);
- }
- private:
- Handle<SeqOneByteString> string_;
- int from_;
- int length_;
- uint32_t hash_field_;
-};
+template<>
+bool SubStringKey<uint8_t>::IsMatch(Object* string) {
+ Vector<const uint8_t> chars(GetChars() + from_, length_);
+ return String::cast(string)->IsOneByteEqualTo(chars);
+}
-class TwoByteStringKey : public SequentialStringKey<uc16> {
- public:
- explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
- : SequentialStringKey<uc16>(str, seed) { }
+template<>
+bool SubStringKey<uint16_t>::IsMatch(Object* string) {
+ Vector<const uint16_t> chars(GetChars() + from_, length_);
+ return String::cast(string)->IsTwoByteEqualTo(chars);
+}
- bool IsMatch(Object* string) {
- return String::cast(string)->IsTwoByteEqualTo(string_);
- }
- MaybeObject* AsObject(Heap* heap) {
- if (hash_field_ == 0) Hash();
- return heap->AllocateTwoByteInternalizedString(string_, hash_field_);
- }
-};
+template class SubStringKey<uint8_t>;
+template class SubStringKey<uint16_t>;
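// [Editor's sketch] The two "template class SubStringKey<...>;" lines
// above are explicit instantiation definitions: they force the compiler
// to emit every member of exactly those specializations in this
// translation unit, which lets the member definitions (GetChars, AsHandle,
// IsMatch) live in the .cc file instead of a header. The same idiom in
// miniature, with illustrative names:
template <typename Char>
class SketchKey {
 public:
  explicit SketchKey(const Char* data) : data_(data) {}
  const Char* chars() const;  // declared here, defined out of line below
 private:
  const Char* data_;
};

template <typename Char>
const Char* SketchKey<Char>::chars() const { return data_; }

// Emit the members for these two element types only; code linking against
// any other SketchKey<T> would fail at link time.
template class SketchKey<unsigned char>;
template class SketchKey<char16_t>;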
// InternalizedStringKey carries a string/internalized-string object as key.
class InternalizedStringKey : public HashTableKey {
public:
- explicit InternalizedStringKey(String* string)
+ explicit InternalizedStringKey(Handle<String> string)
: string_(string) { }
- bool IsMatch(Object* string) {
- return String::cast(string)->Equals(string_);
+ virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ return String::cast(string)->Equals(*string_);
}
- uint32_t Hash() { return string_->Hash(); }
+ virtual uint32_t Hash() V8_OVERRIDE { return string_->Hash(); }
- uint32_t HashForObject(Object* other) {
+ virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
return String::cast(other)->Hash();
}
- MaybeObject* AsObject(Heap* heap) {
- // Attempt to flatten the string, so that internalized strings will most
- // often be flat strings.
- string_ = string_->TryFlattenGetString();
+ virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
// Internalize the string if possible.
- Map* map = heap->InternalizedStringMapForString(string_);
- if (map != NULL) {
- string_->set_map_no_write_barrier(map);
+ MaybeHandle<Map> maybe_map =
+ isolate->factory()->InternalizedStringMapForString(string_);
+ Handle<Map> map;
+ if (maybe_map.ToHandle(&map)) {
+ string_->set_map_no_write_barrier(*map);
ASSERT(string_->IsInternalizedString());
return string_;
}
// Otherwise allocate a new internalized string.
- return heap->AllocateInternalizedStringImpl(
+ return isolate->factory()->NewInternalizedStringImpl(
string_, string_->length(), string_->hash_field());
}
@@ -14072,53 +14300,56 @@ class InternalizedStringKey : public HashTableKey {
return String::cast(obj)->Hash();
}
- String* string_;
+ Handle<String> string_;
};
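// [Editor's sketch] The recurring change in this diff is raw fields such
// as "String* string_" becoming "Handle<String> string_". The reason is
// the moving collector: a handle adds one level of indirection through a
// slot that the GC rewrites when it relocates the object, so the handle
// survives allocation. A toy model of that indirection (nothing below is
// V8's real Handle type):
#include <cassert>

template <typename T>
class ToyHandle {
 public:
  explicit ToyHandle(T** slot) : slot_(slot) {}
  T* operator*() const { return *slot_; }  // always read through the slot
 private:
  T** slot_;  // lives in a handle scope; patched by the collector
};

struct ToyString { int length; };

int main() {
  ToyString original{3};
  ToyString relocated{3};
  ToyString* slot = &original;      // slot tracked by the toy "GC"
  ToyHandle<ToyString> h(&slot);
  slot = &relocated;                // "GC" moves the object, fixes the slot
  assert((*h)->length == 3);        // the handle still dereferences safely
  return 0;
}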
-template<typename Shape, typename Key>
-void HashTable<Shape, Key>::IteratePrefix(ObjectVisitor* v) {
+template<typename Derived, typename Shape, typename Key>
+void HashTable<Derived, Shape, Key>::IteratePrefix(ObjectVisitor* v) {
IteratePointers(v, 0, kElementsStartOffset);
}
-template<typename Shape, typename Key>
-void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
+template<typename Derived, typename Shape, typename Key>
+void HashTable<Derived, Shape, Key>::IterateElements(ObjectVisitor* v) {
IteratePointers(v,
kElementsStartOffset,
kHeaderSize + length() * kPointerSize);
}
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::Allocate(Heap* heap,
- int at_least_space_for,
- MinimumCapacity capacity_option,
- PretenureFlag pretenure) {
- ASSERT(!capacity_option || IS_POWER_OF_TWO(at_least_space_for));
+template<typename Derived, typename Shape, typename Key>
+Handle<Derived> HashTable<Derived, Shape, Key>::New(
+ Isolate* isolate,
+ int at_least_space_for,
+ MinimumCapacity capacity_option,
+ PretenureFlag pretenure) {
+ ASSERT(0 <= at_least_space_for);
+ ASSERT(!capacity_option || IsPowerOf2(at_least_space_for));
int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
? at_least_space_for
: ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
- return Failure::OutOfMemoryException(0x10);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
}
- Object* obj;
- { MaybeObject* maybe_obj =
- heap->AllocateHashTable(EntryToIndex(capacity), pretenure);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- HashTable::cast(obj)->SetNumberOfElements(0);
- HashTable::cast(obj)->SetNumberOfDeletedElements(0);
- HashTable::cast(obj)->SetCapacity(capacity);
- return obj;
+ Factory* factory = isolate->factory();
+ int length = EntryToIndex(capacity);
+ Handle<FixedArray> array = factory->NewFixedArray(length, pretenure);
+ array->set_map_no_write_barrier(*factory->hash_table_map());
+ Handle<Derived> table = Handle<Derived>::cast(array);
+
+ table->SetNumberOfElements(0);
+ table->SetNumberOfDeletedElements(0);
+ table->SetCapacity(capacity);
+ return table;
}
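// [Editor's sketch] ComputeCapacity() is not shown in this hunk. Tables of
// this style conventionally reserve about twice the requested element
// count and round up to a power of two, so FirstProbe/NextProbe can mask
// instead of taking a modulus. A sketch under that assumption (the minimum
// capacity constant is illustrative):
#include <cstdint>

int ComputeCapacitySketch(int at_least_space_for) {
  const uint32_t kMinCapacity = 4;
  uint32_t capacity = 1;
  // Smallest power of two that holds twice the requested element count.
  while (capacity < static_cast<uint32_t>(at_least_space_for) * 2) {
    capacity <<= 1;
  }
  return static_cast<int>(capacity < kMinCapacity ? kMinCapacity : capacity);
}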
// Find entry for key; otherwise return kNotFound.
-int NameDictionary::FindEntry(Name* key) {
+int NameDictionary::FindEntry(Handle<Name> key) {
if (!key->IsUniqueName()) {
- return HashTable<NameDictionaryShape, Name*>::FindEntry(key);
+ return DerivedHashTable::FindEntry(key);
}
// Optimized for unique names. Knowledge of the key type allows:
@@ -14139,24 +14370,26 @@ int NameDictionary::FindEntry(Name* key) {
int index = EntryToIndex(entry);
Object* element = get(index);
if (element->IsUndefined()) break; // Empty entry.
- if (key == element) return entry;
+ if (*key == element) return entry;
if (!element->IsUniqueName() &&
!element->IsTheHole() &&
- Name::cast(element)->Equals(key)) {
+ Name::cast(element)->Equals(*key)) {
// Replace a key that is a non-internalized string by the equivalent
// internalized string for faster further lookups.
- set(index, key);
+ set(index, *key);
return entry;
}
- ASSERT(element->IsTheHole() || !Name::cast(element)->Equals(key));
+ ASSERT(element->IsTheHole() || !Name::cast(element)->Equals(*key));
entry = NextProbe(entry, count++, capacity);
}
return kNotFound;
}
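// [Editor's sketch] The loop above combines open-addressed probing with an
// optimization: when it finds a slot whose key is equal to, but not the
// same object as, the unique search key, it overwrites the slot with the
// unique key so that future lookups succeed on a plain pointer compare.
// A container-level sketch of that canonicalization trick, with interned
// std::string pointers standing in for unique names:
#include <functional>
#include <string>
#include <vector>

struct SketchSlot { const std::string* key = nullptr; };

int FindEntrySketch(std::vector<SketchSlot>& slots,
                    const std::string* interned_key) {
  const size_t capacity = slots.size();  // assumed to be a power of two
  size_t entry = std::hash<std::string>()(*interned_key) & (capacity - 1);
  for (size_t count = 1; count <= capacity; ++count) {
    const std::string* element = slots[entry].key;
    if (element == nullptr) break;                    // empty slot: not found
    if (element == interned_key) return static_cast<int>(entry);
    if (*element == *interned_key) {
      slots[entry].key = interned_key;                // canonicalize the slot
      return static_cast<int>(entry);
    }
    entry = (entry + count) & (capacity - 1);         // next probe
  }
  return -1;  // kNotFound
}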
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
+template<typename Derived, typename Shape, typename Key>
+void HashTable<Derived, Shape, Key>::Rehash(
+ Handle<Derived> new_table,
+ Key key) {
ASSERT(NumberOfElements() < new_table->Capacity());
DisallowHeapAllocation no_gc;
@@ -14175,7 +14408,7 @@ MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
uint32_t from_index = EntryToIndex(i);
Object* k = get(from_index);
if (IsKey(k)) {
- uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k);
+ uint32_t hash = HashTable::HashForObject(key, k);
uint32_t insertion_index =
EntryToIndex(new_table->FindInsertionEntry(hash));
for (int j = 0; j < Shape::kEntrySize; j++) {
@@ -14185,16 +14418,16 @@ MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
}
new_table->SetNumberOfElements(NumberOfElements());
new_table->SetNumberOfDeletedElements(0);
- return new_table;
}
-template<typename Shape, typename Key>
-uint32_t HashTable<Shape, Key>::EntryForProbe(Key key,
- Object* k,
- int probe,
- uint32_t expected) {
- uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k);
+template<typename Derived, typename Shape, typename Key>
+uint32_t HashTable<Derived, Shape, Key>::EntryForProbe(
+ Key key,
+ Object* k,
+ int probe,
+ uint32_t expected) {
+ uint32_t hash = HashTable::HashForObject(key, k);
uint32_t capacity = Capacity();
uint32_t entry = FirstProbe(hash, capacity);
for (int i = 1; i < probe; i++) {
@@ -14205,10 +14438,10 @@ uint32_t HashTable<Shape, Key>::EntryForProbe(Key key,
}
-template<typename Shape, typename Key>
-void HashTable<Shape, Key>::Swap(uint32_t entry1,
- uint32_t entry2,
- WriteBarrierMode mode) {
+template<typename Derived, typename Shape, typename Key>
+void HashTable<Derived, Shape, Key>::Swap(uint32_t entry1,
+ uint32_t entry2,
+ WriteBarrierMode mode) {
int index1 = EntryToIndex(entry1);
int index2 = EntryToIndex(entry2);
Object* temp[Shape::kEntrySize];
@@ -14224,8 +14457,8 @@ void HashTable<Shape, Key>::Swap(uint32_t entry1,
}
-template<typename Shape, typename Key>
-void HashTable<Shape, Key>::Rehash(Key key) {
+template<typename Derived, typename Shape, typename Key>
+void HashTable<Derived, Shape, Key>::Rehash(Key key) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
uint32_t capacity = Capacity();
@@ -14257,71 +14490,73 @@ void HashTable<Shape, Key>::Rehash(Key key) {
}
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n,
- Key key,
- PretenureFlag pretenure) {
- int capacity = Capacity();
- int nof = NumberOfElements() + n;
- int nod = NumberOfDeletedElements();
+template<typename Derived, typename Shape, typename Key>
+Handle<Derived> HashTable<Derived, Shape, Key>::EnsureCapacity(
+ Handle<Derived> table,
+ int n,
+ Key key,
+ PretenureFlag pretenure) {
+ Isolate* isolate = table->GetIsolate();
+ int capacity = table->Capacity();
+ int nof = table->NumberOfElements() + n;
+ int nod = table->NumberOfDeletedElements();
// Return if:
// 50% is still free after adding n elements and
// at most 50% of the free elements are deleted elements.
if (nod <= (capacity - nof) >> 1) {
int needed_free = nof >> 1;
- if (nof + needed_free <= capacity) return this;
+ if (nof + needed_free <= capacity) return table;
}
const int kMinCapacityForPretenure = 256;
bool should_pretenure = pretenure == TENURED ||
- ((capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this));
- Object* obj;
- { MaybeObject* maybe_obj =
- Allocate(GetHeap(),
- nof * 2,
- USE_DEFAULT_MINIMUM_CAPACITY,
- should_pretenure ? TENURED : NOT_TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ ((capacity > kMinCapacityForPretenure) &&
+ !isolate->heap()->InNewSpace(*table));
+ Handle<Derived> new_table = HashTable::New(
+ isolate,
+ nof * 2,
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ should_pretenure ? TENURED : NOT_TENURED);
- return Rehash(HashTable::cast(obj), key);
+ table->Rehash(new_table, key);
+ return new_table;
}
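// [Editor's sketch] The early return above encodes two conditions: keep
// the current table only if deleted entries fill at most half of the free
// space after the insert, and at least nof/2 slots remain free (i.e. the
// table stays no more than two-thirds full). The same predicate, isolated,
// with worked numbers:
bool ShouldGrowSketch(int capacity, int nof, int nod) {
  if (nod <= (capacity - nof) >> 1) {
    int needed_free = nof >> 1;
    if (nof + needed_free <= capacity) return false;  // enough headroom
  }
  return true;
}
// E.g. with capacity 16: 9 live, 0 deleted keeps the table (9 + 4 <= 16);
// 12 live forces a grow (12 + 6 > 16); so do 4 live plus 7 deleted
// (7 > (16 - 4) >> 1).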
-template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::Shrink(Key key) {
- int capacity = Capacity();
- int nof = NumberOfElements();
+template<typename Derived, typename Shape, typename Key>
+Handle<Derived> HashTable<Derived, Shape, Key>::Shrink(Handle<Derived> table,
+ Key key) {
+ int capacity = table->Capacity();
+ int nof = table->NumberOfElements();
// Shrink to fit the number of elements if only a quarter of the
// capacity is filled with elements.
- if (nof > (capacity >> 2)) return this;
+ if (nof > (capacity >> 2)) return table;
// Allocate a new dictionary with room for at least the current
// number of elements. The allocation method will make sure that
// there is extra room in the dictionary for additions. Don't go
// lower than room for 16 elements.
int at_least_room_for = nof;
- if (at_least_room_for < 16) return this;
+ if (at_least_room_for < 16) return table;
+ Isolate* isolate = table->GetIsolate();
const int kMinCapacityForPretenure = 256;
bool pretenure =
(at_least_room_for > kMinCapacityForPretenure) &&
- !GetHeap()->InNewSpace(this);
- Object* obj;
- { MaybeObject* maybe_obj =
- Allocate(GetHeap(),
- at_least_room_for,
- USE_DEFAULT_MINIMUM_CAPACITY,
- pretenure ? TENURED : NOT_TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ !isolate->heap()->InNewSpace(*table);
+ Handle<Derived> new_table = HashTable::New(
+ isolate,
+ at_least_room_for,
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ pretenure ? TENURED : NOT_TENURED);
- return Rehash(HashTable::cast(obj), key);
+ table->Rehash(new_table, key);
+ return new_table;
}
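// [Editor's sketch] Shrink() is the mirror-image heuristic: reallocate only
// when at most a quarter of the capacity is occupied, and never below room
// for 16 elements.
bool ShouldShrinkSketch(int capacity, int nof) {
  return nof <= (capacity >> 2) && nof >= 16;
}
// E.g. 16 live entries in a 256-slot table shrink (16 <= 64), while 20
// live entries in a 64-slot table stay put (20 > 64 >> 2).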
-template<typename Shape, typename Key>
-uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
+template<typename Derived, typename Shape, typename Key>
+uint32_t HashTable<Derived, Shape, Key>::FindInsertionEntry(uint32_t hash) {
uint32_t capacity = Capacity();
uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
@@ -14338,221 +14573,238 @@ uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
// Force instantiation of template instances class.
// Please note this list is compiler dependent.
-template class HashTable<StringTableShape, HashTableKey*>;
+template class HashTable<StringTable, StringTableShape, HashTableKey*>;
-template class HashTable<CompilationCacheShape, HashTableKey*>;
+template class HashTable<CompilationCacheTable,
+ CompilationCacheShape,
+ HashTableKey*>;
-template class HashTable<MapCacheShape, HashTableKey*>;
+template class HashTable<MapCache, MapCacheShape, HashTableKey*>;
-template class HashTable<ObjectHashTableShape<1>, Object*>;
+template class HashTable<ObjectHashTable,
+ ObjectHashTableShape,
+ Handle<Object> >;
-template class HashTable<ObjectHashTableShape<2>, Object*>;
+template class HashTable<WeakHashTable, WeakHashTableShape<2>, Handle<Object> >;
-template class HashTable<WeakHashTableShape<2>, Object*>;
+template class Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >;
-template class Dictionary<NameDictionaryShape, Name*>;
+template class Dictionary<SeededNumberDictionary,
+ SeededNumberDictionaryShape,
+ uint32_t>;
-template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
+template class Dictionary<UnseededNumberDictionary,
+ UnseededNumberDictionaryShape,
+ uint32_t>;
-template class Dictionary<UnseededNumberDictionaryShape, uint32_t>;
+template Handle<SeededNumberDictionary>
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
+ New(Isolate*, int at_least_space_for, PretenureFlag pretenure);
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- Allocate(Heap* heap, int at_least_space_for, PretenureFlag pretenure);
+template Handle<UnseededNumberDictionary>
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
+ New(Isolate*, int at_least_space_for, PretenureFlag pretenure);
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- Allocate(Heap* heap, int at_least_space_for, PretenureFlag pretenure);
+template Handle<NameDictionary>
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
+ New(Isolate*, int n, PretenureFlag pretenure);
-template MaybeObject* Dictionary<NameDictionaryShape, Name*>::
- Allocate(Heap* heap, int n, PretenureFlag pretenure);
+template Handle<SeededNumberDictionary>
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
+ AtPut(Handle<SeededNumberDictionary>, uint32_t, Handle<Object>);
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::AtPut(
- uint32_t, Object*);
+template Handle<UnseededNumberDictionary>
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
+ AtPut(Handle<UnseededNumberDictionary>, uint32_t, Handle<Object>);
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- AtPut(uint32_t, Object*);
-
-template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+template Object*
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
SlowReverseLookup(Object* value);
-template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+template Object*
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
SlowReverseLookup(Object* value);
-template Object* Dictionary<NameDictionaryShape, Name*>::SlowReverseLookup(
- Object*);
-
-template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo(
- FixedArray*,
- PropertyAttributes,
- Dictionary<SeededNumberDictionaryShape, uint32_t>::SortMode);
-
-template Object* Dictionary<NameDictionaryShape, Name*>::DeleteProperty(
- int, JSObject::DeleteMode);
-
-template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- DeleteProperty(int, JSObject::DeleteMode);
-
-template MaybeObject* Dictionary<NameDictionaryShape, Name*>::Shrink(Name* n);
-
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Shrink(
- uint32_t);
+template Object*
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
+ SlowReverseLookup(Object* value);
-template void Dictionary<NameDictionaryShape, Name*>::CopyKeysTo(
- FixedArray*,
- int,
- PropertyAttributes,
- Dictionary<NameDictionaryShape, Name*>::SortMode);
+template void
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
+ CopyKeysTo(
+ FixedArray*,
+ PropertyAttributes,
+ Dictionary<SeededNumberDictionary,
+ SeededNumberDictionaryShape,
+ uint32_t>::SortMode);
+
+template Handle<Object>
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::DeleteProperty(
+ Handle<NameDictionary>, int, JSObject::DeleteMode);
+
+template Handle<Object>
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
+ DeleteProperty(Handle<SeededNumberDictionary>, int, JSObject::DeleteMode);
+
+template Handle<NameDictionary>
+HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
+ New(Isolate*, int, MinimumCapacity, PretenureFlag);
+
+template Handle<NameDictionary>
+HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
+ Shrink(Handle<NameDictionary>, Handle<Name>);
+
+template Handle<SeededNumberDictionary>
+HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
+ Shrink(Handle<SeededNumberDictionary>, uint32_t);
+
+template void Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
+ CopyKeysTo(
+ FixedArray*,
+ int,
+ PropertyAttributes,
+ Dictionary<
+ NameDictionary, NameDictionaryShape, Handle<Name> >::SortMode);
template int
-Dictionary<NameDictionaryShape, Name*>::NumberOfElementsFilterAttributes(
- PropertyAttributes);
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
+ NumberOfElementsFilterAttributes(PropertyAttributes);
-template MaybeObject* Dictionary<NameDictionaryShape, Name*>::Add(
- Name*, Object*, PropertyDetails);
+template Handle<NameDictionary>
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::Add(
+ Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails);
-template MaybeObject*
-Dictionary<NameDictionaryShape, Name*>::GenerateNewEnumerationIndices();
+template void
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
+ GenerateNewEnumerationIndices(Handle<NameDictionary>);
template int
-Dictionary<SeededNumberDictionaryShape, uint32_t>::
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
NumberOfElementsFilterAttributes(PropertyAttributes);
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Add(
- uint32_t, Object*, PropertyDetails);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::Add(
- uint32_t, Object*, PropertyDetails);
-
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- EnsureCapacity(int, uint32_t);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- EnsureCapacity(int, uint32_t);
+template Handle<SeededNumberDictionary>
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
+ Add(Handle<SeededNumberDictionary>,
+ uint32_t,
+ Handle<Object>,
+ PropertyDetails);
-template MaybeObject* Dictionary<NameDictionaryShape, Name*>::
- EnsureCapacity(int, Name*);
+template Handle<UnseededNumberDictionary>
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
+ Add(Handle<UnseededNumberDictionary>,
+ uint32_t,
+ Handle<Object>,
+ PropertyDetails);
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
- AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
+template Handle<SeededNumberDictionary>
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
+ EnsureCapacity(Handle<SeededNumberDictionary>, int, uint32_t);
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
- AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
+template Handle<UnseededNumberDictionary>
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
+ EnsureCapacity(Handle<UnseededNumberDictionary>, int, uint32_t);
-template MaybeObject* Dictionary<NameDictionaryShape, Name*>::AddEntry(
- Name*, Object*, PropertyDetails, uint32_t);
+template Handle<NameDictionary>
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
+ EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>);
template
-int Dictionary<SeededNumberDictionaryShape, uint32_t>::NumberOfEnumElements();
+int Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
+ NumberOfEnumElements();
template
-int Dictionary<NameDictionaryShape, Name*>::NumberOfEnumElements();
+int Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
+ NumberOfEnumElements();
template
-int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
+int HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
+ FindEntry(uint32_t);
Handle<Object> JSObject::PrepareSlowElementsForSort(
Handle<JSObject> object, uint32_t limit) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->PrepareSlowElementsForSort(limit),
- Object);
-}
-
-
-// Collates undefined and nonexistent elements below limit from position
-// zero of the elements. The object stays in Dictionary mode.
-MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
- ASSERT(HasDictionaryElements());
+ ASSERT(object->HasDictionaryElements());
+ Isolate* isolate = object->GetIsolate();
// Must stay in dictionary mode, either because of requires_slow_elements,
// or because we are not going to sort (and therefore compact) all of the
// elements.
- SeededNumberDictionary* dict = element_dictionary();
- HeapNumber* result_double = NULL;
- if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Allocate space for result before we start mutating the object.
- Object* new_double;
- { MaybeObject* maybe_new_double = GetHeap()->AllocateHeapNumber(0.0);
- if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
- }
- result_double = HeapNumber::cast(new_double);
- }
-
- Object* obj;
- { MaybeObject* maybe_obj =
- SeededNumberDictionary::Allocate(GetHeap(), dict->NumberOfElements());
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj);
-
- DisallowHeapAllocation no_alloc;
+ Handle<SeededNumberDictionary> dict(object->element_dictionary(), isolate);
+ Handle<SeededNumberDictionary> new_dict =
+ SeededNumberDictionary::New(isolate, dict->NumberOfElements());
uint32_t pos = 0;
uint32_t undefs = 0;
int capacity = dict->Capacity();
+ Handle<Smi> bailout(Smi::FromInt(-1), isolate);
+ // Adding entries to the new dictionary does not cause it to grow, as we
+ // have allocated one that is large enough for all entries.
+ DisallowHeapAllocation no_gc;
for (int i = 0; i < capacity; i++) {
Object* k = dict->KeyAt(i);
- if (dict->IsKey(k)) {
- ASSERT(k->IsNumber());
- ASSERT(!k->IsSmi() || Smi::cast(k)->value() >= 0);
- ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
- ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
- Object* value = dict->ValueAt(i);
- PropertyDetails details = dict->DetailsAt(i);
- if (details.type() == CALLBACKS || details.IsReadOnly()) {
- // Bail out and do the sorting of undefineds and array holes in JS.
- // Also bail out if the element is not supposed to be moved.
- return Smi::FromInt(-1);
- }
- uint32_t key = NumberToUint32(k);
- // In the following we assert that adding the entry to the new dictionary
- // does not cause GC. This is the case because we made sure to allocate
- // the dictionary big enough above, so it need not grow.
- if (key < limit) {
- if (value->IsUndefined()) {
- undefs++;
- } else {
- if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return Smi::FromInt(-1);
- }
- new_dict->AddNumberEntry(pos, value, details)->ToObjectUnchecked();
- pos++;
- }
+ if (!dict->IsKey(k)) continue;
+
+ ASSERT(k->IsNumber());
+ ASSERT(!k->IsSmi() || Smi::cast(k)->value() >= 0);
+ ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
+ ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
+
+ HandleScope scope(isolate);
+ Handle<Object> value(dict->ValueAt(i), isolate);
+ PropertyDetails details = dict->DetailsAt(i);
+ if (details.type() == CALLBACKS || details.IsReadOnly()) {
+ // Bail out and do the sorting of undefineds and array holes in JS.
+ // Also bail out if the element is not supposed to be moved.
+ return bailout;
+ }
+
+ uint32_t key = NumberToUint32(k);
+ if (key < limit) {
+ if (value->IsUndefined()) {
+ undefs++;
+ } else if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
+ // Adding an entry with the key beyond smi-range requires
+ // allocation. Bailout.
+ return bailout;
} else {
- if (key > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return Smi::FromInt(-1);
- }
- new_dict->AddNumberEntry(key, value, details)->ToObjectUnchecked();
+ Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
+ new_dict, pos, value, details);
+ ASSERT(result.is_identical_to(new_dict));
+ USE(result);
+ pos++;
}
+ } else if (key > static_cast<uint32_t>(Smi::kMaxValue)) {
+ // Adding an entry with the key beyond smi-range requires
+ // allocation. Bailout.
+ return bailout;
+ } else {
+ Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
+ new_dict, key, value, details);
+ ASSERT(result.is_identical_to(new_dict));
+ USE(result);
}
}
uint32_t result = pos;
PropertyDetails no_details = PropertyDetails(NONE, NORMAL, 0);
- Heap* heap = GetHeap();
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
// Adding an entry with the key beyond smi-range requires
// allocation. Bailout.
- return Smi::FromInt(-1);
+ return bailout;
}
- new_dict->AddNumberEntry(pos, heap->undefined_value(), no_details)->
- ToObjectUnchecked();
+ HandleScope scope(isolate);
+ Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
+ new_dict, pos, isolate->factory()->undefined_value(), no_details);
+ ASSERT(result.is_identical_to(new_dict));
+ USE(result);
pos++;
undefs--;
}
- set_elements(new_dict);
-
- if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Smi::FromInt(static_cast<int>(result));
- }
+ object->set_elements(*new_dict);
- ASSERT_NE(NULL, result_double);
- result_double->set_value(static_cast<double>(result));
- return result_double;
+ AllowHeapAllocation allocate_return_value;
+ return isolate->factory()->NewNumberFromUint(result);
}
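// [Editor's sketch] Stripped of the dictionary bookkeeping and handle
// scopes, the pass above is a collation: keys below `limit` are repacked
// densely from position zero with defined values first and undefineds
// after them, and the result is the count of defined elements. A sketch
// over a plain ordered map, omitting the bail-out cases (accessors,
// read-only entries, and keys or positions outside smi range):
#include <cstdint>
#include <map>
#include <optional>

uint32_t CollateForSortSketch(std::map<uint32_t, std::optional<int>>& dict,
                              uint32_t limit) {
  std::map<uint32_t, std::optional<int>> packed;
  uint32_t pos = 0;
  uint32_t undefs = 0;
  for (const auto& [key, value] : dict) {
    if (key >= limit) { packed[key] = value; continue; }  // left in place
    if (value.has_value()) {
      packed[pos++] = value;  // defined values pack toward the front
    } else {
      undefs++;               // undefineds are counted, then appended
    }
  }
  const uint32_t defined = pos;
  while (undefs-- > 0) packed[pos++] = std::nullopt;
  dict.swap(packed);
  return defined;
}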
@@ -14563,8 +14815,11 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
uint32_t limit) {
Isolate* isolate = object->GetIsolate();
+ if (object->HasSloppyArgumentsElements() ||
+ object->map()->is_observed()) {
+ return handle(Smi::FromInt(-1), isolate);
+ }
- ASSERT(!object->map()->is_observed());
if (object->HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
@@ -14583,13 +14838,14 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
Handle<FixedArray> fast_elements =
isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure);
dict->CopyValuesTo(*fast_elements);
- object->ValidateElements();
+ JSObject::ValidateElements(object);
- object->set_map_and_elements(*new_map, *fast_elements);
- } else if (object->HasExternalArrayElements()) {
- // External arrays cannot have holes or undefined elements.
+ JSObject::SetMapAndElements(object, new_map, fast_elements);
+ } else if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
+ // Typed arrays cannot have holes or undefined elements.
return handle(Smi::FromInt(
- ExternalArray::cast(object->elements())->length()), isolate);
+ FixedArrayBase::cast(object->elements())->length()), isolate);
} else if (!object->HasFastDoubleElements()) {
EnsureWritableFastElements(object);
}
@@ -14688,25 +14944,16 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
ExternalArrayType JSTypedArray::type() {
switch (elements()->map()->instance_type()) {
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return kExternalByteArray;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return kExternalUnsignedByteArray;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return kExternalShortArray;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return kExternalUnsignedShortArray;
- case EXTERNAL_INT_ARRAY_TYPE:
- return kExternalIntArray;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return kExternalUnsignedIntArray;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return kExternalFloatArray;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return kExternalDoubleArray;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return kExternalPixelArray;
+#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ return kExternal##Type##Array;
+
+ TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
+#undef INSTANCE_TYPE_TO_ARRAY_TYPE
+
default:
+ UNREACHABLE();
return static_cast<ExternalArrayType>(-1);
}
}
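// [Editor's sketch] The rewrite above replaces nine hand-written cases
// with an X-macro: TYPED_ARRAYS(V) invokes V once per typed-array kind,
// and the local INSTANCE_TYPE_TO_ARRAY_TYPE macro expands each invocation
// into a pair of case labels. A self-contained miniature of the idiom
// (the list here is a stand-in, not V8's TYPED_ARRAYS):
#include <cstdio>

#define SHAPES_SKETCH(V) \
  V(Circle, 1)           \
  V(Square, 4)           \
  V(Hexagon, 6)

enum ShapeKind {
#define DEFINE_ENUM(Name, corners) k##Name,
  SHAPES_SKETCH(DEFINE_ENUM)
#undef DEFINE_ENUM
};

int Corners(ShapeKind kind) {
  switch (kind) {
#define CORNER_CASE(Name, corners) case k##Name: return corners;
    SHAPES_SKETCH(CORNER_CASE)
#undef CORNER_CASE
  }
  return -1;
}

int main() { std::printf("%d\n", Corners(kHexagon)); return 0; }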
@@ -14714,24 +14961,13 @@ ExternalArrayType JSTypedArray::type() {
size_t JSTypedArray::element_size() {
switch (elements()->map()->instance_type()) {
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return 1;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return 1;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return 2;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return 2;
- case EXTERNAL_INT_ARRAY_TYPE:
- return 4;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return 4;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return 4;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return 8;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return 1;
+#define INSTANCE_TYPE_TO_ELEMENT_SIZE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ return size;
+
+ TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENT_SIZE)
+#undef INSTANCE_TYPE_TO_ELEMENT_SIZE
+
default:
UNREACHABLE();
return 0;
@@ -14739,11 +14975,14 @@ size_t JSTypedArray::element_size() {
}
-Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
+Handle<Object> ExternalUint8ClampedArray::SetValue(
+ Handle<ExternalUint8ClampedArray> array,
+ uint32_t index,
+ Handle<Object> value) {
uint8_t clamped_value = 0;
- if (index < static_cast<uint32_t>(length())) {
+ if (index < static_cast<uint32_t>(array->length())) {
if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
+ int int_value = Handle<Smi>::cast(value)->value();
if (int_value < 0) {
clamped_value = 0;
} else if (int_value > 255) {
@@ -14752,7 +14991,7 @@ Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
clamped_value = static_cast<uint8_t>(int_value);
}
} else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
+ double double_value = Handle<HeapNumber>::cast(value)->value();
if (!(double_value > 0)) {
// NaN and less than zero clamp to zero.
clamped_value = 0;
@@ -14768,24 +15007,25 @@ Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
// converted to a number type further up in the call chain.
ASSERT(value->IsUndefined());
}
- set(index, clamped_value);
+ array->set(index, clamped_value);
}
- return Smi::FromInt(clamped_value);
+ return handle(Smi::FromInt(clamped_value), array->GetIsolate());
}
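// [Editor's sketch] The branches visible above clamp small integers to
// [0, 255] and send NaN and negatives to zero; the double path elided by
// the hunk must also clamp values above 255 and round the remainder
// (round-to-nearest is assumed here, matching Uint8Clamped semantics).
// The whole policy in one standalone function:
#include <cmath>
#include <cstdint>

uint8_t ClampToUint8Sketch(double value) {
  if (!(value > 0)) return 0;    // NaN and non-positive values clamp to 0
  if (value > 255) return 255;   // upper clamp
  return static_cast<uint8_t>(std::lround(value));  // assumed rounding mode
}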
template<typename ExternalArrayClass, typename ValueType>
-static MaybeObject* ExternalArrayIntSetter(Heap* heap,
- ExternalArrayClass* receiver,
- uint32_t index,
- Object* value) {
+static Handle<Object> ExternalArrayIntSetter(
+ Isolate* isolate,
+ Handle<ExternalArrayClass> receiver,
+ uint32_t index,
+ Handle<Object> value) {
ValueType cast_value = 0;
if (index < static_cast<uint32_t>(receiver->length())) {
if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
+ int int_value = Handle<Smi>::cast(value)->value();
cast_value = static_cast<ValueType>(int_value);
} else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
+ double double_value = Handle<HeapNumber>::cast(value)->value();
cast_value = static_cast<ValueType>(DoubleToInt32(double_value));
} else {
// Clamp undefined to zero (default). All other types have been
@@ -14794,178 +15034,112 @@ static MaybeObject* ExternalArrayIntSetter(Heap* heap,
}
receiver->set(index, cast_value);
}
- return heap->NumberFromInt32(cast_value);
+ return isolate->factory()->NewNumberFromInt(cast_value);
}
-Handle<Object> ExternalByteArray::SetValue(Handle<ExternalByteArray> array,
+Handle<Object> ExternalInt8Array::SetValue(Handle<ExternalInt8Array> array,
uint32_t index,
Handle<Object> value) {
- CALL_HEAP_FUNCTION(array->GetIsolate(),
- array->SetValue(index, *value),
- Object);
+ return ExternalArrayIntSetter<ExternalInt8Array, int8_t>(
+ array->GetIsolate(), array, index, value);
}
-MaybeObject* ExternalByteArray::SetValue(uint32_t index, Object* value) {
- return ExternalArrayIntSetter<ExternalByteArray, int8_t>
- (GetHeap(), this, index, value);
-}
-
-
-Handle<Object> ExternalUnsignedByteArray::SetValue(
- Handle<ExternalUnsignedByteArray> array,
- uint32_t index,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(array->GetIsolate(),
- array->SetValue(index, *value),
- Object);
-}
-
-
-MaybeObject* ExternalUnsignedByteArray::SetValue(uint32_t index,
- Object* value) {
- return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
- (GetHeap(), this, index, value);
-}
-
-
-Handle<Object> ExternalShortArray::SetValue(
- Handle<ExternalShortArray> array,
- uint32_t index,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(array->GetIsolate(),
- array->SetValue(index, *value),
- Object);
-}
-
-
-MaybeObject* ExternalShortArray::SetValue(uint32_t index,
- Object* value) {
- return ExternalArrayIntSetter<ExternalShortArray, int16_t>
- (GetHeap(), this, index, value);
-}
-
-
-Handle<Object> ExternalUnsignedShortArray::SetValue(
- Handle<ExternalUnsignedShortArray> array,
- uint32_t index,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(array->GetIsolate(),
- array->SetValue(index, *value),
- Object);
+Handle<Object> ExternalUint8Array::SetValue(Handle<ExternalUint8Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ return ExternalArrayIntSetter<ExternalUint8Array, uint8_t>(
+ array->GetIsolate(), array, index, value);
}
-MaybeObject* ExternalUnsignedShortArray::SetValue(uint32_t index,
- Object* value) {
- return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
- (GetHeap(), this, index, value);
+Handle<Object> ExternalInt16Array::SetValue(Handle<ExternalInt16Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ return ExternalArrayIntSetter<ExternalInt16Array, int16_t>(
+ array->GetIsolate(), array, index, value);
}
-Handle<Object> ExternalIntArray::SetValue(Handle<ExternalIntArray> array,
- uint32_t index,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(array->GetIsolate(),
- array->SetValue(index, *value),
- Object);
+Handle<Object> ExternalUint16Array::SetValue(Handle<ExternalUint16Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ return ExternalArrayIntSetter<ExternalUint16Array, uint16_t>(
+ array->GetIsolate(), array, index, value);
}
-MaybeObject* ExternalIntArray::SetValue(uint32_t index, Object* value) {
- return ExternalArrayIntSetter<ExternalIntArray, int32_t>
- (GetHeap(), this, index, value);
+Handle<Object> ExternalInt32Array::SetValue(Handle<ExternalInt32Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ return ExternalArrayIntSetter<ExternalInt32Array, int32_t>(
+ array->GetIsolate(), array, index, value);
}
-Handle<Object> ExternalUnsignedIntArray::SetValue(
- Handle<ExternalUnsignedIntArray> array,
+Handle<Object> ExternalUint32Array::SetValue(
+ Handle<ExternalUint32Array> array,
uint32_t index,
Handle<Object> value) {
- CALL_HEAP_FUNCTION(array->GetIsolate(),
- array->SetValue(index, *value),
- Object);
-}
-
-
-MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
uint32_t cast_value = 0;
- Heap* heap = GetHeap();
- if (index < static_cast<uint32_t>(length())) {
+ if (index < static_cast<uint32_t>(array->length())) {
if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
+ int int_value = Handle<Smi>::cast(value)->value();
cast_value = static_cast<uint32_t>(int_value);
} else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
+ double double_value = Handle<HeapNumber>::cast(value)->value();
cast_value = static_cast<uint32_t>(DoubleToUint32(double_value));
} else {
// Clamp undefined to zero (default). All other types have been
// converted to a number type further up in the call chain.
ASSERT(value->IsUndefined());
}
- set(index, cast_value);
+ array->set(index, cast_value);
}
- return heap->NumberFromUint32(cast_value);
+ return array->GetIsolate()->factory()->NewNumberFromUint(cast_value);
}
-Handle<Object> ExternalFloatArray::SetValue(Handle<ExternalFloatArray> array,
- uint32_t index,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(array->GetIsolate(),
- array->SetValue(index, *value),
- Object);
-}
-
-
-MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
+Handle<Object> ExternalFloat32Array::SetValue(
+ Handle<ExternalFloat32Array> array,
+ uint32_t index,
+ Handle<Object> value) {
float cast_value = static_cast<float>(OS::nan_value());
- Heap* heap = GetHeap();
- if (index < static_cast<uint32_t>(length())) {
+ if (index < static_cast<uint32_t>(array->length())) {
if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
+ int int_value = Handle<Smi>::cast(value)->value();
cast_value = static_cast<float>(int_value);
} else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
+ double double_value = Handle<HeapNumber>::cast(value)->value();
cast_value = static_cast<float>(double_value);
} else {
// Clamp undefined to NaN (default). All other types have been
// converted to a number type further up in the call chain.
ASSERT(value->IsUndefined());
}
- set(index, cast_value);
+ array->set(index, cast_value);
}
- return heap->AllocateHeapNumber(cast_value);
-}
-
-
-Handle<Object> ExternalDoubleArray::SetValue(Handle<ExternalDoubleArray> array,
- uint32_t index,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(array->GetIsolate(),
- array->SetValue(index, *value),
- Object);
+ return array->GetIsolate()->factory()->NewNumber(cast_value);
}
-MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
+Handle<Object> ExternalFloat64Array::SetValue(
+ Handle<ExternalFloat64Array> array,
+ uint32_t index,
+ Handle<Object> value) {
double double_value = OS::nan_value();
- Heap* heap = GetHeap();
- if (index < static_cast<uint32_t>(length())) {
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- double_value = static_cast<double>(int_value);
- } else if (value->IsHeapNumber()) {
- double_value = HeapNumber::cast(value)->value();
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsNumber()) {
+ double_value = value->Number();
} else {
// Clamp undefined to NaN (default). All other types have been
// converted to a number type further up in the call chain.
ASSERT(value->IsUndefined());
}
- set(index, double_value);
+ array->set(index, double_value);
}
- return heap->AllocateHeapNumber(double_value);
+ return array->GetIsolate()->factory()->NewNumber(double_value);
}
@@ -14980,14 +15154,14 @@ Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
Handle<JSGlobalObject> global,
Handle<Name> name) {
ASSERT(!global->HasFastProperties());
- int entry = global->property_dictionary()->FindEntry(*name);
+ int entry = global->property_dictionary()->FindEntry(name);
if (entry == NameDictionary::kNotFound) {
Isolate* isolate = global->GetIsolate();
Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(
isolate->factory()->the_hole_value());
PropertyDetails details(NONE, NORMAL, 0);
details = details.AsDeleted();
- Handle<NameDictionary> dictionary = NameDictionaryAdd(
+ Handle<NameDictionary> dictionary = NameDictionary::Add(
handle(global->property_dictionary()), name, cell, details);
global->set_properties(*dictionary);
return cell;
@@ -14999,12 +15173,6 @@ Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
}
-MaybeObject* StringTable::LookupString(String* string, Object** s) {
- InternalizedStringKey key(string);
- return LookupKey(&key, s);
-}
-
-
// This class is used for looking up two character strings in the string table.
// If we don't have a hit, we don't want to waste much time, so we unroll the
// string hash calculation loop here for speed. Doesn't work if the two
@@ -15041,7 +15209,7 @@ class TwoCharHashTableKey : public HashTableKey {
#endif
}
- bool IsMatch(Object* o) {
+ bool IsMatch(Object* o) V8_OVERRIDE {
if (!o->IsString()) return false;
String* other = String::cast(o);
if (other->length() != 2) return false;
@@ -15049,17 +15217,17 @@ class TwoCharHashTableKey : public HashTableKey {
return other->Get(1) == c2_;
}
- uint32_t Hash() { return hash_; }
- uint32_t HashForObject(Object* key) {
+ uint32_t Hash() V8_OVERRIDE { return hash_; }
+ uint32_t HashForObject(Object* key) V8_OVERRIDE {
if (!key->IsString()) return 0;
return String::cast(key)->Hash();
}
- Object* AsObject(Heap* heap) {
+ Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
// The TwoCharHashTableKey is only used for looking in the string
// table, not for adding to it.
UNREACHABLE();
- return NULL;
+ return MaybeHandle<Object>().ToHandleChecked();
}
private:
@@ -15069,221 +15237,171 @@ class TwoCharHashTableKey : public HashTableKey {
};
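// [Editor's sketch] TwoCharHashTableKey precomputes its hash by unrolling
// the string hasher for exactly two characters. The sketch below uses a
// Jenkins-style one-at-a-time hash; the mixing steps resemble what V8's
// StringHasher did in this era, but treat the exact constants and the
// zero-hash substitute as illustrative assumptions:
#include <cstdint>

uint32_t TwoCharHashSketch(uint16_t c1, uint16_t c2, uint32_t seed) {
  uint32_t hash = seed;
  hash += c1; hash += hash << 10; hash ^= hash >> 6;  // mix in c1
  hash += c2; hash += hash << 10; hash ^= hash >> 6;  // mix in c2
  hash += hash << 3; hash ^= hash >> 11; hash += hash << 15;  // finalize
  if (hash == 0) hash = 27;  // avoid the reserved "no hash yet" value
  return hash;
}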
-bool StringTable::LookupStringIfExists(String* string, String** result) {
+MaybeHandle<String> StringTable::InternalizeStringIfExists(
+ Isolate* isolate,
+ Handle<String> string) {
+ if (string->IsInternalizedString()) {
+ return string;
+ }
+ return LookupStringIfExists(isolate, string);
+}
+
+
+MaybeHandle<String> StringTable::LookupStringIfExists(
+ Isolate* isolate,
+ Handle<String> string) {
+ Handle<StringTable> string_table = isolate->factory()->string_table();
InternalizedStringKey key(string);
- int entry = FindEntry(&key);
+ int entry = string_table->FindEntry(&key);
if (entry == kNotFound) {
- return false;
+ return MaybeHandle<String>();
} else {
- *result = String::cast(KeyAt(entry));
+ Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate);
ASSERT(StringShape(*result).IsInternalized());
- return true;
+ return result;
}
}
-bool StringTable::LookupTwoCharsStringIfExists(uint16_t c1,
- uint16_t c2,
- String** result) {
- TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed());
- int entry = FindEntry(&key);
+MaybeHandle<String> StringTable::LookupTwoCharsStringIfExists(
+ Isolate* isolate,
+ uint16_t c1,
+ uint16_t c2) {
+ Handle<StringTable> string_table = isolate->factory()->string_table();
+ TwoCharHashTableKey key(c1, c2, isolate->heap()->HashSeed());
+ int entry = string_table->FindEntry(&key);
if (entry == kNotFound) {
- return false;
+ return MaybeHandle<String>();
} else {
- *result = String::cast(KeyAt(entry));
+ Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate);
ASSERT(StringShape(*result).IsInternalized());
- return true;
+ return result;
}
}
-MaybeObject* StringTable::LookupUtf8String(Vector<const char> str,
- Object** s) {
- Utf8StringKey key(str, GetHeap()->HashSeed());
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* StringTable::LookupOneByteString(Vector<const uint8_t> str,
- Object** s) {
- OneByteStringKey key(str, GetHeap()->HashSeed());
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* StringTable::LookupSubStringOneByteString(
- Handle<SeqOneByteString> str,
- int from,
- int length,
- Object** s) {
- SubStringOneByteStringKey key(str, from, length);
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* StringTable::LookupTwoByteString(Vector<const uc16> str,
- Object** s) {
- TwoByteStringKey key(str, GetHeap()->HashSeed());
- return LookupKey(&key, s);
+Handle<String> StringTable::LookupString(Isolate* isolate,
+ Handle<String> string) {
+ InternalizedStringKey key(string);
+ return LookupKey(isolate, &key);
}
-MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) {
- int entry = FindEntry(key);
+Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) {
+ Handle<StringTable> table = isolate->factory()->string_table();
+ int entry = table->FindEntry(key);
// String already in table.
if (entry != kNotFound) {
- *s = KeyAt(entry);
- return this;
+ return handle(String::cast(table->KeyAt(entry)), isolate);
}
// Adding new string. Grow table if needed.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ table = StringTable::EnsureCapacity(table, 1, key);
// Create string object.
- Object* string;
- { MaybeObject* maybe_string = key->AsObject(GetHeap());
- if (!maybe_string->ToObject(&string)) return maybe_string;
- }
-
- // If the string table grew as part of EnsureCapacity, obj is not
- // the current string table and therefore we cannot use
- // StringTable::cast here.
- StringTable* table = reinterpret_cast<StringTable*>(obj);
+ Handle<Object> string = key->AsHandle(isolate);
+ // There must be no attempts to internalize strings that could throw
+ // InvalidStringLength error.
+ CHECK(!string.is_null());
// Add the new string and return it along with the string table.
entry = table->FindInsertionEntry(key->Hash());
- table->set(EntryToIndex(entry), string);
+ table->set(EntryToIndex(entry), *string);
table->ElementAdded();
- *s = string;
- return table;
-}
-
-// The key for the script compilation cache is dependent on the mode flags,
-// because they change the global language mode and thus binding behaviour.
-// If flags change at some point, we must ensure that we do not hit the cache
-// for code compiled with different settings.
-static LanguageMode CurrentGlobalLanguageMode() {
- return FLAG_use_strict
- ? (FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE)
- : CLASSIC_MODE;
+ isolate->factory()->set_string_table(table);
+ return Handle<String>::cast(string);
}
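// [Editor's sketch] LookupKey() is the canonical interning loop: return
// the existing entry on a probe hit, otherwise make room, materialize the
// string, insert it, and publish the possibly-reallocated table back to
// its owner (here, the factory's string_table root). The same shape with
// standard containers; pointers into std::unordered_set stay valid across
// rehashing, which stands in for republishing the grown table:
#include <string>
#include <unordered_set>

class InternPoolSketch {
 public:
  // Equal strings intern to one canonical address, so callers can compare
  // interned strings by pointer.
  const std::string* Intern(const std::string& s) {
    auto it = table_.find(s);
    if (it != table_.end()) return &*it;  // string already in table
    return &*table_.insert(s).first;      // grow-if-needed, then add
  }

 private:
  std::unordered_set<std::string> table_;
};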
-Object* CompilationCacheTable::Lookup(String* src, Context* context) {
- SharedFunctionInfo* shared = context->closure()->shared();
- StringSharedKey key(src,
- shared,
- CurrentGlobalLanguageMode(),
+Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
+ Handle<Context> context) {
+ Isolate* isolate = GetIsolate();
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ StringSharedKey key(src, shared, FLAG_use_strict ? STRICT : SLOPPY,
RelocInfo::kNoPosition);
int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
+ if (entry == kNotFound) return isolate->factory()->undefined_value();
+ return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
}
-Object* CompilationCacheTable::LookupEval(String* src,
- Context* context,
- LanguageMode language_mode,
- int scope_position) {
- StringSharedKey key(src,
- context->closure()->shared(),
- language_mode,
- scope_position);
+Handle<Object> CompilationCacheTable::LookupEval(Handle<String> src,
+ Handle<Context> context,
+ StrictMode strict_mode,
+ int scope_position) {
+ Isolate* isolate = GetIsolate();
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ StringSharedKey key(src, shared, strict_mode, scope_position);
int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
+ if (entry == kNotFound) return isolate->factory()->undefined_value();
+ return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
}
-Object* CompilationCacheTable::LookupRegExp(String* src,
- JSRegExp::Flags flags) {
+Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
+ JSRegExp::Flags flags) {
+ Isolate* isolate = GetIsolate();
+ DisallowHeapAllocation no_allocation;
RegExpKey key(src, flags);
int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
+ if (entry == kNotFound) return isolate->factory()->undefined_value();
+ return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
}
-MaybeObject* CompilationCacheTable::Put(String* src,
- Context* context,
- Object* value) {
- SharedFunctionInfo* shared = context->closure()->shared();
- StringSharedKey key(src,
- shared,
- CurrentGlobalLanguageMode(),
+Handle<CompilationCacheTable> CompilationCacheTable::Put(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> context, Handle<Object> value) {
+ Isolate* isolate = cache->GetIsolate();
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ StringSharedKey key(src, shared, FLAG_use_strict ? STRICT : SLOPPY,
RelocInfo::kNoPosition);
- CompilationCacheTable* cache;
- MaybeObject* maybe_cache = EnsureCapacity(1, &key);
- if (!maybe_cache->To(&cache)) return maybe_cache;
-
- Object* k;
- MaybeObject* maybe_k = key.AsObject(GetHeap());
- if (!maybe_k->To(&k)) return maybe_k;
-
+ cache = EnsureCapacity(cache, 1, &key);
+ Handle<Object> k = key.AsHandle(isolate);
int entry = cache->FindInsertionEntry(key.Hash());
- cache->set(EntryToIndex(entry), k);
- cache->set(EntryToIndex(entry) + 1, value);
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, *value);
cache->ElementAdded();
return cache;
}
-MaybeObject* CompilationCacheTable::PutEval(String* src,
- Context* context,
- SharedFunctionInfo* value,
- int scope_position) {
- StringSharedKey key(src,
- context->closure()->shared(),
- value->language_mode(),
- scope_position);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- CompilationCacheTable* cache =
- reinterpret_cast<CompilationCacheTable*>(obj);
+Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> context, Handle<SharedFunctionInfo> value,
+ int scope_position) {
+ Isolate* isolate = cache->GetIsolate();
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ StringSharedKey key(src, shared, value->strict_mode(), scope_position);
+ cache = EnsureCapacity(cache, 1, &key);
+ Handle<Object> k = key.AsHandle(isolate);
int entry = cache->FindInsertionEntry(key.Hash());
-
- Object* k;
- { MaybeObject* maybe_k = key.AsObject(GetHeap());
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
-
- cache->set(EntryToIndex(entry), k);
- cache->set(EntryToIndex(entry) + 1, value);
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, *value);
cache->ElementAdded();
return cache;
}
-MaybeObject* CompilationCacheTable::PutRegExp(String* src,
- JSRegExp::Flags flags,
- FixedArray* value) {
+Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ JSRegExp::Flags flags, Handle<FixedArray> value) {
RegExpKey key(src, flags);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- CompilationCacheTable* cache =
- reinterpret_cast<CompilationCacheTable*>(obj);
+ cache = EnsureCapacity(cache, 1, &key);
int entry = cache->FindInsertionEntry(key.Hash());
// We store the value in the key slot, and compare the search key
// to the stored value with a custom IsMatch function during lookups.
- cache->set(EntryToIndex(entry), value);
- cache->set(EntryToIndex(entry) + 1, value);
+ cache->set(EntryToIndex(entry), *value);
+ cache->set(EntryToIndex(entry) + 1, *value);
cache->ElementAdded();
return cache;
}
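// [Editor's sketch] Storing the compiled-regexp array in the key slot only
// works because lookups never compare key-to-key: RegExpKey::IsMatch (see
// the earlier hunk) compares the (source, flags) search key against the
// stored FixedArray directly. The asymmetry in miniature:
#include <string>

struct StoredRegExpSketch { std::string source; int flags; };
struct RegExpSearchKeySketch { std::string source; int flags; };

// The search key and the stored entry are different types; match logic
// lives entirely on the lookup side.
bool IsMatchSketch(const RegExpSearchKeySketch& key,
                   const StoredRegExpSketch& stored) {
  return stored.source == key.source && stored.flags == key.flags;
}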
void CompilationCacheTable::Remove(Object* value) {
+ DisallowHeapAllocation no_allocation;
Object* the_hole_value = GetHeap()->the_hole_value();
for (int entry = 0, size = Capacity(); entry < size; entry++) {
int entry_index = EntryToIndex(entry);
@@ -15301,9 +15419,9 @@ void CompilationCacheTable::Remove(Object* value) {
// StringsKey is used for a HashTable whose key is an array of
// internalized strings.
class StringsKey : public HashTableKey {
public:
- explicit StringsKey(FixedArray* strings) : strings_(strings) { }
+ explicit StringsKey(Handle<FixedArray> strings) : strings_(strings) { }
- bool IsMatch(Object* strings) {
+ bool IsMatch(Object* strings) V8_OVERRIDE {
FixedArray* o = FixedArray::cast(strings);
int len = strings_->length();
if (o->length() != len) return false;
@@ -15313,9 +15431,9 @@ class StringsKey : public HashTableKey {
return true;
}
- uint32_t Hash() { return HashForObject(strings_); }
+ uint32_t Hash() V8_OVERRIDE { return HashForObject(*strings_); }
- uint32_t HashForObject(Object* obj) {
+ uint32_t HashForObject(Object* obj) V8_OVERRIDE {
FixedArray* strings = FixedArray::cast(obj);
int len = strings->length();
uint32_t hash = 0;
@@ -15325,96 +15443,79 @@ class StringsKey : public HashTableKey {
return hash;
}
- Object* AsObject(Heap* heap) { return strings_; }
+ Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { return strings_; }
private:
- FixedArray* strings_;
+ Handle<FixedArray> strings_;
};
Object* MapCache::Lookup(FixedArray* array) {
- StringsKey key(array);
+ DisallowHeapAllocation no_alloc;
+ StringsKey key(handle(array));
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
-MaybeObject* MapCache::Put(FixedArray* array, Map* value) {
+Handle<MapCache> MapCache::Put(
+ Handle<MapCache> map_cache, Handle<FixedArray> array, Handle<Map> value) {
StringsKey key(array);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- MapCache* cache = reinterpret_cast<MapCache*>(obj);
- int entry = cache->FindInsertionEntry(key.Hash());
- cache->set(EntryToIndex(entry), array);
- cache->set(EntryToIndex(entry) + 1, value);
- cache->ElementAdded();
- return cache;
+ Handle<MapCache> new_cache = EnsureCapacity(map_cache, 1, &key);
+ int entry = new_cache->FindInsertionEntry(key.Hash());
+ new_cache->set(EntryToIndex(entry), *array);
+ new_cache->set(EntryToIndex(entry) + 1, *value);
+ new_cache->ElementAdded();
+ return new_cache;
}
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::Allocate(Heap* heap,
- int at_least_space_for,
- PretenureFlag pretenure) {
- Object* obj;
- { MaybeObject* maybe_obj =
- HashTable<Shape, Key>::Allocate(
- heap,
- at_least_space_for,
- USE_DEFAULT_MINIMUM_CAPACITY,
- pretenure);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+template<typename Derived, typename Shape, typename Key>
+Handle<Derived> Dictionary<Derived, Shape, Key>::New(
+ Isolate* isolate,
+ int at_least_space_for,
+ PretenureFlag pretenure) {
+ ASSERT(0 <= at_least_space_for);
+ Handle<Derived> dict = DerivedHashTable::New(isolate,
+ at_least_space_for,
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ pretenure);
+
// Initialize the next enumeration index.
- Dictionary<Shape, Key>::cast(obj)->
- SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
- return obj;
+ dict->SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
+ return dict;
}
-void NameDictionary::DoGenerateNewEnumerationIndices(
- Handle<NameDictionary> dictionary) {
- CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(),
- dictionary->GenerateNewEnumerationIndices());
-}
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
- Heap* heap = Dictionary<Shape, Key>::GetHeap();
- int length = HashTable<Shape, Key>::NumberOfElements();
+template<typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::GenerateNewEnumerationIndices(
+ Handle<Derived> dictionary) {
+ Factory* factory = dictionary->GetIsolate()->factory();
+ int length = dictionary->NumberOfElements();
// Allocate and initialize iteration order array.
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* iteration_order = FixedArray::cast(obj);
+ Handle<FixedArray> iteration_order = factory->NewFixedArray(length);
for (int i = 0; i < length; i++) {
iteration_order->set(i, Smi::FromInt(i));
}
// Allocate array with enumeration order.
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* enumeration_order = FixedArray::cast(obj);
+ Handle<FixedArray> enumeration_order = factory->NewFixedArray(length);
// Fill the enumeration order array with property details.
- int capacity = HashTable<Shape, Key>::Capacity();
+ int capacity = dictionary->Capacity();
int pos = 0;
for (int i = 0; i < capacity; i++) {
- if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
- int index = DetailsAt(i).dictionary_index();
+ if (dictionary->IsKey(dictionary->KeyAt(i))) {
+ int index = dictionary->DetailsAt(i).dictionary_index();
enumeration_order->set(pos++, Smi::FromInt(index));
}
}
// Sort the arrays wrt. enumeration order.
- iteration_order->SortPairs(enumeration_order, enumeration_order->length());
+ iteration_order->SortPairs(*enumeration_order, enumeration_order->length());
// Overwrite the enumeration_order with the enumeration indices.
for (int i = 0; i < length; i++) {
@@ -15424,135 +15525,125 @@ MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
}
// Update the dictionary with new indices.
- capacity = HashTable<Shape, Key>::Capacity();
+ capacity = dictionary->Capacity();
pos = 0;
for (int i = 0; i < capacity; i++) {
- if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
+ if (dictionary->IsKey(dictionary->KeyAt(i))) {
int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
- PropertyDetails details = DetailsAt(i);
+ PropertyDetails details = dictionary->DetailsAt(i);
PropertyDetails new_details = PropertyDetails(
details.attributes(), details.type(), enum_index);
- DetailsAtPut(i, new_details);
+ dictionary->DetailsAtPut(i, new_details);
}
}
// Set the next enumeration index.
- SetNextEnumerationIndex(PropertyDetails::kInitialIndex+length);
- return this;
+ dictionary->SetNextEnumerationIndex(PropertyDetails::kInitialIndex+length);
}
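
GenerateNewEnumerationIndices compacts the per-property enumeration indices while preserving their relative order: sort the entry slots by their current index, then hand out dense indices in that order. The same two steps in freestanding form (hypothetical names; std::sort stands in for FixedArray::SortPairs):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      // dictionary_index per entry, sparse and unordered after churn.
      std::vector<int> enum_index = {7, 2, 9};

      // iteration_order starts as the identity permutation over entries.
      std::vector<int> iteration_order = {0, 1, 2};

      // Sort entry slots by their current index (the SortPairs analogue).
      std::sort(iteration_order.begin(), iteration_order.end(),
                [&](int a, int b) { return enum_index[a] < enum_index[b]; });

      // Hand out dense indices in that order, starting at an initial index.
      const int kInitialIndex = 1;
      for (std::size_t pos = 0; pos < iteration_order.size(); ++pos) {
        enum_index[iteration_order[pos]] = kInitialIndex + (int)pos;
      }

      for (int i : enum_index) std::printf("%d ", i);  // prints: 2 1 3
      std::printf("\n");
      return 0;
    }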
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::EnsureCapacity(int n, Key key) {
+
+template<typename Derived, typename Shape, typename Key>
+Handle<Derived> Dictionary<Derived, Shape, Key>::EnsureCapacity(
+ Handle<Derived> dictionary, int n, Key key) {
// Check whether there are enough enumeration indices to add n elements.
if (Shape::kIsEnumerable &&
- !PropertyDetails::IsValidIndex(NextEnumerationIndex() + n)) {
+ !PropertyDetails::IsValidIndex(dictionary->NextEnumerationIndex() + n)) {
// If not, we generate new indices for the properties.
- Object* result;
- { MaybeObject* maybe_result = GenerateNewEnumerationIndices();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ GenerateNewEnumerationIndices(dictionary);
}
- return HashTable<Shape, Key>::EnsureCapacity(n, key);
+ return DerivedHashTable::EnsureCapacity(dictionary, n, key);
}
-template<typename Shape, typename Key>
-Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
- JSReceiver::DeleteMode mode) {
- Heap* heap = Dictionary<Shape, Key>::GetHeap();
- PropertyDetails details = DetailsAt(entry);
+template<typename Derived, typename Shape, typename Key>
+Handle<Object> Dictionary<Derived, Shape, Key>::DeleteProperty(
+ Handle<Derived> dictionary,
+ int entry,
+ JSObject::DeleteMode mode) {
+ Factory* factory = dictionary->GetIsolate()->factory();
+ PropertyDetails details = dictionary->DetailsAt(entry);
// Ignore attributes if forcing a deletion.
if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
- return heap->false_value();
+ return factory->false_value();
}
- SetEntry(entry, heap->the_hole_value(), heap->the_hole_value());
- HashTable<Shape, Key>::ElementRemoved();
- return heap->true_value();
-}
-
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::Shrink(Key key) {
- return HashTable<Shape, Key>::Shrink(key);
+ dictionary->SetEntry(
+ entry, factory->the_hole_value(), factory->the_hole_value());
+ dictionary->ElementRemoved();
+ return factory->true_value();
}
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
- int entry = this->FindEntry(key);
+template<typename Derived, typename Shape, typename Key>
+Handle<Derived> Dictionary<Derived, Shape, Key>::AtPut(
+ Handle<Derived> dictionary, Key key, Handle<Object> value) {
+ int entry = dictionary->FindEntry(key);
// If the entry is present, set the value.
- if (entry != Dictionary<Shape, Key>::kNotFound) {
- ValueAtPut(entry, value);
- return this;
+ if (entry != Dictionary::kNotFound) {
+ dictionary->ValueAtPut(entry, *value);
+ return dictionary;
}
// Check whether the dictionary should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- Object* k;
- { MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key);
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
+ dictionary = EnsureCapacity(dictionary, 1, key);
+#ifdef DEBUG
+ USE(Shape::AsHandle(dictionary->GetIsolate(), key));
+#endif
PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
- return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
- Dictionary<Shape, Key>::Hash(key));
+ AddEntry(dictionary, key, value, details, dictionary->Hash(key));
+ return dictionary;
}
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::Add(Key key,
- Object* value,
- PropertyDetails details) {
+template<typename Derived, typename Shape, typename Key>
+Handle<Derived> Dictionary<Derived, Shape, Key>::Add(
+ Handle<Derived> dictionary,
+ Key key,
+ Handle<Object> value,
+ PropertyDetails details) {
// Validate that the key is absent.
- SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
+ SLOW_ASSERT((dictionary->FindEntry(key) == Dictionary::kNotFound));
// Check whether the dictionary should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ dictionary = EnsureCapacity(dictionary, 1, key);
- return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
- Dictionary<Shape, Key>::Hash(key));
+ AddEntry(dictionary, key, value, details, dictionary->Hash(key));
+ return dictionary;
}
// Add a key-value pair to the dictionary.
-template<typename Shape, typename Key>
-MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key,
- Object* value,
- PropertyDetails details,
- uint32_t hash) {
+template<typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::AddEntry(
+ Handle<Derived> dictionary,
+ Key key,
+ Handle<Object> value,
+ PropertyDetails details,
+ uint32_t hash) {
// Compute the key object.
- Object* k;
- { MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key);
- if (!maybe_k->ToObject(&k)) return maybe_k;
- }
+ Handle<Object> k = Shape::AsHandle(dictionary->GetIsolate(), key);
- uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash);
+ uint32_t entry = dictionary->FindInsertionEntry(hash);
// Insert the element at an empty or deleted entry.
if (!details.IsDeleted() &&
details.dictionary_index() == 0 &&
Shape::kIsEnumerable) {
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
- int index = NextEnumerationIndex();
+ int index = dictionary->NextEnumerationIndex();
details = PropertyDetails(details.attributes(), details.type(), index);
- SetNextEnumerationIndex(index + 1);
+ dictionary->SetNextEnumerationIndex(index + 1);
}
- SetEntry(entry, k, value, details);
- ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber() ||
- Dictionary<Shape, Key>::KeyAt(entry)->IsName()));
- HashTable<Shape, Key>::ElementAdded();
- return this;
+ dictionary->SetEntry(entry, k, value, details);
+ ASSERT((dictionary->KeyAt(entry)->IsNumber() ||
+ dictionary->KeyAt(entry)->IsName()));
+ dictionary->ElementAdded();
}
void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
+ DisallowHeapAllocation no_allocation;
// If the dictionary requires slow elements an element has already
// been added at a high index.
if (requires_slow_elements()) return;
@@ -15570,106 +15661,86 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
}
}
+
Handle<SeededNumberDictionary> SeededNumberDictionary::AddNumberEntry(
Handle<SeededNumberDictionary> dictionary,
uint32_t key,
Handle<Object> value,
PropertyDetails details) {
- CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
- dictionary->AddNumberEntry(key, *value, details),
- SeededNumberDictionary);
-}
-
-MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details) {
- UpdateMaxNumberKey(key);
- SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, details);
+ dictionary->UpdateMaxNumberKey(key);
+ SLOW_ASSERT(dictionary->FindEntry(key) == kNotFound);
+ return Add(dictionary, key, value, details);
}
-MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
- Object* value) {
- SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, PropertyDetails(NONE, NORMAL, 0));
+Handle<UnseededNumberDictionary> UnseededNumberDictionary::AddNumberEntry(
+ Handle<UnseededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value) {
+ SLOW_ASSERT(dictionary->FindEntry(key) == kNotFound);
+ return Add(dictionary, key, value, PropertyDetails(NONE, NORMAL, 0));
}
-MaybeObject* SeededNumberDictionary::AtNumberPut(uint32_t key, Object* value) {
- UpdateMaxNumberKey(key);
- return AtPut(key, value);
+Handle<SeededNumberDictionary> SeededNumberDictionary::AtNumberPut(
+ Handle<SeededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value) {
+ dictionary->UpdateMaxNumberKey(key);
+ return AtPut(dictionary, key, value);
}
-MaybeObject* UnseededNumberDictionary::AtNumberPut(uint32_t key,
- Object* value) {
- return AtPut(key, value);
+Handle<UnseededNumberDictionary> UnseededNumberDictionary::AtNumberPut(
+ Handle<UnseededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value) {
+ return AtPut(dictionary, key, value);
}
Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
Handle<SeededNumberDictionary> dictionary,
- uint32_t index,
+ uint32_t key,
Handle<Object> value,
PropertyDetails details) {
- CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
- dictionary->Set(index, *value, details),
- SeededNumberDictionary);
-}
-
-
-Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
- Handle<UnseededNumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
- dictionary->Set(index, *value),
- UnseededNumberDictionary);
-}
-
-
-MaybeObject* SeededNumberDictionary::Set(uint32_t key,
- Object* value,
- PropertyDetails details) {
- int entry = FindEntry(key);
- if (entry == kNotFound) return AddNumberEntry(key, value, details);
+ int entry = dictionary->FindEntry(key);
+ if (entry == kNotFound) {
+ return AddNumberEntry(dictionary, key, value, details);
+ }
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
- DetailsAt(entry).dictionary_index());
- MaybeObject* maybe_object_key =
- SeededNumberDictionaryShape::AsObject(GetHeap(), key);
- Object* object_key;
- if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
- SetEntry(entry, object_key, value, details);
- return this;
+ dictionary->DetailsAt(entry).dictionary_index());
+ Handle<Object> object_key =
+ SeededNumberDictionaryShape::AsHandle(dictionary->GetIsolate(), key);
+ dictionary->SetEntry(entry, object_key, value, details);
+ return dictionary;
}
-MaybeObject* UnseededNumberDictionary::Set(uint32_t key,
- Object* value) {
- int entry = FindEntry(key);
- if (entry == kNotFound) return AddNumberEntry(key, value);
- MaybeObject* maybe_object_key =
- UnseededNumberDictionaryShape::AsObject(GetHeap(), key);
- Object* object_key;
- if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
- SetEntry(entry, object_key, value);
- return this;
+Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
+ Handle<UnseededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value) {
+ int entry = dictionary->FindEntry(key);
+ if (entry == kNotFound) return AddNumberEntry(dictionary, key, value);
+ Handle<Object> object_key =
+ UnseededNumberDictionaryShape::AsHandle(dictionary->GetIsolate(), key);
+ dictionary->SetEntry(entry, object_key, value);
+ return dictionary;
}
-template<typename Shape, typename Key>
-int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
+template<typename Derived, typename Shape, typename Key>
+int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
PropertyAttributes filter) {
- int capacity = HashTable<Shape, Key>::Capacity();
+ int capacity = DerivedHashTable::Capacity();
int result = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k) &&
- ((filter & SYMBOLIC) == 0 || !k->IsSymbol())) {
+ Object* k = DerivedHashTable::KeyAt(i);
+ if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
@@ -15680,98 +15751,91 @@ int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
}
-template<typename Shape, typename Key>
-int Dictionary<Shape, Key>::NumberOfEnumElements() {
+template<typename Derived, typename Shape, typename Key>
+int Dictionary<Derived, Shape, Key>::NumberOfEnumElements() {
return NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(DONT_ENUM));
+ static_cast<PropertyAttributes>(DONT_ENUM | SYMBOLIC));
}
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyKeysTo(
+template<typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::CopyKeysTo(
FixedArray* storage,
PropertyAttributes filter,
- typename Dictionary<Shape, Key>::SortMode sort_mode) {
- ASSERT(storage->length() >= NumberOfEnumElements());
- int capacity = HashTable<Shape, Key>::Capacity();
+ typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
+ ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter));
+ int capacity = DerivedHashTable::Capacity();
int index = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
+ Object* k = DerivedHashTable::KeyAt(i);
+ if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
if ((attr & filter) == 0) storage->set(index++, k);
}
}
- if (sort_mode == Dictionary<Shape, Key>::SORTED) {
+ if (sort_mode == Dictionary::SORTED) {
storage->SortPairs(storage, index);
}
ASSERT(storage->length() >= index);
}
-FixedArray* NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
+struct EnumIndexComparator {
+ explicit EnumIndexComparator(NameDictionary* dict) : dict(dict) { }
+ bool operator() (Smi* a, Smi* b) {
+ PropertyDetails da(dict->DetailsAt(a->value()));
+ PropertyDetails db(dict->DetailsAt(b->value()));
+ return da.dictionary_index() < db.dictionary_index();
+ }
+ NameDictionary* dict;
+};
+
+
+void NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
int length = storage->length();
- ASSERT(length >= NumberOfEnumElements());
- Heap* heap = GetHeap();
- Object* undefined_value = heap->undefined_value();
int capacity = Capacity();
int properties = 0;
-
- // Fill in the enumeration array by assigning enumerable keys at their
- // enumeration index. This will leave holes in the array if there are keys
- // that are deleted or not enumerable.
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
if (IsKey(k) && !k->IsSymbol()) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted() || details.IsDontEnum()) continue;
+ storage->set(properties, Smi::FromInt(i));
properties++;
- storage->set(details.dictionary_index() - 1, k);
if (properties == length) break;
}
}
-
- // There are holes in the enumeration array if less properties were assigned
- // than the length of the array. If so, crunch all the existing properties
- // together by shifting them to the left (maintaining the enumeration order),
- // and trimming of the right side of the array.
- if (properties < length) {
- if (properties == 0) return heap->empty_fixed_array();
- properties = 0;
- for (int i = 0; i < length; ++i) {
- Object* value = storage->get(i);
- if (value != undefined_value) {
- storage->set(properties, value);
- ++properties;
- }
- }
- RightTrimFixedArray<FROM_MUTATOR>(heap, storage, length - properties);
+ CHECK_EQ(length, properties);
+ EnumIndexComparator cmp(this);
+ Smi** start = reinterpret_cast<Smi**>(storage->GetFirstElementAddress());
+ std::sort(start, start + length, cmp);
+ for (int i = 0; i < length; i++) {
+ int index = Smi::cast(storage->get(i))->value();
+ storage->set(i, KeyAt(index));
}
- return storage;
}
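
The rewritten CopyEnumKeysTo no longer scatters keys by their stored enumeration index; it collects the live entry slots, sorts them with EnumIndexComparator, and rewrites the scratch array in place. A compact stand-alone rendering of those three phases (toy data, illustrative names):

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      // Parallel arrays standing in for dictionary entries: the key and the
      // enumeration index recorded in that entry's PropertyDetails.
      std::vector<std::string> keys = {"c", "a", "b"};
      std::vector<int> enum_index  = {3,   1,   2};

      // Phase 1: collect the live entry slots (here: all three).
      std::vector<int> storage = {0, 1, 2};

      // Phase 2: sort slots by enumeration index (EnumIndexComparator).
      std::sort(storage.begin(), storage.end(),
                [&](int a, int b) { return enum_index[a] < enum_index[b]; });

      // Phase 3: rewrite each slot number with its key, now in enum order.
      for (int slot : storage) std::printf("%s ", keys[slot].c_str());
      std::printf("\n");  // prints: a b c
      return 0;
    }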
-template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyKeysTo(
+template<typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::CopyKeysTo(
FixedArray* storage,
int index,
PropertyAttributes filter,
- typename Dictionary<Shape, Key>::SortMode sort_mode) {
- ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(NONE)));
- int capacity = HashTable<Shape, Key>::Capacity();
+ typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
+ ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter));
+ int capacity = DerivedHashTable::Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
+ Object* k = DerivedHashTable::KeyAt(i);
+ if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
if ((attr & filter) == 0) storage->set(index++, k);
}
}
- if (sort_mode == Dictionary<Shape, Key>::SORTED) {
+ if (sort_mode == Dictionary::SORTED) {
storage->SortPairs(storage, index);
}
ASSERT(storage->length() >= index);
@@ -15779,12 +15843,12 @@ void Dictionary<Shape, Key>::CopyKeysTo(
// Backwards lookup (slow).
-template<typename Shape, typename Key>
-Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
- int capacity = HashTable<Shape, Key>::Capacity();
+template<typename Derived, typename Shape, typename Key>
+Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) {
+ int capacity = DerivedHashTable::Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (Dictionary<Shape, Key>::IsKey(k)) {
+ Object* k = DerivedHashTable::KeyAt(i);
+ if (Dictionary::IsKey(k)) {
Object* e = ValueAt(i);
if (e->IsPropertyCell()) {
e = PropertyCell::cast(e)->value();
@@ -15792,344 +15856,562 @@ Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
if (e == value) return k;
}
}
- Heap* heap = Dictionary<Shape, Key>::GetHeap();
+ Heap* heap = Dictionary::GetHeap();
return heap->undefined_value();
}
-MaybeObject* NameDictionary::TransformPropertiesToFastFor(
- JSObject* obj, int unused_property_fields) {
- // Make sure we preserve dictionary representation if there are too many
- // descriptors.
- int number_of_elements = NumberOfElements();
- if (number_of_elements > kMaxNumberOfDescriptors) return obj;
+Object* ObjectHashTable::Lookup(Handle<Object> key) {
+ DisallowHeapAllocation no_gc;
+ ASSERT(IsKey(*key));
- if (number_of_elements != NextEnumerationIndex()) {
- MaybeObject* maybe_result = GenerateNewEnumerationIndices();
- if (maybe_result->IsFailure()) return maybe_result;
+ // If the object does not have an identity hash, it was never used as a key.
+ Object* hash = key->GetHash();
+ if (hash->IsUndefined()) {
+ return GetHeap()->the_hole_value();
}
+ int entry = FindEntry(key);
+ if (entry == kNotFound) return GetHeap()->the_hole_value();
+ return get(EntryToIndex(entry) + 1);
+}
- int instance_descriptor_length = 0;
- int number_of_fields = 0;
- Heap* heap = GetHeap();
+Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ ASSERT(table->IsKey(*key));
+ ASSERT(!value->IsTheHole());
- // Compute the length of the instance descriptor.
- int capacity = Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
- Object* value = ValueAt(i);
- PropertyType type = DetailsAt(i).type();
- ASSERT(type != FIELD);
- instance_descriptor_length++;
- if (type == NORMAL && !value->IsJSFunction()) {
- number_of_fields += 1;
- }
- }
- }
+ Isolate* isolate = table->GetIsolate();
- int inobject_props = obj->map()->inobject_properties();
+ // Make sure the key object has an identity hash code.
+ Handle<Smi> hash = Object::GetOrCreateHash(isolate, key);
- // Allocate new map.
- Map* new_map;
- MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- new_map->set_dictionary_map(false);
+ int entry = table->FindEntry(key);
- if (instance_descriptor_length == 0) {
- ASSERT_LE(unused_property_fields, inobject_props);
- // Transform the object.
- new_map->set_unused_property_fields(inobject_props);
- obj->set_map(new_map);
- obj->set_properties(heap->empty_fixed_array());
- // Check that it really works.
- ASSERT(obj->HasFastProperties());
- return obj;
+ // Key is already in table, just overwrite value.
+ if (entry != kNotFound) {
+ table->set(EntryToIndex(entry) + 1, *value);
+ return table;
}
- // Allocate the instance descriptor.
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(GetIsolate(), instance_descriptor_length);
- if (!maybe_descriptors->To(&descriptors)) {
- return maybe_descriptors;
- }
+ // Check whether the hash table should be extended.
+ table = EnsureCapacity(table, 1, key);
+ table->AddEntry(table->FindInsertionEntry(hash->value()),
+ *key,
+ *value);
+ return table;
+}
- DescriptorArray::WhitenessWitness witness(descriptors);
- int number_of_allocated_fields =
- number_of_fields + unused_property_fields - inobject_props;
- if (number_of_allocated_fields < 0) {
- // There is enough inobject space for all fields (including unused).
- number_of_allocated_fields = 0;
- unused_property_fields = inobject_props - number_of_fields;
+Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ bool* was_present) {
+ ASSERT(table->IsKey(*key));
+
+ Object* hash = key->GetHash();
+ if (hash->IsUndefined()) {
+ *was_present = false;
+ return table;
}
- // Allocate the fixed array for the fields.
- FixedArray* fields;
- MaybeObject* maybe_fields =
- heap->AllocateFixedArray(number_of_allocated_fields);
- if (!maybe_fields->To(&fields)) return maybe_fields;
+ int entry = table->FindEntry(key);
+ if (entry == kNotFound) {
+ *was_present = false;
+ return table;
+ }
- // Fill in the instance descriptor and the fields.
- int current_offset = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
- Object* value = ValueAt(i);
- Name* key;
- if (k->IsSymbol()) {
- key = Symbol::cast(k);
- } else {
- // Ensure the key is a unique name before writing into the
- // instance descriptor.
- MaybeObject* maybe_key = heap->InternalizeString(String::cast(k));
- if (!maybe_key->To(&key)) return maybe_key;
- }
+ *was_present = true;
+ table->RemoveEntry(entry);
+ return Shrink(table, key);
+}
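
Remove above follows a pattern worth noting: presence is reported through a bool out parameter, and when the key has no identity hash yet the lookup is skipped outright, since such a key can never have been inserted. A toy sketch of that contract (assumed names, std::unordered_map in place of the V8 table):

    #include <cstdio>
    #include <optional>
    #include <unordered_map>

    struct Key {
      std::optional<std::size_t> hash;  // "identity hash", lazily created
      int id;
    };

    void Remove(std::unordered_map<int, int>& table, const Key& key,
                bool* was_present) {
      if (!key.hash.has_value()) {      // never hashed => never inserted
        *was_present = false;
        return;
      }
      *was_present = table.erase(key.id) > 0;
    }

    int main() {
      std::unordered_map<int, int> table = {{1, 10}};
      bool present = false;
      Remove(table, Key{std::nullopt, 1}, &present);    // fast path, no lookup
      std::printf("%d", present);                       // prints: 0
      Remove(table, Key{std::size_t{7}, 1}, &present);  // real removal
      std::printf("%d\n", present);                     // prints: 1
      return 0;
    }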
- PropertyDetails details = DetailsAt(i);
- int enumeration_index = details.dictionary_index();
- PropertyType type = details.type();
- if (value->IsJSFunction()) {
- ConstantDescriptor d(key, value, details.attributes());
- descriptors->Set(enumeration_index - 1, &d, witness);
- } else if (type == NORMAL) {
- if (current_offset < inobject_props) {
- obj->InObjectPropertyAtPut(current_offset,
- value,
- UPDATE_WRITE_BARRIER);
- } else {
- int offset = current_offset - inobject_props;
- fields->set(offset, value);
- }
- FieldDescriptor d(key,
- current_offset++,
- details.attributes(),
- // TODO(verwaest): value->OptimalRepresentation();
- Representation::Tagged());
- descriptors->Set(enumeration_index - 1, &d, witness);
- } else if (type == CALLBACKS) {
- CallbacksDescriptor d(key,
- value,
- details.attributes());
- descriptors->Set(enumeration_index - 1, &d, witness);
- } else {
- UNREACHABLE();
- }
- }
- }
- ASSERT(current_offset == number_of_fields);
+void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) {
+ set(EntryToIndex(entry), key);
+ set(EntryToIndex(entry) + 1, value);
+ ElementAdded();
+}
- descriptors->Sort();
- new_map->InitializeDescriptors(descriptors);
- new_map->set_unused_property_fields(unused_property_fields);
+void ObjectHashTable::RemoveEntry(int entry) {
+ set_the_hole(EntryToIndex(entry));
+ set_the_hole(EntryToIndex(entry) + 1);
+ ElementRemoved();
+}
- // Transform the object.
- obj->set_map(new_map);
- obj->set_properties(fields);
- ASSERT(obj->IsJSObject());
+Object* WeakHashTable::Lookup(Handle<Object> key) {
+ DisallowHeapAllocation no_gc;
+ ASSERT(IsKey(*key));
+ int entry = FindEntry(key);
+ if (entry == kNotFound) return GetHeap()->the_hole_value();
+ return get(EntryToValueIndex(entry));
+}
- // Check that it really works.
- ASSERT(obj->HasFastProperties());
- return obj;
-}
+Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ ASSERT(table->IsKey(*key));
+ int entry = table->FindEntry(key);
+ // Key is already in table, just overwrite value.
+ if (entry != kNotFound) {
+ // TODO(ulan): Skipping write barrier is a temporary solution to avoid
+ // memory leaks. Remove this once we have a special visitor for weak
+ // fixed arrays.
+ table->set(EntryToValueIndex(entry), *value, SKIP_WRITE_BARRIER);
+ return table;
+ }
+ // Check whether the hash table should be extended.
+ table = EnsureCapacity(table, 1, key, TENURED);
-Handle<ObjectHashSet> ObjectHashSet::EnsureCapacity(
- Handle<ObjectHashSet> table,
- int n,
- Handle<Object> key,
- PretenureFlag pretenure) {
- Handle<HashTable<ObjectHashTableShape<1>, Object*> > table_base = table;
- CALL_HEAP_FUNCTION(table_base->GetIsolate(),
- table_base->EnsureCapacity(n, *key, pretenure),
- ObjectHashSet);
+ table->AddEntry(table->FindInsertionEntry(table->Hash(key)), key, value);
+ return table;
}
-Handle<ObjectHashSet> ObjectHashSet::Shrink(Handle<ObjectHashSet> table,
- Handle<Object> key) {
- Handle<HashTable<ObjectHashTableShape<1>, Object*> > table_base = table;
- CALL_HEAP_FUNCTION(table_base->GetIsolate(),
- table_base->Shrink(*key),
- ObjectHashSet);
+void WeakHashTable::AddEntry(int entry,
+ Handle<Object> key,
+ Handle<Object> value) {
+ DisallowHeapAllocation no_allocation;
+ // TODO(ulan): Skipping write barrier is a temporary solution to avoid
+ // memory leaks. Remove this once we have a special visitor for weak
+ // fixed arrays.
+ set(EntryToIndex(entry), *key, SKIP_WRITE_BARRIER);
+ set(EntryToValueIndex(entry), *value, SKIP_WRITE_BARRIER);
+ ElementAdded();
}
-bool ObjectHashSet::Contains(Object* key) {
- ASSERT(IsKey(key));
+template<class Derived, class Iterator, int entrysize>
+Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Allocate(
+ Isolate* isolate, int capacity, PretenureFlag pretenure) {
+ // Capacity must be a power of two, since we depend on being able
+ // to divide and multiply by 2 (kLoadFactor) to derive the capacity
+ // from the number of buckets. If we decide to change kLoadFactor
+ // to something other than 2, capacity should be stored as another
+ // field of this object.
+ capacity = RoundUpToPowerOf2(Max(kMinCapacity, capacity));
+ if (capacity > kMaxCapacity) {
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
+ }
+ int num_buckets = capacity / kLoadFactor;
+ Handle<FixedArray> backing_store = isolate->factory()->NewFixedArray(
+ kHashTableStartIndex + num_buckets + (capacity * kEntrySize), pretenure);
+ backing_store->set_map_no_write_barrier(
+ isolate->heap()->ordered_hash_table_map());
+ Handle<Derived> table = Handle<Derived>::cast(backing_store);
+ for (int i = 0; i < num_buckets; ++i) {
+ table->set(kHashTableStartIndex + i, Smi::FromInt(kNotFound));
+ }
+ table->SetNumberOfBuckets(num_buckets);
+ table->SetNumberOfElements(0);
+ table->SetNumberOfDeletedElements(0);
+ return table;
+}
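
Allocate sizes the single backing FixedArray from three pieces: the bookkeeping fields, one bucket head per capacity/kLoadFactor buckets, and capacity entries of entrysize data words plus one chain word each. The arithmetic, spelled out with illustrative constants (the real offsets live in objects.h; the values below are assumptions made for the sketch):

    #include <cstdio>

    // Assumed values for illustration only.
    const int kLoadFactor = 2;
    const int kHashTableStartIndex = 3;  // bookkeeping fields up front
    const int kEntrySize = 1 + 1;        // OrderedHashSet: key + chain link

    int RoundUpToPowerOf2(int x) {
      int p = 1;
      while (p < x) p <<= 1;
      return p;
    }

    int main() {
      int capacity = RoundUpToPowerOf2(10);      // -> 16
      int num_buckets = capacity / kLoadFactor;  // -> 8 bucket heads
      int backing_length =
          kHashTableStartIndex + num_buckets + capacity * kEntrySize;
      std::printf("capacity=%d buckets=%d length=%d\n",
                  capacity, num_buckets, backing_length);  // 16 8 43
      return 0;
    }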
- // If the object does not have an identity hash, it was never used as a key.
- Object* hash = key->GetHash();
- if (hash->IsUndefined()) return false;
- return (FindEntry(key) != kNotFound);
+template<class Derived, class Iterator, int entrysize>
+Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::EnsureGrowable(
+ Handle<Derived> table) {
+ ASSERT(!table->IsObsolete());
+
+ int nof = table->NumberOfElements();
+ int nod = table->NumberOfDeletedElements();
+ int capacity = table->Capacity();
+ if ((nof + nod) < capacity) return table;
+ // Don't need to grow if we can simply clear out deleted entries instead.
+ // Note that we can't compact in place, though, so we always allocate
+ // a new table.
+ return Rehash(table, (nod < (capacity >> 1)) ? capacity << 1 : capacity);
}
-Handle<ObjectHashSet> ObjectHashSet::Add(Handle<ObjectHashSet> table,
- Handle<Object> key) {
- ASSERT(table->IsKey(*key));
+template<class Derived, class Iterator, int entrysize>
+Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Shrink(
+ Handle<Derived> table) {
+ ASSERT(!table->IsObsolete());
- // Make sure the key object has an identity hash code.
- Handle<Object> object_hash = Object::GetOrCreateHash(key,
- table->GetIsolate());
+ int nof = table->NumberOfElements();
+ int capacity = table->Capacity();
+ if (nof >= (capacity >> 2)) return table;
+ return Rehash(table, capacity / 2);
+}
- int entry = table->FindEntry(*key);
- // Check whether key is already present.
- if (entry != kNotFound) return table;
+template<class Derived, class Iterator, int entrysize>
+Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Clear(
+ Handle<Derived> table) {
+ ASSERT(!table->IsObsolete());
+
+ Handle<Derived> new_table =
+ Allocate(table->GetIsolate(),
+ kMinCapacity,
+ table->GetHeap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
+
+ table->SetNextTable(*new_table);
+ table->SetNumberOfDeletedElements(-1);
- // Check whether the hash set should be extended and add entry.
- Handle<ObjectHashSet> new_table =
- ObjectHashSet::EnsureCapacity(table, 1, key);
- entry = new_table->FindInsertionEntry(Smi::cast(*object_hash)->value());
- new_table->set(EntryToIndex(entry), *key);
- new_table->ElementAdded();
return new_table;
}
-Handle<ObjectHashSet> ObjectHashSet::Remove(Handle<ObjectHashSet> table,
- Handle<Object> key) {
- ASSERT(table->IsKey(*key));
+template<class Derived, class Iterator, int entrysize>
+Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Remove(
+ Handle<Derived> table, Handle<Object> key, bool* was_present) {
+ int entry = table->FindEntry(key);
+ if (entry == kNotFound) {
+ *was_present = false;
+ return table;
+ }
+ *was_present = true;
+ table->RemoveEntry(entry);
+ return Shrink(table);
+}
- // If the object does not have an identity hash, it was never used as a key.
- if (key->GetHash()->IsUndefined()) return table;
- int entry = table->FindEntry(*key);
+template<class Derived, class Iterator, int entrysize>
+Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Rehash(
+ Handle<Derived> table, int new_capacity) {
+ ASSERT(!table->IsObsolete());
- // Check whether key is actually present.
- if (entry == kNotFound) return table;
+ Handle<Derived> new_table =
+ Allocate(table->GetIsolate(),
+ new_capacity,
+ table->GetHeap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
+ int nof = table->NumberOfElements();
+ int nod = table->NumberOfDeletedElements();
+ int new_buckets = new_table->NumberOfBuckets();
+ int new_entry = 0;
+ int removed_holes_index = 0;
- // Remove entry and try to shrink this hash set.
- table->set_the_hole(EntryToIndex(entry));
- table->ElementRemoved();
+ for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
+ Object* key = table->KeyAt(old_entry);
+ if (key->IsTheHole()) {
+ table->SetRemovedIndexAt(removed_holes_index++, old_entry);
+ continue;
+ }
+
+ Object* hash = key->GetHash();
+ int bucket = Smi::cast(hash)->value() & (new_buckets - 1);
+ Object* chain_entry = new_table->get(kHashTableStartIndex + bucket);
+ new_table->set(kHashTableStartIndex + bucket, Smi::FromInt(new_entry));
+ int new_index = new_table->EntryToIndex(new_entry);
+ int old_index = table->EntryToIndex(old_entry);
+ for (int i = 0; i < entrysize; ++i) {
+ Object* value = table->get(old_index + i);
+ new_table->set(new_index + i, value);
+ }
+ new_table->set(new_index + kChainOffset, chain_entry);
+ ++new_entry;
+ }
+
+ ASSERT_EQ(nod, removed_holes_index);
- return ObjectHashSet::Shrink(table, key);
+ new_table->SetNumberOfElements(nof);
+ table->SetNextTable(*new_table);
+
+ return new_table;
}
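
Rehash rebuilds the bucket chains by prepending: each surviving entry's bucket is its hash masked by (buckets - 1), the old bucket head becomes the entry's chain link, and the entry becomes the new head. The same chain construction in isolation (a standalone sketch, not V8 code):

    #include <cstdio>
    #include <vector>

    int main() {
      const int kNotFound = -1;
      const int new_buckets = 4;  // must be a power of two
      std::vector<int> bucket_head(new_buckets, kNotFound);
      std::vector<int> chain;     // one chain link per entry

      std::vector<int> hashes = {5, 9, 2};  // precomputed key hashes
      for (int h : hashes) {
        int bucket = h & (new_buckets - 1);   // HashToBucket
        int entry = (int)chain.size();
        chain.push_back(bucket_head[bucket]); // link to the old head
        bucket_head[bucket] = entry;          // entry becomes the new head
      }

      // Walk bucket 1: hashes 5 and 9 both land there; 9 was inserted last.
      for (int e = bucket_head[1]; e != kNotFound; e = chain[e])
        std::printf("entry %d\n", e);         // prints entry 1, then entry 0
      return 0;
    }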
-Handle<ObjectHashTable> ObjectHashTable::EnsureCapacity(
- Handle<ObjectHashTable> table,
- int n,
- Handle<Object> key,
- PretenureFlag pretenure) {
- Handle<HashTable<ObjectHashTableShape<2>, Object*> > table_base = table;
- CALL_HEAP_FUNCTION(table_base->GetIsolate(),
- table_base->EnsureCapacity(n, *key, pretenure),
- ObjectHashTable);
+template<class Derived, class Iterator, int entrysize>
+int OrderedHashTable<Derived, Iterator, entrysize>::FindEntry(
+ Handle<Object> key) {
+ ASSERT(!IsObsolete());
+
+ DisallowHeapAllocation no_gc;
+ ASSERT(!key->IsTheHole());
+ Object* hash = key->GetHash();
+ if (hash->IsUndefined()) return kNotFound;
+ for (int entry = HashToEntry(Smi::cast(hash)->value());
+ entry != kNotFound;
+ entry = ChainAt(entry)) {
+ Object* candidate = KeyAt(entry);
+ if (candidate->SameValueZero(*key))
+ return entry;
+ }
+ return kNotFound;
}
-Handle<ObjectHashTable> ObjectHashTable::Shrink(
- Handle<ObjectHashTable> table, Handle<Object> key) {
- Handle<HashTable<ObjectHashTableShape<2>, Object*> > table_base = table;
- CALL_HEAP_FUNCTION(table_base->GetIsolate(),
- table_base->Shrink(*key),
- ObjectHashTable);
+template<class Derived, class Iterator, int entrysize>
+int OrderedHashTable<Derived, Iterator, entrysize>::AddEntry(int hash) {
+ ASSERT(!IsObsolete());
+
+ int entry = UsedCapacity();
+ int bucket = HashToBucket(hash);
+ int index = EntryToIndex(entry);
+ Object* chain_entry = get(kHashTableStartIndex + bucket);
+ set(kHashTableStartIndex + bucket, Smi::FromInt(entry));
+ set(index + kChainOffset, chain_entry);
+ SetNumberOfElements(NumberOfElements() + 1);
+ return index;
}
-Object* ObjectHashTable::Lookup(Object* key) {
- ASSERT(IsKey(key));
+template<class Derived, class Iterator, int entrysize>
+void OrderedHashTable<Derived, Iterator, entrysize>::RemoveEntry(int entry) {
+ ASSERT(!IsObsolete());
- // If the object does not have an identity hash, it was never used as a key.
- Object* hash = key->GetHash();
- if (hash->IsUndefined()) {
- return GetHeap()->the_hole_value();
+ int index = EntryToIndex(entry);
+ for (int i = 0; i < entrysize; ++i) {
+ set_the_hole(index + i);
}
- int entry = FindEntry(key);
- if (entry == kNotFound) return GetHeap()->the_hole_value();
- return get(EntryToIndex(entry) + 1);
+ SetNumberOfElements(NumberOfElements() - 1);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
}
-Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
- Handle<Object> key,
- Handle<Object> value) {
- ASSERT(table->IsKey(*key));
+template Handle<OrderedHashSet>
+OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Allocate(
+ Isolate* isolate, int capacity, PretenureFlag pretenure);
- Isolate* isolate = table->GetIsolate();
+template Handle<OrderedHashSet>
+OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::EnsureGrowable(
+ Handle<OrderedHashSet> table);
- // Make sure the key object has an identity hash code.
- Handle<Object> hash = Object::GetOrCreateHash(key, isolate);
+template Handle<OrderedHashSet>
+OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Shrink(
+ Handle<OrderedHashSet> table);
- int entry = table->FindEntry(*key);
+template Handle<OrderedHashSet>
+OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Clear(
+ Handle<OrderedHashSet> table);
- // Check whether to perform removal operation.
- if (value->IsTheHole()) {
- if (entry == kNotFound) return table;
- table->RemoveEntry(entry);
- return Shrink(table, key);
- }
+template Handle<OrderedHashSet>
+OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Remove(
+ Handle<OrderedHashSet> table, Handle<Object> key, bool* was_present);
- // Key is already in table, just overwrite value.
- if (entry != kNotFound) {
- table->set(EntryToIndex(entry) + 1, *value);
- return table;
- }
+template int
+OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::FindEntry(
+ Handle<Object> key);
- // Check whether the hash table should be extended.
- table = EnsureCapacity(table, 1, key);
- table->AddEntry(table->FindInsertionEntry(Handle<Smi>::cast(hash)->value()),
- *key,
- *value);
- return table;
-}
+template int
+OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::AddEntry(int hash);
+template void
+OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::RemoveEntry(int entry);
-void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) {
- set(EntryToIndex(entry), key);
- set(EntryToIndex(entry) + 1, value);
- ElementAdded();
+
+template Handle<OrderedHashMap>
+OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Allocate(
+ Isolate* isolate, int capacity, PretenureFlag pretenure);
+
+template Handle<OrderedHashMap>
+OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::EnsureGrowable(
+ Handle<OrderedHashMap> table);
+
+template Handle<OrderedHashMap>
+OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Shrink(
+ Handle<OrderedHashMap> table);
+
+template Handle<OrderedHashMap>
+OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Clear(
+ Handle<OrderedHashMap> table);
+
+template Handle<OrderedHashMap>
+OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Remove(
+ Handle<OrderedHashMap> table, Handle<Object> key, bool* was_present);
+
+template int
+OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::FindEntry(
+ Handle<Object> key);
+
+template int
+OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::AddEntry(int hash);
+
+template void
+OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::RemoveEntry(int entry);
+
+
+bool OrderedHashSet::Contains(Handle<Object> key) {
+ return FindEntry(key) != kNotFound;
}
-void ObjectHashTable::RemoveEntry(int entry) {
- set_the_hole(EntryToIndex(entry));
- set_the_hole(EntryToIndex(entry) + 1);
- ElementRemoved();
+Handle<OrderedHashSet> OrderedHashSet::Add(Handle<OrderedHashSet> table,
+ Handle<Object> key) {
+ if (table->FindEntry(key) != kNotFound) return table;
+
+ table = EnsureGrowable(table);
+
+ Handle<Smi> hash = GetOrCreateHash(table->GetIsolate(), key);
+ int index = table->AddEntry(hash->value());
+ table->set(index, *key);
+ return table;
}
-Object* WeakHashTable::Lookup(Object* key) {
- ASSERT(IsKey(key));
+Object* OrderedHashMap::Lookup(Handle<Object> key) {
+ DisallowHeapAllocation no_gc;
int entry = FindEntry(key);
if (entry == kNotFound) return GetHeap()->the_hole_value();
- return get(EntryToValueIndex(entry));
+ return ValueAt(entry);
}
-MaybeObject* WeakHashTable::Put(Object* key, Object* value) {
- ASSERT(IsKey(key));
- int entry = FindEntry(key);
- // Key is already in table, just overwrite value.
+Handle<OrderedHashMap> OrderedHashMap::Put(Handle<OrderedHashMap> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ ASSERT(!key->IsTheHole());
+
+ int entry = table->FindEntry(key);
+
if (entry != kNotFound) {
- set(EntryToValueIndex(entry), value);
- return this;
+ table->set(table->EntryToIndex(entry) + kValueOffset, *value);
+ return table;
}
- // Check whether the hash table should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key, TENURED);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- WeakHashTable* table = WeakHashTable::cast(obj);
- table->AddEntry(table->FindInsertionEntry(Hash(key)), key, value);
+ table = EnsureGrowable(table);
+
+ Handle<Smi> hash = GetOrCreateHash(table->GetIsolate(), key);
+ int index = table->AddEntry(hash->value());
+ table->set(index, *key);
+ table->set(index + kValueOffset, *value);
return table;
}
-void WeakHashTable::AddEntry(int entry, Object* key, Object* value) {
- set(EntryToIndex(entry), key);
- set(EntryToValueIndex(entry), value);
- ElementAdded();
+template<class Derived, class TableType>
+Handle<JSObject> OrderedHashTableIterator<Derived, TableType>::Next(
+ Handle<Derived> iterator) {
+ Isolate* isolate = iterator->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ Handle<Object> maybe_table(iterator->table(), isolate);
+ if (!maybe_table->IsUndefined()) {
+ iterator->Transition();
+
+ Handle<TableType> table(TableType::cast(iterator->table()), isolate);
+ int index = Smi::cast(iterator->index())->value();
+ int used_capacity = table->UsedCapacity();
+
+ while (index < used_capacity && table->KeyAt(index)->IsTheHole()) {
+ index++;
+ }
+
+ if (index < used_capacity) {
+ int entry_index = table->EntryToIndex(index);
+ Handle<Object> value =
+ Derived::ValueForKind(iterator, entry_index);
+ iterator->set_index(Smi::FromInt(index + 1));
+ return factory->NewIteratorResultObject(value, false);
+ }
+
+ iterator->set_table(iterator->GetHeap()->undefined_value());
+ }
+
+ return factory->NewIteratorResultObject(factory->undefined_value(), true);
+}
+
+
+template<class Derived, class TableType>
+void OrderedHashTableIterator<Derived, TableType>::Transition() {
+ Isolate* isolate = GetIsolate();
+ Handle<TableType> table(TableType::cast(this->table()), isolate);
+ if (!table->IsObsolete()) return;
+
+ int index = Smi::cast(this->index())->value();
+ while (table->IsObsolete()) {
+ Handle<TableType> next_table(table->NextTable(), isolate);
+
+ if (index > 0) {
+ int nod = table->NumberOfDeletedElements();
+
+ // When we clear the table we set the number of deleted elements to -1.
+ if (nod == -1) {
+ index = 0;
+ } else {
+ int old_index = index;
+ for (int i = 0; i < nod; ++i) {
+ int removed_index = table->RemovedIndexAt(i);
+ if (removed_index >= old_index) break;
+ --index;
+ }
+ }
+ }
+
+ table = next_table;
+ }
+
+ set_table(*table);
+ set_index(Smi::FromInt(index));
+}
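
The index fix-up in Transition is subtle: when the iterator's table has been rehashed away, every entry that was removed at a position before the iterator shifts it left by one, and a cleared table (NumberOfDeletedElements == -1) resets it to zero. The same adjustment, testable in isolation (names are illustrative):

    #include <cassert>
    #include <vector>

    // removed holds the removed entry positions in ascending order;
    // nod mirrors NumberOfDeletedElements, with -1 meaning "cleared".
    int AdjustIndex(int index, const std::vector<int>& removed, int nod) {
      if (nod == -1) return 0;               // table was cleared
      int old_index = index;
      for (int i = 0; i < nod; ++i) {
        if (removed[i] >= old_index) break;  // later removals don't matter
        --index;
      }
      return index;
    }

    int main() {
      // Iterator stood at entry 5; entries 1 and 3 were removed before it.
      assert(AdjustIndex(5, {1, 3}, 2) == 3);
      assert(AdjustIndex(5, {}, -1) == 0);   // the Clear() case
      return 0;
    }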
+
+
+template Handle<JSObject>
+OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::Next(
+ Handle<JSSetIterator> iterator);
+
+template void
+OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::Transition();
+
+
+template Handle<JSObject>
+OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::Next(
+ Handle<JSMapIterator> iterator);
+
+template void
+OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::Transition();
+
+
+Handle<Object> JSSetIterator::ValueForKind(
+ Handle<JSSetIterator> iterator, int entry_index) {
+ int kind = iterator->kind()->value();
+ // Set.prototype only has values and entries.
+ ASSERT(kind == kKindValues || kind == kKindEntries);
+
+ Isolate* isolate = iterator->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ Handle<OrderedHashSet> table(
+ OrderedHashSet::cast(iterator->table()), isolate);
+ Handle<Object> value = Handle<Object>(table->get(entry_index), isolate);
+
+ if (kind == kKindEntries) {
+ Handle<FixedArray> array = factory->NewFixedArray(2);
+ array->set(0, *value);
+ array->set(1, *value);
+ return factory->NewJSArrayWithElements(array);
+ }
+
+ return value;
+}
+
+
+Handle<Object> JSMapIterator::ValueForKind(
+ Handle<JSMapIterator> iterator, int entry_index) {
+ int kind = iterator->kind()->value();
+ ASSERT(kind == kKindKeys || kind == kKindValues || kind == kKindEntries);
+
+ Isolate* isolate = iterator->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ Handle<OrderedHashMap> table(
+ OrderedHashMap::cast(iterator->table()), isolate);
+
+ switch (kind) {
+ case kKindKeys:
+ return Handle<Object>(table->get(entry_index), isolate);
+
+ case kKindValues:
+ return Handle<Object>(table->get(entry_index + 1), isolate);
+
+ case kKindEntries: {
+ Handle<Object> key(table->get(entry_index), isolate);
+ Handle<Object> value(table->get(entry_index + 1), isolate);
+ Handle<FixedArray> array = factory->NewFixedArray(2);
+ array->set(0, *key);
+ array->set(1, *value);
+ return factory->NewJSArrayWithElements(array);
+ }
+ }
+
+ UNREACHABLE();
+ return factory->undefined_value();
}
@@ -16173,7 +16455,7 @@ Handle<DeclaredAccessorDescriptor> DeclaredAccessorDescriptor::Create(
if (previous_length != 0) {
uint8_t* previous_array =
previous->serialized_data()->GetDataStartAddress();
- OS::MemCopy(array, previous_array, previous_length);
+ MemCopy(array, previous_array, previous_length);
array += previous_length;
}
ASSERT(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0);
@@ -16185,7 +16467,6 @@ Handle<DeclaredAccessorDescriptor> DeclaredAccessorDescriptor::Create(
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Check if there is a break point at this code position.
bool DebugInfo::HasBreakPoint(int code_position) {
// Get the break point info object for this code position.
@@ -16253,7 +16534,7 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
Handle<FixedArray> new_break_points =
isolate->factory()->NewFixedArray(
old_break_points->length() +
- Debug::kEstimatedNofBreakPointsInFunction);
+ DebugInfo::kEstimatedNofBreakPointsInFunction);
debug_info->set_break_points(*new_break_points);
for (int i = 0; i < old_break_points->length(); i++) {
@@ -16439,7 +16720,6 @@ int BreakPointInfo::GetBreakPointCount() {
// Multiple break points.
return FixedArray::cast(break_point_objects())->length();
}
-#endif // ENABLE_DEBUGGER_SUPPORT
Object* JSDate::GetField(Object* object, Smi* index) {
@@ -16459,7 +16739,7 @@ Object* JSDate::DoGetField(FieldIndex index) {
// Since the stamp is not NaN, the value is also not NaN.
int64_t local_time_ms =
date_cache->ToLocal(static_cast<int64_t>(value()->Number()));
- SetLocalFields(local_time_ms, date_cache);
+ SetCachedFields(local_time_ms, date_cache);
}
switch (index) {
case kYear: return year();
@@ -16552,7 +16832,7 @@ void JSDate::SetValue(Object* value, bool is_value_nan) {
}
-void JSDate::SetLocalFields(int64_t local_time_ms, DateCache* date_cache) {
+void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
int days = DateCache::DaysFromTime(local_time_ms);
int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
int year, month, day;
@@ -16597,26 +16877,85 @@ void JSTypedArray::Neuter() {
}
-Type* PropertyCell::type() {
- return static_cast<Type*>(type_raw());
+static ElementsKind FixedToExternalElementsKind(ElementsKind elements_kind) {
+ switch (elements_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: return EXTERNAL_##TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ default:
+ UNREACHABLE();
+ return FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+ }
}
-void PropertyCell::set_type(Type* type, WriteBarrierMode ignored) {
+Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
+ Handle<JSTypedArray> typed_array) {
+
+ Handle<Map> map(typed_array->map());
+ Isolate* isolate = typed_array->GetIsolate();
+
+ ASSERT(IsFixedTypedArrayElementsKind(map->elements_kind()));
+
+ Handle<Map> new_map = Map::TransitionElementsTo(
+ map,
+ FixedToExternalElementsKind(map->elements_kind()));
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ Handle<FixedTypedArrayBase> fixed_typed_array(
+ FixedTypedArrayBase::cast(typed_array->elements()));
+ Runtime::SetupArrayBufferAllocatingData(isolate, buffer,
+ fixed_typed_array->DataSize(), false);
+ memcpy(buffer->backing_store(),
+ fixed_typed_array->DataPtr(),
+ fixed_typed_array->DataSize());
+ Handle<ExternalArray> new_elements =
+ isolate->factory()->NewExternalArray(
+ fixed_typed_array->length(), typed_array->type(),
+ static_cast<uint8_t*>(buffer->backing_store()));
+
+ buffer->set_weak_first_view(*typed_array);
+ ASSERT(typed_array->weak_next() == isolate->heap()->undefined_value());
+ typed_array->set_buffer(*buffer);
+ JSObject::SetMapAndElements(typed_array, new_map, new_elements);
+
+ return buffer;
+}
+
+
+Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
+ Handle<Object> result(buffer(), GetIsolate());
+ if (*result != Smi::FromInt(0)) {
+ ASSERT(IsExternalArrayElementsKind(map()->elements_kind()));
+ return Handle<JSArrayBuffer>::cast(result);
+ }
+ Handle<JSTypedArray> self(this);
+ return MaterializeArrayBuffer(self);
+}
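
GetBuffer implements lazy materialization: the buffer field holds Smi 0 until someone actually asks for the ArrayBuffer, at which point MaterializeArrayBuffer copies the on-heap data out and flips the array to external elements; later calls return the cached buffer. A toy version of the idea (std types instead of V8 heap objects):

    #include <cstdint>
    #include <memory>
    #include <vector>

    struct ToyTypedArray {
      std::vector<uint8_t> on_heap;                  // fixed typed array data
      std::shared_ptr<std::vector<uint8_t>> buffer;  // null plays "Smi 0"

      std::shared_ptr<std::vector<uint8_t>> GetBuffer() {
        if (buffer) return buffer;  // already materialized
        // "MaterializeArrayBuffer": copy data out, go external.
        buffer = std::make_shared<std::vector<uint8_t>>(on_heap);
        on_heap.clear();            // elements now live in the buffer
        return buffer;
      }
    };

    int main() {
      ToyTypedArray a{{1, 2, 3}, nullptr};
      auto b = a.GetBuffer();
      // Second call must hand back the same cached buffer.
      return (b->size() == 3 && a.GetBuffer() == b) ? 0 : 1;
    }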
+
+
+HeapType* PropertyCell::type() {
+ return static_cast<HeapType*>(type_raw());
+}
+
+
+void PropertyCell::set_type(HeapType* type, WriteBarrierMode ignored) {
ASSERT(IsPropertyCell());
set_type_raw(type, ignored);
}
-Handle<Type> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
- Handle<Object> value) {
+Handle<HeapType> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
Isolate* isolate = cell->GetIsolate();
- Handle<Type> old_type(cell->type(), isolate);
+ Handle<HeapType> old_type(cell->type(), isolate);
// TODO(2803): Do not track ConsStrings as constants because they cannot be
// embedded into code.
- Handle<Type> new_type(value->IsConsString() || value->IsTheHole()
- ? Type::Any()
- : Type::Constant(value, isolate), isolate);
+ Handle<HeapType> new_type = value->IsConsString() || value->IsTheHole()
+ ? HeapType::Any(isolate) : HeapType::Constant(value, isolate);
if (new_type->Is(old_type)) {
return old_type;
@@ -16625,40 +16964,34 @@ Handle<Type> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
cell->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
- if (old_type->Is(Type::None()) || old_type->Is(Type::Undefined())) {
+ if (old_type->Is(HeapType::None()) || old_type->Is(HeapType::Undefined())) {
return new_type;
}
- return handle(Type::Any(), isolate);
+ return HeapType::Any(isolate);
}
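
UpdatedType moves a cell's type monotonically up a small lattice: from None or Undefined to a Constant for the first interesting value, and from a stale Constant to Any once the value changes. A flattened toy model of those transitions (this enum-plus-int model is an assumption of the sketch, not V8's HeapType):

    #include <cassert>

    struct CellType {
      enum Kind { kNone, kUndefined, kConstant, kAny } kind;
      int constant_value;  // meaningful only when kind == kConstant
    };

    CellType UpdatedType(CellType old_type, int new_value) {
      CellType new_type{CellType::kConstant, new_value};
      if (old_type.kind == CellType::kConstant &&
          old_type.constant_value == new_value) {
        return old_type;                 // new_type->Is(old_type): keep it
      }
      if (old_type.kind == CellType::kNone ||
          old_type.kind == CellType::kUndefined) {
        return new_type;                 // first interesting value
      }
      return {CellType::kAny, 0};        // value changed: stop tracking
    }

    int main() {
      CellType t{CellType::kNone, 0};
      t = UpdatedType(t, 42);
      assert(t.kind == CellType::kConstant);
      t = UpdatedType(t, 7);             // a different value widens to Any
      assert(t.kind == CellType::kAny);
      return 0;
    }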
void PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
Handle<Object> value) {
cell->set_value(*value);
- if (!Type::Any()->Is(cell->type())) {
- Handle<Type> new_type = UpdatedType(cell, value);
+ if (!HeapType::Any()->Is(cell->type())) {
+ Handle<HeapType> new_type = UpdatedType(cell, value);
cell->set_type(*new_type);
}
}
-void PropertyCell::AddDependentCompilationInfo(CompilationInfo* info) {
- Handle<DependentCode> dep(dependent_code());
+// static
+void PropertyCell::AddDependentCompilationInfo(Handle<PropertyCell> cell,
+ CompilationInfo* info) {
Handle<DependentCode> codes =
- DependentCode::Insert(dep, DependentCode::kPropertyCellChangedGroup,
+ DependentCode::Insert(handle(cell->dependent_code(), info->isolate()),
+ DependentCode::kPropertyCellChangedGroup,
info->object_wrapper());
- if (*codes != dependent_code()) set_dependent_code(*codes);
+ if (*codes != cell->dependent_code()) cell->set_dependent_code(*codes);
info->dependencies(DependentCode::kPropertyCellChangedGroup)->Add(
- Handle<HeapObject>(this), info->zone());
-}
-
-
-void PropertyCell::AddDependentCode(Handle<Code> code) {
- Handle<DependentCode> codes = DependentCode::Insert(
- Handle<DependentCode>(dependent_code()),
- DependentCode::kPropertyCellChangedGroup, code);
- if (*codes != dependent_code()) set_dependent_code(*codes);
+ cell, info->zone());
}
diff --git a/chromium/v8/src/objects.h b/chromium/v8/src/objects.h
index a7f01d18569..73566d885e3 100644
--- a/chromium/v8/src/objects.h
+++ b/chromium/v8/src/objects.h
@@ -1,160 +1,142 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_OBJECTS_H_
#define V8_OBJECTS_H_
-#include "allocation.h"
-#include "assert-scope.h"
-#include "builtins.h"
-#include "elements-kind.h"
-#include "flags.h"
-#include "list.h"
-#include "property-details.h"
-#include "smart-pointers.h"
-#include "unicode-inl.h"
-#if V8_TARGET_ARCH_ARM
-#include "arm/constants-arm.h"
+#include "src/allocation.h"
+#include "src/assert-scope.h"
+#include "src/builtins.h"
+#include "src/elements-kind.h"
+#include "src/field-index.h"
+#include "src/flags.h"
+#include "src/list.h"
+#include "src/property-details.h"
+#include "src/smart-pointers.h"
+#include "src/unicode-inl.h"
+#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/constants-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/constants-mips.h"
+#include "src/mips/constants-mips.h"
#endif
-#include "v8checks.h"
-#include "zone.h"
+#include "src/v8checks.h"
+#include "src/zone.h"
//
// Most object types in the V8 JavaScript are described in this file.
//
// Inheritance hierarchy:
-// - MaybeObject (an object or a failure)
-// - Failure (immediate for marking failed operation)
-// - Object
-// - Smi (immediate small integer)
-// - HeapObject (superclass for everything allocated in the heap)
-// - JSReceiver (suitable for property access)
-// - JSObject
-// - JSArray
-// - JSArrayBuffer
-// - JSArrayBufferView
-// - JSTypedArray
-// - JSDataView
-// - JSSet
-// - JSMap
-// - JSWeakCollection
-// - JSWeakMap
-// - JSWeakSet
-// - JSRegExp
-// - JSFunction
-// - JSGeneratorObject
-// - JSModule
-// - GlobalObject
-// - JSGlobalObject
-// - JSBuiltinsObject
-// - JSGlobalProxy
-// - JSValue
-// - JSDate
-// - JSMessageObject
-// - JSProxy
-// - JSFunctionProxy
-// - FixedArrayBase
-// - ByteArray
-// - FixedArray
-// - DescriptorArray
-// - HashTable
-// - Dictionary
-// - StringTable
-// - CompilationCacheTable
-// - CodeCacheHashTable
-// - MapCache
-// - Context
-// - JSFunctionResultCache
-// - ScopeInfo
-// - TransitionArray
-// - FixedDoubleArray
-// - ExternalArray
-// - ExternalPixelArray
-// - ExternalByteArray
-// - ExternalUnsignedByteArray
-// - ExternalShortArray
-// - ExternalUnsignedShortArray
-// - ExternalIntArray
-// - ExternalUnsignedIntArray
-// - ExternalFloatArray
-// - Name
-// - String
-// - SeqString
-// - SeqOneByteString
-// - SeqTwoByteString
-// - SlicedString
-// - ConsString
-// - ExternalString
-// - ExternalAsciiString
-// - ExternalTwoByteString
-// - InternalizedString
-// - SeqInternalizedString
-// - SeqOneByteInternalizedString
-// - SeqTwoByteInternalizedString
-// - ConsInternalizedString
-// - ExternalInternalizedString
-// - ExternalAsciiInternalizedString
-// - ExternalTwoByteInternalizedString
-// - Symbol
-// - HeapNumber
-// - Cell
-// - PropertyCell
-// - Code
-// - Map
-// - Oddball
-// - Foreign
-// - SharedFunctionInfo
-// - Struct
-// - Box
-// - DeclaredAccessorDescriptor
-// - AccessorInfo
-// - DeclaredAccessorInfo
-// - ExecutableAccessorInfo
-// - AccessorPair
-// - AccessCheckInfo
-// - InterceptorInfo
-// - CallHandlerInfo
-// - TemplateInfo
-// - FunctionTemplateInfo
-// - ObjectTemplateInfo
-// - Script
-// - SignatureInfo
-// - TypeSwitchInfo
-// - DebugInfo
-// - BreakPointInfo
-// - CodeCache
+// - Object
+// - Smi (immediate small integer)
+// - HeapObject (superclass for everything allocated in the heap)
+// - JSReceiver (suitable for property access)
+// - JSObject
+// - JSArray
+// - JSArrayBuffer
+// - JSArrayBufferView
+// - JSTypedArray
+// - JSDataView
+// - JSSet
+// - JSMap
+// - JSSetIterator
+// - JSMapIterator
+// - JSWeakCollection
+// - JSWeakMap
+// - JSWeakSet
+// - JSRegExp
+// - JSFunction
+// - JSGeneratorObject
+// - JSModule
+// - GlobalObject
+// - JSGlobalObject
+// - JSBuiltinsObject
+// - JSGlobalProxy
+// - JSValue
+// - JSDate
+// - JSMessageObject
+// - JSProxy
+// - JSFunctionProxy
+// - FixedArrayBase
+// - ByteArray
+// - FixedArray
+// - DescriptorArray
+// - HashTable
+// - Dictionary
+// - StringTable
+// - CompilationCacheTable
+// - CodeCacheHashTable
+// - MapCache
+// - OrderedHashTable
+// - OrderedHashSet
+// - OrderedHashMap
+// - Context
+// - JSFunctionResultCache
+// - ScopeInfo
+// - TransitionArray
+// - FixedDoubleArray
+// - ExternalArray
+// - ExternalUint8ClampedArray
+// - ExternalInt8Array
+// - ExternalUint8Array
+// - ExternalInt16Array
+// - ExternalUint16Array
+// - ExternalInt32Array
+// - ExternalUint32Array
+// - ExternalFloat32Array
+// - Name
+// - String
+// - SeqString
+// - SeqOneByteString
+// - SeqTwoByteString
+// - SlicedString
+// - ConsString
+// - ExternalString
+// - ExternalAsciiString
+// - ExternalTwoByteString
+// - InternalizedString
+// - SeqInternalizedString
+// - SeqOneByteInternalizedString
+// - SeqTwoByteInternalizedString
+// - ConsInternalizedString
+// - ExternalInternalizedString
+// - ExternalAsciiInternalizedString
+// - ExternalTwoByteInternalizedString
+// - Symbol
+// - HeapNumber
+// - Cell
+// - PropertyCell
+// - Code
+// - Map
+// - Oddball
+// - Foreign
+// - SharedFunctionInfo
+// - Struct
+// - Box
+// - DeclaredAccessorDescriptor
+// - AccessorInfo
+// - DeclaredAccessorInfo
+// - ExecutableAccessorInfo
+// - AccessorPair
+// - AccessCheckInfo
+// - InterceptorInfo
+// - CallHandlerInfo
+// - TemplateInfo
+// - FunctionTemplateInfo
+// - ObjectTemplateInfo
+// - Script
+// - SignatureInfo
+// - TypeSwitchInfo
+// - DebugInfo
+// - BreakPointInfo
+// - CodeCache
//
// Formats of Object*:
// Smi: [31 bit signed int] 0
// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
-// Failure: [30 bit signed int] 11
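
With Failure gone, only two encodings remain on 32-bit targets: a Smi keeps 0 in the low bit with a 31-bit payload above it, and a heap pointer is 4-byte aligned with low bits 01; the removed 11 pattern was the Failure tag. Decoding, assuming exactly the 32-bit layout in the comment above:

    #include <stdint.h>

    const intptr_t kSmiTagMask = 1;         // low bit: 0 = Smi
    const intptr_t kHeapObjectTagMask = 3;  // low two bits
    const intptr_t kHeapObjectTag = 1;      // ...01 marks a heap pointer

    bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }
    int32_t SmiValue(intptr_t word) {
      return static_cast<int32_t>(word) >> 1;  // arithmetic shift keeps the sign
    }
    bool IsHeapObject(intptr_t word) {
      return (word & kHeapObjectTagMask) == kHeapObjectTag;
    }
    void* HeapObjectAddress(intptr_t word) {
      return reinterpret_cast<void*>(word - kHeapObjectTag);  // strip the tag
    }
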
namespace v8 {
namespace internal {
@@ -353,8 +335,6 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
\
V(INTERNALIZED_STRING_TYPE) \
V(ASCII_INTERNALIZED_STRING_TYPE) \
- V(CONS_INTERNALIZED_STRING_TYPE) \
- V(CONS_ASCII_INTERNALIZED_STRING_TYPE) \
V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \
V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
@@ -377,15 +357,26 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
/* Note: the order of these external array */ \
/* types is relied upon in */ \
/* Object::IsExternalArray(). */ \
- V(EXTERNAL_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_INT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT_ARRAY_TYPE) \
- V(EXTERNAL_DOUBLE_ARRAY_TYPE) \
- V(EXTERNAL_PIXEL_ARRAY_TYPE) \
+ V(EXTERNAL_INT8_ARRAY_TYPE) \
+ V(EXTERNAL_UINT8_ARRAY_TYPE) \
+ V(EXTERNAL_INT16_ARRAY_TYPE) \
+ V(EXTERNAL_UINT16_ARRAY_TYPE) \
+ V(EXTERNAL_INT32_ARRAY_TYPE) \
+ V(EXTERNAL_UINT32_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT32_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT64_ARRAY_TYPE) \
+ V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE) \
+ \
+ V(FIXED_INT8_ARRAY_TYPE) \
+ V(FIXED_UINT8_ARRAY_TYPE) \
+ V(FIXED_INT16_ARRAY_TYPE) \
+ V(FIXED_UINT16_ARRAY_TYPE) \
+ V(FIXED_INT32_ARRAY_TYPE) \
+ V(FIXED_UINT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT64_ARRAY_TYPE) \
+ V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
+ \
V(FILLER_TYPE) \
\
V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \
@@ -431,6 +422,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(JS_PROXY_TYPE) \
V(JS_SET_TYPE) \
V(JS_MAP_TYPE) \
+ V(JS_SET_ITERATOR_TYPE) \
+ V(JS_MAP_ITERATOR_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
V(JS_REGEXP_TYPE) \
@@ -478,7 +471,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
ExternalAsciiString) \
V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
ExternalTwoByteString::kSize, \
- external_string_with_one_bytei_data, \
+ external_string_with_one_byte_data, \
ExternalStringWithOneByteData) \
V(SHORT_EXTERNAL_STRING_TYPE, \
ExternalTwoByteString::kShortSize, \
@@ -501,14 +494,6 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
kVariableSizeSentinel, \
ascii_internalized_string, \
AsciiInternalizedString) \
- V(CONS_INTERNALIZED_STRING_TYPE, \
- ConsString::kSize, \
- cons_internalized_string, \
- ConsInternalizedString) \
- V(CONS_ASCII_INTERNALIZED_STRING_TYPE, \
- ConsString::kSize, \
- cons_ascii_internalized_string, \
- ConsAsciiInternalizedString) \
V(EXTERNAL_INTERNALIZED_STRING_TYPE, \
ExternalTwoByteString::kSize, \
external_internalized_string, \
@@ -543,7 +528,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// Note that for subtle reasons related to the ordering or numerical values of
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
-#define STRUCT_LIST_ALL(V) \
+#define STRUCT_LIST(V) \
V(BOX, Box, box) \
V(DECLARED_ACCESSOR_DESCRIPTOR, \
DeclaredAccessorDescriptor, \
@@ -564,19 +549,9 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(CODE_CACHE, CodeCache, code_cache) \
V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \
V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
- V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry)
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define STRUCT_LIST_DEBUGGER(V) \
+ V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
V(DEBUG_INFO, DebugInfo, debug_info) \
V(BREAK_POINT_INFO, BreakPointInfo, break_point_info)
-#else
-#define STRUCT_LIST_DEBUGGER(V)
-#endif
-
-#define STRUCT_LIST(V) \
- STRUCT_LIST_ALL(V) \
- STRUCT_LIST_DEBUGGER(V)
// We use the full 8 bits of the instance_type field to encode heap object
// instance types. The high-order bit (bit 7) is set if the object is not a
@@ -608,17 +583,17 @@ enum StringRepresentationTag {
};
const uint32_t kIsIndirectStringMask = 0x1;
const uint32_t kIsIndirectStringTag = 0x1;
-STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0);
-STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0);
-STATIC_ASSERT(
- (kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
-STATIC_ASSERT(
- (kSlicedStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
+STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0); // NOLINT
+STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0); // NOLINT
+STATIC_ASSERT((kConsStringTag &
+ kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT
+STATIC_ASSERT((kSlicedStringTag &
+ kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT
// Use this mask to distinguish between cons and slice only after making
// sure that the string is one of the two (an indirect string).
const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;
-STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
+STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask));
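
The NOLINT-annotated reflow keeps the same assertions; the separate kSlicedNotConsMask != 0 term was dropped because IS_POWER_OF_TWO already rejects zero. How the two tag bits are meant to be read, assuming the usual encoding of StringRepresentationTag in this era (kSeqStringTag = 0x0, kConsStringTag = 0x1, kExternalStringTag = 0x2, kSlicedStringTag = 0x3; treat the exact values as an assumption):

    #include <stdint.h>

    const uint32_t kSeqStringTag = 0x0;       // assumed values, see note above
    const uint32_t kConsStringTag = 0x1;
    const uint32_t kExternalStringTag = 0x2;
    const uint32_t kSlicedStringTag = 0x3;
    const uint32_t kIsIndirectStringMask = 0x1;
    const uint32_t kIsIndirectStringTag = 0x1;
    const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;  // 0x2

    bool IsIndirect(uint32_t type) {  // cons or sliced
      return (type & kIsIndirectStringMask) == kIsIndirectStringTag;
    }
    bool IsSliced(uint32_t type) {    // valid only after IsIndirect() holds
      return (type & kSlicedNotConsMask) != 0;
    }
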
// If bit 7 is clear, then bit 3 indicates whether this two-byte
// string actually contains one byte data.
@@ -649,10 +624,6 @@ enum InstanceType {
| kInternalizedTag,
ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kSeqStringTag
| kInternalizedTag,
- CONS_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kConsStringTag
- | kInternalizedTag,
- CONS_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kConsStringTag
- | kInternalizedTag,
EXTERNAL_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kExternalStringTag
| kInternalizedTag,
EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag
@@ -672,9 +643,9 @@ enum InstanceType {
STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
ASCII_STRING_TYPE = ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- CONS_STRING_TYPE = CONS_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag | kNotInternalizedTag,
CONS_ASCII_STRING_TYPE =
- CONS_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ kOneByteStringTag | kConsStringTag | kNotInternalizedTag,
SLICED_STRING_TYPE =
kTwoByteStringTag | kSlicedStringTag | kNotInternalizedTag,
@@ -711,17 +682,28 @@ enum InstanceType {
FOREIGN_TYPE,
BYTE_ARRAY_TYPE,
FREE_SPACE_TYPE,
- EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
- EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
- EXTERNAL_SHORT_ARRAY_TYPE,
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
- EXTERNAL_INT_ARRAY_TYPE,
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- EXTERNAL_FLOAT_ARRAY_TYPE,
- EXTERNAL_DOUBLE_ARRAY_TYPE,
- EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
+
+ EXTERNAL_INT8_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
+ EXTERNAL_UINT8_ARRAY_TYPE,
+ EXTERNAL_INT16_ARRAY_TYPE,
+ EXTERNAL_UINT16_ARRAY_TYPE,
+ EXTERNAL_INT32_ARRAY_TYPE,
+ EXTERNAL_UINT32_ARRAY_TYPE,
+ EXTERNAL_FLOAT32_ARRAY_TYPE,
+ EXTERNAL_FLOAT64_ARRAY_TYPE,
+ EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
+
+ FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
+ FIXED_UINT8_ARRAY_TYPE,
+ FIXED_INT16_ARRAY_TYPE,
+ FIXED_UINT16_ARRAY_TYPE,
+ FIXED_INT32_ARRAY_TYPE,
+ FIXED_UINT32_ARRAY_TYPE,
+ FIXED_FLOAT32_ARRAY_TYPE,
+ FIXED_FLOAT64_ARRAY_TYPE,
+ FIXED_UINT8_CLAMPED_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
+
FIXED_DOUBLE_ARRAY_TYPE,
- CONSTANT_POOL_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
@@ -744,18 +726,13 @@ enum InstanceType {
TYPE_FEEDBACK_INFO_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
BOX_TYPE,
- // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
- // is defined. However as include/v8.h contain some of the instance type
- // constants always having them avoids them getting different numbers
- // depending on whether ENABLE_DEBUGGER_SUPPORT is defined or not.
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
FIXED_ARRAY_TYPE,
+ CONSTANT_POOL_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- JS_MESSAGE_OBJECT_TYPE,
-
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
// the two forms of function. This organization enables using the same
@@ -765,6 +742,7 @@ enum InstanceType {
JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE
JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
+ JS_MESSAGE_OBJECT_TYPE,
JS_DATE_TYPE,
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
@@ -779,6 +757,8 @@ enum InstanceType {
JS_DATA_VIEW_TYPE,
JS_SET_TYPE,
JS_MAP_TYPE,
+ JS_SET_ITERATOR_TYPE,
+ JS_MAP_ITERATOR_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
@@ -795,8 +775,11 @@ enum InstanceType {
LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
// Boundaries for testing for an external array.
- FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
- LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
+ FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_INT8_ARRAY_TYPE,
+ LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE,
+ // Boundaries for testing for a fixed typed array.
+ FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
+ LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_UINT8_CLAMPED_ARRAY_TYPE,
// Boundary for promotion to old data space/old pointer space.
LAST_DATA_TYPE = FILLER_TYPE,
// Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
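
The renaming of the external array types to the ES6 TypedArray vocabulary (INT8 through UINT8_CLAMPED) and the new FIXED_*_ARRAY_TYPE block for on-heap backing stores both rely on keeping each family contiguous, so membership stays a two-comparison range check. Illustrative only; the boundary constants are the ones defined just above:

    inline bool IsExternalArrayInstanceType(int instance_type) {
      return instance_type >= FIRST_EXTERNAL_ARRAY_TYPE &&
             instance_type <= LAST_EXTERNAL_ARRAY_TYPE;
    }
    inline bool IsFixedTypedArrayInstanceType(int instance_type) {
      return instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
             instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE;
    }
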
@@ -826,10 +809,10 @@ enum InstanceType {
const int kExternalArrayTypeCount =
LAST_EXTERNAL_ARRAY_TYPE - FIRST_EXTERNAL_ARRAY_TYPE + 1;
-STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
-STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
-STATIC_CHECK(ODDBALL_TYPE == Internals::kOddballType);
-STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
+STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
+STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
+STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
+STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
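
STATIC_CHECK is retired in favour of STATIC_ASSERT everywhere in this header. Both are compile-time assertions; these four pin the enum values that include/v8.h duplicates for its inline fast paths, so any drift breaks the build instead of silently corrupting the public API. On a C++11 toolchain the macro reduces to roughly this (a sketch; V8 also carries a pre-C++11 fallback):

    #define STATIC_ASSERT_SKETCH(test) static_assert(test, #test)
    // e.g. STATIC_ASSERT_SKETCH(JS_OBJECT_TYPE == Internals::kJSObjectType);
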
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
@@ -876,12 +859,15 @@ class AllocationSiteCreationContext;
class AllocationSiteUsageContext;
class DictionaryElementsAccessor;
class ElementsAccessor;
-class Failure;
class FixedArrayBase;
class GlobalObject;
class ObjectVisitor;
+class LookupIterator;
class StringStream;
-class Type;
+// We cannot forward-declare "class HeapType;" since it is a template typedef.
+template<class> class TypeImpl;
+struct HeapTypeConfig;
+typedef TypeImpl<HeapTypeConfig> HeapType;
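
Since HeapType is a typedef for a template instantiation, a plain forward declaration is ill-formed, hence the three-line pattern above. Spelled out:

    template<class Config> class TypeImpl;      // declare the template itself
    struct HeapTypeConfig;                      // declare its configuration type
    typedef TypeImpl<HeapTypeConfig> HeapType;  // restate the typedef

    // By contrast this cannot work, because HeapType names a typedef, not a class:
    //   class HeapType;
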
// A template-ized version of the IsXXX functions.
@@ -899,61 +885,6 @@ template <class C> inline bool Is(Object* obj);
#define DECLARE_PRINTER(Name)
#endif
-class MaybeObject BASE_EMBEDDED {
- public:
- inline bool IsFailure();
- inline bool IsRetryAfterGC();
- inline bool IsOutOfMemory();
- inline bool IsException();
- INLINE(bool IsTheHole());
- INLINE(bool IsUninitialized());
- inline bool ToObject(Object** obj) {
- if (IsFailure()) return false;
- *obj = reinterpret_cast<Object*>(this);
- return true;
- }
- inline Failure* ToFailureUnchecked() {
- ASSERT(IsFailure());
- return reinterpret_cast<Failure*>(this);
- }
- inline Object* ToObjectUnchecked() {
- // TODO(jkummerow): Turn this back into an ASSERT when we can be certain
- // that it never fires in Release mode in the wild.
- CHECK(!IsFailure());
- return reinterpret_cast<Object*>(this);
- }
- inline Object* ToObjectChecked() {
- CHECK(!IsFailure());
- return reinterpret_cast<Object*>(this);
- }
-
- template<typename T>
- inline bool To(T** obj) {
- if (IsFailure()) return false;
- *obj = T::cast(reinterpret_cast<Object*>(this));
- return true;
- }
-
- template<typename T>
- inline bool ToHandle(Handle<T>* obj, Isolate* isolate) {
- if (IsFailure()) return false;
- *obj = handle(T::cast(reinterpret_cast<Object*>(this)), isolate);
- return true;
- }
-
-#ifdef OBJECT_PRINT
- // Prints this object with details.
- void Print();
- void Print(FILE* out);
- void PrintLn();
- void PrintLn(FILE* out);
-#endif
-#ifdef VERIFY_HEAP
- // Verifies the object.
- void Verify();
-#endif
-};
-
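
Deleting MaybeObject retires the convention in which any allocating function could return a tagged Failure that every caller had to unpack with ToObject/To/ToHandle. Its replacement, visible throughout the rest of this header, is MaybeHandle&lt;T&gt;: an empty handle means an exception is pending on the isolate. Old and new caller idioms side by side (illustrative, not literal V8 code):

    // Old style (removed with this class):
    //   MaybeObject* maybe = obj->GetProperty(name);
    //   Object* result;
    //   if (!maybe->ToObject(&result)) return maybe;  // propagate the Failure
    //
    // New style:
    //   Handle<Object> result;
    //   if (!Object::GetProperty(obj, name).ToHandle(&result)) {
    //     return MaybeHandle<Object>();  // exception already pending on the isolate
    //   }
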
#define OBJECT_TYPE_LIST(V) \
V(Smi) \
@@ -977,15 +908,25 @@ class MaybeObject BASE_EMBEDDED {
V(Symbol) \
\
V(ExternalArray) \
- V(ExternalByteArray) \
- V(ExternalUnsignedByteArray) \
- V(ExternalShortArray) \
- V(ExternalUnsignedShortArray) \
- V(ExternalIntArray) \
- V(ExternalUnsignedIntArray) \
- V(ExternalFloatArray) \
- V(ExternalDoubleArray) \
- V(ExternalPixelArray) \
+ V(ExternalInt8Array) \
+ V(ExternalUint8Array) \
+ V(ExternalInt16Array) \
+ V(ExternalUint16Array) \
+ V(ExternalInt32Array) \
+ V(ExternalUint32Array) \
+ V(ExternalFloat32Array) \
+ V(ExternalFloat64Array) \
+ V(ExternalUint8ClampedArray) \
+ V(FixedTypedArrayBase) \
+ V(FixedUint8Array) \
+ V(FixedInt8Array) \
+ V(FixedUint16Array) \
+ V(FixedInt16Array) \
+ V(FixedUint32Array) \
+ V(FixedInt32Array) \
+ V(FixedFloat32Array) \
+ V(FixedFloat64Array) \
+ V(FixedUint8ClampedArray) \
V(ByteArray) \
V(FreeSpace) \
V(JSReceiver) \
@@ -999,7 +940,6 @@ class MaybeObject BASE_EMBEDDED {
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(DependentCode) \
- V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
V(ConstantPoolArray) \
@@ -1025,6 +965,8 @@ class MaybeObject BASE_EMBEDDED {
V(JSFunctionProxy) \
V(JSSet) \
V(JSMap) \
+ V(JSSetIterator) \
+ V(JSMapIterator) \
V(JSWeakCollection) \
V(JSWeakMap) \
V(JSWeakSet) \
@@ -1048,7 +990,8 @@ class MaybeObject BASE_EMBEDDED {
V(Cell) \
V(PropertyCell) \
V(ObjectHashTable) \
- V(WeakHashTable)
+ V(WeakHashTable) \
+ V(OrderedHashTable)
#define ERROR_MESSAGES_LIST(V) \
@@ -1056,110 +999,129 @@ class MaybeObject BASE_EMBEDDED {
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
- V(kAlignmentMarkerExpected, "alignment marker expected") \
+ V(kAlignmentMarkerExpected, "Alignment marker expected") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kArgumentsObjectValueInATestContext, \
- "arguments object value in a test context") \
- V(kArrayBoilerplateCreationFailed, "array boilerplate creation failed") \
- V(kArrayIndexConstantValueTooBig, "array index constant value too big") \
- V(kAssignmentToArguments, "assignment to arguments") \
+ "Arguments object value in a test context") \
+ V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed") \
+ V(kArrayIndexConstantValueTooBig, "Array index constant value too big") \
+ V(kAssignmentToArguments, "Assignment to arguments") \
V(kAssignmentToLetVariableBeforeInitialization, \
- "assignment to let variable before initialization") \
- V(kAssignmentToLOOKUPVariable, "assignment to LOOKUP variable") \
+ "Assignment to let variable before initialization") \
+ V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable") \
V(kAssignmentToParameterFunctionUsesArgumentsObject, \
- "assignment to parameter, function uses arguments object") \
+ "Assignment to parameter, function uses arguments object") \
V(kAssignmentToParameterInArgumentsObject, \
- "assignment to parameter in arguments object") \
+ "Assignment to parameter in arguments object") \
V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache") \
V(kBadValueContextForArgumentsObjectValue, \
- "bad value context for arguments object value") \
+ "Bad value context for arguments object value") \
V(kBadValueContextForArgumentsValue, \
- "bad value context for arguments value") \
- V(kBailedOutDueToDependencyChange, "bailed out due to dependency change") \
- V(kBailoutWasNotPrepared, "bailout was not prepared") \
+ "Bad value context for arguments value") \
+ V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
+ V(kBailoutWasNotPrepared, "Bailout was not prepared") \
V(kBinaryStubGenerateFloatingPointCode, \
"BinaryStub_GenerateFloatingPointCode") \
V(kBothRegistersWereSmisInSelectNonSmi, \
"Both registers were smis in SelectNonSmi") \
V(kCallToAJavaScriptRuntimeFunction, \
- "call to a JavaScript runtime function") \
+ "Call to a JavaScript runtime function") \
V(kCannotTranslatePositionInChangedArea, \
"Cannot translate position in changed area") \
- V(kCodeGenerationFailed, "code generation failed") \
- V(kCodeObjectNotProperlyPatched, "code object not properly patched") \
- V(kCompoundAssignmentToLookupSlot, "compound assignment to lookup slot") \
- V(kContextAllocatedArguments, "context-allocated arguments") \
- V(kDebuggerIsActive, "debugger is active") \
+ V(kCodeGenerationFailed, "Code generation failed") \
+ V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
+ V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
+ V(kContextAllocatedArguments, "Context-allocated arguments") \
+ V(kCopyBuffersOverlap, "Copy buffers overlap") \
+ V(kCouldNotGenerateZero, "Could not generate +0.0") \
+ V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
+ V(kDebuggerHasBreakPoints, "Debugger has break points") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
V(kDeclarationInWithContext, "Declaration in with context") \
V(kDefaultNaNModeNotSet, "Default NaN mode not set") \
- V(kDeleteWithGlobalVariable, "delete with global variable") \
- V(kDeleteWithNonGlobalVariable, "delete with non-global variable") \
+ V(kDeleteWithGlobalVariable, "Delete with global variable") \
+ V(kDeleteWithNonGlobalVariable, "Delete with non-global variable") \
V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
V(kDontDeleteCellsCannotContainTheHole, \
"DontDelete cells can't contain the hole") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
+ V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kEval, "eval") \
V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
- V(kExpectedAlignmentMarker, "expected alignment marker") \
+ V(kExpectedAlignmentMarker, "Expected alignment marker") \
+ V(kExpectedAllocationSite, "Expected allocation site") \
+ V(kExpectedFunctionObject, "Expected function object in register") \
+ V(kExpectedHeapNumber, "Expected HeapNumber") \
+ V(kExpectedNativeContext, "Expected native context") \
+ V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
+ V(kExpectedNonNullContext, "Expected non-null context") \
+ V(kExpectedPositiveZero, "Expected +0.0") \
V(kExpectedAllocationSiteInCell, \
"Expected AllocationSite in property cell") \
- V(kExpectedPropertyCellInRegisterA2, \
- "Expected property cell in register a2") \
- V(kExpectedPropertyCellInRegisterEbx, \
- "Expected property cell in register ebx") \
- V(kExpectedPropertyCellInRegisterRbx, \
- "Expected property cell in register rbx") \
+ V(kExpectedFixedArrayInFeedbackVector, \
+ "Expected fixed array in feedback vector") \
+ V(kExpectedFixedArrayInRegisterA2, \
+ "Expected fixed array in register a2") \
+ V(kExpectedFixedArrayInRegisterEbx, \
+ "Expected fixed array in register ebx") \
+ V(kExpectedFixedArrayInRegisterR2, \
+ "Expected fixed array in register r2") \
+ V(kExpectedFixedArrayInRegisterRbx, \
+ "Expected fixed array in register rbx") \
+ V(kExpectedNewSpaceObject, "Expected new space object") \
+ V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
+ V(kExpectedUndefinedOrCell, \
+ "Expected undefined or cell in register") \
V(kExpectingAlignmentForCopyBytes, \
"Expecting alignment for CopyBytes") \
V(kExportDeclaration, "Export declaration") \
V(kExternalStringExpectedButNotFound, \
- "external string expected, but not found") \
- V(kFailedBailedOutLastTime, "failed/bailed out last time") \
+ "External string expected, but not found") \
+ V(kFailedBailedOutLastTime, "Failed/bailed out last time") \
V(kForInStatementIsNotFastCase, "ForInStatement is not fast case") \
V(kForInStatementOptimizationIsDisabled, \
"ForInStatement optimization is disabled") \
V(kForInStatementWithNonLocalEachVariable, \
"ForInStatement with non-local each variable") \
V(kForOfStatement, "ForOfStatement") \
- V(kFrameIsExpectedToBeAligned, "frame is expected to be aligned") \
- V(kFunctionCallsEval, "function calls eval") \
- V(kFunctionIsAGenerator, "function is a generator") \
- V(kFunctionWithIllegalRedeclaration, "function with illegal redeclaration") \
+ V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned") \
+ V(kFunctionCallsEval, "Function calls eval") \
+ V(kFunctionIsAGenerator, "Function is a generator") \
+ V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGeneratorFailedToResume, "Generator failed to resume") \
- V(kGenerator, "generator") \
+ V(kGenerator, "Generator") \
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
+ V(kHydrogenFilter, "Optimization disabled by filter") \
V(kImportDeclaration, "Import declaration") \
V(kImproperObjectOnPrototypeChainForStore, \
- "improper object on prototype chain for store") \
+ "Improper object on prototype chain for store") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
- V(kInlinedRuntimeFunctionClassOf, "inlined runtime function: ClassOf") \
+ V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf") \
V(kInlinedRuntimeFunctionFastAsciiArrayJoin, \
- "inlined runtime function: FastAsciiArrayJoin") \
+ "Inlined runtime function: FastAsciiArrayJoin") \
V(kInlinedRuntimeFunctionGeneratorNext, \
- "inlined runtime function: GeneratorNext") \
+ "Inlined runtime function: GeneratorNext") \
V(kInlinedRuntimeFunctionGeneratorThrow, \
- "inlined runtime function: GeneratorThrow") \
+ "Inlined runtime function: GeneratorThrow") \
V(kInlinedRuntimeFunctionGetFromCache, \
- "inlined runtime function: GetFromCache") \
+ "Inlined runtime function: GetFromCache") \
V(kInlinedRuntimeFunctionIsNonNegativeSmi, \
- "inlined runtime function: IsNonNegativeSmi") \
- V(kInlinedRuntimeFunctionIsRegExpEquivalent, \
- "inlined runtime function: IsRegExpEquivalent") \
+ "Inlined runtime function: IsNonNegativeSmi") \
V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf, \
- "inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
- V(kInliningBailedOut, "inlining bailed out") \
+ "Inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
+ V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
- "input GPR is expected to have upper32 cleared") \
+ "Input GPR is expected to have upper32 cleared") \
+ V(kInputStringTooLong, "Input string too long") \
V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
"InstanceofStub unexpected call site cache (check)") \
V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
@@ -1173,10 +1135,11 @@ class MaybeObject BASE_EMBEDDED {
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
+ V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
- V(kInvalidLeftHandSideInAssignment, "invalid left-hand side in assignment") \
- V(kInvalidLhsInCompoundAssignment, "invalid lhs in compound assignment") \
- V(kInvalidLhsInCountOperation, "invalid lhs in count operation") \
+ V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
+ V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
+ V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
V(kInvalidMinLength, "Invalid min_length") \
V(kJSGlobalObjectNativeContextShouldBeANativeContext, \
"JSGlobalObject::native_context should be a native context") \
@@ -1185,14 +1148,19 @@ class MaybeObject BASE_EMBEDDED {
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
+ V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnARM64, \
+ "LiveEdit frame dropping is not supported on arm64") \
V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
"LiveEdit frame dropping is not supported on arm") \
V(kLiveEditFrameDroppingIsNotSupportedOnMips, \
"LiveEdit frame dropping is not supported on mips") \
V(kLiveEdit, "LiveEdit") \
V(kLookupVariableInCountOperation, \
- "lookup variable in count operation") \
+ "Lookup variable in count operation") \
+ V(kMapBecameDeprecated, "Map became deprecated") \
+ V(kMapBecameUnstable, "Map became unstable") \
V(kMapIsNoLongerInEax, "Map is no longer in eax") \
V(kModuleDeclaration, "Module declaration") \
V(kModuleLiteral, "Module literal") \
@@ -1201,26 +1169,28 @@ class MaybeObject BASE_EMBEDDED {
V(kModuleVariable, "Module variable") \
V(kModuleUrl, "Module url") \
V(kNativeFunctionLiteral, "Native function literal") \
- V(kNoCasesLeft, "no cases left") \
+ V(kNeedSmiLiteral, "Need a Smi literal here") \
+ V(kNoCasesLeft, "No cases left") \
V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \
"No empty arrays here in EmitFastAsciiArrayJoin") \
V(kNonInitializerAssignmentToConst, \
- "non-initializer assignment to const") \
+ "Non-initializer assignment to const") \
V(kNonSmiIndex, "Non-smi index") \
V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
V(kNonSmiValue, "Non-smi value") \
V(kNonObject, "Non-object value") \
V(kNotEnoughVirtualRegistersForValues, \
- "not enough virtual registers for values") \
+ "Not enough virtual registers for values") \
V(kNotEnoughSpillSlotsForOsr, \
- "not enough spill slots for OSR") \
+ "Not enough spill slots for OSR") \
V(kNotEnoughVirtualRegistersRegalloc, \
- "not enough virtual registers (regalloc)") \
- V(kObjectFoundInSmiOnlyArray, "object found in smi-only array") \
+ "Not enough virtual registers (regalloc)") \
+ V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, \
"Object literal with complex property") \
V(kOddballInStringTableIsNotUndefinedOrTheHole, \
- "oddball in string table is not undefined or the hole") \
+ "Oddball in string table is not undefined or the hole") \
+ V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
@@ -1230,38 +1200,58 @@ class MaybeObject BASE_EMBEDDED {
V(kOperandIsNotAString, "Operand is not a string") \
V(kOperandIsNotSmi, "Operand is not smi") \
V(kOperandNotANumber, "Operand not a number") \
- V(kOptimizedTooManyTimes, "optimized too many times") \
+ V(kObjectTagged, "The object is tagged") \
+ V(kObjectNotTagged, "The object is not tagged") \
+ V(kOptimizationDisabled, "Optimization is disabled") \
+ V(kOptimizedTooManyTimes, "Optimized too many times") \
V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
"Out of virtual registers while trying to allocate temp register") \
- V(kParseScopeError, "parse/scope error") \
- V(kPossibleDirectCallToEval, "possible direct call to eval") \
+ V(kParseScopeError, "Parse/scope error") \
+ V(kPossibleDirectCallToEval, "Possible direct call to eval") \
+ V(kPreconditionsWereNotMet, "Preconditions were not met") \
V(kPropertyAllocationCountFailed, "Property allocation count failed") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
- "reference to a variable which requires dynamic lookup") \
+ "Reference to a variable which requires dynamic lookup") \
V(kReferenceToGlobalLexicalVariable, \
- "reference to global lexical variable") \
- V(kReferenceToUninitializedVariable, "reference to uninitialized variable") \
+ "Reference to global lexical variable") \
+ V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
- V(kRegisterWasClobbered, "register was clobbered") \
+ V(kRegisterWasClobbered, "Register was clobbered") \
+ V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+ V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
+ V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
V(kScopedBlock, "ScopedBlock") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
- V(kStackFrameTypesMustMatch, "stack frame types must match") \
+ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
+ V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \
"SwitchStatement: mixed or non-literal switch labels") \
V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \
+ V(kTheCurrentStackPointerIsBelowCsp, \
+ "The current stack pointer is below csp") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
V(kTheInstructionToPatchShouldBeALoadFromPc, \
"The instruction to patch should be a load from pc") \
+ V(kTheInstructionToPatchShouldBeALoadFromPp, \
+ "The instruction to patch should be a load from pp") \
+ V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
+ "The instruction to patch should be a ldr literal") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \
"The instruction to patch should be an ori") \
- V(kTooManyParametersLocals, "too many parameters/locals") \
- V(kTooManyParameters, "too many parameters") \
+ V(kTheSourceAndDestinationAreTheSame, \
+ "The source and destination are the same") \
+ V(kTheStackPointerIsNotAligned, "The stack pointer is not aligned.") \
+ V(kTheStackWasCorruptedByMacroAssemblerCall, \
+ "The stack was corrupted by MacroAssembler::Call()") \
+ V(kTooManyParametersLocals, "Too many parameters/locals") \
+ V(kTooManyParameters, "Too many parameters") \
V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
+ V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
V(kToOperandIsDoubleRegisterUnimplemented, \
"ToOperand IsDoubleRegister unimplemented") \
V(kToOperandUnsupportedDoubleImmediate, \
@@ -1270,10 +1260,12 @@ class MaybeObject BASE_EMBEDDED {
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
+ V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
V(kUndoAllocationOfNonAllocatedMemory, \
"Undo allocation of non allocated memory") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
+ V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
@@ -1300,34 +1292,39 @@ class MaybeObject BASE_EMBEDDED {
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
+ V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
"Unexpected number of pre-allocated property fields") \
+ V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
+ V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStringFunction, "Unexpected String function") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedStringWrapperInstanceSize, \
"Unexpected string wrapper instance size") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
+ V(kUnexpectedValue, "Unexpected value") \
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
+ V(kUnimplemented, "unimplemented") \
V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
- V(kUnknown, "unknown") \
+ V(kUnknown, "Unknown") \
V(kUnsupportedConstCompoundAssignment, \
- "unsupported const compound assignment") \
+ "Unsupported const compound assignment") \
V(kUnsupportedCountOperationWithConst, \
- "unsupported count operation with const") \
- V(kUnsupportedDoubleImmediate, "unsupported double immediate") \
- V(kUnsupportedLetCompoundAssignment, "unsupported let compound assignment") \
+ "Unsupported count operation with const") \
+ V(kUnsupportedDoubleImmediate, "Unsupported double immediate") \
+ V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
V(kUnsupportedLookupSlotInDeclaration, \
- "unsupported lookup slot in declaration") \
+ "Unsupported lookup slot in declaration") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const variable") \
- V(kUnsupportedTaggedImmediate, "unsupported tagged immediate") \
+ V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
V(kVariableResolvedToWithContext, "Variable resolved to with context") \
V(kWeShouldNotHaveAnEmptyLexicalContext, \
- "we should not have an empty lexical context") \
+ "We should not have an empty lexical context") \
V(kWithStatement, "WithStatement") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
@@ -1349,9 +1346,9 @@ const char* GetBailoutReason(BailoutReason reason);
// object hierarchy.
// Object does not use any virtual functions to avoid the
// allocation of the C++ vtable.
-// Since Smi and Failure are subclasses of Object no
+// Since both Smi and HeapObject are subclasses of Object no
// data members can be present in Object.
-class Object : public MaybeObject {
+class Object {
public:
// Type testing.
bool IsObject() { return true; }
@@ -1372,17 +1369,18 @@ class Object : public MaybeObject {
INLINE(bool IsSpecObject());
INLINE(bool IsSpecFunction());
+ INLINE(bool IsTemplateInfo());
bool IsCallable();
// Oddball testing.
INLINE(bool IsUndefined());
INLINE(bool IsNull());
- INLINE(bool IsTheHole()); // Shadows MaybeObject's implementation.
+ INLINE(bool IsTheHole());
+ INLINE(bool IsException());
INLINE(bool IsUninitialized());
INLINE(bool IsTrue());
INLINE(bool IsFalse());
inline bool IsArgumentsMarker();
- inline bool NonFailureIsHeapObject();
// Filler objects (fillers and free space objects).
inline bool IsFiller();
@@ -1431,8 +1429,11 @@ class Object : public MaybeObject {
return true;
}
- inline MaybeObject* AllocateNewStorageFor(Heap* heap,
- Representation representation);
+ Handle<HeapType> OptimalType(Isolate* isolate, Representation representation);
+
+ inline static Handle<Object> NewStorageFor(Isolate* isolate,
+ Handle<Object> object,
+ Representation representation);
// Returns true if the object is of the correct type to be used as a
// implementation of a JSObject's elements.
@@ -1440,72 +1441,69 @@ class Object : public MaybeObject {
inline bool HasSpecificClassOf(String* name);
- MUST_USE_RESULT MaybeObject* ToObject(Isolate* isolate); // ECMA-262 9.9.
bool BooleanValue(); // ECMA-262 9.2.
// Convert to a JSObject if needed.
// native_context is used when creating wrapper object.
- MUST_USE_RESULT MaybeObject* ToObject(Context* native_context);
+ static inline MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
+ Handle<Object> object);
+ static MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Context> context);
// Converts this to a Smi if possible.
- // Failure is returned otherwise.
- MUST_USE_RESULT inline MaybeObject* ToSmi();
-
- void Lookup(Name* name, LookupResult* result);
-
- // Property access.
- MUST_USE_RESULT inline MaybeObject* GetProperty(Name* key);
- MUST_USE_RESULT inline MaybeObject* GetProperty(
- Name* key,
- PropertyAttributes* attributes);
-
- // TODO(yangguo): this should eventually replace the non-handlified version.
- static Handle<Object> GetPropertyWithReceiver(Handle<Object> object,
- Handle<Object> receiver,
- Handle<Name> name,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyWithReceiver(
- Object* receiver,
- Name* key,
- PropertyAttributes* attributes);
-
- static Handle<Object> GetProperty(Handle<Object> object,
- Handle<Name> key);
- static Handle<Object> GetProperty(Handle<Object> object,
- Handle<Object> receiver,
- LookupResult* result,
- Handle<Name> key,
- PropertyAttributes* attributes);
+ static MUST_USE_RESULT inline MaybeHandle<Smi> ToSmi(Isolate* isolate,
+ Handle<Object> object);
+
+ void Lookup(Handle<Name> name, LookupResult* result);
- MUST_USE_RESULT static MaybeObject* GetPropertyOrFail(
+ MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Handle<Object> object,
+ Handle<Name> key);
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
+ Isolate* isolate,
+ Handle<Object> object,
+ const char* key);
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
+ Handle<Object> object,
+ Handle<Name> key);
+
+ MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
Handle<Object> receiver,
- LookupResult* result,
- Handle<Name> key,
- PropertyAttributes* attributes);
-
- MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver,
- LookupResult* result,
- Name* key,
- PropertyAttributes* attributes);
-
- MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
- JSReceiver* getter);
-
- static Handle<Object> GetElement(Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
- MUST_USE_RESULT inline MaybeObject* GetElement(Isolate* isolate,
- uint32_t index);
- // For use when we know that no exception can be thrown.
- inline Object* GetElementNoExceptionThrown(Isolate* isolate, uint32_t index);
- MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Isolate* isolate,
- Object* receiver,
- uint32_t index);
+ Handle<Name> name,
+ Handle<JSObject> holder,
+ Handle<Object> structure);
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithCallback(
+ Handle<Object> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ Handle<Object> structure,
+ StrictMode strict_mode);
+
+ MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
+ Handle<Object> receiver,
+ Handle<JSReceiver> getter);
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithDefinedSetter(
+ Handle<Object> receiver,
+ Handle<JSReceiver> setter,
+ Handle<Object> value);
+
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
+ Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
+
+ MUST_USE_RESULT static MaybeHandle<Object> GetElementWithReceiver(
+ Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> receiver,
+ uint32_t index);
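
Every raw-pointer accessor in this block becomes a static method taking handles and returning MaybeHandle, with a LookupIterator overload replacing the LookupResult out-parameters. Call sites all take the same shape; a sketch using the const char* overload declared above (treat the wrapper itself as illustrative):

    MaybeHandle<Object> GetNamed(Isolate* isolate, Handle<Object> obj,
                                 const char* name) {
      Handle<Object> result;
      // GetProperty may allocate and may throw; an empty result means an
      // exception is pending on the isolate.
      if (!Object::GetProperty(isolate, obj, name).ToHandle(&result)) {
        return MaybeHandle<Object>();
      }
      return result;
    }
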
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype(Isolate* isolate);
- Map* GetMarkerMap(Isolate* isolate);
+ static Handle<Object> GetPrototype(Isolate* isolate, Handle<Object> object);
// Returns the permanent hash code associated with this object. May return
// undefined if not yet created.
@@ -1514,16 +1512,19 @@ class Object : public MaybeObject {
// Returns the permanent hash code associated with this object depending on
// the actual object type. May create and store a hash code if needed and none
// exists.
- // TODO(rafaelw): Remove isolate parameter when objects.cc is fully
- // handlified.
- static Handle<Object> GetOrCreateHash(Handle<Object> object,
- Isolate* isolate);
+ static Handle<Smi> GetOrCreateHash(Isolate* isolate, Handle<Object> object);
// Checks whether this object has the same value as the given one. This
// function is implemented according to ES5, section 9.12 and can be used
// to implement the Harmony "egal" function.
bool SameValue(Object* other);
+ // Checks whether this object has the same value as the given one.
+ // +0 and -0 are treated equal. Everything else is the same as SameValue.
+ // This function is implemented according to ES6, section 7.2.4 and is used
+ // by ES6 Map and Set.
+ bool SameValueZero(Object* other);
+
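
SameValueZero is the comparison ES6 Map and Set use for keys: identical to SameValue except that +0 and -0 compare equal, while NaN still equals NaN. Restricted to doubles, the entire difference is one special case; a sketch assuming IEEE-754 semantics:

    #include <cmath>

    // ES6 7.2.4 SameValueZero for doubles (illustrative):
    bool SameValueZeroDouble(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return std::isnan(a) && std::isnan(b);
      return a == b;  // IEEE '==' already treats +0 and -0 as equal
    }

    // SameValue differs only when a == b == 0: there it additionally
    // requires std::signbit(a) == std::signbit(b).
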
// Tries to convert an object to an array index. Returns true and sets
// the output parameter if it succeeds.
inline bool ToArrayIndex(uint32_t* index);
@@ -1532,6 +1533,7 @@ class Object : public MaybeObject {
// < the length of the string. Used to implement [] on strings.
inline bool IsStringObjectWithCharacterAt(uint32_t index);
+ DECLARE_VERIFIER(Object)
#ifdef VERIFY_HEAP
// Verify a pointer is a valid object pointer.
static void VerifyPointer(Object* p);
@@ -1551,6 +1553,14 @@ class Object : public MaybeObject {
// Layout description.
static const int kHeaderSize = 0; // Object does not take up any space.
+#ifdef OBJECT_PRINT
+ // Prints this object with details.
+ void Print();
+ void Print(FILE* out);
+ void PrintLn();
+ void PrintLn(FILE* out);
+#endif
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
@@ -1593,76 +1603,6 @@ class Smi: public Object {
};
-// Failure is used for reporting out of memory situations and
-// propagating exceptions through the runtime system. Failure objects
-// are transient and cannot occur as part of the object graph.
-//
-// Failures are a single word, encoded as follows:
-// +-------------------------+---+--+--+
-// |.........unused..........|sss|tt|11|
-// +-------------------------+---+--+--+
-// 7 6 4 32 10
-//
-//
-// The low two bits, 0-1, are the failure tag, 11. The next two bits,
-// 2-3, are a failure type tag 'tt' with possible values:
-// 00 RETRY_AFTER_GC
-// 01 EXCEPTION
-// 10 INTERNAL_ERROR
-// 11 OUT_OF_MEMORY_EXCEPTION
-//
-// The next three bits, 4-6, are an allocation space tag 'sss'. The
-// allocation space tag is 000 for all failure types except
-// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are the
-// allocation spaces (the encoding is found in globals.h).
-
-// Failure type tag info.
-const int kFailureTypeTagSize = 2;
-const int kFailureTypeTagMask = (1 << kFailureTypeTagSize) - 1;
-
-class Failure: public MaybeObject {
- public:
- // RuntimeStubs assumes EXCEPTION = 1 in the compiler-generated code.
- enum Type {
- RETRY_AFTER_GC = 0,
- EXCEPTION = 1, // Returning this marker tells the real exception
- // is in Isolate::pending_exception.
- INTERNAL_ERROR = 2,
- OUT_OF_MEMORY_EXCEPTION = 3
- };
-
- inline Type type() const;
-
- // Returns the space that needs to be collected for RetryAfterGC failures.
- inline AllocationSpace allocation_space() const;
-
- inline bool IsInternalError() const;
- inline bool IsOutOfMemoryException() const;
-
- static inline Failure* RetryAfterGC(AllocationSpace space);
- static inline Failure* RetryAfterGC(); // NEW_SPACE
- static inline Failure* Exception();
- static inline Failure* InternalError();
- // TODO(jkummerow): The value is temporary instrumentation. Remove it
- // when it has served its purpose.
- static inline Failure* OutOfMemoryException(intptr_t value);
- // Casting.
- static inline Failure* cast(MaybeObject* object);
-
- // Dispatched behavior.
- void FailurePrint(FILE* out = stdout);
- void FailurePrint(StringStream* accumulator);
-
- DECLARE_VERIFIER(Failure)
-
- private:
- inline intptr_t value() const;
- static inline Failure* Construct(Type type, intptr_t value = 0);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Failure);
-};
-
-
// Heap objects typically have a map pointer in their first word. However,
// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
// encoded in the first word. The class MapWord is an abstraction of the
@@ -1723,6 +1663,15 @@ class HeapObject: public Object {
// of primitive (non-JS) objects like strings, heap numbers etc.
inline void set_map_no_write_barrier(Map* value);
+ // Get the map using acquire load.
+ inline Map* synchronized_map();
+ inline MapWord synchronized_map_word();
+
+ // Set the map using release store
+ inline void synchronized_set_map(Map* value);
+ inline void synchronized_set_map_no_write_barrier(Map* value);
+ inline void synchronized_set_map_word(MapWord map_word);
+
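
The synchronized_ accessors give the map slot acquire/release ordering so a concurrent sweeper thread observes a consistent map while the main thread changes object shapes. The same idea in portable C++11, as a stand-in for V8's hand-rolled barriers:

    #include <atomic>

    struct ObjectHeaderSketch {
      std::atomic<void*> map_slot;

      void* synchronized_map() {             // acquire load
        return map_slot.load(std::memory_order_acquire);
      }
      void synchronized_set_map(void* m) {   // release store
        map_slot.store(m, std::memory_order_release);
      }
    };
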
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
inline MapWord map_word();
@@ -1802,7 +1751,7 @@ class HeapObject: public Object {
static const int kMapOffset = Object::kHeaderSize;
static const int kHeaderSize = kMapOffset + kPointerSize;
- STATIC_CHECK(kMapOffset == Internals::kHeapObjectMapOffset);
+ STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);
protected:
// helpers for calling an ObjectVisitor to iterate over pointers in the
@@ -1810,6 +1759,8 @@ class HeapObject: public Object {
inline void IteratePointers(ObjectVisitor* v, int start, int end);
// as above, for the single element at "offset"
inline void IteratePointer(ObjectVisitor* v, int offset);
+ // as above, for the next code link of a code object.
+ inline void IterateNextCodeLink(ObjectVisitor* v, int offset);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
@@ -1880,11 +1831,18 @@ class HeapNumber: public HeapObject {
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
// IEEE doubles are two 32 bit words. The first is just mantissa, the second
- // is a mixture of sign, exponent and mantissa. Our current platforms are all
- // little endian apart from non-EABI arm which is little endian with big
- // endian floating point word ordering!
+ // is a mixture of sign, exponent and mantissa. The offsets of two 32 bit
+ // words within double numbers are endian dependent and they are set
+ // accordingly.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
static const int kMantissaOffset = kValueOffset;
static const int kExponentOffset = kValueOffset + 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static const int kMantissaOffset = kValueOffset + 4;
+ static const int kExponentOffset = kValueOffset;
+#else
+#error Unknown byte ordering
+#endif
static const int kSize = kValueOffset + kDoubleSize;
static const uint32_t kSignMask = 0x80000000u;
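
The mantissa/exponent offsets now flip with target endianness instead of hard-coding the little-endian layout (plus the old non-EABI ARM quirk). A quick host-side check of which 32-bit half is which:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = -2.0;  // bit pattern 0xC000000000000000
      uint32_t words[2];
      std::memcpy(words, &d, sizeof d);
      // Little-endian host: words[0] is the low mantissa word (0x00000000),
      // words[1] carries sign and exponent (0xC0000000). Big-endian swaps them.
      std::printf("%08x %08x\n", words[0], words[1]);
    }
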
@@ -1956,32 +1914,35 @@ class JSReceiver: public HeapObject {
static inline JSReceiver* cast(Object* obj);
// Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
- static Handle<Object> SetProperty(Handle<JSReceiver> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode =
- MAY_BE_STORE_FROM_KEYED);
- static Handle<Object> SetElement(Handle<JSReceiver> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
+ Handle<JSReceiver> object,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
+ MUST_USE_RESULT static MaybeHandle<Object> SetElement(
+ Handle<JSReceiver> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode);
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name);
- static inline bool HasLocalProperty(Handle<JSReceiver>, Handle<Name> name);
+ static inline bool HasOwnProperty(Handle<JSReceiver>, Handle<Name> name);
static inline bool HasElement(Handle<JSReceiver> object, uint32_t index);
- static inline bool HasLocalElement(Handle<JSReceiver> object, uint32_t index);
+ static inline bool HasOwnElement(Handle<JSReceiver> object, uint32_t index);
// Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
- static Handle<Object> DeleteProperty(Handle<JSReceiver> object,
- Handle<Name> name,
- DeleteMode mode = NORMAL_DELETION);
- static Handle<Object> DeleteElement(Handle<JSReceiver> object,
- uint32_t index,
- DeleteMode mode = NORMAL_DELETION);
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
+ Handle<JSReceiver> object,
+ Handle<Name> name,
+ DeleteMode mode = NORMAL_DELETION);
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteElement(
+ Handle<JSReceiver> object,
+ uint32_t index,
+ DeleteMode mode = NORMAL_DELETION);
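
Alongside the MaybeHandle conversion, StrictModeFlag becomes StrictMode and "non-strict" is renamed "sloppy" throughout, matching the HasLocalProperty to HasOwnProperty rename below. A store under the new signature, assuming the SLOPPY/STRICT enumerators this rename introduces:

    // Strict-mode property store through the new API (illustrative):
    MaybeHandle<Object> StoreStrict(Handle<JSReceiver> receiver,
                                    Handle<Name> name, Handle<Object> value) {
      return JSReceiver::SetProperty(receiver, name, value, NONE, STRICT);
    }
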
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
@@ -1993,13 +1954,20 @@ class JSReceiver: public HeapObject {
// function that was used to instantiate the object).
String* constructor_name();
- inline PropertyAttributes GetPropertyAttribute(Name* name);
- PropertyAttributes GetPropertyAttributeWithReceiver(JSReceiver* receiver,
- Name* name);
- PropertyAttributes GetLocalPropertyAttribute(Name* name);
+ static inline PropertyAttributes GetPropertyAttributes(
+ Handle<JSReceiver> object,
+ Handle<Name> name);
+ static PropertyAttributes GetPropertyAttributes(LookupIterator* it);
+ static PropertyAttributes GetOwnPropertyAttributes(
+ Handle<JSReceiver> object,
+ Handle<Name> name);
- inline PropertyAttributes GetElementAttribute(uint32_t index);
- inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
+ static inline PropertyAttributes GetElementAttribute(
+ Handle<JSReceiver> object,
+ uint32_t index);
+ static inline PropertyAttributes GetOwnElementAttribute(
+ Handle<JSReceiver> object,
+ uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -2013,35 +1981,32 @@ class JSReceiver: public HeapObject {
// Retrieves a permanent object identity hash code. May create and store a
// hash code if needed and none exists.
- inline static Handle<Object> GetOrCreateIdentityHash(
+ inline static Handle<Smi> GetOrCreateIdentityHash(
Handle<JSReceiver> object);
// Lookup a property. If found, the result is valid and has
// detailed information.
- void LocalLookup(Name* name, LookupResult* result,
- bool search_hidden_prototypes = false);
- void Lookup(Name* name, LookupResult* result);
+ void LookupOwn(Handle<Name> name, LookupResult* result,
+ bool search_hidden_prototypes = false);
+ void Lookup(Handle<Name> name, LookupResult* result);
- protected:
- Smi* GenerateIdentityHash();
+ enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
- static Handle<Object> SetPropertyWithDefinedSetter(Handle<JSReceiver> object,
- Handle<JSReceiver> setter,
- Handle<Object> value);
+ // Computes the enumerable keys for a JSObject. Used for implementing
+ // "for (n in object) { }".
+ MUST_USE_RESULT static MaybeHandle<FixedArray> GetKeys(
+ Handle<JSReceiver> object,
+ KeyCollectionType type);
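A minimal sketch of a for-in style caller (hypothetical helper, assuming an isolate in scope; not part of the patch):

    Handle<FixedArray> keys;
    if (!JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS)
             .ToHandle(&keys)) {
      return MaybeHandle<FixedArray>();  // Key collection threw.
    }
    for (int i = 0; i < keys->length(); i++) {
      Handle<Object> key(keys->get(i), isolate);
      // ... evaluate the body of "for (n in object) { }" with key ...
    }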
private:
- PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
- LookupResult* result,
- Name* name,
- bool continue_search);
-
- static Handle<Object> SetProperty(Handle<JSReceiver> receiver,
- LookupResult* result,
- Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed);
+ MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
+ Handle<JSReceiver> receiver,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode,
+ StoreFromKeyed store_from_keyed);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
@@ -2049,6 +2014,10 @@ class JSReceiver: public HeapObject {
// Forward declaration for JSObject::GetOrCreateHiddenPropertiesHashTable.
class ObjectHashTable;
+// Forward declaration for JSObject::Copy.
+class AllocationSite;
+
+
// The JSObject describes real heap allocated JavaScript objects with
// properties.
// Note that the map of JSObject changes during execution to enable inline
@@ -2072,18 +2041,21 @@ class JSObject: public JSReceiver {
// In the fast mode elements is a FixedArray and so each element can
// be quickly accessed. This fact is used in the generated code. The
// elements array can have one of three maps in this mode:
- // fixed_array_map, non_strict_arguments_elements_map or
+ // fixed_array_map, sloppy_arguments_elements_map or
// fixed_cow_array_map (for copy-on-write arrays). In the latter case
// the elements array may be shared by a few objects and so before
// writing to any element the array must be copied. Use
// EnsureWritableFastElements in this case.
//
// In the slow mode the elements is either a NumberDictionary, an
- // ExternalArray, or a FixedArray parameter map for a (non-strict)
+ // ExternalArray, or a FixedArray parameter map for a (sloppy)
// arguments object.
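A sketch of the copy-on-write rule above (hypothetical helper, not from the patch): make the backing store writable before the first write, so that a shared fixed_cow_array_map array is copied first.

    static void StoreToFastElement(Handle<JSObject> object, int index,
                                   Handle<Object> value) {
      // Copies the FixedArray if it is shared (fixed_cow_array_map).
      Handle<FixedArray> elements =
          JSObject::EnsureWritableFastElements(object);
      elements->set(index, *value);
    }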
DECL_ACCESSORS(elements, FixedArrayBase)
inline void initialize_elements();
- MUST_USE_RESULT inline MaybeObject* ResetElements();
+ static void ResetElements(Handle<JSObject> object);
+ static inline void SetMapAndElements(Handle<JSObject> object,
+ Handle<Map> map,
+ Handle<FixedArrayBase> elements);
inline ElementsKind GetElementsKind();
inline ElementsAccessor* GetElementsAccessor();
// Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind.
@@ -2101,33 +2073,40 @@ class JSObject: public JSReceiver {
// Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS
// ElementsKind.
inline bool HasFastHoleyElements();
- inline bool HasNonStrictArgumentsElements();
+ inline bool HasSloppyArgumentsElements();
inline bool HasDictionaryElements();
- inline bool HasExternalPixelElements();
+
+ inline bool HasExternalUint8ClampedElements();
inline bool HasExternalArrayElements();
- inline bool HasExternalByteElements();
- inline bool HasExternalUnsignedByteElements();
- inline bool HasExternalShortElements();
- inline bool HasExternalUnsignedShortElements();
- inline bool HasExternalIntElements();
- inline bool HasExternalUnsignedIntElements();
- inline bool HasExternalFloatElements();
- inline bool HasExternalDoubleElements();
+ inline bool HasExternalInt8Elements();
+ inline bool HasExternalUint8Elements();
+ inline bool HasExternalInt16Elements();
+ inline bool HasExternalUint16Elements();
+ inline bool HasExternalInt32Elements();
+ inline bool HasExternalUint32Elements();
+ inline bool HasExternalFloat32Elements();
+ inline bool HasExternalFloat64Elements();
+
+ inline bool HasFixedTypedArrayElements();
+
+ inline bool HasFixedUint8ClampedElements();
+ inline bool HasFixedArrayElements();
+ inline bool HasFixedInt8Elements();
+ inline bool HasFixedUint8Elements();
+ inline bool HasFixedInt16Elements();
+ inline bool HasFixedUint16Elements();
+ inline bool HasFixedInt32Elements();
+ inline bool HasFixedUint32Elements();
+ inline bool HasFixedFloat32Elements();
+ inline bool HasFixedFloat64Elements();
+
bool HasFastArgumentsElements();
bool HasDictionaryArgumentsElements();
inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
- inline bool ShouldTrackAllocationInfo();
-
- inline void set_map_and_elements(
- Map* map,
- FixedArrayBase* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
// Requires: HasFastElements().
static Handle<FixedArray> EnsureWritableFastElements(
Handle<JSObject> object);
- MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
// Collects elements starting at index 0.
// Undefined values are placed after non-undefined values.
@@ -2135,48 +2114,44 @@ class JSObject: public JSReceiver {
static Handle<Object> PrepareElementsForSort(Handle<JSObject> object,
uint32_t limit);
// As PrepareElementsForSort, but only on objects where elements is
- // a dictionary, and it will stay a dictionary.
+ // a dictionary, and it will stay a dictionary. Collates undefined and
+ // nonexistent elements below limit from position zero of the elements.
static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object,
uint32_t limit);
- MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
-
- static Handle<Object> GetPropertyWithCallback(Handle<JSObject> object,
- Handle<Object> receiver,
- Handle<Object> structure,
- Handle<Name> name);
- static Handle<Object> SetPropertyWithCallback(
- Handle<JSObject> object,
- Handle<Object> structure,
- Handle<Name> name,
- Handle<Object> value,
- Handle<JSObject> holder,
- StrictModeFlag strict_mode);
-
- static Handle<Object> SetPropertyWithInterceptor(
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithInterceptor(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
- static Handle<Object> SetPropertyForResult(
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyForResult(
Handle<JSObject> object,
LookupResult* result,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
- static Handle<Object> SetLocalPropertyIgnoreAttributes(
+ // SetOwnPropertyIgnoreAttributes converts callbacks to fields. We need to
+ // grant an exemption to ExecutableAccessor callbacks in some cases.
+ enum ExecutableAccessorInfoHandling {
+ DEFAULT_HANDLING,
+ DONT_FORCE_FIELD
+ };
+
+ MUST_USE_RESULT static MaybeHandle<Object> SetOwnPropertyIgnoreAttributes(
Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
ValueType value_type = OPTIMAL_REPRESENTATION,
StoreMode mode = ALLOW_AS_CONSTANT,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
+ ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
@@ -2195,17 +2170,19 @@ class JSObject: public JSReceiver {
static void MigrateInstance(Handle<JSObject> instance);
// Migrates the given object only if the target map is already available,
- // or returns an empty handle if such a map is not yet available.
- static Handle<Object> TryMigrateInstance(Handle<JSObject> instance);
+ // or returns false if such a map is not yet available.
+ static bool TryMigrateInstance(Handle<JSObject> instance);
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- Object* GetNormalizedProperty(LookupResult* result);
+ Object* GetNormalizedProperty(const LookupResult* result);
+ static Handle<Object> GetNormalizedProperty(Handle<JSObject> object,
+ const LookupResult* result);
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
static void SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
+ const LookupResult* result,
Handle<Object> value);
// Sets the property value in a normalized object given (key, value, details).
@@ -2222,26 +2199,24 @@ class JSObject: public JSReceiver {
InterceptorInfo* GetIndexedInterceptor();
// Used from JSReceiver.
- PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
- Name* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
- Name* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- Name* name,
- bool continue_search);
- PropertyAttributes GetElementAttributeWithReceiver(JSReceiver* receiver,
- uint32_t index,
- bool continue_search);
+ static Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptor(
+ Handle<JSObject> holder,
+ Handle<Object> receiver,
+ Handle<Name> name);
+ static PropertyAttributes GetPropertyAttributesWithFailedAccessCheck(
+ LookupIterator* it);
+ static PropertyAttributes GetElementAttributeWithReceiver(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool check_prototype);
// Retrieves an AccessorPair property from the given object. Might return
// undefined if the property doesn't exist or is of a different kind.
- static Handle<Object> GetAccessor(Handle<JSObject> object,
- Handle<Name> name,
- AccessorComponent component);
+ MUST_USE_RESULT static MaybeHandle<Object> GetAccessor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component);
// Defines an AccessorPair property on the given object.
// TODO(mstarzinger): Rename to SetAccessor() and return empty handle on
@@ -2254,36 +2229,23 @@ class JSObject: public JSReceiver {
v8::AccessControl access_control = v8::DEFAULT);
// Defines an AccessorInfo property on the given object.
- static Handle<Object> SetAccessor(Handle<JSObject> object,
- Handle<AccessorInfo> info);
-
- static Handle<Object> GetPropertyWithInterceptor(
+ MUST_USE_RESULT static MaybeHandle<Object> SetAccessor(
Handle<JSObject> object,
- Handle<Object> receiver,
- Handle<Name> name,
- PropertyAttributes* attributes);
- static Handle<Object> GetPropertyPostInterceptor(
+ Handle<AccessorInfo> info);
+
+ MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
Handle<JSObject> object,
Handle<Object> receiver,
- Handle<Name> name,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor(
- Object* receiver,
- Name* name,
- PropertyAttributes* attributes);
+ Handle<Name> name);
// Returns true if this is an instance of an api function and has
// been modified since it was created. May give false positives.
bool IsDirty();
- // If the receiver is a JSGlobalProxy this method will return its prototype,
- // otherwise the result is the receiver itself.
- inline Object* BypassGlobalProxy();
-
// Accessors for hidden properties object.
//
- // Hidden properties are not local properties of the object itself.
- // Instead they are stored in an auxiliary structure kept as a local
+ // Hidden properties are not own properties of the object itself.
+ // Instead they are stored in an auxiliary structure kept as an own
// property with a special name Heap::hidden_string(). But if the
// receiver is a JSGlobalProxy then the auxiliary object is a property
// of its prototype, and if it's a detached proxy, then you can't have
@@ -2297,36 +2259,42 @@ class JSObject: public JSReceiver {
// Gets the value of a hidden property with the given key. Returns the hole
// if the property doesn't exist (or if called on a detached proxy),
// otherwise returns the value set for the key.
- Object* GetHiddenProperty(Name* key);
+ Object* GetHiddenProperty(Handle<Name> key);
// Deletes a hidden property. Deleting a non-existing property is
// considered successful.
static void DeleteHiddenProperty(Handle<JSObject> object,
Handle<Name> key);
// Returns true if the object has a property with the hidden string as name.
- bool HasHiddenProperties();
+ static bool HasHiddenProperties(Handle<JSObject> object);
static void SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash);
- inline void ValidateElements();
+ static inline void ValidateElements(Handle<JSObject> object);
// Makes sure that this object can contain HeapObject as elements.
static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
// Makes sure that this object can contain the specified elements.
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
+ static inline void EnsureCanContainElements(
+ Handle<JSObject> object,
Object** elements,
uint32_t count,
EnsureElementsMode mode);
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
- FixedArrayBase* elements,
+ static inline void EnsureCanContainElements(
+ Handle<JSObject> object,
+ Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode);
- MUST_USE_RESULT MaybeObject* EnsureCanContainElements(
+ static void EnsureCanContainElements(
+ Handle<JSObject> object,
Arguments* arguments,
uint32_t first_arg,
uint32_t arg_count,
EnsureElementsMode mode);
+ // Would we convert a fast elements array to dictionary mode given
+ // an access at key?
+ bool WouldConvertToSlowElements(Handle<Object> key);
// Do we want to keep the elements in fast case when increasing the
// capacity?
bool ShouldConvertToSlowElements(int new_capacity);
@@ -2347,33 +2315,42 @@ class JSObject: public JSReceiver {
}
// These methods do not perform access checks!
- AccessorPair* GetLocalPropertyAccessorPair(Name* name);
- AccessorPair* GetLocalElementAccessorPair(uint32_t index);
+ MUST_USE_RESULT static MaybeHandle<AccessorPair> GetOwnPropertyAccessorPair(
+ Handle<JSObject> object,
+ Handle<Name> name);
+ MUST_USE_RESULT static MaybeHandle<AccessorPair> GetOwnElementAccessorPair(
+ Handle<JSObject> object,
+ uint32_t index);
- static Handle<Object> SetFastElement(Handle<JSObject> object, uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode,
- bool check_prototype);
+ MUST_USE_RESULT static MaybeHandle<Object> SetFastElement(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictMode strict_mode,
+ bool check_prototype);
- static Handle<Object> SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> SetOwnElement(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictMode strict_mode);
// Empty handle is returned if the element cannot be set to the given value.
- static Handle<Object> SetElement(
+ MUST_USE_RESULT static MaybeHandle<Object> SetElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype = true,
SetPropertyMode set_mode = SET_PROPERTY);
// Returns the index'th element.
// The undefined object if index is out of bounds.
- MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver,
- uint32_t index);
+ MUST_USE_RESULT static MaybeHandle<Object> GetElementWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ uint32_t index);
enum SetFastElementsCapacitySmiMode {
kAllowSmiElements,
@@ -2381,15 +2358,11 @@ class JSObject: public JSReceiver {
kDontAllowSmiElements
};
- static Handle<FixedArray> SetFastElementsCapacityAndLength(
- Handle<JSObject> object,
- int capacity,
- int length,
- SetFastElementsCapacitySmiMode smi_mode);
// Replace the elements' backing store with fast elements of the given
// capacity. Update the length for JSArrays. Returns the new backing
// store.
- MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
+ static Handle<FixedArray> SetFastElementsCapacityAndLength(
+ Handle<JSObject> object,
int capacity,
int length,
SetFastElementsCapacitySmiMode smi_mode);
@@ -2397,15 +2370,21 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
int capacity,
int length);
- MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
- int capacity,
- int length);
// Lookup interceptors are used for handling properties controlled by host
// objects.
inline bool HasNamedInterceptor();
inline bool HasIndexedInterceptor();
+ // Computes the enumerable keys from interceptors. Used for debug mirrors and
+ // by JSReceiver::GetKeys.
+ MUST_USE_RESULT static MaybeHandle<JSObject> GetKeysForNamedInterceptor(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver);
+ MUST_USE_RESULT static MaybeHandle<JSObject> GetKeysForIndexedInterceptor(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver);
+
// Support functions for v8 api (needed for correct interceptor behavior).
static bool HasRealNamedProperty(Handle<JSObject> object,
Handle<Name> key);
@@ -2424,27 +2403,27 @@ class JSObject: public JSReceiver {
inline void SetInternalField(int index, Smi* value);
// The following lookup functions skip interceptors.
- void LocalLookupRealNamedProperty(Name* name, LookupResult* result);
- void LookupRealNamedProperty(Name* name, LookupResult* result);
- void LookupRealNamedPropertyInPrototypes(Name* name, LookupResult* result);
- void LookupCallbackProperty(Name* name, LookupResult* result);
+ void LookupOwnRealNamedProperty(Handle<Name> name, LookupResult* result);
+ void LookupRealNamedProperty(Handle<Name> name, LookupResult* result);
+ void LookupRealNamedPropertyInPrototypes(Handle<Name> name,
+ LookupResult* result);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
- int NumberOfLocalProperties(PropertyAttributes filter = NONE);
+ int NumberOfOwnProperties(PropertyAttributes filter = NONE);
// Fill in details for properties into storage starting at the specified
// index.
- void GetLocalPropertyNames(
+ void GetOwnPropertyNames(
FixedArray* storage, int index, PropertyAttributes filter = NONE);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
- int NumberOfLocalElements(PropertyAttributes filter);
+ int NumberOfOwnElements(PropertyAttributes filter);
// Returns the number of enumerable elements (ignoring interceptors).
int NumberOfEnumElements();
// Returns the number of elements on this object filtering out elements
// with the specified attributes (ignoring interceptors).
- int GetLocalElementKeys(FixedArray* storage, PropertyAttributes filter);
+ int GetOwnElementKeys(FixedArray* storage, PropertyAttributes filter);
// Count and fill in the enumerable elements into storage.
// (storage->length() == NumberOfEnumElements()).
// If storage is NULL, will count the elements without adding
@@ -2456,22 +2435,15 @@ class JSObject: public JSReceiver {
// map and the ElementsKind set.
static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
ElementsKind to_kind);
- inline MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
- Isolate* isolate,
- ElementsKind elements_kind);
- MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow(
- ElementsKind elements_kind);
-
static void TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
-
- // TODO(mstarzinger): Both public because of ConvertAnsSetLocalProperty().
+ // TODO(mstarzinger): Both public because of ConvertAndSetOwnProperty().
static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
static void GeneralizeFieldRepresentation(Handle<JSObject> object,
int modify_index,
Representation new_representation,
+ Handle<HeapType> new_field_type,
StoreMode store_mode);
// Convert the object to use the canonical dictionary
@@ -2487,18 +2459,17 @@ class JSObject: public JSReceiver {
static Handle<SeededNumberDictionary> NormalizeElements(
Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* NormalizeElements();
-
// Transform slow named properties to fast variants.
static void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
// Access fast-case object properties at index.
- MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
- Representation representation,
- int index);
- inline Object* RawFastPropertyAt(int index);
- inline void FastPropertyAtPut(int index, Object* value);
+ static Handle<Object> FastPropertyAt(Handle<JSObject> object,
+ Representation representation,
+ FieldIndex index);
+ inline Object* RawFastPropertyAt(FieldIndex index);
+ inline void FastPropertyAtPut(FieldIndex index, Object* value);
+ void WriteToField(int descriptor, Object* value);
// Access to in object properties.
inline int GetInObjectPropertyOffset(int index);
@@ -2509,9 +2480,10 @@ class JSObject: public JSReceiver {
= UPDATE_WRITE_BARRIER);
// Set the object's prototype (only JSReceiver and null are allowed values).
- static Handle<Object> SetPrototype(Handle<JSObject> object,
- Handle<Object> value,
- bool skip_hidden_prototypes = false);
+ MUST_USE_RESULT static MaybeHandle<Object> SetPrototype(
+ Handle<JSObject> object,
+ Handle<Object> value,
+ bool skip_hidden_prototypes = false);
// Initializes the body after properties slot, properties slot is
// initialized by set_properties. Fill the pre-allocated fields with
@@ -2526,10 +2498,11 @@ class JSObject: public JSReceiver {
bool ReferencesObject(Object* obj);
// Disallow further properties to be added to the object.
- static Handle<Object> PreventExtensions(Handle<JSObject> object);
+ MUST_USE_RESULT static MaybeHandle<Object> PreventExtensions(
+ Handle<JSObject> object);
// ES5 Object.freeze
- static Handle<Object> Freeze(Handle<JSObject> object);
+ MUST_USE_RESULT static MaybeHandle<Object> Freeze(Handle<JSObject> object);
// Called the first time an object is observed with ES7 Object.observe.
static void SetObserved(Handle<JSObject> object);
@@ -2541,11 +2514,16 @@ class JSObject: public JSReceiver {
};
static Handle<JSObject> Copy(Handle<JSObject> object);
- static Handle<JSObject> DeepCopy(Handle<JSObject> object,
- AllocationSiteUsageContext* site_context,
- DeepCopyHints hints = kNoHints);
- static Handle<JSObject> DeepWalk(Handle<JSObject> object,
- AllocationSiteCreationContext* site_context);
+ MUST_USE_RESULT static MaybeHandle<JSObject> DeepCopy(
+ Handle<JSObject> object,
+ AllocationSiteUsageContext* site_context,
+ DeepCopyHints hints = kNoHints);
+ MUST_USE_RESULT static MaybeHandle<JSObject> DeepWalk(
+ Handle<JSObject> object,
+ AllocationSiteCreationContext* site_context);
+
+ static Handle<Object> GetDataProperty(Handle<JSObject> object,
+ Handle<Name> key);
// Casting.
static inline JSObject* cast(Object* obj);
@@ -2560,9 +2538,10 @@ class JSObject: public JSReceiver {
void PrintTransitions(FILE* out = stdout);
#endif
- void PrintElementsTransition(
- FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
- ElementsKind to_kind, FixedArrayBase* to_elements);
+ static void PrintElementsTransition(
+ FILE* file, Handle<JSObject> object,
+ ElementsKind from_kind, Handle<FixedArrayBase> from_elements,
+ ElementsKind to_kind, Handle<FixedArrayBase> to_elements);
void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map);
@@ -2622,12 +2601,16 @@ class JSObject: public JSReceiver {
// don't want to be wasteful with long lived objects.
static const int kMaxUncheckedOldFastElementsLength = 500;
- // Note that Heap::MaxRegularSpaceAllocationSize() puts a limit on
+ // Note that Page::kMaxRegularHeapObjectSize puts a limit on
// permissible values (see the ASSERT in heap.cc).
static const int kInitialMaxFastElementArray = 100000;
+ // This constant applies only to the initial map of "$Object" aka
+ // "global.Object" and not to arbitrary other JSObject maps.
+ static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
+
static const int kFastPropertiesSoftLimit = 12;
- static const int kMaxFastProperties = 64;
+ static const int kMaxFastProperties = 128;
static const int kMaxInstanceSize = 255 * kPointerSize;
// When extending the backing storage for property values, we increase
// its size by more than the 1 entry necessary, so sequentially adding fields
@@ -2639,13 +2622,15 @@ class JSObject: public JSReceiver {
static const int kElementsOffset = kPropertiesOffset + kPointerSize;
static const int kHeaderSize = kElementsOffset + kPointerSize;
- STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
+ STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
class BodyDescriptor : public FlexibleBodyDescriptor<kPropertiesOffset> {
public:
static inline int SizeOf(Map* map, HeapObject* object);
};
+ Context* GetCreationContext();
+
// Enqueue change record for Object.observe. May cause GC.
static void EnqueueChangeRecord(Handle<JSObject> object,
const char* type,
@@ -2659,128 +2644,117 @@ class JSObject: public JSReceiver {
static void UpdateAllocationSite(Handle<JSObject> object,
ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
// Used from Object::GetProperty().
- static Handle<Object> GetPropertyWithFailedAccessCheck(
+ MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithFailedAccessCheck(
+ LookupIterator* it);
+
+ MUST_USE_RESULT static MaybeHandle<Object> GetElementWithCallback(
Handle<JSObject> object,
Handle<Object> receiver,
- LookupResult* result,
- Handle<Name> name,
- PropertyAttributes* attributes);
-
- MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
- Object* structure,
- uint32_t index,
- Object* holder);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithInterceptor(
- JSReceiver* receiver,
+ Handle<Object> structure,
+ uint32_t index,
+ Handle<Object> holder);
+
+ static PropertyAttributes GetElementAttributeWithInterceptor(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
uint32_t index,
bool continue_search);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver,
+ static PropertyAttributes GetElementAttributeWithoutInterceptor(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
uint32_t index,
bool continue_search);
- static Handle<Object> SetElementWithCallback(
+ MUST_USE_RESULT static MaybeHandle<Object> SetElementWithCallback(
Handle<JSObject> object,
Handle<Object> structure,
uint32_t index,
Handle<Object> value,
Handle<JSObject> holder,
- StrictModeFlag strict_mode);
- static Handle<Object> SetElementWithInterceptor(
+ StrictMode strict_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> SetElementWithInterceptor(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
- static Handle<Object> SetElementWithoutInterceptor(
+ MUST_USE_RESULT static MaybeHandle<Object> SetElementWithoutInterceptor(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
- static Handle<Object> SetElementWithCallbackSetterInPrototypes(
+ MUST_USE_RESULT
+ static MaybeHandle<Object> SetElementWithCallbackSetterInPrototypes(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
bool* found,
- StrictModeFlag strict_mode);
- static Handle<Object> SetDictionaryElement(
+ StrictMode strict_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> SetDictionaryElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode = SET_PROPERTY);
- static Handle<Object> SetFastDoubleElement(
+ MUST_USE_RESULT static MaybeHandle<Object> SetFastDoubleElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype = true);
// Searches the prototype chain for property 'name'. If it is found and
// has a setter, invoke it and set '*done' to true. If it is found and is
// read-only, reject and set '*done' to true. Otherwise, set '*done' to
// false. Can throw and return an empty handle with '*done==true'.
- static Handle<Object> SetPropertyViaPrototypes(
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyViaPrototypes(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done);
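A sketch of the '*done' protocol described above (illustrative caller inside JSObject; the surrounding arguments are assumed to be in scope):

    bool done = false;
    Handle<Object> result;
    if (!SetPropertyViaPrototypes(object, name, value, attributes,
                                  strict_mode, &done).ToHandle(&result)) {
      return MaybeHandle<Object>();   // A setter in the chain threw.
    }
    if (done) return result;          // Handled (or rejected) by a prototype.
    // Otherwise fall through and add the property to the object itself.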
- static Handle<Object> SetPropertyPostInterceptor(
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyPostInterceptor(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- static Handle<Object> SetPropertyUsingTransition(
+ StrictMode strict_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyUsingTransition(
Handle<JSObject> object,
LookupResult* lookup,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes);
- static Handle<Object> SetPropertyWithFailedAccessCheck(
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithFailedAccessCheck(
Handle<JSObject> object,
LookupResult* result,
Handle<Name> name,
Handle<Object> value,
bool check_prototype,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
// Add a property to an object.
- static Handle<Object> AddProperty(
+ MUST_USE_RESULT static MaybeHandle<Object> AddProperty(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
ValueType value_type = OPTIMAL_REPRESENTATION,
StoreMode mode = ALLOW_AS_CONSTANT,
TransitionFlag flag = INSERT_TRANSITION);
- // Add a constant function property to a fast-case object.
- // This leaves a CONSTANT_TRANSITION in the old map, and
- // if it is called on a second object with this map, a
- // normal property is added instead, with a map transition.
- // This avoids the creation of many maps with the same constant
- // function, all orphaned.
- static void AddConstantProperty(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> constant,
- PropertyAttributes attributes,
- TransitionFlag flag);
-
// Add a property to a fast-case object.
static void AddFastProperty(Handle<JSObject> object,
Handle<Name> name,
@@ -2790,14 +2764,9 @@ class JSObject: public JSReceiver {
ValueType value_type,
TransitionFlag flag);
- // Add a property to a fast-case object using a map transition to
- // new_map.
- static void AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> new_map,
- Handle<Name> name,
- Handle<Object> value,
- int field_index,
- Representation representation);
+ static void MigrateToNewProperty(Handle<JSObject> object,
+ Handle<Map> transition,
+ Handle<Object> value);
// Add a property to a slow-case object.
static void AddSlowProperty(Handle<JSObject> object,
@@ -2805,25 +2774,29 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes);
- static Handle<Object> DeleteProperty(Handle<JSObject> object,
- Handle<Name> name,
- DeleteMode mode);
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode);
static Handle<Object> DeletePropertyPostInterceptor(Handle<JSObject> object,
Handle<Name> name,
DeleteMode mode);
- static Handle<Object> DeletePropertyWithInterceptor(Handle<JSObject> object,
- Handle<Name> name);
+ MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name);
// Deletes the named property in a normalized object.
static Handle<Object> DeleteNormalizedProperty(Handle<JSObject> object,
Handle<Name> name,
DeleteMode mode);
- static Handle<Object> DeleteElement(Handle<JSObject> object,
- uint32_t index,
- DeleteMode mode);
- static Handle<Object> DeleteElementWithInterceptor(Handle<JSObject> object,
- uint32_t index);
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteElement(
+ Handle<JSObject> object,
+ uint32_t index,
+ DeleteMode mode);
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteElementWithInterceptor(
+ Handle<JSObject> object,
+ uint32_t index);
bool ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
@@ -2835,7 +2808,7 @@ class JSObject: public JSReceiver {
// Gets the current elements capacity and the number of used elements.
void GetElementsCapacityAndUsage(int* capacity, int* used);
- bool CanSetCallback(Name* name);
+ static bool CanSetCallback(Handle<JSObject> object, Handle<Name> name);
static void SetElementCallback(Handle<JSObject> object,
uint32_t index,
Handle<Object> structure,
@@ -2885,7 +2858,7 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT Object* GetIdentityHash();
- static Handle<Object> GetOrCreateIdentityHash(Handle<JSObject> object);
+ static Handle<Smi> GetOrCreateIdentityHash(Handle<JSObject> object);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2899,6 +2872,10 @@ class FixedArrayBase: public HeapObject {
inline int length();
inline void set_length(int value);
+ // Get and set the length using acquire loads and release stores.
+ inline int synchronized_length();
+ inline void synchronized_set_length(int value);
+
inline static FixedArrayBase* cast(Object* object);
// Layout description.
@@ -2917,6 +2894,7 @@ class FixedArray: public FixedArrayBase {
public:
// Setter and getter for elements.
inline Object* get(int index);
+ static inline Handle<Object> get(Handle<FixedArray> array, int index);
// Setter that uses write barrier.
inline void set(int index, Object* value);
inline bool is_the_hole(int index);
@@ -2937,16 +2915,26 @@ class FixedArray: public FixedArrayBase {
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
- // Copy operations.
- MUST_USE_RESULT inline MaybeObject* Copy();
- MUST_USE_RESULT MaybeObject* CopySize(int new_length,
- PretenureFlag pretenure = NOT_TENURED);
+ inline void FillWithHoles(int from, int to);
+
+ // Shrink length and insert filler objects.
+ void Shrink(int length);
+
+ // Copy operation.
+ static Handle<FixedArray> CopySize(Handle<FixedArray> array,
+ int new_length,
+ PretenureFlag pretenure = NOT_TENURED);
// Add the elements of a JSArray to this FixedArray.
- MUST_USE_RESULT MaybeObject* AddKeysFromJSArray(JSArray* array);
+ MUST_USE_RESULT static MaybeHandle<FixedArray> AddKeysFromArrayLike(
+ Handle<FixedArray> content,
+ Handle<JSObject> array);
- // Compute the union of this and other.
- MUST_USE_RESULT MaybeObject* UnionOfKeys(FixedArray* other);
+ // Computes the union of keys and returns the result.
+ // Used for implementing "for (n in object) { }".
+ MUST_USE_RESULT static MaybeHandle<FixedArray> UnionOfKeys(
+ Handle<FixedArray> first,
+ Handle<FixedArray> second);
// Copy a sub array from the receiver to dest.
void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
@@ -2957,6 +2945,11 @@ class FixedArray: public FixedArrayBase {
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
+ // Garbage collection support.
+ Object** RawFieldOfElementAt(int index) {
+ return HeapObject::RawField(this, OffsetOfElementAt(index));
+ }
+
// Casting.
static inline FixedArray* cast(Object* obj);
@@ -3007,7 +3000,7 @@ class FixedArray: public FixedArrayBase {
Object* value);
private:
- STATIC_CHECK(kHeaderSize == Internals::kFixedArrayHeaderSize);
+ STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
@@ -3019,16 +3012,13 @@ class FixedDoubleArray: public FixedArrayBase {
// Setter and getter for elements.
inline double get_scalar(int index);
inline int64_t get_representation(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<FixedDoubleArray> array, int index);
inline void set(int index, double value);
inline void set_the_hole(int index);
// Checking for the hole.
inline bool is_the_hole(int index);
- // Copy operations
- MUST_USE_RESULT inline MaybeObject* Copy();
-
// Garbage collection support.
inline static int SizeFor(int length) {
return kHeaderSize + length * kDoubleSize;
@@ -3037,6 +3027,8 @@ class FixedDoubleArray: public FixedArrayBase {
// Gives access to raw memory which stores the array's data.
inline double* data_start();
+ inline void FillWithHoles(int from, int to);
+
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
@@ -3064,75 +3056,277 @@ class FixedDoubleArray: public FixedArrayBase {
// ConstantPoolArray describes a fixed-sized array containing constant pool
-// entires.
-// The format of the pool is:
-// [0]: Field holding the first index which is a pointer entry
-// [1]: Field holding the first index which is a int32 entry
-// [2] ... [first_ptr_index() - 1]: 64 bit entries
-// [first_ptr_index()] ... [first_int32_index() - 1]: pointer entries
-// [first_int32_index()] ... [length - 1]: 32 bit entries
-class ConstantPoolArray: public FixedArrayBase {
- public:
- // Getters for the field storing the first index for different type entries.
- inline int first_ptr_index();
- inline int first_int64_index();
- inline int first_int32_index();
-
- // Getters for counts of different type entries.
- inline int count_of_ptr_entries();
- inline int count_of_int64_entries();
- inline int count_of_int32_entries();
+// entries.
+//
+// A ConstantPoolArray can be structured in two different ways depending upon
+// whether it is extended or small. The is_extended_layout() method can be used
+// to discover which layout the constant pool has.
+//
+// The format of a small constant pool is:
+// [kSmallLayout1Offset] : Small section layout bitmap 1
+// [kSmallLayout2Offset] : Small section layout bitmap 2
+// [first_index(INT64, SMALL_SECTION)] : 64 bit entries
+// ... : ...
+// [first_index(CODE_PTR, SMALL_SECTION)] : code pointer entries
+// ... : ...
+// [first_index(HEAP_PTR, SMALL_SECTION)] : heap pointer entries
+// ... : ...
+// [first_index(INT32, SMALL_SECTION)] : 32 bit entries
+// ... : ...
+//
+// If the constant pool has an extended layout, it also contains an extended
+// section, which has the following format at
+// location get_extended_section_header_offset():
+// [kExtendedInt64CountOffset] : count of extended 64 bit entries
+// [kExtendedCodePtrCountOffset] : count of extended code pointers
+// [kExtendedHeapPtrCountOffset] : count of extended heap pointers
+// [kExtendedInt32CountOffset] : count of extended 32 bit entries
+// [first_index(INT64, EXTENDED_SECTION)] : 64 bit entries
+// ... : ...
+// [first_index(CODE_PTR, EXTENDED_SECTION)]: code pointer entries
+// ... : ...
+// [first_index(HEAP_PTR, EXTENDED_SECTION)]: heap pointer entries
+// ... : ...
+// [first_index(INT32, EXTENDED_SECTION)] : 32 bit entries
+// ... : ...
+//
+class ConstantPoolArray: public HeapObject {
+ public:
+ enum WeakObjectState {
+ NO_WEAK_OBJECTS,
+ WEAK_OBJECTS_IN_OPTIMIZED_CODE,
+ WEAK_OBJECTS_IN_IC
+ };
+
+ enum Type {
+ INT64 = 0,
+ CODE_PTR,
+ HEAP_PTR,
+ INT32,
+ // Number of types stored by the ConstantPoolArrays.
+ NUMBER_OF_TYPES,
+ FIRST_TYPE = INT64,
+ LAST_TYPE = INT32
+ };
+
+ enum LayoutSection {
+ SMALL_SECTION = 0,
+ EXTENDED_SECTION
+ };
+
+ class NumberOfEntries BASE_EMBEDDED {
+ public:
+ inline NumberOfEntries(int int64_count, int code_ptr_count,
+ int heap_ptr_count, int int32_count) {
+ element_counts_[INT64] = int64_count;
+ element_counts_[CODE_PTR] = code_ptr_count;
+ element_counts_[HEAP_PTR] = heap_ptr_count;
+ element_counts_[INT32] = int32_count;
+ }
+
+ inline NumberOfEntries(ConstantPoolArray* array, LayoutSection section) {
+ element_counts_[INT64] = array->number_of_entries(INT64, section);
+ element_counts_[CODE_PTR] = array->number_of_entries(CODE_PTR, section);
+ element_counts_[HEAP_PTR] = array->number_of_entries(HEAP_PTR, section);
+ element_counts_[INT32] = array->number_of_entries(INT32, section);
+ }
+
+ inline int count_of(Type type) const {
+ ASSERT(type < NUMBER_OF_TYPES);
+ return element_counts_[type];
+ }
+
+ inline int total_count() const {
+ int count = 0;
+ for (int i = 0; i < NUMBER_OF_TYPES; i++) {
+ count += element_counts_[i];
+ }
+ return count;
+ }
+
+ inline bool are_in_range(int min, int max) const {
+ for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) {
+ if (element_counts_[i] < min || element_counts_[i] > max) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
+ int element_counts_[NUMBER_OF_TYPES];
+ };
+
+ class Iterator BASE_EMBEDDED {
+ public:
+ inline Iterator(ConstantPoolArray* array, Type type)
+ : array_(array), type_(type), final_section_(array->final_section()) {
+ current_section_ = SMALL_SECTION;
+ next_index_ = array->first_index(type, SMALL_SECTION);
+ update_section();
+ }
+
+ inline int next_index();
+ inline bool is_finished();
+ private:
+ inline void update_section();
+ ConstantPoolArray* array_;
+ const Type type_;
+ const LayoutSection final_section_;
+
+ LayoutSection current_section_;
+ int next_index_;
+ };
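A usage sketch for the iterator (illustrative; pool is an assumed ConstantPoolArray*): it yields the indices of all entries of one type, walking the small section and then, if present, the extended section.

    ConstantPoolArray::Iterator it(pool, ConstantPoolArray::INT64);
    while (!it.is_finished()) {
      int index = it.next_index();
      int64_t entry = pool->get_int64_entry(index);
      // ... use entry ...
    }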
+
+ // Getters for the first index, the last index and the count of entries of
+ // a given type for a given layout section.
+ inline int first_index(Type type, LayoutSection layout_section);
+ inline int last_index(Type type, LayoutSection layout_section);
+ inline int number_of_entries(Type type, LayoutSection layout_section);
+
+ // Returns the type of the entry at the given index.
+ inline Type get_type(int index);
// Setter and getter for pool elements.
- inline Object* get_ptr_entry(int index);
+ inline Address get_code_ptr_entry(int index);
+ inline Object* get_heap_ptr_entry(int index);
inline int64_t get_int64_entry(int index);
inline int32_t get_int32_entry(int index);
inline double get_int64_entry_as_double(int index);
+ inline void set(int index, Address value);
inline void set(int index, Object* value);
inline void set(int index, int64_t value);
inline void set(int index, double value);
inline void set(int index, int32_t value);
- // Set up initial state.
- inline void SetEntryCounts(int number_of_int64_entries,
- int number_of_ptr_entries,
- int number_of_int32_entries);
+ // Setter and getter for weak objects state
+ inline void set_weak_object_state(WeakObjectState state);
+ inline WeakObjectState get_weak_object_state();
+
+ // Returns true if the constant pool has an extended layout, false if it has
+ // only the small layout.
+ inline bool is_extended_layout();
- // Copy operations
- MUST_USE_RESULT inline MaybeObject* Copy();
+ // Returns the last LayoutSection in this constant pool array.
+ inline LayoutSection final_section();
+
+ // Set up initial state for a small layout constant pool array.
+ inline void Init(const NumberOfEntries& small);
+
+ // Set up initial state for an extended layout constant pool array.
+ inline void InitExtended(const NumberOfEntries& small,
+ const NumberOfEntries& extended);
+
+ // Clears the pointer entries with GC safe values.
+ void ClearPtrEntries(Isolate* isolate);
+
+ // Returns the total number of entries in the constant pool array.
+ inline int length();
// Garbage collection support.
- inline static int SizeFor(int number_of_int64_entries,
- int number_of_ptr_entries,
- int number_of_int32_entries) {
- return RoundUp(OffsetAt(number_of_int64_entries,
- number_of_ptr_entries,
- number_of_int32_entries),
- kPointerSize);
+ inline int size();
+
+ inline static int SizeFor(const NumberOfEntries& small) {
+ int size = kFirstEntryOffset +
+ (small.count_of(INT64) * kInt64Size) +
+ (small.count_of(CODE_PTR) * kPointerSize) +
+ (small.count_of(HEAP_PTR) * kPointerSize) +
+ (small.count_of(INT32) * kInt32Size);
+ return RoundUp(size, kPointerSize);
+ }
+
+ inline static int SizeForExtended(const NumberOfEntries& small,
+ const NumberOfEntries& extended) {
+ int size = SizeFor(small);
+ size = RoundUp(size, kInt64Size); // Align extended header to 64 bits.
+ size += kExtendedFirstOffset +
+ (extended.count_of(INT64) * kInt64Size) +
+ (extended.count_of(CODE_PTR) * kPointerSize) +
+ (extended.count_of(HEAP_PTR) * kPointerSize) +
+ (extended.count_of(INT32) * kInt32Size);
+ return RoundUp(size, kPointerSize);
+ }
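A worked instance of the size computation (the constants assumed below follow from the layout description on a 64-bit target):

    // Assuming kPointerSize == kInt64Size == 8 and kInt32Size == 4,
    // kHeaderSize == 16 and hence kFirstEntryOffset == 16:
    //   NumberOfEntries small(2 /* INT64 */, 1 /* CODE_PTR */,
    //                         3 /* HEAP_PTR */, 4 /* INT32 */);
    //   SizeFor(small) == RoundUp(16 + 2*8 + 1*8 + 3*8 + 4*4, 8) == 80.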
+
+ inline static int entry_size(Type type) {
+ switch (type) {
+ case INT32:
+ return kInt32Size;
+ case INT64:
+ return kInt64Size;
+ case CODE_PTR:
+ case HEAP_PTR:
+ return kPointerSize;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
}
// Code Generation support.
inline int OffsetOfElementAt(int index) {
- ASSERT(index < length());
- if (index >= first_int32_index()) {
- return OffsetAt(count_of_int64_entries(), count_of_ptr_entries(),
- index - first_int32_index());
- } else if (index >= first_ptr_index()) {
- return OffsetAt(count_of_int64_entries(), index - first_ptr_index(), 0);
+ int offset;
+ LayoutSection section;
+ if (is_extended_layout() && index >= first_extended_section_index()) {
+ section = EXTENDED_SECTION;
+ offset = get_extended_section_header_offset() + kExtendedFirstOffset;
} else {
- return OffsetAt(index, 0, 0);
+ section = SMALL_SECTION;
+ offset = kFirstEntryOffset;
}
+
+ // Add offsets for the preceding type sections.
+ ASSERT(index <= last_index(LAST_TYPE, section));
+ for (Type type = FIRST_TYPE; index > last_index(type, section);
+ type = next_type(type)) {
+ offset += entry_size(type) * number_of_entries(type, section);
+ }
+
+ // Add offset for the index within its type.
+ Type type = get_type(index);
+ offset += entry_size(type) * (index - first_index(type, section));
+ return offset;
}
// Casting.
static inline ConstantPoolArray* cast(Object* obj);
- // Layout description.
- static const int kFirstPointerIndexOffset = FixedArray::kHeaderSize;
- static const int kFirstInt32IndexOffset =
- kFirstPointerIndexOffset + kPointerSize;
- static const int kFirstOffset = kFirstInt32IndexOffset + kPointerSize;
+ // Garbage collection support.
+ Object** RawFieldOfElementAt(int index) {
+ return HeapObject::RawField(this, OffsetOfElementAt(index));
+ }
+
+ // Small Layout description.
+ static const int kSmallLayout1Offset = HeapObject::kHeaderSize;
+ static const int kSmallLayout2Offset = kSmallLayout1Offset + kInt32Size;
+ static const int kHeaderSize = kSmallLayout2Offset + kInt32Size;
+ static const int kFirstEntryOffset = ROUND_UP(kHeaderSize, kInt64Size);
+
+ static const int kSmallLayoutCountBits = 10;
+ static const int kMaxSmallEntriesPerType = (1 << kSmallLayoutCountBits) - 1;
+
+ // Fields in kSmallLayout1Offset.
+ class Int64CountField: public BitField<int, 1, kSmallLayoutCountBits> {};
+ class CodePtrCountField: public BitField<int, 11, kSmallLayoutCountBits> {};
+ class HeapPtrCountField: public BitField<int, 21, kSmallLayoutCountBits> {};
+ class IsExtendedField: public BitField<bool, 31, 1> {};
+
+ // Fields in kSmallLayout2Offset.
+ class Int32CountField: public BitField<int, 1, kSmallLayoutCountBits> {};
+ class TotalCountField: public BitField<int, 11, 12> {};
+ class WeakObjectStateField: public BitField<WeakObjectState, 23, 2> {};
+
+ // Extended layout description, which starts at
+ // get_extended_section_header_offset().
+ static const int kExtendedInt64CountOffset = 0;
+ static const int kExtendedCodePtrCountOffset =
+ kExtendedInt64CountOffset + kPointerSize;
+ static const int kExtendedHeapPtrCountOffset =
+ kExtendedCodePtrCountOffset + kPointerSize;
+ static const int kExtendedInt32CountOffset =
+ kExtendedHeapPtrCountOffset + kPointerSize;
+ static const int kExtendedFirstOffset =
+ kExtendedInt32CountOffset + kPointerSize;
// Dispatched behavior.
void ConstantPoolIterateBody(ObjectVisitor* v);
@@ -3141,16 +3335,13 @@ class ConstantPoolArray: public FixedArrayBase {
DECLARE_VERIFIER(ConstantPoolArray)
private:
- inline void set_first_ptr_index(int value);
- inline void set_first_int32_index(int value);
+ inline int first_extended_section_index();
+ inline int get_extended_section_header_offset();
- inline static int OffsetAt(int number_of_int64_entries,
- int number_of_ptr_entries,
- int number_of_int32_entries) {
- return kFirstOffset
- + (number_of_int64_entries * kInt64Size)
- + (number_of_ptr_entries * kPointerSize)
- + (number_of_int32_entries * kInt32Size);
+ inline static Type next_type(Type type) {
+ ASSERT(type >= FIRST_TYPE && type < NUMBER_OF_TYPES);
+ int type_int = static_cast<int>(type);
+ return static_cast<Type>(++type_int);
}
DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolArray);
@@ -3167,23 +3358,6 @@ class ConstantPoolArray: public FixedArrayBase {
// [2 + number of descriptors * kDescriptorSize]: start of slack
class DescriptorArray: public FixedArray {
public:
- // WhitenessWitness is used to prove that a descriptor array is white
- // (unmarked), so incremental write barriers can be skipped because the
- // marking invariant cannot be broken and slots pointing into evacuation
- // candidates will be discovered when the object is scanned. A witness is
- // always stack-allocated right after creating an array. By allocating a
- // witness, incremental marking is globally disabled. The witness is then
- // passed along wherever needed to statically prove that the array is known to
- // be white.
- class WhitenessWitness {
- public:
- inline explicit WhitenessWitness(FixedArray* array);
- inline ~WhitenessWitness();
-
- private:
- IncrementalMarking* marking_;
- };
-
// Returns true for both shared empty_descriptor_array and for smis, which the
// map uses to encode additional bit fields when the descriptor array is not
// yet used.
@@ -3254,12 +3428,14 @@ class DescriptorArray: public FixedArray {
inline Name* GetKey(int descriptor_number);
inline Object** GetKeySlot(int descriptor_number);
inline Object* GetValue(int descriptor_number);
+ inline void SetValue(int descriptor_number, Object* value);
inline Object** GetValueSlot(int descriptor_number);
inline Object** GetDescriptorStartSlot(int descriptor_number);
inline Object** GetDescriptorEndSlot(int descriptor_number);
inline PropertyDetails GetDetails(int descriptor_number);
inline PropertyType GetType(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
+ inline HeapType* GetFieldType(int descriptor_number);
inline Object* GetConstant(int descriptor_number);
inline Object* GetCallbacksObject(int descriptor_number);
inline AccessorDescriptor* GetCallbacks(int descriptor_number);
@@ -3267,59 +3443,28 @@ class DescriptorArray: public FixedArray {
inline Name* GetSortedKey(int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
inline void SetSortedKey(int pointer, int descriptor_number);
- inline void InitializeRepresentations(Representation representation);
inline void SetRepresentation(int descriptor_number,
Representation representation);
// Accessor for complete descriptor.
inline void Get(int descriptor_number, Descriptor* desc);
- inline void Set(int descriptor_number,
- Descriptor* desc,
- const WhitenessWitness&);
inline void Set(int descriptor_number, Descriptor* desc);
+ void Replace(int descriptor_number, Descriptor* descriptor);
// Append automatically sets the enumeration index. This should only be used
// to add descriptors in bulk at the end, followed by sorting the descriptor
// array.
- inline void Append(Descriptor* desc, const WhitenessWitness&);
inline void Append(Descriptor* desc);
- // Transfer a complete descriptor from the src descriptor array to this
- // descriptor array.
- void CopyFrom(int dst_index,
- DescriptorArray* src,
- int src_index,
- const WhitenessWitness&);
- static Handle<DescriptorArray> Merge(Handle<DescriptorArray> desc,
- int verbatim,
- int valid,
- int new_size,
- int modify_index,
- StoreMode store_mode,
- Handle<DescriptorArray> other);
- MUST_USE_RESULT MaybeObject* Merge(int verbatim,
- int valid,
- int new_size,
- int modify_index,
- StoreMode store_mode,
- DescriptorArray* other);
-
- bool IsMoreGeneralThan(int verbatim,
- int valid,
- int new_size,
- DescriptorArray* other);
-
- MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index) {
- return CopyUpToAddAttributes(enumeration_index, NONE);
- }
+ static Handle<DescriptorArray> CopyUpTo(Handle<DescriptorArray> desc,
+ int enumeration_index,
+ int slack = 0);
static Handle<DescriptorArray> CopyUpToAddAttributes(
Handle<DescriptorArray> desc,
int enumeration_index,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* CopyUpToAddAttributes(
- int enumeration_index,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ int slack = 0);
// Sort the instance descriptors by the hash codes of their keys.
void Sort();
@@ -3333,9 +3478,9 @@ class DescriptorArray: public FixedArray {
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
- int number_of_descriptors,
- int slack = 0);
+ static Handle<DescriptorArray> Allocate(Isolate* isolate,
+ int number_of_descriptors,
+ int slack = 0);
// Casting.
static inline DescriptorArray* cast(Object* obj);
@@ -3389,6 +3534,23 @@ class DescriptorArray: public FixedArray {
}
private:
+ // WhitenessWitness is used to prove that a descriptor array is white
+ // (unmarked), so incremental write barriers can be skipped because the
+ // marking invariant cannot be broken and slots pointing into evacuation
+ // candidates will be discovered when the object is scanned. A witness is
+ // always stack-allocated right after creating an array. By allocating a
+ // witness, incremental marking is globally disabled. The witness is then
+ // passed along wherever needed to statically prove that the array is known to
+ // be white.
+ class WhitenessWitness {
+ public:
+ inline explicit WhitenessWitness(DescriptorArray* array);
+ inline ~WhitenessWitness();
+
+ private:
+ IncrementalMarking* marking_;
+ };
+
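A sketch of how the witness is meant to be used inside DescriptorArray's own methods (illustrative; nof_descriptors and the Descriptor values d are assumptions): prove whiteness once right after allocation, then perform barrier-free initializing stores.

    Handle<DescriptorArray> array =
        DescriptorArray::Allocate(isolate, nof_descriptors);
    DescriptorArray::WhitenessWitness witness(*array);
    for (int i = 0; i < nof_descriptors; i++) {
      array->Set(i, &d[i], witness);  // No incremental write barrier needed.
    }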
// An entry in a DescriptorArray, represented as an (array, index) pair.
class Entry {
public:
@@ -3422,6 +3584,18 @@ class DescriptorArray: public FixedArray {
kDescriptorValue;
}
+ // Transfer a complete descriptor from the src descriptor array to this
+ // descriptor array.
+ void CopyFrom(int index,
+ DescriptorArray* src,
+ const WhitenessWitness&);
+
+ inline void Set(int descriptor_number,
+ Descriptor* desc,
+ const WhitenessWitness&);
+
+ inline void Append(Descriptor* desc, const WhitenessWitness&);
+
// Swap first and second descriptor.
inline void SwapSortedKeys(int first, int second);
@@ -3461,7 +3635,7 @@ inline int Search(T* array, Name* name, int valid_entries = 0);
// // Returns the hash value for object.
// static uint32_t HashForObject(Key key, Object* object);
// // Convert key to an object.
-// static inline Object* AsObject(Heap* heap, Key key);
+// static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
// // The prefix size indicates number of elements in the beginning
// // of the backing storage.
// static const int kPrefixSize = ..;
@@ -3488,14 +3662,13 @@ class BaseShape {
}
};
-template<typename Shape, typename Key>
+template<typename Derived, typename Shape, typename Key>
class HashTable: public FixedArray {
public:
// Wrapper methods
inline uint32_t Hash(Key key) {
if (Shape::UsesSeed) {
- return Shape::SeededHash(key,
- GetHeap()->HashSeed());
+ return Shape::SeededHash(key, GetHeap()->HashSeed());
} else {
return Shape::Hash(key);
}
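The new Derived parameter is the curiously recurring template pattern: each concrete table passes itself as Derived so that static helpers such as New, Shrink and EnsureCapacity can traffic in correctly typed handles. A sketch consistent with the StringTable change further below:

    class StringTable
        : public HashTable<StringTable, StringTableShape, HashTableKey*> {
      // ...
    };
    Handle<StringTable> table = StringTable::New(isolate, 64);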
@@ -3503,8 +3676,7 @@ class HashTable: public FixedArray {
inline uint32_t HashForObject(Key key, Object* object) {
if (Shape::UsesSeed) {
- return Shape::SeededHashForObject(key,
- GetHeap()->HashSeed(), object);
+ return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
} else {
return Shape::HashForObject(key, object);
}
@@ -3540,9 +3712,9 @@ class HashTable: public FixedArray {
SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
}
- // Returns a new HashTable object. Might return Failure.
- MUST_USE_RESULT static MaybeObject* Allocate(
- Heap* heap,
+ // Returns a new HashTable object.
+ MUST_USE_RESULT static Handle<Derived> New(
+ Isolate* isolate,
int at_least_space_for,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY,
PretenureFlag pretenure = NOT_TENURED);
@@ -3601,7 +3773,6 @@ class HashTable: public FixedArray {
void Rehash(Key key);
protected:
- friend class ObjectHashSet;
friend class ObjectHashTable;
// Find the entry at which to insert element with the given key that
@@ -3649,6 +3820,17 @@ class HashTable: public FixedArray {
return (last + number) & (size - 1);
}
+ // Attempt to shrink hash table after removal of key.
+ MUST_USE_RESULT static Handle<Derived> Shrink(Handle<Derived> table, Key key);
+
+ // Ensure enough space for n additional elements.
+ MUST_USE_RESULT static Handle<Derived> EnsureCapacity(
+ Handle<Derived> table,
+ int n,
+ Key key,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ private:
// Returns _expected_ if one of entries given by the first _probe_ probes is
// equal to _expected_. Otherwise, returns the entry given by the probe
// number _probe_.
@@ -3657,16 +3839,7 @@ class HashTable: public FixedArray {
void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode);
// Rehashes this hash-table into the new table.
- MUST_USE_RESULT MaybeObject* Rehash(HashTable* new_table, Key key);
-
- // Attempt to shrink hash table after removal of key.
- MUST_USE_RESULT MaybeObject* Shrink(Key key);
-
- // Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(
- int n,
- Key key,
- PretenureFlag pretenure = NOT_TENURED);
+ void Rehash(Handle<Derived> new_table, Key key);
};
@@ -3680,8 +3853,7 @@ class HashTableKey {
// Returns the hash value for object.
virtual uint32_t HashForObject(Object* key) = 0;
// Returns the key object for storing into the hash table.
- // If allocations fails a failure object is returned.
- MUST_USE_RESULT virtual MaybeObject* AsObject(Heap* heap) = 0;
+ MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate) = 0;
// Required.
virtual ~HashTableKey() {}
};
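Each lookup wraps its query in a short-lived key object implementing this interface. A hedged sketch of a minimal key that matches by pointer identity; the IsMatch signature is inferred from its call sites in this hunk, and the two-argument Handle constructor is an assumption for illustration:

    // Hedged sketch only, not a real V8 key class.
    class IdentityKey : public HashTableKey {
     public:
      IdentityKey(Object* object, uint32_t hash)
          : object_(object), hash_(hash) { }
      virtual bool IsMatch(Object* other) { return other == object_; }
      virtual uint32_t Hash() { return hash_; }
      virtual uint32_t HashForObject(Object* key) { return hash_; }
      virtual Handle<Object> AsHandle(Isolate* isolate) {
        return Handle<Object>(object_, isolate);  // assumed constructor
      }
     private:
      Object* object_;
      uint32_t hash_;
    };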
@@ -3692,16 +3864,16 @@ class StringTableShape : public BaseShape<HashTableKey*> {
static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
}
+
static inline uint32_t Hash(HashTableKey* key) {
return key->Hash();
}
+
static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
return key->HashForObject(object);
}
- MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
- HashTableKey* key) {
- return key->AsObject(heap);
- }
+
+ static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
@@ -3713,40 +3885,35 @@ class SeqOneByteString;
//
// No special elements in the prefix and the element size is 1
// because only the string itself (the key) needs to be stored.
-class StringTable: public HashTable<StringTableShape, HashTableKey*> {
- public:
- // Find string in the string table. If it is not there yet, it is
- // added. The return value is the string table which might have
- // been enlarged. If the return value is not a failure, the string
- // pointer *s is set to the string found.
- MUST_USE_RESULT MaybeObject* LookupUtf8String(
- Vector<const char> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupOneByteString(
- Vector<const uint8_t> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupSubStringOneByteString(
- Handle<SeqOneByteString> str,
- int from,
- int length,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupTwoByteString(
- Vector<const uc16> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s);
+class StringTable: public HashTable<StringTable,
+ StringTableShape,
+ HashTableKey*> {
+ public:
+ // Find string in the string table. If it is not there yet, it is
+ // added. The return value is the string found.
+ static Handle<String> LookupString(Isolate* isolate, Handle<String> key);
+ static Handle<String> LookupKey(Isolate* isolate, HashTableKey* key);
+
+  // Tries to internalize the given string and returns a string handle on
+  // success or an empty handle otherwise.
+ MUST_USE_RESULT static MaybeHandle<String> InternalizeStringIfExists(
+ Isolate* isolate,
+ Handle<String> string);
// Looks up a string that is equal to the given string and returns
- // true if it is found, assigning the string to the given output
- // parameter.
- bool LookupStringIfExists(String* str, String** result);
- bool LookupTwoCharsStringIfExists(uint16_t c1, uint16_t c2, String** result);
+  // a string handle if it is found, or an empty handle otherwise.
+ MUST_USE_RESULT static MaybeHandle<String> LookupStringIfExists(
+ Isolate* isolate,
+ Handle<String> str);
+ MUST_USE_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists(
+ Isolate* isolate,
+ uint16_t c1,
+ uint16_t c2);
// Casting.
static inline StringTable* cast(Object* obj);
private:
- MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s);
-
template <bool seq_ascii> friend class JsonParser;
DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
@@ -3758,6 +3925,7 @@ class MapCacheShape : public BaseShape<HashTableKey*> {
static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
}
+
static inline uint32_t Hash(HashTableKey* key) {
return key->Hash();
}
@@ -3766,10 +3934,7 @@ class MapCacheShape : public BaseShape<HashTableKey*> {
return key->HashForObject(object);
}
- MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
- HashTableKey* key) {
- return key->AsObject(heap);
- }
+ static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
static const int kPrefixSize = 0;
static const int kEntrySize = 2;
@@ -3780,11 +3945,12 @@ class MapCacheShape : public BaseShape<HashTableKey*> {
//
// Maps keys that are a fixed array of unique names to a map.
// Used for canonicalize maps for object literals.
-class MapCache: public HashTable<MapCacheShape, HashTableKey*> {
+class MapCache: public HashTable<MapCache, MapCacheShape, HashTableKey*> {
public:
// Find cached value for a name key, otherwise return null.
Object* Lookup(FixedArray* key);
- MUST_USE_RESULT MaybeObject* Put(FixedArray* key, Map* value);
+ static Handle<MapCache> Put(
+ Handle<MapCache> map_cache, Handle<FixedArray> key, Handle<Map> value);
static inline MapCache* cast(Object* obj);
private:
@@ -3792,43 +3958,53 @@ class MapCache: public HashTable<MapCacheShape, HashTableKey*> {
};
-template <typename Shape, typename Key>
-class Dictionary: public HashTable<Shape, Key> {
+template <typename Derived, typename Shape, typename Key>
+class Dictionary: public HashTable<Derived, Shape, Key> {
+ protected:
+ typedef HashTable<Derived, Shape, Key> DerivedHashTable;
+
public:
- static inline Dictionary<Shape, Key>* cast(Object* obj) {
- return reinterpret_cast<Dictionary<Shape, Key>*>(obj);
+ static inline Dictionary* cast(Object* obj) {
+ return reinterpret_cast<Dictionary*>(obj);
}
// Returns the value at entry.
Object* ValueAt(int entry) {
- return this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 1);
+ return this->get(DerivedHashTable::EntryToIndex(entry) + 1);
}
// Set the value for entry.
void ValueAtPut(int entry, Object* value) {
- this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 1, value);
+ this->set(DerivedHashTable::EntryToIndex(entry) + 1, value);
}
// Returns the property details for the property at entry.
PropertyDetails DetailsAt(int entry) {
ASSERT(entry >= 0); // Not found is -1, which is not caught by get().
return PropertyDetails(
- Smi::cast(this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 2)));
+ Smi::cast(this->get(DerivedHashTable::EntryToIndex(entry) + 2)));
}
// Set the details for entry.
void DetailsAtPut(int entry, PropertyDetails value) {
- this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi());
+ this->set(DerivedHashTable::EntryToIndex(entry) + 2, value.AsSmi());
}
// Sorting support
void CopyValuesTo(FixedArray* elements);
// Delete a property from the dictionary.
- Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
+ static Handle<Object> DeleteProperty(
+ Handle<Derived> dictionary,
+ int entry,
+ JSObject::DeleteMode mode);
// Attempt to shrink the dictionary after deletion of key.
- MUST_USE_RESULT MaybeObject* Shrink(Key key);
+ MUST_USE_RESULT static inline Handle<Derived> Shrink(
+ Handle<Derived> dictionary,
+ Key key) {
+ return DerivedHashTable::Shrink(dictionary, key);
+ }
// Returns the number of elements in the dictionary filtering out properties
// with the specified attributes.
@@ -3855,17 +4031,17 @@ class Dictionary: public HashTable<Shape, Key> {
}
int NextEnumerationIndex() {
- return Smi::cast(FixedArray::get(kNextEnumerationIndexIndex))->value();
+ return Smi::cast(this->get(kNextEnumerationIndexIndex))->value();
}
- // Returns a new array for dictionary usage. Might return Failure.
- MUST_USE_RESULT static MaybeObject* Allocate(
- Heap* heap,
+ // Creates a new dictionary.
+ MUST_USE_RESULT static Handle<Derived> New(
+ Isolate* isolate,
int at_least_space_for,
PretenureFlag pretenure = NOT_TENURED);
// Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
+ static Handle<Derived> EnsureCapacity(Handle<Derived> obj, int n, Key key);
#ifdef OBJECT_PRINT
void Print(FILE* out = stdout);
@@ -3875,49 +4051,59 @@ class Dictionary: public HashTable<Shape, Key> {
// Sets the entry to (key, value) pair.
inline void SetEntry(int entry,
- Object* key,
- Object* value);
+ Handle<Object> key,
+ Handle<Object> value);
inline void SetEntry(int entry,
- Object* key,
- Object* value,
+ Handle<Object> key,
+ Handle<Object> value,
PropertyDetails details);
- MUST_USE_RESULT MaybeObject* Add(Key key,
- Object* value,
- PropertyDetails details);
+ MUST_USE_RESULT static Handle<Derived> Add(
+ Handle<Derived> dictionary,
+ Key key,
+ Handle<Object> value,
+ PropertyDetails details);
protected:
// Generic at put operation.
- MUST_USE_RESULT MaybeObject* AtPut(Key key, Object* value);
+ MUST_USE_RESULT static Handle<Derived> AtPut(
+ Handle<Derived> dictionary,
+ Key key,
+ Handle<Object> value);
// Add entry to dictionary.
- MUST_USE_RESULT MaybeObject* AddEntry(Key key,
- Object* value,
- PropertyDetails details,
- uint32_t hash);
+ static void AddEntry(
+ Handle<Derived> dictionary,
+ Key key,
+ Handle<Object> value,
+ PropertyDetails details,
+ uint32_t hash);
// Generate new enumeration indices to avoid enumeration index overflow.
- MUST_USE_RESULT MaybeObject* GenerateNewEnumerationIndices();
- static const int kMaxNumberKeyIndex =
- HashTable<Shape, Key>::kPrefixStartIndex;
+ static void GenerateNewEnumerationIndices(Handle<Derived> dictionary);
+ static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex;
static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
};
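Nearly every signature change in this class follows the same handlification pattern seen throughout the diff: allocating member functions returning MaybeObject* become static functions that take the receiver as a Handle and return the (possibly reallocated) result. Schematically, with the old calling convention shown as a comment:

    // Old style (schematic): raw result, explicit failure propagation.
    //   MaybeObject* maybe = dict->Add(key, value, details);
    //   if (!maybe->To(&dict)) return maybe;
    //
    // New style, as declared above (assumes the handles are in scope):
    dictionary = NameDictionary::Add(dictionary, key, value, details);

Because the static API handles allocation failure internally, the caller's only obligation is to rebind its handle to the returned table.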
-class NameDictionaryShape : public BaseShape<Name*> {
+class NameDictionaryShape : public BaseShape<Handle<Name> > {
public:
- static inline bool IsMatch(Name* key, Object* other);
- static inline uint32_t Hash(Name* key);
- static inline uint32_t HashForObject(Name* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
- Name* key);
+ static inline bool IsMatch(Handle<Name> key, Object* other);
+ static inline uint32_t Hash(Handle<Name> key);
+ static inline uint32_t HashForObject(Handle<Name> key, Object* object);
+ static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const bool kIsEnumerable = true;
};
-class NameDictionary: public Dictionary<NameDictionaryShape, Name*> {
+class NameDictionary: public Dictionary<NameDictionary,
+ NameDictionaryShape,
+ Handle<Name> > {
+ typedef Dictionary<
+ NameDictionary, NameDictionaryShape, Handle<Name> > DerivedDictionary;
+
public:
static inline NameDictionary* cast(Object* obj) {
ASSERT(obj->IsDictionary());
@@ -3925,26 +4111,20 @@ class NameDictionary: public Dictionary<NameDictionaryShape, Name*> {
}
// Copies enumerable keys to preallocated fixed array.
- FixedArray* CopyEnumKeysTo(FixedArray* storage);
- static void DoGenerateNewEnumerationIndices(
+ void CopyEnumKeysTo(FixedArray* storage);
+ inline static void DoGenerateNewEnumerationIndices(
Handle<NameDictionary> dictionary);
- // For transforming properties of a JSObject.
- MUST_USE_RESULT MaybeObject* TransformPropertiesToFastFor(
- JSObject* obj,
- int unused_property_fields);
-
// Find entry for key, otherwise return kNotFound. Optimized version of
// HashTable::FindEntry.
- int FindEntry(Name* key);
+ int FindEntry(Handle<Name> key);
};
class NumberDictionaryShape : public BaseShape<uint32_t> {
public:
static inline bool IsMatch(uint32_t key, Object* other);
- MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
- uint32_t key);
+ static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
static const int kEntrySize = 3;
static const bool kIsEnumerable = false;
};
@@ -3972,7 +4152,9 @@ class UnseededNumberDictionaryShape : public NumberDictionaryShape {
class SeededNumberDictionary
- : public Dictionary<SeededNumberDictionaryShape, uint32_t> {
+ : public Dictionary<SeededNumberDictionary,
+ SeededNumberDictionaryShape,
+ uint32_t> {
public:
static SeededNumberDictionary* cast(Object* obj) {
ASSERT(obj->IsDictionary());
@@ -3980,28 +4162,24 @@ class SeededNumberDictionary
}
// Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
+ MUST_USE_RESULT static Handle<SeededNumberDictionary> AtNumberPut(
+ Handle<SeededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value);
MUST_USE_RESULT static Handle<SeededNumberDictionary> AddNumberEntry(
Handle<SeededNumberDictionary> dictionary,
uint32_t key,
Handle<Object> value,
PropertyDetails details);
- MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details);
// Set an existing entry or add a new one if needed.
// Return the updated dictionary.
MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
Handle<SeededNumberDictionary> dictionary,
- uint32_t index,
+ uint32_t key,
Handle<Object> value,
PropertyDetails details);
- MUST_USE_RESULT MaybeObject* Set(uint32_t key,
- Object* value,
- PropertyDetails details);
-
void UpdateMaxNumberKey(uint32_t key);
// If slow elements are required we will never go back to fast-case
@@ -4025,7 +4203,9 @@ class SeededNumberDictionary
class UnseededNumberDictionary
- : public Dictionary<UnseededNumberDictionaryShape, uint32_t> {
+ : public Dictionary<UnseededNumberDictionary,
+ UnseededNumberDictionaryShape,
+ uint32_t> {
public:
static UnseededNumberDictionary* cast(Object* obj) {
ASSERT(obj->IsDictionary());
@@ -4033,94 +4213,67 @@ class UnseededNumberDictionary
}
// Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
- MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key, Object* value);
+ MUST_USE_RESULT static Handle<UnseededNumberDictionary> AtNumberPut(
+ Handle<UnseededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value);
+ MUST_USE_RESULT static Handle<UnseededNumberDictionary> AddNumberEntry(
+ Handle<UnseededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value);
// Set an existing entry or add a new one if needed.
// Return the updated dictionary.
MUST_USE_RESULT static Handle<UnseededNumberDictionary> Set(
Handle<UnseededNumberDictionary> dictionary,
- uint32_t index,
+ uint32_t key,
Handle<Object> value);
-
- MUST_USE_RESULT MaybeObject* Set(uint32_t key, Object* value);
};
-template <int entrysize>
-class ObjectHashTableShape : public BaseShape<Object*> {
+class ObjectHashTableShape : public BaseShape<Handle<Object> > {
public:
- static inline bool IsMatch(Object* key, Object* other);
- static inline uint32_t Hash(Object* key);
- static inline uint32_t HashForObject(Object* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
- Object* key);
+ static inline bool IsMatch(Handle<Object> key, Object* other);
+ static inline uint32_t Hash(Handle<Object> key);
+ static inline uint32_t HashForObject(Handle<Object> key, Object* object);
+ static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Object> key);
static const int kPrefixSize = 0;
- static const int kEntrySize = entrysize;
-};
-
-
-// ObjectHashSet holds keys that are arbitrary objects by using the identity
-// hash of the key for hashing purposes.
-class ObjectHashSet: public HashTable<ObjectHashTableShape<1>, Object*> {
- public:
- static inline ObjectHashSet* cast(Object* obj) {
- ASSERT(obj->IsHashTable());
- return reinterpret_cast<ObjectHashSet*>(obj);
- }
-
- // Looks up whether the given key is part of this hash set.
- bool Contains(Object* key);
-
- static Handle<ObjectHashSet> EnsureCapacity(
- Handle<ObjectHashSet> table,
- int n,
- Handle<Object> key,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Attempt to shrink hash table after removal of key.
- static Handle<ObjectHashSet> Shrink(Handle<ObjectHashSet> table,
- Handle<Object> key);
-
- // Adds the given key to this hash set.
- static Handle<ObjectHashSet> Add(Handle<ObjectHashSet> table,
- Handle<Object> key);
-
- // Removes the given key from this hash set.
- static Handle<ObjectHashSet> Remove(Handle<ObjectHashSet> table,
- Handle<Object> key);
+ static const int kEntrySize = 2;
};
// ObjectHashTable maps keys that are arbitrary objects to object values by
// using the identity hash of the key for hashing purposes.
-class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
+class ObjectHashTable: public HashTable<ObjectHashTable,
+ ObjectHashTableShape,
+ Handle<Object> > {
+ typedef HashTable<
+ ObjectHashTable, ObjectHashTableShape, Handle<Object> > DerivedHashTable;
public:
static inline ObjectHashTable* cast(Object* obj) {
ASSERT(obj->IsHashTable());
return reinterpret_cast<ObjectHashTable*>(obj);
}
- static Handle<ObjectHashTable> EnsureCapacity(
- Handle<ObjectHashTable> table,
- int n,
- Handle<Object> key,
- PretenureFlag pretenure = NOT_TENURED);
-
// Attempt to shrink hash table after removal of key.
- static Handle<ObjectHashTable> Shrink(Handle<ObjectHashTable> table,
- Handle<Object> key);
+ MUST_USE_RESULT static inline Handle<ObjectHashTable> Shrink(
+ Handle<ObjectHashTable> table,
+ Handle<Object> key);
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
- Object* Lookup(Object* key);
+ Object* Lookup(Handle<Object> key);
- // Adds (or overwrites) the value associated with the given key. Mapping a
- // key to the hole value causes removal of the whole entry.
+ // Adds (or overwrites) the value associated with the given key.
static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<Object> value);
+ // Returns an ObjectHashTable (possibly |table|) where |key| has been removed.
+ static Handle<ObjectHashTable> Remove(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ bool* was_present);
+
private:
friend class MarkCompactCollector;
@@ -4134,14 +4287,225 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
};
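The same calling convention applies here; a short usage sketch, assuming |table|, |key| and |value| are live handles in scope:

    table = ObjectHashTable::Put(table, key, value);   // may resize
    Object* found = table->Lookup(key);                // hole value if absent
    bool was_present;
    table = ObjectHashTable::Remove(table, key, &was_present);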
+// OrderedHashTable is a HashTable with Object keys that preserves
+// insertion order. There are Map and Set interfaces (OrderedHashMap
+// and OrderedHashSet, below). It is meant to be used by JSMap/JSSet.
+//
+// Only Object* keys are supported, with Object::SameValueZero() used as the
+// equality operator and Object::GetHash() for the hash function.
+//
+// Based on the "Deterministic Hash Table" as described by Jason Orendorff at
+// https://wiki.mozilla.org/User:Jorend/Deterministic_hash_tables
+// Originally attributed to Tyler Close.
+//
+// Memory layout:
+// [0]: bucket count
+// [1]: element count
+// [2]: deleted element count
+// [3..(3 + NumberOfBuckets() - 1)]: "hash table", where each item is an
+// offset into the data table (see below) where the
+// first item in this bucket is stored.
+// [3 + NumberOfBuckets()..length]: "data table", an array of length
+// Capacity() * kEntrySize, where the first entrysize
+// items are handled by the derived class and the
+// item at kChainOffset is another entry into the
+// data table indicating the next entry in this hash
+// bucket.
+//
+// When we transition the table to a new version we obsolete it and reuse parts
+// of the memory to store information about how to transition an iterator to
+// the new table:
+//
+// Memory layout for obsolete table:
+// [0]: bucket count
+// [1]: Next newer table
+// [2]: Number of removed holes or -1 when the table was cleared.
+// [3..(3 + NumberOfRemovedHoles() - 1)]: The indexes of the removed holes.
+// [3 + NumberOfRemovedHoles()..length]: Not used
+//
+template<class Derived, class Iterator, int entrysize>
+class OrderedHashTable: public FixedArray {
+ public:
+ // Returns an OrderedHashTable with a capacity of at least |capacity|.
+ static Handle<Derived> Allocate(
+ Isolate* isolate, int capacity, PretenureFlag pretenure = NOT_TENURED);
+
+ // Returns an OrderedHashTable (possibly |table|) with enough space
+ // to add at least one new element.
+ static Handle<Derived> EnsureGrowable(Handle<Derived> table);
+
+ // Returns an OrderedHashTable (possibly |table|) that's shrunken
+ // if possible.
+ static Handle<Derived> Shrink(Handle<Derived> table);
+
+ // Returns a new empty OrderedHashTable and records the clearing so that
+  // existing iterators can be updated.
+ static Handle<Derived> Clear(Handle<Derived> table);
+
+ // Returns an OrderedHashTable (possibly |table|) where |key| has been
+ // removed.
+ static Handle<Derived> Remove(Handle<Derived> table, Handle<Object> key,
+ bool* was_present);
+
+ // Returns kNotFound if the key isn't present.
+ int FindEntry(Handle<Object> key);
+
+ int NumberOfElements() {
+ return Smi::cast(get(kNumberOfElementsIndex))->value();
+ }
+
+ int NumberOfDeletedElements() {
+ return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
+ }
+
+ int UsedCapacity() { return NumberOfElements() + NumberOfDeletedElements(); }
+
+ int NumberOfBuckets() {
+ return Smi::cast(get(kNumberOfBucketsIndex))->value();
+ }
+
+ // Returns the index into the data table where the new entry
+ // should be placed. The table is assumed to have enough space
+ // for a new entry.
+ int AddEntry(int hash);
+
+ // Removes the entry, and puts the_hole in entrysize pointers
+ // (leaving the hash table chain intact).
+ void RemoveEntry(int entry);
+
+ // Returns an index into |this| for the given entry.
+ int EntryToIndex(int entry) {
+ return kHashTableStartIndex + NumberOfBuckets() + (entry * kEntrySize);
+ }
+
+ Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
+
+ bool IsObsolete() {
+ return !get(kNextTableIndex)->IsSmi();
+ }
+
+ // The next newer table. This is only valid if the table is obsolete.
+ Derived* NextTable() {
+ return Derived::cast(get(kNextTableIndex));
+ }
+
+ // When the table is obsolete we store the indexes of the removed holes.
+ int RemovedIndexAt(int index) {
+ return Smi::cast(get(kRemovedHolesIndex + index))->value();
+ }
+
+ static const int kNotFound = -1;
+ static const int kMinCapacity = 4;
+
+ private:
+ static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity);
+
+ void SetNumberOfBuckets(int num) {
+ set(kNumberOfBucketsIndex, Smi::FromInt(num));
+ }
+
+ void SetNumberOfElements(int num) {
+ set(kNumberOfElementsIndex, Smi::FromInt(num));
+ }
+
+ void SetNumberOfDeletedElements(int num) {
+ set(kNumberOfDeletedElementsIndex, Smi::FromInt(num));
+ }
+
+ int Capacity() {
+ return NumberOfBuckets() * kLoadFactor;
+ }
+
+ // Returns the next entry for the given entry.
+ int ChainAt(int entry) {
+ return Smi::cast(get(EntryToIndex(entry) + kChainOffset))->value();
+ }
+
+ int HashToBucket(int hash) {
+ return hash & (NumberOfBuckets() - 1);
+ }
+
+ int HashToEntry(int hash) {
+ int bucket = HashToBucket(hash);
+ return Smi::cast(get(kHashTableStartIndex + bucket))->value();
+ }
+
+ void SetNextTable(Derived* next_table) {
+ set(kNextTableIndex, next_table);
+ }
+
+ void SetRemovedIndexAt(int index, int removed_index) {
+ return set(kRemovedHolesIndex + index, Smi::FromInt(removed_index));
+ }
+
+ static const int kNumberOfBucketsIndex = 0;
+ static const int kNumberOfElementsIndex = kNumberOfBucketsIndex + 1;
+ static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
+ static const int kHashTableStartIndex = kNumberOfDeletedElementsIndex + 1;
+
+ static const int kNextTableIndex = kNumberOfElementsIndex;
+ static const int kRemovedHolesIndex = kHashTableStartIndex;
+
+ static const int kEntrySize = entrysize + 1;
+ static const int kChainOffset = entrysize;
+
+ static const int kLoadFactor = 2;
+ static const int kMaxCapacity =
+ (FixedArray::kMaxLength - kHashTableStartIndex)
+ / (1 + (kEntrySize * kLoadFactor));
+};
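To make the layout concrete: with kLoadFactor = 2 and NumberOfBuckets() == 4, Capacity() is 8, the bucket heads occupy indices 3 through 6, and the data table begins at index 7. A standalone sketch of the lookup walk implied by HashToBucket, HashToEntry and ChainAt, over a plain int array standing in for the FixedArray:

    // Standalone sketch of FindEntry over the layout documented above.
    // Keys are plain ints; entries are entry_size slots wide with the
    // chain link in the last slot (kChainOffset == entry_size - 1).
    static const int kNotFoundSketch = -1;
    static const int kHashTableStart = 3;  // mirrors kHashTableStartIndex

    int FindEntrySketch(const int* store, int num_buckets, int entry_size,
                        int key, int hash) {
      int bucket = hash & (num_buckets - 1);          // HashToBucket
      int entry = store[kHashTableStart + bucket];    // HashToEntry
      while (entry != kNotFoundSketch) {
        int index = kHashTableStart + num_buckets + entry * entry_size;
        if (store[index] == key) return entry;        // KeyAt comparison
        entry = store[index + entry_size - 1];        // ChainAt
      }
      return kNotFoundSketch;
    }

The kMaxCapacity bound above falls out of the same layout: each bucket costs one head slot plus kLoadFactor entries of kEntrySize slots each, and the whole table must fit within FixedArray::kMaxLength.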
+
+
+class JSSetIterator;
+
+
+class OrderedHashSet: public OrderedHashTable<
+ OrderedHashSet, JSSetIterator, 1> {
+ public:
+ static OrderedHashSet* cast(Object* obj) {
+ ASSERT(obj->IsOrderedHashTable());
+ return reinterpret_cast<OrderedHashSet*>(obj);
+ }
+
+ bool Contains(Handle<Object> key);
+ static Handle<OrderedHashSet> Add(
+ Handle<OrderedHashSet> table, Handle<Object> key);
+};
+
+
+class JSMapIterator;
+
+
+class OrderedHashMap:public OrderedHashTable<
+ OrderedHashMap, JSMapIterator, 2> {
+ public:
+ static OrderedHashMap* cast(Object* obj) {
+ ASSERT(obj->IsOrderedHashTable());
+ return reinterpret_cast<OrderedHashMap*>(obj);
+ }
+
+ Object* Lookup(Handle<Object> key);
+ static Handle<OrderedHashMap> Put(
+ Handle<OrderedHashMap> table,
+ Handle<Object> key,
+ Handle<Object> value);
+
+ private:
+ Object* ValueAt(int entry) {
+ return get(EntryToIndex(entry) + kValueOffset);
+ }
+
+ static const int kValueOffset = 1;
+};
+
+
template <int entrysize>
-class WeakHashTableShape : public BaseShape<Object*> {
+class WeakHashTableShape : public BaseShape<Handle<Object> > {
public:
- static inline bool IsMatch(Object* key, Object* other);
- static inline uint32_t Hash(Object* key);
- static inline uint32_t HashForObject(Object* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
- Object* key);
+ static inline bool IsMatch(Handle<Object> key, Object* other);
+ static inline uint32_t Hash(Handle<Object> key);
+ static inline uint32_t HashForObject(Handle<Object> key, Object* object);
+ static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Object> key);
static const int kPrefixSize = 0;
static const int kEntrySize = entrysize;
};
@@ -4150,7 +4514,11 @@ class WeakHashTableShape : public BaseShape<Object*> {
// WeakHashTable maps keys that are arbitrary objects to object values.
// It is used for the global weak hash table that maps objects
// embedded in optimized code to dependent code lists.
-class WeakHashTable: public HashTable<WeakHashTableShape<2>, Object*> {
+class WeakHashTable: public HashTable<WeakHashTable,
+ WeakHashTableShape<2>,
+ Handle<Object> > {
+ typedef HashTable<
+ WeakHashTable, WeakHashTableShape<2>, Handle<Object> > DerivedHashTable;
public:
static inline WeakHashTable* cast(Object* obj) {
ASSERT(obj->IsHashTable());
@@ -4159,11 +4527,13 @@ class WeakHashTable: public HashTable<WeakHashTableShape<2>, Object*> {
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
- Object* Lookup(Object* key);
+ Object* Lookup(Handle<Object> key);
// Adds (or overwrites) the value associated with the given key. Mapping a
// key to the hole value causes removal of the whole entry.
- MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
+ MUST_USE_RESULT static Handle<WeakHashTable> Put(Handle<WeakHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value);
// This function is called when heap verification is turned on.
void Zap(Object* value) {
@@ -4177,7 +4547,7 @@ class WeakHashTable: public HashTable<WeakHashTableShape<2>, Object*> {
private:
friend class MarkCompactCollector;
- void AddEntry(int entry, Object* key, Object* value);
+ void AddEntry(int entry, Handle<Object> key, Handle<Object> value);
// Returns the index to the value of an entry.
static inline int EntryToValueIndex(int entry) {
@@ -4239,13 +4609,11 @@ class ScopeInfo : public FixedArray {
// Does this scope call eval?
bool CallsEval();
- // Return the language mode of this scope.
- LanguageMode language_mode();
+ // Return the strict mode of this scope.
+ StrictMode strict_mode();
- // Does this scope make a non-strict eval call?
- bool CallsNonStrictEval() {
- return CallsEval() && (language_mode() == CLASSIC_MODE);
- }
+ // Does this scope make a sloppy eval call?
+ bool CallsSloppyEval() { return CallsEval() && strict_mode() == SLOPPY; }
// Return the total number of locals allocated on the stack and in the
// context. This includes the parameters that are allocated in the context.
@@ -4296,6 +4664,10 @@ class ScopeInfo : public FixedArray {
// Return the initialization flag of the given context local.
InitializationFlag ContextLocalInitFlag(int var);
+ // Return true if this local was introduced by the compiler, and should not be
+ // exposed to the user in a debugger.
+ bool LocalIsSynthetic(int var);
+
// Lookup support for serialized scope info. Returns the
// stack slot index for a given slot name if the slot is
// present; otherwise returns a value < 0. The name must be an internalized
@@ -4307,9 +4679,10 @@ class ScopeInfo : public FixedArray {
// returns a value < 0. The name must be an internalized string.
// If the slot is present and mode != NULL, sets *mode to the corresponding
// mode for that variable.
- int ContextSlotIndex(String* name,
- VariableMode* mode,
- InitializationFlag* init_flag);
+ static int ContextSlotIndex(Handle<ScopeInfo> scope_info,
+ Handle<String> name,
+ VariableMode* mode,
+ InitializationFlag* init_flag);
// Lookup support for serialized scope info. Returns the
// parameter index for a given parameter name if the parameter is present;
@@ -4419,9 +4792,9 @@ class ScopeInfo : public FixedArray {
// Properties of scopes.
class ScopeTypeField: public BitField<ScopeType, 0, 3> {};
class CallsEvalField: public BitField<bool, 3, 1> {};
- class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
- class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
- class FunctionVariableMode: public BitField<VariableMode, 8, 3> {};
+ class StrictModeField: public BitField<StrictMode, 4, 1> {};
+ class FunctionVariableField: public BitField<FunctionVariableInfo, 5, 2> {};
+ class FunctionVariableMode: public BitField<VariableMode, 7, 3> {};
// BitFields representing the encoded information for context locals in the
// ContextLocalInfoEntries part.
@@ -4435,18 +4808,27 @@ class ScopeInfo : public FixedArray {
// needs very limited number of distinct normalized maps.
class NormalizedMapCache: public FixedArray {
public:
- static const int kEntries = 64;
+ static Handle<NormalizedMapCache> New(Isolate* isolate);
- static Handle<Map> Get(Handle<NormalizedMapCache> cache,
- Handle<JSObject> object,
- PropertyNormalizationMode mode);
+ MUST_USE_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
+ PropertyNormalizationMode mode);
+ void Set(Handle<Map> fast_map, Handle<Map> normalized_map);
void Clear();
// Casting
static inline NormalizedMapCache* cast(Object* obj);
+ static inline bool IsNormalizedMapCache(Object* obj);
DECLARE_VERIFIER(NormalizedMapCache)
+ private:
+ static const int kEntries = 64;
+
+ static inline int GetIndex(Handle<Map> map);
+
+ // The following declarations hide base class methods.
+ Object* get(int index);
+ void set(int index, Object* value);
};
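The private kEntries and GetIndex members suggest a direct-mapped cache: a map hashes to exactly one of 64 slots, and a colliding Set simply evicts the previous occupant. A standalone sketch of that structure (the hash source behind GetIndex is not shown in this diff, so it is taken as a parameter here):

    // Standalone sketch of a 64-entry direct-mapped cache.
    template <typename K, typename V>
    class DirectMappedCacheSketch {
     public:
      static const int kEntries = 64;
      DirectMappedCacheSketch() {
        for (int i = 0; i < kEntries; i++) slots_[i].used = false;
      }
      bool Get(const K& key, unsigned hash, V* out) const {
        const Slot& s = slots_[hash & (kEntries - 1)];
        if (!s.used || !(s.key == key)) return false;   // miss
        *out = s.value;
        return true;
      }
      void Set(const K& key, unsigned hash, const V& value) {
        Slot& s = slots_[hash & (kEntries - 1)];        // evicting store
        s.used = true;
        s.key = key;
        s.value = value;
      }
     private:
      struct Slot { bool used; K key; V value; };
      Slot slots_[kEntries];
    };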
@@ -4513,6 +4895,9 @@ class FreeSpace: public HeapObject {
inline int size();
inline void set_size(int value);
+ inline int nobarrier_size();
+ inline void nobarrier_set_size(int value);
+
inline int Size() { return size(); }
// Casting.
@@ -4534,6 +4919,20 @@ class FreeSpace: public HeapObject {
};
+// V has parameters (Type, type, TYPE, C type, element_size)
+#define TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t, 1) \
+ V(Int8, int8, INT8, int8_t, 1) \
+ V(Uint16, uint16, UINT16, uint16_t, 2) \
+ V(Int16, int16, INT16, int16_t, 2) \
+ V(Uint32, uint32, UINT32, uint32_t, 4) \
+ V(Int32, int32, INT32, int32_t, 4) \
+ V(Float32, float32, FLOAT32, float, 4) \
+ V(Float64, float64, FLOAT64, double, 8) \
+ V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
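TYPED_ARRAYS is an X-macro: clients pass a macro V that is expanded once per element type. As a minimal illustration, a client macro (ELEMENT_SIZE_SUM, invented here) that folds the size column into one constant:

    // Invented client macro: each V(...) row expands to "+ size".
    #define ELEMENT_SIZE_SUM(Type, type, TYPE, ctype, size) + size
    static const int kSumOfElementSizes = 0 TYPED_ARRAYS(ELEMENT_SIZE_SUM);
    #undef ELEMENT_SIZE_SUM
    // kSumOfElementSizes == 1+1+2+2+4+4+4+8+1 == 27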
+
+
// An ExternalArray represents a fixed-size array of primitive values
// which live outside the JavaScript heap. Its subclasses are used to
// implement the CanvasArray types being defined in the WebGL
@@ -4570,7 +4969,7 @@ class ExternalArray: public FixedArrayBase {
};
-// A ExternalPixelArray represents a fixed-size byte array with special
+// An ExternalUint8ClampedArray represents a fixed-size byte array with special
// semantics used for implementing the CanvasPixelArray object. Please see the
// specification at:
@@ -4578,247 +4977,315 @@ class ExternalArray: public FixedArrayBase {
// multipage/the-canvas-element.html#canvaspixelarray
// In particular, write access clamps the value written to 0 or 255 if the
// value written is outside this range.
-class ExternalPixelArray: public ExternalArray {
+class ExternalUint8ClampedArray: public ExternalArray {
public:
- inline uint8_t* external_pixel_pointer();
+ inline uint8_t* external_uint8_clamped_pointer();
// Setter and getter.
inline uint8_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<ExternalUint8ClampedArray> array,
+ int index);
inline void set(int index, uint8_t value);
- // This accessor applies the correct conversion from Smi, HeapNumber and
- // undefined and clamps the converted value between 0 and 255.
- Object* SetValue(uint32_t index, Object* value);
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined and clamps the converted value between 0 and 255.
+ static Handle<Object> SetValue(Handle<ExternalUint8ClampedArray> array,
+ uint32_t index,
+ Handle<Object> value);
// Casting.
- static inline ExternalPixelArray* cast(Object* obj);
+ static inline ExternalUint8ClampedArray* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalPixelArray)
- DECLARE_VERIFIER(ExternalPixelArray)
+ DECLARE_PRINTER(ExternalUint8ClampedArray)
+ DECLARE_VERIFIER(ExternalUint8ClampedArray)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalPixelArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint8ClampedArray);
};
-class ExternalByteArray: public ExternalArray {
+class ExternalInt8Array: public ExternalArray {
public:
// Setter and getter.
inline int8_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<ExternalInt8Array> array, int index);
inline void set(int index, int8_t value);
- static Handle<Object> SetValue(Handle<ExternalByteArray> array,
- uint32_t index,
- Handle<Object> value);
-
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+ static Handle<Object> SetValue(Handle<ExternalInt8Array> array,
+ uint32_t index,
+ Handle<Object> value);
// Casting.
- static inline ExternalByteArray* cast(Object* obj);
+ static inline ExternalInt8Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalByteArray)
- DECLARE_VERIFIER(ExternalByteArray)
+ DECLARE_PRINTER(ExternalInt8Array)
+ DECLARE_VERIFIER(ExternalInt8Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalByteArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt8Array);
};
-class ExternalUnsignedByteArray: public ExternalArray {
+class ExternalUint8Array: public ExternalArray {
public:
// Setter and getter.
inline uint8_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<ExternalUint8Array> array, int index);
inline void set(int index, uint8_t value);
- static Handle<Object> SetValue(Handle<ExternalUnsignedByteArray> array,
- uint32_t index,
- Handle<Object> value);
-
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+ static Handle<Object> SetValue(Handle<ExternalUint8Array> array,
+ uint32_t index,
+ Handle<Object> value);
// Casting.
- static inline ExternalUnsignedByteArray* cast(Object* obj);
+ static inline ExternalUint8Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalUnsignedByteArray)
- DECLARE_VERIFIER(ExternalUnsignedByteArray)
+ DECLARE_PRINTER(ExternalUint8Array)
+ DECLARE_VERIFIER(ExternalUint8Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedByteArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint8Array);
};
-class ExternalShortArray: public ExternalArray {
+class ExternalInt16Array: public ExternalArray {
public:
// Setter and getter.
inline int16_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<ExternalInt16Array> array, int index);
inline void set(int index, int16_t value);
- static Handle<Object> SetValue(Handle<ExternalShortArray> array,
- uint32_t index,
- Handle<Object> value);
-
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+ static Handle<Object> SetValue(Handle<ExternalInt16Array> array,
+ uint32_t index,
+ Handle<Object> value);
// Casting.
- static inline ExternalShortArray* cast(Object* obj);
+ static inline ExternalInt16Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalShortArray)
- DECLARE_VERIFIER(ExternalShortArray)
+ DECLARE_PRINTER(ExternalInt16Array)
+ DECLARE_VERIFIER(ExternalInt16Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalShortArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt16Array);
};
-class ExternalUnsignedShortArray: public ExternalArray {
+class ExternalUint16Array: public ExternalArray {
public:
// Setter and getter.
inline uint16_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<ExternalUint16Array> array,
+ int index);
inline void set(int index, uint16_t value);
- static Handle<Object> SetValue(Handle<ExternalUnsignedShortArray> array,
- uint32_t index,
- Handle<Object> value);
-
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+ static Handle<Object> SetValue(Handle<ExternalUint16Array> array,
+ uint32_t index,
+ Handle<Object> value);
// Casting.
- static inline ExternalUnsignedShortArray* cast(Object* obj);
+ static inline ExternalUint16Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalUnsignedShortArray)
- DECLARE_VERIFIER(ExternalUnsignedShortArray)
+ DECLARE_PRINTER(ExternalUint16Array)
+ DECLARE_VERIFIER(ExternalUint16Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedShortArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint16Array);
};
-class ExternalIntArray: public ExternalArray {
+class ExternalInt32Array: public ExternalArray {
public:
// Setter and getter.
inline int32_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<ExternalInt32Array> array, int index);
inline void set(int index, int32_t value);
- static Handle<Object> SetValue(Handle<ExternalIntArray> array,
- uint32_t index,
- Handle<Object> value);
-
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+ static Handle<Object> SetValue(Handle<ExternalInt32Array> array,
+ uint32_t index,
+ Handle<Object> value);
// Casting.
- static inline ExternalIntArray* cast(Object* obj);
+ static inline ExternalInt32Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalIntArray)
- DECLARE_VERIFIER(ExternalIntArray)
+ DECLARE_PRINTER(ExternalInt32Array)
+ DECLARE_VERIFIER(ExternalInt32Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalIntArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt32Array);
};
-class ExternalUnsignedIntArray: public ExternalArray {
+class ExternalUint32Array: public ExternalArray {
public:
// Setter and getter.
inline uint32_t get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<ExternalUint32Array> array,
+ int index);
inline void set(int index, uint32_t value);
- static Handle<Object> SetValue(Handle<ExternalUnsignedIntArray> array,
- uint32_t index,
- Handle<Object> value);
-
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+ static Handle<Object> SetValue(Handle<ExternalUint32Array> array,
+ uint32_t index,
+ Handle<Object> value);
// Casting.
- static inline ExternalUnsignedIntArray* cast(Object* obj);
+ static inline ExternalUint32Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalUnsignedIntArray)
- DECLARE_VERIFIER(ExternalUnsignedIntArray)
+ DECLARE_PRINTER(ExternalUint32Array)
+ DECLARE_VERIFIER(ExternalUint32Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedIntArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint32Array);
};
-class ExternalFloatArray: public ExternalArray {
+class ExternalFloat32Array: public ExternalArray {
public:
// Setter and getter.
inline float get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<ExternalFloat32Array> array,
+ int index);
inline void set(int index, float value);
- static Handle<Object> SetValue(Handle<ExternalFloatArray> array,
- uint32_t index,
- Handle<Object> value);
-
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+ static Handle<Object> SetValue(Handle<ExternalFloat32Array> array,
+ uint32_t index,
+ Handle<Object> value);
// Casting.
- static inline ExternalFloatArray* cast(Object* obj);
+ static inline ExternalFloat32Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalFloatArray)
- DECLARE_VERIFIER(ExternalFloatArray)
+ DECLARE_PRINTER(ExternalFloat32Array)
+ DECLARE_VERIFIER(ExternalFloat32Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloatArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat32Array);
};
-class ExternalDoubleArray: public ExternalArray {
+class ExternalFloat64Array: public ExternalArray {
public:
// Setter and getter.
inline double get_scalar(int index);
- MUST_USE_RESULT inline MaybeObject* get(int index);
+ static inline Handle<Object> get(Handle<ExternalFloat64Array> array,
+ int index);
inline void set(int index, double value);
- static Handle<Object> SetValue(Handle<ExternalDoubleArray> array,
- uint32_t index,
- Handle<Object> value);
-
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+ static Handle<Object> SetValue(Handle<ExternalFloat64Array> array,
+ uint32_t index,
+ Handle<Object> value);
// Casting.
- static inline ExternalDoubleArray* cast(Object* obj);
+ static inline ExternalFloat64Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalDoubleArray)
- DECLARE_VERIFIER(ExternalDoubleArray)
+ DECLARE_PRINTER(ExternalFloat64Array)
+ DECLARE_VERIFIER(ExternalFloat64Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalDoubleArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat64Array);
};
+class FixedTypedArrayBase: public FixedArrayBase {
+ public:
+ // Casting:
+ static inline FixedTypedArrayBase* cast(Object* obj);
+
+ static const int kDataOffset = kHeaderSize;
+
+ inline int size();
+
+ inline int TypedArraySize(InstanceType type);
+
+ // Use with care: returns raw pointer into heap.
+ inline void* DataPtr();
+
+ inline int DataSize();
+
+ private:
+ inline int DataSize(InstanceType type);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
+};
+
+
+template <class Traits>
+class FixedTypedArray: public FixedTypedArrayBase {
+ public:
+ typedef typename Traits::ElementType ElementType;
+ static const InstanceType kInstanceType = Traits::kInstanceType;
+
+ // Casting:
+ static inline FixedTypedArray<Traits>* cast(Object* obj);
+
+ static inline int ElementOffset(int index) {
+ return kDataOffset + index * sizeof(ElementType);
+ }
+
+ static inline int SizeFor(int length) {
+ return ElementOffset(length);
+ }
+
+ inline ElementType get_scalar(int index);
+ static inline Handle<Object> get(Handle<FixedTypedArray> array, int index);
+ inline void set(int index, ElementType value);
+
+ static inline ElementType from_int(int value);
+ static inline ElementType from_double(double value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ static Handle<Object> SetValue(Handle<FixedTypedArray<Traits> > array,
+ uint32_t index,
+ Handle<Object> value);
+
+ DECLARE_PRINTER(FixedTypedArray)
+ DECLARE_VERIFIER(FixedTypedArray)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray);
+};
+
+#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \
+ class Type##ArrayTraits { \
+ public: /* NOLINT */ \
+ typedef elementType ElementType; \
+ static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
+ static const char* Designator() { return #type " array"; } \
+ static inline Handle<Object> ToHandle(Isolate* isolate, \
+ elementType scalar); \
+ static inline elementType defaultValue(); \
+ }; \
+ \
+ typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array;
+
+TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
+
+#undef FIXED_TYPED_ARRAY_TRAITS
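For a single row of the table, say V(Uint8, uint8, UINT8, uint8_t, 1), the macro above expands (schematically, whitespace adjusted) to:

    class Uint8ArrayTraits {
     public:
      typedef uint8_t ElementType;
      static const InstanceType kInstanceType = FIXED_UINT8_ARRAY_TYPE;
      static const char* Designator() { return "uint8 array"; }
      static inline Handle<Object> ToHandle(Isolate* isolate, uint8_t scalar);
      static inline uint8_t defaultValue();
    };

    typedef FixedTypedArray<Uint8ArrayTraits> FixedUint8Array;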
+
// DeoptimizationInputData is a fixed array used to hold the deoptimization
// data for code generated by the Hydrogen/Lithium compiler. It also
// contains information about functions that were inlined. If N different
@@ -4834,7 +5301,9 @@ class DeoptimizationInputData: public FixedArray {
static const int kLiteralArrayIndex = 2;
static const int kOsrAstIdIndex = 3;
static const int kOsrPcOffsetIndex = 4;
- static const int kFirstDeoptEntryIndex = 5;
+ static const int kOptimizationIdIndex = 5;
+ static const int kSharedFunctionInfoIndex = 6;
+ static const int kFirstDeoptEntryIndex = 7;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kAstIdRawOffset = 0;
@@ -4857,6 +5326,8 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+ DEFINE_ELEMENT_ACCESSORS(OptimizationId, Smi)
+ DEFINE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
#undef DEFINE_ELEMENT_ACCESSORS
@@ -4889,9 +5360,9 @@ class DeoptimizationInputData: public FixedArray {
}
// Allocates a DeoptimizationInputData.
- MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
- int deopt_entry_count,
- PretenureFlag pretenure);
+ static Handle<DeoptimizationInputData> New(Isolate* isolate,
+ int deopt_entry_count,
+ PretenureFlag pretenure);
// Casting.
static inline DeoptimizationInputData* cast(Object* obj);
@@ -4936,9 +5407,9 @@ class DeoptimizationOutputData: public FixedArray {
}
// Allocates a DeoptimizationOutputData.
- MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
- int number_of_deopt_points,
- PretenureFlag pretenure);
+ static Handle<DeoptimizationOutputData> New(Isolate* isolate,
+ int number_of_deopt_points,
+ PretenureFlag pretenure);
// Casting.
static inline DeoptimizationOutputData* cast(Object* obj);
@@ -4952,49 +5423,6 @@ class DeoptimizationOutputData: public FixedArray {
// Forward declaration.
class Cell;
class PropertyCell;
-
-// TypeFeedbackCells is a fixed array used to hold the association between
-// cache cells and AST ids for code generated by the full compiler.
-// The format of the these objects is
-// [i * 2]: Global property cell of ith cache cell.
-// [i * 2 + 1]: Ast ID for ith cache cell.
-class TypeFeedbackCells: public FixedArray {
- public:
- int CellCount() { return length() / 2; }
- static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
-
- // Accessors for AST ids associated with cache values.
- inline TypeFeedbackId AstId(int index);
- inline void SetAstId(int index, TypeFeedbackId id);
-
- // Accessors for global property cells holding the cache values.
- inline Cell* GetCell(int index);
- inline void SetCell(int index, Cell* cell);
-
- // The object that indicates an uninitialized cache.
- static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
-
- // The object that indicates a megamorphic state.
- static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
-
- // The object that indicates a monomorphic state of Array with
- // ElementsKind
- static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind);
-
- // A raw version of the uninitialized sentinel that's safe to read during
- // garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Heap* heap);
-
- // Casting.
- static inline TypeFeedbackCells* cast(Object* obj);
-
- static const int kForInFastCaseMarker = 0;
- static const int kForInSlowCaseMarker = 1;
-};
-
-
-// Forward declaration.
class SafepointEntry;
class TypeFeedbackInfo;
@@ -5017,7 +5445,6 @@ class Code: public HeapObject {
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
V(CALL_IC) \
- V(KEYED_CALL_IC) \
V(STORE_IC) \
V(KEYED_STORE_IC) \
V(BINARY_OP_IC) \
@@ -5077,7 +5504,6 @@ class Code: public HeapObject {
// the kind of the code object.
// FUNCTION => type feedback information.
// STUB => various things, e.g. a SMI
- // OPTIMIZED_FUNCTION => the next_code_link for optimized code list.
DECL_ACCESSORS(raw_type_feedback_info, Object)
inline Object* type_feedback_info();
inline void set_type_feedback_info(
@@ -5115,24 +5541,10 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
- inline Kind handler_kind() {
- return static_cast<Kind>(arguments_count());
- }
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
- inline ExtraICState extended_extra_ic_state(); // Only valid for
- // non-call IC stubs.
- static bool needs_extended_extra_ic_state(Kind kind) {
- // TODO(danno): This is a bit of a hack right now since there are still
- // clients of this API that pass "extra" values in for argc. These clients
- // should be retrofitted to used ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
- kind == BINARY_OP_IC;
- }
-
inline StubType type(); // Only valid for monomorphic IC stubs.
- inline int arguments_count(); // Only valid for call IC stubs.
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
@@ -5143,12 +5555,23 @@ class Code: public HeapObject {
inline bool is_store_stub() { return kind() == STORE_IC; }
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
- inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
inline bool is_keyed_stub();
+ inline bool is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
+ inline bool is_weak_stub();
+ inline void mark_as_weak_stub();
+ inline bool is_invalidated_weak_stub();
+ inline void mark_as_invalidated_weak_stub();
+
+ inline bool CanBeWeakStub() {
+ Kind k = kind();
+ return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC ||
+ k == KEYED_STORE_IC || k == COMPARE_NIL_IC) &&
+ ic_state() == MONOMORPHIC;
+ }
inline void set_raw_kind_specific_flags1(int value);
inline void set_raw_kind_specific_flags2(int value);
@@ -5212,11 +5635,6 @@ class Code: public HeapObject {
inline bool back_edges_patched_for_osr();
inline void set_back_edges_patched_for_osr(bool value);
- // [check type]: For kind CALL_IC, tells how to check if the
- // receiver is valid for the given call.
- inline CheckType check_type();
- inline void set_check_type(CheckType value);
-
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
@@ -5231,18 +5649,22 @@ class Code: public HeapObject {
inline bool marked_for_deoptimization();
inline void set_marked_for_deoptimization(bool flag);
+ // [constant_pool]: The constant pool for this function.
+ inline ConstantPoolArray* constant_pool();
+ inline void set_constant_pool(Object* constant_pool);
+
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
// Find an object in a stub with a specified map
Object* FindNthObject(int n, Map* match_map);
- void ReplaceNthObject(int n, Map* match_map, Object* replace_with);
+
+ // Find the first allocation site in an IC stub.
+ AllocationSite* FindFirstAllocationSite();
// Find the first map in an IC stub.
Map* FindFirstMap();
void FindAllMaps(MapHandleList* maps);
- void FindAllTypes(TypeHandleList* types);
- void ReplaceFirstMap(Map* replace);
// Find the first handler in an IC stub.
Code* FindFirstHandler();
@@ -5254,7 +5676,12 @@ class Code: public HeapObject {
// Find the first name in an IC stub.
Name* FindFirstName();
- void ReplaceNthCell(int n, Cell* replace_with);
+ class FindAndReplacePattern;
+ // For each (map-to-find, object-to-replace) pair in the pattern, this
+ // function replaces the corresponding placeholder in the code with the
+ // object-to-replace. The function assumes that pairs in the pattern come in
+ // the same order as the placeholders in the code.
+ void FindAndReplace(const FindAndReplacePattern& pattern);
// The entire code object including its header is copied verbatim to the
// snapshot so that it can be written in one, fast, memcpy during
@@ -5271,23 +5698,24 @@ class Code: public HeapObject {
InlineCacheState ic_state = UNINITIALIZED,
ExtraICState extra_ic_state = kNoExtraICState,
StubType type = NORMAL,
- int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
ExtraICState extra_ic_state = kNoExtraICState,
InlineCacheHolderFlag holder = OWN_MAP,
+ StubType type = NORMAL);
+
+ static inline Flags ComputeHandlerFlags(
+ Kind handler_kind,
StubType type = NORMAL,
- int argc = -1);
+ InlineCacheHolderFlag holder = OWN_MAP);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline StubType ExtractTypeFromFlags(Flags flags);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
- static inline ExtraICState ExtractExtendedExtraICStateFromFlags(Flags flags);
- static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
@@ -5357,9 +5785,8 @@ class Code: public HeapObject {
void ClearInlineCaches();
void ClearInlineCaches(Kind kind);
- void ClearTypeFeedbackCells(Heap* heap);
-
BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
+ uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
@@ -5383,7 +5810,7 @@ class Code: public HeapObject {
static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
void MakeOlder(MarkingParity);
- static bool IsYoungSequence(byte* sequence);
+ static bool IsYoungSequence(Isolate* isolate, byte* sequence);
bool IsOld();
Age GetAge();
// Gets the raw code age, including pseudo code-age values such as
@@ -5400,7 +5827,17 @@ class Code: public HeapObject {
void VerifyEmbeddedObjectsDependency();
#endif
- static bool IsWeakEmbeddedObject(Kind kind, Object* object);
+ inline bool CanContainWeakObjects() {
+ return is_optimized_code() || is_weak_stub();
+ }
+
+ inline bool IsWeakObject(Object* object) {
+ return (is_optimized_code() && IsWeakObjectInOptimizedCode(object)) ||
+ (is_weak_stub() && IsWeakObjectInIC(object));
+ }
+
+ static inline bool IsWeakObjectInOptimizedCode(Object* object);
+ static inline bool IsWeakObjectInIC(Object* object);
  // Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
@@ -5414,8 +5851,8 @@ class Code: public HeapObject {
kHandlerTableOffset + kPointerSize;
static const int kTypeFeedbackInfoOffset =
kDeoptimizationDataOffset + kPointerSize;
- static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset; // Shared.
- static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
+ static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
+ static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize;
static const int kICAgeOffset =
kGCMetadataOffset + kPointerSize;
static const int kFlagsOffset = kICAgeOffset + kIntSize;
@@ -5424,8 +5861,9 @@ class Code: public HeapObject {
kKindSpecificFlags1Offset + kIntSize;
// Note: We might be able to squeeze this into the flags above.
static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
+ static const int kConstantPoolOffset = kPrologueOffset + kPointerSize;
- static const int kHeaderPaddingStart = kPrologueOffset + kIntSize;
+ static const int kHeaderPaddingStart = kConstantPoolOffset + kIntSize;
// Add padding to align the instruction start following right after
// the Code object header.
@@ -5434,7 +5872,6 @@ class Code: public HeapObject {
// Byte offsets within kKindSpecificFlags1Offset.
static const int kOptimizableOffset = kKindSpecificFlags1Offset;
- static const int kCheckTypeOffset = kKindSpecificFlags1Offset;
static const int kFullCodeFlags = kOptimizableOffset + 1;
class FullCodeFlagsHasDeoptimizationSupportField:
@@ -5451,10 +5888,8 @@ class Code: public HeapObject {
class CacheHolderField: public BitField<InlineCacheHolderFlag, 5, 1> {};
class KindField: public BitField<Kind, 6, 4> {};
// TODO(bmeurer): Bit 10 is available for free use. :-)
- class ExtraICStateField: public BitField<ExtraICState, 11, 6> {};
- class ExtendedExtraICStateField: public BitField<ExtraICState, 11,
+ class ExtraICStateField: public BitField<ExtraICState, 11,
PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT
- STATIC_ASSERT(ExtraICStateField::kShift == ExtendedExtraICStateField::kShift);
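The BitField declarations above all follow one packing idiom. A standalone sketch of that idiom (V8's real template lives elsewhere in the tree; this only mirrors the encode/decode/update shape used here):

    #include <cstdint>

    // Packs a value of type T into bits [shift, shift + size) of a 32-bit word.
    template <class T, int shift, int size>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
      static uint32_t update(uint32_t word, T value) {
        return (word & ~kMask) | encode(value);  // Replace just this field.
      }
    };
    // E.g. a 4-bit kind at bit 6, as with KindField above:
    //   flags = BitFieldSketch<int, 6, 4>::update(flags, new_kind);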
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
@@ -5465,11 +5900,17 @@ class Code: public HeapObject {
static const int kMarkedForDeoptimizationFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount + 1;
static const int kMarkedForDeoptimizationBitCount = 1;
+ static const int kWeakStubFirstBit =
+ kMarkedForDeoptimizationFirstBit + kMarkedForDeoptimizationBitCount;
+ static const int kWeakStubBitCount = 1;
+ static const int kInvalidatedWeakStubFirstBit =
+ kWeakStubFirstBit + kWeakStubBitCount;
+ static const int kInvalidatedWeakStubBitCount = 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
- STATIC_ASSERT(kMarkedForDeoptimizationFirstBit +
- kMarkedForDeoptimizationBitCount <= 32);
+ STATIC_ASSERT(kInvalidatedWeakStubFirstBit +
+ kInvalidatedWeakStubBitCount <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
@@ -5478,6 +5919,12 @@ class Code: public HeapObject {
class MarkedForDeoptimizationField: public BitField<bool,
kMarkedForDeoptimizationFirstBit,
kMarkedForDeoptimizationBitCount> {}; // NOLINT
+ class WeakStubField: public BitField<bool,
+ kWeakStubFirstBit,
+ kWeakStubBitCount> {}; // NOLINT
+ class InvalidatedWeakStubField: public BitField<bool,
+ kInvalidatedWeakStubFirstBit,
+ kInvalidatedWeakStubBitCount> {}; // NOLINT
// KindSpecificFlags2 layout (ALL)
static const int kIsCrankshaftedBit = 0;
@@ -5508,26 +5955,16 @@ class Code: public HeapObject {
class BackEdgesPatchedForOSRField: public BitField<bool,
kIsCrankshaftedBit + 1 + 29, 1> {}; // NOLINT
- // Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 17;
- static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
- static const int kArgumentsBits =
- PlatformSmiTagging::kSmiValueSize - Code::kArgumentsCountShift + 1;
+ static const int kArgumentsBits = 16;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
- // ICs can use either argument count or ExtendedExtraIC, since their storage
- // overlaps.
- STATIC_ASSERT(ExtraICStateField::kShift +
- ExtraICStateField::kSize + kArgumentsBits ==
- ExtendedExtraICStateField::kShift +
- ExtendedExtraICStateField::kSize);
-
// This constant should be encodable in an ARM instruction.
static const int kFlagsNotUsedInLookup =
TypeField::kMask | CacheHolderField::kMask;
private:
friend class RelocIterator;
+ friend class Deoptimizer; // For FindCodeAgeSequence.
void ClearInlineCaches(Kind* kind);
@@ -5535,7 +5972,7 @@ class Code: public HeapObject {
byte* FindCodeAgeSequence();
static void GetCodeAgeAndParity(Code* code, Age* age,
MarkingParity* parity);
- static void GetCodeAgeAndParity(byte* sequence, Age* age,
+ static void GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity);
static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity);
@@ -5572,9 +6009,14 @@ class CompilationInfo;
class DependentCode: public FixedArray {
public:
enum DependencyGroup {
+ // Group of IC stubs that weakly embed this map and depend on being
+ // invalidated when the map is garbage collected. Dependent IC stubs form
+ // a linked list. This group stores only the head of the list, so
+ // number_of_entries(kWeakICGroup) is either 0 or 1.
+ kWeakICGroup,
    // Group of code that weakly embeds this map and depends on being
// deoptimized when the map is garbage collected.
- kWeaklyEmbeddedGroup,
+ kWeakCodeGroup,
    // Group of code that embeds a transition to this map and depends on being
// deoptimized when the transition is replaced by a new version.
kTransitionGroup,
@@ -5589,6 +6031,12 @@ class DependentCode: public FixedArray {
// Group of code that depends on global property values in property cells
// not being changed.
kPropertyCellChangedGroup,
+ // Group of code that omits run-time type checks for the field(s) introduced
+ // by this map.
+ kFieldTypeGroup,
+ // Group of code that omits run-time type checks for initial maps of
+ // constructors.
+ kInitialMapChangedGroup,
// Group of code that depends on tenuring information in AllocationSites
// not being changed.
kAllocationSiteTenuringChangedGroup,
@@ -5623,6 +6071,10 @@ class DependentCode: public FixedArray {
void DeoptimizeDependentCodeGroup(Isolate* isolate,
DependentCode::DependencyGroup group);
+ bool MarkCodeForDeoptimization(Isolate* isolate,
+ DependentCode::DependencyGroup group);
+ void AddToDependentICList(Handle<Code> stub);
+
// The following low-level accessors should only be used by this class
// and the mark compact collector.
inline int number_of_entries(DependencyGroup group);
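Given the kWeakICGroup comment above, a dependents check degenerates to an entry-count test. A hedged sketch using only the accessor declared here (walking the rest of the list would go through each stub's next_code_link, per the Code layout above):

    // Sketch: does any weak IC stub depend on this DependentCode array?
    // Because kWeakICGroup stores only the list head, the count is 0 or 1.
    static bool HasDependentWeakICs(DependentCode* codes) {
      return codes->number_of_entries(DependentCode::kWeakICGroup) > 0;
    }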
@@ -5695,14 +6147,17 @@ class Map: public HeapObject {
kDescriptorIndexBitCount, kDescriptorIndexBitCount> {}; // NOLINT
STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
class IsShared: public BitField<bool, 20, 1> {};
- class FunctionWithPrototype: public BitField<bool, 21, 1> {};
- class DictionaryMap: public BitField<bool, 22, 1> {};
- class OwnsDescriptors: public BitField<bool, 23, 1> {};
- class HasInstanceCallHandler: public BitField<bool, 24, 1> {};
- class Deprecated: public BitField<bool, 25, 1> {};
- class IsFrozen: public BitField<bool, 26, 1> {};
- class IsUnstable: public BitField<bool, 27, 1> {};
- class IsMigrationTarget: public BitField<bool, 28, 1> {};
+ class DictionaryMap: public BitField<bool, 21, 1> {};
+ class OwnsDescriptors: public BitField<bool, 22, 1> {};
+ class HasInstanceCallHandler: public BitField<bool, 23, 1> {};
+ class Deprecated: public BitField<bool, 24, 1> {};
+ class IsFrozen: public BitField<bool, 25, 1> {};
+ class IsUnstable: public BitField<bool, 26, 1> {};
+ class IsMigrationTarget: public BitField<bool, 27, 1> {};
+ class DoneInobjectSlackTracking: public BitField<bool, 28, 1> {};
+ // Keep this bit field at the very end for better code in the
+ // Builtins::kJSConstructStubGeneric stub.
+ class ConstructionCount: public BitField<int, 29, 3> {};
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -5774,15 +6229,13 @@ class Map: public HeapObject {
inline void set_elements_kind(ElementsKind elements_kind) {
ASSERT(elements_kind < kElementsKindCount);
- ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount));
- set_bit_field2((bit_field2() & ~kElementsKindMask) |
- (elements_kind << kElementsKindShift));
+ ASSERT(kElementsKindCount <= (1 << Map::ElementsKindBits::kSize));
+ set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
ASSERT(this->elements_kind() == elements_kind);
}
inline ElementsKind elements_kind() {
- return static_cast<ElementsKind>(
- (bit_field2() & kElementsKindMask) >> kElementsKindShift);
+ return Map::ElementsKindBits::decode(bit_field2());
}
// Tells whether the instance has fast elements that are only Smis.
@@ -5807,21 +6260,25 @@ class Map: public HeapObject {
return IsFastElementsKind(elements_kind());
}
- inline bool has_non_strict_arguments_elements() {
- return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+ inline bool has_sloppy_arguments_elements() {
+ return elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
}
inline bool has_external_array_elements() {
return IsExternalArrayElementsKind(elements_kind());
}
+ inline bool has_fixed_typed_array_elements() {
+ return IsFixedTypedArrayElementsKind(elements_kind());
+ }
+
inline bool has_dictionary_elements() {
return IsDictionaryElementsKind(elements_kind());
}
inline bool has_slow_elements_kind() {
return elements_kind() == DICTIONARY_ELEMENTS
- || elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+ || elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
}
static bool IsValidElementsTransition(ElementsKind from_kind,
@@ -5834,29 +6291,18 @@ class Map: public HeapObject {
inline bool HasTransitionArray();
inline bool HasElementsTransition();
inline Map* elements_transition_map();
- MUST_USE_RESULT inline MaybeObject* set_elements_transition_map(
- Map* transitioned_map);
- inline void SetTransition(int transition_index, Map* target);
+ static Handle<TransitionArray> SetElementsTransitionMap(
+ Handle<Map> map, Handle<Map> transitioned_map);
inline Map* GetTransition(int transition_index);
+ inline int SearchTransition(Name* name);
+ inline FixedArrayBase* GetInitialElements();
- static Handle<TransitionArray> AddTransition(Handle<Map> map,
- Handle<Name> key,
- Handle<Map> target,
- SimpleTransitionFlag flag);
-
- MUST_USE_RESULT inline MaybeObject* AddTransition(Name* key,
- Map* target,
- SimpleTransitionFlag flag);
DECL_ACCESSORS(transitions, TransitionArray)
- inline void ClearTransitions(Heap* heap,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- void DeprecateTransitionTree();
- void DeprecateTarget(Name* key, DescriptorArray* new_descriptors);
Map* FindRootMap();
- Map* FindUpdatedMap(int verbatim, int length, DescriptorArray* descriptors);
- Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
+ Map* FindFieldOwner(int descriptor);
+
+ inline int GetInObjectPropertyOffset(int index);
int NumberOfFields();
@@ -5864,13 +6310,19 @@ class Map: public HeapObject {
int target_number_of_fields,
int target_inobject,
int target_unused);
- static Handle<Map> GeneralizeAllFieldRepresentations(
- Handle<Map> map,
- Representation new_representation);
+ static Handle<Map> GeneralizeAllFieldRepresentations(Handle<Map> map);
+ static Handle<HeapType> GeneralizeFieldType(Handle<HeapType> type1,
+ Handle<HeapType> type2,
+ Isolate* isolate)
+ V8_WARN_UNUSED_RESULT;
+ static void GeneralizeFieldType(Handle<Map> map,
+ int modify_index,
+ Handle<HeapType> new_field_type);
static Handle<Map> GeneralizeRepresentation(
Handle<Map> map,
int modify_index,
Representation new_representation,
+ Handle<HeapType> new_field_type,
StoreMode store_mode);
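The two GeneralizeFieldType overloads suggest a join on the HeapType lattice. A hedged sketch of that join, assuming HeapType's NowIs and Any exist as in this revision; the actual objects.cc logic may differ in detail:

    // Sketch: widen two field types to a common supertype.
    static Handle<HeapType> JoinFieldTypes(Handle<HeapType> type1,
                                           Handle<HeapType> type2,
                                           Isolate* isolate) {
      if (type1->NowIs(type2)) return type2;  // type1 is already within type2.
      if (type2->NowIs(type1)) return type1;
      return HeapType::Any(isolate);          // Incomparable: widen to Any.
    }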
static Handle<Map> CopyGeneralizeAllRepresentations(
Handle<Map> map,
@@ -5878,26 +6330,18 @@ class Map: public HeapObject {
StoreMode store_mode,
PropertyAttributes attributes,
const char* reason);
+ static Handle<Map> CopyGeneralizeAllRepresentations(
+ Handle<Map> map,
+ int modify_index,
+ StoreMode store_mode,
+ const char* reason);
- void PrintGeneralization(FILE* file,
- const char* reason,
- int modify_index,
- int split,
- int descriptors,
- bool constant_to_field,
- Representation old_representation,
- Representation new_representation);
+ static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode);
// Returns the constructor name (the name (possibly, inferred name) of the
// function that was used to instantiate the object).
String* constructor_name();
- // Tells whether the map is attached to SharedFunctionInfo
- // (for inobject slack tracking).
- inline void set_attached_to_shared_function_info(bool value);
-
- inline bool attached_to_shared_function_info();
-
// Tells whether the map is shared between objects that may have different
// behavior. If true, the map should never be modified, instead a clone
// should be created and modified.
@@ -5932,7 +6376,7 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
- // [dependent code]: list of optimized codes that have this map embedded.
+ // [dependent code]: list of optimized codes that weakly embed this map.
DECL_ACCESSORS(dependent_code, DependentCode)
// [back pointer]: points back to the parent map from which a transition
@@ -5953,13 +6397,8 @@ class Map: public HeapObject {
// 2 + 2 * i: prototype
// 3 + 2 * i: target map
inline FixedArray* GetPrototypeTransitions();
- MUST_USE_RESULT inline MaybeObject* SetPrototypeTransitions(
- FixedArray* prototype_transitions);
inline bool HasPrototypeTransitions();
- inline HeapObject* UncheckedPrototypeTransitions();
- inline TransitionArray* unchecked_transition_array();
-
static const int kProtoTransitionHeaderSize = 1;
static const int kProtoTransitionNumberOfEntriesOffset = 0;
static const int kProtoTransitionElementsPerEntry = 2;
@@ -6037,6 +6476,10 @@ class Map: public HeapObject {
inline bool is_stable();
inline void set_migration_target(bool value);
inline bool is_migration_target();
+ inline void set_done_inobject_slack_tracking(bool value);
+ inline bool done_inobject_slack_tracking();
+ inline void set_construction_count(int value);
+ inline int construction_count();
inline void deprecate();
inline bool is_deprecated();
inline bool CanBeDeprecated();
@@ -6045,57 +6488,54 @@ class Map: public HeapObject {
// is found by re-transitioning from the root of the transition tree using the
// descriptor array of the map. Returns NULL if no updated map is found.
// This method also applies any pending migrations along the prototype chain.
- static Handle<Map> CurrentMapForDeprecated(Handle<Map> map);
+ static MaybeHandle<Map> CurrentMapForDeprecated(Handle<Map> map)
+ V8_WARN_UNUSED_RESULT;
// Same as above, but does not touch the prototype chain.
- static Handle<Map> CurrentMapForDeprecatedInternal(Handle<Map> map);
+ static MaybeHandle<Map> CurrentMapForDeprecatedInternal(Handle<Map> map)
+ V8_WARN_UNUSED_RESULT;
- static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
- MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
- MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
static Handle<Map> CopyDropDescriptors(Handle<Map> map);
- MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
- static Handle<Map> CopyReplaceDescriptors(Handle<Map> map,
- Handle<DescriptorArray> descriptors,
- TransitionFlag flag,
- Handle<Name> name);
- MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
- DescriptorArray* descriptors,
- TransitionFlag flag,
- Name* name = NULL,
- SimpleTransitionFlag simple_flag = FULL_TRANSITION);
- static Handle<Map> CopyInstallDescriptors(
+ static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
+ Descriptor* descriptor,
+ TransitionFlag flag);
+
+ MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
Handle<Map> map,
- int new_descriptor,
- Handle<DescriptorArray> descriptors);
- MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
- Descriptor* descriptor);
- MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
- TransitionFlag flag);
- MUST_USE_RESULT MaybeObject* CopyInsertDescriptor(Descriptor* descriptor,
- TransitionFlag flag);
- MUST_USE_RESULT MaybeObject* CopyReplaceDescriptor(
- DescriptorArray* descriptors,
- Descriptor* descriptor,
- int index,
+ Handle<Name> name,
+ Handle<HeapType> type,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag);
+
+ MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant(
+ Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
TransitionFlag flag);
- MUST_USE_RESULT MaybeObject* AsElementsKind(ElementsKind kind);
- MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
- TransitionFlag flag);
+ // Returns a new map with all transitions dropped from the given map and
+ // the ElementsKind set.
+ static Handle<Map> TransitionElementsTo(Handle<Map> map,
+ ElementsKind to_kind);
+
+ static Handle<Map> AsElementsKind(Handle<Map> map, ElementsKind kind);
+
+ static Handle<Map> CopyAsElementsKind(Handle<Map> map,
+ ElementsKind kind,
+ TransitionFlag flag);
static Handle<Map> CopyForObserved(Handle<Map> map);
- static Handle<Map> CopyNormalized(Handle<Map> map,
- PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing);
+ static Handle<Map> CopyForFreeze(Handle<Map> map);
- inline void AppendDescriptor(Descriptor* desc,
- const DescriptorArray::WhitenessWitness&);
+ inline void AppendDescriptor(Descriptor* desc);
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
static Handle<Map> Copy(Handle<Map> map);
- MUST_USE_RESULT MaybeObject* Copy();
+ static Handle<Map> Create(Handle<JSFunction> constructor,
+ int extra_inobject_properties);
// Returns the next free property index (only valid for FAST MODE).
int NextFreePropertyIndex();
@@ -6115,9 +6555,6 @@ class Map: public HeapObject {
// Casting.
static inline Map* cast(Object* obj);
- // Locate an accessor in the instance descriptor.
- AccessorDescriptor* FindAccessor(Name* name);
-
// Code cache operations.
// Clears the code cache.
@@ -6127,7 +6564,6 @@ class Map: public HeapObject {
static void UpdateCodeCache(Handle<Map> map,
Handle<Name> name,
Handle<Code> code);
- MUST_USE_RESULT MaybeObject* UpdateCodeCache(Name* name, Code* code);
// Extend the descriptor array of the map with the list of descriptors.
// In case of duplicates, the latest descriptor is used.
@@ -6154,14 +6590,6 @@ class Map: public HeapObject {
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
- bool EquivalentToForTransition(Map* other);
-
- // Compares this map to another to see if they describe equivalent objects.
- // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
- // it had exactly zero inobject properties.
- // The "shared" flags of both this map and |other| are ignored.
- bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
-
// Returns the map that this map transitions to if its elements_kind
// is changed to |elements_kind|, or NULL if no such map is cached yet.
// |safe_to_add_transitions| is set to false if adding transitions is not
@@ -6174,15 +6602,6 @@ class Map: public HeapObject {
Handle<Map> FindTransitionedMap(MapHandleList* candidates);
Map* FindTransitionedMap(MapList* candidates);
- // Zaps the contents of backing data structures. Note that the
- // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
- // holding weak references when incremental marking is used, because it also
- // iterates over objects that are otherwise unreachable.
- // In general we only want to call these functions in release mode when
- // heap verification is turned on.
- void ZapPrototypeTransitions();
- void ZapTransitions();
-
bool CanTransition() {
// Only JSObject and subtypes have map transitions and back pointers.
STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
@@ -6192,6 +6611,10 @@ class Map: public HeapObject {
bool IsJSObjectMap() {
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
+ bool IsJSProxyMap() {
+ InstanceType type = instance_type();
+ return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
+ }
bool IsJSGlobalProxyMap() {
return instance_type() == JS_GLOBAL_PROXY_TYPE;
}
@@ -6203,18 +6626,17 @@ class Map: public HeapObject {
return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE;
}
- // Fires when the layout of an object with a leaf map changes.
- // This includes adding transitions to the leaf map or changing
- // the descriptor array.
- inline void NotifyLeafMapLayoutChange();
-
inline bool CanOmitMapChecks();
- void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
- CompilationInfo* info);
+ static void AddDependentCompilationInfo(Handle<Map> map,
+ DependentCode::DependencyGroup group,
+ CompilationInfo* info);
- void AddDependentCode(DependentCode::DependencyGroup group,
- Handle<Code> code);
+ static void AddDependentCode(Handle<Map> map,
+ DependentCode::DependencyGroup group,
+ Handle<Code> code);
+ static void AddDependentIC(Handle<Map> map,
+ Handle<Code> stub);
bool IsMapInArrayPrototypeChain();
@@ -6242,18 +6664,16 @@ class Map: public HeapObject {
// transitions are in the form of a map where the keys are prototype objects
  // and the values are the maps that are transitioned to.
static const int kMaxCachedPrototypeTransitions = 256;
- static Handle<Map> GetPrototypeTransition(Handle<Map> map,
- Handle<Object> prototype);
- static Handle<Map> PutPrototypeTransition(Handle<Map> map,
- Handle<Object> prototype,
- Handle<Map> target_map);
+ static Handle<Map> TransitionToPrototype(Handle<Map> map,
+ Handle<Object> prototype);
static const int kMaxPreAllocatedPropertyFields = 255;
// Layout description.
static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
- static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
+ static const int kBitField3Offset = kInstanceAttributesOffset + kIntSize;
+ static const int kPrototypeOffset = kBitField3Offset + kPointerSize;
static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
// Storage for the transition array is overloaded to directly contain a back
// pointer if unused. When the map has transitions, the back pointer is
@@ -6265,13 +6685,12 @@ class Map: public HeapObject {
kTransitionsOrBackPointerOffset + kPointerSize;
static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
- static const int kBitField3Offset = kDependentCodeOffset + kPointerSize;
- static const int kSize = kBitField3Offset + kPointerSize;
+ static const int kSize = kDependentCodeOffset + kPointerSize;
// Layout of pointer fields. Heap iteration code relies on them
// being continuously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
- static const int kPointerFieldsEndOffset = kBitField3Offset + kPointerSize;
+ static const int kPointerFieldsEndOffset = kSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -6285,52 +6704,134 @@ class Map: public HeapObject {
static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte;
// Byte offsets within kInstanceAttributesOffset attributes.
+#if V8_TARGET_LITTLE_ENDIAN
+ // Order the instance type and bit field together so that they can be
+ // loaded as a single 16-bit word with the instance type in the lower
+ // 8 bits regardless of endianness.
static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
- static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 1;
- static const int kBitFieldOffset = kInstanceAttributesOffset + 2;
- static const int kBitField2Offset = kInstanceAttributesOffset + 3;
+ static const int kBitFieldOffset = kInstanceAttributesOffset + 1;
+#else
+ static const int kBitFieldOffset = kInstanceAttributesOffset + 0;
+ static const int kInstanceTypeOffset = kInstanceAttributesOffset + 1;
+#endif
+ static const int kBitField2Offset = kInstanceAttributesOffset + 2;
+ static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3;
- STATIC_CHECK(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
+ STATIC_ASSERT(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
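To illustrate the combined load the comment above describes: with the two bytes adjacent, a single 16-bit read yields both fields, and the offsets are chosen per byte order so the instance type always lands in the low byte of the loaded word (standalone sketch, not generated-code reality):

    #include <cstdint>

    // Extract both fields from one 16-bit load of the attributes word.
    static inline uint8_t InstanceTypeOf(uint16_t type_and_bit_field) {
      return static_cast<uint8_t>(type_and_bit_field & 0xFF);  // low 8 bits
    }
    static inline uint8_t BitFieldOf(uint16_t type_and_bit_field) {
      return static_cast<uint8_t>(type_and_bit_field >> 8);
    }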
// Bit positions for bit field.
- static const int kUnused = 0; // To be used for marking recently used maps.
- static const int kHasNonInstancePrototype = 1;
- static const int kIsHiddenPrototype = 2;
- static const int kHasNamedInterceptor = 3;
- static const int kHasIndexedInterceptor = 4;
- static const int kIsUndetectable = 5;
- static const int kIsObserved = 6;
- static const int kIsAccessCheckNeeded = 7;
+ static const int kHasNonInstancePrototype = 0;
+ static const int kIsHiddenPrototype = 1;
+ static const int kHasNamedInterceptor = 2;
+ static const int kHasIndexedInterceptor = 3;
+ static const int kIsUndetectable = 4;
+ static const int kIsObserved = 5;
+ static const int kIsAccessCheckNeeded = 6;
+ class FunctionWithPrototype: public BitField<bool, 7, 1> {};
// Bit positions for bit field 2
static const int kIsExtensible = 0;
static const int kStringWrapperSafeForDefaultValueOf = 1;
- static const int kAttachedToSharedFunctionInfo = 2;
- // No bits can be used after kElementsKindFirstBit, they are all reserved for
- // storing ElementKind.
- static const int kElementsKindShift = 3;
- static const int kElementsKindBitCount = 5;
+ // Currently bit 2 is not used.
+ class ElementsKindBits: public BitField<ElementsKind, 3, 5> {};
// Derived values from bit field 2
- static const int kElementsKindMask = (-1 << kElementsKindShift) &
- ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
- (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
+ (FAST_ELEMENTS + 1) << Map::ElementsKindBits::kShift) - 1;
static const int8_t kMaximumBitField2FastSmiElementValue =
static_cast<int8_t>((FAST_SMI_ELEMENTS + 1) <<
- Map::kElementsKindShift) - 1;
+ Map::ElementsKindBits::kShift) - 1;
static const int8_t kMaximumBitField2FastHoleyElementValue =
static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1) <<
- Map::kElementsKindShift) - 1;
+ Map::ElementsKindBits::kShift) - 1;
static const int8_t kMaximumBitField2FastHoleySmiElementValue =
static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1) <<
- Map::kElementsKindShift) - 1;
+ Map::ElementsKindBits::kShift) - 1;
typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
kPointerFieldsEndOffset,
kSize> BodyDescriptor;
+ // Compares this map to another to see if they describe equivalent objects.
+ // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
+ // it had exactly zero inobject properties.
+ // The "shared" flags of both this map and |other| are ignored.
+ bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
+
private:
+ bool EquivalentToForTransition(Map* other);
+ static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
+ static Handle<Map> ShareDescriptor(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ Descriptor* descriptor);
+ static Handle<Map> CopyInstallDescriptors(
+ Handle<Map> map,
+ int new_descriptor,
+ Handle<DescriptorArray> descriptors);
+ static Handle<Map> CopyAddDescriptor(Handle<Map> map,
+ Descriptor* descriptor,
+ TransitionFlag flag);
+ static Handle<Map> CopyReplaceDescriptors(
+ Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ MaybeHandle<Name> maybe_name,
+ SimpleTransitionFlag simple_flag = FULL_TRANSITION);
+ static Handle<Map> CopyReplaceDescriptor(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ Descriptor* descriptor,
+ int index,
+ TransitionFlag flag);
+
+ static Handle<Map> CopyNormalized(Handle<Map> map,
+ PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing);
+
+ // Fires when the layout of an object with a leaf map changes.
+ // This includes adding transitions to the leaf map or changing
+ // the descriptor array.
+ inline void NotifyLeafMapLayoutChange();
+
+ static Handle<Map> TransitionElementsToSlow(Handle<Map> object,
+ ElementsKind to_kind);
+
+ // Zaps the contents of backing data structures. Note that the
+ // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
+ // holding weak references when incremental marking is used, because it also
+ // iterates over objects that are otherwise unreachable.
+ // In general we only want to call these functions in release mode when
+ // heap verification is turned on.
+ void ZapPrototypeTransitions();
+ void ZapTransitions();
+
+ void DeprecateTransitionTree();
+ void DeprecateTarget(Name* key, DescriptorArray* new_descriptors);
+
+ Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
+
+ void UpdateDescriptor(int descriptor_number, Descriptor* desc);
+
+ void PrintGeneralization(FILE* file,
+ const char* reason,
+ int modify_index,
+ int split,
+ int descriptors,
+ bool constant_to_field,
+ Representation old_representation,
+ Representation new_representation,
+ HeapType* old_field_type,
+ HeapType* new_field_type);
+
+ static inline void SetPrototypeTransitions(
+ Handle<Map> map,
+ Handle<FixedArray> prototype_transitions);
+
+ static Handle<Map> GetPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype);
+ static Handle<Map> PutPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype,
+ Handle<Map> target_map);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
};
@@ -6403,9 +6904,6 @@ class Script: public Struct {
// extracted.
DECL_ACCESSORS(column_offset, Smi)
- // [data]: additional data associated with this script.
- DECL_ACCESSORS(data, Object)
-
// [context_data]: context data for the context this script was compiled in.
DECL_ACCESSORS(context_data, Object)
@@ -6451,6 +6949,22 @@ class Script: public Struct {
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
+ // Convert code position into column number.
+ static int GetColumnNumber(Handle<Script> script, int code_pos);
+
+ // Convert code position into (zero-based) line number.
+ // The non-handlified version does not allocate, but may be much slower.
+ static int GetLineNumber(Handle<Script> script, int code_pos);
+ int GetLineNumber(int code_pos);
+
+ static Handle<Object> GetNameOrSourceURL(Handle<Script> script);
+
+ // Init line_ends array with code positions of line ends inside script source.
+ static void InitLineEnds(Handle<Script> script);
+
+ // Get the JS object wrapping the given script; create it if none exists.
+ static Handle<JSObject> GetWrapper(Handle<Script> script);
+
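The line_ends array makes position-to-line queries a binary search: end positions are ascending, so the line of a code position is the index of the first line end at or past it. A hedged standalone sketch of that lookup (the in-tree version works over a FixedArray of Smis):

    #include <algorithm>
    #include <vector>

    // Sketch: zero-based line number of code_pos given sorted line ends.
    static int LineNumberFromEnds(const std::vector<int>& line_ends,
                                  int code_pos) {
      std::vector<int>::const_iterator it =
          std::lower_bound(line_ends.begin(), line_ends.end(), code_pos);
      return static_cast<int>(it - line_ends.begin());
    }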
// Dispatched behavior.
DECLARE_PRINTER(Script)
DECLARE_VERIFIER(Script)
@@ -6459,8 +6973,7 @@ class Script: public Struct {
static const int kNameOffset = kSourceOffset + kPointerSize;
static const int kLineOffsetOffset = kNameOffset + kPointerSize;
static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
- static const int kDataOffset = kColumnOffsetOffset + kPointerSize;
- static const int kContextOffset = kDataOffset + kPointerSize;
+ static const int kContextOffset = kColumnOffsetOffset + kPointerSize;
static const int kWrapperOffset = kContextOffset + kPointerSize;
static const int kTypeOffset = kWrapperOffset + kPointerSize;
static const int kLineEndsOffset = kTypeOffset + kPointerSize;
@@ -6473,6 +6986,8 @@ class Script: public Struct {
static const int kSize = kFlagsOffset + kPointerSize;
private:
+ int GetLineNumberWithArray(int code_pos);
+
// Bit positions in the flags field.
static const int kCompilationTypeBit = 0;
static const int kCompilationStateBit = 1;
@@ -6491,29 +7006,26 @@ class Script: public Struct {
//
// Installation of ids for the selected builtin functions is handled
// by the bootstrapper.
-#define FUNCTIONS_WITH_ID_LIST(V) \
- V(Array.prototype, push, ArrayPush) \
- V(Array.prototype, pop, ArrayPop) \
- V(Function.prototype, apply, FunctionApply) \
- V(String.prototype, charCodeAt, StringCharCodeAt) \
- V(String.prototype, charAt, StringCharAt) \
- V(String, fromCharCode, StringFromCharCode) \
- V(Math, floor, MathFloor) \
- V(Math, round, MathRound) \
- V(Math, ceil, MathCeil) \
- V(Math, abs, MathAbs) \
- V(Math, log, MathLog) \
- V(Math, sin, MathSin) \
- V(Math, cos, MathCos) \
- V(Math, tan, MathTan) \
- V(Math, asin, MathASin) \
- V(Math, acos, MathACos) \
- V(Math, atan, MathATan) \
- V(Math, exp, MathExp) \
- V(Math, sqrt, MathSqrt) \
- V(Math, pow, MathPow) \
- V(Math, max, MathMax) \
- V(Math, min, MathMin) \
+#define FUNCTIONS_WITH_ID_LIST(V) \
+ V(Array.prototype, indexOf, ArrayIndexOf) \
+ V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
+ V(Array.prototype, push, ArrayPush) \
+ V(Array.prototype, pop, ArrayPop) \
+ V(Array.prototype, shift, ArrayShift) \
+ V(Function.prototype, apply, FunctionApply) \
+ V(String.prototype, charCodeAt, StringCharCodeAt) \
+ V(String.prototype, charAt, StringCharAt) \
+ V(String, fromCharCode, StringFromCharCode) \
+ V(Math, floor, MathFloor) \
+ V(Math, round, MathRound) \
+ V(Math, ceil, MathCeil) \
+ V(Math, abs, MathAbs) \
+ V(Math, log, MathLog) \
+ V(Math, exp, MathExp) \
+ V(Math, sqrt, MathSqrt) \
+ V(Math, pow, MathPow) \
+ V(Math, max, MathMax) \
+ V(Math, min, MathMin) \
V(Math, imul, MathImul)
enum BuiltinFunctionId {
@@ -6524,7 +7036,9 @@ enum BuiltinFunctionId {
#undef DECLARE_FUNCTION_ID
  // Fake id for a special case of Math.pow. Note that it continues the
// list of math functions.
- kMathPowHalf
+ kMathPowHalf,
+ // Installed only on --harmony-maths.
+ kMathClz32
};
@@ -6543,14 +7057,16 @@ class SharedFunctionInfo: public HeapObject {
// and a shared literals array or Smi(0) if none.
DECL_ACCESSORS(optimized_code_map, Object)
- // Returns index i of the entry with the specified context. At position
- // i - 1 is the context, position i the code, and i + 1 the literals array.
- // Returns -1 when no matching entry is found.
- int SearchOptimizedCodeMap(Context* native_context);
+ // Returns index i of the entry with the specified context and OSR ast id.
+ // At position i - 1 is the context, position i the code, and i + 1 the
+ // literals array. Returns -1 when no matching entry is found.
+ int SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id);
// Installs optimized code from the code map on the given closure. The
// index has to be consistent with a search result as defined above.
- void InstallFromOptimizedCodeMap(JSFunction* function, int index);
+ FixedArray* GetLiteralsFromOptimizedCodeMap(int index);
+
+ Code* GetCodeFromOptimizedCodeMap(int index);
// Clear optimized code map.
void ClearOptimizedCodeMap();
@@ -6558,25 +7074,26 @@ class SharedFunctionInfo: public HeapObject {
  // Removes a specific optimized code object from the optimized code map.
void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
+ void ClearTypeFeedbackInfo();
+
// Trims the optimized code map after entries have been removed.
void TrimOptimizedCodeMap(int shrink_by);
// Add a new entry to the optimized code map.
- MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
- Code* code,
- FixedArray* literals);
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
- Handle<FixedArray> literals);
+ Handle<FixedArray> literals,
+ BailoutId osr_ast_id);
// Layout description of the optimized code map.
static const int kNextMapIndex = 0;
static const int kEntriesStart = 1;
- static const int kEntryLength = 3;
- static const int kFirstContextSlot = FixedArray::kHeaderSize + kPointerSize;
- static const int kFirstCodeSlot = FixedArray::kHeaderSize + 2 * kPointerSize;
- static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
+ static const int kContextOffset = 0;
+ static const int kCachedCodeOffset = 1;
+ static const int kLiteralsOffset = 2;
+ static const int kOsrAstIdOffset = 3;
+ static const int kEntryLength = 4;
static const int kInitialLength = kEntriesStart + kEntryLength;
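Putting the layout constants together, a hedged sketch of the scan SearchOptimizedCodeMap implies, over a plain-array stand-in for the FixedArray (entry slots: context, code, literals, OSR ast id; the returned index points at the code slot, matching the i - 1 / i / i + 1 note above):

    // Sketch only; elements[0] is the next-map slot, entries start at slot 1.
    static int SearchCodeMapSketch(Object** elements, int length,
                                   Object* native_context, Object* osr_ast_id) {
      const int kEntriesStart = 1, kEntryLength = 4;
      for (int start = kEntriesStart; start + kEntryLength <= length;
           start += kEntryLength) {
        if (elements[start + 0] == native_context &&  // kContextOffset
            elements[start + 3] == osr_ast_id) {      // kOsrAstIdOffset
          return start + 1;                           // kCachedCodeOffset
        }
      }
      return -1;  // No matching entry.
    }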
// [scope_info]: Scope info.
@@ -6605,101 +7122,11 @@ class SharedFunctionInfo: public HeapObject {
inline int expected_nof_properties();
inline void set_expected_nof_properties(int value);
- // Inobject slack tracking is the way to reclaim unused inobject space.
- //
- // The instance size is initially determined by adding some slack to
- // expected_nof_properties (to allow for a few extra properties added
- // after the constructor). There is no guarantee that the extra space
- // will not be wasted.
- //
- // Here is the algorithm to reclaim the unused inobject space:
- // - Detect the first constructor call for this SharedFunctionInfo.
- // When it happens enter the "in progress" state: remember the
- // constructor's initial_map and install a special construct stub that
- // counts constructor calls.
- // - While the tracking is in progress create objects filled with
- // one_pointer_filler_map instead of undefined_value. This way they can be
- // resized quickly and safely.
- // - Once enough (kGenerousAllocationCount) objects have been created
- // compute the 'slack' (traverse the map transition tree starting from the
- // initial_map and find the lowest value of unused_property_fields).
- // - Traverse the transition tree again and decrease the instance size
- // of every map. Existing objects will resize automatically (they are
- // filled with one_pointer_filler_map). All further allocations will
- // use the adjusted instance size.
- // - Decrease expected_nof_properties so that an allocations made from
- // another context will use the adjusted instance size too.
- // - Exit "in progress" state by clearing the reference to the initial_map
- // and setting the regular construct stub (generic or inline).
- //
- // The above is the main event sequence. Some special cases are possible
- // while the tracking is in progress:
- //
- // - GC occurs.
- // Check if the initial_map is referenced by any live objects (except this
- // SharedFunctionInfo). If it is, continue tracking as usual.
- // If it is not, clear the reference and reset the tracking state. The
- // tracking will be initiated again on the next constructor call.
- //
- // - The constructor is called from another context.
- // Immediately complete the tracking, perform all the necessary changes
- // to maps. This is necessary because there is no efficient way to track
- // multiple initial_maps.
- // Proceed to create an object in the current context (with the adjusted
- // size).
- //
- // - A different constructor function sharing the same SharedFunctionInfo is
- // called in the same context. This could be another closure in the same
- // context, or the first function could have been disposed.
- // This is handled the same way as the previous case.
- //
- // Important: inobject slack tracking is not attempted during the snapshot
- // creation.
-
- static const int kGenerousAllocationCount = 8;
-
- // [construction_count]: Counter for constructor calls made during
- // the tracking phase.
- inline int construction_count();
- inline void set_construction_count(int value);
-
- // [initial_map]: initial map of the first function called as a constructor.
- // Saved for the duration of the tracking phase.
- // This is a weak link (GC resets it to undefined_value if no other live
- // object reference this map).
- DECL_ACCESSORS(initial_map, Object)
-
- // True if the initial_map is not undefined and the countdown stub is
- // installed.
- inline bool IsInobjectSlackTrackingInProgress();
-
- // Starts the tracking.
- // Stores the initial map and installs the countdown stub.
- // IsInobjectSlackTrackingInProgress is normally true after this call,
- // except when tracking have not been started (e.g. the map has no unused
- // properties or the snapshot is being built).
- void StartInobjectSlackTracking(Map* map);
-
- // Completes the tracking.
- // IsInobjectSlackTrackingInProgress is false after this call.
- void CompleteInobjectSlackTracking();
-
- // Invoked before pointers in SharedFunctionInfo are being marked.
- // Also clears the optimized code map.
- inline void BeforeVisitingPointers();
-
- // Clears the initial_map before the GC marking phase to ensure the reference
- // is weak. IsInobjectSlackTrackingInProgress is false after this call.
- void DetachInitialMap();
-
- // Restores the link to the initial map after the GC marking phase.
- // IsInobjectSlackTrackingInProgress is true after this call.
- void AttachInitialMap(Map* map);
-
- // False if there are definitely no live objects created from this function.
- // True if live objects _may_ exist (existence not guaranteed).
- // May go back from true to false after GC.
- DECL_BOOLEAN_ACCESSORS(live_objects_may_exist)
+ // [feedback_vector] - accumulates ast node feedback from full-codegen and
+ // (increasingly) from crankshafted code where sufficient feedback isn't
+ // available. Currently the field is duplicated in
+ // TypeFeedbackInfo::feedback_vector, but the allocation is done here.
+ DECL_ACCESSORS(feedback_vector, FixedArray)
// [instance class name]: class name for instances.
DECL_ACCESSORS(instance_class_name, Object)
@@ -6772,6 +7199,7 @@ class SharedFunctionInfo: public HeapObject {
inline void set_ast_node_count(int count);
inline int profiler_ticks();
+ inline void set_profiler_ticks(int ticks);
// Inline cache age is used to infer whether the function survived a context
// disposal or not. In the former case we reset the opt_count.
@@ -6795,20 +7223,9 @@ class SharedFunctionInfo: public HeapObject {
// spending time attempting to optimize it again.
DECL_BOOLEAN_ACCESSORS(optimization_disabled)
- // Indicates the language mode of the function's code as defined by the
- // current harmony drafts for the next ES language standard. Possible
- // values are:
- // 1. CLASSIC_MODE - Unrestricted syntax and semantics, same as in ES5.
- // 2. STRICT_MODE - Restricted syntax and semantics, same as in ES5.
- // 3. EXTENDED_MODE - Only available under the harmony flag, not part of ES5.
- inline LanguageMode language_mode();
- inline void set_language_mode(LanguageMode language_mode);
-
- // Indicates whether the language mode of this function is CLASSIC_MODE.
- inline bool is_classic_mode();
-
- // Indicates whether the language mode of this function is EXTENDED_MODE.
- inline bool is_extended_mode();
+ // Indicates the language mode.
+ inline StrictMode strict_mode();
+ inline void set_strict_mode(StrictMode strict_mode);
// False if the function definitely does not allocate an arguments object.
DECL_BOOLEAN_ACCESSORS(uses_arguments)
@@ -6934,12 +7351,6 @@ class SharedFunctionInfo: public HeapObject {
void ResetForNewContext(int new_ic_age);
- // Helper to compile the shared code. Returns true on success, false on
- // failure (e.g., stack overflow during compilation). This is only used by
- // the debugger, it is not possible to compile without a context otherwise.
- static bool CompileLazy(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
-
// Casting.
static inline SharedFunctionInfo* cast(Object* obj);
@@ -6960,16 +7371,12 @@ class SharedFunctionInfo: public HeapObject {
static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
- static const int kInitialMapOffset =
+ static const int kFeedbackVectorOffset =
kInferredNameOffset + kPointerSize;
- // ast_node_count is a Smi field. It could be grouped with another Smi field
- // into a PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
- static const int kAstNodeCountOffset =
- kInitialMapOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
- kAstNodeCountOffset + kPointerSize;
+ kFeedbackVectorOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
@@ -6987,9 +7394,13 @@ class SharedFunctionInfo: public HeapObject {
kCompilerHintsOffset + kPointerSize;
static const int kCountersOffset =
kOptCountAndBailoutReasonOffset + kPointerSize;
+ static const int kAstNodeCountOffset =
+ kCountersOffset + kPointerSize;
+ static const int kProfilerTicksOffset =
+ kAstNodeCountOffset + kPointerSize;
// Total size.
- static const int kSize = kCountersOffset + kPointerSize;
+ static const int kSize = kProfilerTicksOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
@@ -7001,7 +7412,7 @@ class SharedFunctionInfo: public HeapObject {
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
- kAstNodeCountOffset + kPointerSize;
+ kFeedbackVectorOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
@@ -7022,30 +7433,23 @@ class SharedFunctionInfo: public HeapObject {
static const int kOptCountAndBailoutReasonOffset =
kCompilerHintsOffset + kIntSize;
-
static const int kCountersOffset =
kOptCountAndBailoutReasonOffset + kIntSize;
- // Total size.
- static const int kSize = kCountersOffset + kIntSize;
+ static const int kAstNodeCountOffset =
+ kCountersOffset + kIntSize;
+ static const int kProfilerTicksOffset =
+ kAstNodeCountOffset + kIntSize;
-#endif
+ // Total size.
+ static const int kSize = kProfilerTicksOffset + kIntSize;
- // The construction counter for inobject slack tracking is stored in the
- // most significant byte of compiler_hints which is otherwise unused.
- // Its offset depends on the endian-ness of the architecture.
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- static const int kConstructionCountOffset = kCompilerHintsOffset + 3;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- static const int kConstructionCountOffset = kCompilerHintsOffset + 0;
-#else
-#error Unknown byte ordering
#endif
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
typedef FixedBodyDescriptor<kNameOffset,
- kInitialMapOffset + kPointerSize,
+ kFeedbackVectorOffset + kPointerSize,
kSize> BodyDescriptor;
// Bit positions in start_position_and_type.
@@ -7060,10 +7464,8 @@ class SharedFunctionInfo: public HeapObject {
enum CompilerHints {
kAllowLazyCompilation,
kAllowLazyCompilationWithoutContext,
- kLiveObjectsMayExist,
kOptimizationDisabled,
kStrictModeFunction,
- kExtendedModeFunction,
kUsesArguments,
kHasDuplicateParameters,
kNative,
@@ -7108,26 +7510,18 @@ class SharedFunctionInfo: public HeapObject {
static const int kStrictModeBitWithinByte =
(kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
- static const int kExtendedModeBitWithinByte =
- (kExtendedModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
static const int kNativeBitWithinByte =
(kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if defined(V8_TARGET_LITTLE_ENDIAN)
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kExtendedModeByteOffset = kCompilerHintsOffset +
- (kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
static const int kNativeByteOffset = kCompilerHintsOffset +
(kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif defined(V8_TARGET_BIG_ENDIAN)
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kExtendedModeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
static const int kNativeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
@@ -7158,6 +7552,9 @@ class JSGeneratorObject: public JSObject {
// cannot be resumed.
inline int continuation();
inline void set_continuation(int continuation);
+ inline bool is_closed();
+ inline bool is_executing();
+ inline bool is_suspended();
// [operand_stack]: Saved operand stack.
DECL_ACCESSORS(operand_stack, FixedArray)
@@ -7192,7 +7589,7 @@ class JSGeneratorObject: public JSObject {
enum ResumeMode { NEXT, THROW };
// Yielding from a generator returns an object with the following inobject
- // properties. See Context::generator_result_map() for the map.
+ // properties. See Context::iterator_result_map() for the map.
static const int kResultValuePropertyIndex = 0;
static const int kResultDonePropertyIndex = 1;
static const int kResultPropertyCount = 2;
@@ -7259,6 +7656,9 @@ class JSFunction: public JSObject {
// Tells whether this function is builtin.
inline bool IsBuiltin();
+ // Tells whether this function is defined in a native script.
+ inline bool IsNative();
+
// Tells whether or not the function needs arguments adaption.
inline bool NeedsArgumentsAdaption();
@@ -7270,29 +7670,65 @@ class JSFunction: public JSObject {
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
- void MarkForLazyRecompilation();
- void MarkForConcurrentRecompilation();
- void MarkInRecompileQueue();
-
- // Helpers to compile this function. Returns true on success, false on
- // failure (e.g., stack overflow during compilation).
- static bool EnsureCompiled(Handle<JSFunction> function,
- ClearExceptionFlag flag);
- static bool CompileLazy(Handle<JSFunction> function,
- ClearExceptionFlag flag);
- static Handle<Code> CompileOsr(Handle<JSFunction> function,
- BailoutId osr_ast_id,
- ClearExceptionFlag flag);
- static bool CompileOptimized(Handle<JSFunction> function,
- ClearExceptionFlag flag);
+ void MarkForOptimization();
+ void MarkForConcurrentOptimization();
+ void MarkInOptimizationQueue();
// Tells whether or not the function is already marked for lazy
// recompilation.
- inline bool IsMarkedForLazyRecompilation();
- inline bool IsMarkedForConcurrentRecompilation();
+ inline bool IsMarkedForOptimization();
+ inline bool IsMarkedForConcurrentOptimization();
// Tells whether or not the function is on the concurrent recompilation queue.
- inline bool IsInRecompileQueue();
+ inline bool IsInOptimizationQueue();
+
+ // Inobject slack tracking is the way to reclaim unused inobject space.
+ //
+ // The instance size is initially determined by adding some slack to
+ // expected_nof_properties (to allow for a few extra properties added
+ // after the constructor). There is no guarantee that the extra space
+ // will not be wasted.
+ //
+ // Here is the algorithm to reclaim the unused inobject space:
+ // - Detect the first constructor call for this JSFunction.
+ // When it happens, enter the "in progress" state: initialize the
+ // construction counter in the initial_map and set the
+ // |done_inobject_slack_tracking| flag.
+ // - While the tracking is in progress create objects filled with
+ // one_pointer_filler_map instead of undefined_value. This way they can be
+ // resized quickly and safely.
+ // - Once enough (kGenerousAllocationCount) objects have been created
+ // compute the 'slack' (traverse the map transition tree starting from the
+ // initial_map and find the lowest value of unused_property_fields).
+ // - Traverse the transition tree again and decrease the instance size
+ // of every map. Existing objects will resize automatically (they are
+ // filled with one_pointer_filler_map). All further allocations will
+ // use the adjusted instance size.
+ // - SharedFunctionInfo's expected_nof_properties is left unmodified since
+ // allocations made using different closures could actually create
+ // different kinds of objects (see prototype inheritance pattern).
+ //
+ // Important: inobject slack tracking is not attempted during the snapshot
+ // creation.
+
+ static const int kGenerousAllocationCount = Map::ConstructionCount::kMax;
+ static const int kFinishSlackTracking = 1;
+ static const int kNoSlackTracking = 0;
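A hedged sketch of the countdown these constants describe, over a plain int stand-in for the Map::ConstructionCount bit field (the real decrement happens in the generic construct stub):

    // Sketch: one constructor call during tracking. Returns true when the
    // caller should run CompleteInobjectSlackTracking.
    static bool CountConstructionSketch(int* construction_count) {
      if (*construction_count > 1 /* kFinishSlackTracking */) {
        --*construction_count;    // Still tracking: just count the call.
        return false;
      }
      if (*construction_count == 1) {
        *construction_count = 0;  // kNoSlackTracking: tracking is finished.
        return true;
      }
      return false;               // Tracking never started or already done.
    }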
+
+ // True if the initial_map is set and the object construction countdown
+ // counter is not zero.
+ inline bool IsInobjectSlackTrackingInProgress();
+
+ // Starts the tracking.
+ // Initializes object constructions countdown counter in the initial map.
+ // IsInobjectSlackTrackingInProgress is normally true after this call,
+ // except when tracking has not been started (e.g. the map has no unused
+ // properties or the snapshot is being built).
+ void StartInobjectSlackTracking();
+
+ // Completes the tracking.
+ // IsInobjectSlackTrackingInProgress is false after this call.
+ void CompleteInobjectSlackTracking();
// [literals_or_bindings]: Fixed array holding either
// the materialized literals or the bindings of a bound function.
@@ -7337,7 +7773,7 @@ class JSFunction: public JSObject {
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
- void RemovePrototype();
+ bool RemovePrototype();
inline bool should_have_prototype();
// Accessor for this function's initial map's [[class]]
@@ -7424,6 +7860,9 @@ class JSGlobalProxy : public JSObject {
// It is null value if this object is not used by any context.
DECL_ACCESSORS(native_context, Object)
+ // [hash]: The hash code property (undefined if not initialized yet).
+ DECL_ACCESSORS(hash, Object)
+
// Casting.
static inline JSGlobalProxy* cast(Object* obj);
@@ -7435,7 +7874,8 @@ class JSGlobalProxy : public JSObject {
// Layout description.
static const int kNativeContextOffset = JSObject::kHeaderSize;
- static const int kSize = kNativeContextOffset + kPointerSize;
+ static const int kHashOffset = kNativeContextOffset + kPointerSize;
+ static const int kSize = kHashOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
@@ -7464,15 +7904,6 @@ class GlobalObject: public JSObject {
// Retrieve the property cell used to store a property.
PropertyCell* GetPropertyCell(LookupResult* result);
- // This is like GetProperty, but is used when you know the lookup won't fail
- // by throwing an exception. This is for the debug and builtins global
- // objects, where it is known which properties can be expected to be present
- // on the object.
- Object* GetPropertyNoExceptionThrown(Name* key) {
- Object* answer = GetProperty(key)->ToObjectUnchecked();
- return answer;
- }
-
// Casting.
static inline GlobalObject* cast(Object* obj);
@@ -7599,7 +8030,7 @@ class JSDate: public JSObject {
// [sec]: caches seconds. Either undefined, smi, or NaN.
DECL_ACCESSORS(sec, Object)
// [cache stamp]: sample of the date cache stamp at the
- // moment when local fields were cached.
+ // moment when the cacheable fields were cached.
DECL_ACCESSORS(cache_stamp, Object)
// Casting.
@@ -7663,7 +8094,7 @@ class JSDate: public JSObject {
Object* GetUTCField(FieldIndex index, double value, DateCache* date_cache);
// Computes and caches the cacheable fields of the date.
- inline void SetLocalFields(int64_t local_time_ms, DateCache* date_cache);
+ inline void SetCachedFields(int64_t local_time_ms, DateCache* date_cache);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSDate);
@@ -7687,9 +8118,6 @@ class JSMessageObject: public JSObject {
// [script]: the script from which the error message originated.
DECL_ACCESSORS(script, Object)
- // [stack_trace]: the stack trace for this error message.
- DECL_ACCESSORS(stack_trace, Object)
-
// [stack_frames]: an array of stack frames for this error object.
DECL_ACCESSORS(stack_frames, Object)
@@ -7712,8 +8140,7 @@ class JSMessageObject: public JSObject {
static const int kTypeOffset = JSObject::kHeaderSize;
static const int kArgumentsOffset = kTypeOffset + kPointerSize;
static const int kScriptOffset = kArgumentsOffset + kPointerSize;
- static const int kStackTraceOffset = kScriptOffset + kPointerSize;
- static const int kStackFramesOffset = kStackTraceOffset + kPointerSize;
+ static const int kStackFramesOffset = kScriptOffset + kPointerSize;
static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
static const int kSize = kEndPositionOffset + kPointerSize;
@@ -7877,38 +8304,32 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
return key->HashForObject(object);
}
- MUST_USE_RESULT static MaybeObject* AsObject(Heap* heap,
- HashTableKey* key) {
- return key->AsObject(heap);
- }
+ static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
static const int kPrefixSize = 0;
static const int kEntrySize = 2;
};
-class CompilationCacheTable: public HashTable<CompilationCacheShape,
+class CompilationCacheTable: public HashTable<CompilationCacheTable,
+ CompilationCacheShape,
HashTableKey*> {
public:
// Find the cached value for a string key; otherwise return null.
- Object* Lookup(String* src, Context* context);
- Object* LookupEval(String* src,
- Context* context,
- LanguageMode language_mode,
- int scope_position);
- Object* LookupRegExp(String* source, JSRegExp::Flags flags);
- MUST_USE_RESULT MaybeObject* Put(String* src,
- Context* context,
- Object* value);
- MUST_USE_RESULT MaybeObject* PutEval(String* src,
- Context* context,
- SharedFunctionInfo* value,
- int scope_position);
- MUST_USE_RESULT MaybeObject* PutRegExp(String* src,
- JSRegExp::Flags flags,
- FixedArray* value);
-
- // Remove given value from cache.
+ Handle<Object> Lookup(Handle<String> src, Handle<Context> context);
+ Handle<Object> LookupEval(Handle<String> src, Handle<Context> context,
+ StrictMode strict_mode, int scope_position);
+ Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
+ static Handle<CompilationCacheTable> Put(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> context, Handle<Object> value);
+ static Handle<CompilationCacheTable> PutEval(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> context, Handle<SharedFunctionInfo> value,
+ int scope_position);
+ static Handle<CompilationCacheTable> PutRegExp(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ JSRegExp::Flags flags, Handle<FixedArray> value);
void Remove(Object* value);
static inline CompilationCacheTable* cast(Object* obj);
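
Put, PutEval and PutRegExp now follow the handle-based HashTable convention: they return the table, which may be a freshly allocated larger copy, so the caller must adopt the returned handle. A minimal caller-side sketch of that convention; GetTable and SetTable are hypothetical helpers, not part of this change:

    // Hypothetical caller: Put() may grow and reallocate the table, so the
    // handle it returns must replace the previously stored one.
    void CacheScript(Isolate* isolate, Handle<String> source,
                     Handle<Context> context, Handle<Object> value) {
      Handle<CompilationCacheTable> table = GetTable(isolate);   // assumption
      table = CompilationCacheTable::Put(table, source, context, value);
      SetTable(isolate, table);                                  // assumption
    }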
@@ -7924,7 +8345,8 @@ class CodeCache: public Struct {
DECL_ACCESSORS(normal_type_cache, Object)
// Add the code object to the cache.
- MUST_USE_RESULT MaybeObject* Update(Name* name, Code* code);
+ static void Update(
+ Handle<CodeCache> cache, Handle<Name> name, Handle<Code> code);
// Looks up a code object in the cache. Returns the code object if found and
// undefined if not.
@@ -7951,8 +8373,10 @@ class CodeCache: public Struct {
static const int kSize = kNormalTypeCacheOffset + kPointerSize;
private:
- MUST_USE_RESULT MaybeObject* UpdateDefaultCache(Name* name, Code* code);
- MUST_USE_RESULT MaybeObject* UpdateNormalTypeCache(Name* name, Code* code);
+ static void UpdateDefaultCache(
+ Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code);
+ static void UpdateNormalTypeCache(
+ Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code);
Object* LookupDefaultCache(Name* name, Code::Flags flags);
Object* LookupNormalTypeCache(Name* name, Code::Flags flags);
@@ -7980,21 +8404,22 @@ class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
return key->HashForObject(object);
}
- MUST_USE_RESULT static MaybeObject* AsObject(Heap* heap,
- HashTableKey* key) {
- return key->AsObject(heap);
- }
+ static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
static const int kPrefixSize = 0;
static const int kEntrySize = 2;
};
-class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape,
+class CodeCacheHashTable: public HashTable<CodeCacheHashTable,
+ CodeCacheHashTableShape,
HashTableKey*> {
public:
Object* Lookup(Name* name, Code::Flags flags);
- MUST_USE_RESULT MaybeObject* Put(Name* name, Code* code);
+ static Handle<CodeCacheHashTable> Put(
+ Handle<CodeCacheHashTable> table,
+ Handle<Name> name,
+ Handle<Code> code);
int GetIndex(Name* name, Code::Flags flags);
void RemoveByIndex(int index);
@@ -8018,9 +8443,6 @@ class PolymorphicCodeCache: public Struct {
Code::Flags flags,
Handle<Code> code);
- MUST_USE_RESULT MaybeObject* Update(MapHandleList* maps,
- Code::Flags flags,
- Code* code);
// Returns an undefined value if the entry is not found.
Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags);
@@ -8040,13 +8462,17 @@ class PolymorphicCodeCache: public Struct {
class PolymorphicCodeCacheHashTable
- : public HashTable<CodeCacheHashTableShape, HashTableKey*> {
+ : public HashTable<PolymorphicCodeCacheHashTable,
+ CodeCacheHashTableShape,
+ HashTableKey*> {
public:
Object* Lookup(MapHandleList* maps, int code_kind);
- MUST_USE_RESULT MaybeObject* Put(MapHandleList* maps,
- int code_kind,
- Code* code);
+ static Handle<PolymorphicCodeCacheHashTable> Put(
+ Handle<PolymorphicCodeCacheHashTable> hash_table,
+ MapHandleList* maps,
+ int code_kind,
+ Handle<Code> code);
static inline PolymorphicCodeCacheHashTable* cast(Object* obj);
@@ -8072,7 +8498,6 @@ class TypeFeedbackInfo: public Struct {
inline void set_inlined_type_change_checksum(int checksum);
inline bool matches_inlined_type_change_checksum(int checksum);
- DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
static inline TypeFeedbackInfo* cast(Object* obj);
@@ -8082,8 +8507,23 @@ class TypeFeedbackInfo: public Struct {
static const int kStorage1Offset = HeapObject::kHeaderSize;
static const int kStorage2Offset = kStorage1Offset + kPointerSize;
- static const int kTypeFeedbackCellsOffset = kStorage2Offset + kPointerSize;
- static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;
+ static const int kSize = kStorage2Offset + kPointerSize;
+
+ // TODO(mvstanton): move these sentinel declarations to shared function info.
+ // The object that indicates an uninitialized cache.
+ static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+
+ // The object that indicates a megamorphic state.
+ static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+
+ // The object that indicates a monomorphic state of Array with
+ // ElementsKind.
+ static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind);
+
+ // A raw version of the uninitialized sentinel that's safe to read during
+ // garbage collection (e.g., for patching the cache).
+ static inline Object* RawUninitializedSentinel(Heap* heap);
private:
static const int kTypeChangeChecksumBits = 7;
@@ -8117,23 +8557,24 @@ class AllocationSite: public Struct {
static const int kPretenureMinimumCreated = 100;
// Values for pretenure decision field.
- enum {
+ enum PretenureDecision {
kUndecided = 0,
kDontTenure = 1,
- kTenure = 2,
- kZombie = 3
+ kMaybeTenure = 2,
+ kTenure = 3,
+ kZombie = 4,
+ kLastPretenureDecisionValue = kZombie
};
+ const char* PretenureDecisionName(PretenureDecision decision);
+
DECL_ACCESSORS(transition_info, Object)
// nested_site threads a list of sites that represent nested literals
// walked in a particular order. So [[1, 2], 1, 2] will have one
// nested_site, but [[1, 2], 3, [4]] will have a list of two.
DECL_ACCESSORS(nested_site, Object)
- DECL_ACCESSORS(memento_found_count, Smi)
- DECL_ACCESSORS(memento_create_count, Smi)
- // TODO(mvstanton): we don't need a whole integer to record pretenure
- // decision. Consider sharing space with memento_found_count.
- DECL_ACCESSORS(pretenure_decision, Smi)
+ DECL_ACCESSORS(pretenure_data, Smi)
+ DECL_ACCESSORS(pretenure_create_count, Smi)
DECL_ACCESSORS(dependent_code, DependentCode)
DECL_ACCESSORS(weak_next, Object)
@@ -8142,18 +8583,64 @@ class AllocationSite: public Struct {
// This method is expensive; it should only be called for reporting.
bool IsNestedSite();
+ // transition_info bitfields, for constructed array transition info.
class ElementsKindBits: public BitField<ElementsKind, 0, 15> {};
class UnusedBits: public BitField<int, 15, 14> {};
class DoNotInlineBit: public BitField<bool, 29, 1> {};
- inline void IncrementMementoFoundCount();
+ // Bitfields for pretenure_data.
+ class MementoFoundCountBits: public BitField<int, 0, 26> {};
+ class PretenureDecisionBits: public BitField<PretenureDecision, 26, 3> {};
+ class DeoptDependentCodeBit: public BitField<bool, 29, 1> {};
+ STATIC_ASSERT(PretenureDecisionBits::kMax >= kLastPretenureDecisionValue);
+
+ // Increments the mementos found counter and returns true when the first
+ // memento was found for a given allocation site.
+ inline bool IncrementMementoFoundCount();
inline void IncrementMementoCreateCount();
- PretenureFlag GetPretenureMode() {
- int mode = pretenure_decision()->value();
- // Zombie objects "decide" to be untenured.
- return (mode == kTenure) ? TENURED : NOT_TENURED;
+ PretenureFlag GetPretenureMode();
+
+ void ResetPretenureDecision();
+
+ PretenureDecision pretenure_decision() {
+ int value = pretenure_data()->value();
+ return PretenureDecisionBits::decode(value);
+ }
+
+ void set_pretenure_decision(PretenureDecision decision) {
+ int value = pretenure_data()->value();
+ set_pretenure_data(
+ Smi::FromInt(PretenureDecisionBits::update(value, decision)),
+ SKIP_WRITE_BARRIER);
+ }
+
+ bool deopt_dependent_code() {
+ int value = pretenure_data()->value();
+ return DeoptDependentCodeBit::decode(value);
+ }
+
+ void set_deopt_dependent_code(bool deopt) {
+ int value = pretenure_data()->value();
+ set_pretenure_data(
+ Smi::FromInt(DeoptDependentCodeBit::update(value, deopt)),
+ SKIP_WRITE_BARRIER);
+ }
+
+ int memento_found_count() {
+ int value = pretenure_data()->value();
+ return MementoFoundCountBits::decode(value);
+ }
+
+ inline void set_memento_found_count(int count);
+
+ int memento_create_count() {
+ return pretenure_create_count()->value();
+ }
+
+ void set_memento_create_count(int count) {
+ set_pretenure_create_count(Smi::FromInt(count), SKIP_WRITE_BARRIER);
}
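
The accessors above pack three logical fields into the single pretenure_data Smi and decode them on demand. A self-contained sketch of the same round trip, using a simplified stand-in for v8's BitField template (the real template differs in detail):

    #include <cassert>

    // Simplified stand-in for BitField<T, shift, size>: kMax is the field's
    // maximum value, update() splices a value in, decode() extracts it.
    template <class T, int shift, int size>
    struct BitField {
      static const int kMax = (1 << size) - 1;
      static int update(int previous, T value) {
        return (previous & ~(kMax << shift)) |
               (static_cast<int>(value) << shift);
      }
      static T decode(int value) {
        return static_cast<T>((value >> shift) & kMax);
      }
    };

    enum PretenureDecision { kUndecided, kDontTenure, kMaybeTenure, kTenure };

    // Same layout as pretenure_data above: count, decision, deopt bit.
    typedef BitField<int, 0, 26> MementoFoundCountBits;
    typedef BitField<PretenureDecision, 26, 3> PretenureDecisionBits;
    typedef BitField<bool, 29, 1> DeoptDependentCodeBit;

    int main() {
      int data = 0;
      data = MementoFoundCountBits::update(data, 1000);
      data = PretenureDecisionBits::update(data, kMaybeTenure);
      data = DeoptDependentCodeBit::update(data, true);
      assert(MementoFoundCountBits::decode(data) == 1000);
      assert(PretenureDecisionBits::decode(data) == kMaybeTenure);
      assert(DeoptDependentCodeBit::decode(data));
      return 0;
    }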
// The pretenuring decision is made during gc, and the zombie state allows
@@ -8161,12 +8648,20 @@ class AllocationSite: public Struct {
// a later traversal of new space may discover AllocationMementos that point
// to this AllocationSite.
bool IsZombie() {
- return pretenure_decision()->value() == kZombie;
+ return pretenure_decision() == kZombie;
+ }
+
+ bool IsMaybeTenure() {
+ return pretenure_decision() == kMaybeTenure;
}
inline void MarkZombie();
- inline bool DigestPretenuringFeedback();
+ inline bool MakePretenureDecision(PretenureDecision current_decision,
+ double ratio,
+ bool maximum_size_scavenge);
+
+ inline bool DigestPretenuringFeedback(bool maximum_size_scavenge);
ElementsKind GetElementsKind() {
ASSERT(!SitePointsToLiteral());
@@ -8198,15 +8693,17 @@ class AllocationSite: public Struct {
return transition_info()->IsJSArray() || transition_info()->IsJSObject();
}
- MaybeObject* DigestTransitionFeedback(ElementsKind to_kind);
+ static void DigestTransitionFeedback(Handle<AllocationSite> site,
+ ElementsKind to_kind);
enum Reason {
TENURING,
TRANSITIONS
};
- void AddDependentCompilationInfo(Reason reason, CompilationInfo* info);
- void AddDependentCode(Reason reason, Handle<Code> code);
+ static void AddDependentCompilationInfo(Handle<AllocationSite> site,
+ Reason reason,
+ CompilationInfo* info);
DECLARE_PRINTER(AllocationSite)
DECLARE_VERIFIER(AllocationSite)
@@ -8219,13 +8716,11 @@ class AllocationSite: public Struct {
static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
static const int kNestedSiteOffset = kTransitionInfoOffset + kPointerSize;
- static const int kMementoFoundCountOffset = kNestedSiteOffset + kPointerSize;
- static const int kMementoCreateCountOffset =
- kMementoFoundCountOffset + kPointerSize;
- static const int kPretenureDecisionOffset =
- kMementoCreateCountOffset + kPointerSize;
+ static const int kPretenureDataOffset = kNestedSiteOffset + kPointerSize;
+ static const int kPretenureCreateCountOffset =
+ kPretenureDataOffset + kPointerSize;
static const int kDependentCodeOffset =
- kPretenureDecisionOffset + kPointerSize;
+ kPretenureCreateCountOffset + kPointerSize;
static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
static const int kSize = kWeakNextOffset + kPointerSize;
@@ -8242,7 +8737,7 @@ class AllocationSite: public Struct {
private:
inline DependentCode::DependencyGroup ToDependencyGroup(Reason reason);
bool PretenuringDecisionMade() {
- return pretenure_decision()->value() != kUndecided;
+ return pretenure_decision() != kUndecided;
}
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
@@ -8268,9 +8763,6 @@ class AllocationMemento: public Struct {
DECLARE_PRINTER(AllocationMemento)
DECLARE_VERIFIER(AllocationMemento)
- // Returns NULL if no AllocationMemento is available for object.
- static AllocationMemento* FindForJSObject(JSObject* object,
- bool in_GC = false);
static inline AllocationMemento* cast(Object* obj);
private:
@@ -8278,8 +8770,8 @@ class AllocationMemento: public Struct {
};
-// Representation of a slow alias as part of a non-strict arguments objects.
-// For fast aliases (if HasNonStrictArgumentsElements()):
+// Representation of a slow alias as part of a sloppy arguments object.
+// For fast aliases (if HasSloppyArgumentsElements()):
// - the parameter map contains an index into the context
// - all attributes of the element have default values
// For slow aliases (if HasDictionaryArgumentsElements()):
@@ -8428,6 +8920,7 @@ class Name: public HeapObject {
// Equality operations.
inline bool Equals(Name* other);
+ inline static bool Equals(Handle<Name> one, Handle<Name> two);
// Conversion.
inline bool AsArrayIndex(uint32_t* index);
@@ -8435,8 +8928,6 @@ class Name: public HeapObject {
// Casting.
static inline Name* cast(Object* obj);
- bool IsCacheable(Isolate* isolate);
-
DECLARE_PRINTER(Name)
// Layout description.
@@ -8469,23 +8960,21 @@ class Name: public HeapObject {
static const int kArrayIndexLengthBits =
kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
- STATIC_CHECK((kArrayIndexLengthBits > 0));
-
- static const int kArrayIndexHashLengthShift =
- kArrayIndexValueBits + kNofHashBitFields;
+ STATIC_ASSERT((kArrayIndexLengthBits > 0));
- static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
-
- static const int kArrayIndexValueMask =
- ((1 << kArrayIndexValueBits) - 1) << kHashShift;
+ class ArrayIndexValueBits : public BitField<unsigned int, kNofHashBitFields,
+ kArrayIndexValueBits> {}; // NOLINT
+ class ArrayIndexLengthBits : public BitField<unsigned int,
+ kNofHashBitFields + kArrayIndexValueBits,
+ kArrayIndexLengthBits> {}; // NOLINT
// Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
// can use a mask to test if the length of a string is less than or equal to
// kMaxCachedArrayIndexLength.
- STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
+ STATIC_ASSERT(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
- static const int kContainsCachedArrayIndexMask =
- (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
+ static const unsigned int kContainsCachedArrayIndexMask =
+ (~kMaxCachedArrayIndexLength << ArrayIndexLengthBits::kShift) |
kIsNotArrayIndexMask;
// Value of empty hash field indicating that the hash is not computed.
@@ -8546,6 +9035,33 @@ class String: public Name {
public:
enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
+ // Array index strings this short can keep their index in the hash field.
+ static const int kMaxCachedArrayIndexLength = 7;
+
+ // For strings which are array indexes, the hash value has the string length
+ // mixed into the hash, mainly to avoid a hash value of zero which would be
+ // the case for the string '0'. 24 bits are used for the array index value.
+ static const int kArrayIndexValueBits = 24;
+ static const int kArrayIndexLengthBits =
+ kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
+
+ STATIC_ASSERT((kArrayIndexLengthBits > 0));
+
+ class ArrayIndexValueBits : public BitField<unsigned int, kNofHashBitFields,
+ kArrayIndexValueBits> {}; // NOLINT
+ class ArrayIndexLengthBits : public BitField<unsigned int,
+ kNofHashBitFields + kArrayIndexValueBits,
+ kArrayIndexLengthBits> {}; // NOLINT
+
+ // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
+ // can use a mask to test if the length of a string is less than or equal to
+ // kMaxCachedArrayIndexLength.
+ STATIC_ASSERT(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
+
+ static const unsigned int kContainsCachedArrayIndexMask =
+ (~kMaxCachedArrayIndexLength << ArrayIndexLengthBits::kShift) |
+ kIsNotArrayIndexMask;
+
// Representation of the flat content of a String.
// A non-flat string doesn't have flat content.
// A flat string has content that's encoded as a sequence of either
@@ -8564,28 +9080,37 @@ class String: public Name {
// true.
Vector<const uint8_t> ToOneByteVector() {
ASSERT_EQ(ASCII, state_);
- return buffer_;
+ return Vector<const uint8_t>(onebyte_start, length_);
}
// Return the two-byte content of the string. Only use if IsTwoByte()
// returns true.
Vector<const uc16> ToUC16Vector() {
ASSERT_EQ(TWO_BYTE, state_);
- return Vector<const uc16>::cast(buffer_);
+ return Vector<const uc16>(twobyte_start, length_);
+ }
+
+ uc16 Get(int i) {
+ ASSERT(i < length_);
+ ASSERT(state_ != NON_FLAT);
+ if (state_ == ASCII) return onebyte_start[i];
+ return twobyte_start[i];
}
private:
enum State { NON_FLAT, ASCII, TWO_BYTE };
// Constructors only used by String::GetFlatContent().
- explicit FlatContent(Vector<const uint8_t> chars)
- : buffer_(chars),
- state_(ASCII) { }
- explicit FlatContent(Vector<const uc16> chars)
- : buffer_(Vector<const byte>::cast(chars)),
- state_(TWO_BYTE) { }
- FlatContent() : buffer_(), state_(NON_FLAT) { }
-
- Vector<const uint8_t> buffer_;
+ explicit FlatContent(const uint8_t* start, int length)
+ : onebyte_start(start), length_(length), state_(ASCII) { }
+ explicit FlatContent(const uc16* start, int length)
+ : twobyte_start(start), length_(length), state_(TWO_BYTE) { }
+ FlatContent() : onebyte_start(NULL), length_(0), state_(NON_FLAT) { }
+
+ union {
+ const uint8_t* onebyte_start;
+ const uc16* twobyte_start;
+ };
+ int length_;
State state_;
friend class String;
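
A plausible reader built on the new pointer-plus-length layout; it assumes a flat input string and the GetFlatContent()/IsAscii() interface of this class, and is a sketch rather than code from this change:

    // Sums the character codes of a flat string via FlatContent. The
    // DisallowHeapAllocation scope guards the raw character pointers.
    uint32_t SumCharCodes(String* string) {
      DisallowHeapAllocation no_gc;
      String::FlatContent content = string->GetFlatContent();
      uint32_t sum = 0;
      if (content.IsAscii()) {
        Vector<const uint8_t> chars = content.ToOneByteVector();
        for (int i = 0; i < chars.length(); i++) sum += chars[i];
      } else {
        Vector<const uc16> chars = content.ToUC16Vector();
        for (int i = 0; i < chars.length(); i++) sum += chars[i];
      }
      return sum;
    }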
@@ -8595,6 +9120,11 @@ class String: public Name {
inline int length();
inline void set_length(int value);
+ // Get and set the length of the string using acquire loads and release
+ // stores.
+ inline int synchronized_length();
+ inline void synchronized_set_length(int value);
+
// Returns whether this string has only ASCII chars, i.e. all of them can
// be ASCII encoded. This might be the case even if the string is
// two-byte. Such strings may appear when the embedder prefers
@@ -8618,7 +9148,7 @@ class String: public Name {
// to this method are not efficient unless the string is flat.
INLINE(uint16_t Get(int index));
- // Try to flatten the string. Checks first inline to see if it is
+ // Flattens the string. First checks inline whether flattening is
// necessary. Does nothing if the string is not a cons string.
// Flattening allocates a sequential string with the same data as
// the given string and mutates the cons string to a degenerate
@@ -8630,15 +9160,9 @@ class String: public Name {
//
// Degenerate cons strings are handled specially by the garbage
// collector (see IsShortcutCandidate).
- //
- // Use FlattenString from Handles.cc to flatten even in case an
- // allocation failure happens.
- inline MaybeObject* TryFlatten(PretenureFlag pretenure = NOT_TENURED);
- // Convenience function. Has exactly the same behavior as
- // TryFlatten(), except in the case of failure returns the original
- // string.
- inline String* TryFlattenGetString(PretenureFlag pretenure = NOT_TENURED);
+ static inline Handle<String> Flatten(Handle<String> string,
+ PretenureFlag pretenure = NOT_TENURED);
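
The degenerate-cons contract described above, modeled on plain data structures; ToyString, Collect() and Flatten() are illustrative stand-ins, not v8 types:

    #include <cstddef>
    #include <string>

    struct ToyString {
      std::string flat;    // leaf payload; used when first is NULL
      ToyString* first;    // non-NULL iff this node is a cons
      ToyString* second;
      ToyString() : first(NULL), second(NULL) {}
      bool IsCons() const { return first != NULL; }
    };

    std::string Collect(const ToyString* s) {
      if (!s->IsCons()) return s->flat;
      return Collect(s->first) + Collect(s->second);
    }

    // Allocate a sequential copy, then mutate the cons node into the
    // degenerate form (flat copy, empty string) so every existing
    // reference to the cons now reaches the flat data.
    ToyString* Flatten(ToyString* s) {
      if (!s->IsCons()) return s;
      ToyString* seq = new ToyString();
      seq->flat = Collect(s);
      static ToyString empty;  // shared empty-string leaf
      s->first = seq;
      s->second = &empty;
      return seq;
    }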
// Tries to return the content of a flat string as a structure holding either
// a flat vector of char or of uc16.
@@ -8655,13 +9179,9 @@ class String: public Name {
// ASCII and two byte string types.
bool MarkAsUndetectable();
- // Return a substring.
- MUST_USE_RESULT MaybeObject* SubString(int from,
- int to,
- PretenureFlag pretenure = NOT_TENURED);
-
// String equality operations.
inline bool Equals(String* other);
+ inline static bool Equals(Handle<String> one, Handle<String> two);
bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false);
bool IsOneByteEqualTo(Vector<const uint8_t> str);
bool IsTwoByteEqualTo(Vector<const uc16> str);
@@ -8727,18 +9247,19 @@ class String: public Name {
// Maximum number of characters to consider when trying to convert a string
// value into an array index.
static const int kMaxArrayIndexSize = 10;
- STATIC_CHECK(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
+ STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
static const int kMaxUtf16CodeUnit = 0xffff;
+ static const uint32_t kMaxUtf16CodeUnitU = kMaxUtf16CodeUnit;
// Value of hash field containing computed hash equal to zero.
static const int kEmptyStringHash = kIsNotArrayIndexMask;
// Maximal string length.
- static const int kMaxLength = (1 << (32 - 2)) - 1;
+ static const int kMaxLength = (1 << 28) - 16;
// Max length for computing hash. For strings longer than this limit the
// string length is used as the hash value.
@@ -8805,42 +9326,26 @@ class String: public Name {
return NonOneByteStart(chars, length) >= length;
}
- // TODO(dcarney): Replace all instances of this with VisitFlat.
- template<class Visitor, class ConsOp>
- static inline void Visit(String* string,
- unsigned offset,
- Visitor& visitor,
- ConsOp& cons_op,
- int32_t type,
- unsigned length);
-
template<class Visitor>
static inline ConsString* VisitFlat(Visitor* visitor,
String* string,
- int offset,
- int length,
- int32_t type);
+ int offset = 0);
- template<class Visitor>
- static inline ConsString* VisitFlat(Visitor* visitor,
- String* string,
- int offset = 0) {
- int32_t type = string->map()->instance_type();
- return VisitFlat(visitor, string, offset, string->length(), type);
- }
+ static Handle<FixedArray> CalculateLineEnds(Handle<String> string,
+ bool include_ending_line);
private:
friend class Name;
- // Try to flatten the top level ConsString that is hiding behind this
- // string. This is a no-op unless the string is a ConsString. Flatten
- // mutates the ConsString and might return a failure.
- MUST_USE_RESULT MaybeObject* SlowTryFlatten(PretenureFlag pretenure);
+ static Handle<String> SlowFlatten(Handle<ConsString> cons,
+ PretenureFlag tenure);
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
+ static bool SlowEquals(Handle<String> one, Handle<String> two);
+
// Slow case of AsArrayIndex.
bool SlowAsArrayIndex(uint32_t* index);
@@ -8900,9 +9405,7 @@ class SeqOneByteString: public SeqString {
// Maximal memory usage for a single sequential ASCII string.
static const int kMaxSize = 512 * MB - 1;
- // Maximal length of a single sequential ASCII string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize);
+ STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
@@ -8942,9 +9445,8 @@ class SeqTwoByteString: public SeqString {
// Maximal memory usage for a single sequential two-byte string.
static const int kMaxSize = 512 * MB - 1;
- // Maximal length of a single sequential two-byte string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
+ STATIC_ASSERT(static_cast<int>((kMaxSize - kHeaderSize)/sizeof(uint16_t)) >=
+ String::kMaxLength);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
@@ -9072,7 +9574,7 @@ class ExternalString: public String {
// Return whether external string is short (data pointer is not cached).
inline bool is_short();
- STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
+ STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
@@ -9213,57 +9715,63 @@ class ConsStringNullOp {
// This maintains an off-stack representation of the stack frames required
// to traverse a ConsString, allowing an entirely iterative and restartable
// traversal of the entire string.
-// Note: this class is not GC-safe.
class ConsStringIteratorOp {
public:
inline ConsStringIteratorOp() {}
- String* Operate(String* string,
- unsigned* offset_out,
- int32_t* type_out,
- unsigned* length_out);
- inline String* ContinueOperation(int32_t* type_out, unsigned* length_out);
- inline void Reset();
- inline bool HasMore();
+ inline ConsStringIteratorOp(ConsString* cons_string, int offset = 0) {
+ Reset(cons_string, offset);
+ }
+ inline void Reset(ConsString* cons_string, int offset = 0) {
+ depth_ = 0;
+ // With a NULL cons_string, Next() will always return NULL.
+ if (cons_string == NULL) return;
+ Initialize(cons_string, offset);
+ }
+ // Returns NULL when complete.
+ inline String* Next(int* offset_out) {
+ *offset_out = 0;
+ if (depth_ == 0) return NULL;
+ return Continue(offset_out);
+ }
private:
- // TODO(dcarney): Templatize this out for different stack sizes.
- static const unsigned kStackSize = 32;
+ static const int kStackSize = 32;
// Use a mask instead of doing modulo operations for stack wrapping.
- static const unsigned kDepthMask = kStackSize-1;
+ static const int kDepthMask = kStackSize-1;
STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize));
- static inline unsigned OffsetForDepth(unsigned depth);
+ static inline int OffsetForDepth(int depth);
inline void PushLeft(ConsString* string);
inline void PushRight(ConsString* string);
inline void AdjustMaximumDepth();
inline void Pop();
- String* NextLeaf(bool* blew_stack, int32_t* type_out, unsigned* length_out);
- String* Search(unsigned* offset_out,
- int32_t* type_out,
- unsigned* length_out);
+ inline bool StackBlown() { return maximum_depth_ - depth_ == kStackSize; }
+ void Initialize(ConsString* cons_string, int offset);
+ String* Continue(int* offset_out);
+ String* NextLeaf(bool* blew_stack);
+ String* Search(int* offset_out);
- unsigned depth_;
- unsigned maximum_depth_;
// Stack must always contain only frames for which right traversal
// has not yet been performed.
ConsString* frames_[kStackSize];
- unsigned consumed_;
ConsString* root_;
+ int depth_;
+ int maximum_depth_;
+ int consumed_;
DISALLOW_COPY_AND_ASSIGN(ConsStringIteratorOp);
};
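
A plausible consumer of the reworked Reset()/Next() interface; the IsConsString() and ConsString::cast() calls are assumed from the surrounding codebase, and the sketch is illustrative only:

    // Counts the characters reachable from |string| by walking its leaf
    // segments. Next() returning NULL ends the walk; |offset| is the
    // position within the returned segment at which traversal resumes.
    int CountChars(String* string, ConsStringIteratorOp* op) {
      if (!string->IsConsString()) return string->length();
      op->Reset(ConsString::cast(string));
      int total = 0;
      int offset;
      while (String* leaf = op->Next(&offset)) {
        total += leaf->length() - offset;
      }
      return total;
    }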
-// Note: this class is not GC-safe.
class StringCharacterStream {
public:
inline StringCharacterStream(String* string,
ConsStringIteratorOp* op,
- unsigned offset = 0);
+ int offset = 0);
inline uint16_t GetNext();
inline bool HasMore();
- inline void Reset(String* string, unsigned offset = 0);
- inline void VisitOneByteString(const uint8_t* chars, unsigned length);
- inline void VisitTwoByteString(const uint16_t* chars, unsigned length);
+ inline void Reset(String* string, int offset = 0);
+ inline void VisitOneByteString(const uint8_t* chars, int length);
+ inline void VisitTwoByteString(const uint16_t* chars, int length);
private:
bool is_one_byte_;
@@ -9309,10 +9817,11 @@ class Oddball: public HeapObject {
DECLARE_VERIFIER(Oddball)
// Initialize the fields.
- MUST_USE_RESULT MaybeObject* Initialize(Heap* heap,
- const char* to_string,
- Object* to_number,
- byte kind);
+ static void Initialize(Isolate* isolate,
+ Handle<Oddball> oddball,
+ const char* to_string,
+ Handle<Object> to_number,
+ byte kind);
// Layout description.
static const int kToStringOffset = HeapObject::kHeaderSize;
@@ -9329,14 +9838,15 @@ class Oddball: public HeapObject {
static const byte kUndefined = 5;
static const byte kUninitialized = 6;
static const byte kOther = 7;
+ static const byte kException = 8;
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
kSize> BodyDescriptor;
- STATIC_CHECK(kKindOffset == Internals::kOddballKindOffset);
- STATIC_CHECK(kNull == Internals::kNullOddballKind);
- STATIC_CHECK(kUndefined == Internals::kUndefinedOddballKind);
+ STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
+ STATIC_ASSERT(kNull == Internals::kNullOddballKind);
+ STATIC_ASSERT(kUndefined == Internals::kUndefinedOddballKind);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball);
@@ -9381,8 +9891,8 @@ class Cell: public HeapObject {
class PropertyCell: public Cell {
public:
// [type]: type of the global property.
- Type* type();
- void set_type(Type* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ HeapType* type();
+ void set_type(HeapType* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [dependent_code]: dependent code that depends on the type of the global
// property.
@@ -9397,12 +9907,11 @@ class PropertyCell: public Cell {
// Computes the new type of the cell's contents for the given value, but
// without actually modifying the 'type' field.
- static Handle<Type> UpdatedType(Handle<PropertyCell> cell,
- Handle<Object> value);
+ static Handle<HeapType> UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value);
- void AddDependentCompilationInfo(CompilationInfo* info);
-
- void AddDependentCode(Handle<Code> code);
+ static void AddDependentCompilationInfo(Handle<PropertyCell> cell,
+ CompilationInfo* info);
// Casting.
static inline PropertyCell* cast(Object* obj);
@@ -9445,31 +9954,36 @@ class JSProxy: public JSReceiver {
// Casting.
static inline JSProxy* cast(Object* obj);
- MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
- Object* receiver,
- Name* name);
- MUST_USE_RESULT MaybeObject* GetElementWithHandler(
- Object* receiver,
+ MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<Object> receiver,
+ Handle<Name> name);
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetElementWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<Object> receiver,
uint32_t index);
// If the handler defines an accessor property with a setter, invoke it.
// If it defines an accessor property without a setter, or a data property
// that is read-only, throw. In all these cases set '*done' to true,
// otherwise set it to false.
- static Handle<Object> SetPropertyViaPrototypesWithHandler(
+ MUST_USE_RESULT
+ static MaybeHandle<Object> SetPropertyViaPrototypesWithHandler(
Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done);
- MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
- JSReceiver* receiver,
- Name* name);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
- JSReceiver* receiver,
+ static PropertyAttributes GetPropertyAttributesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<Object> receiver,
+ Handle<Name> name);
+ static PropertyAttributes GetElementAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
uint32_t index);
// Turn the proxy into an (empty) JSObject.
@@ -9480,10 +9994,12 @@ class JSProxy: public JSReceiver {
// Invoke a trap by name. If the trap does not exist on this's handler,
// but derived_trap is non-NULL, invoke that instead. May cause GC.
- Handle<Object> CallTrap(const char* name,
- Handle<Object> derived_trap,
- int argc,
- Handle<Object> args[]);
+ MUST_USE_RESULT static MaybeHandle<Object> CallTrap(
+ Handle<JSProxy> proxy,
+ const char* name,
+ Handle<Object> derived_trap,
+ int argc,
+ Handle<Object> args[]);
// Dispatched behavior.
DECLARE_PRINTER(JSProxy)
@@ -9499,7 +10015,7 @@ class JSProxy: public JSReceiver {
static const int kHeaderSize = kPaddingOffset;
static const int kPaddingSize = kSize - kPaddingOffset;
- STATIC_CHECK(kPaddingSize >= 0);
+ STATIC_ASSERT(kPaddingSize >= 0);
typedef FixedBodyDescriptor<kHandlerOffset,
kPaddingOffset,
@@ -9508,31 +10024,36 @@ class JSProxy: public JSReceiver {
private:
friend class JSReceiver;
- static Handle<Object> SetPropertyWithHandler(Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictMode strict_mode);
+ MUST_USE_RESULT static inline MaybeHandle<Object> SetElementWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ Handle<Object> value,
+ StrictMode strict_mode);
static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name);
- static bool HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index);
+ static inline bool HasElementWithHandler(Handle<JSProxy> proxy,
+ uint32_t index);
- static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> proxy,
- Handle<Name> name,
- DeleteMode mode);
- static Handle<Object> DeleteElementWithHandler(Handle<JSProxy> proxy,
- uint32_t index,
- DeleteMode mode);
+ MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<Name> name,
+ DeleteMode mode);
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteElementWithHandler(
+ Handle<JSProxy> proxy,
+ uint32_t index,
+ DeleteMode mode);
MUST_USE_RESULT Object* GetIdentityHash();
- static Handle<Object> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
+ static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
@@ -9560,7 +10081,7 @@ class JSFunctionProxy: public JSProxy {
static const int kSize = JSFunction::kSize;
static const int kPaddingSize = kSize - kPaddingOffset;
- STATIC_CHECK(kPaddingSize >= 0);
+ STATIC_ASSERT(kPaddingSize >= 0);
typedef FixedBodyDescriptor<kHandlerOffset,
kConstructTrapOffset + kPointerSize,
@@ -9613,6 +10134,97 @@ class JSMap: public JSObject {
};
+// OrderedHashTableIterator is an iterator that iterates over the keys and
+// values of an OrderedHashTable.
+//
+// The iterator has a reference to the underlying OrderedHashTable data,
+// [table], as well as the iterator's current [index].
+//
+// When the OrderedHashTable is rehashed, it adds a reference from the old table
+// to the new table and stores enough data about the changes so that the
+// iterator [index] can be adjusted accordingly.
+//
+// When the [Next] result from the iterator is requested, the iterator checks if
+// there is a newer table that it needs to transition to.
+template<class Derived, class TableType>
+class OrderedHashTableIterator: public JSObject {
+ public:
+ // [table]: the backing hash table mapping keys to values.
+ DECL_ACCESSORS(table, Object)
+
+ // [index]: The index into the data table.
+ DECL_ACCESSORS(index, Smi)
+
+ // [kind]: The kind of iteration this is. One of the [Kind] enum values.
+ DECL_ACCESSORS(kind, Smi)
+
+#ifdef OBJECT_PRINT
+ void OrderedHashTableIteratorPrint(FILE* out);
+#endif
+
+ static const int kTableOffset = JSObject::kHeaderSize;
+ static const int kIndexOffset = kTableOffset + kPointerSize;
+ static const int kKindOffset = kIndexOffset + kPointerSize;
+ static const int kSize = kKindOffset + kPointerSize;
+
+ enum Kind {
+ kKindKeys = 1,
+ kKindValues = 2,
+ kKindEntries = 3
+ };
+
+ // Returns an iterator result object: {value: any, done: boolean} and moves
+ // the index to the next valid entry. Closes the iterator if moving past the
+ // end.
+ static Handle<JSObject> Next(Handle<Derived> iterator);
+
+ private:
+ // Transitions the iterator to the non-obsolete backing store. This is a NOP
+ // if the [table] is not obsolete.
+ void Transition();
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(OrderedHashTableIterator);
+};
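
The table hand-off described in the class comment, modeled without the heap types: an obsolete table points at its replacement and carries a precomputed position remapping. All names here are illustrative, not the real OrderedHashTable layout:

    #include <cstddef>
    #include <vector>

    struct ToyTable {
      std::vector<int> entries;
      ToyTable* next_table;        // non-NULL once this table is obsolete
      std::vector<int> new_index;  // entries.size() + 1 slots: old -> new
      ToyTable() : next_table(NULL) {}
    };

    struct ToyIterator {
      ToyTable* table;
      int index;

      // Mirrors Transition(): a NOP unless [table] has been superseded.
      void Retarget() {
        while (table->next_table != NULL) {
          index = table->new_index[index];  // adjust for removals/moves
          table = table->next_table;
        }
      }

      // Mirrors Next(): retarget first, then yield the current entry.
      bool Next(int* value_out) {
        Retarget();
        if (index >= static_cast<int>(table->entries.size())) return false;
        *value_out = table->entries[index++];
        return true;
      }
    };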
+
+
+class JSSetIterator: public OrderedHashTableIterator<JSSetIterator,
+ OrderedHashSet> {
+ public:
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSSetIterator)
+ DECLARE_VERIFIER(JSSetIterator)
+
+ // Casting.
+ static inline JSSetIterator* cast(Object* obj);
+
+ static Handle<Object> ValueForKind(
+ Handle<JSSetIterator> iterator,
+ int entry_index);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSSetIterator);
+};
+
+
+class JSMapIterator: public OrderedHashTableIterator<JSMapIterator,
+ OrderedHashMap> {
+ public:
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSMapIterator)
+ DECLARE_VERIFIER(JSMapIterator)
+
+ // Casting.
+ static inline JSMapIterator* cast(Object* obj);
+
+ static Handle<Object> ValueForKind(
+ Handle<JSMapIterator> iterator,
+ int entry_index);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSMapIterator);
+};
+
+
// Base class for both JSWeakMap and JSWeakSet
class JSWeakCollection: public JSObject {
public:
@@ -9760,6 +10372,8 @@ class JSTypedArray: public JSArrayBufferView {
ExternalArrayType type();
size_t element_size();
+ Handle<JSArrayBuffer> GetBuffer();
+
// Dispatched behavior.
DECLARE_PRINTER(JSTypedArray)
DECLARE_VERIFIER(JSTypedArray)
@@ -9771,6 +10385,9 @@ class JSTypedArray: public JSArrayBufferView {
kSize + v8::ArrayBufferView::kInternalFieldCount * kPointerSize;
private:
+ static Handle<JSArrayBuffer> MaterializeArrayBuffer(
+ Handle<JSTypedArray> typed_array);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
};
@@ -9824,7 +10441,7 @@ class Foreign: public HeapObject {
static const int kForeignAddressOffset = HeapObject::kHeaderSize;
static const int kSize = kForeignAddressOffset + kPointerSize;
- STATIC_CHECK(kForeignAddressOffset == Internals::kForeignAddressOffset);
+ STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
@@ -9849,28 +10466,38 @@ class JSArray: public JSObject {
uint32_t index,
Handle<Object> value);
- MUST_USE_RESULT MaybeObject* JSArrayUpdateLengthFromIndex(uint32_t index,
- Object* value);
+ static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
+ static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
+ static MaybeHandle<Object> ReadOnlyLengthError(Handle<JSArray> array);
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
// capacity is non-zero.
- MUST_USE_RESULT MaybeObject* Initialize(int capacity, int length = 0);
+ static void Initialize(Handle<JSArray> array, int capacity, int length = 0);
// Initializes the array to a certain length.
inline bool AllowsSetElementsLength();
// Can cause GC.
- MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
+ MUST_USE_RESULT static MaybeHandle<Object> SetElementsLength(
+ Handle<JSArray> array,
+ Handle<Object> length);
// Set the content of the array to the content of storage.
- MUST_USE_RESULT inline MaybeObject* SetContent(FixedArrayBase* storage);
+ static inline void SetContent(Handle<JSArray> array,
+ Handle<FixedArrayBase> storage);
// Casting.
static inline JSArray* cast(Object* obj);
- // Uses handles. Ensures that the fixed array backing the JSArray has at
+ // Ensures that the fixed array backing the JSArray has at
// least the stated size.
- inline void EnsureSize(int minimum_size_of_backing_fixed_array);
+ static inline void EnsureSize(Handle<JSArray> array,
+ int minimum_size_of_backing_fixed_array);
+
+ // Expand the fixed array backing of a fast-case JSArray to at least
+ // the requested size.
+ static void Expand(Handle<JSArray> array,
+ int minimum_size_of_backing_fixed_array);
// Dispatched behavior.
DECLARE_PRINTER(JSArray)
@@ -9884,10 +10511,6 @@ class JSArray: public JSObject {
static const int kSize = kLengthOffset + kPointerSize;
private:
- // Expand the fixed array backing of a fast-case JSArray to at least
- // the requested size.
- void Expand(int minimum_size_of_backing_fixed_array);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
};
@@ -9928,9 +10551,6 @@ class AccessorInfo: public Struct {
inline bool all_can_write();
inline void set_all_can_write(bool value);
- inline bool prohibits_overwriting();
- inline void set_prohibits_overwriting(bool value);
-
inline PropertyAttributes property_attributes();
inline void set_property_attributes(PropertyAttributes attributes);
@@ -9957,8 +10577,7 @@ class AccessorInfo: public Struct {
// Bit positions in flag.
static const int kAllCanReadBit = 0;
static const int kAllCanWriteBit = 1;
- static const int kProhibitsOverwritingBit = 2;
- class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
+ class AttributesField: public BitField<PropertyAttributes, 2, 3> {};
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
};
@@ -10080,7 +10699,7 @@ class DeclaredAccessorInfo: public AccessorInfo {
// the request is ignored.
//
// If the accessor in the prototype has the READ_ONLY property attribute, then
-// a new value is added to the local object when the property is set.
+// a new value is added to the derived object when the property is set.
// This shadows the accessor in the prototype.
class ExecutableAccessorInfo: public AccessorInfo {
public:
@@ -10099,6 +10718,8 @@ class ExecutableAccessorInfo: public AccessorInfo {
static const int kDataOffset = kSetterOffset + kPointerSize;
static const int kSize = kDataOffset + kPointerSize;
+ inline void clear_setter();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo);
};
@@ -10121,7 +10742,6 @@ class AccessorPair: public Struct {
inline void set_access_flags(v8::AccessControl access_control);
inline bool all_can_read();
inline bool all_can_write();
- inline bool prohibits_overwriting();
static inline AccessorPair* cast(Object* obj);
@@ -10164,7 +10784,6 @@ class AccessorPair: public Struct {
private:
static const int kAllCanReadBit = 0;
static const int kAllCanWriteBit = 1;
- static const int kProhibitsOverwritingBit = 2;
// Strangely enough, in addition to functions and harmony proxies, the spec
// requires us to consider undefined as a kind of accessor, too:
@@ -10393,7 +11012,6 @@ class TypeSwitchInfo: public Struct {
};
-#ifdef ENABLE_DEBUGGER_SUPPORT
// The DebugInfo class holds additional information for a function being
// debugged.
class DebugInfo: public Struct {
@@ -10443,6 +11061,8 @@ class DebugInfo: public Struct {
kActiveBreakPointsCountIndex + kPointerSize;
static const int kSize = kBreakPointsStateIndex + kPointerSize;
+ static const int kEstimatedNofBreakPointsInFunction = 16;
+
private:
static const int kNoBreakPointInfo = -1;
@@ -10497,7 +11117,6 @@ class BreakPointInfo: public Struct {
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
};
-#endif // ENABLE_DEBUGGER_SUPPORT
#undef DECL_BOOLEAN_ACCESSORS
@@ -10508,6 +11127,7 @@ class BreakPointInfo: public Struct {
V(kStringTable, "string_table", "(Internalized strings)") \
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
V(kStrongRootList, "strong_root_list", "(Strong roots)") \
+ V(kSmiRootList, "smi_root_list", "(Smi roots)") \
V(kInternalizedString, "internalized_string", "(Internal string)") \
V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
V(kTop, "top", "(Isolate)") \
@@ -10547,6 +11167,9 @@ class ObjectVisitor BASE_EMBEDDED {
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+ // Visit weak next_code_link in Code object.
+ virtual void VisitNextCodeLink(Object** p) { VisitPointers(p, p + 1); }
+
// To allow lazy clearing of inline caches the visitor has
// a rich interface for iterating over Code objects.
diff --git a/chromium/v8/src/once.cc b/chromium/v8/src/once.cc
deleted file mode 100644
index 37fe369fb67..00000000000
--- a/chromium/v8/src/once.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "once.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <sched.h>
-#endif
-
-#include "atomicops.h"
-#include "checks.h"
-
-namespace v8 {
-namespace internal {
-
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
- AtomicWord state = Acquire_Load(once);
- // Fast path. The provided function was already executed.
- if (state == ONCE_STATE_DONE) {
- return;
- }
-
- // The function execution did not complete yet. The once object can be in one
- // of the two following states:
- // - UNINITIALIZED: We are the first thread calling this function.
- // - EXECUTING_FUNCTION: Another thread is already executing the function.
- //
- // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
- // atomically.
- state = Acquire_CompareAndSwap(
- once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
- if (state == ONCE_STATE_UNINITIALIZED) {
- // We are the first thread to call this function, so we have to call the
- // function.
- init_func(arg);
- Release_Store(once, ONCE_STATE_DONE);
- } else {
- // Another thread has already started executing the function. We need to
- // wait until it completes the initialization.
- while (state == ONCE_STATE_EXECUTING_FUNCTION) {
-#ifdef _WIN32
- ::Sleep(0);
-#else
- sched_yield();
-#endif
- state = Acquire_Load(once);
- }
- }
-}
-
-} } // namespace v8::internal
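
For reference, the protocol the deleted file implements (acquire-load fast path, compare-and-swap to claim initialization, yield until done) is the guarantee std::call_once provides since C++11; a minimal equivalent sketch:

    #include <mutex>

    static std::once_flag init_flag;

    static void InitFunc(void* arg) {
      (void)arg;  // one-time initialization would go here
    }

    void CallOnce(void* arg) {
      // Exactly one caller runs InitFunc; concurrent callers block until
      // the initialization has completed, matching CallOnceImpl above.
      std::call_once(init_flag, InitFunc, arg);
    }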
diff --git a/chromium/v8/src/optimizing-compiler-thread.cc b/chromium/v8/src/optimizing-compiler-thread.cc
index 32a7f971401..987bac2768f 100644
--- a/chromium/v8/src/optimizing-compiler-thread.cc
+++ b/chromium/v8/src/optimizing-compiler-thread.cc
@@ -1,38 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "optimizing-compiler-thread.h"
-
-#include "v8.h"
-
-#include "full-codegen.h"
-#include "hydrogen.h"
-#include "isolate.h"
-#include "v8threads.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/optimizing-compiler-thread.h"
+
+#include "src/v8.h"
+
+#include "src/base/atomicops.h"
+#include "src/full-codegen.h"
+#include "src/hydrogen.h"
+#include "src/isolate.h"
+#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -74,7 +52,7 @@ void OptimizingCompilerThread::Run() {
OS::Sleep(FLAG_concurrent_recompilation_delay);
}
- switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
+ switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
case CONTINUE:
break;
case STOP:
@@ -88,7 +66,8 @@ void OptimizingCompilerThread::Run() {
{ AllowHandleDereference allow_handle_dereference;
FlushInputQueue(true);
}
- Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
+ base::Release_Store(&stop_thread_,
+ static_cast<base::AtomicWord>(CONTINUE));
stop_semaphore_.Signal();
// Return to start of consumer loop.
continue;
@@ -106,10 +85,10 @@ void OptimizingCompilerThread::Run() {
}
-RecompileJob* OptimizingCompilerThread::NextInput() {
+OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return NULL;
- RecompileJob* job = input_queue_[InputQueueIndex(0)];
+ OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
ASSERT_NE(NULL, job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
@@ -118,13 +97,13 @@ RecompileJob* OptimizingCompilerThread::NextInput() {
void OptimizingCompilerThread::CompileNext() {
- RecompileJob* job = NextInput();
+ OptimizedCompileJob* job = NextInput();
ASSERT_NE(NULL, job);
// The function may have already been optimized by OSR. Simply continue.
- RecompileJob::Status status = job->OptimizeGraph();
+ OptimizedCompileJob::Status status = job->OptimizeGraph();
USE(status); // Prevent an unused-variable error in release mode.
- ASSERT(status != RecompileJob::FAILED);
+ ASSERT(status != OptimizedCompileJob::FAILED);
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
@@ -134,13 +113,18 @@ void OptimizingCompilerThread::CompileNext() {
}
-static void DisposeRecompileJob(RecompileJob* job,
- bool restore_function_code) {
+static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
+ bool restore_function_code) {
// The recompile job is allocated in the CompilationInfo's zone.
CompilationInfo* info = job->info();
if (restore_function_code) {
if (info->is_osr()) {
- if (!job->IsWaitingForInstall()) BackEdgeTable::RemoveStackCheck(info);
+ if (!job->IsWaitingForInstall()) {
+ // Remove stack check that guards OSR entry on original code.
+ Handle<Code> code = info->unoptimized_code();
+ uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
+ BackEdgeTable::RemoveStackCheck(code, offset);
+ }
} else {
Handle<JSFunction> function = info->closure();
function->ReplaceCode(function->shared()->code());
@@ -151,25 +135,25 @@ static void DisposeRecompileJob(RecompileJob* job,
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
- RecompileJob* job;
+ OptimizedCompileJob* job;
while ((job = NextInput())) {
// This should not block, since we have one signal on the input queue
// semaphore corresponding to each element in the input queue.
input_queue_semaphore_.Wait();
// OSR jobs are dealt with separately.
if (!job->info()->is_osr()) {
- DisposeRecompileJob(job, restore_function_code);
+ DisposeOptimizedCompileJob(job, restore_function_code);
}
}
}
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
- RecompileJob* job;
+ OptimizedCompileJob* job;
while (output_queue_.Dequeue(&job)) {
// OSR jobs are dealt with separately.
if (!job->info()->is_osr()) {
- DisposeRecompileJob(job, restore_function_code);
+ DisposeOptimizedCompileJob(job, restore_function_code);
}
}
}
@@ -178,7 +162,7 @@ void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
for (int i = 0; i < osr_buffer_capacity_; i++) {
if (osr_buffer_[i] != NULL) {
- DisposeRecompileJob(osr_buffer_[i], restore_function_code);
+ DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
osr_buffer_[i] = NULL;
}
}
@@ -187,7 +171,7 @@ void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
void OptimizingCompilerThread::Flush() {
ASSERT(!IsOptimizerThread());
- Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+ base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();
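
Flush() relies on a release-stored flag plus a two-semaphore rendezvous with Run(). A compact model of that handshake with standard atomics; C++20 std::binary_semaphore stands in for v8's Semaphore and the queue work is elided:

    #include <atomic>
    #include <semaphore>

    enum StopFlag { CONTINUE, STOP, FLUSH };

    std::atomic<StopFlag> stop_thread(CONTINUE);
    std::binary_semaphore input_queue_semaphore(0);
    std::binary_semaphore stop_semaphore(0);

    // Worker side, as in OptimizingCompilerThread::Run().
    void WorkerLoop() {
      for (;;) {
        input_queue_semaphore.acquire();  // wait for work or a command
        switch (stop_thread.load(std::memory_order_acquire)) {
          case CONTINUE:
            break;  // CompileNext() would run here
          case FLUSH:
            // FlushInputQueue() would run here.
            stop_thread.store(CONTINUE, std::memory_order_release);
            stop_semaphore.release();  // acknowledge the flush
            continue;
          case STOP:
            stop_semaphore.release();
            return;
        }
      }
    }

    // Requesting side, as in OptimizingCompilerThread::Flush().
    void Flush() {
      stop_thread.store(FLUSH, std::memory_order_release);
      input_queue_semaphore.release();  // make the worker re-read the flag
      stop_semaphore.acquire();         // wait for acknowledgement
    }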
@@ -201,7 +185,7 @@ void OptimizingCompilerThread::Flush() {
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
- Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
+ base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
if (FLAG_block_concurrent_recompilation) Unblock();
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();
@@ -236,9 +220,10 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
ASSERT(!IsOptimizerThread());
HandleScope handle_scope(isolate_);
- RecompileJob* job;
+ OptimizedCompileJob* job;
while (output_queue_.Dequeue(&job)) {
CompilationInfo* info = job->info();
+ Handle<JSFunction> function(*info->closure());
if (info->is_osr()) {
if (FLAG_trace_osr) {
PrintF("[COSR - ");
@@ -247,26 +232,29 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
info->osr_ast_id().ToInt());
}
job->WaitForInstall();
- BackEdgeTable::RemoveStackCheck(info);
+ // Remove stack check that guards OSR entry on original code.
+ Handle<Code> code = info->unoptimized_code();
+ uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
+ BackEdgeTable::RemoveStackCheck(code, offset);
} else {
- Compiler::InstallOptimizedCode(job);
+ if (function->IsOptimized()) {
+ DisposeOptimizedCompileJob(job, false);
+ } else {
+ Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
+ function->ReplaceCode(
+ code.is_null() ? function->shared()->code() : *code);
+ }
}
}
}
-void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
+void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
ASSERT(IsQueueAvailable());
ASSERT(!IsOptimizerThread());
CompilationInfo* info = job->info();
if (info->is_osr()) {
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Queueing ");
- info->closure()->PrintName();
- PrintF(" for concurrent on-stack replacement.\n");
- }
osr_attempts_++;
- BackEdgeTable::AddStackCheck(info);
AddToOsrBuffer(job);
// Add job to the front of the input queue.
LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
@@ -276,7 +264,6 @@ void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
input_queue_[InputQueueIndex(0)] = job;
input_queue_length_++;
} else {
- info->closure()->MarkInRecompileQueue();
// Add job to the back of the input queue.
LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
ASSERT_LT(input_queue_length_, input_queue_capacity_);
@@ -300,14 +287,14 @@ void OptimizingCompilerThread::Unblock() {
}
-RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
- Handle<JSFunction> function, uint32_t osr_pc_offset) {
+OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
+ Handle<JSFunction> function, BailoutId osr_ast_id) {
ASSERT(!IsOptimizerThread());
for (int i = 0; i < osr_buffer_capacity_; i++) {
- RecompileJob* current = osr_buffer_[i];
+ OptimizedCompileJob* current = osr_buffer_[i];
if (current != NULL &&
current->IsWaitingForInstall() &&
- current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ current->info()->HasSameOsrEntry(function, osr_ast_id)) {
osr_hits_++;
osr_buffer_[i] = NULL;
return current;
@@ -318,12 +305,12 @@ RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
- uint32_t osr_pc_offset) {
+ BailoutId osr_ast_id) {
ASSERT(!IsOptimizerThread());
for (int i = 0; i < osr_buffer_capacity_; i++) {
- RecompileJob* current = osr_buffer_[i];
+ OptimizedCompileJob* current = osr_buffer_[i];
if (current != NULL &&
- current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ current->info()->HasSameOsrEntry(function, osr_ast_id)) {
return !current->IsWaitingForInstall();
}
}
@@ -334,7 +321,7 @@ bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
ASSERT(!IsOptimizerThread());
for (int i = 0; i < osr_buffer_capacity_; i++) {
- RecompileJob* current = osr_buffer_[i];
+ OptimizedCompileJob* current = osr_buffer_[i];
if (current != NULL && *current->info()->closure() == function) {
return !current->IsWaitingForInstall();
}
@@ -343,10 +330,10 @@ bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
}
-void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
+void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
ASSERT(!IsOptimizerThread());
// Find the next slot that is empty or has a stale job.
- RecompileJob* stale = NULL;
+ OptimizedCompileJob* stale = NULL;
while (true) {
stale = osr_buffer_[osr_buffer_cursor_];
if (stale == NULL || stale->IsWaitingForInstall()) break;
@@ -362,7 +349,7 @@ void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
info->closure()->PrintName();
PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
}
- DisposeRecompileJob(stale, false);
+ DisposeOptimizedCompileJob(stale, false);
}
osr_buffer_[osr_buffer_cursor_] = job;
osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
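
AddToOsrBuffer() implements a fixed-capacity ring with lazy eviction: the cursor advances until it finds a slot that is empty or holds a job already waiting for install, and a stale occupant is disposed before the new job is stored. A compilable sketch of the same policy, with simplified stand-in types:

#include <cstdio>

// Stand-in job type; "waiting for install" marks a slot as evictable.
struct Job { bool waiting_for_install; };

const int kCapacity = 2;
Job* osr_buffer[kCapacity] = {nullptr, nullptr};
int osr_cursor = 0;

// Same policy as AddToOsrBuffer(): advance the cursor until a slot is
// empty or stale, dispose the stale occupant, then store the new job.
void AddToOsrBuffer(Job* job) {
  Job* stale = nullptr;
  while (true) {
    stale = osr_buffer[osr_cursor];
    if (stale == nullptr || stale->waiting_for_install) break;
    osr_cursor = (osr_cursor + 1) % kCapacity;
  }
  if (stale != nullptr) {
    std::printf("evicting stale job\n");
    delete stale;
  }
  osr_buffer[osr_cursor] = job;
  osr_cursor = (osr_cursor + 1) % kCapacity;
}

int main() {
  AddToOsrBuffer(new Job{true});   // Fills slot 0.
  AddToOsrBuffer(new Job{true});   // Fills slot 1.
  AddToOsrBuffer(new Job{false});  // Wraps to slot 0 and evicts its stale job.
  for (Job* j : osr_buffer) delete j;
}
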
diff --git a/chromium/v8/src/optimizing-compiler-thread.h b/chromium/v8/src/optimizing-compiler-thread.h
index 795fa65588a..a6bcbedbd3e 100644
--- a/chromium/v8/src/optimizing-compiler-thread.h
+++ b/chromium/v8/src/optimizing-compiler-thread.h
@@ -1,46 +1,23 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_OPTIMIZING_COMPILER_THREAD_H_
#define V8_OPTIMIZING_COMPILER_THREAD_H_
-#include "atomicops.h"
-#include "flags.h"
-#include "list.h"
-#include "platform.h"
-#include "platform/mutex.h"
-#include "platform/time.h"
-#include "unbound-queue-inl.h"
+#include "src/base/atomicops.h"
+#include "src/flags.h"
+#include "src/list.h"
+#include "src/platform.h"
+#include "src/platform/mutex.h"
+#include "src/platform/time.h"
+#include "src/unbound-queue-inl.h"
namespace v8 {
namespace internal {
class HOptimizedGraphBuilder;
-class RecompileJob;
+class OptimizedCompileJob;
class SharedFunctionInfo;
class OptimizingCompilerThread : public Thread {
@@ -61,11 +38,12 @@ class OptimizingCompilerThread : public Thread {
osr_hits_(0),
osr_attempts_(0),
blocked_jobs_(0) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
- input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_);
+ base::NoBarrier_Store(&stop_thread_,
+ static_cast<base::AtomicWord>(CONTINUE));
+ input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
if (FLAG_concurrent_osr) {
// Allocate and mark OSR buffer slots as empty.
- osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_);
+ osr_buffer_ = NewArray<OptimizedCompileJob*>(osr_buffer_capacity_);
for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
}
}
@@ -75,12 +53,12 @@ class OptimizingCompilerThread : public Thread {
void Run();
void Stop();
void Flush();
- void QueueForOptimization(RecompileJob* optimizing_compiler);
+ void QueueForOptimization(OptimizedCompileJob* optimizing_compiler);
void Unblock();
void InstallOptimizedFunctions();
- RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
- uint32_t osr_pc_offset);
- bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
+ OptimizedCompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
+ BailoutId osr_ast_id);
+ bool IsQueuedForOSR(Handle<JSFunction> function, BailoutId osr_ast_id);
bool IsQueuedForOSR(JSFunction* function);
@@ -112,11 +90,11 @@ class OptimizingCompilerThread : public Thread {
void FlushOutputQueue(bool restore_function_code);
void FlushOsrBuffer(bool restore_function_code);
void CompileNext();
- RecompileJob* NextInput();
+ OptimizedCompileJob* NextInput();
// Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
// Tasks evicted from the cyclic buffer are discarded.
- void AddToOsrBuffer(RecompileJob* compiler);
+ void AddToOsrBuffer(OptimizedCompileJob* compiler);
inline int InputQueueIndex(int i) {
int result = (i + input_queue_shift_) % input_queue_capacity_;
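
InputQueueIndex() maps a logical position to a physical array slot relative to a moving head (input_queue_shift_), which is how QueueForOptimization() can push OSR jobs to the front of the circular queue. A small worked example of the index arithmetic; the capacity, shift value, and the front-push step shown are made-up illustrations of the same scheme, not the elided V8 code.

#include <cassert>

int input_queue_capacity = 8;
int input_queue_shift = 6;  // The head has wrapped near the end of the array.

int InputQueueIndex(int i) {
  int result = (i + input_queue_shift) % input_queue_capacity;
  assert(result >= 0 && result < input_queue_capacity);
  return result;
}

int main() {
  // Logical slot 0 lives at physical slot 6; logical slot 3 wraps to 1.
  assert(InputQueueIndex(0) == 6);
  assert(InputQueueIndex(3) == 1);
  // Pushing to the front (as the OSR path does) moves the head back one
  // slot modulo capacity, then writes InputQueueIndex(0).
  input_queue_shift = (input_queue_shift + input_queue_capacity - 1)
                      % input_queue_capacity;  // 6 -> 5
  assert(InputQueueIndex(0) == 5);
}
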
@@ -135,21 +113,21 @@ class OptimizingCompilerThread : public Thread {
Semaphore input_queue_semaphore_;
// Circular queue of incoming recompilation tasks (including OSR).
- RecompileJob** input_queue_;
+ OptimizedCompileJob** input_queue_;
int input_queue_capacity_;
int input_queue_length_;
int input_queue_shift_;
Mutex input_queue_mutex_;
// Queue of recompilation tasks ready to be installed (excluding OSR).
- UnboundQueue<RecompileJob*> output_queue_;
+ UnboundQueue<OptimizedCompileJob*> output_queue_;
// Cyclic buffer of recompilation tasks for OSR.
- RecompileJob** osr_buffer_;
+ OptimizedCompileJob** osr_buffer_;
int osr_buffer_capacity_;
int osr_buffer_cursor_;
- volatile AtomicWord stop_thread_;
+ volatile base::AtomicWord stop_thread_;
TimeDelta time_spent_compiling_;
TimeDelta time_spent_total_;
diff --git a/chromium/v8/src/parser.cc b/chromium/v8/src/parser.cc
index b1689191ad3..fd0dd2913ea 100644
--- a/chromium/v8/src/parser.cc
+++ b/chromium/v8/src/parser.cc
@@ -1,94 +1,27 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "ast.h"
-#include "bootstrapper.h"
-#include "char-predicates-inl.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "func-name-inferrer.h"
-#include "messages.h"
-#include "parser.h"
-#include "platform.h"
-#include "preparser.h"
-#include "runtime.h"
-#include "scanner-character-streams.h"
-#include "scopeinfo.h"
-#include "string-stream.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/api.h"
+#include "src/ast.h"
+#include "src/bootstrapper.h"
+#include "src/char-predicates-inl.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/messages.h"
+#include "src/parser.h"
+#include "src/platform.h"
+#include "src/preparser.h"
+#include "src/runtime.h"
+#include "src/scanner-character-streams.h"
+#include "src/scopeinfo.h"
+#include "src/string-stream.h"
namespace v8 {
namespace internal {
-// PositionStack is used for on-stack allocation of token positions for
-// new expressions. Please look at ParseNewExpression.
-
-class PositionStack {
- public:
- explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
- ~PositionStack() {
- ASSERT(!*ok_ || is_empty());
- USE(ok_);
- }
-
- class Element {
- public:
- Element(PositionStack* stack, int value) {
- previous_ = stack->top();
- value_ = value;
- stack->set_top(this);
- }
-
- private:
- Element* previous() { return previous_; }
- int value() { return value_; }
- friend class PositionStack;
- Element* previous_;
- int value_;
- };
-
- bool is_empty() { return top_ == NULL; }
- int pop() {
- ASSERT(!is_empty());
- int result = top_->value();
- top_ = top_->previous();
- return result;
- }
-
- private:
- Element* top() { return top_; }
- void set_top(Element* value) { top_ = value; }
- Element* top_;
- bool* ok_;
-};
-
-
RegExpBuilder::RegExpBuilder(Zone* zone)
: zone_(zone),
pending_empty_(false),
@@ -249,50 +182,31 @@ void RegExpBuilder::AddQuantifierToAtom(
}
-Handle<String> Parser::LookupSymbol(int symbol_id) {
- // Length of symbol cache is the number of identified symbols.
- // If we are larger than that, or negative, it's not a cached symbol.
- // This might also happen if there is no preparser symbol data, even
- // if there is some preparser data.
- if (static_cast<unsigned>(symbol_id)
- >= static_cast<unsigned>(symbol_cache_.length())) {
- if (scanner().is_literal_ascii()) {
- return isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
- } else {
- return isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
- }
+ScriptData* ScriptData::New(const char* data, int length) {
+ // A length that is not a multiple of the word size is invalid.
+ if (length % sizeof(unsigned) != 0) {
+ return NULL;
}
- return LookupCachedSymbol(symbol_id);
-}
-
-Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
- // Make sure the cache is large enough to hold the symbol identifier.
- if (symbol_cache_.length() <= symbol_id) {
- // Increase length to index + 1.
- symbol_cache_.AddBlock(Handle<String>::null(),
- symbol_id + 1 - symbol_cache_.length(), zone());
- }
- Handle<String> result = symbol_cache_.at(symbol_id);
- if (result.is_null()) {
- if (scanner().is_literal_ascii()) {
- result = isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
- } else {
- result = isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
- }
- symbol_cache_.at(symbol_id) = result;
- return result;
+ int deserialized_data_length = length / sizeof(unsigned);
+ unsigned* deserialized_data;
+ bool owns_store = reinterpret_cast<intptr_t>(data) % sizeof(unsigned) != 0;
+ if (owns_store) {
+ // Copy the data to align it.
+ deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
+ i::CopyBytes(reinterpret_cast<char*>(deserialized_data),
+ data, static_cast<size_t>(length));
+ } else {
+ // If aligned, don't create a copy of the data.
+ deserialized_data = reinterpret_cast<unsigned*>(const_cast<char*>(data));
}
- isolate()->counters()->total_preparse_symbols_skipped()->Increment();
- return result;
+ return new ScriptData(
+ Vector<unsigned>(deserialized_data, deserialized_data_length),
+ owns_store);
}
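
ScriptData::New() above reuses the caller's buffer when it is already aligned for unsigned reads and copies into a fresh allocation otherwise, tracking ownership in owns_store. A self-contained sketch of that decide-by-alignment technique; AlignedView and the buffers are stand-ins, not the V8 function.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reuse the caller's buffer when it is unsigned-aligned; otherwise copy
// into a naturally aligned allocation and report that we own the copy.
unsigned* AlignedView(const char* data, int length, bool* owns_store) {
  *owns_store = reinterpret_cast<uintptr_t>(data) % sizeof(unsigned) != 0;
  if (!*owns_store) {
    // Already aligned: reinterpret in place, no copy.
    return reinterpret_cast<unsigned*>(const_cast<char*>(data));
  }
  unsigned* copy = new unsigned[length / sizeof(unsigned)];
  std::memcpy(copy, data, static_cast<size_t>(length));
  return copy;
}

int main() {
  alignas(unsigned) char buf[16] = {0};
  bool owns = false;
  unsigned* aligned = AlignedView(buf, sizeof(buf), &owns);
  std::printf("owns_store=%d\n", owns);  // 0: buffer reused in place.
  unsigned* copied = AlignedView(buf + 1, 8, &owns);
  std::printf("owns_store=%d\n", owns);  // 1: a copy was made.
  if (owns) delete[] copied;
  (void)aligned;
}
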
-FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
+FunctionEntry ScriptData::GetFunctionEntry(int start) {
// The current pre-data entry must be a FunctionEntry with the given
// start position.
if ((function_index_ + FunctionEntry::kSize <= store_.length())
@@ -306,12 +220,12 @@ FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
}
-int ScriptDataImpl::GetSymbolIdentifier() {
+int ScriptData::GetSymbolIdentifier() {
return ReadNumber(&symbol_data_);
}
-bool ScriptDataImpl::SanityCheck() {
+bool ScriptData::SanityCheck() {
// Check that the header data is valid and doesn't point
// to positions outside the store.
if (store_.length() < PreparseDataConstants::kHeaderSize) return false;
@@ -347,10 +261,6 @@ bool ScriptDataImpl::SanityCheck() {
static_cast<int>(store_[PreparseDataConstants::kFunctionsSizeOffset]);
if (functions_size < 0) return false;
if (functions_size % FunctionEntry::kSize != 0) return false;
- // Check that the count of symbols is non-negative.
- int symbol_count =
- static_cast<int>(store_[PreparseDataConstants::kSymbolCountOffset]);
- if (symbol_count < 0) return false;
// Check that the total size has room for header and function entries.
int minimum_size =
PreparseDataConstants::kHeaderSize + functions_size;
@@ -360,7 +270,7 @@ bool ScriptDataImpl::SanityCheck() {
-const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
+const char* ScriptData::ReadString(unsigned* start, int* chars) {
int length = start[0];
char* result = NewArray<char>(length + 1);
for (int i = 0; i < length; i++) {
@@ -372,41 +282,45 @@ const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
}
-Scanner::Location ScriptDataImpl::MessageLocation() {
+Scanner::Location ScriptData::MessageLocation() const {
int beg_pos = Read(PreparseDataConstants::kMessageStartPos);
int end_pos = Read(PreparseDataConstants::kMessageEndPos);
return Scanner::Location(beg_pos, end_pos);
}
-const char* ScriptDataImpl::BuildMessage() {
+bool ScriptData::IsReferenceError() const {
+ return Read(PreparseDataConstants::kIsReferenceErrorPos);
+}
+
+
+const char* ScriptData::BuildMessage() const {
unsigned* start = ReadAddress(PreparseDataConstants::kMessageTextPos);
return ReadString(start, NULL);
}
-Vector<const char*> ScriptDataImpl::BuildArgs() {
+const char* ScriptData::BuildArg() const {
int arg_count = Read(PreparseDataConstants::kMessageArgCountPos);
- const char** array = NewArray<const char*>(arg_count);
+ ASSERT(arg_count == 0 || arg_count == 1);
+ if (arg_count == 0) {
+ return NULL;
+ }
// Find the position after the message text by skipping the length
// field and as many words as the length field specifies.
int pos = PreparseDataConstants::kMessageTextPos + 1
+ Read(PreparseDataConstants::kMessageTextPos);
- for (int i = 0; i < arg_count; i++) {
- int count = 0;
- array[i] = ReadString(ReadAddress(pos), &count);
- pos += count + 1;
- }
- return Vector<const char*>(array, arg_count);
+ int count = 0;
+ return ReadString(ReadAddress(pos), &count);
}
-unsigned ScriptDataImpl::Read(int position) {
+unsigned ScriptData::Read(int position) const {
return store_[PreparseDataConstants::kHeaderSize + position];
}
-unsigned* ScriptDataImpl::ReadAddress(int position) {
+unsigned* ScriptData::ReadAddress(int position) const {
return &store_[PreparseDataConstants::kHeaderSize + position];
}
@@ -463,56 +377,6 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
-// FunctionState and BlockState together implement the parser's scope stack.
-// The parser's current scope is in top_scope_. The BlockState and
-// FunctionState constructors push on the scope stack and the destructors
-// pop. They are also used to hold the parser's per-function and per-block
-// state.
-
-class Parser::BlockState BASE_EMBEDDED {
- public:
- BlockState(Parser* parser, Scope* scope)
- : parser_(parser),
- outer_scope_(parser->top_scope_) {
- parser->top_scope_ = scope;
- }
-
- ~BlockState() { parser_->top_scope_ = outer_scope_; }
-
- private:
- Parser* parser_;
- Scope* outer_scope_;
-};
-
-
-Parser::FunctionState::FunctionState(Parser* parser,
- Scope* scope,
- Isolate* isolate)
- : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
- next_handler_index_(0),
- expected_property_count_(0),
- generator_object_variable_(NULL),
- parser_(parser),
- outer_function_state_(parser->current_function_state_),
- outer_scope_(parser->top_scope_),
- saved_ast_node_id_(isolate->ast_node_id()),
- factory_(isolate, parser->zone()) {
- parser->top_scope_ = scope;
- parser->current_function_state_ = this;
- isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
-}
-
-
-Parser::FunctionState::~FunctionState() {
- parser_->top_scope_ = outer_scope_;
- parser_->current_function_state_ = outer_function_state_;
- if (outer_function_state_ != NULL) {
- parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
- }
-}
-
-
-// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
//
@@ -535,23 +399,387 @@ Parser::FunctionState::~FunctionState() {
// ----------------------------------------------------------------------------
// Implementation of Parser
+bool ParserTraits::IsEvalOrArguments(Handle<String> identifier) const {
+ Factory* factory = parser_->isolate()->factory();
+ return identifier.is_identical_to(factory->eval_string())
+ || identifier.is_identical_to(factory->arguments_string());
+}
+
+
+bool ParserTraits::IsThisProperty(Expression* expression) {
+ ASSERT(expression != NULL);
+ Property* property = expression->AsProperty();
+ return property != NULL &&
+ property->obj()->AsVariableProxy() != NULL &&
+ property->obj()->AsVariableProxy()->is_this();
+}
+
+
+bool ParserTraits::IsIdentifier(Expression* expression) {
+ VariableProxy* operand = expression->AsVariableProxy();
+ return operand != NULL && !operand->is_this();
+}
+
+
+void ParserTraits::PushPropertyName(FuncNameInferrer* fni,
+ Expression* expression) {
+ if (expression->IsPropertyName()) {
+ fni->PushLiteralName(expression->AsLiteral()->AsPropertyName());
+ } else {
+ fni->PushLiteralName(
+ parser_->isolate()->factory()->anonymous_function_string());
+ }
+}
+
+
+void ParserTraits::CheckAssigningFunctionLiteralToProperty(Expression* left,
+ Expression* right) {
+ ASSERT(left != NULL);
+ if (left->AsProperty() != NULL &&
+ right->AsFunctionLiteral() != NULL) {
+ right->AsFunctionLiteral()->set_pretenure();
+ }
+}
+
+
+void ParserTraits::CheckPossibleEvalCall(Expression* expression,
+ Scope* scope) {
+ VariableProxy* callee = expression->AsVariableProxy();
+ if (callee != NULL &&
+ callee->IsVariable(parser_->isolate()->factory()->eval_string())) {
+ scope->DeclarationScope()->RecordEvalCall();
+ }
+}
+
+
+Expression* ParserTraits::MarkExpressionAsLValue(Expression* expression) {
+ VariableProxy* proxy = expression != NULL
+ ? expression->AsVariableProxy()
+ : NULL;
+ if (proxy != NULL) proxy->MarkAsLValue();
+ return expression;
+}
+
+
+bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
+ Expression** x, Expression* y, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ if ((*x)->AsLiteral() && (*x)->AsLiteral()->value()->IsNumber() &&
+ y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
+ double x_val = (*x)->AsLiteral()->value()->Number();
+ double y_val = y->AsLiteral()->value()->Number();
+ switch (op) {
+ case Token::ADD:
+ *x = factory->NewNumberLiteral(x_val + y_val, pos);
+ return true;
+ case Token::SUB:
+ *x = factory->NewNumberLiteral(x_val - y_val, pos);
+ return true;
+ case Token::MUL:
+ *x = factory->NewNumberLiteral(x_val * y_val, pos);
+ return true;
+ case Token::DIV:
+ *x = factory->NewNumberLiteral(x_val / y_val, pos);
+ return true;
+ case Token::BIT_OR: {
+ int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::BIT_AND: {
+ int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::BIT_XOR: {
+ int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::SHL: {
+ int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::SHR: {
+ uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ uint32_t value = DoubleToUint32(x_val) >> shift;
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::SAR: {
+ uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ default:
+ break;
+ }
+ }
+ return false;
+}
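
The SHL/SHR/SAR folding cases above mask the shift count with 0x1f, matching JavaScript's rule that only the low five bits of the right operand matter, and SHR additionally treats the left operand as unsigned. A quick check of those identities:

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = 5;
  int32_t count = 33;             // In JS, 5 << 33 === 10, not 0.
  int32_t masked = count & 0x1f;  // 33 & 31 == 1
  assert(masked == 1);
  assert((x << masked) == 10);
  // SHR folds with the unsigned value of the left operand:
  uint32_t u = static_cast<uint32_t>(-8);
  assert((u >> (2 & 0x1f)) == 0x3FFFFFFEu);  // -8 >>> 2 in JS.
}
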
+
+
+Expression* ParserTraits::BuildUnaryExpression(
+ Expression* expression, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ ASSERT(expression != NULL);
+ if (expression->IsLiteral()) {
+ Handle<Object> literal = expression->AsLiteral()->value();
+ if (op == Token::NOT) {
+ // Convert the literal to a boolean condition and negate it.
+ bool condition = literal->BooleanValue();
+ Handle<Object> result =
+ parser_->isolate()->factory()->ToBoolean(!condition);
+ return factory->NewLiteral(result, pos);
+ } else if (literal->IsNumber()) {
+ // Compute some expressions involving only number literals.
+ double value = literal->Number();
+ switch (op) {
+ case Token::ADD:
+ return expression;
+ case Token::SUB:
+ return factory->NewNumberLiteral(-value, pos);
+ case Token::BIT_NOT:
+ return factory->NewNumberLiteral(~DoubleToInt32(value), pos);
+ default:
+ break;
+ }
+ }
+ }
+ // Desugar '+foo' => 'foo*1'
+ if (op == Token::ADD) {
+ return factory->NewBinaryOperation(
+ Token::MUL, expression, factory->NewNumberLiteral(1, pos), pos);
+ }
+ // The same idea for '-foo' => 'foo*(-1)'.
+ if (op == Token::SUB) {
+ return factory->NewBinaryOperation(
+ Token::MUL, expression, factory->NewNumberLiteral(-1, pos), pos);
+ }
+ // ...and one more time for '~foo' => 'foo^(~0)'.
+ if (op == Token::BIT_NOT) {
+ return factory->NewBinaryOperation(
+ Token::BIT_XOR, expression, factory->NewNumberLiteral(~0, pos), pos);
+ }
+ return factory->NewUnaryOperation(op, expression, pos);
+}
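
The three desugarings above ('+foo' => 'foo*1', '-foo' => 'foo*(-1)', '~foo' => 'foo^(~0)') rewrite unary arithmetic into binary operations; presumably this lets later pipeline stages reuse the binary-op machinery for number conversion, though the patch itself does not state the rationale. The identities are easy to sanity-check:

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = 42;
  assert((x * -1) == -x);   // '-foo' => 'foo * (-1)'
  assert((x ^ ~0) == ~x);   // '~foo' => 'foo ^ (~0)'
  double d = 3.5;
  assert((d * 1) == +d);    // '+foo' => 'foo * 1'
}
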
+
+
+Expression* ParserTraits::NewThrowReferenceError(const char* message, int pos) {
+ return NewThrowError(
+ parser_->isolate()->factory()->MakeReferenceError_string(),
+ message, HandleVector<Object>(NULL, 0), pos);
+}
+
+
+Expression* ParserTraits::NewThrowSyntaxError(
+ const char* message, Handle<Object> arg, int pos) {
+ int argc = arg.is_null() ? 0 : 1;
+ Vector< Handle<Object> > arguments = HandleVector<Object>(&arg, argc);
+ return NewThrowError(
+ parser_->isolate()->factory()->MakeSyntaxError_string(),
+ message, arguments, pos);
+}
+
+
+Expression* ParserTraits::NewThrowTypeError(
+ const char* message, Handle<Object> arg, int pos) {
+ int argc = arg.is_null() ? 0 : 1;
+ Vector< Handle<Object> > arguments = HandleVector<Object>(&arg, argc);
+ return NewThrowError(
+ parser_->isolate()->factory()->MakeTypeError_string(),
+ message, arguments, pos);
+}
+
+
+Expression* ParserTraits::NewThrowError(
+ Handle<String> constructor, const char* message,
+ Vector<Handle<Object> > arguments, int pos) {
+ Zone* zone = parser_->zone();
+ Factory* factory = parser_->isolate()->factory();
+ int argc = arguments.length();
+ Handle<FixedArray> elements = factory->NewFixedArray(argc, TENURED);
+ for (int i = 0; i < argc; i++) {
+ Handle<Object> element = arguments[i];
+ if (!element.is_null()) {
+ elements->set(i, *element);
+ }
+ }
+ Handle<JSArray> array =
+ factory->NewJSArrayWithElements(elements, FAST_ELEMENTS, TENURED);
+
+ ZoneList<Expression*>* args = new(zone) ZoneList<Expression*>(2, zone);
+ Handle<String> type = factory->InternalizeUtf8String(message);
+ args->Add(parser_->factory()->NewLiteral(type, pos), zone);
+ args->Add(parser_->factory()->NewLiteral(array, pos), zone);
+ CallRuntime* call_constructor =
+ parser_->factory()->NewCallRuntime(constructor, NULL, args, pos);
+ return parser_->factory()->NewThrow(call_constructor, pos);
+}
+
+
+void ParserTraits::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ const char* arg,
+ bool is_reference_error) {
+ if (parser_->stack_overflow()) {
+ // Suppress the error message (syntax error or such) in the presence of a
+ // stack overflow. The isolate allows only one pending exception at a time
+ // and we want to report the stack overflow later.
+ return;
+ }
+ parser_->has_pending_error_ = true;
+ parser_->pending_error_location_ = source_location;
+ parser_->pending_error_message_ = message;
+ parser_->pending_error_char_arg_ = arg;
+ parser_->pending_error_arg_ = Handle<String>();
+ parser_->pending_error_is_reference_error_ = is_reference_error;
+}
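
Rather than throwing mid-parse, ReportMessageAt() records a single pending error that DoParseProgram()/ParseLazy() raise afterwards via ThrowPendingError(), since the isolate holds only one pending exception. A simplified record-then-throw sketch of that pattern; the types and output below are stand-ins.

#include <cstdio>
#include <string>

// Pending-error fields, mirroring has_pending_error_ and friends above.
struct PendingError {
  bool has_error = false;
  int beg_pos = 0, end_pos = 0;
  std::string message;
};

// Later reports overwrite earlier ones, matching the assignments above.
void Report(PendingError* e, int beg, int end, const char* msg) {
  e->has_error = true;
  e->beg_pos = beg;
  e->end_pos = end;
  e->message = msg;
}

// ThrowPendingError() analogue: raise the recorded error after parsing.
void ThrowIfPending(const PendingError& e) {
  if (!e.has_error) return;
  std::printf("SyntaxError at [%d,%d): %s\n", e.beg_pos, e.end_pos,
              e.message.c_str());
}

int main() {
  PendingError e;
  Report(&e, 10, 14, "unexpected_token");
  ThrowIfPending(e);
}
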
+
+
+void ParserTraits::ReportMessage(const char* message,
+ MaybeHandle<String> arg,
+ bool is_reference_error) {
+ Scanner::Location source_location = parser_->scanner()->location();
+ ReportMessageAt(source_location, message, arg, is_reference_error);
+}
+
+
+void ParserTraits::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ MaybeHandle<String> arg,
+ bool is_reference_error) {
+ if (parser_->stack_overflow()) {
+ // Suppress the error message (syntax error or such) in the presence of a
+ // stack overflow. The isolate allows only one pending exception at a time
+ // and we want to report the stack overflow later.
+ return;
+ }
+ parser_->has_pending_error_ = true;
+ parser_->pending_error_location_ = source_location;
+ parser_->pending_error_message_ = message;
+ parser_->pending_error_char_arg_ = NULL;
+ parser_->pending_error_arg_ = arg;
+ parser_->pending_error_is_reference_error_ = is_reference_error;
+}
+
+
+Handle<String> ParserTraits::GetSymbol(Scanner* scanner) {
+ Handle<String> result =
+ parser_->scanner()->AllocateInternalizedString(parser_->isolate());
+ ASSERT(!result.is_null());
+ return result;
+}
+
+
+Handle<String> ParserTraits::NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured) {
+ return scanner->AllocateNextLiteralString(parser_->isolate(), tenured);
+}
+
+
+Expression* ParserTraits::ThisExpression(
+ Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ return factory->NewVariableProxy(scope->receiver());
+}
+
+
+Literal* ParserTraits::ExpressionFromLiteral(
+ Token::Value token, int pos,
+ Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ Factory* isolate_factory = parser_->isolate()->factory();
+ switch (token) {
+ case Token::NULL_LITERAL:
+ return factory->NewLiteral(isolate_factory->null_value(), pos);
+ case Token::TRUE_LITERAL:
+ return factory->NewLiteral(isolate_factory->true_value(), pos);
+ case Token::FALSE_LITERAL:
+ return factory->NewLiteral(isolate_factory->false_value(), pos);
+ case Token::NUMBER: {
+ double value = scanner->DoubleValue();
+ return factory->NewNumberLiteral(value, pos);
+ }
+ default:
+ ASSERT(false);
+ }
+ return NULL;
+}
+
+
+Expression* ParserTraits::ExpressionFromIdentifier(
+ Handle<String> name, int pos, Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
+ // The name may refer to a module instance object, so its type is unknown.
+#ifdef DEBUG
+ if (FLAG_print_interface_details)
+ PrintF("# Variable %s ", name->ToAsciiArray());
+#endif
+ Interface* interface = Interface::NewUnknown(parser_->zone());
+ return scope->NewUnresolved(factory, name, interface, pos);
+}
+
+
+Expression* ParserTraits::ExpressionFromString(
+ int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ Handle<String> symbol = GetSymbol(scanner);
+ if (parser_->fni_ != NULL) parser_->fni_->PushLiteralName(symbol);
+ return factory->NewLiteral(symbol, pos);
+}
+
+
+Literal* ParserTraits::GetLiteralTheHole(
+ int position, AstNodeFactory<AstConstructionVisitor>* factory) {
+ return factory->NewLiteral(parser_->isolate()->factory()->the_hole_value(),
+ RelocInfo::kNoPosition);
+}
+
+
+Expression* ParserTraits::ParseV8Intrinsic(bool* ok) {
+ return parser_->ParseV8Intrinsic(ok);
+}
+
+
+FunctionLiteral* ParserTraits::ParseFunctionLiteral(
+ Handle<String> name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ bool* ok) {
+ return parser_->ParseFunctionLiteral(name, function_name_location,
+ name_is_strict_reserved, is_generator,
+ function_token_position, type,
+ arity_restriction, ok);
+}
+
+
Parser::Parser(CompilationInfo* info)
- : ParserBase(&scanner_, info->isolate()->stack_guard()->real_climit()),
+ : ParserBase<ParserTraits>(&scanner_,
+ info->isolate()->stack_guard()->real_climit(),
+ info->extension(),
+ NULL,
+ info->zone(),
+ this),
isolate_(info->isolate()),
- symbol_cache_(0, info->zone()),
script_(info->script()),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
- top_scope_(NULL),
original_scope_(NULL),
- current_function_state_(NULL),
target_stack_(NULL),
- extension_(info->extension()),
- pre_parse_data_(NULL),
- fni_(NULL),
- parenthesized_function_(false),
- zone_(info->zone()),
- info_(info) {
+ cached_data_(NULL),
+ cached_data_mode_(NO_CACHED_DATA),
+ info_(info),
+ has_pending_error_(false),
+ pending_error_message_(NULL),
+ pending_error_char_arg_(NULL) {
ASSERT(!script_.is_null());
isolate_->set_ast_node_id(0);
set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping);
@@ -577,7 +805,14 @@ FunctionLiteral* Parser::ParseProgram() {
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
- source->TryFlatten();
+ CompleteParserRecorder recorder;
+ if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+ log_ = &recorder;
+ } else if (cached_data_mode_ == CONSUME_CACHED_DATA) {
+ (*cached_data_)->Initialize();
+ }
+
+ source = String::Flatten(source);
FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
// Notice that the stream is destroyed at the end of the branch block.
@@ -600,33 +835,39 @@ FunctionLiteral* Parser::ParseProgram() {
} else if (info()->script()->name()->IsString()) {
String* name = String::cast(info()->script()->name());
SmartArrayPointer<char> name_chars = name->ToCString();
- PrintF("[parsing script: %s", *name_chars);
+ PrintF("[parsing script: %s", name_chars.get());
} else {
PrintF("[parsing script");
}
PrintF(" - took %0.3f ms]\n", ms);
}
+ if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+ if (result != NULL) {
+ Vector<unsigned> store = recorder.ExtractData();
+ *cached_data_ = new ScriptData(store);
+ }
+ log_ = NULL;
+ }
return result;
}
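
ParseProgram() now threads preparse data through two modes: PRODUCE_CACHED_DATA logs into a CompleteParserRecorder and extracts a ScriptData on success, while CONSUME_CACHED_DATA initializes previously serialized data before parsing. A schematic of that two-mode flow; Recorder, CachedData, and the parse body are illustrative stand-ins.

#include <cstdio>
#include <vector>

enum CachedDataMode { NO_CACHED_DATA, PRODUCE_CACHED_DATA, CONSUME_CACHED_DATA };

// Stand-ins for CompleteParserRecorder and ScriptData.
struct Recorder { std::vector<unsigned> log; };
struct CachedData { std::vector<unsigned> store; };

bool ParseProgram(CachedDataMode mode, CachedData** cached, bool parse_ok) {
  Recorder recorder;
  if (mode == CONSUME_CACHED_DATA) {
    // Reuse function positions etc. recorded by an earlier parse.
    std::printf("consuming %zu cached words\n", (*cached)->store.size());
  }
  // ... parse, logging into `recorder` when producing ...
  if (mode == PRODUCE_CACHED_DATA) recorder.log = {1, 2, 3};
  if (mode == PRODUCE_CACHED_DATA && parse_ok) {
    *cached = new CachedData{recorder.log};  // ExtractData() analogue.
  }
  return parse_ok;
}

int main() {
  CachedData* data = nullptr;
  ParseProgram(PRODUCE_CACHED_DATA, &data, /*parse_ok=*/true);
  ParseProgram(CONSUME_CACHED_DATA, &data, /*parse_ok=*/true);
  delete data;
}
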
FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
Handle<String> source) {
- ASSERT(top_scope_ == NULL);
+ ASSERT(scope_ == NULL);
ASSERT(target_stack_ == NULL);
- if (pre_parse_data_ != NULL) pre_parse_data_->Initialize();
Handle<String> no_name = isolate()->factory()->empty_string();
FunctionLiteral* result = NULL;
- { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ { Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
if (!info->context().is_null()) {
scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
}
original_scope_ = scope;
if (info->is_eval()) {
- if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
+ if (!scope->is_global_scope() || info->strict_mode() == STRICT) {
scope = NewScope(scope, EVAL_SCOPE);
}
} else if (info->is_global()) {
@@ -645,19 +886,19 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
ParsingModeScope parsing_mode(this, mode);
// Enters 'scope'.
- FunctionState function_state(this, scope, isolate());
+ FunctionState function_state(&function_state_, &scope_, scope, zone());
- top_scope_->SetLanguageMode(info->language_mode());
+ scope_->SetStrictMode(info->strict_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
- int beg_pos = scanner().location().beg_pos;
+ int beg_pos = scanner()->location().beg_pos;
ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
- if (ok && !top_scope_->is_classic_mode()) {
- CheckOctalLiteral(beg_pos, scanner().location().end_pos, &ok);
+ if (ok && strict_mode() == STRICT) {
+ CheckOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
}
- if (ok && is_extended_mode()) {
- CheckConflictingVarDeclarations(top_scope_, &ok);
+ if (ok && allow_harmony_scoping() && strict_mode() == STRICT) {
+ CheckConflictingVarDeclarations(scope_, &ok);
}
if (ok && info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
@@ -665,7 +906,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
!body->at(0)->IsExpressionStatement() ||
!body->at(0)->AsExpressionStatement()->
expression()->IsFunctionLiteral()) {
- ReportMessage("single_function_literal", Vector<const char*>::empty());
+ ReportMessage("single_function_literal");
ok = false;
}
}
@@ -673,7 +914,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (ok) {
result = factory()->NewFunctionLiteral(
no_name,
- top_scope_,
+ scope_,
body,
function_state.materialized_literal_count(),
function_state.expected_property_count(),
@@ -690,6 +931,8 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
factory()->visitor()->dont_optimize_reason());
} else if (stack_overflow()) {
isolate()->StackOverflow();
+ } else {
+ ThrowPendingError();
}
}
@@ -711,7 +954,7 @@ FunctionLiteral* Parser::ParseLazy() {
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
// Initialize parser state.
- source->TryFlatten();
+ source = String::Flatten(source);
FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
ExternalTwoByteStringUtf16CharacterStream stream(
@@ -729,7 +972,7 @@ FunctionLiteral* Parser::ParseLazy() {
if (FLAG_trace_parse && result != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
- PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
+ PrintF("[parsing function: %s - took %0.3f ms]\n", name_chars.get(), ms);
}
return result;
}
@@ -738,7 +981,7 @@ FunctionLiteral* Parser::ParseLazy() {
FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
scanner_.Initialize(source);
- ASSERT(top_scope_ == NULL);
+ ASSERT(scope_ == NULL);
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
@@ -752,19 +995,17 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
{
// Parse the function literal.
- Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
info()->SetGlobalScope(scope);
if (!info()->closure().is_null()) {
scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
original_scope_ = scope;
- FunctionState function_state(this, scope, isolate());
- ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
- ASSERT(scope->language_mode() != EXTENDED_MODE ||
- info()->is_extended_mode());
- ASSERT(info()->language_mode() == shared_info->language_mode());
- scope->SetLanguageMode(shared_info->language_mode());
+ FunctionState function_state(&function_state_, &scope_, scope, zone());
+ ASSERT(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT);
+ ASSERT(info()->strict_mode() == shared_info->strict_mode());
+ scope->SetStrictMode(shared_info->strict_mode());
FunctionLiteral::FunctionType function_type = shared_info->is_expression()
? (shared_info->is_anonymous()
? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -772,10 +1013,12 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
: FunctionLiteral::DECLARATION;
bool ok = true;
result = ParseFunctionLiteral(name,
+ Scanner::Location::invalid(),
false, // Strict mode name already checked.
shared_info->is_generator(),
RelocInfo::kNoPosition,
function_type,
+ FunctionLiteral::NORMAL_ARITY,
&ok);
// Make sure the results agree.
ASSERT(ok == (result != NULL));
@@ -785,7 +1028,11 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
ASSERT(target_stack_ == NULL);
if (result == NULL) {
- if (stack_overflow()) isolate()->StackOverflow();
+ if (stack_overflow()) {
+ isolate()->StackOverflow();
+ } else {
+ ThrowPendingError();
+ }
} else {
Handle<String> inferred_name(shared_info->inferred_name());
result->set_inferred_name(inferred_name);
@@ -794,62 +1041,6 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
}
-Handle<String> Parser::GetSymbol() {
- int symbol_id = -1;
- if (pre_parse_data() != NULL) {
- symbol_id = pre_parse_data()->GetSymbolIdentifier();
- }
- return LookupSymbol(symbol_id);
-}
-
-
-void Parser::ReportMessage(const char* message, Vector<const char*> args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, message, args);
-}
-
-
-void Parser::ReportMessage(const char* message, Vector<Handle<String> > args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, message, args);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<const char*> args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
- elements->set(i, *arg_string);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<Handle<String> > args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- elements->set(i, *args[i]);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
-}
-
-
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool is_eval,
@@ -872,7 +1063,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
directive_prologue = false;
}
- Scanner::Location token_loc = scanner().peek_location();
+ Scanner::Location token_loc = scanner()->peek_location();
Statement* stat;
if (is_global && !is_eval) {
stat = ParseModuleElement(NULL, CHECK_OK);
@@ -895,8 +1086,9 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
Handle<String> directive = Handle<String>::cast(literal->value());
// Check "use strict" directive (ES5 14.1).
- if (top_scope_->is_classic_mode() &&
- directive->Equals(isolate()->heap()->use_strict_string()) &&
+ if (strict_mode() == SLOPPY &&
+ String::Equals(isolate()->factory()->use_strict_string(),
+ directive) &&
token_loc.end_pos - token_loc.beg_pos ==
isolate()->heap()->use_strict_string()->length() + 2) {
// TODO(mstarzinger): Global strict eval calls need their own scope
@@ -904,17 +1096,15 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// add this scope in DoParseProgram(), but that requires adaptations
// all over the code base, so we go with a quick-fix for now.
// In the same manner, we have to patch the parsing mode.
- if (is_eval && !top_scope_->is_eval_scope()) {
- ASSERT(top_scope_->is_global_scope());
- Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
- scope->set_start_position(top_scope_->start_position());
- scope->set_end_position(top_scope_->end_position());
- top_scope_ = scope;
+ if (is_eval && !scope_->is_eval_scope()) {
+ ASSERT(scope_->is_global_scope());
+ Scope* scope = NewScope(scope_, EVAL_SCOPE);
+ scope->set_start_position(scope_->start_position());
+ scope->set_end_position(scope_->end_position());
+ scope_ = scope;
mode_ = PARSE_EAGERLY;
}
- // TODO(ES6): Fix entering extended mode, once it is specified.
- top_scope_->SetLanguageMode(allow_harmony_scoping()
- ? EXTENDED_MODE : STRICT_MODE);
+ scope_->SetStrictMode(STRICT);
// "use strict" is the only directive for now.
directive_prologue = false;
}
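
The directive check above compares the token's source extent against the string's length plus two, so the span must be exactly the value surrounded by its two quote characters; an escape sequence such as \u0020 lengthens the span and fails the test, as ES5 14.1 requires for a Use Strict Directive. A small demonstration of the length arithmetic:

#include <cassert>
#include <cstring>

int main() {
  const char* raw = "\"use strict\"";  // Token as it appears in source.
  const char* value = "use strict";    // Cooked string value.
  int beg_pos = 0;
  int end_pos = static_cast<int>(std::strlen(raw));
  // end_pos - beg_pos == value length + 2 quote characters.
  assert(end_pos - beg_pos == static_cast<int>(std::strlen(value)) + 2);

  // Same cooked value, but the raw span is longer, so it is not a directive.
  const char* escaped = "\"use\\u0020strict\"";
  assert(static_cast<int>(std::strlen(escaped)) !=
         static_cast<int>(std::strlen(value)) + 2);
}
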
@@ -962,14 +1152,14 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
// Handle 'module' as a context-sensitive keyword.
if (FLAG_harmony_modules &&
peek() == Token::IDENTIFIER &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() &&
stmt != NULL) {
ExpressionStatement* estmt = stmt->AsExpressionStatement();
if (estmt != NULL &&
estmt->expression()->AsVariableProxy() != NULL &&
- estmt->expression()->AsVariableProxy()->name()->Equals(
- isolate()->heap()->module_string()) &&
- !scanner().literal_contains_escapes()) {
+ String::Equals(isolate()->factory()->module_string(),
+ estmt->expression()->AsVariableProxy()->name()) &&
+ !scanner()->literal_contains_escapes()) {
return ParseModuleDeclaration(NULL, ok);
}
}
@@ -984,7 +1174,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
// 'module' Identifier Module
int pos = peek_position();
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ Handle<String> name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -994,7 +1184,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
Module* module = ParseModule(CHECK_OK);
VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, top_scope_, pos);
+ factory()->NewModuleDeclaration(proxy, module, scope_, pos);
Declare(declaration, true, CHECK_OK);
#ifdef DEBUG
@@ -1052,14 +1242,14 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(scope_, MODULE_SCOPE);
Expect(Token::LBRACE, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
- scope->SetLanguageMode(EXTENDED_MODE);
+ scope->set_start_position(scanner()->location().beg_pos);
+ scope->SetStrictMode(STRICT);
{
- BlockState block_state(this, scope);
+ BlockState block_state(&scope_, scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1073,17 +1263,15 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
}
Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
+ scope->set_end_position(scanner()->location().end_pos);
body->set_scope(scope);
// Check that all exports are bound.
Interface* interface = scope->interface();
for (Interface::Iterator it = interface->iterator();
!it.done(); it.Advance()) {
- if (scope->LocalLookup(it.name()) == NULL) {
- Handle<String> name(it.name());
- ReportMessage("module_export_undefined",
- Vector<Handle<String> >(&name, 1));
+ if (scope->LookupLocal(it.name()) == NULL) {
+ ParserTraits::ReportMessage("module_export_undefined", it.name());
*ok = false;
return NULL;
}
@@ -1122,7 +1310,7 @@ Module* Parser::ParseModulePath(bool* ok) {
member->interface()->Print();
}
#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("invalid_module_path", name);
return NULL;
}
result = member;
@@ -1137,14 +1325,14 @@ Module* Parser::ParseModuleVariable(bool* ok) {
// Identifier
int pos = peek_position();
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ Handle<String> name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
#ifdef DEBUG
if (FLAG_print_interface_details)
PrintF("# Module variable %s ", name->ToAsciiArray());
#endif
- VariableProxy* proxy = top_scope_->NewUnresolved(
+ VariableProxy* proxy = scope_->NewUnresolved(
factory(), name, Interface::NewModule(zone()),
- scanner().location().beg_pos);
+ scanner()->location().beg_pos);
return factory()->NewModuleVariable(proxy, pos);
}
@@ -1166,7 +1354,7 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Create an empty literal as long as the feature isn't finished.
USE(symbol);
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(scope_, MODULE_SCOPE);
Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
body->set_scope(scope);
Interface* interface = scope->interface();
@@ -1232,12 +1420,12 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
module->interface()->Print();
}
#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("invalid_module_path", name);
return NULL;
}
VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, top_scope_, pos);
+ factory()->NewImportDeclaration(proxy, module, scope_, pos);
Declare(declaration, true, CHECK_OK);
}
@@ -1262,13 +1450,14 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
switch (peek()) {
case Token::IDENTIFIER: {
int pos = position();
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ Handle<String> name =
+ ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) {
names.Add(name, zone());
while (peek() == Token::COMMA) {
Consume(Token::COMMA);
- name = ParseIdentifier(CHECK_OK);
+ name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
names.Add(name, zone());
}
ExpectSemicolon(CHECK_OK);
@@ -1291,12 +1480,12 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
default:
*ok = false;
- ReportUnexpectedToken(scanner().current_token());
+ ReportUnexpectedToken(scanner()->current_token());
return NULL;
}
// Extract declared names into export declarations and interface.
- Interface* interface = top_scope_->interface();
+ Interface* interface = scope_->interface();
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1311,8 +1500,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// TODO(rossberg): Rethink whether we actually need to store export
// declarations (for compilation?).
// ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, top_scope_, position);
- // top_scope_->AddDeclaration(declaration);
+ // factory()->NewExportDeclaration(proxy, scope_, position);
+ // scope_->AddDeclaration(declaration);
}
ASSERT(result != NULL);
@@ -1438,9 +1627,8 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// In Harmony mode, this case also handles the extension:
// Statement:
// GeneratorDeclaration
- if (!top_scope_->is_classic_mode()) {
- ReportMessageAt(scanner().peek_location(), "strict_function",
- Vector<const char*>::empty());
+ if (strict_mode() == STRICT) {
+ ReportMessageAt(scanner()->peek_location(), "strict_function");
*ok = false;
return NULL;
}
@@ -1484,7 +1672,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
if (declaration_scope->is_function_scope() ||
- declaration_scope->is_strict_or_extended_eval_scope() ||
+ declaration_scope->is_strict_eval_scope() ||
declaration_scope->is_block_scope() ||
declaration_scope->is_module_scope() ||
declaration_scope->is_global_scope()) {
@@ -1494,7 +1682,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// global scope.
var = declaration_scope->is_global_scope()
? declaration_scope->Lookup(name)
- : declaration_scope->LocalLookup(name);
+ : declaration_scope->LookupLocal(name);
if (var == NULL) {
// Declare the name.
var = declaration_scope->DeclareLocal(
@@ -1517,22 +1705,15 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
ASSERT(IsDeclaredVariableMode(var->mode()));
- if (is_extended_mode()) {
- // In harmony mode we treat re-declarations as early errors. See
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ // In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
- SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Variable", *c_string };
- Vector<const char*> args(elms, 2);
- ReportMessage("redeclaration", args);
+ ParserTraits::ReportMessage("var_redeclaration", name);
*ok = false;
return;
}
- Handle<String> message_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector("Variable"),
- TENURED);
- Expression* expression =
- NewThrowTypeError(isolate()->factory()->redeclaration_string(),
- message_string, name);
+ Expression* expression = NewThrowTypeError(
+ "var_redeclaration", name, declaration->position());
declaration_scope->SetIllegalRedeclaration(expression);
}
}
@@ -1552,10 +1733,10 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// same variable if it is declared several times. This is not a
// semantic issue as long as we keep the source order, but it may be
// a performance issue since it may lead to repeated
- // Runtime::DeclareContextSlot() calls.
+ // RuntimeHidden_DeclareContextSlot calls.
declaration_scope->AddDeclaration(declaration);
- if (mode == CONST && declaration_scope->is_global_scope()) {
+ if (mode == CONST_LEGACY && declaration_scope->is_global_scope()) {
// For global const variables we bind the proxy to a variable.
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
@@ -1563,8 +1744,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
declaration_scope, name, mode, true, kind,
kNeedsInitialization, proxy->interface());
} else if (declaration_scope->is_eval_scope() &&
- declaration_scope->is_classic_mode()) {
- // For variable declarations in a non-strict eval scope the proxy is bound
+ declaration_scope->strict_mode() == SLOPPY) {
+ // For variable declarations in a sloppy eval scope the proxy is bound
// to a lookup variable to force a dynamic declaration using the
// DeclareContextSlot runtime function.
Variable::Kind kind = Variable::NORMAL;
@@ -1619,7 +1800,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
var->interface()->Print();
}
#endif
- ReportMessage("module_type_error", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("module_type_error", name);
}
}
}
@@ -1633,11 +1814,12 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
Statement* Parser::ParseNativeDeclaration(bool* ok) {
int pos = peek_position();
Expect(Token::FUNCTION, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ // Allow "eval" or "arguments" for backward compatibility.
+ Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
bool done = (peek() == Token::RPAREN);
while (!done) {
- ParseIdentifier(CHECK_OK);
+ ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
done = (peek() == Token::RPAREN);
if (!done) {
Expect(Token::COMMA, CHECK_OK);
@@ -1657,7 +1839,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// other functions are set up when entering the surrounding scope.
VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, top_scope_, pos);
+ factory()->NewVariableDeclaration(proxy, VAR, scope_, pos);
Declare(declaration, true, CHECK_OK);
NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
name, extension_, RelocInfo::kNoPosition);
@@ -1681,10 +1863,12 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Handle<String> name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
+ scanner()->location(),
is_strict_reserved,
is_generator,
pos,
FunctionLiteral::DECLARATION,
+ FunctionLiteral::NORMAL_ARITY,
CHECK_OK);
// Even if we're not at the top-level of the global or a function
// scope, we treat it as such and introduce the function with its
@@ -1692,10 +1876,11 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
// In extended mode, a function behaves as a lexical binding, except in the
// global scope.
VariableMode mode =
- is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
+ allow_harmony_scoping() &&
+ strict_mode() == STRICT && !scope_->is_global_scope() ? LET : VAR;
VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_, pos);
+ factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
Declare(declaration, true, CHECK_OK);
if (names) names->Add(name, zone());
return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
@@ -1703,7 +1888,9 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
- if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ return ParseScopedBlock(labels, ok);
+ }
// Block ::
// '{' Statement* '}'
@@ -1736,12 +1923,12 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
// Construct block expecting 16 statements.
Block* body =
factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
- Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
Expect(Token::LBRACE, CHECK_OK);
- block_scope->set_start_position(scanner().location().beg_pos);
- { BlockState block_state(this, block_scope);
+ block_scope->set_start_position(scanner()->location().beg_pos);
+ { BlockState block_state(&scope_, block_scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1754,7 +1941,7 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
}
}
Expect(Token::RBRACE, CHECK_OK);
- block_scope->set_end_position(scanner().location().end_pos);
+ block_scope->set_end_position(scanner()->location().end_pos);
block_scope = block_scope->FinalizeBlockScope();
body->set_scope(block_scope);
return body;
@@ -1775,12 +1962,6 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
}
-bool Parser::IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(isolate()->factory()->eval_string()) ||
- string.is_identical_to(isolate()->factory()->arguments_string());
-}
-
-
// If the variable declaration declares exactly one non-const
// variable, then *out is set to that variable. In all other cases,
// *out is untouched; in particular, it is the caller's responsibility
@@ -1825,29 +2006,31 @@ Block* Parser::ParseVariableDeclarations(
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
//
- // However disallowing const in classic mode will break compatibility with
+ // However disallowing const in sloppy mode will break compatibility with
// existing pages. Therefore we keep allowing const with the old
- // non-harmony semantics in classic mode.
+ // non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- switch (top_scope_->language_mode()) {
- case CLASSIC_MODE:
- mode = CONST;
- init_op = Token::INIT_CONST;
+ switch (strict_mode()) {
+ case SLOPPY:
+ mode = CONST_LEGACY;
+ init_op = Token::INIT_CONST_LEGACY;
break;
- case STRICT_MODE:
- ReportMessage("strict_const", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- case EXTENDED_MODE:
- if (var_context == kStatement) {
- // In extended mode 'const' declarations are only allowed in source
- // element positions.
- ReportMessage("unprotected_const", Vector<const char*>::empty());
+ case STRICT:
+ if (allow_harmony_scoping()) {
+ if (var_context == kStatement) {
+ // In strict mode 'const' declarations are only allowed in source
+ // element positions.
+ ReportMessage("unprotected_const");
+ *ok = false;
+ return NULL;
+ }
+ mode = CONST;
+ init_op = Token::INIT_CONST;
+ } else {
+ ReportMessage("strict_const");
*ok = false;
return NULL;
}
- mode = CONST_HARMONY;
- init_op = Token::INIT_CONST_HARMONY;
}
is_const = true;
needs_init = true;
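
The two switch arms above encode different user-visible 'const' semantics. A hedged sketch of the split (standard ES behaviour, not V8 internals): strict-mode CONST is a genuine read-only lexical binding, while the sloppy-mode CONST_LEGACY path keeps the old semantics in which re-assignment was silently ignored.

```ts
"use strict";
const limit = 10;
// limit = 20;  // strict CONST / INIT_CONST: TypeError at runtime;
//              // sloppy CONST_LEGACY accepted the assignment and ignored it
console.log(limit);  // 10
```
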
@@ -1858,15 +2041,17 @@ Block* Parser::ParseVariableDeclarations(
//
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
- if (!is_extended_mode()) {
- ReportMessage("illegal_let", Vector<const char*>::empty());
+ //
+ // TODO(rossberg): make 'let' a legal identifier in sloppy mode.
+ if (!allow_harmony_scoping() || strict_mode() == SLOPPY) {
+ ReportMessage("illegal_let");
*ok = false;
return NULL;
}
Consume(Token::LET);
if (var_context == kStatement) {
// Let declarations are only allowed in source element positions.
- ReportMessage("unprotected_let", Vector<const char*>::empty());
+ ReportMessage("unprotected_let");
*ok = false;
return NULL;
}
@@ -1900,16 +2085,9 @@ Block* Parser::ParseVariableDeclarations(
// Parse variable name.
if (nvars > 0) Consume(Token::COMMA);
- name = ParseIdentifier(CHECK_OK);
+ name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
- // Strict mode variables may not be named eval or arguments
- if (!declaration_scope->is_classic_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_var_name", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
// Declare variable.
// Note that we *always* must treat the initial value via a separate init
// assignment for variables and constants because the value must be assigned
@@ -1929,12 +2107,11 @@ Block* Parser::ParseVariableDeclarations(
is_const ? Interface::NewConst() : Interface::NewValue();
VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, top_scope_, pos);
+ factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
Declare(declaration, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
- ReportMessageAt(scanner().location(), "too_many_variables",
- Vector<const char*>::empty());
+ ReportMessage("too_many_variables");
*ok = false;
return NULL;
}
@@ -1949,7 +2126,7 @@ Block* Parser::ParseVariableDeclarations(
//
// var v; v = x;
//
- // In particular, we need to re-lookup 'v' (in top_scope_, not
+ // In particular, we need to re-lookup 'v' (in scope_, not
// declaration_scope) as it may be a different 'v' than the 'v' in the
// declaration (e.g., if we are inside a 'with' statement or 'catch'
// block).
@@ -1967,11 +2144,11 @@ Block* Parser::ParseVariableDeclarations(
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
- Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
+ Scope* initialization_scope = is_const ? declaration_scope : scope_;
Expression* value = NULL;
int pos = -1;
// Harmony consts have non-optional initializers.
- if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
+ if (peek() == Token::ASSIGN || mode == CONST) {
Expect(Token::ASSIGN, CHECK_OK);
pos = position();
value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
@@ -2034,13 +2211,13 @@ Block* Parser::ParseVariableDeclarations(
// the number of arguments (1 or 2).
initialize = factory()->NewCallRuntime(
isolate()->factory()->InitializeConstGlobal_string(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+ Runtime::FunctionForId(Runtime::kHiddenInitializeConstGlobal),
arguments, pos);
} else {
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
- LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode, pos), zone());
+ StrictMode strict_mode = initialization_scope->strict_mode();
+ arguments->Add(factory()->NewNumberLiteral(strict_mode, pos), zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -2114,11 +2291,13 @@ Block* Parser::ParseVariableDeclarations(
static bool ContainsLabel(ZoneStringList* labels, Handle<String> label) {
ASSERT(!label.is_null());
- if (labels != NULL)
- for (int i = labels->length(); i-- > 0; )
- if (labels->at(i).is_identical_to(label))
+ if (labels != NULL) {
+ for (int i = labels->length(); i-- > 0; ) {
+ if (labels->at(i).is_identical_to(label)) {
return true;
-
+ }
+ }
+ }
return false;
}
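
ContainsLabel and the target stack implement ordinary label resolution. A short usage sketch of what gets resolved (label names are illustrative):

```ts
outer: for (let i = 0; i < 3; i++) {
  for (let j = 0; j < 3; j++) {
    if (j === 1) continue outer;  // looked up against the enclosing labels
    console.log(i, j);            // prints (0,0) (1,0) (2,0)
  }
}
// outer: outer: {}  // duplicate label: rejected via "label_redeclaration" below
```
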
@@ -2144,10 +2323,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// structured. However, these are probably changes we want to
// make later anyway so we should go back and fix this then.
if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
- SmartArrayPointer<char> c_string = label->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Label", *c_string };
- Vector<const char*> args(elms, 2);
- ReportMessage("redeclaration", args);
+ ParserTraits::ReportMessage("label_redeclaration", label);
*ok = false;
return NULL;
}
@@ -2158,7 +2334,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
- top_scope_->RemoveUnresolved(var);
+ scope_->RemoveUnresolved(var);
Expect(Token::COLON, CHECK_OK);
return ParseStatement(labels, ok);
}
@@ -2168,12 +2344,12 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// no line-terminator between the two words.
if (extension_ != NULL &&
peek() == Token::FUNCTION &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() &&
expr != NULL &&
expr->AsVariableProxy() != NULL &&
- expr->AsVariableProxy()->name()->Equals(
- isolate()->heap()->native_string()) &&
- !scanner().literal_contains_escapes()) {
+ String::Equals(isolate()->factory()->native_string(),
+ expr->AsVariableProxy()->name()) &&
+ !scanner()->literal_contains_escapes()) {
return ParseNativeDeclaration(ok);
}
@@ -2181,11 +2357,11 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Only expect semicolon in the former case.
if (!FLAG_harmony_modules ||
peek() != Token::IDENTIFIER ||
- scanner().HasAnyLineTerminatorBeforeNext() ||
+ scanner()->HasAnyLineTerminatorBeforeNext() ||
expr->AsVariableProxy() == NULL ||
- !expr->AsVariableProxy()->name()->Equals(
- isolate()->heap()->module_string()) ||
- scanner().literal_contains_escapes()) {
+ !String::Equals(isolate()->factory()->module_string(),
+ expr->AsVariableProxy()->name()) ||
+ scanner()->literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
return factory()->NewExpressionStatement(expr, pos);
@@ -2222,21 +2398,20 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- label = ParseIdentifier(CHECK_OK);
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
}
IterationStatement* target = NULL;
target = LookupContinueTarget(label, CHECK_OK);
if (target == NULL) {
// Illegal continue statement.
const char* message = "illegal_continue";
- Vector<Handle<String> > args;
if (!label.is_null()) {
message = "unknown_label";
- args = Vector<Handle<String> >(&label, 1);
}
- ReportMessageAt(scanner().location(), message, args);
+ ParserTraits::ReportMessage(message, label);
*ok = false;
return NULL;
}
@@ -2253,9 +2428,10 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- label = ParseIdentifier(CHECK_OK);
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
}
// Parse labeled break statements that target themselves into
// empty statements, e.g. 'l1: l2: l3: break l2;'
@@ -2268,12 +2444,10 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
if (target == NULL) {
// Illegal break statement.
const char* message = "illegal_break";
- Vector<Handle<String> > args;
if (!label.is_null()) {
message = "unknown_label";
- args = Vector<Handle<String> >(&label, 1);
}
- ReportMessageAt(scanner().location(), message, args);
+ ParserTraits::ReportMessage(message, label);
*ok = false;
return NULL;
}
@@ -2290,12 +2464,12 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// reporting any errors on it, because of the way errors are
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
- int pos = position();
+ Scanner::Location loc = scanner()->location();
Token::Value tok = peek();
Statement* result;
Expression* return_value;
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
@@ -2306,26 +2480,19 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
ExpectSemicolon(CHECK_OK);
if (is_generator()) {
Expression* generator = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Expression* yield = factory()->NewYield(
- generator, return_value, Yield::FINAL, pos);
- result = factory()->NewExpressionStatement(yield, pos);
+ generator, return_value, Yield::FINAL, loc.beg_pos);
+ result = factory()->NewExpressionStatement(yield, loc.beg_pos);
} else {
- result = factory()->NewReturnStatement(return_value, pos);
+ result = factory()->NewReturnStatement(return_value, loc.beg_pos);
}
- // An ECMAScript program is considered syntactically incorrect if it
- // contains a return statement that is not within the body of a
- // function. See ECMA-262, section 12.9, page 67.
- //
- // To be consistent with KJS we report the syntax error at runtime.
- Scope* declaration_scope = top_scope_->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_eval_scope()) {
- Handle<String> message = isolate()->factory()->illegal_return_string();
- Expression* throw_error =
- NewThrowSyntaxError(message, Handle<Object>::null());
- return factory()->NewExpressionStatement(throw_error, pos);
+ Scope* decl_scope = scope_->DeclarationScope();
+ if (decl_scope->is_global_scope() || decl_scope->is_eval_scope()) {
+ ReportMessageAt(loc, "illegal_return");
+ *ok = false;
+ return NULL;
}
return result;
}
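
After this change a 'return' outside any function is rejected with an early SyntaxError ("illegal_return") rather than being desugared into a runtime throw as before. Sketch:

```ts
function answer(): number {
  return 42;      // fine: inside a function body
}
console.log(answer());
// return 0;      // at script top level: early SyntaxError ("illegal_return")
```
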
@@ -2338,8 +2505,8 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::WITH, CHECK_OK);
int pos = position();
- if (!top_scope_->is_classic_mode()) {
- ReportMessage("strict_mode_with", Vector<const char*>::empty());
+ if (strict_mode() == STRICT) {
+ ReportMessage("strict_mode_with");
*ok = false;
return NULL;
}
@@ -2348,13 +2515,13 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- top_scope_->DeclarationScope()->RecordWithStatement();
- Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
+ scope_->DeclarationScope()->RecordWithStatement();
+ Scope* with_scope = NewScope(scope_, WITH_SCOPE);
Statement* stmt;
- { BlockState block_state(this, with_scope);
- with_scope->set_start_position(scanner().peek_location().beg_pos);
+ { BlockState block_state(&scope_, with_scope);
+ with_scope->set_start_position(scanner()->peek_location().beg_pos);
stmt = ParseStatement(labels, CHECK_OK);
- with_scope->set_end_position(scanner().location().end_pos);
+ with_scope->set_end_position(scanner()->location().end_pos);
}
return factory()->NewWithStatement(with_scope, expr, stmt, pos);
}
@@ -2372,8 +2539,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
} else {
Expect(Token::DEFAULT, CHECK_OK);
if (*default_seen_ptr) {
- ReportMessage("multiple_defaults_in_switch",
- Vector<const char*>::empty());
+ ReportMessage("multiple_defaults_in_switch");
*ok = false;
return NULL;
}
@@ -2428,8 +2594,8 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
int pos = position();
- if (scanner().HasAnyLineTerminatorBeforeNext()) {
- ReportMessage("newline_after_throw", Vector<const char*>::empty());
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ ReportMessage("newline_after_throw");
*ok = false;
return NULL;
}
@@ -2465,7 +2631,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessage("no_catch_or_finally", Vector<const char*>::empty());
+ ReportMessage("no_catch_or_finally");
*ok = false;
return NULL;
}
@@ -2483,35 +2649,28 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- catch_scope = NewScope(top_scope_, CATCH_SCOPE);
- catch_scope->set_start_position(scanner().location().beg_pos);
- name = ParseIdentifier(CHECK_OK);
-
- if (!top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_catch_variable", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ catch_scope = NewScope(scope_, CATCH_SCOPE);
+ catch_scope->set_start_position(scanner()->location().beg_pos);
+ name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- if (peek() == Token::LBRACE) {
- Target target(&this->target_stack_, &catch_collector);
- VariableMode mode = is_extended_mode() ? LET : VAR;
- catch_variable =
- catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
+ Target target(&this->target_stack_, &catch_collector);
+ VariableMode mode =
+ allow_harmony_scoping() && strict_mode() == STRICT ? LET : VAR;
+ catch_variable =
+ catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
- BlockState block_state(this, catch_scope);
- catch_block = ParseBlock(NULL, CHECK_OK);
- } else {
- Expect(Token::LBRACE, CHECK_OK);
- }
- catch_scope->set_end_position(scanner().location().end_pos);
+ BlockState block_state(&scope_, catch_scope);
+ catch_block = ParseBlock(NULL, CHECK_OK);
+
+ catch_scope->set_end_position(scanner()->location().end_pos);
tok = peek();
}
Block* finally_block = NULL;
- if (tok == Token::FINALLY || catch_block == NULL) {
+ ASSERT(tok == Token::FINALLY || catch_block != NULL);
+ if (tok == Token::FINALLY) {
Consume(Token::FINALLY);
finally_block = ParseBlock(NULL, CHECK_OK);
}
@@ -2524,7 +2683,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL && finally_block != NULL) {
// If we have both, create an inner try/catch.
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
TryCatchStatement* statement = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block,
RelocInfo::kNoPosition);
@@ -2538,12 +2697,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
result = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block, pos);
} else {
ASSERT(finally_block != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
result = factory()->NewTryFinallyStatement(
index, try_block, finally_block, pos);
// Combine the jump targets of the try block and the possible catch block.
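
The rewritten catch clause always allocates a CATCH_SCOPE and, under harmony scoping in strict code, declares the catch variable with mode LET. The contract this preserves, sketched in user-level terms:

```ts
try {
  throw new Error("boom");
} catch (e) {                       // 'e' is bound in its own catch scope
  console.log(e instanceof Error);  // true
}
// console.log(e);  // ReferenceError: the binding never escapes the clause
```
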
@@ -2623,21 +2782,46 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
if (for_of != NULL) {
Factory* heap_factory = isolate()->factory();
- Variable* iterator = top_scope_->DeclarationScope()->NewTemporary(
+ Variable* iterable = scope_->DeclarationScope()->NewTemporary(
+ heap_factory->dot_iterable_string());
+ Variable* iterator = scope_->DeclarationScope()->NewTemporary(
heap_factory->dot_iterator_string());
- Variable* result = top_scope_->DeclarationScope()->NewTemporary(
+ Variable* result = scope_->DeclarationScope()->NewTemporary(
heap_factory->dot_result_string());
+ Expression* assign_iterable;
Expression* assign_iterator;
Expression* next_result;
Expression* result_done;
Expression* assign_each;
- // var iterator = iterable;
+ // var iterable = subject;
{
+ Expression* iterable_proxy = factory()->NewVariableProxy(iterable);
+ assign_iterable = factory()->NewAssignment(
+ Token::ASSIGN, iterable_proxy, subject, subject->position());
+ }
+
+ // var iterator = iterable[Symbol.iterator]();
+ {
+ Expression* iterable_proxy = factory()->NewVariableProxy(iterable);
+ Handle<Symbol> iterator_symbol(
+ isolate()->native_context()->iterator_symbol(), isolate());
+ Expression* iterator_symbol_literal = factory()->NewLiteral(
+ iterator_symbol, RelocInfo::kNoPosition);
+ // FIXME(wingo): Unhappily, it will be a common error that the RHS of a
+ // for-of doesn't have a Symbol.iterator property. We should do better
+ // than informing the user that "undefined is not a function".
+ int pos = subject->position();
+ Expression* iterator_property = factory()->NewProperty(
+ iterable_proxy, iterator_symbol_literal, pos);
+ ZoneList<Expression*>* iterator_arguments =
+ new(zone()) ZoneList<Expression*>(0, zone());
+ Expression* iterator_call = factory()->NewCall(
+ iterator_property, iterator_arguments, pos);
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
assign_iterator = factory()->NewAssignment(
- Token::ASSIGN, iterator_proxy, subject, RelocInfo::kNoPosition);
+ Token::ASSIGN, iterator_proxy, iterator_call, RelocInfo::kNoPosition);
}
// var result = iterator.next();
@@ -2677,28 +2861,195 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
}
for_of->Initialize(each, subject, body,
- assign_iterator, next_result, result_done, assign_each);
+ assign_iterable,
+ assign_iterator,
+ next_result,
+ result_done,
+ assign_each);
} else {
stmt->Initialize(each, subject, body);
}
}
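
The new lowering calls iterable[Symbol.iterator]() once and then drives the standard iteration protocol. Expressed as equivalent user-level TypeScript (a sketch; the comments name the AST pieces built above):

```ts
const subject = [10, 20, 30];
const iterable = subject;                      // assign_iterable
const iterator = iterable[Symbol.iterator]();  // assign_iterator
let result = iterator.next();                  // next_result
while (!result.done) {                         // result_done
  const each = result.value;                   // assign_each
  console.log(each);                           // body: 10, 20, 30
  result = iterator.next();
}
```
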
+Statement* Parser::DesugarLetBindingsInForStatement(
+ Scope* inner_scope, ZoneStringList* names, ForStatement* loop,
+ Statement* init, Expression* cond, Statement* next, Statement* body,
+ bool* ok) {
+ // ES6 13.6.3.4 specifies that on each loop iteration the let variables are
+ // copied into a new environment. After copying, the "next" statement of the
+ // loop is executed to update the loop variables. The loop condition is
+ // checked and the loop body is executed.
+ //
+ // We rewrite a for statement of the form
+ //
+ // for (let x = i; cond; next) body
+ //
+ // into
+ //
+ // {
+ // let x = i;
+ // temp_x = x;
+ // flag = 1;
+ // for (;;) {
+ // let x = temp_x;
+ // if (flag == 1) {
+ // flag = 0;
+ // } else {
+ // next;
+ // }
+ // if (cond) {
+ // <empty>
+ // } else {
+ // break;
+ // }
+ //   body
+ // temp_x = x;
+ // }
+ // }
+
+ ASSERT(names->length() > 0);
+ Scope* for_scope = scope_;
+ ZoneList<Variable*> temps(names->length(), zone());
+
+ Block* outer_block = factory()->NewBlock(NULL, names->length() + 3, false,
+ RelocInfo::kNoPosition);
+ outer_block->AddStatement(init, zone());
+
+ Handle<String> temp_name = isolate()->factory()->dot_for_string();
+ Handle<Smi> smi0 = handle(Smi::FromInt(0), isolate());
+ Handle<Smi> smi1 = handle(Smi::FromInt(1), isolate());
+
+
+ // For each let variable x:
+ // make statement: temp_x = x.
+ for (int i = 0; i < names->length(); i++) {
+ VariableProxy* proxy =
+ NewUnresolved(names->at(i), LET, Interface::NewValue());
+ Variable* temp = scope_->DeclarationScope()->NewTemporary(temp_name);
+ VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
+ Statement* assignment_statement = factory()->NewExpressionStatement(
+ assignment, RelocInfo::kNoPosition);
+ outer_block->AddStatement(assignment_statement, zone());
+ temps.Add(temp, zone());
+ }
+
+ Variable* flag = scope_->DeclarationScope()->NewTemporary(temp_name);
+ // Make statement: flag = 1.
+ {
+ VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+ Expression* const1 = factory()->NewLiteral(smi1, RelocInfo::kNoPosition);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
+ Statement* assignment_statement = factory()->NewExpressionStatement(
+ assignment, RelocInfo::kNoPosition);
+ outer_block->AddStatement(assignment_statement, zone());
+ }
+
+ outer_block->AddStatement(loop, zone());
+ outer_block->set_scope(for_scope);
+ scope_ = inner_scope;
+
+ Block* inner_block = factory()->NewBlock(NULL, 2 * names->length() + 3,
+ false, RelocInfo::kNoPosition);
+ int pos = scanner()->location().beg_pos;
+ ZoneList<Variable*> inner_vars(names->length(), zone());
+
+ // For each let variable x:
+ // make statement: let x = temp_x.
+ for (int i = 0; i < names->length(); i++) {
+ VariableProxy* proxy =
+ NewUnresolved(names->at(i), LET, Interface::NewValue());
+ Declaration* declaration =
+ factory()->NewVariableDeclaration(proxy, LET, scope_, pos);
+ Declare(declaration, true, CHECK_OK);
+ inner_vars.Add(declaration->proxy()->var(), zone());
+ VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT_LET, proxy, temp_proxy, pos);
+ Statement* assignment_statement = factory()->NewExpressionStatement(
+ assignment, pos);
+ proxy->var()->set_initializer_position(pos);
+ inner_block->AddStatement(assignment_statement, zone());
+ }
+
+ // Make statement: if (flag == 1) { flag = 0; } else { next; }.
+ {
+ Expression* compare = NULL;
+    // Make compare expression: flag == 1.
+ {
+ Expression* const1 = factory()->NewLiteral(smi1, RelocInfo::kNoPosition);
+ VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+ compare = factory()->NewCompareOperation(
+ Token::EQ, flag_proxy, const1, pos);
+ }
+ Statement* clear_flag = NULL;
+ // Make statement: flag = 0.
+ {
+ VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+ Expression* const0 = factory()->NewLiteral(smi0, RelocInfo::kNoPosition);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, flag_proxy, const0, RelocInfo::kNoPosition);
+ clear_flag = factory()->NewExpressionStatement(assignment, pos);
+ }
+ Statement* clear_flag_or_next = factory()->NewIfStatement(
+ compare, clear_flag, next, RelocInfo::kNoPosition);
+ inner_block->AddStatement(clear_flag_or_next, zone());
+ }
+
+
+ // Make statement: if (cond) { } else { break; }.
+ {
+ Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ BreakableStatement* t = LookupBreakTarget(Handle<String>(), CHECK_OK);
+ Statement* stop = factory()->NewBreakStatement(t, RelocInfo::kNoPosition);
+ Statement* if_not_cond_break = factory()->NewIfStatement(
+ cond, empty, stop, cond->position());
+ inner_block->AddStatement(if_not_cond_break, zone());
+ }
+
+ inner_block->AddStatement(body, zone());
+
+ // For each let variable x:
+ // make statement: temp_x = x;
+ for (int i = 0; i < names->length(); i++) {
+ VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
+ int pos = scanner()->location().end_pos;
+ VariableProxy* proxy = factory()->NewVariableProxy(inner_vars.at(i), pos);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
+ Statement* assignment_statement = factory()->NewExpressionStatement(
+ assignment, RelocInfo::kNoPosition);
+ inner_block->AddStatement(assignment_statement, zone());
+ }
+
+ inner_scope->set_end_position(scanner()->location().end_pos);
+ inner_block->set_scope(inner_scope);
+ scope_ = for_scope;
+
+ loop->Initialize(NULL, NULL, NULL, inner_block);
+ return outer_block;
+}
+
+
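
The observable payoff of this desugaring: every iteration of a let-bound for loop gets a fresh copy of the loop variables, so closures created in the body capture distinct values. A minimal check (illustrative):

```ts
const fns: Array<() => number> = [];
for (let x = 0; x < 3; x++) {
  fns.push(() => x);              // captures this iteration's copy of x
}
console.log(fns.map(f => f()));   // [0, 1, 2]; with 'var x' it would be [3, 3, 3]
```
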
Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
int pos = peek_position();
Statement* init = NULL;
+ ZoneStringList let_bindings(1, zone());
// Create an in-between scope for let-bound iteration variables.
- Scope* saved_scope = top_scope_;
- Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
- top_scope_ = for_scope;
+ Scope* saved_scope = scope_;
+ Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
+ scope_ = for_scope;
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- for_scope->set_start_position(scanner().location().beg_pos);
+ for_scope->set_start_position(scanner()->location().beg_pos);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
bool is_const = peek() == Token::CONST;
@@ -2721,15 +3072,15 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, interface);
+ scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, each, enumerable, body);
Block* result =
factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop w/ variable/const declaration.
@@ -2741,8 +3092,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Handle<String> name;
VariableDeclarationProperties decl_props = kHasNoInitializers;
Block* variable_statement =
- ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
- CHECK_OK);
+ ParseVariableDeclarations(kForStatement, &decl_props, &let_bindings,
+ &name, CHECK_OK);
bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
bool accept_OF = decl_props == kHasNoInitializers;
ForEachStatement::VisitMode mode;
@@ -2764,23 +3115,26 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// TODO(keuchel): Move the temporary variable to the block scope, after
// implementing stack allocated block scoped variables.
Factory* heap_factory = isolate()->factory();
- Handle<String> tempstr =
- heap_factory->NewConsString(heap_factory->dot_for_string(), name);
+ Handle<String> tempstr;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate(), tempstr,
+ heap_factory->NewConsString(heap_factory->dot_for_string(), name),
+ 0);
Handle<String> tempname = heap_factory->InternalizeString(tempstr);
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
+ Variable* temp = scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
- top_scope_ = saved_scope;
+ scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
- top_scope_ = for_scope;
+ scope_ = for_scope;
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
+ scope_->NewUnresolved(factory(), name, Interface::NewValue());
Statement* body = ParseStatement(NULL, CHECK_OK);
Block* body_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
@@ -2792,8 +3146,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
body_block->set_scope(for_scope);
// Parsed for-in loop w/ let declaration.
@@ -2803,20 +3157,15 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
init = variable_statement;
}
} else {
+ Scanner::Location lhs_location = scanner()->peek_location();
Expression* expression = ParseExpression(false, CHECK_OK);
ForEachStatement::VisitMode mode;
bool accept_OF = expression->AsVariableProxy();
if (CheckInOrOf(accept_OF, &mode)) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report
- // the error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_for_in_string();
- expression = NewThrowReferenceError(message);
- }
+ expression = this->CheckAndRewriteReferenceExpression(
+ expression, lhs_location, "invalid_lhs_in_for", CHECK_OK);
+
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
@@ -2826,8 +3175,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, expression, enumerable, body);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop.
@@ -2847,6 +3196,15 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// Parsed initializer at this point.
Expect(Token::SEMICOLON, CHECK_OK);
+ // If there are let bindings, then condition and the next statement of the
+ // for loop must be parsed in a new scope.
+ Scope* inner_scope = NULL;
+ if (let_bindings.length() > 0) {
+ inner_scope = NewScope(for_scope, BLOCK_SCOPE);
+ inner_scope->set_start_position(scanner()->location().beg_pos);
+ scope_ = inner_scope;
+ }
+
Expression* cond = NULL;
if (peek() != Token::SEMICOLON) {
cond = ParseExpression(true, CHECK_OK);
@@ -2861,606 +3219,25 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
- for_scope = for_scope->FinalizeBlockScope();
- if (for_scope != NULL) {
- // Rewrite a for statement of the form
- //
- // for (let x = i; c; n) b
- //
- // into
- //
- // {
- // let x = i;
- // for (; c; n) b
- // }
- ASSERT(init != NULL);
- Block* result = factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
- result->AddStatement(init, zone());
- result->AddStatement(loop, zone());
- result->set_scope(for_scope);
- loop->Initialize(NULL, cond, next, body);
- return result;
- } else {
- loop->Initialize(init, cond, next, body);
- return loop;
- }
-}
-
-
-// Precedence = 1
-Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
- // Expression ::
- // AssignmentExpression
- // Expression ',' AssignmentExpression
-
- Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == Token::COMMA) {
- Expect(Token::COMMA, CHECK_OK);
- int pos = position();
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
- }
- return result;
-}
-
-
-// Precedence = 2
-Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
- // AssignmentExpression ::
- // ConditionalExpression
- // YieldExpression
- // LeftHandSideExpression AssignmentOperator AssignmentExpression
-
- if (peek() == Token::YIELD && is_generator()) {
- return ParseYieldExpression(ok);
- }
-
- if (fni_ != NULL) fni_->Enter();
- Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
-
- if (!Token::IsAssignmentOp(peek())) {
- if (fni_ != NULL) fni_->Leave();
- // Parsed conditional expression only (no assignment).
- return expression;
- }
-
- // Signal a reference error if the expression is an invalid left-hand
- // side expression. We could report this as a syntax error here but
- // for compatibility with JSC we choose to report the error at
- // runtime.
- // TODO(ES5): Should change parsing for spec conformance.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_assignment_string();
- expression = NewThrowReferenceError(message);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Assignment to eval or arguments is disallowed in strict mode.
- CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
- }
- MarkAsLValue(expression);
-
- Token::Value op = Next(); // Get assignment operator.
- int pos = position();
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-
- // TODO(1231235): We try to estimate the set of properties set by
- // constructors. We define a new property whenever there is an
- // assignment to a property of 'this'. We should probably only add
- // properties if we haven't seen them before. Otherwise we'll
- // probably overestimate the number of properties.
- Property* property = expression ? expression->AsProperty() : NULL;
- if (op == Token::ASSIGN &&
- property != NULL &&
- property->obj()->AsVariableProxy() != NULL &&
- property->obj()->AsVariableProxy()->is_this()) {
- current_function_state_->AddProperty();
- }
-
- // If we assign a function literal to a property we pretenure the
- // literal so it can be added as a constant function property.
- if (property != NULL && right->AsFunctionLiteral() != NULL) {
- right->AsFunctionLiteral()->set_pretenure();
- }
-
- if (fni_ != NULL) {
- // Check if the right hand side is a call to avoid inferring a
- // name if we're dealing with "a = function(){...}();"-like
- // expression.
- if ((op == Token::INIT_VAR
- || op == Token::INIT_CONST
- || op == Token::ASSIGN)
- && (right->AsCall() == NULL && right->AsCallNew() == NULL)) {
- fni_->Infer();
- } else {
- fni_->RemoveLastFunction();
- }
- fni_->Leave();
- }
-
- return factory()->NewAssignment(op, expression, right, pos);
-}
-
-Expression* Parser::ParseYieldExpression(bool* ok) {
- // YieldExpression ::
- // 'yield' '*'? AssignmentExpression
- int pos = peek_position();
- Expect(Token::YIELD, CHECK_OK);
- Yield::Kind kind =
- Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
- Expression* generator_object = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
- Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
- Yield* yield = factory()->NewYield(generator_object, expression, kind, pos);
- if (kind == Yield::DELEGATING) {
- yield->set_index(current_function_state_->NextHandlerIndex());
- }
- return yield;
-}
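
ParseYieldExpression distinguished a plain suspending yield from a delegating 'yield*', which additionally reserves a handler index. The two kinds in user code (sketch):

```ts
function* inner() { yield 1; yield 2; }  // plain yields: Yield::SUSPEND
function* outer() {
  yield 0;
  yield* inner();                        // Yield::DELEGATING: forwards to inner
}
console.log([...outer()]);               // [0, 1, 2]
```
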
-
-
-// Precedence = 3
-Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
- // ConditionalExpression ::
- // LogicalOrExpression
- // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
- int pos = peek_position();
- // We start using the binary expression parser for prec >= 4 only!
- Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != Token::CONDITIONAL) return expression;
- Consume(Token::CONDITIONAL);
- // In parsing the first assignment expression in conditional
- // expressions we always accept the 'in' keyword; see ECMA-262,
- // section 11.12, page 58.
- Expression* left = ParseAssignmentExpression(true, CHECK_OK);
- Expect(Token::COLON, CHECK_OK);
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return factory()->NewConditional(expression, left, right, pos);
-}
-
-
-int ParserBase::Precedence(Token::Value tok, bool accept_IN) {
- if (tok == Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return Token::Precedence(tok);
-}
-
-
-// Precedence >= 4
-Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
- ASSERT(prec >= 4);
- Expression* x = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
- // prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- Token::Value op = Next();
- int pos = position();
- Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
-
- // Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->value()->IsNumber() &&
- y && y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
- double x_val = x->AsLiteral()->value()->Number();
- double y_val = y->AsLiteral()->value()->Number();
-
- switch (op) {
- case Token::ADD:
- x = factory()->NewNumberLiteral(x_val + y_val, pos);
- continue;
- case Token::SUB:
- x = factory()->NewNumberLiteral(x_val - y_val, pos);
- continue;
- case Token::MUL:
- x = factory()->NewNumberLiteral(x_val * y_val, pos);
- continue;
- case Token::DIV:
- x = factory()->NewNumberLiteral(x_val / y_val, pos);
- continue;
- case Token::BIT_OR: {
- int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::BIT_AND: {
- int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::BIT_XOR: {
- int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::SHL: {
- int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::SHR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- uint32_t value = DoubleToUint32(x_val) >> shift;
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::SAR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- default:
- break;
- }
- }
-
- // For now we distinguish between comparisons and other binary
- // operations. (We could combine the two and get rid of this
- // code and AST node eventually.)
- if (Token::IsCompareOp(op)) {
- // We have a comparison.
- Token::Value cmp = op;
- switch (op) {
- case Token::NE: cmp = Token::EQ; break;
- case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
- default: break;
- }
- x = factory()->NewCompareOperation(cmp, x, y, pos);
- if (cmp != op) {
- // The comparison was negated - add a NOT.
- x = factory()->NewUnaryOperation(Token::NOT, x, pos);
- }
-
- } else {
- // We have a "normal" binary operation.
- x = factory()->NewBinaryOperation(op, x, y, pos);
- }
- }
- }
- return x;
-}
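
The folding loop removed here evaluated operators over number literals at parse time, using ToInt32/ToUint32 conversions for the bitwise cases (the logic is relocated by this update rather than dropped). A compact restatement of the same arithmetic as a sketch; in JS/TS, 'x | 0' is exactly ToInt32 and 'x >>> 0' is ToUint32:

```ts
function foldNumbers(op: string, x: number, y: number): number | null {
  switch (op) {
    case "+":   return x + y;
    case "-":   return x - y;
    case "*":   return x * y;
    case "/":   return x / y;
    case "|":   return (x | 0) | (y | 0);
    case "&":   return (x | 0) & (y | 0);
    case "^":   return (x | 0) ^ (y | 0);
    case "<<":  return (x | 0) << ((y | 0) & 0x1f);     // shift count masked to 5 bits
    case ">>":  return (x | 0) >> ((y | 0) & 0x1f);     // arithmetic shift (Token::SAR)
    case ">>>": return (x >>> 0) >>> ((y | 0) & 0x1f);  // logical shift (Token::SHR)
    default:    return null;  // not foldable; keep the BinaryOperation node
  }
}
console.log(foldNumbers("<<", 1, 33));  // 2 -- the 0x1f mask reduces 33 to 1
```
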
-
-
-Expression* Parser::ParseUnaryExpression(bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
-
- Token::Value op = peek();
- if (Token::IsUnaryOp(op)) {
- op = Next();
- int pos = position();
- Expression* expression = ParseUnaryExpression(CHECK_OK);
-
- if (expression != NULL && (expression->AsLiteral() != NULL)) {
- Handle<Object> literal = expression->AsLiteral()->value();
- if (op == Token::NOT) {
- // Convert the literal to a boolean condition and negate it.
- bool condition = literal->BooleanValue();
- Handle<Object> result = isolate()->factory()->ToBoolean(!condition);
- return factory()->NewLiteral(result, pos);
- } else if (literal->IsNumber()) {
- // Compute some expressions involving only number literals.
- double value = literal->Number();
- switch (op) {
- case Token::ADD:
- return expression;
- case Token::SUB:
- return factory()->NewNumberLiteral(-value, pos);
- case Token::BIT_NOT:
- return factory()->NewNumberLiteral(~DoubleToInt32(value), pos);
- default:
- break;
- }
- }
- }
-
- // "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
- VariableProxy* operand = expression->AsVariableProxy();
- if (operand != NULL && !operand->is_this()) {
- ReportMessage("strict_delete", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- }
-
- // Desugar '+foo' into 'foo*1', this enables the collection of type feedback
- // without any special stub and the multiplication is removed later in
- // Crankshaft's canonicalization pass.
- if (op == Token::ADD) {
- return factory()->NewBinaryOperation(Token::MUL,
- expression,
- factory()->NewNumberLiteral(1, pos),
- pos);
- }
- // The same idea for '-foo' => 'foo*(-1)'.
- if (op == Token::SUB) {
- return factory()->NewBinaryOperation(Token::MUL,
- expression,
- factory()->NewNumberLiteral(-1, pos),
- pos);
- }
- // ...and one more time for '~foo' => 'foo^(~0)'.
- if (op == Token::BIT_NOT) {
- return factory()->NewBinaryOperation(Token::BIT_XOR,
- expression,
- factory()->NewNumberLiteral(~0, pos),
- pos);
- }
-
- return factory()->NewUnaryOperation(op, expression, pos);
-
- } else if (Token::IsCountOp(op)) {
- op = Next();
- Expression* expression = ParseUnaryExpression(CHECK_OK);
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_prefix_op_string();
- expression = NewThrowReferenceError(message);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Prefix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
- }
- MarkAsLValue(expression);
-
- return factory()->NewCountOperation(op,
- true /* prefix */,
- expression,
- position());
-
- } else {
- return ParsePostfixExpression(ok);
- }
-}
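
The removed unary block also desugared '+foo' into 'foo*1', '-foo' into 'foo*(-1)' and '~foo' into 'foo^(~0)' so the binary-op machinery could collect type feedback. The identities it relies on hold for every JS number; a quick self-check:

```ts
for (const n of [3.5, -0, NaN, 2 ** 31]) {
  console.log(Object.is(+n, n * 1));   // '+foo' => 'foo * 1'
  console.log(Object.is(-n, n * -1));  // '-foo' => 'foo * (-1)'
  console.log(~n === (n ^ ~0));        // '~foo' => 'foo ^ (~0)'
}
// every line prints true
```
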
-
-
-Expression* Parser::ParsePostfixExpression(bool* ok) {
- // PostfixExpression ::
- // LeftHandSideExpression ('++' | '--')?
-
- Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
- Token::IsCountOp(peek())) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_postfix_op_string();
- expression = NewThrowReferenceError(message);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Postfix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
- }
- MarkAsLValue(expression);
-
- Token::Value next = Next();
- expression =
- factory()->NewCountOperation(next,
- false /* postfix */,
- expression,
- position());
- }
- return expression;
-}
-
-
-Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
- // LeftHandSideExpression ::
- // (NewExpression | MemberExpression) ...
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = position();
- Expression* index = ParseExpression(true, CHECK_OK);
- result = factory()->NewProperty(result, index, pos);
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
-
- case Token::LPAREN: {
- int pos;
- if (scanner().current_token() == Token::IDENTIFIER) {
- // For call of an identifier we want to report position of
- // the identifier as position of the call in the stack trace.
- pos = position();
- } else {
- // For other kinds of calls we record position of the parenthesis as
- // position of the call. Note that this is extremely important for
- // expressions of the form function(){...}() for which call position
-            // should not point to the closing brace, otherwise it will intersect
- // with positions recorded for function literal and confuse debugger.
- pos = peek_position();
-            // Also the trailing parentheses are a hint that the function will
- // be called immediately. If we happen to have parsed a preceding
- // function literal eagerly, we can also compile it eagerly.
- if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
- result->AsFunctionLiteral()->set_parenthesized();
- }
- }
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
-
- // Keep track of eval() calls since they disable all local variable
- // optimizations.
- // The calls that need special treatment are the
- // direct eval calls. These calls are all of the form eval(...), with
- // no explicit receiver.
- // These calls are marked as potentially direct eval calls. Whether
- // they are actually direct calls to eval is determined at run time.
- VariableProxy* callee = result->AsVariableProxy();
- if (callee != NULL &&
- callee->IsVariable(isolate()->factory()->eval_string())) {
- top_scope_->DeclarationScope()->RecordEvalCall();
- }
- result = factory()->NewCall(result, args, pos);
- if (fni_ != NULL) fni_->RemoveLastFunction();
- break;
- }
-
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = position();
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = factory()->NewProperty(
- result, factory()->NewLiteral(name, pos), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
-
- default:
- return result;
- }
- }
-}
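
The eval bookkeeping above exists because only a direct call of the syntactic form eval(...) may see the caller's scope; any aliased or computed call evaluates in global scope, and whether a direct-looking call really hits eval is only known at run time. Sketch:

```ts
const indirectEval = eval;            // any alias makes the call indirect
function probe(): void {
  const secret = 42;
  console.log(eval("typeof secret"));          // "number": direct eval sees locals
  console.log(indirectEval("typeof secret"));  // "undefined": global scope only
}
probe();
```
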
-
-
-Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
- // NewExpression ::
- // ('new')+ MemberExpression
-
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- Expect(Token::NEW, CHECK_OK);
- PositionStack::Element pos(stack, position());
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewPrefix(stack, CHECK_OK);
+ Statement* result = NULL;
+ if (let_bindings.length() > 0) {
+ scope_ = for_scope;
+ result = DesugarLetBindingsInForStatement(inner_scope, &let_bindings, loop,
+ init, cond, next, body, CHECK_OK);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
} else {
- result = ParseMemberWithNewPrefixesExpression(stack, CHECK_OK);
- }
-
- if (!stack->is_empty()) {
- int last = stack->pop();
- result = factory()->NewCallNew(
- result, new(zone()) ZoneList<Expression*>(0, zone()), last);
+ loop->Initialize(init, cond, next, body);
+ result = loop;
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
+ for_scope->FinalizeBlockScope();
}
return result;
}
-Expression* Parser::ParseNewExpression(bool* ok) {
- PositionStack stack(ok);
- return ParseNewPrefix(&stack, ok);
-}
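
The PositionStack existed for the grammar quirk described in the comment above: parsed argument lists attach to the innermost pending 'new'. Concretely (illustrative class names):

```ts
class Inner {}
class Outer { Inner = Inner; }      // instance property referring to the class
const x = new new Outer().Inner();  // parses as: new ((new Outer()).Inner)()
console.log(x instanceof Inner);    // true
```
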
-
-
-Expression* Parser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(NULL, ok);
-}
-
-
-Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok) {
- // MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
-
- // Parse the initial primary or function expression.
- Expression* result = NULL;
- if (peek() == Token::FUNCTION) {
- Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = position();
- bool is_generator = allow_generators() && Check(Token::MUL);
- Handle<String> name;
- bool is_strict_reserved_name = false;
- if (peek_any_identifier()) {
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
- CHECK_OK);
- }
- FunctionLiteral::FunctionType function_type = name.is_null()
- ? FunctionLiteral::ANONYMOUS_EXPRESSION
- : FunctionLiteral::NAMED_EXPRESSION;
- result = ParseFunctionLiteral(name,
- is_strict_reserved_name,
- is_generator,
- function_token_position,
- function_type,
- CHECK_OK);
- } else {
- result = ParsePrimaryExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = position();
- Expression* index = ParseExpression(true, CHECK_OK);
- result = factory()->NewProperty(result, index, pos);
- if (fni_ != NULL) {
- if (index->IsPropertyName()) {
- fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
- } else {
- fni_->PushLiteralName(
- isolate()->factory()->anonymous_function_string());
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = position();
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = factory()->NewProperty(
- result, factory()->NewLiteral(name, pos), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
- case Token::LPAREN: {
- if ((stack == NULL) || stack->is_empty()) return result;
- // Consume one of the new prefixes (already parsed).
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int pos = stack->pop();
- result = factory()->NewCallNew(result, args, pos);
- break;
- }
- default:
- return result;
- }
- }
-}
-
-
DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
// In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
// contexts this is used as a statement which invokes the debugger as if a
@@ -3475,197 +3252,14 @@ DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
}
-void Parser::ReportUnexpectedToken(Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram/ParseJson.
- if (token == Token::ILLEGAL && stack_overflow()) return;
- // Four of the tokens are treated specially
- switch (token) {
- case Token::EOS:
- return ReportMessage("unexpected_eos", Vector<const char*>::empty());
- case Token::NUMBER:
- return ReportMessage("unexpected_token_number",
- Vector<const char*>::empty());
- case Token::STRING:
- return ReportMessage("unexpected_token_string",
- Vector<const char*>::empty());
- case Token::IDENTIFIER:
- return ReportMessage("unexpected_token_identifier",
- Vector<const char*>::empty());
- case Token::FUTURE_RESERVED_WORD:
- return ReportMessage("unexpected_reserved",
- Vector<const char*>::empty());
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessage(top_scope_->is_classic_mode() ?
- "unexpected_token_identifier" :
- "unexpected_strict_reserved",
- Vector<const char*>::empty());
- default:
- const char* name = Token::String(token);
- ASSERT(name != NULL);
- ReportMessage("unexpected_token", Vector<const char*>(&name, 1));
- }
-}
-
-
-void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
- SmartArrayPointer<char> name_string = name->ToCString(DISALLOW_NULLS);
- const char* element[1] = { *name_string };
- ReportMessage("invalid_preparser_data",
- Vector<const char*>(element, 1));
+void Parser::ReportInvalidCachedData(Handle<String> name, bool* ok) {
+ ParserTraits::ReportMessage("invalid_cached_data_function", name);
*ok = false;
}
-Expression* Parser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- int pos = peek_position();
- Expression* result = NULL;
- switch (peek()) {
- case Token::THIS: {
- Consume(Token::THIS);
- result = factory()->NewVariableProxy(top_scope_->receiver());
- break;
- }
-
- case Token::NULL_LITERAL:
- Consume(Token::NULL_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->null_value(), pos);
- break;
-
- case Token::TRUE_LITERAL:
- Consume(Token::TRUE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->true_value(), pos);
- break;
-
- case Token::FALSE_LITERAL:
- Consume(Token::FALSE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->false_value(), pos);
- break;
-
- case Token::IDENTIFIER:
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
- Handle<String> name = ParseIdentifier(CHECK_OK);
- if (fni_ != NULL) fni_->PushVariableName(name);
- // The name may refer to a module instance object, so its type is unknown.
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Variable %s ", name->ToAsciiArray());
-#endif
- Interface* interface = Interface::NewUnknown(zone());
- result = top_scope_->NewUnresolved(factory(), name, interface, pos);
- break;
- }
-
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTAL |
- ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- result = factory()->NewNumberLiteral(value, pos);
- break;
- }
-
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> symbol = GetSymbol();
- result = factory()->NewLiteral(symbol, pos);
- if (fni_ != NULL) fni_->PushLiteralName(symbol);
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- // Heuristically try to detect immediately called functions before
- // seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- break;
-
- case Token::MOD:
- if (allow_natives_syntax() || extension_ != NULL) {
- result = ParseV8Intrinsic(CHECK_OK);
- break;
- }
- // If we're not allowing special syntax we fall-through to the
- // default case.
-
- default: {
- Token::Value tok = Next();
- ReportUnexpectedToken(tok);
- *ok = false;
- return NULL;
- }
- }
-
- return result;
-}
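
The LPAREN case above carried the lazy-parsing heuristic: an opening parenthesis directly followed by 'function' usually introduces an immediately-invoked expression, so the literal gets flagged for eager compilation instead of being parsed twice. The pattern it targets (sketch):

```ts
const answer = (function () {  // '(' + 'function': parenthesized_function_ = true
  return 6 * 7;
})();                          // called immediately, so eager compilation pays off
console.log(answer);           // 42
```
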
-
-
-Expression* Parser::ParseArrayLiteral(bool* ok) {
- // ArrayLiteral ::
- // '[' Expression? (',' Expression?)* ']'
-
- int pos = peek_position();
- ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone());
- Expect(Token::LBRACK, CHECK_OK);
- while (peek() != Token::RBRACK) {
- Expression* elem;
- if (peek() == Token::COMMA) {
- elem = GetLiteralTheHole(peek_position());
- } else {
- elem = ParseAssignmentExpression(true, CHECK_OK);
- }
- values->Add(elem, zone());
- if (peek() != Token::RBRACK) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
-
- // Update the scope information before the pre-parsing bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- return factory()->NewArrayLiteral(values, literal_index, pos);
-}
-
-
bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
- if (expression->AsLiteral() != NULL) return true;
+ if (expression->IsLiteral()) return true;
MaterializedLiteral* lit = expression->AsMaterializedLiteral();
return lit != NULL && lit->is_simple();
}
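 // (Illustrative note, not in the original source: a literal such as 1 or "x"
 // is a compile-time value, as is a "simple" materialized literal like [1, 2]
 // or {a: 1} whose elements are themselves compile-time values; {a: f()} is
 // not, since its boilerplate cannot be built at compile time.)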
@@ -3707,318 +3301,23 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
}
-Expression* Parser::ParseObjectLiteral(bool* ok) {
- // ObjectLiteral ::
- // '{' (
- // ((IdentifierName | String | Number) ':' AssignmentExpression)
- // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // )*[','] '}'
-
- int pos = peek_position();
- ZoneList<ObjectLiteral::Property*>* properties =
- new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone());
- int number_of_boilerplate_properties = 0;
- bool has_function = false;
-
- ObjectLiteralChecker checker(this, top_scope_->language_mode());
-
- Expect(Token::LBRACE, CHECK_OK);
-
- while (peek() != Token::RBRACE) {
- if (fni_ != NULL) fni_->Enter();
-
- Literal* key = NULL;
- Token::Value next = peek();
- int next_pos = peek_position();
-
- switch (next) {
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::IDENTIFIER: {
- bool is_getter = false;
- bool is_setter = false;
- Handle<String> id =
- ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if (fni_ != NULL) fni_->PushLiteralName(id);
-
- if ((is_getter || is_setter) && peek() != Token::COLON) {
- // Special handling of getter and setter syntax:
- // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
- // We have already read the "get" or "set" keyword.
- Token::Value next = Next();
- bool is_keyword = Token::IsKeyword(next);
- if (next != i::Token::IDENTIFIER &&
- next != i::Token::FUTURE_RESERVED_WORD &&
- next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
- next != i::Token::NUMBER &&
- next != i::Token::STRING &&
- !is_keyword) {
- // Unexpected token.
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
- // Validate the property.
- PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
- checker.CheckProperty(next, type, CHECK_OK);
- Handle<String> name = is_keyword
- ? isolate_->factory()->InternalizeUtf8String(Token::String(next))
- : GetSymbol();
- FunctionLiteral* value =
- ParseFunctionLiteral(name,
- false, // reserved words are allowed here
- false, // not a generator
- RelocInfo::kNoPosition,
- FunctionLiteral::ANONYMOUS_EXPRESSION,
- CHECK_OK);
-        // Allow any number of parameters for compatibility with JSC.
- // Specification only allows zero parameters for get and one for set.
- ObjectLiteral::Property* property =
- factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
- if (ObjectLiteral::IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- properties->Add(property, zone());
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- continue; // restart the while
- }
- // Failed to parse as get/set property, so it's just a property
- // called "get" or "set".
- key = factory()->NewLiteral(id, next_pos);
- break;
- }
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> string = GetSymbol();
- if (fni_ != NULL) fni_->PushLiteralName(string);
- uint32_t index;
- if (!string.is_null() && string->AsArrayIndex(&index)) {
- key = factory()->NewNumberLiteral(index, next_pos);
- break;
- }
- key = factory()->NewLiteral(string, next_pos);
- break;
- }
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTAL |
- ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- key = factory()->NewNumberLiteral(value, next_pos);
- break;
- }
- default:
- if (Token::IsKeyword(next)) {
- Consume(next);
- Handle<String> string = GetSymbol();
- key = factory()->NewLiteral(string, next_pos);
- } else {
- // Unexpected token.
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
- }
-
- // Validate the property
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
-
- Expect(Token::COLON, CHECK_OK);
- Expression* value = ParseAssignmentExpression(true, CHECK_OK);
-
- ObjectLiteral::Property* property =
- new(zone()) ObjectLiteral::Property(key, value, isolate());
-
- // Mark top-level object literals that contain function literals and
- // pretenure the literal so it can be added as a constant function
- // property.
- if (top_scope_->DeclarationScope()->is_global_scope() &&
- value->AsFunctionLiteral() != NULL) {
- has_function = true;
- value->AsFunctionLiteral()->set_pretenure();
- }
-
- // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (ObjectLiteral::IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- properties->Add(property, zone());
-
- // TODO(1240767): Consider allowing trailing comma.
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- // Computation of literal_index must happen before pre parse bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- return factory()->NewObjectLiteral(properties,
- literal_index,
- number_of_boilerplate_properties,
- has_function,
- pos);
-}
-
-
-Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
- int pos = peek_position();
- if (!scanner().ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessage("unterminated_regexp", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- Handle<String> js_pattern = NextLiteralString(TENURED);
- scanner().ScanRegExpFlags();
- Handle<String> js_flags = NextLiteralString(TENURED);
- Next();
-
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
-}
-
-
-ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
- // Arguments ::
- // '(' (AssignmentExpression)*[','] ')'
-
- ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4, zone());
- Expect(Token::LPAREN, CHECK_OK);
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
- result->Add(argument, zone());
- if (result->length() > Code::kMaxArguments) {
- ReportMessageAt(scanner().location(), "too_many_arguments",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- done = (peek() == Token::RPAREN);
- if (!done) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RPAREN, CHECK_OK);
- return result;
-}
-
-
-class SingletonLogger : public ParserRecorder {
- public:
- SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
- virtual ~SingletonLogger() { }
-
- void Reset() { has_error_ = false; }
-
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties,
- LanguageMode mode) {
- ASSERT(!has_error_);
- start_ = start;
- end_ = end;
- literals_ = literals;
- properties_ = properties;
- mode_ = mode;
- };
-
- // Logs a symbol creation of a literal or identifier.
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
-
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt) {
- if (has_error_) return;
- has_error_ = true;
- start_ = start;
- end_ = end;
- message_ = message;
- argument_opt_ = argument_opt;
- }
-
- virtual int function_position() { return 0; }
-
- virtual int symbol_position() { return 0; }
-
- virtual int symbol_ids() { return -1; }
-
- virtual Vector<unsigned> ExtractData() {
- UNREACHABLE();
- return Vector<unsigned>();
- }
-
- virtual void PauseRecording() { }
-
- virtual void ResumeRecording() { }
-
- bool has_error() { return has_error_; }
-
- int start() { return start_; }
- int end() { return end_; }
- int literals() {
- ASSERT(!has_error_);
- return literals_;
- }
- int properties() {
- ASSERT(!has_error_);
- return properties_;
- }
- LanguageMode language_mode() {
- ASSERT(!has_error_);
- return mode_;
- }
- const char* message() {
- ASSERT(has_error_);
- return message_;
- }
- const char* argument_opt() {
- ASSERT(has_error_);
- return argument_opt_;
- }
-
- private:
- bool has_error_;
- int start_;
- int end_;
- // For function entries.
- int literals_;
- int properties_;
- LanguageMode mode_;
- // For error messages.
- const char* message_;
- const char* argument_opt_;
-};
-
-
FunctionLiteral* Parser::ParseFunctionLiteral(
Handle<String> function_name,
+ Scanner::Location function_name_location,
bool name_is_strict_reserved,
bool is_generator,
int function_token_pos,
FunctionLiteral::FunctionType function_type,
+ FunctionLiteral::ArityRestriction arity_restriction,
bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
+ //
+ // Getter ::
+ // '(' ')' '{' FunctionBody '}'
+ //
+ // Setter ::
+ // '(' PropertySetParameterList ')' '{' FunctionBody '}'
int pos = function_token_pos == RelocInfo::kNoPosition
? peek_position() : function_token_pos;
@@ -4061,14 +3360,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// one relative to the deserialized scope chain. Otherwise we must be
// compiling a function in an inner declaration scope in the eval, e.g. a
// nested function, and hoisting works normally relative to that.
- Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope =
- function_type == FunctionLiteral::DECLARATION && !is_extended_mode() &&
+ function_type == FunctionLiteral::DECLARATION &&
+ (!allow_harmony_scoping() || strict_mode() == SLOPPY) &&
(original_scope_ == original_declaration_scope ||
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE)
- : NewScope(top_scope_, FUNCTION_SCOPE);
+ : NewScope(scope_, FUNCTION_SCOPE);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -4078,26 +3378,22 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral::IsParenthesizedFlag parenthesized = parenthesized_function_
? FunctionLiteral::kIsParenthesized
: FunctionLiteral::kNotParenthesized;
- FunctionLiteral::IsGeneratorFlag generator = is_generator
- ? FunctionLiteral::kIsGenerator
- : FunctionLiteral::kNotGenerator;
AstProperties ast_properties;
BailoutReason dont_optimize_reason = kNoReason;
// Parse function body.
- { FunctionState function_state(this, scope, isolate());
- top_scope_->SetScopeName(function_name);
+ { FunctionState function_state(&function_state_, &scope_, scope, zone());
+ scope_->SetScopeName(function_name);
if (is_generator) {
// For generators, allocating variables in contexts is currently a win
// because it minimizes the work needed to suspend and resume an
// activation.
- top_scope_->ForceContextAllocation();
+ scope_->ForceContextAllocation();
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
- // expressions. Presence of a variable for the generator object in the
- // FunctionState indicates that this function is a generator.
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(
+ // expressions. This also marks the FunctionState as a generator.
+ Variable* temp = scope_->DeclarationScope()->NewTemporary(
isolate()->factory()->dot_generator_object_string());
function_state.set_generator_object_variable(temp);
}
@@ -4105,37 +3401,43 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
- Scanner::Location name_loc = Scanner::Location::invalid();
- Scanner::Location dupe_loc = Scanner::Location::invalid();
+ scope->set_start_position(scanner()->location().beg_pos);
+
+ // We don't yet know if the function will be strict, so we cannot yet
+ // produce errors for parameter names or duplicates. However, we remember
+ // the locations of these errors if they occur and produce the errors later.
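+    // (For example: function f(eval) { "use strict"; } must be rejected, but
+    // the offending parameter name is scanned before the directive prologue,
+    // so at this point we can only record its location.)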
+ Scanner::Location eval_args_error_log = Scanner::Location::invalid();
+ Scanner::Location dupe_error_loc = Scanner::Location::invalid();
Scanner::Location reserved_loc = Scanner::Location::invalid();
- bool done = (peek() == Token::RPAREN);
+ bool done = arity_restriction == FunctionLiteral::GETTER_ARITY ||
+ (peek() == Token::RPAREN &&
+ arity_restriction != FunctionLiteral::SETTER_ARITY);
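+    // (Sketch of the arity handling: for GETTER_ARITY the parameter list must
+    // be empty, so we are done before parsing any parameter; for SETTER_ARITY
+    // an empty list is not accepted here, and the loop below breaks after
+    // exactly one parameter has been declared.)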
while (!done) {
bool is_strict_reserved = false;
Handle<String> param_name =
ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
// Store locations for possible future error reports.
- if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
- name_loc = scanner().location();
- }
- if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
- duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
- dupe_loc = scanner().location();
+ if (!eval_args_error_log.IsValid() && IsEvalOrArguments(param_name)) {
+ eval_args_error_log = scanner()->location();
}
if (!reserved_loc.IsValid() && is_strict_reserved) {
- reserved_loc = scanner().location();
+ reserved_loc = scanner()->location();
+ }
+ if (!dupe_error_loc.IsValid() && scope_->IsDeclared(param_name)) {
+ duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
+ dupe_error_loc = scanner()->location();
}
- top_scope_->DeclareParameter(param_name, VAR);
+ scope_->DeclareParameter(param_name, VAR);
num_parameters++;
if (num_parameters > Code::kMaxArguments) {
- ReportMessageAt(scanner().location(), "too_many_parameters",
- Vector<const char*>::empty());
+ ReportMessage("too_many_parameters");
*ok = false;
return NULL;
}
+ if (arity_restriction == FunctionLiteral::SETTER_ARITY) break;
done = (peek() == Token::RPAREN);
if (!done) Expect(Token::COMMA, CHECK_OK);
}
@@ -4150,21 +3452,28 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// future we can change the AST to only refer to VariableProxies
 // instead of Variables and Proxies as is the case now.
Variable* fvar = NULL;
- Token::Value fvar_init_op = Token::INIT_CONST;
+ Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
- if (is_extended_mode()) fvar_init_op = Token::INIT_CONST_HARMONY;
- VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
- fvar = new(zone()) Variable(top_scope_,
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ fvar_init_op = Token::INIT_CONST;
+ }
+ VariableMode fvar_mode =
+ allow_harmony_scoping() && strict_mode() == STRICT ? CONST
+ : CONST_LEGACY;
+ fvar = new(zone()) Variable(scope_,
function_name, fvar_mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
- proxy, fvar_mode, top_scope_, RelocInfo::kNoPosition);
- top_scope_->DeclareFunctionVar(fvar_declaration);
+ proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
+ scope_->DeclareFunctionVar(fvar_declaration);
}
- // Determine whether the function will be lazily compiled.
- // The heuristics are:
+ // Determine if the function can be parsed lazily. Lazy parsing is different
+ // from lazy compilation; we need to parse more eagerly than we compile.
+
+ // We can only parse lazily if we also compile lazily. The heuristics for
+ // lazy compilation are:
// - It must not have been prohibited by the caller to Parse (some callers
// need a full AST).
// - The outer scope must allow lazy compilation of inner functions.
@@ -4174,163 +3483,66 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// compiled.
// These are all things we can know at this point, without looking at the
// function itself.
- bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
- top_scope_->AllowsLazyCompilation() &&
- !parenthesized_function_);
- parenthesized_function_ = false; // The bit was set for this function only.
-
- if (is_lazily_compiled) {
- int function_block_pos = position();
- FunctionEntry entry;
- if (pre_parse_data_ != NULL) {
- // If we have pre_parse_data_, we use it to skip parsing the function
- // body. The preparser data contains the information we need to
- // construct the lazy function.
- entry = pre_parse_data()->GetFunctionEntry(function_block_pos);
- if (entry.is_valid()) {
- if (entry.end_pos() <= function_block_pos) {
- // End position greater than end of stream is safe, and hard
- // to check.
- ReportInvalidPreparseData(function_name, CHECK_OK);
- }
- scanner().SeekForward(entry.end_pos() - 1);
-
- scope->set_end_position(entry.end_pos());
- Expect(Token::RBRACE, CHECK_OK);
- isolate()->counters()->total_preparse_skipped()->Increment(
- scope->end_position() - function_block_pos);
- materialized_literal_count = entry.literal_count();
- expected_property_count = entry.property_count();
- top_scope_->SetLanguageMode(entry.language_mode());
- } else {
- is_lazily_compiled = false;
- }
- } else {
- // With no preparser data, we partially parse the function, without
- // building an AST. This gathers the data needed to build a lazy
- // function.
- SingletonLogger logger;
- PreParser::PreParseResult result = LazyParseFunctionLiteral(&logger);
- if (result == PreParser::kPreParseStackOverflow) {
- // Propagate stack overflow.
- set_stack_overflow();
- *ok = false;
- return NULL;
- }
- if (logger.has_error()) {
- const char* arg = logger.argument_opt();
- Vector<const char*> args;
- if (arg != NULL) {
- args = Vector<const char*>(&arg, 1);
- }
- ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
- logger.message(), args);
- *ok = false;
- return NULL;
- }
- scope->set_end_position(logger.end());
- Expect(Token::RBRACE, CHECK_OK);
- isolate()->counters()->total_preparse_skipped()->Increment(
- scope->end_position() - function_block_pos);
- materialized_literal_count = logger.literals();
- expected_property_count = logger.properties();
- top_scope_->SetLanguageMode(logger.language_mode());
- }
- }
- if (!is_lazily_compiled) {
- ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
- body = new(zone()) ZoneList<Statement*>(8, zone());
- if (fvar != NULL) {
- VariableProxy* fproxy = top_scope_->NewUnresolved(
- factory(), function_name, Interface::NewConst());
- fproxy->BindTo(fvar);
- body->Add(factory()->NewExpressionStatement(
- factory()->NewAssignment(fvar_init_op,
- fproxy,
- factory()->NewThisFunction(pos),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition), zone());
- }
-
- // For generators, allocate and yield an iterator on function entry.
- if (is_generator) {
- ZoneList<Expression*>* arguments =
- new(zone()) ZoneList<Expression*>(0, zone());
- CallRuntime* allocation = factory()->NewCallRuntime(
- isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
- arguments, pos);
- VariableProxy* init_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
- Assignment* assignment = factory()->NewAssignment(
- Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
- VariableProxy* get_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
- Yield* yield = factory()->NewYield(
- get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(
- yield, RelocInfo::kNoPosition), zone());
- }
-
- ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
-
- if (is_generator) {
- VariableProxy* get_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
- Expression *undefined = factory()->NewLiteral(
- isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
- Yield* yield = factory()->NewYield(
- get_proxy, undefined, Yield::FINAL, RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(
- yield, RelocInfo::kNoPosition), zone());
- }
+ // In addition, we need to distinguish between these cases:
+ // (function foo() {
+ // bar = function() { return 1; }
+ // })();
+ // and
+ // (function foo() {
+ // var a = 1;
+ // bar = function() { return a; }
+ // })();
+
+ // Now foo will be parsed eagerly and compiled eagerly (optimization: assume
+  // a parenthesis before the function means that it will be called
+ // immediately). The inner function *must* be parsed eagerly to resolve the
+ // possible reference to the variable in foo's scope. However, it's possible
+ // that it will be compiled lazily.
+
+  // To make this additional case work, both Parser and PreParser implement
+  // logic where only top-level functions will be parsed lazily.
+ bool is_lazily_parsed = (mode() == PARSE_LAZILY &&
+ scope_->AllowsLazyCompilation() &&
+ !parenthesized_function_);
+ parenthesized_function_ = false; // The bit was set for this function only.
+ if (is_lazily_parsed) {
+ SkipLazyFunctionBody(function_name, &materialized_literal_count,
+ &expected_property_count, CHECK_OK);
+ } else {
+ body = ParseEagerFunctionBody(function_name, pos, fvar, fvar_init_op,
+ is_generator, CHECK_OK);
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
handler_count = function_state.handler_count();
-
- Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
}
- // Validate strict mode.
- if (!top_scope_->is_classic_mode()) {
+ // Validate strict mode. We can do this only after parsing the function,
+ // since the function can declare itself strict.
+ if (strict_mode() == STRICT) {
if (IsEvalOrArguments(function_name)) {
- int start_pos = scope->start_position();
- int position = function_token_pos != RelocInfo::kNoPosition
- ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
- Scanner::Location location = Scanner::Location(position, start_pos);
- ReportMessageAt(location,
- "strict_function_name", Vector<const char*>::empty());
+ ReportMessageAt(function_name_location, "strict_eval_arguments");
*ok = false;
return NULL;
}
- if (name_loc.IsValid()) {
- ReportMessageAt(name_loc, "strict_param_name",
- Vector<const char*>::empty());
+ if (name_is_strict_reserved) {
+ ReportMessageAt(function_name_location, "unexpected_strict_reserved");
*ok = false;
return NULL;
}
- if (dupe_loc.IsValid()) {
- ReportMessageAt(dupe_loc, "strict_param_dupe",
- Vector<const char*>::empty());
+ if (eval_args_error_log.IsValid()) {
+ ReportMessageAt(eval_args_error_log, "strict_eval_arguments");
*ok = false;
return NULL;
}
- if (name_is_strict_reserved) {
- int start_pos = scope->start_position();
- int position = function_token_pos != RelocInfo::kNoPosition
- ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
- Scanner::Location location = Scanner::Location(position, start_pos);
- ReportMessageAt(location, "strict_reserved_word",
- Vector<const char*>::empty());
+ if (dupe_error_loc.IsValid()) {
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe");
*ok = false;
return NULL;
}
if (reserved_loc.IsValid()) {
- ReportMessageAt(reserved_loc, "strict_reserved_word",
- Vector<const char*>::empty());
+ ReportMessageAt(reserved_loc, "unexpected_strict_reserved");
*ok = false;
return NULL;
}
@@ -4342,10 +3554,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
}
- if (is_extended_mode()) {
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
+ FunctionLiteral::IsGeneratorFlag generator = is_generator
+ ? FunctionLiteral::kIsGenerator
+ : FunctionLiteral::kNotGenerator;
FunctionLiteral* function_literal =
factory()->NewFunctionLiteral(function_name,
scope,
@@ -4369,10 +3584,147 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
-PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
+void Parser::SkipLazyFunctionBody(Handle<String> function_name,
+ int* materialized_literal_count,
+ int* expected_property_count,
+ bool* ok) {
+ int function_block_pos = position();
+ if (cached_data_mode_ == CONSUME_CACHED_DATA) {
+ // If we have cached data, we use it to skip parsing the function body. The
+ // data contains the information we need to construct the lazy function.
+ FunctionEntry entry =
+ (*cached_data())->GetFunctionEntry(function_block_pos);
+ if (entry.is_valid()) {
+ if (entry.end_pos() <= function_block_pos) {
+        // An end position at or before the start of the function body means
+        // the cached data is corrupt. (An end position past the end of the
+        // stream would be safe, and is hard to check anyway.)
+ ReportInvalidCachedData(function_name, ok);
+ if (!*ok) {
+ return;
+ }
+ }
+ scanner()->SeekForward(entry.end_pos() - 1);
+
+ scope_->set_end_position(entry.end_pos());
+ Expect(Token::RBRACE, ok);
+ if (!*ok) {
+ return;
+ }
+ isolate()->counters()->total_preparse_skipped()->Increment(
+ scope_->end_position() - function_block_pos);
+ *materialized_literal_count = entry.literal_count();
+ *expected_property_count = entry.property_count();
+ scope_->SetStrictMode(entry.strict_mode());
+ } else {
+ // This case happens when we have preparse data but it doesn't contain an
+ // entry for the function. Fail the compilation.
+ ReportInvalidCachedData(function_name, ok);
+ return;
+ }
+ } else {
+ // With no cached data, we partially parse the function, without building an
+ // AST. This gathers the data needed to build a lazy function.
+ SingletonLogger logger;
+ PreParser::PreParseResult result =
+ ParseLazyFunctionBodyWithPreParser(&logger);
+ if (result == PreParser::kPreParseStackOverflow) {
+ // Propagate stack overflow.
+ set_stack_overflow();
+ *ok = false;
+ return;
+ }
+ if (logger.has_error()) {
+ ParserTraits::ReportMessageAt(
+ Scanner::Location(logger.start(), logger.end()),
+ logger.message(), logger.argument_opt(), logger.is_reference_error());
+ *ok = false;
+ return;
+ }
+ scope_->set_end_position(logger.end());
+ Expect(Token::RBRACE, ok);
+ if (!*ok) {
+ return;
+ }
+ isolate()->counters()->total_preparse_skipped()->Increment(
+ scope_->end_position() - function_block_pos);
+ *materialized_literal_count = logger.literals();
+ *expected_property_count = logger.properties();
+ scope_->SetStrictMode(logger.strict_mode());
+ if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+ ASSERT(log_);
+ // Position right after terminal '}'.
+ int body_end = scanner()->location().end_pos;
+ log_->LogFunction(function_block_pos, body_end,
+ *materialized_literal_count,
+ *expected_property_count,
+ scope_->strict_mode());
+ }
+ }
+}
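+// (A sketch of the two paths above: under CONSUME_CACHED_DATA the body is
+// skipped using a previously recorded FunctionEntry; otherwise it is
+// preparsed on the spot, and under PRODUCE_CACHED_DATA the result is logged
+// so that a later compile of the same script can take the first path.)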
+
+
+ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
+ Handle<String> function_name, int pos, Variable* fvar,
+ Token::Value fvar_init_op, bool is_generator, bool* ok) {
+ // Everything inside an eagerly parsed function will be parsed eagerly
+ // (see comment above).
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+ ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8, zone());
+ if (fvar != NULL) {
+ VariableProxy* fproxy = scope_->NewUnresolved(
+ factory(), function_name, Interface::NewConst());
+ fproxy->BindTo(fvar);
+ body->Add(factory()->NewExpressionStatement(
+ factory()->NewAssignment(fvar_init_op,
+ fproxy,
+ factory()->NewThisFunction(pos),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition), zone());
+ }
+
+ // For generators, allocate and yield an iterator on function entry.
+ if (is_generator) {
+ ZoneList<Expression*>* arguments =
+ new(zone()) ZoneList<Expression*>(0, zone());
+ CallRuntime* allocation = factory()->NewCallRuntime(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kHiddenCreateJSGeneratorObject),
+ arguments, pos);
+ VariableProxy* init_proxy = factory()->NewVariableProxy(
+ function_state_->generator_object_variable());
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
+ VariableProxy* get_proxy = factory()->NewVariableProxy(
+ function_state_->generator_object_variable());
+ Yield* yield = factory()->NewYield(
+ get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
+ }
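+  // (A sketch of the prologue built above, in pseudo-JS:
+  //   yield (.generator_object = %CreateJSGeneratorObject());
+  // an initial yield that suspends immediately, so the user-written body
+  // only runs once the generator is resumed.)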
+
+ ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
+
+ if (is_generator) {
+ VariableProxy* get_proxy = factory()->NewVariableProxy(
+ function_state_->generator_object_variable());
+ Expression *undefined = factory()->NewLiteral(
+ isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
+ Yield* yield = factory()->NewYield(
+ get_proxy, undefined, Yield::FINAL, RelocInfo::kNoPosition);
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
+ }
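+  // (Likewise, the epilogue above amounts to a final "yield undefined",
+  // marking the generator as completed when control runs off the end of the
+  // body.)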
+
+ Expect(Token::RBRACE, CHECK_OK);
+ scope_->set_end_position(scanner()->location().end_pos);
+
+ return body;
+}
+
+
+PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
SingletonLogger* logger) {
HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
- ASSERT_EQ(Token::LBRACE, scanner().current_token());
+ ASSERT_EQ(Token::LBRACE, scanner()->current_token());
if (reusable_preparser_ == NULL) {
intptr_t stack_limit = isolate()->stack_guard()->real_climit();
@@ -4387,7 +3739,7 @@ PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
allow_harmony_numeric_literals());
}
PreParser::PreParseResult result =
- reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
+ reusable_preparser_->PreParseLazyFunction(strict_mode(),
is_generator(),
logger);
return result;
@@ -4400,13 +3752,14 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
int pos = peek_position();
Expect(Token::MOD, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ // Allow "eval" or "arguments" for backward compatibility.
+ Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time not when reparsing because of lazy compilation.
- top_scope_->DeclarationScope()->ForceEagerCompilation();
+ scope_->DeclarationScope()->ForceEagerCompilation();
}
const Runtime::Function* function = Runtime::FunctionForName(name);
@@ -4421,7 +3774,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
return args->at(0);
} else {
- ReportMessage("not_isvar", Vector<const char*>::empty());
+ ReportMessage("not_isvar");
*ok = false;
return NULL;
}
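   // (Illustrative note: %IS_VAR(x) simply evaluates to x when x is a plain
   // variable reference; anything else, e.g. %IS_VAR(x + 1), is rejected at
   // parse time with "not_isvar".)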
@@ -4431,14 +3784,14 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (function != NULL &&
function->nargs != -1 &&
function->nargs != args->length()) {
- ReportMessage("illegal_access", Vector<const char*>::empty());
+ ReportMessage("illegal_access");
*ok = false;
return NULL;
}
// Check that the function is defined if it's an inline runtime call.
if (function == NULL && name->Get(0) == '_') {
- ReportMessage("not_defined", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("not_defined", name);
*ok = false;
return NULL;
}
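   // (Illustrative note: the '_' prefix denotes inline intrinsics, e.g.
   // %_IsSmi(x), which must be known to the compiler; hence a hard
   // "not_defined" error here rather than deferring to a runtime lookup.)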
@@ -4448,187 +3801,28 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
-bool ParserBase::peek_any_identifier() {
- Token::Value next = peek();
- return next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::YIELD;
-}
-
-
-bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) {
- if (peek() == Token::IDENTIFIER &&
- scanner()->is_next_contextual_keyword(keyword)) {
- Consume(Token::IDENTIFIER);
- return true;
- }
- return false;
-}
-
-
-void ParserBase::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- Token::Value tok = peek();
- if (tok == Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner()->HasAnyLineTerminatorBeforeNext() ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- return;
- }
- Expect(Token::SEMICOLON, ok);
-}
-
-
-void ParserBase::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
- Expect(Token::IDENTIFIER, ok);
- if (!*ok) return;
- if (!scanner()->is_literal_contextual_keyword(keyword)) {
- ReportUnexpectedToken(scanner()->current_token());
- *ok = false;
- }
-}
-
-
Literal* Parser::GetLiteralUndefined(int position) {
return factory()->NewLiteral(
isolate()->factory()->undefined_value(), position);
}
-Literal* Parser::GetLiteralTheHole(int position) {
- return factory()->NewLiteral(
- isolate()->factory()->the_hole_value(), RelocInfo::kNoPosition);
-}
-
-
-// Parses an identifier that is valid for the current scope, in particular it
-// fails on strict mode future reserved keywords in a strict scope.
-Handle<String> Parser::ParseIdentifier(bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER ||
- (top_scope_->is_classic_mode() &&
- (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !is_generator())))) {
- return GetSymbol();
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
-}
-
-
-// Parses an identifier or a strict mode future reserved word, and indicates
-// whether it is strict mode future reserved.
-Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- *is_strict_reserved = false;
- } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !is_generator())) {
- *is_strict_reserved = true;
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol();
-}
-
-
-Handle<String> Parser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol();
-}
-
-
-void Parser::MarkAsLValue(Expression* expression) {
- VariableProxy* proxy = expression != NULL
- ? expression->AsVariableProxy()
- : NULL;
-
- if (proxy != NULL) proxy->MarkAsLValue();
-}
-
-
-// Checks LHS expression for assignment and prefix/postfix increment/decrement
-// in strict mode.
-void Parser::CheckStrictModeLValue(Expression* expression,
- const char* error,
- bool* ok) {
- ASSERT(!top_scope_->is_classic_mode());
- VariableProxy* lhs = expression != NULL
- ? expression->AsVariableProxy()
- : NULL;
-
- if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
- ReportMessage(error, Vector<const char*>::empty());
- *ok = false;
- }
-}
-
-
-// Checks whether an octal literal was last seen between beg_pos and end_pos.
-// If so, reports an error. Only called for strict mode.
-void ParserBase::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- Scanner::Location octal = scanner()->octal_position();
- if (octal.IsValid() && beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal");
- scanner()->clear_octal_position();
- *ok = false;
- }
-}
-
-
void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Declaration* decl = scope->CheckConflictingVarDeclarations();
if (decl != NULL) {
     // In harmony mode we treat conflicting variable bindings as early
// errors. See ES5 16 for a definition of early errors.
Handle<String> name = decl->proxy()->name();
- SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Variable", *c_string };
- Vector<const char*> args(elms, 2);
int position = decl->proxy()->position();
Scanner::Location location = position == RelocInfo::kNoPosition
? Scanner::Location::invalid()
: Scanner::Location(position, position + 1);
- ReportMessageAt(location, "redeclaration", args);
+ ParserTraits::ReportMessageAt(location, "var_redeclaration", name);
*ok = false;
}
}
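 // (Illustrative example of the conflict detected above: in harmony mode,
 //   { let x; var x; }
 // is an early error, reported here as "var_redeclaration".)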
-// This function reads an identifier name and determines whether or not it
-// is 'get' or 'set'.
-Handle<String> Parser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Handle<String> result = ParseIdentifierName(ok);
- if (!*ok) return Handle<String>();
- if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
- const char* token = scanner().literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
// ----------------------------------------------------------------------------
// Parser support
@@ -4686,55 +3880,29 @@ void Parser::RegisterTargetUse(Label* target, Target* stop) {
}
-Expression* Parser::NewThrowReferenceError(Handle<String> message) {
- return NewThrowError(isolate()->factory()->MakeReferenceError_string(),
- message, HandleVector<Object>(NULL, 0));
-}
-
-
-Expression* Parser::NewThrowSyntaxError(Handle<String> message,
- Handle<Object> first) {
- int argc = first.is_null() ? 0 : 1;
- Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
- return NewThrowError(
- isolate()->factory()->MakeSyntaxError_string(), message, arguments);
-}
-
-
-Expression* Parser::NewThrowTypeError(Handle<String> message,
- Handle<Object> first,
- Handle<Object> second) {
- ASSERT(!first.is_null() && !second.is_null());
- Handle<Object> elements[] = { first, second };
- Vector< Handle<Object> > arguments =
- HandleVector<Object>(elements, ARRAY_SIZE(elements));
- return NewThrowError(
- isolate()->factory()->MakeTypeError_string(), message, arguments);
-}
-
-
-Expression* Parser::NewThrowError(Handle<String> constructor,
- Handle<String> message,
- Vector< Handle<Object> > arguments) {
- int argc = arguments.length();
- Handle<FixedArray> elements = isolate()->factory()->NewFixedArray(argc,
- TENURED);
- for (int i = 0; i < argc; i++) {
- Handle<Object> element = arguments[i];
- if (!element.is_null()) {
- elements->set(i, *element);
+void Parser::ThrowPendingError() {
+ if (has_pending_error_) {
+ MessageLocation location(script_,
+ pending_error_location_.beg_pos,
+ pending_error_location_.end_pos);
+ Factory* factory = isolate()->factory();
+ bool has_arg =
+ !pending_error_arg_.is_null() || pending_error_char_arg_ != NULL;
+ Handle<FixedArray> elements = factory->NewFixedArray(has_arg ? 1 : 0);
+ if (!pending_error_arg_.is_null()) {
+ elements->set(0, *(pending_error_arg_.ToHandleChecked()));
+ } else if (pending_error_char_arg_ != NULL) {
+ Handle<String> arg_string =
+ factory->NewStringFromUtf8(CStrVector(pending_error_char_arg_))
+ .ToHandleChecked();
+ elements->set(0, *arg_string);
}
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = pending_error_is_reference_error_
+ ? factory->NewReferenceError(pending_error_message_, array)
+ : factory->NewSyntaxError(pending_error_message_, array);
+ isolate()->Throw(*result, &location);
}
- Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
- elements, FAST_ELEMENTS, TENURED);
-
- int pos = position();
- ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone());
- args->Add(factory()->NewLiteral(message, pos), zone());
- args->Add(factory()->NewLiteral(array, pos), zone());
- CallRuntime* call_constructor =
- factory()->NewCallRuntime(constructor, NULL, args, pos);
- return factory()->NewThrow(call_constructor, pos);
}
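 // (Illustrative note: messages reported via ParserTraits::ReportMessage* are
 // only recorded during parsing; the actual error object is materialized and
 // thrown here, in one place, once parsing has finished.)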
@@ -4811,7 +3979,7 @@ bool RegExpParser::simple() {
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
failed_ = true;
- *error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED);
+ *error_ = isolate()->factory()->NewStringFromAscii(message).ToHandleChecked();
   // Zip to the end to make sure that no more input is read.
current_ = kEndMarker;
next_pos_ = in()->length();
@@ -5327,7 +4495,7 @@ bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
uc32 RegExpParser::ParseOctalLiteral() {
- ASSERT('0' <= current() && current() <= '7');
+ ASSERT(('0' <= current() && current() <= '7') || current() == kEndMarker);
// For compatibility with some other browsers (not all), we parse
// up to three octal digits with a value below 256.
uc32 value = current() - '0';
@@ -5546,27 +4714,27 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
// ----------------------------------------------------------------------------
// The Parser interface.
-ScriptDataImpl::~ScriptDataImpl() {
+ScriptData::~ScriptData() {
if (owns_store_) store_.Dispose();
}
-int ScriptDataImpl::Length() {
+int ScriptData::Length() {
return store_.length() * sizeof(unsigned);
}
-const char* ScriptDataImpl::Data() {
+const char* ScriptData::Data() {
return reinterpret_cast<const char*>(store_.start());
}
-bool ScriptDataImpl::HasError() {
+bool ScriptData::HasError() {
return has_error();
}
-void ScriptDataImpl::Initialize() {
+void ScriptData::Initialize() {
// Prepares state for use.
if (store_.length() >= PreparseDataConstants::kHeaderSize) {
function_index_ = PreparseDataConstants::kHeaderSize;
@@ -5583,7 +4751,7 @@ void ScriptDataImpl::Initialize() {
}
-int ScriptDataImpl::ReadNumber(byte** source) {
+int ScriptData::ReadNumber(byte** source) {
// Reads a number from symbol_data_ in base 128. The most significant
// bit marks that there are more digits.
// If the first byte is 0x80 (kNumberTerminator), it would normally
@@ -5610,33 +4778,6 @@ int ScriptDataImpl::ReadNumber(byte** source) {
}
-// Create a Scanner for the preparser to use as input, and preparse the source.
-ScriptDataImpl* PreParserApi::PreParse(Isolate* isolate,
- Utf16CharacterStream* source) {
- CompleteParserRecorder recorder;
- HistogramTimerScope timer(isolate->counters()->pre_parse());
- Scanner scanner(isolate->unicode_cache());
- intptr_t stack_limit = isolate->stack_guard()->real_climit();
- PreParser preparser(&scanner, &recorder, stack_limit);
- preparser.set_allow_lazy(true);
- preparser.set_allow_generators(FLAG_harmony_generators);
- preparser.set_allow_for_of(FLAG_harmony_iteration);
- preparser.set_allow_harmony_scoping(FLAG_harmony_scoping);
- preparser.set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
- scanner.Initialize(source);
- PreParser::PreParseResult result = preparser.PreParseProgram();
- if (result == PreParser::kPreParseStackOverflow) {
- isolate->StackOverflow();
- return NULL;
- }
-
- // Extract the accumulated data from the recorder as a single
- // contiguous vector that we are responsible for disposing.
- Vector<unsigned> store = recorder.ExtractData();
- return new ScriptDataImpl(store);
-}
-
-
bool RegExpParser::ParseRegExp(FlatStringReader* input,
bool multiline,
RegExpCompileData* result,
@@ -5671,18 +4812,17 @@ bool Parser::Parse() {
result = ParseProgram();
}
} else {
- ScriptDataImpl* pre_parse_data = info()->pre_parse_data();
- set_pre_parse_data(pre_parse_data);
- if (pre_parse_data != NULL && pre_parse_data->has_error()) {
- Scanner::Location loc = pre_parse_data->MessageLocation();
- const char* message = pre_parse_data->BuildMessage();
- Vector<const char*> args = pre_parse_data->BuildArgs();
- ReportMessageAt(loc, message, args);
+ SetCachedData(info()->cached_data(), info()->cached_data_mode());
+ if (info()->cached_data_mode() == CONSUME_CACHED_DATA &&
+ (*info()->cached_data())->has_error()) {
+ ScriptData* cached_data = *(info()->cached_data());
+ Scanner::Location loc = cached_data->MessageLocation();
+ const char* message = cached_data->BuildMessage();
+ const char* arg = cached_data->BuildArg();
+ ParserTraits::ReportMessageAt(loc, message, arg,
+ cached_data->IsReferenceError());
DeleteArray(message);
- for (int i = 0; i < args.length(); i++) {
- DeleteArray(args[i]);
- }
- DeleteArray(args.start());
+ DeleteArray(arg);
ASSERT(info()->isolate()->has_pending_exception());
} else {
result = ParseProgram();
diff --git a/chromium/v8/src/parser.h b/chromium/v8/src/parser.h
index dd8e600f9d7..7cb364b4d77 100644
--- a/chromium/v8/src/parser.h
+++ b/chromium/v8/src/parser.h
@@ -1,45 +1,24 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PARSER_H_
#define V8_PARSER_H_
-#include "allocation.h"
-#include "ast.h"
-#include "preparse-data-format.h"
-#include "preparse-data.h"
-#include "scopes.h"
-#include "preparser.h"
+#include "src/allocation.h"
+#include "src/ast.h"
+#include "src/compiler.h" // For CachedDataMode
+#include "src/preparse-data-format.h"
+#include "src/preparse-data.h"
+#include "src/scopes.h"
+#include "src/preparser.h"
namespace v8 {
+class ScriptCompiler;
+
namespace internal {
class CompilationInfo;
-class FuncNameInferrer;
class ParserLog;
class PositionStack;
class Target;
@@ -54,7 +33,7 @@ class FunctionEntry BASE_EMBEDDED {
kEndPositionIndex,
kLiteralCountIndex,
kPropertyCountIndex,
- kLanguageModeIndex,
+ kStrictModeIndex,
kSize
};
@@ -67,11 +46,10 @@ class FunctionEntry BASE_EMBEDDED {
int end_pos() { return backing_[kEndPositionIndex]; }
int literal_count() { return backing_[kLiteralCountIndex]; }
int property_count() { return backing_[kPropertyCountIndex]; }
- LanguageMode language_mode() {
- ASSERT(backing_[kLanguageModeIndex] == CLASSIC_MODE ||
- backing_[kLanguageModeIndex] == STRICT_MODE ||
- backing_[kLanguageModeIndex] == EXTENDED_MODE);
- return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
+ StrictMode strict_mode() {
+ ASSERT(backing_[kStrictModeIndex] == SLOPPY ||
+ backing_[kStrictModeIndex] == STRICT);
+ return static_cast<StrictMode>(backing_[kStrictModeIndex]);
}
bool is_valid() { return !backing_.is_empty(); }
@@ -81,17 +59,22 @@ class FunctionEntry BASE_EMBEDDED {
};
-class ScriptDataImpl : public ScriptData {
+class ScriptData {
public:
- explicit ScriptDataImpl(Vector<unsigned> store)
+ explicit ScriptData(Vector<unsigned> store)
: store_(store),
owns_store_(true) { }
- // Create an empty ScriptDataImpl that is guaranteed to not satisfy
- // a SanityCheck.
- ScriptDataImpl() : owns_store_(false) { }
+ ScriptData(Vector<unsigned> store, bool owns_store)
+ : store_(store),
+ owns_store_(owns_store) { }
+
+ // The created ScriptData won't take ownership of the data. If the alignment
+ // is not correct, this will copy the data (and the created ScriptData will
+ // take ownership of the copy).
+ static ScriptData* New(const char* data, int length);
- virtual ~ScriptDataImpl();
+ virtual ~ScriptData();
virtual int Length();
virtual const char* Data();
virtual bool HasError();
@@ -103,14 +86,17 @@ class ScriptDataImpl : public ScriptData {
int GetSymbolIdentifier();
bool SanityCheck();
- Scanner::Location MessageLocation();
- const char* BuildMessage();
- Vector<const char*> BuildArgs();
-
- int symbol_count() {
- return (store_.length() > PreparseDataConstants::kHeaderSize)
- ? store_[PreparseDataConstants::kSymbolCountOffset]
- : 0;
+ Scanner::Location MessageLocation() const;
+ bool IsReferenceError() const;
+ const char* BuildMessage() const;
+ const char* BuildArg() const;
+
+ int function_count() {
+ int functions_size =
+ static_cast<int>(store_[PreparseDataConstants::kFunctionsSizeOffset]);
+ if (functions_size < 0) return 0;
+ if (functions_size % FunctionEntry::kSize != 0) return 0;
+ return functions_size / FunctionEntry::kSize;
}
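   // (For example: with FunctionEntry::kSize slots per entry, a functions_size
   // of 3 * kSize yields 3 entries; a negative size, or one that is not a
   // multiple of kSize, indicates corrupt data and maps to 0 entries.)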
// The following functions should only be called if SanityCheck has
// returned true.
@@ -119,41 +105,24 @@ class ScriptDataImpl : public ScriptData {
unsigned version() { return store_[PreparseDataConstants::kVersionOffset]; }
private:
+  // Disable copying and assigning; because of owns_store_ they won't be
+  // correct.
+ ScriptData(const ScriptData&);
+ ScriptData& operator=(const ScriptData&);
+
+ friend class v8::ScriptCompiler;
Vector<unsigned> store_;
unsigned char* symbol_data_;
unsigned char* symbol_data_end_;
int function_index_;
bool owns_store_;
- unsigned Read(int position);
- unsigned* ReadAddress(int position);
+ unsigned Read(int position) const;
+ unsigned* ReadAddress(int position) const;
// Reads a number from the current symbols
int ReadNumber(byte** source);
- ScriptDataImpl(const char* backing_store, int length)
- : store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
- length / static_cast<int>(sizeof(unsigned))),
- owns_store_(false) {
- ASSERT_EQ(0, static_cast<int>(
- reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned)));
- }
-
// Read strings written by ParserRecorder::WriteString.
static const char* ReadString(unsigned* start, int* chars);
-
- friend class ScriptData;
-};
-
-
-class PreParserApi {
- public:
- // Pre-parse a character stream and return full preparse data.
- //
- // This interface is here instead of in preparser.h because it instantiates a
- // preparser recorder object that is suited to the parser's purposes. Also,
- // the preparser doesn't know about ScriptDataImpl.
- static ScriptDataImpl* PreParse(Isolate* isolate,
- Utf16CharacterStream* source);
};
@@ -404,10 +373,220 @@ class RegExpParser BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
-// Forward declaration.
+class Parser;
class SingletonLogger;
-class Parser : public ParserBase {
+class ParserTraits {
+ public:
+ struct Type {
+ // TODO(marja): To be removed. The Traits object should contain all the data
+ // it needs.
+ typedef v8::internal::Parser* Parser;
+
+ // Used by FunctionState and BlockState.
+ typedef v8::internal::Scope Scope;
+ typedef Variable GeneratorVariable;
+ typedef v8::internal::Zone Zone;
+
+ // Return types for traversing functions.
+ typedef Handle<String> Identifier;
+ typedef v8::internal::Expression* Expression;
+ typedef Yield* YieldExpression;
+ typedef v8::internal::FunctionLiteral* FunctionLiteral;
+ typedef v8::internal::Literal* Literal;
+ typedef ObjectLiteral::Property* ObjectLiteralProperty;
+ typedef ZoneList<v8::internal::Expression*>* ExpressionList;
+ typedef ZoneList<ObjectLiteral::Property*>* PropertyList;
+ typedef ZoneList<v8::internal::Statement*>* StatementList;
+
+ // For constructing objects returned by the traversing functions.
+ typedef AstNodeFactory<AstConstructionVisitor> Factory;
+ };
+
+ explicit ParserTraits(Parser* parser) : parser_(parser) {}
+
+  // Custom operations executed when FunctionStates are created and destroyed.
+ template<typename FunctionState>
+ static void SetUpFunctionState(FunctionState* function_state, Zone* zone) {
+ Isolate* isolate = zone->isolate();
+ function_state->saved_ast_node_id_ = isolate->ast_node_id();
+ isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
+ }
+
+ template<typename FunctionState>
+ static void TearDownFunctionState(FunctionState* function_state, Zone* zone) {
+ if (function_state->outer_function_state_ != NULL) {
+ zone->isolate()->set_ast_node_id(function_state->saved_ast_node_id_);
+ }
+ }
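+  // (Illustrative note on the pair above: entering a function literal gives
+  // it a fresh, function-local AST node id counter, and leaving a nested
+  // function restores the enclosing function's counter; the outermost
+  // FunctionState deliberately leaves the final id in place.)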
+
+ // Helper functions for recursive descent.
+ bool IsEvalOrArguments(Handle<String> identifier) const;
+
+ // Returns true if the expression is of type "this.foo".
+ static bool IsThisProperty(Expression* expression);
+
+ static bool IsIdentifier(Expression* expression);
+
+ static Handle<String> AsIdentifier(Expression* expression) {
+ ASSERT(IsIdentifier(expression));
+ return expression->AsVariableProxy()->name();
+ }
+
+ static bool IsBoilerplateProperty(ObjectLiteral::Property* property) {
+ return ObjectLiteral::IsBoilerplateProperty(property);
+ }
+
+ static bool IsArrayIndex(Handle<String> string, uint32_t* index) {
+ return !string.is_null() && string->AsArrayIndex(index);
+ }
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ static void PushLiteralName(FuncNameInferrer* fni, Handle<String> id) {
+ fni->PushLiteralName(id);
+ }
+ void PushPropertyName(FuncNameInferrer* fni, Expression* expression);
+
+ static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
+ Scope* scope, Expression* value, bool* has_function) {
+ if (scope->DeclarationScope()->is_global_scope() &&
+ value->AsFunctionLiteral() != NULL) {
+ *has_function = true;
+ value->AsFunctionLiteral()->set_pretenure();
+ }
+ }
+
+ // If we assign a function literal to a property we pretenure the
+ // literal so it can be added as a constant function property.
+ static void CheckAssigningFunctionLiteralToProperty(Expression* left,
+ Expression* right);
+
+ // Keep track of eval() calls since they disable all local variable
+ // optimizations. This checks if expression is an eval call, and if yes,
+ // forwards the information to scope.
+ void CheckPossibleEvalCall(Expression* expression, Scope* scope);
+
+ // Determine if the expression is a variable proxy and mark it as being used
+  // in an assignment or with an increment/decrement operator. This is
+  // currently used for statically checking assignments to harmony const
+  // bindings.
+ static Expression* MarkExpressionAsLValue(Expression* expression);
+
+ // Returns true if we have a binary expression between two numeric
+ // literals. In that case, *x will be changed to an expression which is the
+ // computed value.
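+  // (For example, a sketch of the folding: for "3 * 4", *x initially holds
+  // the literal 3 and y the literal 4; on return, *x holds the literal 12.)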
+ bool ShortcutNumericLiteralBinaryExpression(
+ Expression** x, Expression* y, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Rewrites the following types of unary expressions:
+ // not <literal> -> true / false
+ // + <numeric literal> -> <numeric literal>
+ // - <numeric literal> -> <numeric literal with value negated>
+ // ! <literal> -> true / false
+ // The following rewriting rules enable the collection of type feedback
+ // without any special stub and the multiplication is removed later in
+ // Crankshaft's canonicalization pass.
+ // + foo -> foo * 1
+ // - foo -> foo * (-1)
+ // ~ foo -> foo ^(~0)
+ Expression* BuildUnaryExpression(
+ Expression* expression, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Generate AST node that throws a ReferenceError with the given type.
+ Expression* NewThrowReferenceError(const char* type, int pos);
+
+ // Generate AST node that throws a SyntaxError with the given
+ // type. The first argument may be null (in the handle sense) in
+ // which case no arguments are passed to the constructor.
+ Expression* NewThrowSyntaxError(
+ const char* type, Handle<Object> arg, int pos);
+
+ // Generate AST node that throws a TypeError with the given
+ // type. Both arguments must be non-null (in the handle sense).
+ Expression* NewThrowTypeError(const char* type, Handle<Object> arg, int pos);
+
+ // Generic AST generator for throwing errors from compiled code.
+ Expression* NewThrowError(
+ Handle<String> constructor, const char* type,
+ Vector<Handle<Object> > arguments, int pos);
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ const char* arg,
+ bool is_reference_error = false);
+ void ReportMessage(const char* message,
+ MaybeHandle<String> arg,
+ bool is_reference_error = false);
+ void ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ MaybeHandle<String> arg,
+ bool is_reference_error = false);
+
+ // "null" return type creators.
+ static Handle<String> EmptyIdentifier() {
+ return Handle<String>();
+ }
+ static Expression* EmptyExpression() {
+ return NULL;
+ }
+ static Literal* EmptyLiteral() {
+ return NULL;
+ }
+ // Used in error return values.
+ static ZoneList<Expression*>* NullExpressionList() {
+ return NULL;
+ }
+
+ // Odd-ball literal creators.
+ Literal* GetLiteralTheHole(int position,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Producing data during the recursive descent.
+ Handle<String> GetSymbol(Scanner* scanner = NULL);
+ Handle<String> NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured);
+ Expression* ThisExpression(Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Literal* ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Expression* ExpressionFromIdentifier(
+ Handle<String> name, int pos, Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Expression* ExpressionFromString(
+ int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ ZoneList<v8::internal::Expression*>* NewExpressionList(int size, Zone* zone) {
+ return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
+ }
+ ZoneList<ObjectLiteral::Property*>* NewPropertyList(int size, Zone* zone) {
+ return new(zone) ZoneList<ObjectLiteral::Property*>(size, zone);
+ }
+ ZoneList<v8::internal::Statement*>* NewStatementList(int size, Zone* zone) {
+ return new(zone) ZoneList<v8::internal::Statement*>(size, zone);
+ }
+
+ // Temporary glue; these functions will move to ParserBase.
+ Expression* ParseV8Intrinsic(bool* ok);
+ FunctionLiteral* ParseFunctionLiteral(
+ Handle<String> name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ bool* ok);
+
+ private:
+ Parser* parser_;
+};
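The rewriting rules documented on BuildUnaryExpression above can be illustrated outside of V8's AST machinery. A minimal standalone sketch of the numeric folding cases (illustrative only; full ECMAScript ToInt32 is simplified here to an in-range cast):

    #include <cmath>
    #include <cstdint>

    // Folds "op <numeric literal>" per the rules above. V8 performs a full
    // ECMAScript ToInt32 for '~'; this sketch assumes the value fits in int32.
    static double FoldNumericUnary(char op, double value) {
      switch (op) {
        case '+': return value;                         // + <n> -> <n>
        case '-': return -value;                        // - <n> -> value negated
        case '~': return ~static_cast<int32_t>(value);  // ~ <n> -> bitwise-not
        default:  return std::nan("");                  // not foldable here
      }
    }

The non-literal cases ("+ foo" and friends) cannot be folded, which is why the comment describes them as multiplications to be cleaned up later by Crankshaft's canonicalization pass.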
+
+
+class Parser : public ParserBase<ParserTraits> {
public:
explicit Parser(CompilationInfo* info);
~Parser() {
@@ -418,16 +597,25 @@ class Parser : public ParserBase {
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
// nodes) if parsing failed.
- static bool Parse(CompilationInfo* info) { return Parser(info).Parse(); }
+ static bool Parse(CompilationInfo* info,
+ bool allow_lazy = false) {
+ Parser parser(info);
+ parser.set_allow_lazy(allow_lazy);
+ return parser.Parse();
+ }
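An illustrative call site for the widened Parse entry point (CompilationInfo setup is elided; this fragment only compiles inside V8 itself):

    // Opt in to lazy parsing of inner functions via the new default argument.
    bool CompileToplevel(CompilationInfo* info) {
      return Parser::Parse(info, true /* allow_lazy */);
    }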
bool Parse();
private:
- static const int kMaxNumFunctionLocals = 131071; // 2^17-1
+ friend class ParserTraits;
- enum Mode {
- PARSE_LAZILY,
- PARSE_EAGERLY
- };
+ // Limit the allowed number of local variables in a function. The hard limit
+ // is that offsets computed by FullCodeGenerator::StackOperand and similar
+ // functions are ints, and they should not overflow. In addition, accessing
+ // local variables creates user-controlled constants in the generated code,
+ // and we don't want too much user-controlled memory inside the code (this was
+ // the reason why this limit was introduced in the first place; see
+ // https://codereview.chromium.org/7003030/ ).
+ static const int kMaxNumFunctionLocals = 4194303; // 2^22-1
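As a sanity check on the new bound: even with 8-byte stack slots (an assumed slot size; V8's actual constant is kPointerSize), the largest operand offset stays far inside int range:

    #include <climits>

    const long long kAssumedSlotSize = 8;  // hypothetical; V8 uses kPointerSize
    const long long kMaxLocals = 4194303;  // 2^22 - 1, as above

    // 4194303 * 8 = 33554424, well below INT_MAX (2147483647).
    static_assert(kMaxLocals * kAssumedSlotSize < INT_MAX,
                  "stack operand offsets must fit in an int");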
enum VariableDeclarationContext {
kModuleElement,
@@ -442,82 +630,6 @@ class Parser : public ParserBase {
kHasNoInitializers
};
- class BlockState;
-
- class FunctionState BASE_EMBEDDED {
- public:
- FunctionState(Parser* parser,
- Scope* scope,
- Isolate* isolate);
- ~FunctionState();
-
- int NextMaterializedLiteralIndex() {
- return next_materialized_literal_index_++;
- }
- int materialized_literal_count() {
- return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
- }
-
- int NextHandlerIndex() { return next_handler_index_++; }
- int handler_count() { return next_handler_index_; }
-
- void AddProperty() { expected_property_count_++; }
- int expected_property_count() { return expected_property_count_; }
-
- void set_generator_object_variable(Variable *variable) {
- ASSERT(variable != NULL);
- ASSERT(!is_generator());
- generator_object_variable_ = variable;
- }
- Variable* generator_object_variable() const {
- return generator_object_variable_;
- }
- bool is_generator() const {
- return generator_object_variable_ != NULL;
- }
-
- AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
-
- private:
- // Used to assign an index to each literal that needs materialization in
- // the function. Includes regexp literals, and boilerplate for object and
- // array literals.
- int next_materialized_literal_index_;
-
- // Used to assign a per-function index to try and catch handlers.
- int next_handler_index_;
-
- // Properties count estimation.
- int expected_property_count_;
-
- // For generators, the variable that holds the generator object. This
- // variable is used by yield expressions and return statements. NULL
- // indicates that this function is not a generator.
- Variable* generator_object_variable_;
-
- Parser* parser_;
- FunctionState* outer_function_state_;
- Scope* outer_scope_;
- int saved_ast_node_id_;
- AstNodeFactory<AstConstructionVisitor> factory_;
- };
-
- class ParsingModeScope BASE_EMBEDDED {
- public:
- ParsingModeScope(Parser* parser, Mode mode)
- : parser_(parser),
- old_mode_(parser->mode()) {
- parser_->mode_ = mode;
- }
- ~ParsingModeScope() {
- parser_->mode_ = old_mode_;
- }
-
- private:
- Parser* parser_;
- Mode old_mode_;
- };
-
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram();
@@ -525,7 +637,6 @@ class Parser : public ParserBase {
FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
CompilationInfo* info() const { return info_; }
// Called by ParseProgram after setting up the scanner.
@@ -533,41 +644,27 @@ class Parser : public ParserBase {
Handle<String> source);
// Report syntax error
- void ReportUnexpectedToken(Token::Value token);
- void ReportInvalidPreparseData(Handle<String> name, bool* ok);
- void ReportMessage(const char* message, Vector<const char*> args);
- void ReportMessage(const char* message, Vector<Handle<String> > args);
- void ReportMessageAt(Scanner::Location location, const char* type) {
- ReportMessageAt(location, type, Vector<const char*>::empty());
- }
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<Handle<String> > args);
+ void ReportInvalidCachedData(Handle<String> name, bool* ok);
- void set_pre_parse_data(ScriptDataImpl *data) {
- pre_parse_data_ = data;
- symbol_cache_.Initialize(data ? data->symbol_count() : 0, zone());
+ void SetCachedData(ScriptData** data,
+ CachedDataMode cached_data_mode) {
+ cached_data_mode_ = cached_data_mode;
+ if (cached_data_mode == NO_CACHED_DATA) {
+ cached_data_ = NULL;
+ } else {
+ ASSERT(data != NULL);
+ cached_data_ = data;
+ }
}
- bool inside_with() const { return top_scope_->inside_with(); }
- Scanner& scanner() { return scanner_; }
- Mode mode() const { return mode_; }
- ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
- bool is_extended_mode() {
- ASSERT(top_scope_ != NULL);
- return top_scope_->is_extended_mode();
- }
+ bool inside_with() const { return scope_->inside_with(); }
+ ScriptData** cached_data() const { return cached_data_; }
+ CachedDataMode cached_data_mode() const { return cached_data_mode_; }
Scope* DeclarationScope(VariableMode mode) {
return IsLexicalVariableMode(mode)
- ? top_scope_ : top_scope_->DeclarationScope();
+ ? scope_ : scope_->DeclarationScope();
}
- // Check if the given string is 'eval' or 'arguments'.
- bool IsEvalOrArguments(Handle<String> string);
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -617,89 +714,33 @@ class Parser : public ParserBase {
// Support for harmony block-scoped bindings.
Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
- Expression* ParseExpression(bool accept_IN, bool* ok);
- Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression* ParseYieldExpression(bool* ok);
- Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression* ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression* ParseUnaryExpression(bool* ok);
- Expression* ParsePostfixExpression(bool* ok);
- Expression* ParseLeftHandSideExpression(bool* ok);
- Expression* ParseNewExpression(bool* ok);
- Expression* ParseMemberExpression(bool* ok);
- Expression* ParseNewPrefix(PositionStack* stack, bool* ok);
- Expression* ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok);
- Expression* ParsePrimaryExpression(bool* ok);
- Expression* ParseArrayLiteral(bool* ok);
- Expression* ParseObjectLiteral(bool* ok);
- Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
-
// Initialize the components of a for-in / for-of statement.
void InitializeForEachStatement(ForEachStatement* stmt,
Expression* each,
Expression* subject,
Statement* body);
-
- ZoneList<Expression*>* ParseArguments(bool* ok);
- FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
- bool name_is_reserved,
- bool is_generator,
- int function_token_position,
- FunctionLiteral::FunctionType type,
- bool* ok);
-
+ Statement* DesugarLetBindingsInForStatement(
+ Scope* inner_scope, ZoneStringList* names, ForStatement* loop,
+ Statement* init, Expression* cond, Statement* next, Statement* body,
+ bool* ok);
+
+ FunctionLiteral* ParseFunctionLiteral(
+ Handle<String> name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ bool* ok);
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
- bool is_generator() const { return current_function_state_->is_generator(); }
-
bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
- Handle<String> LiteralString(PretenureFlag tenured) {
- if (scanner().is_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().literal_utf16_string(), tenured);
- }
- }
-
- Handle<String> NextLiteralString(PretenureFlag tenured) {
- if (scanner().is_next_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().next_literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().next_literal_utf16_string(), tenured);
- }
- }
-
- Handle<String> GetSymbol();
-
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
- Literal* GetLiteralTheHole(int position);
-
- Handle<String> ParseIdentifier(bool* ok);
- Handle<String> ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok);
- Handle<String> ParseIdentifierName(bool* ok);
- Handle<String> ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
- // Determine if the expression is a variable proxy and mark it as being used
- // in an assignment or with a increment/decrement operator. This is currently
- // used on for the statically checking assignments to harmony const bindings.
- void MarkAsLValue(Expression* expression);
-
- // Strict mode validation of LValue expressions
- void CheckStrictModeLValue(Expression* expression,
- const char* error,
- bool* ok);
// For harmony block scoping mode: Check if the scope has conflicting var/let
// declarations from different scopes. It covers for example
@@ -728,61 +769,45 @@ class Parser : public ParserBase {
Scope* NewScope(Scope* parent, ScopeType type);
- Handle<String> LookupSymbol(int symbol_id);
-
- Handle<String> LookupCachedSymbol(int symbol_id);
-
- // Generate AST node that throw a ReferenceError with the given type.
- Expression* NewThrowReferenceError(Handle<String> type);
-
- // Generate AST node that throw a SyntaxError with the given
- // type. The first argument may be null (in the handle sense) in
- // which case no arguments are passed to the constructor.
- Expression* NewThrowSyntaxError(Handle<String> type, Handle<Object> first);
-
- // Generate AST node that throw a TypeError with the given
- // type. Both arguments must be non-null (in the handle sense).
- Expression* NewThrowTypeError(Handle<String> type,
- Handle<Object> first,
- Handle<Object> second);
-
- // Generic AST generator for throwing errors from compiled code.
- Expression* NewThrowError(Handle<String> constructor,
- Handle<String> type,
- Vector< Handle<Object> > arguments);
-
- PreParser::PreParseResult LazyParseFunctionLiteral(
- SingletonLogger* logger);
+ // Skip over a lazy function, either using cached data if we have it, or
+ // by parsing the function with PreParser. Consumes the ending }.
+ void SkipLazyFunctionBody(Handle<String> function_name,
+ int* materialized_literal_count,
+ int* expected_property_count,
+ bool* ok);
+
+ PreParser::PreParseResult ParseLazyFunctionBodyWithPreParser(
+ SingletonLogger* logger);
+
+ // Consumes the ending }.
+ ZoneList<Statement*>* ParseEagerFunctionBody(Handle<String> function_name,
+ int pos,
+ Variable* fvar,
+ Token::Value fvar_init_op,
+ bool is_generator,
+ bool* ok);
- AstNodeFactory<AstConstructionVisitor>* factory() {
- return current_function_state_->factory();
- }
+ void ThrowPendingError();
Isolate* isolate_;
- ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
Scanner scanner_;
PreParser* reusable_preparser_;
- Scope* top_scope_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
- FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
- v8::Extension* extension_;
- ScriptDataImpl* pre_parse_data_;
- FuncNameInferrer* fni_;
-
- Mode mode_;
- // If true, the next (and immediately following) function literal is
- // preceded by a parenthesis.
- // Heuristically that means that the function will be called immediately,
- // so never lazily compile it.
- bool parenthesized_function_;
+ ScriptData** cached_data_;
+ CachedDataMode cached_data_mode_;
- Zone* zone_;
CompilationInfo* info_;
- friend class BlockState;
- friend class FunctionState;
+
+ // Pending errors.
+ bool has_pending_error_;
+ Scanner::Location pending_error_location_;
+ const char* pending_error_message_;
+ MaybeHandle<String> pending_error_arg_;
+ const char* pending_error_char_arg_;
+ bool pending_error_is_reference_error_;
};
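The pending_error_* members above implement a deferred-reporting pattern: the parser records the error while unwinding and only materializes the exception later, in ThrowPendingError(). A minimal standalone sketch of that pattern (names and output are illustrative, not V8's):

    #include <cstddef>
    #include <cstdio>

    struct PendingError {
      bool has_error;
      int position;
      const char* message;

      PendingError() : has_error(false), position(-1), message(NULL) {}

      // Called from the reporting functions; parsing then unwinds normally.
      void Record(int pos, const char* msg) {
        has_error = true;
        position = pos;
        message = msg;
      }

      // Called once, after the parse has fully unwound.
      void ThrowIfPending() const {
        if (has_error)
          std::fprintf(stderr, "SyntaxError at %d: %s\n", position, message);
      }
    };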
diff --git a/chromium/v8/src/platform-cygwin.cc b/chromium/v8/src/platform-cygwin.cc
index 0076d567f8b..91235cfb0ae 100644
--- a/chromium/v8/src/platform-cygwin.cc
+++ b/chromium/v8/src/platform-cygwin.cc
@@ -1,32 +1,9 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Cygwin goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Cygwin goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#include <errno.h>
#include <pthread.h>
@@ -39,28 +16,25 @@
#undef MAP_TYPE
-#include "v8.h"
+#include "src/v8.h"
-#include "platform.h"
-#include "simulator.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-#include "win32-headers.h"
+#include "src/base/win32-headers.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on Cygwin.
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
ASSERT(utc != -1);
@@ -78,10 +52,7 @@ void* OS::Allocate(const size_t requested,
const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
+ if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
@@ -135,12 +106,13 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses(Isolate* isolate) {
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return;
+ if (fp == NULL) return result;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
@@ -179,7 +151,7 @@ void OS::LogSharedLibraryAddresses(Isolate* isolate) {
snprintf(lib_name, kLibNameLen,
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
}
- LOG(isolate, SharedLibraryEvent(lib_name, start, end));
+ result.push_back(SharedLibraryAddress(lib_name, start, end));
} else {
// Entry not describing executable data. Skip to end of line to set up
// reading the next entry.
@@ -191,6 +163,7 @@ void OS::LogSharedLibraryAddresses(Isolate* isolate) {
}
free(lib_name);
fclose(fp);
+ return result;
}
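Every /proc/self/maps scanner in these platform files relies on the line format noted in the comments ("hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]"). A self-contained parser for a single such line (a sketch, not the code V8 uses):

    #include <cinttypes>
    #include <cstdio>

    // Fills in the address range and returns true when the line describes an
    // executable mapping; the permission field is at most four flag characters.
    static bool ParseMapsLine(const char* line,
                              uintptr_t* start, uintptr_t* end) {
      char perms[5] = {0};
      if (std::sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s",
                      start, end, perms) != 3) {
        return false;
      }
      return perms[2] == 'x';  // the third flag is the execute bit
    }

For example, "08048000-08056000 r-xp 00000000 03:0c 64593 /usr/sbin/gpm" parses to start 0x08048000, end 0x08056000, and true.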
diff --git a/chromium/v8/src/platform-freebsd.cc b/chromium/v8/src/platform-freebsd.cc
index 75d88ec5d33..a1a07396fb4 100644
--- a/chromium/v8/src/platform-freebsd.cc
+++ b/chromium/v8/src/platform-freebsd.cc
@@ -1,32 +1,9 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for FreeBSD goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for FreeBSD goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#include <pthread.h>
#include <semaphore.h>
@@ -50,27 +27,25 @@
#undef MAP_TYPE
-#include "v8.h"
-#include "v8threads.h"
+#include "src/v8.h"
-#include "platform.h"
-#include "vm-state-inl.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
@@ -86,10 +61,7 @@ void* OS::Allocate(const size_t requested,
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
+ if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
@@ -148,10 +120,11 @@ static unsigned StringToLong(char* buffer) {
}
-void OS::LogSharedLibraryAddresses(Isolate* isolate) {
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
static const int MAP_LENGTH = 1024;
int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return;
+ if (fd < 0) return result;
while (true) {
char addr_buffer[11];
addr_buffer[0] = '0';
@@ -182,9 +155,10 @@ void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// There may be no filename in this line. Skip to next.
if (start_of_path == NULL) continue;
buffer[bytes_read] = 0;
- LOG(isolate, SharedLibraryEvent(start_of_path, start, end));
+ result.push_back(SharedLibraryAddress(start_of_path, start, end));
}
close(fd);
+ return result;
}
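The LocalTimeOffset implementations in these files all hinge on the same detail: tm_gmtoff already includes any daylight-saving shift, so the standard-time offset is recovered by subtracting the DST hour back out. In isolation (tm_gmtoff is a BSD/glibc extension, not ISO C):

    #include <ctime>

    static double LocalStandardTimeOffsetMs() {
      const double kMsPerSecond = 1000;
      time_t now = time(NULL);
      struct tm* t = localtime(&now);
      if (t == NULL) return 0;
      return static_cast<double>(t->tm_gmtoff) * kMsPerSecond -
             (t->tm_isdst > 0 ? 3600 * kMsPerSecond : 0);
    }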
diff --git a/chromium/v8/src/platform-linux.cc b/chromium/v8/src/platform-linux.cc
index eb2d10b3f9d..3cbf4dafd23 100644
--- a/chromium/v8/src/platform-linux.cc
+++ b/chromium/v8/src/platform-linux.cc
@@ -1,32 +1,9 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Linux goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Linux goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#include <pthread.h>
#include <semaphore.h>
@@ -53,17 +30,20 @@
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
#undef MAP_TYPE
-#include "v8.h"
+#include "src/v8.h"
-#include "platform.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
+#include "src/platform.h"
namespace v8 {
@@ -113,16 +93,16 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
@@ -138,11 +118,7 @@ void* OS::Allocate(const size_t requested,
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(i::Isolate::Current(),
- StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
+ if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
@@ -206,12 +182,13 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses(Isolate* isolate) {
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return;
+ if (fp == NULL) return result;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
@@ -251,7 +228,7 @@ void OS::LogSharedLibraryAddresses(Isolate* isolate) {
snprintf(lib_name, kLibNameLen,
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
}
- LOG(isolate, SharedLibraryEvent(lib_name, start, end));
+ result.push_back(SharedLibraryAddress(lib_name, start, end));
} else {
// Entry not describing executable data. Skip to end of line to set up
// reading the next entry.
@@ -263,6 +240,7 @@ void OS::LogSharedLibraryAddresses(Isolate* isolate) {
}
free(lib_name);
fclose(fp);
+ return result;
}
@@ -348,6 +326,9 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(address_, size_);
+#endif
}
@@ -397,6 +378,9 @@ void* VirtualMemory::ReserveRegion(size_t size) {
if (result == MAP_FAILED) return NULL;
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(result, size);
+#endif
return result;
}
@@ -433,6 +417,9 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+#if defined(LEAK_SANITIZER)
+ __lsan_unregister_root_region(base, size);
+#endif
return munmap(base, size) == 0;
}
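The LEAK_SANITIZER hunks above pair every reservation with __lsan_register_root_region(), so LSan scans V8's reserved memory for pointers, and unregister it again before unmapping. The pairing in isolation (a sketch assuming Linux mmap semantics):

    #if defined(LEAK_SANITIZER)
    #include <sanitizer/lsan_interface.h>
    #endif
    #include <stddef.h>
    #include <sys/mman.h>

    static void* ReserveScanned(size_t size) {
      void* p = mmap(NULL, size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (p == MAP_FAILED) return NULL;
    #if defined(LEAK_SANITIZER)
      __lsan_register_root_region(p, size);  // LSan now treats p as a root
    #endif
      return p;
    }

    static bool ReleaseScanned(void* p, size_t size) {
    #if defined(LEAK_SANITIZER)
      __lsan_unregister_root_region(p, size);  // must precede the munmap
    #endif
      return munmap(p, size) == 0;
    }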
diff --git a/chromium/v8/src/platform-macos.cc b/chromium/v8/src/platform-macos.cc
index 5ffc3fc54c4..4301875b9b6 100644
--- a/chromium/v8/src/platform-macos.cc
+++ b/chromium/v8/src/platform-macos.cc
@@ -1,32 +1,9 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for MacOS goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for MacOS goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#include <dlfcn.h>
#include <unistd.h>
@@ -56,11 +33,9 @@
#undef MAP_TYPE
-#include "v8.h"
+#include "src/v8.h"
-#include "platform.h"
-#include "simulator.h"
-#include "vm-state-inl.h"
+#include "src/platform.h"
namespace v8 {
@@ -86,10 +61,7 @@ void* OS::Allocate(const size_t requested,
MAP_PRIVATE | MAP_ANON,
kMmapFd,
kMmapFdOffset);
- if (mbase == MAP_FAILED) {
- LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
+ if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
@@ -153,7 +125,8 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses(Isolate* isolate) {
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
@@ -172,9 +145,10 @@ void OS::LogSharedLibraryAddresses(Isolate* isolate) {
if (code_ptr == NULL) continue;
const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- LOG(isolate,
- SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
+ result.push_back(
+ SharedLibraryAddress(_dyld_get_image_name(i), start, start + size));
}
+ return result;
}
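The dyld walk above can be exercised on its own; this sketch lists every loaded image together with its ASLR slide (macOS-only APIs):

    #include <mach-o/dyld.h>
    #include <cstdint>
    #include <cstdio>

    static void PrintLoadedImages() {
      uint32_t count = _dyld_image_count();
      for (uint32_t i = 0; i < count; ++i) {
        intptr_t slide = _dyld_get_image_vmaddr_slide(i);
        std::printf("%s (slide %#lx)\n", _dyld_get_image_name(i),
                    static_cast<unsigned long>(slide));
      }
    }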
@@ -182,16 +156,16 @@ void OS::SignalCodeMovingGC() {
}
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/chromium/v8/src/platform-openbsd.cc b/chromium/v8/src/platform-openbsd.cc
index 710c3904afb..1f8e239cd53 100644
--- a/chromium/v8/src/platform-openbsd.cc
+++ b/chromium/v8/src/platform-openbsd.cc
@@ -1,32 +1,9 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for OpenBSD and NetBSD goes here. For the POSIX
-// comaptible parts the implementation is in platform-posix.cc.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for OpenBSD and NetBSD goes here. For the
+// POSIX-compatible parts, the implementation is in platform-posix.cc.
#include <pthread.h>
#include <semaphore.h>
@@ -48,27 +25,25 @@
#undef MAP_TYPE
-#include "v8.h"
+#include "src/v8.h"
-#include "platform.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
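A standalone restatement of the LocalTimezone pattern shared by all these files: convert a JavaScript-style milliseconds-since-epoch value into the local zone abbreviation (tm_zone is a BSD/glibc extension):

    #include <cmath>
    #include <ctime>

    static const char* TimezoneNameAt(double time_ms) {
      const double kMsPerSecond = 1000;
      if (std::isnan(time_ms)) return "";
      time_t tv = static_cast<time_t>(std::floor(time_ms / kMsPerSecond));
      struct tm* t = localtime(&tv);
      return (t == NULL) ? "" : t->tm_zone;
    }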
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
@@ -84,11 +59,7 @@ void* OS::Allocate(const size_t requested,
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(i::Isolate::Current(),
- StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
+ if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
@@ -142,12 +113,13 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses(Isolate* isolate) {
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return;
+ if (fp == NULL) return result;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
@@ -186,7 +158,7 @@ void OS::LogSharedLibraryAddresses(Isolate* isolate) {
snprintf(lib_name, kLibNameLen,
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
}
- LOG(isolate, SharedLibraryEvent(lib_name, start, end));
+ result.push_back(SharedLibraryAddress(lib_name, start, end));
} else {
// Entry not describing executable data. Skip to end of line to set up
// reading the next entry.
@@ -198,6 +170,7 @@ void OS::LogSharedLibraryAddresses(Isolate* isolate) {
}
free(lib_name);
fclose(fp);
+ return result;
}
diff --git a/chromium/v8/src/platform-posix.cc b/chromium/v8/src/platform-posix.cc
index 879dcc81484..c963fb1797a 100644
--- a/chromium/v8/src/platform-posix.cc
+++ b/chromium/v8/src/platform-posix.cc
@@ -1,33 +1,10 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for POSIX goes here. This is not a platform on its
-// own but contains the parts which are the same across POSIX platforms Linux,
-// Mac OS, FreeBSD and OpenBSD.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for POSIX goes here. This is not a platform on its
+// own, but contains the parts which are the same across the POSIX platforms
+// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
#include <dlfcn.h>
#include <pthread.h>
@@ -40,7 +17,6 @@
#include <time.h>
#include <sys/mman.h>
-#include <sys/socket.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -64,11 +40,14 @@
#include <android/log.h>
#endif
-#include "v8.h"
+#include "src/v8.h"
+
+#include "src/isolate-inl.h"
+#include "src/platform.h"
-#include "codegen.h"
-#include "isolate-inl.h"
-#include "platform.h"
+#ifdef V8_FAST_TLS_SUPPORTED
+#include "src/base/atomicops.h"
+#endif
namespace v8 {
namespace internal {
@@ -77,15 +56,8 @@ namespace internal {
static const pthread_t kNoThread = (pthread_t) 0;
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if V8_OS_MACOSX
- // Mac OS X requires all these to install so we can assume they are present.
- // These constants are defined by the CPUid instructions.
- const uint64_t one = 1;
- return (one << SSE2) | (one << CMOV);
-#else
- return 0; // Nothing special about the other systems.
-#endif
+int OS::NumberOfProcessorsOnline() {
+ return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
}
@@ -96,6 +68,12 @@ intptr_t OS::MaxVirtualMemory() {
struct rlimit limit;
int result = getrlimit(RLIMIT_DATA, &limit);
if (result != 0) return 0;
+#if V8_OS_NACL
+ // The NaCl compiler doesn't like resource.h constants.
+ if (static_cast<int>(limit.rlim_cur) == -1) return 0;
+#else
+ if (limit.rlim_cur == RLIM_INFINITY) return 0;
+#endif
return limit.rlim_cur;
}
@@ -130,6 +108,13 @@ uint64_t OS::TotalPhysicalMemory() {
return 0;
}
return static_cast<uint64_t>(memory_info.dwTotalPhys);
+#elif V8_OS_QNX
+ struct stat stat_buf;
+ if (stat("/proc", &stat_buf) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(stat_buf.st_size);
#else
intptr_t pages = sysconf(_SC_PHYS_PAGES);
intptr_t page_size = sysconf(_SC_PAGESIZE);
@@ -176,10 +161,10 @@ void OS::Free(void* address, const size_t size) {
// Get rid of writable permission on code allocations.
void OS::ProtectCode(void* address, const size_t size) {
-#if defined(__CYGWIN__)
+#if V8_OS_CYGWIN
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-#elif defined(__native_client__)
+#elif V8_OS_NACL
// The Native Client port of V8 uses an interpreter, so
// code pages don't need PROT_EXEC.
mprotect(address, size, PROT_READ);
@@ -191,7 +176,7 @@ void OS::ProtectCode(void* address, const size_t size) {
// Create guard pages.
void OS::Guard(void* address, const size_t size) {
-#if defined(__CYGWIN__)
+#if V8_OS_CYGWIN
DWORD oldprotect;
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
#else
@@ -201,12 +186,17 @@ void OS::Guard(void* address, const size_t size) {
void* OS::GetRandomMmapAddr() {
-#if defined(__native_client__)
+#if V8_OS_NACL
// TODO(bradchen): restore randomization once Native Client gets
// smarter about using mmap address hints.
// See http://code.google.com/p/nativeclient/issues/3341
return NULL;
#endif
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER)
+ // Dynamic tools do not support custom mmap addresses.
+ return NULL;
+#endif
Isolate* isolate = Isolate::UncheckedCurrent();
// Note that the current isolate isn't set up in a call path via
// CpuFeatures::Probe. We don't care about randomization in this case because
@@ -247,7 +237,7 @@ void* OS::GetRandomMmapAddr() {
size_t OS::AllocateAlignment() {
- return getpagesize();
+ return static_cast<size_t>(sysconf(_SC_PAGESIZE));
}
@@ -258,10 +248,10 @@ void OS::Sleep(int milliseconds) {
void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- if (FLAG_break_on_abort) {
- DebugBreak();
+ if (FLAG_hard_abort) {
+ V8_IMMEDIATE_CRASH();
}
+ // Redirect to std abort to signal abnormal program termination.
abort();
}
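With FLAG_hard_abort the process now dies without raising SIGABRT, so abort handlers and atexit hooks never run. A standalone analogue, assuming V8_IMMEDIATE_CRASH expands to the usual GCC/Clang trap intrinsic (that expansion is an assumption, not taken from this diff):

    #include <cstdlib>

    static void AbortProcess(bool hard_abort) {
      if (hard_abort) {
        __builtin_trap();  // immediate crash; no signal handlers, no cleanup
      }
      std::abort();        // regular abnormal termination via SIGABRT
    }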
@@ -269,6 +259,8 @@ void OS::Abort() {
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
asm("bkpt 0");
+#elif V8_HOST_ARCH_ARM64
+ asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");
#elif V8_HOST_ARCH_IA32
@@ -288,37 +280,6 @@ void OS::DebugBreak() {
// ----------------------------------------------------------------------------
// Math functions
-double modulo(double x, double y) {
- return fmod(x, y);
-}
-
-
-#define UNARY_MATH_FUNCTION(name, generator) \
-static UnaryMathFunction fast_##name##_function = NULL; \
-void init_fast_##name##_function() { \
- fast_##name##_function = generator; \
-} \
-double fast_##name(double x) { \
- return (*fast_##name##_function)(x); \
-}
-
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
-UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
-UNARY_MATH_FUNCTION(exp, CreateExpFunction())
-UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-
-#undef UNARY_MATH_FUNCTION
-
-
-void lazily_initialize_fast_exp() {
- if (fast_exp_function == NULL) {
- init_fast_exp_function();
- }
-}
-
-
double OS::nan_value() {
// NAN from math.h is defined in C99 and not in POSIX.
return NAN;
@@ -349,9 +310,27 @@ double OS::TimeCurrentMillis() {
}
-double OS::DaylightSavingsOffset(double time) {
+class TimezoneCache {};
+
+
+TimezoneCache* OS::CreateTimezoneCache() {
+ return NULL;
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+ ASSERT(cache == NULL);
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+ ASSERT(cache == NULL);
+}
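POSIX opts out of timezone caching, which is why all three functions above insist the pointer stays NULL; callers follow the same protocol on every platform (an illustrative call site, compilable only inside V8):

    TimezoneCache* cache = OS::CreateTimezoneCache();  // NULL on POSIX
    double offset_ms = OS::LocalTimeOffset(cache);
    OS::ClearTimezoneCache(cache);                     // no-op here
    OS::DisposeTimezoneCache(cache);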
+
+
+double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
if (std::isnan(time)) return nan_value();
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return nan_value();
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
@@ -443,23 +422,24 @@ void OS::VPrintError(const char* format, va_list args) {
}
-int OS::SNPrintF(Vector<char> str, const char* format, ...) {
+int OS::SNPrintF(char* str, int length, const char* format, ...) {
va_list args;
va_start(args, format);
- int result = VSNPrintF(str, format, args);
+ int result = VSNPrintF(str, length, format, args);
va_end(args);
return result;
}
-int OS::VSNPrintF(Vector<char> str,
+int OS::VSNPrintF(char* str,
+ int length,
const char* format,
va_list args) {
- int n = vsnprintf(str.start(), str.length(), format, args);
- if (n < 0 || n >= str.length()) {
+ int n = vsnprintf(str, length, format, args);
+ if (n < 0 || n >= length) {
// If the length is zero, the assignment fails.
- if (str.length() > 0)
- str[str.length() - 1] = '\0';
+ if (length > 0)
+ str[length - 1] = '\0';
return -1;
} else {
return n;
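The new char*/length signature keeps the old Vector-based contract: on overflow the buffer is still NUL-terminated (when length > 0) and -1 flags the truncation, instead of the would-be length that raw vsnprintf reports. Restated as a self-contained helper with the same contract:

    #include <cstdarg>
    #include <cstdio>

    // -1 on truncation; the buffer is always terminated when length > 0.
    static int BoundedPrintF(char* str, int length, const char* format, ...) {
      va_list args;
      va_start(args, format);
      int n = vsnprintf(str, length, format, args);
      va_end(args);
      if (n < 0 || n >= length) {
        if (length > 0) str[length - 1] = '\0';
        return -1;
      }
      return n;
    }

So BoundedPrintF(buf, 8, "%s", "hello world") leaves "hello w" in buf and returns -1.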
@@ -467,69 +447,6 @@ int OS::VSNPrintF(Vector<char> str,
}
-#if V8_TARGET_ARCH_IA32
-static void MemMoveWrapper(void* dest, const void* src, size_t size) {
- memmove(dest, src, size);
-}
-
-
-// Initialize to library version so we can call this at any time during startup.
-static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
-
-// Defined in codegen-ia32.cc.
-OS::MemMoveFunction CreateMemMoveFunction();
-
-// Copy memory area. No restrictions.
-void OS::MemMove(void* dest, const void* src, size_t size) {
- if (size == 0) return;
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- (*memmove_function)(dest, src, size);
-}
-
-#elif defined(V8_HOST_ARCH_ARM)
-void OS::MemCopyUint16Uint8Wrapper(uint16_t* dest,
- const uint8_t* src,
- size_t chars) {
- uint16_t *limit = dest + chars;
- while (dest < limit) {
- *dest++ = static_cast<uint16_t>(*src++);
- }
-}
-
-
-OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper;
-OS::MemCopyUint16Uint8Function OS::memcopy_uint16_uint8_function =
- &OS::MemCopyUint16Uint8Wrapper;
-// Defined in codegen-arm.cc.
-OS::MemCopyUint8Function CreateMemCopyUint8Function(
- OS::MemCopyUint8Function stub);
-OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- OS::MemCopyUint16Uint8Function stub);
-#endif
-
-
-void OS::PostSetUp() {
-#if V8_TARGET_ARCH_IA32
- OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
- if (generated_memmove != NULL) {
- memmove_function = generated_memmove;
- }
-#elif defined(V8_HOST_ARCH_ARM)
- OS::memcopy_uint8_function =
- CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
- OS::memcopy_uint16_uint8_function =
- CreateMemCopyUint16Uint8Function(&OS::MemCopyUint16Uint8Wrapper);
-#endif
- init_fast_sin_function();
- init_fast_cos_function();
- init_fast_tan_function();
- init_fast_log_function();
- // fast_exp is initialized lazily.
- init_fast_sqrt_function();
-}
-
-
// ----------------------------------------------------------------------------
// POSIX string support.
//
@@ -539,8 +456,8 @@ char* OS::StrChr(char* str, int c) {
}
-void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
- strncpy(dest.start(), src, n);
+void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
+ strncpy(dest, src, n);
}
@@ -552,6 +469,8 @@ class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
pthread_t thread_; // Thread handle for pthread.
+ // Synchronizes thread creation
+ Mutex thread_creation_mutex_;
};
Thread::Thread(const Options& options)
@@ -571,12 +490,12 @@ Thread::~Thread() {
static void SetThreadName(const char* name) {
-#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
pthread_set_name_np(pthread_self(), name);
-#elif defined(__NetBSD__)
+#elif V8_OS_NETBSD
STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
pthread_setname_np(pthread_self(), "%s", name);
-#elif defined(__APPLE__)
+#elif V8_OS_MACOSX
// pthread_setname_np is only available in 10.6 or later, so test
// for it at runtime.
int (*dynamic_pthread_setname_np)(const char*);
@@ -599,10 +518,10 @@ static void SetThreadName(const char* name) {
static void* ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
+ // We take the lock here to make sure that pthread_create() has finished,
+ // since we don't know which thread will run first (the original thread or
+ // the new one).
+ { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
SetThreadName(thread->name());
ASSERT(thread->data()->thread_ != kNoThread);
thread->NotifyStartedAndRun();
@@ -623,13 +542,16 @@ void Thread::Start() {
result = pthread_attr_init(&attr);
ASSERT_EQ(0, result);
// Native client uses default stack size.
-#if !defined(__native_client__)
+#if !V8_OS_NACL
if (stack_size_ > 0) {
result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
ASSERT_EQ(0, result);
}
#endif
- result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ {
+ LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
+ result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ }
ASSERT_EQ(0, result);
result = pthread_attr_destroy(&attr);
ASSERT_EQ(0, result);
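The thread_creation_mutex_ handshake above replaces the old double-write of thread_: the parent holds the mutex across pthread_create(), and the child's first act is to acquire and release it, so the child can never observe an unset handle. The pattern in isolation (names are illustrative):

    #include <cstddef>
    #include <pthread.h>

    struct ThreadData {
      pthread_t thread;
      pthread_mutex_t creation_mutex;
    };

    static void* Entry(void* arg) {
      ThreadData* data = static_cast<ThreadData*>(arg);
      // Blocks until StartThread() releases the lock, i.e. until
      // pthread_create() has returned and data->thread is fully written.
      pthread_mutex_lock(&data->creation_mutex);
      pthread_mutex_unlock(&data->creation_mutex);
      // data->thread is now safe to read from this thread.
      return NULL;
    }

    static bool StartThread(ThreadData* data) {
      pthread_mutex_init(&data->creation_mutex, NULL);
      pthread_mutex_lock(&data->creation_mutex);
      int result = pthread_create(&data->thread, NULL, Entry, data);
      pthread_mutex_unlock(&data->creation_mutex);
      return result == 0;
    }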
@@ -651,7 +573,7 @@ void Thread::YieldCPU() {
static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
-#if defined(__CYGWIN__)
+#if V8_OS_CYGWIN
// We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
// because pthread_key_t is a pointer type on Cygwin. This will probably not
// work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
@@ -665,7 +587,7 @@ static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
-#if defined(__CYGWIN__)
+#if V8_OS_CYGWIN
STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
intptr_t ptr_key = static_cast<intptr_t>(local_key);
return reinterpret_cast<pthread_key_t>(ptr_key);
@@ -677,7 +599,7 @@ static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
#ifdef V8_FAST_TLS_SUPPORTED
-static Atomic32 tls_base_offset_initialized = 0;
+static base::Atomic32 tls_base_offset_initialized = 0;
intptr_t kMacTlsBaseOffset = 0;
// It's safe to do the initialization more than once, but it has to be
@@ -713,7 +635,7 @@ static void InitializeTlsBaseOffset() {
kMacTlsBaseOffset = 0;
}
- Release_Store(&tls_base_offset_initialized, 1);
+ base::Release_Store(&tls_base_offset_initialized, 1);
}
diff --git a/chromium/v8/src/platform-qnx.cc b/chromium/v8/src/platform-qnx.cc
new file mode 100644
index 00000000000..3c95650f57a
--- /dev/null
+++ b/chromium/v8/src/platform-qnx.cc
@@ -0,0 +1,373 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for QNX goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <backtrace.h>
+
+// QNX requires memory pages to be marked as executable.
+// Otherwise, the OS raises an exception when executing code in that page.
+#include <sys/types.h> // mmap & munmap
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <fcntl.h> // open
+#include <unistd.h> // sysconf
+#include <strings.h> // index
+#include <errno.h>
+#include <stdarg.h>
+#include <sys/procfs.h>
+
+#undef MAP_TYPE
+
+#include "src/v8.h"
+
+#include "src/platform.h"
+
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on QNX since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+#ifdef __arm__
+
+bool OS::ArmUsingHardFloat() {
+ // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+ // the Floating Point ABI used (PCS stands for Procedure Call Standard).
+ // We use these as well as a couple of other defines to statically determine
+ // what FP ABI is used.
+ // GCC versions 4.4 and below don't support hard-fp.
+ // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
+ // __ARM_PCS_VFP.
+
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION >= 40600
+#if defined(__ARM_PCS_VFP)
+ return true;
+#else
+ return false;
+#endif
+
+#elif GCC_VERSION < 40500
+ return false;
+
+#else
+#if defined(__ARM_PCS_VFP)
+ return true;
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+ !defined(__VFP_FP__)
+ return false;
+#else
+#error "Your version of GCC does not report the FP ABI compiled for." \
+ "Please report it on this issue" \
+ "http://code.google.com/p/v8/issues/detail?id=2140"
+
+#endif
+#endif
+#undef GCC_VERSION
+}
+
+#endif // __arm__
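The GCC_VERSION arithmetic above packs major, minor, and patch level into a single comparable integer, so version checks become plain comparisons; for example GCC 4.6.3 yields 4 * 10000 + 6 * 100 + 3 = 40603:

    #define GCC_VERSION (__GNUC__ * 10000 \
                         + __GNUC_MINOR__ * 100 \
                         + __GNUC_PATCHLEVEL__)

    #if GCC_VERSION >= 40600  // reads as "GCC 4.6 or newer"
    // __ARM_PCS / __ARM_PCS_VFP can be trusted from here on.
    #endif
    #undef GCC_VERSION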
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* addr = OS::GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) OS::Free(memory_, size_);
+ fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ procfs_mapinfo *mapinfos = NULL, *mapinfo;
+ int proc_fd, num, i;
+
+ struct {
+ procfs_debuginfo info;
+ char buff[PATH_MAX];
+ } map;
+
+ char buf[PATH_MAX + 1];
+ snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
+
+ if ((proc_fd = open(buf, O_RDONLY)) == -1) {
+ // open() failed, so there is no descriptor to close.
+ return result;
+ }
+
+ /* Get the number of map entries. */
+ if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
+ close(proc_fd);
+ return result;
+ }
+
+ mapinfos = reinterpret_cast<procfs_mapinfo *>(
+ malloc(num * sizeof(procfs_mapinfo)));
+ if (mapinfos == NULL) {
+ close(proc_fd);
+ return result;
+ }
+
+ /* Fill the map entries. */
+ if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
+ mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
+ free(mapinfos);
+ close(proc_fd);
+ return result;
+ }
+
+ for (i = 0; i < num; i++) {
+ mapinfo = mapinfos + i;
+ if (mapinfo->flags & MAP_ELF) {
+ map.info.vaddr = mapinfo->vaddr;
+ if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
+ continue;
+ }
+ result.push_back(SharedLibraryAddress(
+ map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
+ }
+ }
+ free(mapinfos);
+ close(proc_fd);
+ return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ return false;
+}
+
+} } // namespace v8::internal
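
The aligned constructor above over-reserves by the alignment and then returns the slack. A standalone sketch of that trick under POSIX mmap (illustrative name; alignment is assumed to be a power-of-two multiple of the page size so the munmap boundaries stay page-aligned):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static void* ReserveAligned(size_t size, size_t alignment) {
      size_t request = size + alignment;  // slack guarantees an aligned fit
      void* reservation = mmap(NULL, request, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (reservation == MAP_FAILED) return NULL;
      uint8_t* base = static_cast<uint8_t*>(reservation);
      uintptr_t aligned = (reinterpret_cast<uintptr_t>(base) + alignment - 1) &
                          ~(alignment - 1);
      uint8_t* aligned_base = reinterpret_cast<uint8_t*>(aligned);
      // Unmap the unused prefix and suffix, as the constructor above does.
      if (aligned_base != base) munmap(base, aligned_base - base);
      size_t suffix = request - static_cast<size_t>(aligned_base - base) - size;
      if (suffix != 0) munmap(aligned_base + size, suffix);
      return aligned_base;
    }
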
diff --git a/chromium/v8/src/platform-solaris.cc b/chromium/v8/src/platform-solaris.cc
index a0590cbecb8..fc8cb727a52 100644
--- a/chromium/v8/src/platform-solaris.cc
+++ b/chromium/v8/src/platform-solaris.cc
@@ -1,32 +1,9 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Solaris 10 goes here. For the POSIX comaptible
-// parts the implementation is in platform-posix.cc.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#ifdef __sparc
# error "V8 does not support the SPARC CPU architecture."
@@ -49,11 +26,9 @@
#undef MAP_TYPE
-#include "v8.h"
+#include "src/v8.h"
-#include "platform.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
+#include "src/platform.h"
// It seems there is a bug in some Solaris distributions (experienced in
@@ -80,16 +55,16 @@ namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on Solaris.
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
tzset();
return -static_cast<double>(timezone * msPerSecond);
}
@@ -102,10 +77,7 @@ void* OS::Allocate(const size_t requested,
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (mbase == MAP_FAILED) {
- LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
- return NULL;
- }
+ if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
@@ -159,7 +131,8 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses(Isolate* isolate) {
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ return std::vector<SharedLibraryAddress>();
}
@@ -167,44 +140,6 @@ void OS::SignalCodeMovingGC() {
}
-struct StackWalker {
- Vector<OS::StackFrame>& frames;
- int index;
-};
-
-
-static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
- struct StackWalker* walker = static_cast<struct StackWalker*>(data);
- Dl_info info;
-
- int i = walker->index;
-
- walker->frames[i].address = reinterpret_cast<void*>(pc);
-
- // Make sure line termination is in place.
- walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
-
- Vector<char> text = MutableCStrVector(walker->frames[i].text,
- OS::kStackWalkMaxTextLen);
-
- if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) {
- OS::SNPrintF(text, "[0x%p]", pc);
- } else if ((info.dli_fname != NULL && info.dli_sname != NULL)) {
- // We have symbol info.
- OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc);
- } else {
- // No local symbol info.
- OS::SNPrintF(text,
- "%s'0x%p [0x%p]",
- info.dli_fname,
- pc - reinterpret_cast<uintptr_t>(info.dli_fbase),
- pc);
- }
- walker->index++;
- return 0;
-}
-
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
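
Both QNX and Solaris now implement the reshaped profiler hook: GetSharedLibraryAddresses() hands the module list back to the caller instead of logging through an Isolate. A hypothetical caller-side sketch (the printf formatting is illustrative only):

    std::vector<OS::SharedLibraryAddress> libs = OS::GetSharedLibraryAddresses();
    for (size_t i = 0; i < libs.size(); ++i) {
      // start/end are uintptr_t module bounds; print them as pointers.
      printf("%s [%p, %p)\n", libs[i].library_path.c_str(),
             reinterpret_cast<void*>(libs[i].start),
             reinterpret_cast<void*>(libs[i].end));
    }
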
diff --git a/chromium/v8/src/platform-win32.cc b/chromium/v8/src/platform-win32.cc
index ea11806cb41..b1e6478bc8c 100644
--- a/chromium/v8/src/platform-win32.cc
+++ b/chromium/v8/src/platform-win32.cc
@@ -1,31 +1,8 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for Win32.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Win32.
// Secure API functions are not available using MinGW with msvcrt.dll
// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
@@ -38,15 +15,12 @@
#endif // MINGW_HAS_SECURE_API
#endif // __MINGW32__
-#include "win32-headers.h"
+#include "src/base/win32-headers.h"
-#include "v8.h"
+#include "src/v8.h"
-#include "codegen.h"
-#include "isolate-inl.h"
-#include "platform.h"
-#include "simulator.h"
-#include "vm-state-inl.h"
+#include "src/isolate-inl.h"
+#include "src/platform.h"
#ifdef _MSC_VER
@@ -133,97 +107,95 @@ intptr_t OS::MaxVirtualMemory() {
}
-#if V8_TARGET_ARCH_IA32
-static void MemMoveWrapper(void* dest, const void* src, size_t size) {
- memmove(dest, src, size);
-}
-
-
-// Initialize to library version so we can call this at any time during startup.
-static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
-
-// Defined in codegen-ia32.cc.
-OS::MemMoveFunction CreateMemMoveFunction();
-
-// Copy memory area to disjoint memory area.
-void OS::MemMove(void* dest, const void* src, size_t size) {
- if (size == 0) return;
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- (*memmove_function)(dest, src, size);
-}
-
-#endif // V8_TARGET_ARCH_IA32
-
-#ifdef _WIN64
-typedef double (*ModuloFunction)(double, double);
-static ModuloFunction modulo_function = NULL;
-// Defined in codegen-x64.cc.
-ModuloFunction CreateModuloFunction();
-
-void init_modulo_function() {
- modulo_function = CreateModuloFunction();
-}
-
-
-double modulo(double x, double y) {
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- return (*modulo_function)(x, y);
-}
-#else // Win32
+class TimezoneCache {
+ public:
+ TimezoneCache() : initialized_(false) { }
-double modulo(double x, double y) {
- // Workaround MS fmod bugs. ECMA-262 says:
- // dividend is finite and divisor is an infinity => result equals dividend
- // dividend is a zero and divisor is nonzero finite => result equals dividend
- if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
- !(x == 0 && (y != 0 && std::isfinite(y)))) {
- x = fmod(x, y);
+ void Clear() {
+ initialized_ = false;
}
- return x;
-}
-
-#endif // _WIN64
+ // Initialize timezone information. The timezone information is obtained from
+ // Windows. If we cannot get the timezone information, we fall back to CET.
+ void InitializeIfNeeded() {
+ // Just return if timezone information has already been initialized.
+ if (initialized_) return;
+
+ // Initialize POSIX time zone data.
+ _tzset();
+ // Obtain timezone information from operating system.
+ memset(&tzinfo_, 0, sizeof(tzinfo_));
+ if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
+ // If we cannot get timezone information we fall back to CET.
+ tzinfo_.Bias = -60;
+ tzinfo_.StandardDate.wMonth = 10;
+ tzinfo_.StandardDate.wDay = 5;
+ tzinfo_.StandardDate.wHour = 3;
+ tzinfo_.StandardBias = 0;
+ tzinfo_.DaylightDate.wMonth = 3;
+ tzinfo_.DaylightDate.wDay = 5;
+ tzinfo_.DaylightDate.wHour = 2;
+ tzinfo_.DaylightBias = -60;
+ }
-#define UNARY_MATH_FUNCTION(name, generator) \
-static UnaryMathFunction fast_##name##_function = NULL; \
-void init_fast_##name##_function() { \
- fast_##name##_function = generator; \
-} \
-double fast_##name(double x) { \
- return (*fast_##name##_function)(x); \
-}
-
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
-UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
-UNARY_MATH_FUNCTION(exp, CreateExpFunction())
-UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-
-#undef UNARY_MATH_FUNCTION
-
+ // Make standard and DST timezone names.
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
+ std_tz_name_, kTzNameSize, NULL, NULL);
+ std_tz_name_[kTzNameSize - 1] = '\0';
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
+ dst_tz_name_, kTzNameSize, NULL, NULL);
+ dst_tz_name_[kTzNameSize - 1] = '\0';
+
+ // If the OS returned an empty string or a resource id (like "@tzres.dll,-211"),
+ // simply guess the name from the UTC bias of the timezone.
+ // To properly resolve the resource identifier requires a library load,
+ // which is not possible in a sandbox.
+ if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
+ OS::SNPrintF(std_tz_name_, kTzNameSize - 1,
+ "%s Standard Time",
+ GuessTimezoneNameFromBias(tzinfo_.Bias));
+ }
+ if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
+ OS::SNPrintF(dst_tz_name_, kTzNameSize - 1,
+ "%s Daylight Time",
+ GuessTimezoneNameFromBias(tzinfo_.Bias));
+ }
+ // Timezone information initialized.
+ initialized_ = true;
+ }
-void lazily_initialize_fast_exp() {
- if (fast_exp_function == NULL) {
- init_fast_exp_function();
+ // Guess the name of the timezone from the bias.
+ // The guess is very biased towards the northern hemisphere.
+ const char* GuessTimezoneNameFromBias(int bias) {
+ static const int kHour = 60;
+ switch (-bias) {
+ case -9*kHour: return "Alaska";
+ case -8*kHour: return "Pacific";
+ case -7*kHour: return "Mountain";
+ case -6*kHour: return "Central";
+ case -5*kHour: return "Eastern";
+ case -4*kHour: return "Atlantic";
+ case 0*kHour: return "GMT";
+ case +1*kHour: return "Central Europe";
+ case +2*kHour: return "Eastern Europe";
+ case +3*kHour: return "Russia";
+ case +5*kHour + 30: return "India";
+ case +8*kHour: return "China";
+ case +9*kHour: return "Japan";
+ case +12*kHour: return "New Zealand";
+ default: return "Local";
+ }
}
-}
-void MathSetup() {
-#ifdef _WIN64
- init_modulo_function();
-#endif
- init_fast_sin_function();
- init_fast_cos_function();
- init_fast_tan_function();
- init_fast_log_function();
- // fast_exp is initialized lazily.
- init_fast_sqrt_function();
-}
+ private:
+ static const int kTzNameSize = 128;
+ bool initialized_;
+ char std_tz_name_[kTzNameSize];
+ char dst_tz_name_[kTzNameSize];
+ TIME_ZONE_INFORMATION tzinfo_;
+ friend class Win32Time;
+};
// ----------------------------------------------------------------------------
@@ -250,14 +222,14 @@ class Win32Time {
// LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
// routine also takes into account whether daylight saving is in effect
// at the time.
- int64_t LocalOffset();
+ int64_t LocalOffset(TimezoneCache* cache);
// Returns the daylight savings time offset for the time in milliseconds.
- int64_t DaylightSavingsOffset();
+ int64_t DaylightSavingsOffset(TimezoneCache* cache);
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
- char* LocalTimezone();
+ char* LocalTimezone(TimezoneCache* cache);
private:
// Constants for time conversion.
@@ -266,25 +238,10 @@ class Win32Time {
static const int64_t kMsPerMinute = 60000;
// Constants for timezone information.
- static const int kTzNameSize = 128;
static const bool kShortTzNames = false;
- // Timezone information. We need to have static buffers for the
- // timezone names because we return pointers to these in
- // LocalTimezone().
- static bool tz_initialized_;
- static TIME_ZONE_INFORMATION tzinfo_;
- static char std_tz_name_[kTzNameSize];
- static char dst_tz_name_[kTzNameSize];
-
- // Initialize the timezone information (if not already done).
- static void TzSet();
-
- // Guess the name of the timezone from the bias.
- static const char* GuessTimezoneNameFromBias(int bias);
-
// Return whether or not daylight savings time is in effect at this time.
- bool InDST();
+ bool InDST(TimezoneCache* cache);
// Accessor for FILETIME representation.
FILETIME& ft() { return time_.ft_; }
@@ -306,13 +263,6 @@ class Win32Time {
};
-// Static variables.
-bool Win32Time::tz_initialized_ = false;
-TIME_ZONE_INFORMATION Win32Time::tzinfo_;
-char Win32Time::std_tz_name_[kTzNameSize];
-char Win32Time::dst_tz_name_[kTzNameSize];
-
-
// Initialize timestamp to start of epoch.
Win32Time::Win32Time() {
t() = 0;
@@ -401,90 +351,13 @@ void Win32Time::SetToCurrentTime() {
}
-// Guess the name of the timezone from the bias.
-// The guess is very biased towards the northern hemisphere.
-const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
- static const int kHour = 60;
- switch (-bias) {
- case -9*kHour: return "Alaska";
- case -8*kHour: return "Pacific";
- case -7*kHour: return "Mountain";
- case -6*kHour: return "Central";
- case -5*kHour: return "Eastern";
- case -4*kHour: return "Atlantic";
- case 0*kHour: return "GMT";
- case +1*kHour: return "Central Europe";
- case +2*kHour: return "Eastern Europe";
- case +3*kHour: return "Russia";
- case +5*kHour + 30: return "India";
- case +8*kHour: return "China";
- case +9*kHour: return "Japan";
- case +12*kHour: return "New Zealand";
- default: return "Local";
- }
-}
-
-
-// Initialize timezone information. The timezone information is obtained from
-// windows. If we cannot get the timezone information we fall back to CET.
-// Please notice that this code is not thread-safe.
-void Win32Time::TzSet() {
- // Just return if timezone information has already been initialized.
- if (tz_initialized_) return;
-
- // Initialize POSIX time zone data.
- _tzset();
- // Obtain timezone information from operating system.
- memset(&tzinfo_, 0, sizeof(tzinfo_));
- if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
- // If we cannot get timezone information we fall back to CET.
- tzinfo_.Bias = -60;
- tzinfo_.StandardDate.wMonth = 10;
- tzinfo_.StandardDate.wDay = 5;
- tzinfo_.StandardDate.wHour = 3;
- tzinfo_.StandardBias = 0;
- tzinfo_.DaylightDate.wMonth = 3;
- tzinfo_.DaylightDate.wDay = 5;
- tzinfo_.DaylightDate.wHour = 2;
- tzinfo_.DaylightBias = -60;
- }
-
- // Make standard and DST timezone names.
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
- std_tz_name_, kTzNameSize, NULL, NULL);
- std_tz_name_[kTzNameSize - 1] = '\0';
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
- dst_tz_name_, kTzNameSize, NULL, NULL);
- dst_tz_name_[kTzNameSize - 1] = '\0';
-
- // If OS returned empty string or resource id (like "@tzres.dll,-211")
- // simply guess the name from the UTC bias of the timezone.
- // To properly resolve the resource identifier requires a library load,
- // which is not possible in a sandbox.
- if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1),
- "%s Standard Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
- if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1),
- "%s Daylight Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
-
- // Timezone information initialized.
- tz_initialized_ = true;
-}
-
-
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
-int64_t Win32Time::LocalOffset() {
- // Initialize timezone information, if needed.
- TzSet();
+int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
+ cache->InitializeIfNeeded();
Win32Time rounded_to_second(*this);
rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
@@ -507,29 +380,30 @@ int64_t Win32Time::LocalOffset() {
if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
if (posix_local_time_struct.tm_isdst > 0) {
- return (tzinfo_.Bias + tzinfo_.DaylightBias) * -kMsPerMinute;
+ return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
} else if (posix_local_time_struct.tm_isdst == 0) {
- return (tzinfo_.Bias + tzinfo_.StandardBias) * -kMsPerMinute;
+ return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
} else {
- return tzinfo_.Bias * -kMsPerMinute;
+ return cache->tzinfo_.Bias * -kMsPerMinute;
}
}
// Return whether or not daylight savings time is in effect at this time.
-bool Win32Time::InDST() {
- // Initialize timezone information, if needed.
- TzSet();
+bool Win32Time::InDST(TimezoneCache* cache) {
+ cache->InitializeIfNeeded();
// Determine if DST is in effect at the specified time.
bool in_dst = false;
- if (tzinfo_.StandardDate.wMonth != 0 || tzinfo_.DaylightDate.wMonth != 0) {
+ if (cache->tzinfo_.StandardDate.wMonth != 0 ||
+ cache->tzinfo_.DaylightDate.wMonth != 0) {
// Get the local timezone offset for the timestamp in milliseconds.
- int64_t offset = LocalOffset();
+ int64_t offset = LocalOffset(cache);
// Compute the offset for DST. The bias parameters in the timezone info
// are specified in minutes. These must be converted to milliseconds.
- int64_t dstofs = -(tzinfo_.Bias + tzinfo_.DaylightBias) * kMsPerMinute;
+ int64_t dstofs =
+ -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute;
// If the local time offset equals the timezone bias plus the daylight
// bias then DST is in effect.
@@ -541,30 +415,17 @@ bool Win32Time::InDST() {
// Return the daylight savings time offset for this time.
-int64_t Win32Time::DaylightSavingsOffset() {
- return InDST() ? 60 * kMsPerMinute : 0;
+int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) {
+ return InDST(cache) ? 60 * kMsPerMinute : 0;
}
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
-char* Win32Time::LocalTimezone() {
+char* Win32Time::LocalTimezone(TimezoneCache* cache) {
// Return the standard or DST time zone name based on whether daylight
// saving is in effect at the given time.
- return InDST() ? dst_tz_name_ : std_tz_name_;
-}
-
-
-void OS::PostSetUp() {
- // Math functions depend on CPU features therefore they are initialized after
- // CPU.
- MathSetup();
-#if V8_TARGET_ARCH_IA32
- OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
- if (generated_memmove != NULL) {
- memmove_function = generated_memmove;
- }
-#endif
+ return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_;
}
@@ -594,27 +455,43 @@ double OS::TimeCurrentMillis() {
}
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new TimezoneCache();
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+ delete cache;
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+ cache->Clear();
+}
+
+
// Returns a string identifying the current timezone taking into
// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- return Win32Time(time).LocalTimezone();
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ return Win32Time(time).LocalTimezone(cache);
}
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
// Use current time, rounded to the millisecond.
Win32Time t(TimeCurrentMillis());
// Time::LocalOffset includes any daylight savings offset, so subtract it.
- return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
+ return static_cast<double>(t.LocalOffset(cache) -
+ t.DaylightSavingsOffset(cache));
}
// Returns the daylight savings offset in milliseconds for the given
// time.
-double OS::DaylightSavingsOffset(double time) {
- int64_t offset = Win32Time(time).DaylightSavingsOffset();
+double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) {
+ int64_t offset = Win32Time(time).DaylightSavingsOffset(cache);
return static_cast<double>(offset);
}
@@ -670,15 +547,15 @@ static bool HasConsole() {
static void VPrintHelper(FILE* stream, const char* format, va_list args) {
- if (HasConsole()) {
- vfprintf(stream, format, args);
- } else {
+ if ((stream == stdout || stream == stderr) && !HasConsole()) {
// It is important to use safe print here in order to avoid
// overflowing the buffer. We might truncate the output, but this
// does not crash.
- EmbeddedVector<char, 4096> buffer;
- OS::VSNPrintF(buffer, format, args);
- OutputDebugStringA(buffer.start());
+ char buffer[4096];
+ OS::VSNPrintF(buffer, sizeof(buffer), format, args);
+ OutputDebugStringA(buffer);
+ } else {
+ vfprintf(stream, format, args);
}
}
@@ -761,22 +638,22 @@ void OS::VPrintError(const char* format, va_list args) {
}
-int OS::SNPrintF(Vector<char> str, const char* format, ...) {
+int OS::SNPrintF(char* str, int length, const char* format, ...) {
va_list args;
va_start(args, format);
- int result = VSNPrintF(str, format, args);
+ int result = VSNPrintF(str, length, format, args);
va_end(args);
return result;
}
-int OS::VSNPrintF(Vector<char> str, const char* format, va_list args) {
- int n = _vsnprintf_s(str.start(), str.length(), _TRUNCATE, format, args);
+int OS::VSNPrintF(char* str, int length, const char* format, va_list args) {
+ int n = _vsnprintf_s(str, length, _TRUNCATE, format, args);
// Make sure to zero-terminate the string if the output was
// truncated or if there was an error.
- if (n < 0 || n >= str.length()) {
- if (str.length() > 0)
- str[str.length() - 1] = '\0';
+ if (n < 0 || n >= length) {
+ if (length > 0)
+ str[length - 1] = '\0';
return -1;
} else {
return n;
@@ -789,12 +666,12 @@ char* OS::StrChr(char* str, int c) {
}
-void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
+void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
// Use _TRUNCATE or strncpy_s crashes (by design) if buffer is too small.
- size_t buffer_size = static_cast<size_t>(dest.length());
+ size_t buffer_size = static_cast<size_t>(length);
+ if (n + 1 > buffer_size) // account for the trailing '\0'
n = _TRUNCATE;
- int result = strncpy_s(dest.start(), dest.length(), src, n);
+ int result = strncpy_s(dest, length, src, n);
USE(result);
ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
}
@@ -889,10 +766,7 @@ void* OS::Allocate(const size_t requested,
MEM_COMMIT | MEM_RESERVE,
prot);
- if (mbase == NULL) {
- LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed"));
- return NULL;
- }
+ if (mbase == NULL) return NULL;
ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
@@ -931,12 +805,11 @@ void OS::Sleep(int milliseconds) {
void OS::Abort() {
- if (IsDebuggerPresent() || FLAG_break_on_abort) {
- DebugBreak();
- } else {
- // Make the MSVCRT do a silent abort.
- raise(SIGABRT);
+ if (FLAG_hard_abort) {
+ V8_IMMEDIATE_CRASH();
}
+ // Make the MSVCRT do a silent abort.
+ raise(SIGABRT);
}
@@ -1004,7 +877,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
if (file_mapping == NULL) return NULL;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- if (memory) OS::MemMove(memory, initial, size);
+ if (memory) MemMove(memory, initial, size);
return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
@@ -1185,10 +1058,13 @@ TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
// Load the symbols for generating stack traces.
-static bool LoadSymbols(Isolate* isolate, HANDLE process_handle) {
+static std::vector<OS::SharedLibraryAddress> LoadSymbols(
+ HANDLE process_handle) {
+ static std::vector<OS::SharedLibraryAddress> result;
+
static bool symbols_loaded = false;
- if (symbols_loaded) return true;
+ if (symbols_loaded) return result;
BOOL ok;
@@ -1196,7 +1072,7 @@ static bool LoadSymbols(Isolate* isolate, HANDLE process_handle) {
ok = _SymInitialize(process_handle, // hProcess
NULL, // UserSearchPath
false); // fInvadeProcess
- if (!ok) return false;
+ if (!ok) return result;
DWORD options = _SymGetOptions();
options |= SYMOPT_LOAD_LINES;
@@ -1208,13 +1084,13 @@ static bool LoadSymbols(Isolate* isolate, HANDLE process_handle) {
if (!ok) {
int err = GetLastError();
PrintF("%d\n", err);
- return false;
+ return result;
}
HANDLE snapshot = _CreateToolhelp32Snapshot(
TH32CS_SNAPMODULE, // dwFlags
GetCurrentProcessId()); // th32ProcessId
- if (snapshot == INVALID_HANDLE_VALUE) return false;
+ if (snapshot == INVALID_HANDLE_VALUE) return result;
MODULEENTRY32W module_entry;
module_entry.dwSize = sizeof(module_entry); // Set the size of the structure.
BOOL cont = _Module32FirstW(snapshot, &module_entry);
@@ -1232,31 +1108,37 @@ static bool LoadSymbols(Isolate* isolate, HANDLE process_handle) {
if (base == 0) {
int err = GetLastError();
if (err != ERROR_MOD_NOT_FOUND &&
- err != ERROR_INVALID_HANDLE) return false;
+ err != ERROR_INVALID_HANDLE) {
+ result.clear();
+ return result;
+ }
}
- LOG(isolate,
- SharedLibraryEvent(
- module_entry.szExePath,
- reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
- reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
- module_entry.modBaseSize)));
+ int lib_name_length = WideCharToMultiByte(
+ CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
+ std::string lib_name(lib_name_length, 0);
+ WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
+ lib_name_length, NULL, NULL);
+ result.push_back(OS::SharedLibraryAddress(
+ lib_name, reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
+ reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
+ module_entry.modBaseSize)));
cont = _Module32NextW(snapshot, &module_entry);
}
CloseHandle(snapshot);
symbols_loaded = true;
- return true;
+ return result;
}
-void OS::LogSharedLibraryAddresses(Isolate* isolate) {
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
// SharedLibraryEvents are logged when loading symbol information.
// Only the shared libraries loaded at the time of the call to
- // LogSharedLibraryAddresses are logged. DLLs loaded after
+ // GetSharedLibraryAddresses are logged. DLLs loaded after
// initialization are not accounted for.
- if (!LoadDbgHelpAndTlHelp32()) return;
+ if (!LoadDbgHelpAndTlHelp32()) return std::vector<OS::SharedLibraryAddress>();
HANDLE process_handle = GetCurrentProcess();
- LoadSymbols(isolate, process_handle);
+ return LoadSymbols(process_handle);
}
@@ -1277,13 +1159,19 @@ uint64_t OS::TotalPhysicalMemory() {
#else // __MINGW32__
-void OS::LogSharedLibraryAddresses(Isolate* isolate) { }
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ return std::vector<OS::SharedLibraryAddress>();
+}
+
+
void OS::SignalCodeMovingGC() { }
#endif // __MINGW32__
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Windows runs on anything.
+int OS::NumberOfProcessorsOnline() {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
}
@@ -1454,7 +1342,7 @@ Thread::Thread(const Options& options)
void Thread::set_name(const char* name) {
- OS::StrNCpy(Vector<char>(name_, sizeof(name_)), name, strlen(name));
+ OS::StrNCpy(name_, sizeof(name_), name, strlen(name));
name_[sizeof(name_) - 1] = '\0';
}
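
The formatting helpers drop their Vector<char> wrappers for a raw buffer plus an explicit length, as the set_name() change above shows. A minimal sketch of the new calling convention (buffer sizes are illustrative; assumes <string.h> for strlen):

    char buffer[128];
    // SNPrintF always NUL-terminates and returns -1 if output was truncated.
    int n = OS::SNPrintF(buffer, sizeof(buffer), "pid=%d",
                         OS::GetCurrentProcessId());
    if (n < 0) {
      // Truncated, but buffer still ends in '\0' and is safe to use.
    }

    char name[16];
    OS::StrNCpy(name, sizeof(name), "worker", strlen("worker"));
    name[sizeof(name) - 1] = '\0';
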
diff --git a/chromium/v8/src/platform.h b/chromium/v8/src/platform.h
index 3bd87a98326..497e3a86cd7 100644
--- a/chromium/v8/src/platform.h
+++ b/chromium/v8/src/platform.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This module contains the platform-specific code. This makes the rest of the
// code less dependent on operating system, compilers and runtime libraries.
@@ -44,12 +21,13 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
-#include <cstdarg>
+#include <stdarg.h>
+#include <string>
+#include <vector>
-#include "platform/mutex.h"
-#include "platform/semaphore.h"
-#include "utils.h"
-#include "v8globals.h"
+#include "src/base/build_config.h"
+#include "src/platform/mutex.h"
+#include "src/platform/semaphore.h"
#ifdef __sun
# ifndef signbit
@@ -59,11 +37,15 @@ int signbit(double x);
# endif
#endif
+#if V8_OS_QNX
+#include "src/qnx-math.h"
+#endif
+
// Microsoft Visual C++ specific stuff.
-#if V8_CC_MSVC
+#if V8_LIBC_MSVCRT
-#include "win32-headers.h"
-#include "win32-math.h"
+#include "src/base/win32-headers.h"
+#include "src/win32-math.h"
int strncasecmp(const char* s1, const char* s2, int n);
@@ -71,7 +53,7 @@ int strncasecmp(const char* s1, const char* s2, int n);
#if (_MSC_VER < 1800)
inline int lrint(double flt) {
int intgr;
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
__asm {
fld flt
fistp intgr
@@ -85,33 +67,19 @@ inline int lrint(double flt) {
#endif
return intgr;
}
-
#endif // _MSC_VER < 1800
-#endif // V8_CC_MSVC
+#endif // V8_LIBC_MSVCRT
namespace v8 {
namespace internal {
-double modulo(double x, double y);
-
-// Custom implementation of math functions.
-double fast_sin(double input);
-double fast_cos(double input);
-double fast_tan(double input);
-double fast_log(double input);
-double fast_exp(double input);
-double fast_sqrt(double input);
-// The custom exp implementation needs 16KB of lookup data; initialize it
-// on demand.
-void lazily_initialize_fast_exp();
-
// ----------------------------------------------------------------------------
// Fast TLS support
#ifndef V8_NO_FAST_TLS
-#if defined(_MSC_VER) && V8_HOST_ARCH_IA32
+#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32)
#define V8_FAST_TLS_SUPPORTED 1
@@ -122,6 +90,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
const intptr_t kTibExtraTlsOffset = 0xF94;
const intptr_t kMaxInlineSlots = 64;
const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
+ const intptr_t kPointerSize = sizeof(void*);
ASSERT(0 <= index && index < kMaxSlots);
if (index < kMaxInlineSlots) {
return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
@@ -160,6 +129,9 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#endif // V8_NO_FAST_TLS
+class TimezoneCache;
+
+
// ----------------------------------------------------------------------------
// OS
//
@@ -169,10 +141,6 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
class OS {
public:
- // Initializes the platform OS support that depend on CPU features. This is
- // called after CPU initialization.
- static void PostSetUp();
-
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// strive for high-precision timer resolution, preferable
@@ -183,16 +151,20 @@ class OS {
// 00:00:00 UTC, January 1, 1970.
static double TimeCurrentMillis();
+ static TimezoneCache* CreateTimezoneCache();
+ static void DisposeTimezoneCache(TimezoneCache* cache);
+ static void ClearTimezoneCache(TimezoneCache* cache);
+
// Returns a string identifying the current time zone. The
// timestamp is used for determining if DST is in effect.
- static const char* LocalTimezone(double time);
+ static const char* LocalTimezone(double time, TimezoneCache* cache);
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
- static double LocalTimeOffset();
+ static double LocalTimeOffset(TimezoneCache* cache);
// Returns the daylight savings offset for the given time.
- static double DaylightSavingsOffset(double time);
+ static double DaylightSavingsOffset(double time, TimezoneCache* cache);
// Returns last OS error.
static int GetLastError();
@@ -275,17 +247,28 @@ class OS {
// Safe formatting print. Ensures that str is always null-terminated.
// Returns the number of chars written, or -1 if output was truncated.
- static int SNPrintF(Vector<char> str, const char* format, ...);
- static int VSNPrintF(Vector<char> str,
+ static int SNPrintF(char* str, int length, const char* format, ...);
+ static int VSNPrintF(char* str,
+ int length,
const char* format,
va_list args);
static char* StrChr(char* str, int c);
- static void StrNCpy(Vector<char> dest, const char* src, size_t n);
+ static void StrNCpy(char* dest, int length, const char* src, size_t n);
// Support for the profiler. Can do nothing, in which case ticks
// occurring in shared libraries will not be properly accounted for.
- static void LogSharedLibraryAddresses(Isolate* isolate);
+ struct SharedLibraryAddress {
+ SharedLibraryAddress(
+ const std::string& library_path, uintptr_t start, uintptr_t end)
+ : library_path(library_path), start(start), end(end) {}
+
+ std::string library_path;
+ uintptr_t start;
+ uintptr_t end;
+ };
+
+ static std::vector<SharedLibraryAddress> GetSharedLibraryAddresses();
// Support for the profiler. Notifies the external profiling
// process that a code moving garbage collection starts. Can do
@@ -293,13 +276,8 @@ class OS {
// using --never-compact) if accurate profiling is desired.
static void SignalCodeMovingGC();
- // The return value indicates the CPU features we are sure of because of the
- // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
- // instructions.
- // This is a little messy because the interpretation is subject to the cross
- // of the CPU and the OS. The bits in the answer correspond to the bit
- // positions indicated by the members of the CpuFeature enum from globals.h
- static uint64_t CpuFeaturesImpliedByPlatform();
+ // Returns the number of processors online.
+ static int NumberOfProcessorsOnline();
// The total amount of physical memory available on the current system.
static uint64_t TotalPhysicalMemory();
@@ -319,66 +297,6 @@ class OS {
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
-#if defined(V8_TARGET_ARCH_IA32)
- // Limit below which the extra overhead of the MemCopy function is likely
- // to outweigh the benefits of faster copying.
- static const int kMinComplexMemCopy = 64;
-
- // Copy memory area. No restrictions.
- static void MemMove(void* dest, const void* src, size_t size);
- typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
-
- // Keep the distinction of "move" vs. "copy" for the benefit of other
- // architectures.
- static void MemCopy(void* dest, const void* src, size_t size) {
- MemMove(dest, src, size);
- }
-#elif defined(V8_HOST_ARCH_ARM)
- typedef void (*MemCopyUint8Function)(uint8_t* dest,
- const uint8_t* src,
- size_t size);
- static MemCopyUint8Function memcopy_uint8_function;
- static void MemCopyUint8Wrapper(uint8_t* dest,
- const uint8_t* src,
- size_t chars) {
- memcpy(dest, src, chars);
- }
- // For values < 16, the assembler function is slower than the inlined C code.
- static const int kMinComplexMemCopy = 16;
- static void MemCopy(void* dest, const void* src, size_t size) {
- (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint8_t*>(src),
- size);
- }
- static void MemMove(void* dest, const void* src, size_t size) {
- memmove(dest, src, size);
- }
-
- typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest,
- const uint8_t* src,
- size_t size);
- static MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
- static void MemCopyUint16Uint8Wrapper(uint16_t* dest,
- const uint8_t* src,
- size_t chars);
- // For values < 12, the assembler function is slower than the inlined C code.
- static const int kMinComplexConvertMemCopy = 12;
- static void MemCopyUint16Uint8(uint16_t* dest,
- const uint8_t* src,
- size_t size) {
- (*memcopy_uint16_uint8_function)(dest, src, size);
- }
-#else
- // Copy memory area to disjoint memory area.
- static void MemCopy(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
- }
- static void MemMove(void* dest, const void* src, size_t size) {
- memmove(dest, src, size);
- }
- static const int kMinComplexMemCopy = 16 * kPointerSize;
-#endif // V8_TARGET_ARCH_IA32
-
static int GetCurrentProcessId();
private:
@@ -491,13 +409,7 @@ class VirtualMemory {
class Thread {
public:
// Opaque data type for thread-local storage keys.
- // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
- // to ensure that enumeration type has correct value range (see Issue 830 for
- // more details).
- enum LocalStorageKey {
- LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
- LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
- };
+ typedef int32_t LocalStorageKey;
class Options {
public:
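
The timezone functions now thread an explicit TimezoneCache through every call instead of relying on hidden static state (the Win32 TzSet() removal above is the motivating case). A sketch of the lifecycle this API implies:

    TimezoneCache* cache = OS::CreateTimezoneCache();
    double now = OS::TimeCurrentMillis();
    const char* zone = OS::LocalTimezone(now, cache);
    double standard_offset = OS::LocalTimeOffset(cache);       // ms east of UTC, no DST
    double dst_offset = OS::DaylightSavingsOffset(now, cache);  // ms, 0 outside DST
    // Clear the cache if the host timezone may have changed; dispose when done.
    OS::ClearTimezoneCache(cache);
    OS::DisposeTimezoneCache(cache);
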
diff --git a/chromium/v8/src/platform/condition-variable.cc b/chromium/v8/src/platform/condition-variable.cc
index e2bf3882ece..e180acdcb7f 100644
--- a/chromium/v8/src/platform/condition-variable.cc
+++ b/chromium/v8/src/platform/condition-variable.cc
@@ -1,36 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "platform/condition-variable.h"
-
-#include <cerrno>
-#include <ctime>
-
-#include "platform/time.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/platform/condition-variable.h"
+
+#include <errno.h>
+#include <time.h>
+
+#include "src/platform/time.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/platform/condition-variable.h b/chromium/v8/src/platform/condition-variable.h
index 4d8a88aee79..4e8724cd273 100644
--- a/chromium/v8/src/platform/condition-variable.h
+++ b/chromium/v8/src/platform/condition-variable.h
@@ -1,34 +1,12 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PLATFORM_CONDITION_VARIABLE_H_
#define V8_PLATFORM_CONDITION_VARIABLE_H_
-#include "platform/mutex.h"
+#include "src/base/lazy-instance.h"
+#include "src/platform/mutex.h"
namespace v8 {
namespace internal {
@@ -129,9 +107,9 @@ class ConditionVariable V8_FINAL {
// LockGuard<Mutex> lock_guard(&my_mutex);
// my_condvar.Pointer()->Wait(&my_mutex);
// }
-typedef LazyStaticInstance<ConditionVariable,
- DefaultConstructTrait<ConditionVariable>,
- ThreadSafeInitOnceTrait>::type LazyConditionVariable;
+typedef base::LazyStaticInstance<
+ ConditionVariable, base::DefaultConstructTrait<ConditionVariable>,
+ base::ThreadSafeInitOnceTrait>::type LazyConditionVariable;
#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
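
Usage is unchanged by the base:: qualification; a slightly fuller sketch of the pattern from the header comment above (names illustrative):

    static LazyConditionVariable my_condvar = LAZY_CONDITION_VARIABLE_INITIALIZER;
    static Mutex my_mutex;

    void WaitForSignal() {
      LockGuard<Mutex> lock_guard(&my_mutex);
      // Pointer() constructs the ConditionVariable on first use,
      // thread-safely via base::ThreadSafeInitOnceTrait.
      my_condvar.Pointer()->Wait(&my_mutex);
    }
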
diff --git a/chromium/v8/src/platform/elapsed-timer.h b/chromium/v8/src/platform/elapsed-timer.h
index b61b007605b..9955c3eaae0 100644
--- a/chromium/v8/src/platform/elapsed-timer.h
+++ b/chromium/v8/src/platform/elapsed-timer.h
@@ -1,35 +1,12 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
#define V8_PLATFORM_ELAPSED_TIMER_H_
-#include "../checks.h"
-#include "time.h"
+#include "src/checks.h"
+#include "src/platform/time.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/platform/mutex.cc b/chromium/v8/src/platform/mutex.cc
index ad97740995d..014b41a5252 100644
--- a/chromium/v8/src/platform/mutex.cc
+++ b/chromium/v8/src/platform/mutex.cc
@@ -1,33 +1,10 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "platform/mutex.h"
-
-#include <cerrno>
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/platform/mutex.h"
+
+#include <errno.h>
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/platform/mutex.h b/chromium/v8/src/platform/mutex.h
index 125e9d4860f..1e934689959 100644
--- a/chromium/v8/src/platform/mutex.h
+++ b/chromium/v8/src/platform/mutex.h
@@ -1,37 +1,15 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PLATFORM_MUTEX_H_
#define V8_PLATFORM_MUTEX_H_
-#include "../lazy-instance.h"
+#include "src/base/lazy-instance.h"
#if V8_OS_WIN
-#include "../win32-headers.h"
+#include "src/base/win32-headers.h"
#endif
+#include "src/checks.h"
#if V8_OS_POSIX
#include <pthread.h> // NOLINT
@@ -123,9 +101,9 @@ class Mutex V8_FINAL {
// // Do something.
// }
//
-typedef LazyStaticInstance<Mutex,
- DefaultConstructTrait<Mutex>,
- ThreadSafeInitOnceTrait>::type LazyMutex;
+typedef v8::base::LazyStaticInstance<
+ Mutex, v8::base::DefaultConstructTrait<Mutex>,
+ v8::base::ThreadSafeInitOnceTrait>::type LazyMutex;
#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
@@ -204,9 +182,9 @@ class RecursiveMutex V8_FINAL {
// // Do something.
// }
//
-typedef LazyStaticInstance<RecursiveMutex,
- DefaultConstructTrait<RecursiveMutex>,
- ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
+typedef v8::base::LazyStaticInstance<
+ RecursiveMutex, v8::base::DefaultConstructTrait<RecursiveMutex>,
+ v8::base::ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
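The LazyMutex and LazyRecursiveMutex typedefs above now spell out the lazy-instance traits through v8::base; usage is unchanged. A minimal sketch, mirroring the header's own example comment:

    static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;

    void MyFunction() {
      LockGuard<Mutex> guard(my_mutex.Pointer());
      // Critical section; the Mutex is lazily constructed on first access.
    }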
diff --git a/chromium/v8/src/platform/semaphore.cc b/chromium/v8/src/platform/semaphore.cc
index c3e5826f4f7..18264f4e326 100644
--- a/chromium/v8/src/platform/semaphore.cc
+++ b/chromium/v8/src/platform/semaphore.cc
@@ -1,41 +1,18 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "platform/semaphore.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/platform/semaphore.h"
#if V8_OS_MACOSX
#include <mach/mach_init.h>
#include <mach/task.h>
#endif
-#include <cerrno>
+#include <errno.h>
-#include "checks.h"
-#include "platform/time.h"
+#include "src/checks.h"
+#include "src/platform/time.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/platform/semaphore.h b/chromium/v8/src/platform/semaphore.h
index 0babe5fd659..028af924f46 100644
--- a/chromium/v8/src/platform/semaphore.h
+++ b/chromium/v8/src/platform/semaphore.h
@@ -1,36 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PLATFORM_SEMAPHORE_H_
#define V8_PLATFORM_SEMAPHORE_H_
-#include "../lazy-instance.h"
+#include "src/base/lazy-instance.h"
#if V8_OS_WIN
-#include "../win32-headers.h"
+#include "src/base/win32-headers.h"
#endif
#if V8_OS_MACOSX
@@ -113,10 +90,10 @@ struct CreateSemaphoreTrait {
template <int N>
struct LazySemaphore {
- typedef typename LazyDynamicInstance<
+ typedef typename v8::base::LazyDynamicInstance<
Semaphore,
CreateSemaphoreTrait<N>,
- ThreadSafeInitOnceTrait>::type type;
+ v8::base::ThreadSafeInitOnceTrait>::type type;
};
#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
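Unlike LazyMutex, LazySemaphore is a LazyDynamicInstance: CreateSemaphoreTrait heap-allocates the Semaphore on first access, so the initial count can be carried as a template parameter. A hedged usage sketch:

    // A lazily created semaphore with an initial count of 0.
    static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;

    void Produce() { my_semaphore.Pointer()->Signal(); }  // count + 1
    void Consume() { my_semaphore.Pointer()->Wait(); }    // blocks while count == 0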
diff --git a/chromium/v8/src/platform/socket.cc b/chromium/v8/src/platform/socket.cc
deleted file mode 100644
index 2fce6f2992d..00000000000
--- a/chromium/v8/src/platform/socket.cc
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "platform/socket.h"
-
-#if V8_OS_POSIX
-#include <sys/types.h>
-#include <sys/socket.h>
-
-#include <netinet/in.h>
-#include <netdb.h>
-
-#include <unistd.h>
-#endif
-
-#include <cerrno>
-
-#include "checks.h"
-#include "once.h"
-
-namespace v8 {
-namespace internal {
-
-#if V8_OS_WIN
-
-static V8_DECLARE_ONCE(initialize_winsock) = V8_ONCE_INIT;
-
-
-static void InitializeWinsock() {
- WSADATA wsa_data;
- int result = WSAStartup(MAKEWORD(1, 0), &wsa_data);
- CHECK_EQ(0, result);
-}
-
-#endif // V8_OS_WIN
-
-
-Socket::Socket() {
-#if V8_OS_WIN
- // Be sure to initialize the WinSock DLL first.
- CallOnce(&initialize_winsock, &InitializeWinsock);
-#endif // V8_OS_WIN
-
- // Create the native socket handle.
- native_handle_ = ::socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
-}
-
-
-bool Socket::Bind(int port) {
- ASSERT_GE(port, 0);
- ASSERT_LT(port, 65536);
- if (!IsValid()) return false;
- struct sockaddr_in sin;
- memset(&sin, 0, sizeof(sin));
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- sin.sin_port = htons(static_cast<uint16_t>(port));
- int result = ::bind(
- native_handle_, reinterpret_cast<struct sockaddr*>(&sin), sizeof(sin));
- return result == 0;
-}
-
-
-bool Socket::Listen(int backlog) {
- if (!IsValid()) return false;
- int result = ::listen(native_handle_, backlog);
- return result == 0;
-}
-
-
-Socket* Socket::Accept() {
- if (!IsValid()) return NULL;
- while (true) {
- NativeHandle native_handle = ::accept(native_handle_, NULL, NULL);
- if (native_handle == kInvalidNativeHandle) {
-#if V8_OS_POSIX
- if (errno == EINTR) continue; // Retry after signal.
-#endif
- return NULL;
- }
- return new Socket(native_handle);
- }
-}
-
-
-bool Socket::Connect(const char* host, const char* port) {
- ASSERT_NE(NULL, host);
- ASSERT_NE(NULL, port);
- if (!IsValid()) return false;
-
- // Lookup host and port.
- struct addrinfo* info = NULL;
- struct addrinfo hint;
- memset(&hint, 0, sizeof(hint));
- hint.ai_family = AF_INET;
- hint.ai_socktype = SOCK_STREAM;
- hint.ai_protocol = IPPROTO_TCP;
- int result = ::getaddrinfo(host, port, &hint, &info);
- if (result != 0) {
- return false;
- }
-
- // Connect to the host on the given port.
- for (struct addrinfo* ai = info; ai != NULL; ai = ai->ai_next) {
- // Try to connect using this addr info.
- while (true) {
- result = ::connect(
- native_handle_, ai->ai_addr, static_cast<int>(ai->ai_addrlen));
- if (result == 0) {
- freeaddrinfo(info);
- return true;
- }
-#if V8_OS_POSIX
- if (errno == EINTR) continue; // Retry after signal.
-#endif
- break;
- }
- }
- freeaddrinfo(info);
- return false;
-}
-
-
-bool Socket::Shutdown() {
- if (!IsValid()) return false;
- // Shutdown socket for both read and write.
-#if V8_OS_POSIX
- int result = ::shutdown(native_handle_, SHUT_RDWR);
- ::close(native_handle_);
-#elif V8_OS_WIN
- int result = ::shutdown(native_handle_, SD_BOTH);
- ::closesocket(native_handle_);
-#endif
- native_handle_ = kInvalidNativeHandle;
- return result == 0;
-}
-
-
-int Socket::Send(const char* buffer, int length) {
- ASSERT(length <= 0 || buffer != NULL);
- if (!IsValid()) return 0;
- int offset = 0;
- while (offset < length) {
- int result = ::send(native_handle_, buffer + offset, length - offset, 0);
- if (result == 0) {
- break;
- } else if (result > 0) {
- ASSERT(result <= length - offset);
- offset += result;
- } else {
-#if V8_OS_POSIX
- if (errno == EINTR) continue; // Retry after signal.
-#endif
- return 0;
- }
- }
- return offset;
-}
-
-
-int Socket::Receive(char* buffer, int length) {
- if (!IsValid()) return 0;
- if (length <= 0) return 0;
- ASSERT_NE(NULL, buffer);
- while (true) {
- int result = ::recv(native_handle_, buffer, length, 0);
- if (result < 0) {
-#if V8_OS_POSIX
- if (errno == EINTR) continue; // Retry after signal.
-#endif
- return 0;
- }
- return result;
- }
-}
-
-
-bool Socket::SetReuseAddress(bool reuse_address) {
- if (!IsValid()) return 0;
- int v = reuse_address ? 1 : 0;
- int result = ::setsockopt(native_handle_, SOL_SOCKET, SO_REUSEADDR,
- reinterpret_cast<char*>(&v), sizeof(v));
- return result == 0;
-}
-
-
-// static
-int Socket::GetLastError() {
-#if V8_OS_POSIX
- return errno;
-#elif V8_OS_WIN
- // Be sure to initialize the WinSock DLL first.
- CallOnce(&initialize_winsock, &InitializeWinsock);
-
- // Now we can safely perform WSA calls.
- return ::WSAGetLastError();
-#endif
-}
-
-} } // namespace v8::internal
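The deleted debug Socket wrapper is self-contained, but one detail is worth keeping if the pattern is ever reused: every blocking call (accept, connect, send, recv) retries when interrupted by a signal. An illustrative POSIX-only sketch of that EINTR idiom:

    #include <cerrno>
    #include <sys/socket.h>

    // Returns bytes received, or -1 on a genuine error (signals are retried).
    ssize_t ReceiveRetrying(int fd, char* buffer, size_t length) {
      while (true) {
        ssize_t result = ::recv(fd, buffer, length, 0);
        if (result >= 0) return result;
        if (errno == EINTR) continue;  // Interrupted by a signal; retry.
        return -1;
      }
    }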
diff --git a/chromium/v8/src/platform/socket.h b/chromium/v8/src/platform/socket.h
deleted file mode 100644
index ff8c1de7ce7..00000000000
--- a/chromium/v8/src/platform/socket.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_SOCKET_H_
-#define V8_PLATFORM_SOCKET_H_
-
-#include "globals.h"
-#if V8_OS_WIN
-#include "win32-headers.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Socket
-//
-
-class Socket V8_FINAL {
- public:
- Socket();
- ~Socket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(int port) V8_WARN_UNUSED_RESULT;
- bool Listen(int backlog) V8_WARN_UNUSED_RESULT;
- Socket* Accept() V8_WARN_UNUSED_RESULT;
-
- // Client initialization.
- bool Connect(const char* host, const char* port) V8_WARN_UNUSED_RESULT;
-
- // Shutdown socket for both read and write. This causes blocking Send and
- // Receive calls to exit. After |Shutdown()| the Socket object cannot be
- // used for any communication.
- bool Shutdown();
-
- // Data Transimission
- // Return 0 on failure.
- int Send(const char* buffer, int length) V8_WARN_UNUSED_RESULT;
- int Receive(char* buffer, int length) V8_WARN_UNUSED_RESULT;
-
- // Set the value of the SO_REUSEADDR socket option.
- bool SetReuseAddress(bool reuse_address);
-
- V8_INLINE bool IsValid() const {
- return native_handle_ != kInvalidNativeHandle;
- }
-
- static int GetLastError();
-
- // The implementation-defined native handle type.
-#if V8_OS_POSIX
- typedef int NativeHandle;
- static const NativeHandle kInvalidNativeHandle = -1;
-#elif V8_OS_WIN
- typedef SOCKET NativeHandle;
- static const NativeHandle kInvalidNativeHandle = INVALID_SOCKET;
-#endif
-
- NativeHandle& native_handle() {
- return native_handle_;
- }
- const NativeHandle& native_handle() const {
- return native_handle_;
- }
-
- private:
- explicit Socket(NativeHandle native_handle) : native_handle_(native_handle) {}
-
- NativeHandle native_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(Socket);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_SOCKET_H_
diff --git a/chromium/v8/src/platform/time.cc b/chromium/v8/src/platform/time.cc
index de0ca16473f..09b6f8a470c 100644
--- a/chromium/v8/src/platform/time.cc
+++ b/chromium/v8/src/platform/time.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "platform/time.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/platform/time.h"
#if V8_OS_POSIX
#include <sys/time.h>
@@ -34,14 +11,15 @@
#include <mach/mach_time.h>
#endif
-#include <cstring>
+#include <string.h>
-#include "checks.h"
-#include "cpu.h"
-#include "platform.h"
#if V8_OS_WIN
-#include "win32-headers.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/win32-headers.h"
#endif
+#include "src/checks.h"
+#include "src/cpu.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
@@ -216,9 +194,9 @@ class Clock V8_FINAL {
};
-static LazyStaticInstance<Clock,
- DefaultConstructTrait<Clock>,
- ThreadSafeInitOnceTrait>::type clock = LAZY_STATIC_INSTANCE_INITIALIZER;
+static base::LazyStaticInstance<Clock, base::DefaultConstructTrait<Clock>,
+ base::ThreadSafeInitOnceTrait>::type clock =
+ LAZY_STATIC_INSTANCE_INITIALIZER;
Time Time::Now() {
@@ -485,10 +463,11 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock {
};
-static LazyStaticInstance<RolloverProtectedTickClock,
- DefaultConstructTrait<RolloverProtectedTickClock>,
- ThreadSafeInitOnceTrait>::type tick_clock =
- LAZY_STATIC_INSTANCE_INITIALIZER;
+static base::LazyStaticInstance<
+ RolloverProtectedTickClock,
+ base::DefaultConstructTrait<RolloverProtectedTickClock>,
+ base::ThreadSafeInitOnceTrait>::type tick_clock =
+ LAZY_STATIC_INSTANCE_INITIALIZER;
struct CreateHighResTickClockTrait {
@@ -512,9 +491,9 @@ struct CreateHighResTickClockTrait {
};
-static LazyDynamicInstance<TickClock,
+static base::LazyDynamicInstance<TickClock,
CreateHighResTickClockTrait,
- ThreadSafeInitOnceTrait>::type high_res_tick_clock =
+ base::ThreadSafeInitOnceTrait>::type high_res_tick_clock =
LAZY_DYNAMIC_INSTANCE_INITIALIZER;
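The RolloverProtectedTickClock referenced above guards against 32-bit millisecond counters wrapping (Win32's timeGetTime() rolls over roughly every 49.7 days). A hedged sketch of the idea only, not of V8's actual locked implementation:

    #include <stdint.h>

    class RolloverSketch {
     public:
      RolloverSketch() : last_raw_(0), rollovers_(0) {}
      // |raw| is a wrapping 32-bit millisecond reading, e.g. timeGetTime(),
      // sampled at least once per wrap interval.
      uint64_t NowMillis(uint32_t raw) {
        if (raw < last_raw_) rollovers_++;  // The 32-bit counter wrapped.
        last_raw_ = raw;
        return (static_cast<uint64_t>(rollovers_) << 32) + raw;
      }
     private:
      uint32_t last_raw_;
      uint64_t rollovers_;
    };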
diff --git a/chromium/v8/src/platform/time.h b/chromium/v8/src/platform/time.h
index 877e0203bb5..0cd234c3dd5 100644
--- a/chromium/v8/src/platform/time.h
+++ b/chromium/v8/src/platform/time.h
@@ -1,37 +1,14 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PLATFORM_TIME_H_
#define V8_PLATFORM_TIME_H_
-#include <ctime>
+#include <time.h>
#include <limits>
-#include "../allocation.h"
+#include "src/allocation.h"
// Forward declarations.
extern "C" {
diff --git a/chromium/v8/src/preparse-data-format.h b/chromium/v8/src/preparse-data-format.h
index e64326e578c..4d1ad7abbfa 100644
--- a/chromium/v8/src/preparse-data-format.h
+++ b/chromium/v8/src/preparse-data-format.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PREPARSE_DATA_FORMAT_H_
#define V8_PREPARSE_DATA_FORMAT_H_
@@ -37,21 +14,21 @@ struct PreparseDataConstants {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 7;
+ static const unsigned kCurrentVersion = 9;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
static const int kHasErrorOffset = 2;
static const int kFunctionsSizeOffset = 3;
- static const int kSymbolCountOffset = 4;
- static const int kSizeOffset = 5;
- static const int kHeaderSize = 6;
+ static const int kSizeOffset = 4;
+ static const int kHeaderSize = 5;
// If encoding a message, the following positions are fixed.
static const int kMessageStartPos = 0;
static const int kMessageEndPos = 1;
static const int kMessageArgCountPos = 2;
- static const int kMessageTextPos = 3;
+ static const int kIsReferenceErrorPos = 3;
+ static const int kMessageTextPos = 4;
static const unsigned char kNumberTerminator = 0x80u;
};
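With the symbol data gone, the header shrinks from six words to five and the format version jumps from 7 to 9. An illustrative validity check against the new constants:

    // Sanity-check a preparse data buffer against the version-9 layout.
    bool LooksLikePreparseData(const unsigned* data, int length_in_words) {
      return length_in_words >= PreparseDataConstants::kHeaderSize &&
             data[PreparseDataConstants::kMagicOffset] ==
                 PreparseDataConstants::kMagicNumber &&      // 0xBadDead
             data[PreparseDataConstants::kVersionOffset] ==
                 PreparseDataConstants::kCurrentVersion;     // now 9
    }

Error records likewise grow a word: position 3 now stores the is-reference-error flag, pushing the message text from position 3 to position 4.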
diff --git a/chromium/v8/src/preparse-data.cc b/chromium/v8/src/preparse-data.cc
index 8e088482850..5860e4144ec 100644
--- a/chromium/v8/src/preparse-data.cc
+++ b/chromium/v8/src/preparse-data.cc
@@ -1,68 +1,41 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "../include/v8stdint.h"
+#include "include/v8stdint.h"
-#include "preparse-data-format.h"
-#include "preparse-data.h"
+#include "src/preparse-data-format.h"
+#include "src/preparse-data.h"
-#include "checks.h"
-#include "globals.h"
-#include "hashmap.h"
+#include "src/checks.h"
+#include "src/globals.h"
+#include "src/hashmap.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder
-FunctionLoggingParserRecorder::FunctionLoggingParserRecorder()
- : function_store_(0),
- is_recording_(true),
- pause_count_(0) {
+CompleteParserRecorder::CompleteParserRecorder()
+ : function_store_(0) {
preamble_[PreparseDataConstants::kMagicOffset] =
PreparseDataConstants::kMagicNumber;
preamble_[PreparseDataConstants::kVersionOffset] =
PreparseDataConstants::kCurrentVersion;
preamble_[PreparseDataConstants::kHasErrorOffset] = false;
preamble_[PreparseDataConstants::kFunctionsSizeOffset] = 0;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
preamble_[PreparseDataConstants::kSizeOffset] = 0;
- ASSERT_EQ(6, PreparseDataConstants::kHeaderSize);
+ ASSERT_EQ(5, PreparseDataConstants::kHeaderSize);
#ifdef DEBUG
prev_start_ = -1;
#endif
}
-void FunctionLoggingParserRecorder::LogMessage(int start_pos,
- int end_pos,
- const char* message,
- const char* arg_opt) {
+void CompleteParserRecorder::LogMessage(int start_pos,
+ int end_pos,
+ const char* message,
+ const char* arg_opt,
+ bool is_reference_error) {
if (has_error()) return;
preamble_[PreparseDataConstants::kHasErrorOffset] = true;
function_store_.Reset();
@@ -72,14 +45,15 @@ void FunctionLoggingParserRecorder::LogMessage(int start_pos,
function_store_.Add(end_pos);
STATIC_ASSERT(PreparseDataConstants::kMessageArgCountPos == 2);
function_store_.Add((arg_opt == NULL) ? 0 : 1);
- STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 3);
+ STATIC_ASSERT(PreparseDataConstants::kIsReferenceErrorPos == 3);
+ function_store_.Add(is_reference_error ? 1 : 0);
+ STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 4);
WriteString(CStrVector(message));
if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
- is_recording_ = false;
}
-void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
+void CompleteParserRecorder::WriteString(Vector<const char> str) {
function_store_.Add(str.length());
for (int i = 0; i < str.length(); i++) {
function_store_.Add(str[i]);
@@ -87,98 +61,18 @@ void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
}
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record both function entries and symbols.
-
-Vector<unsigned> PartialParserRecorder::ExtractData() {
- int function_size = function_store_.size();
- int total_size = PreparseDataConstants::kHeaderSize + function_size;
- Vector<unsigned> data = Vector<unsigned>::New(total_size);
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
- int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
- if (function_size > 0) {
- function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
- symbol_start));
- }
- return data;
-}
-
-
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
-
-CompleteParserRecorder::CompleteParserRecorder()
- : FunctionLoggingParserRecorder(),
- literal_chars_(0),
- symbol_store_(0),
- symbol_keys_(0),
- string_table_(vector_compare),
- symbol_id_(0) {
-}
-
-
-void CompleteParserRecorder::LogSymbol(int start,
- int hash,
- bool is_ascii,
- Vector<const byte> literal_bytes) {
- Key key = { is_ascii, literal_bytes };
- HashMap::Entry* entry = string_table_.Lookup(&key, hash, true);
- int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- if (id == 0) {
- // Copy literal contents for later comparison.
- key.literal_bytes =
- Vector<const byte>::cast(literal_chars_.AddBlock(literal_bytes));
- // Put (symbol_id_ + 1) into entry and increment it.
- id = ++symbol_id_;
- entry->value = reinterpret_cast<void*>(id);
- Vector<Key> symbol = symbol_keys_.AddBlock(1, key);
- entry->key = &symbol[0];
- }
- WriteNumber(id - 1);
-}
-
-
Vector<unsigned> CompleteParserRecorder::ExtractData() {
int function_size = function_store_.size();
- // Add terminator to symbols, then pad to unsigned size.
- int symbol_size = symbol_store_.size();
- int padding = sizeof(unsigned) - (symbol_size % sizeof(unsigned));
- symbol_store_.AddBlock(padding, PreparseDataConstants::kNumberTerminator);
- symbol_size += padding;
- int total_size = PreparseDataConstants::kHeaderSize + function_size
- + (symbol_size / sizeof(unsigned));
+ int total_size = PreparseDataConstants::kHeaderSize + function_size;
Vector<unsigned> data = Vector<unsigned>::New(total_size);
preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = symbol_id_;
- OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
- int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
+ MemCopy(data.start(), preamble_, sizeof(preamble_));
if (function_size > 0) {
function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
- symbol_start));
- }
- if (!has_error()) {
- symbol_store_.WriteTo(
- Vector<byte>::cast(data.SubVector(symbol_start, total_size)));
+ total_size));
}
return data;
}
-void CompleteParserRecorder::WriteNumber(int number) {
- ASSERT(number >= 0);
-
- int mask = (1 << 28) - 1;
- for (int i = 28; i > 0; i -= 7) {
- if (number > mask) {
- symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
- number &= mask;
- }
- mask >>= 7;
- }
- symbol_store_.Add(static_cast<byte>(number));
-}
-
-
} } // namespace v8::internal.
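After this simplification, ExtractData emits just the five-word header followed by the raw function store; each LogFunction call contributes five unsigned values (start, end, literals, properties, strict_mode). A hedged sketch of walking that payload, assuming the five-word entry layout:

    void DumpFunctionEntries(Vector<unsigned> data) {
      int pos = PreparseDataConstants::kHeaderSize;
      int end = pos + static_cast<int>(
          data[PreparseDataConstants::kFunctionsSizeOffset]);
      for (; pos + 4 < end; pos += 5) {
        PrintF("function [%u..%u): literals=%u properties=%u strict_mode=%u\n",
               data[pos], data[pos + 1], data[pos + 2], data[pos + 3],
               data[pos + 4]);
      }
    }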
diff --git a/chromium/v8/src/preparse-data.h b/chromium/v8/src/preparse-data.h
index 3a1e99d5d18..8978098767c 100644
--- a/chromium/v8/src/preparse-data.h
+++ b/chromium/v8/src/preparse-data.h
@@ -1,42 +1,17 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PREPARSE_DATA_H_
#define V8_PREPARSE_DATA_H_
-#include "allocation.h"
-#include "hashmap.h"
-#include "utils-inl.h"
+#include "src/allocation.h"
+#include "src/hashmap.h"
+#include "src/utils-inl.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// ParserRecorder - Logging of preparser data.
// Abstract interface for preparse data recorder.
class ParserRecorder {
@@ -49,11 +24,7 @@ class ParserRecorder {
int end,
int literals,
int properties,
- LanguageMode language_mode) = 0;
-
- // Logs a symbol creation of a literal or identifier.
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
+ StrictMode strict_mode) = 0;
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
@@ -61,40 +32,32 @@ class ParserRecorder {
virtual void LogMessage(int start,
int end,
const char* message,
- const char* argument_opt) = 0;
-
- virtual int function_position() = 0;
-
- virtual int symbol_position() = 0;
-
- virtual int symbol_ids() = 0;
-
- virtual Vector<unsigned> ExtractData() = 0;
-
- virtual void PauseRecording() = 0;
-
- virtual void ResumeRecording() = 0;
+ const char* argument_opt,
+ bool is_reference_error) = 0;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ParserRecorder);
};
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder - Record only function entries
-
-class FunctionLoggingParserRecorder : public ParserRecorder {
+class SingletonLogger : public ParserRecorder {
public:
- FunctionLoggingParserRecorder();
- virtual ~FunctionLoggingParserRecorder() {}
+ SingletonLogger()
+ : has_error_(false), start_(-1), end_(-1), is_reference_error_(false) {}
+ virtual ~SingletonLogger() {}
+
+ void Reset() { has_error_ = false; }
virtual void LogFunction(int start,
int end,
int literals,
int properties,
- LanguageMode language_mode) {
- function_store_.Add(start);
- function_store_.Add(end);
- function_store_.Add(literals);
- function_store_.Add(properties);
- function_store_.Add(language_mode);
+ StrictMode strict_mode) {
+ ASSERT(!has_error_);
+ start_ = start;
+ end_ = end;
+ literals_ = literals;
+ properties_ = properties;
+ strict_mode_ = strict_mode;
}
// Logs an error message and marks the log as containing an error.
@@ -103,126 +66,106 @@ class FunctionLoggingParserRecorder : public ParserRecorder {
virtual void LogMessage(int start,
int end,
const char* message,
- const char* argument_opt);
-
- virtual int function_position() { return function_store_.size(); }
-
+ const char* argument_opt,
+ bool is_reference_error) {
+ if (has_error_) return;
+ has_error_ = true;
+ start_ = start;
+ end_ = end;
+ message_ = message;
+ argument_opt_ = argument_opt;
+ is_reference_error_ = is_reference_error;
+ }
- virtual Vector<unsigned> ExtractData() = 0;
+ bool has_error() const { return has_error_; }
- virtual void PauseRecording() {
- pause_count_++;
- is_recording_ = false;
+ int start() const { return start_; }
+ int end() const { return end_; }
+ int literals() const {
+ ASSERT(!has_error_);
+ return literals_;
}
-
- virtual void ResumeRecording() {
- ASSERT(pause_count_ > 0);
- if (--pause_count_ == 0) is_recording_ = !has_error();
+ int properties() const {
+ ASSERT(!has_error_);
+ return properties_;
}
-
- protected:
- bool has_error() {
- return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]);
+ StrictMode strict_mode() const {
+ ASSERT(!has_error_);
+ return strict_mode_;
}
-
- bool is_recording() {
- return is_recording_;
+ int is_reference_error() const { return is_reference_error_; }
+ const char* message() {
+ ASSERT(has_error_);
+ return message_;
+ }
+ const char* argument_opt() const {
+ ASSERT(has_error_);
+ return argument_opt_;
}
- void WriteString(Vector<const char> str);
-
- Collector<unsigned> function_store_;
- unsigned preamble_[PreparseDataConstants::kHeaderSize];
- bool is_recording_;
- int pause_count_;
-
-#ifdef DEBUG
- int prev_start_;
-#endif
+ private:
+ bool has_error_;
+ int start_;
+ int end_;
+ // For function entries.
+ int literals_;
+ int properties_;
+ StrictMode strict_mode_;
+ // For error messages.
+ const char* message_;
+ const char* argument_opt_;
+ bool is_reference_error_;
};
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record only function entries
-
-class PartialParserRecorder : public FunctionLoggingParserRecorder {
+class CompleteParserRecorder : public ParserRecorder {
public:
- PartialParserRecorder() : FunctionLoggingParserRecorder() { }
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
- virtual ~PartialParserRecorder() { }
- virtual Vector<unsigned> ExtractData();
- virtual int symbol_position() { return 0; }
- virtual int symbol_ids() { return 0; }
-};
-
-
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
+ struct Key {
+ bool is_one_byte;
+ Vector<const byte> literal_bytes;
+ };
-class CompleteParserRecorder: public FunctionLoggingParserRecorder {
- public:
CompleteParserRecorder();
- virtual ~CompleteParserRecorder() { }
+ virtual ~CompleteParserRecorder() {}
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, true, Vector<const byte>::cast(literal));
- }
-
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
+ virtual void LogFunction(int start,
+ int end,
+ int literals,
+ int properties,
+ StrictMode strict_mode) {
+ function_store_.Add(start);
+ function_store_.Add(end);
+ function_store_.Add(literals);
+ function_store_.Add(properties);
+ function_store_.Add(strict_mode);
}
- virtual Vector<unsigned> ExtractData();
-
- virtual int symbol_position() { return symbol_store_.size(); }
- virtual int symbol_ids() { return symbol_id_; }
+ // Logs an error message and marks the log as containing an error.
+ // Further logging will be ignored, and ExtractData will return a vector
+ // representing the error only.
+ virtual void LogMessage(int start,
+ int end,
+ const char* message,
+ const char* argument_opt,
+ bool is_reference_error_);
+ Vector<unsigned> ExtractData();
private:
- struct Key {
- bool is_ascii;
- Vector<const byte> literal_bytes;
- };
-
- virtual void LogSymbol(int start,
- int hash,
- bool is_ascii,
- Vector<const byte> literal);
-
- template <typename Char>
- static int vector_hash(Vector<const Char> string) {
- int hash = 0;
- for (int i = 0; i < string.length(); i++) {
- int c = static_cast<int>(string[i]);
- hash += c;
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
- return hash;
+ bool has_error() {
+ return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]);
}
- static bool vector_compare(void* a, void* b) {
- Key* string1 = reinterpret_cast<Key*>(a);
- Key* string2 = reinterpret_cast<Key*>(b);
- if (string1->is_ascii != string2->is_ascii) return false;
- int length = string1->literal_bytes.length();
- if (string2->literal_bytes.length() != length) return false;
- return memcmp(string1->literal_bytes.start(),
- string2->literal_bytes.start(), length) == 0;
- }
+ void WriteString(Vector<const char> str);
// Write a non-negative number to the symbol store.
void WriteNumber(int number);
- Collector<byte> literal_chars_;
- Collector<byte> symbol_store_;
- Collector<Key> symbol_keys_;
- HashMap string_table_;
- int symbol_id_;
+ Collector<unsigned> function_store_;
+ unsigned preamble_[PreparseDataConstants::kHeaderSize];
+
+#ifdef DEBUG
+ int prev_start_;
+#endif
};
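SingletonLogger is the lightweight ParserRecorder that the full parser can hand to the preparser when it preparses a single lazy function body: it keeps only the last function entry or the first error, with no Collector storage behind it. A hedged consumption sketch (the preparser variable is illustrative):

    SingletonLogger logger;
    PreParser::PreParseResult result =
        preparser->PreParseLazyFunction(strict_mode, is_generator, &logger);
    if (result == PreParser::kPreParseSuccess) {
      if (logger.has_error()) {
        // Re-report logger.message() / logger.argument_opt() at
        // [logger.start(), logger.end()).
      } else {
        // Use logger.literals(), logger.properties(), logger.strict_mode().
      }
    }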
diff --git a/chromium/v8/src/preparser.cc b/chromium/v8/src/preparser.cc
index a87c434558d..63462c80457 100644
--- a/chromium/v8/src/preparser.cc
+++ b/chromium/v8/src/preparser.cc
@@ -1,48 +1,25 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <cmath>
-#include "../include/v8stdint.h"
-
-#include "allocation.h"
-#include "checks.h"
-#include "conversions.h"
-#include "conversions-inl.h"
-#include "globals.h"
-#include "hashmap.h"
-#include "list.h"
-#include "preparse-data-format.h"
-#include "preparse-data.h"
-#include "preparser.h"
-#include "unicode.h"
-#include "utils.h"
-
-#if V8_CC_MSVC && (_MSC_VER < 1800)
+#include "include/v8stdint.h"
+
+#include "src/allocation.h"
+#include "src/checks.h"
+#include "src/conversions.h"
+#include "src/conversions-inl.h"
+#include "src/globals.h"
+#include "src/hashmap.h"
+#include "src/list.h"
+#include "src/preparse-data-format.h"
+#include "src/preparse-data.h"
+#include "src/preparser.h"
+#include "src/unicode.h"
+#include "src/utils.h"
+
+#if V8_LIBC_MSVCRT && (_MSC_VER < 1800)
namespace std {
// Usually defined in math.h, but not in MSVC until VS2013+.
@@ -55,14 +32,87 @@ int isfinite(double value);
namespace v8 {
namespace internal {
+
+void PreParserTraits::ReportMessageAt(Scanner::Location location,
+ const char* message,
+ const char* arg,
+ bool is_reference_error) {
+ ReportMessageAt(location.beg_pos,
+ location.end_pos,
+ message,
+ arg,
+ is_reference_error);
+}
+
+
+void PreParserTraits::ReportMessageAt(int start_pos,
+ int end_pos,
+ const char* message,
+ const char* arg,
+ bool is_reference_error) {
+ pre_parser_->log_->LogMessage(start_pos, end_pos, message, arg,
+ is_reference_error);
+}
+
+
+PreParserIdentifier PreParserTraits::GetSymbol(Scanner* scanner) {
+ if (scanner->current_token() == Token::FUTURE_RESERVED_WORD) {
+ return PreParserIdentifier::FutureReserved();
+ } else if (scanner->current_token() ==
+ Token::FUTURE_STRICT_RESERVED_WORD) {
+ return PreParserIdentifier::FutureStrictReserved();
+ } else if (scanner->current_token() == Token::YIELD) {
+ return PreParserIdentifier::Yield();
+ }
+ if (scanner->UnescapedLiteralMatches("eval", 4)) {
+ return PreParserIdentifier::Eval();
+ }
+ if (scanner->UnescapedLiteralMatches("arguments", 9)) {
+ return PreParserIdentifier::Arguments();
+ }
+ return PreParserIdentifier::Default();
+}
+
+
+PreParserExpression PreParserTraits::ExpressionFromString(
+ int pos, Scanner* scanner, PreParserFactory* factory) {
+ if (scanner->UnescapedLiteralMatches("use strict", 10)) {
+ return PreParserExpression::UseStrictStringLiteral();
+ }
+ return PreParserExpression::StringLiteral();
+}
+
+
+PreParserExpression PreParserTraits::ParseV8Intrinsic(bool* ok) {
+ return pre_parser_->ParseV8Intrinsic(ok);
+}
+
+
+PreParserExpression PreParserTraits::ParseFunctionLiteral(
+ PreParserIdentifier name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ bool* ok) {
+ return pre_parser_->ParseFunctionLiteral(
+ name, function_name_location, name_is_strict_reserved, is_generator,
+ function_token_position, type, arity_restriction, ok);
+}
+
+
PreParser::PreParseResult PreParser::PreParseLazyFunction(
- LanguageMode mode, bool is_generator, ParserRecorder* log) {
+ StrictMode strict_mode, bool is_generator, ParserRecorder* log) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
- Scope top_scope(&scope_, kTopLevelScope);
- set_language_mode(mode);
- Scope function_scope(&scope_, kFunctionScope);
- function_scope.set_is_generator(is_generator);
+ PreParserScope top_scope(scope_, GLOBAL_SCOPE);
+ FunctionState top_state(&function_state_, &scope_, &top_scope);
+ scope_->SetStrictMode(strict_mode);
+ PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ FunctionState function_state(&function_state_, &scope_, &function_scope);
+ function_state.set_is_generator(is_generator);
ASSERT_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
int start_position = peek_position();
@@ -72,12 +122,9 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
ReportUnexpectedToken(scanner()->current_token());
} else {
ASSERT_EQ(Token::RBRACE, scanner()->peek());
- if (!is_classic_mode()) {
+ if (scope_->strict_mode() == STRICT) {
int end_pos = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_pos, &ok);
- if (ok) {
- CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
- }
}
}
return kPreParseSuccess;
@@ -97,37 +144,6 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-void PreParser::ReportUnexpectedToken(Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram.
- if (token == Token::ILLEGAL && stack_overflow()) {
- return;
- }
- Scanner::Location source_location = scanner()->location();
-
- // Four of the tokens are treated specially
- switch (token) {
- case Token::EOS:
- return ReportMessageAt(source_location, "unexpected_eos", NULL);
- case Token::NUMBER:
- return ReportMessageAt(source_location, "unexpected_token_number", NULL);
- case Token::STRING:
- return ReportMessageAt(source_location, "unexpected_token_string", NULL);
- case Token::IDENTIFIER:
- return ReportMessageAt(source_location,
- "unexpected_token_identifier", NULL);
- case Token::FUTURE_RESERVED_WORD:
- return ReportMessageAt(source_location, "unexpected_reserved", NULL);
- case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessageAt(source_location,
- "unexpected_strict_reserved", NULL);
- default:
- const char* name = Token::String(token);
- ReportMessageAt(source_location, "unexpected_token", name);
- }
-}
-
#define CHECK_OK ok); \
if (!*ok) return kUnknownSourceElements; \
@@ -165,15 +181,17 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
// SourceElements ::
// (Statement)* <end_token>
- bool allow_directive_prologue = true;
+ bool directive_prologue = true;
while (peek() != end_token) {
+ if (directive_prologue && peek() != Token::STRING) {
+ directive_prologue = false;
+ }
Statement statement = ParseSourceElement(CHECK_OK);
- if (allow_directive_prologue) {
+ if (directive_prologue) {
if (statement.IsUseStrictLiteral()) {
- set_language_mode(allow_harmony_scoping() ?
- EXTENDED_MODE : STRICT_MODE);
+ scope_->SetStrictMode(STRICT);
} else if (!statement.IsStringLiteral()) {
- allow_directive_prologue = false;
+ directive_prologue = false;
}
}
}
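The reworked loop makes the directive-prologue boundary explicit: the prologue ends at the first statement that is not a string literal (or that does not even begin with a string token), and only a "use strict" literal inside it flips the scope to strict mode. Illustrated on a sample input:

    // "use strict";   <- directive; switches the scope to STRICT
    // var x;          <- first non-string statement ends the prologue
    // "use strict";   <- ordinary expression statement now; no effect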
@@ -265,9 +283,10 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
Scanner::Location start_location = scanner()->peek_location();
Statement statement = ParseFunctionDeclaration(CHECK_OK);
Scanner::Location end_location = scanner()->location();
- if (!is_classic_mode()) {
- ReportMessageAt(start_location.beg_pos, end_location.end_pos,
- "strict_function", NULL);
+ if (strict_mode() == STRICT) {
+ PreParserTraits::ReportMessageAt(start_location.beg_pos,
+ end_location.end_pos,
+ "strict_function");
*ok = false;
return Statement::Default();
} else {
@@ -291,24 +310,19 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
// 'function' '*' Identifier '(' FormalParameterListopt ')'
// '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
-
+ int pos = position();
bool is_generator = allow_generators() && Check(Token::MUL);
- Identifier identifier = ParseIdentifier(CHECK_OK);
- Scanner::Location location = scanner()->location();
-
- Expression function_value = ParseFunctionLiteral(is_generator, CHECK_OK);
-
- if (function_value.IsStrictFunction() &&
- !identifier.IsValidStrictVariable()) {
- // Strict mode violation, using either reserved word or eval/arguments
- // as name of strict function.
- const char* type = "strict_function_name";
- if (identifier.IsFutureStrictReserved() || identifier.IsYield()) {
- type = "strict_reserved_word";
- }
- ReportMessageAt(location, type, NULL);
- *ok = false;
- }
+ bool is_strict_reserved = false;
+ Identifier name = ParseIdentifierOrStrictReservedWord(
+ &is_strict_reserved, CHECK_OK);
+ ParseFunctionLiteral(name,
+ scanner()->location(),
+ is_strict_reserved,
+ is_generator,
+ pos,
+ FunctionLiteral::DECLARATION,
+ FunctionLiteral::NORMAL_ARITY,
+ CHECK_OK);
return Statement::FunctionDeclaration();
}
@@ -322,7 +336,7 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
- if (is_extended_mode()) {
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
ParseSourceElement(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
@@ -382,30 +396,24 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
//
- // However disallowing const in classic mode will break compatibility with
+ // However disallowing const in sloppy mode will break compatibility with
// existing pages. Therefore we keep allowing const with the old
- // non-harmony semantics in classic mode.
+ // non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- switch (language_mode()) {
- case CLASSIC_MODE:
- break;
- case STRICT_MODE: {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, "strict_const", NULL);
- *ok = false;
- return Statement::Default();
- }
- case EXTENDED_MODE:
- if (var_context != kSourceElement &&
- var_context != kForStatement) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "unprotected_const", NULL);
+ if (strict_mode() == STRICT) {
+ if (allow_harmony_scoping()) {
+ if (var_context != kSourceElement && var_context != kForStatement) {
+ ReportMessageAt(scanner()->peek_location(), "unprotected_const");
*ok = false;
return Statement::Default();
}
require_initializer = true;
- break;
+ } else {
+ Scanner::Location location = scanner()->peek_location();
+ ReportMessageAt(location, "strict_const");
+ *ok = false;
+ return Statement::Default();
+ }
}
} else if (peek() == Token::LET) {
// ES6 Draft Rev4 section 12.2.1:
@@ -414,19 +422,17 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
//
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
- if (!is_extended_mode()) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "illegal_let", NULL);
+ //
+ // TODO(rossberg): make 'let' a legal identifier in sloppy mode.
+ if (!allow_harmony_scoping() || strict_mode() == SLOPPY) {
+ ReportMessageAt(scanner()->peek_location(), "illegal_let");
*ok = false;
return Statement::Default();
}
Consume(Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "unprotected_let", NULL);
+ ReportMessageAt(scanner()->peek_location(), "unprotected_let");
*ok = false;
return Statement::Default();
}
@@ -443,14 +449,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
do {
// Parse variable name.
if (nvars > 0) Consume(Token::COMMA);
- Identifier identifier = ParseIdentifier(CHECK_OK);
- if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner()->location(),
- "strict_var_name",
- identifier,
- ok);
- return Statement::Default();
- }
+ ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
nvars++;
if (peek() == Token::ASSIGN || require_initializer) {
Expect(Token::ASSIGN, CHECK_OK);
@@ -469,16 +468,20 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
// Expression ';'
// Identifier ':' Statement
+ bool starts_with_identifier = peek_any_identifier();
Expression expr = ParseExpression(true, CHECK_OK);
- if (expr.IsRawIdentifier()) {
+ // Even if the expression starts with an identifier, it is not necessarily an
+ // identifier. For example, "foo + bar" starts with an identifier but is not
+ // an identifier.
+ if (starts_with_identifier && expr.IsIdentifier() && peek() == Token::COLON) {
+ // Expression is a single identifier, and not, e.g., a parenthesized
+ // identifier.
ASSERT(!expr.AsIdentifier().IsFutureReserved());
- ASSERT(is_classic_mode() ||
+ ASSERT(strict_mode() == SLOPPY ||
(!expr.AsIdentifier().IsFutureStrictReserved() &&
!expr.AsIdentifier().IsYield()));
- if (peek() == Token::COLON) {
- Consume(Token::COLON);
- return ParseStatement(ok);
- }
+ Consume(Token::COLON);
+ return ParseStatement(ok);
// Preparsing is disabled for extensions (because the extension details
// aren't passed to lazily compiled functions), so we don't
// accept "native function" in the preparser.
@@ -516,7 +519,8 @@ PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
tok != Token::SEMICOLON &&
tok != Token::RBRACE &&
tok != Token::EOS) {
- ParseIdentifier(CHECK_OK);
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
return Statement::Default();
@@ -533,7 +537,8 @@ PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
tok != Token::SEMICOLON &&
tok != Token::RBRACE &&
tok != Token::EOS) {
- ParseIdentifier(CHECK_OK);
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
return Statement::Default();
@@ -544,7 +549,7 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
// ReturnStatement ::
// 'return' [no line terminator] Expression? ';'
- // Consume the return token. It is necessary to do the before
+  // Consume the return token. It is necessary to do this before
// reporting any errors on it, because of the way errors are
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
@@ -570,9 +575,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
- if (!is_classic_mode()) {
- Scanner::Location location = scanner()->location();
- ReportMessageAt(location, "strict_mode_with", NULL);
+ if (strict_mode() == STRICT) {
+ ReportMessageAt(scanner()->location(), "strict_mode_with");
*ok = false;
return Statement::Default();
}
@@ -580,7 +584,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Scope::InsideWith iw(scope_);
+ PreParserScope with_scope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, &with_scope);
ParseStatement(CHECK_OK);
return Statement::Default();
}
@@ -716,8 +721,7 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- Scanner::Location pos = scanner()->location();
- ReportMessageAt(pos, "newline_after_throw", NULL);
+ ReportMessageAt(scanner()->location(), "newline_after_throw");
*ok = false;
return Statement::Default();
}
@@ -739,38 +743,31 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
// Finally ::
// 'finally' Block
- // In preparsing, allow any number of catch/finally blocks, including zero
- // of both.
-
Expect(Token::TRY, CHECK_OK);
ParseBlock(CHECK_OK);
- bool catch_or_finally_seen = false;
- if (peek() == Token::CATCH) {
+ Token::Value tok = peek();
+ if (tok != Token::CATCH && tok != Token::FINALLY) {
+ ReportMessageAt(scanner()->location(), "no_catch_or_finally");
+ *ok = false;
+ return Statement::Default();
+ }
+ if (tok == Token::CATCH) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- Identifier id = ParseIdentifier(CHECK_OK);
- if (!is_classic_mode() && !id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner()->location(),
- "strict_catch_variable",
- id,
- ok);
- return Statement::Default();
- }
+ ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- { Scope::InsideWith iw(scope_);
+ {
+ PreParserScope with_scope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, &with_scope);
ParseBlock(CHECK_OK);
}
- catch_or_finally_seen = true;
+ tok = peek();
}
- if (peek() == Token::FINALLY) {
+ if (tok == Token::FINALLY) {
Consume(Token::FINALLY);
ParseBlock(CHECK_OK);
- catch_or_finally_seen = true;
- }
- if (!catch_or_finally_seen) {
- *ok = false;
}
return Statement::Default();
}
@@ -797,608 +794,106 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
#undef DUMMY
-// Precedence = 1
-PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
- // Expression ::
- // AssignmentExpression
- // Expression ',' AssignmentExpression
-
- Expression result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == Token::COMMA) {
- Expect(Token::COMMA, CHECK_OK);
- ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = Expression::Default();
- }
- return result;
-}
-
-
-// Precedence = 2
-PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
- bool* ok) {
- // AssignmentExpression ::
- // ConditionalExpression
- // YieldExpression
- // LeftHandSideExpression AssignmentOperator AssignmentExpression
-
- if (scope_->is_generator() && peek() == Token::YIELD) {
- return ParseYieldExpression(ok);
- }
-
- Scanner::Location before = scanner()->peek_location();
- Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
-
- if (!Token::IsAssignmentOp(peek())) {
- // Parsed conditional expression only (no assignment).
- return expression;
- }
-
- if (!is_classic_mode() &&
- expression.IsIdentifier() &&
- expression.AsIdentifier().IsEvalOrArguments()) {
- Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_lhs_assignment", NULL);
- *ok = false;
- return Expression::Default();
- }
-
- Token::Value op = Next(); // Get assignment operator.
- ParseAssignmentExpression(accept_IN, CHECK_OK);
-
- if ((op == Token::ASSIGN) && expression.IsThisProperty()) {
- scope_->AddProperty();
- }
-
- return Expression::Default();
-}
-
-
-// Precedence = 3
-PreParser::Expression PreParser::ParseYieldExpression(bool* ok) {
- // YieldExpression ::
- // 'yield' '*'? AssignmentExpression
- Consume(Token::YIELD);
- Check(Token::MUL);
-
- ParseAssignmentExpression(false, CHECK_OK);
-
- return Expression::Default();
-}
-
-
-// Precedence = 3
-PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
- bool* ok) {
- // ConditionalExpression ::
- // LogicalOrExpression
- // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
- // We start using the binary expression parser for prec >= 4 only!
- Expression expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != Token::CONDITIONAL) return expression;
- Consume(Token::CONDITIONAL);
- // In parsing the first assignment expression in conditional
- // expressions we always accept the 'in' keyword; see ECMA-262,
- // section 11.12, page 58.
- ParseAssignmentExpression(true, CHECK_OK);
- Expect(Token::COLON, CHECK_OK);
- ParseAssignmentExpression(accept_IN, CHECK_OK);
- return Expression::Default();
-}
-
-
-// Precedence >= 4
-PreParser::Expression PreParser::ParseBinaryExpression(int prec,
- bool accept_IN,
- bool* ok) {
- Expression result = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
- // prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- Next();
- ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
- result = Expression::Default();
- }
- }
- return result;
-}
-
-
-PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
-
- Token::Value op = peek();
- if (Token::IsUnaryOp(op)) {
- op = Next();
- ParseUnaryExpression(ok);
- return Expression::Default();
- } else if (Token::IsCountOp(op)) {
- op = Next();
- Scanner::Location before = scanner()->peek_location();
- Expression expression = ParseUnaryExpression(CHECK_OK);
- if (!is_classic_mode() &&
- expression.IsIdentifier() &&
- expression.AsIdentifier().IsEvalOrArguments()) {
- Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_lhs_prefix", NULL);
- *ok = false;
- }
- return Expression::Default();
- } else {
- return ParsePostfixExpression(ok);
- }
-}
-
-
-PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
- // PostfixExpression ::
- // LeftHandSideExpression ('++' | '--')?
-
- Scanner::Location before = scanner()->peek_location();
- Expression expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
- Token::IsCountOp(peek())) {
- if (!is_classic_mode() &&
- expression.IsIdentifier() &&
- expression.AsIdentifier().IsEvalOrArguments()) {
- Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_lhs_postfix", NULL);
- *ok = false;
- return Expression::Default();
- }
- Next();
- return Expression::Default();
- }
- return expression;
-}
-
-
-PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
- // LeftHandSideExpression ::
- // (NewExpression | MemberExpression) ...
-
- Expression result = Expression::Default();
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RBRACK, CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
-
- case Token::LPAREN: {
- ParseArguments(CHECK_OK);
- result = Expression::Default();
- break;
- }
-
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- ParseIdentifierName(CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
-
- default:
- return result;
- }
- }
-}
-
-
-PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
- // NewExpression ::
- // ('new')+ MemberExpression
-
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- unsigned new_count = 0;
- do {
- Consume(Token::NEW);
- new_count++;
- } while (peek() == Token::NEW);
-
- return ParseMemberWithNewPrefixesExpression(new_count, ok);
-}
-
-
-PreParser::Expression PreParser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(0, ok);
-}
-
-
-PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
- unsigned new_count, bool* ok) {
- // MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
-
- // Parse the initial primary or function expression.
- Expression result = Expression::Default();
- if (peek() == Token::FUNCTION) {
- Consume(Token::FUNCTION);
-
- bool is_generator = allow_generators() && Check(Token::MUL);
- Identifier identifier = Identifier::Default();
- if (peek_any_identifier()) {
- identifier = ParseIdentifier(CHECK_OK);
- }
- result = ParseFunctionLiteral(is_generator, CHECK_OK);
- if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner()->location(),
- "strict_function_name",
- identifier,
- ok);
- return Expression::Default();
- }
- } else {
- result = ParsePrimaryExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RBRACK, CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- ParseIdentifierName(CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
- case Token::LPAREN: {
- if (new_count == 0) return result;
- // Consume one of the new prefixes (already parsed).
- ParseArguments(CHECK_OK);
- new_count--;
- result = Expression::Default();
- break;
- }
- default:
- return result;
- }
- }
-}
-
-
-PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- Expression result = Expression::Default();
- switch (peek()) {
- case Token::THIS: {
- Next();
- result = Expression::This();
- break;
- }
-
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::YIELD:
- case Token::IDENTIFIER: {
- Identifier id = ParseIdentifier(CHECK_OK);
- result = Expression::FromIdentifier(id);
- break;
- }
-
- case Token::NULL_LITERAL:
- case Token::TRUE_LITERAL:
- case Token::FALSE_LITERAL:
- case Token::NUMBER: {
- Next();
- break;
- }
- case Token::STRING: {
- Next();
- result = GetStringSymbol();
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- result = result.Parenthesize();
- break;
-
- case Token::MOD:
- result = ParseV8Intrinsic(CHECK_OK);
- break;
-
- default: {
- Next();
- *ok = false;
- return Expression::Default();
- }
- }
-
- return result;
-}
-
-
-PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
- // ArrayLiteral ::
- // '[' Expression? (',' Expression?)* ']'
- Expect(Token::LBRACK, CHECK_OK);
- while (peek() != Token::RBRACK) {
- if (peek() != Token::COMMA) {
- ParseAssignmentExpression(true, CHECK_OK);
- }
- if (peek() != Token::RBRACK) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
-
- scope_->NextMaterializedLiteralIndex();
- return Expression::Default();
-}
-
-
-PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
- // ObjectLiteral ::
- // '{' (
- // ((IdentifierName | String | Number) ':' AssignmentExpression)
- // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // )*[','] '}'
-
- ObjectLiteralChecker checker(this, language_mode());
-
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- Token::Value next = peek();
- switch (next) {
- case Token::IDENTIFIER:
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
- bool is_getter = false;
- bool is_setter = false;
- ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if ((is_getter || is_setter) && peek() != Token::COLON) {
- Token::Value name = Next();
- bool is_keyword = Token::IsKeyword(name);
- if (name != Token::IDENTIFIER &&
- name != Token::FUTURE_RESERVED_WORD &&
- name != Token::FUTURE_STRICT_RESERVED_WORD &&
- name != Token::NUMBER &&
- name != Token::STRING &&
- !is_keyword) {
- *ok = false;
- return Expression::Default();
- }
- if (!is_keyword) {
- LogSymbol();
- }
- PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
- checker.CheckProperty(name, type, CHECK_OK);
- ParseFunctionLiteral(false, CHECK_OK);
- if (peek() != Token::RBRACE) {
- Expect(Token::COMMA, CHECK_OK);
- }
- continue; // restart the while
- }
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- break;
- }
- case Token::STRING:
- Consume(next);
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- GetStringSymbol();
- break;
- case Token::NUMBER:
- Consume(next);
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- break;
- default:
- if (Token::IsKeyword(next)) {
- Consume(next);
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- } else {
- // Unexpected token.
- *ok = false;
- return Expression::Default();
- }
- }
-
- Expect(Token::COLON, CHECK_OK);
- ParseAssignmentExpression(true, CHECK_OK);
-
- // TODO(1240767): Consider allowing trailing comma.
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- scope_->NextMaterializedLiteralIndex();
- return Expression::Default();
-}
-
-
-PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
- bool* ok) {
- if (!scanner()->ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessageAt(scanner()->location(), "unterminated_regexp", NULL);
- *ok = false;
- return Expression::Default();
- }
-
- scope_->NextMaterializedLiteralIndex();
-
- if (!scanner()->ScanRegExpFlags()) {
- Next();
- ReportMessageAt(scanner()->location(), "invalid_regexp_flags", NULL);
- *ok = false;
- return Expression::Default();
- }
- Next();
- return Expression::Default();
-}
-
-
-PreParser::Arguments PreParser::ParseArguments(bool* ok) {
- // Arguments ::
- // '(' (AssignmentExpression)*[','] ')'
-
- Expect(Token::LPAREN, ok);
- if (!*ok) return -1;
- bool done = (peek() == Token::RPAREN);
- int argc = 0;
- while (!done) {
- ParseAssignmentExpression(true, ok);
- if (!*ok) return -1;
- argc++;
- done = (peek() == Token::RPAREN);
- if (!done) {
- Expect(Token::COMMA, ok);
- if (!*ok) return -1;
- }
- }
- Expect(Token::RPAREN, ok);
- return argc;
-}
-
-
-PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
- bool* ok) {
+PreParser::Expression PreParser::ParseFunctionLiteral(
+ Identifier function_name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_pos,
+ FunctionLiteral::FunctionType function_type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
// Parse function body.
ScopeType outer_scope_type = scope_->type();
- bool inside_with = scope_->IsInsideWith();
- Scope function_scope(&scope_, kFunctionScope);
- function_scope.set_is_generator(is_generator);
+ PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ FunctionState function_state(&function_state_, &scope_, &function_scope);
+ function_state.set_is_generator(is_generator);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
int start_position = position();
- bool done = (peek() == Token::RPAREN);
DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+ // We don't yet know if the function will be strict, so we cannot yet produce
+ // errors for parameter names or duplicates. However, we remember the
+ // locations of these errors if they occur and produce the errors later.
+ Scanner::Location eval_args_error_loc = Scanner::Location::invalid();
+ Scanner::Location dupe_error_loc = Scanner::Location::invalid();
+ Scanner::Location reserved_error_loc = Scanner::Location::invalid();
+
+ bool done = arity_restriction == FunctionLiteral::GETTER_ARITY ||
+ (peek() == Token::RPAREN &&
+ arity_restriction != FunctionLiteral::SETTER_ARITY);
while (!done) {
- Identifier id = ParseIdentifier(CHECK_OK);
- if (!id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner()->location(),
- "strict_param_name",
- id,
- CHECK_OK);
+ bool is_strict_reserved = false;
+ Identifier param_name =
+ ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+ if (!eval_args_error_loc.IsValid() && param_name.IsEvalOrArguments()) {
+ eval_args_error_loc = scanner()->location();
}
- int prev_value;
- if (scanner()->is_literal_ascii()) {
- prev_value =
- duplicate_finder.AddAsciiSymbol(scanner()->literal_ascii_string(), 1);
- } else {
- prev_value =
- duplicate_finder.AddUtf16Symbol(scanner()->literal_utf16_string(), 1);
+ if (!reserved_error_loc.IsValid() && is_strict_reserved) {
+ reserved_error_loc = scanner()->location();
}
- if (prev_value != 0) {
- SetStrictModeViolation(scanner()->location(),
- "strict_param_dupe",
- CHECK_OK);
+ int prev_value = scanner()->FindSymbol(&duplicate_finder, 1);
+
+ if (!dupe_error_loc.IsValid() && prev_value != 0) {
+ dupe_error_loc = scanner()->location();
}
+
+ if (arity_restriction == FunctionLiteral::SETTER_ARITY) break;
done = (peek() == Token::RPAREN);
- if (!done) {
- Expect(Token::COMMA, CHECK_OK);
- }
+ if (!done) Expect(Token::COMMA, CHECK_OK);
}
Expect(Token::RPAREN, CHECK_OK);
- // Determine if the function will be lazily compiled.
- // Currently only happens to top-level functions.
- // Optimistically assume that all top-level functions are lazily compiled.
- bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
- !inside_with && allow_lazy() &&
- !parenthesized_function_);
+ // See Parser::ParseFunctionLiteral for more information about lazy parsing
+ // and lazy compilation.
+ bool is_lazily_parsed = (outer_scope_type == GLOBAL_SCOPE && allow_lazy() &&
+ !parenthesized_function_);
parenthesized_function_ = false;
Expect(Token::LBRACE, CHECK_OK);
- if (is_lazily_compiled) {
+ if (is_lazily_parsed) {
ParseLazyFunctionLiteralBody(CHECK_OK);
} else {
ParseSourceElements(Token::RBRACE, ok);
}
Expect(Token::RBRACE, CHECK_OK);
- if (!is_classic_mode()) {
+ // Validate strict mode. We can do this only after parsing the function,
+ // since the function can declare itself strict.
+ if (strict_mode() == STRICT) {
+ if (function_name.IsEvalOrArguments()) {
+ ReportMessageAt(function_name_location, "strict_eval_arguments");
+ *ok = false;
+ return Expression::Default();
+ }
+ if (name_is_strict_reserved) {
+ ReportMessageAt(function_name_location, "unexpected_strict_reserved");
+ *ok = false;
+ return Expression::Default();
+ }
+ if (eval_args_error_loc.IsValid()) {
+ ReportMessageAt(eval_args_error_loc, "strict_eval_arguments");
+ *ok = false;
+ return Expression::Default();
+ }
+ if (dupe_error_loc.IsValid()) {
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe");
+ *ok = false;
+ return Expression::Default();
+ }
+ if (reserved_error_loc.IsValid()) {
+ ReportMessageAt(reserved_error_loc, "unexpected_strict_reserved");
+ *ok = false;
+ return Expression::Default();
+ }
+
int end_position = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_position, CHECK_OK);
- CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
- return Expression::StrictFunction();
}
return Expression::Default();
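
Editorial note: the validation above follows a record-first, report-later pattern. Scanner locations are remembered while the parameter list is read and are only consulted once the body has revealed whether the function is strict. A minimal self-contained sketch of that pattern (all names hypothetical, not V8 API):

    #include <cstdio>

    // Remember only the first offending location; report once we know
    // whether the function turned out to be strict.
    struct Location {
      int beg, end;
      bool IsValid() const { return beg >= 0; }
    };

    struct DelayedError {
      DelayedError() : loc_{-1, -1} {}
      void RememberIfFirst(Location candidate) {
        if (!loc_.IsValid()) loc_ = candidate;  // first occurrence wins
      }
      void ReportIfStrict(bool is_strict, const char* message) const {
        if (is_strict && loc_.IsValid())
          std::printf("%s at [%d,%d)\n", message, loc_.beg, loc_.end);
      }
      Location loc_;
    };

    int main() {
      DelayedError dupe;
      dupe.RememberIfFirst(Location{10, 13});  // first duplicate parameter
      dupe.RememberIfFirst(Location{20, 23});  // ignored; first one counts
      // Only after parsing the body do we learn the function is strict:
      dupe.ReportIfStrict(true, "strict_param_dupe");
    }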
@@ -1407,18 +902,16 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
int body_start = position();
- log_->PauseRecording();
ParseSourceElements(Token::RBRACE, ok);
- log_->ResumeRecording();
if (!*ok) return;
// Position right after terminal '}'.
ASSERT_EQ(Token::RBRACE, scanner()->peek());
int body_end = scanner()->peek_location().end_pos;
log_->LogFunction(body_start, body_end,
- scope_->materialized_literal_count(),
- scope_->expected_properties(),
- language_mode());
+ function_state_->materialized_literal_count(),
+ function_state_->expected_property_count(),
+ strict_mode());
}
@@ -1430,7 +923,8 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
*ok = false;
return Expression::Default();
}
- ParseIdentifier(CHECK_OK);
+ // Allow "eval" or "arguments" for backward compatibility.
+ ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
ParseArguments(ok);
return Expression::Default();
@@ -1439,211 +933,4 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
#undef CHECK_OK
-void PreParser::LogSymbol() {
- int identifier_pos = position();
- if (scanner()->is_literal_ascii()) {
- log_->LogAsciiSymbol(identifier_pos, scanner()->literal_ascii_string());
- } else {
- log_->LogUtf16Symbol(identifier_pos, scanner()->literal_utf16_string());
- }
-}
-
-
-PreParser::Expression PreParser::GetStringSymbol() {
- const int kUseStrictLength = 10;
- const char* kUseStrictChars = "use strict";
- LogSymbol();
- if (scanner()->is_literal_ascii() &&
- scanner()->literal_length() == kUseStrictLength &&
- !scanner()->literal_contains_escapes() &&
- !strncmp(scanner()->literal_ascii_string().start(), kUseStrictChars,
- kUseStrictLength)) {
- return Expression::UseStrictStringLiteral();
- }
- return Expression::StringLiteral();
-}
-
-
-PreParser::Identifier PreParser::GetIdentifierSymbol() {
- LogSymbol();
- if (scanner()->current_token() == Token::FUTURE_RESERVED_WORD) {
- return Identifier::FutureReserved();
- } else if (scanner()->current_token() ==
- Token::FUTURE_STRICT_RESERVED_WORD) {
- return Identifier::FutureStrictReserved();
- } else if (scanner()->current_token() == Token::YIELD) {
- return Identifier::Yield();
- }
- if (scanner()->is_literal_ascii()) {
- // Detect strict-mode poison words.
- if (scanner()->literal_length() == 4 &&
- !strncmp(scanner()->literal_ascii_string().start(), "eval", 4)) {
- return Identifier::Eval();
- }
- if (scanner()->literal_length() == 9 &&
- !strncmp(scanner()->literal_ascii_string().start(), "arguments", 9)) {
- return Identifier::Arguments();
- }
- }
- return Identifier::Default();
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- Token::Value next = Next();
- switch (next) {
- case Token::FUTURE_RESERVED_WORD: {
- Scanner::Location location = scanner()->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "reserved_word", NULL);
- *ok = false;
- return GetIdentifierSymbol();
- }
- case Token::YIELD:
- if (scope_->is_generator()) {
- // 'yield' in a generator is only valid as part of a YieldExpression.
- ReportMessageAt(scanner()->location(), "unexpected_token", "yield");
- *ok = false;
- return Identifier::Yield();
- }
- // FALLTHROUGH
- case Token::FUTURE_STRICT_RESERVED_WORD:
- if (!is_classic_mode()) {
- Scanner::Location location = scanner()->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "strict_reserved_word", NULL);
- *ok = false;
- }
- // FALLTHROUGH
- case Token::IDENTIFIER:
- return GetIdentifierSymbol();
- default:
- *ok = false;
- return Identifier::Default();
- }
-}
-
-
-void PreParser::SetStrictModeViolation(Scanner::Location location,
- const char* type,
- bool* ok) {
- if (!is_classic_mode()) {
- ReportMessageAt(location, type, NULL);
- *ok = false;
- return;
- }
- // Delay report in case this later turns out to be strict code
- // (i.e., for function names and parameters prior to a "use strict"
- // directive).
- // It's safe to overwrite an existing violation.
- // It's either from a function that turned out to be non-strict,
- // or it's in the current function (and we just need to report
- // one error), or it's in a unclosed nesting function that wasn't
- // strict (otherwise we would already be in strict mode).
- strict_mode_violation_location_ = location;
- strict_mode_violation_type_ = type;
-}
-
-
-void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
- int end_pos,
- bool* ok) {
- Scanner::Location location = strict_mode_violation_location_;
- if (location.IsValid() &&
- location.beg_pos > beg_pos && location.end_pos < end_pos) {
- ReportMessageAt(location, strict_mode_violation_type_, NULL);
- *ok = false;
- }
-}
-
-
-void PreParser::StrictModeIdentifierViolation(Scanner::Location location,
- const char* eval_args_type,
- Identifier identifier,
- bool* ok) {
- const char* type = eval_args_type;
- if (identifier.IsFutureReserved()) {
- type = "reserved_word";
- } else if (identifier.IsFutureStrictReserved() || identifier.IsYield()) {
- type = "strict_reserved_word";
- }
- if (!is_classic_mode()) {
- ReportMessageAt(location, type, NULL);
- *ok = false;
- return;
- }
- strict_mode_violation_location_ = location;
- strict_mode_violation_type_ = type;
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (Token::IsKeyword(next)) {
- int pos = position();
- const char* keyword = Token::String(next);
- log_->LogAsciiSymbol(pos, Vector<const char>(keyword, StrLength(keyword)));
- return Identifier::Default();
- }
- if (next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD) {
- return GetIdentifierSymbol();
- }
- *ok = false;
- return Identifier::Default();
-}
-
-#undef CHECK_OK
-
-
-// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'.
-PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Identifier result = ParseIdentifierName(ok);
- if (!*ok) return Identifier::Default();
- if (scanner()->is_literal_ascii() &&
- scanner()->literal_length() == 3) {
- const char* token = scanner()->literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
-void PreParser::ObjectLiteralChecker::CheckProperty(Token::Value property,
- PropertyKind type,
- bool* ok) {
- int old;
- if (property == Token::NUMBER) {
- old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
- } else if (scanner()->is_literal_ascii()) {
- old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
- } else {
- old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
- }
- PropertyKind old_type = static_cast<PropertyKind>(old);
- if (HasConflict(old_type, type)) {
- if (IsDataDataConflict(old_type, type)) {
- // Both are data properties.
- if (language_mode_ == CLASSIC_MODE) return;
- parser()->ReportMessageAt(scanner()->location(),
- "strict_duplicate_property");
- } else if (IsDataAccessorConflict(old_type, type)) {
- // Both a data and an accessor property with the same name.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_data_property");
- } else {
- ASSERT(IsAccessorAccessorConflict(old_type, type));
- // Both accessors of the same type.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_get_set");
- }
- *ok = false;
- }
-}
-
} } // v8::internal
diff --git a/chromium/v8/src/preparser.h b/chromium/v8/src/preparser.h
index e99b4b0a181..94f42797740 100644
--- a/chromium/v8/src/preparser.h
+++ b/chromium/v8/src/preparser.h
@@ -1,53 +1,88 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PREPARSER_H
#define V8_PREPARSER_H
-#include "hashmap.h"
-#include "token.h"
-#include "scanner.h"
+#include "src/func-name-inferrer.h"
+#include "src/hashmap.h"
+#include "src/scopes.h"
+#include "src/token.h"
+#include "src/scanner.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
-// Common base class shared between parser and pre-parser.
-class ParserBase {
+// Common base class shared between parser and pre-parser. Traits encapsulate
+// the differences between Parser and PreParser:
+
+// - Return types: For example, Parser functions return Expression* and
+// PreParser functions return PreParserExpression.
+
+// - Creating parse tree nodes: Parser generates an AST during the recursive
+// descent. PreParser doesn't create a tree. Instead, it passes around minimal
+// data objects (PreParserExpression, PreParserIdentifier etc.) which contain
+// just enough data for the upper layer functions. PreParserFactory is
+// responsible for creating these dummy objects. It provides a similar kind of
+// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
+// used.
+
+// - Miscellaneous other tasks interleaved with the recursive descent. For
+// example, Parser keeps track of which function literals should be marked as
+// pretenured, and PreParser doesn't care.
+
+// The traits are expected to contain the following typedefs:
+// struct Traits {
+// // In particular...
+// struct Type {
+// // Used by FunctionState and BlockState.
+// typedef Scope;
+// typedef GeneratorVariable;
+// typedef Zone;
+// // Return types for traversing functions.
+// typedef Identifier;
+// typedef Expression;
+// typedef FunctionLiteral;
+// typedef ObjectLiteralProperty;
+// typedef Literal;
+// typedef ExpressionList;
+// typedef PropertyList;
+// // For constructing objects returned by the traversing functions.
+// typedef Factory;
+// };
+// // ...
+// };
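
The traits contract sketched above is easiest to see in a toy instantiation. Below is a minimal self-contained model of the same ParserBase<Traits> shape; all names are hypothetical, and the real traits are far richer:

    #include <iostream>

    // The base drives the recursive descent; the traits decide the result
    // types and how nodes are constructed (here: no tree, just a node count).
    struct CountingTraits {
      struct Type {
        typedef int Expression;  // PreParser-style minimal data object
      };
      static int MakeLiteral(int value) { return 1; }
      static int MakeBinary(int left, int right) { return left + right + 1; }
    };

    template <typename Traits>
    class TinyParserBase : public Traits {
     public:
      typedef typename Traits::Type::Expression ExpressionT;
      ExpressionT ParseSum(const int* nums, int count) {
        ExpressionT result = Traits::MakeLiteral(nums[0]);
        for (int i = 1; i < count; i++) {
          result = Traits::MakeBinary(result, Traits::MakeLiteral(nums[i]));
        }
        return result;
      }
    };

    int main() {
      const int nums[] = {1, 2, 3};
      TinyParserBase<CountingTraits> parser;
      std::cout << parser.ParseSum(nums, 3) << "\n";  // 5: 3 literals + 2 ops
    }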
+
+template <typename Traits>
+class ParserBase : public Traits {
public:
- ParserBase(Scanner* scanner, uintptr_t stack_limit)
- : scanner_(scanner),
+ // Shorten type names defined by Traits.
+ typedef typename Traits::Type::Expression ExpressionT;
+ typedef typename Traits::Type::Identifier IdentifierT;
+
+ ParserBase(Scanner* scanner, uintptr_t stack_limit,
+ v8::Extension* extension,
+ ParserRecorder* log,
+ typename Traits::Type::Zone* zone,
+ typename Traits::Type::Parser this_object)
+ : Traits(this_object),
+ parenthesized_function_(false),
+ scope_(NULL),
+ function_state_(NULL),
+ extension_(extension),
+ fni_(NULL),
+ log_(log),
+ mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
+ scanner_(scanner),
stack_limit_(stack_limit),
stack_overflow_(false),
allow_lazy_(false),
allow_natives_syntax_(false),
allow_generators_(false),
- allow_for_of_(false) { }
- // TODO(mstarzinger): Only virtual until message reporting has been unified.
- virtual ~ParserBase() { }
+ allow_for_of_(false),
+ zone_(zone) { }
// Getters that indicate whether certain syntactical constructs are
// allowed to be parsed by this instance of the parser.
@@ -76,11 +111,130 @@ class ParserBase {
}
protected:
+ enum AllowEvalOrArgumentsAsIdentifier {
+ kAllowEvalOrArguments,
+ kDontAllowEvalOrArguments
+ };
+
+ enum Mode {
+ PARSE_LAZILY,
+ PARSE_EAGERLY
+ };
+
+ // ---------------------------------------------------------------------------
+ // FunctionState and BlockState together implement the parser's scope stack.
+ // The parser's current scope is in scope_. BlockState and FunctionState
+ // constructors push on the scope stack and the destructors pop. They are also
+ // used to hold the parser's per-function and per-block state.
+ class BlockState BASE_EMBEDDED {
+ public:
+ BlockState(typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope)
+ : scope_stack_(scope_stack),
+ outer_scope_(*scope_stack),
+ scope_(scope) {
+ *scope_stack_ = scope_;
+ }
+ ~BlockState() { *scope_stack_ = outer_scope_; }
+
+ private:
+ typename Traits::Type::Scope** scope_stack_;
+ typename Traits::Type::Scope* outer_scope_;
+ typename Traits::Type::Scope* scope_;
+ };
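
BlockState's push-in-constructor, pop-in-destructor contract can be exercised in isolation. A minimal self-contained model of the RAII idiom (names hypothetical):

    #include <cassert>

    struct Scope { Scope* outer; };

    // Constructor pushes a scope onto the stack; destructor pops it.
    class BlockState {
     public:
      BlockState(Scope** scope_stack, Scope* scope)
          : scope_stack_(scope_stack), outer_scope_(*scope_stack) {
        *scope_stack_ = scope;
      }
      ~BlockState() { *scope_stack_ = outer_scope_; }

     private:
      Scope** scope_stack_;
      Scope* outer_scope_;
    };

    int main() {
      Scope global{nullptr};
      Scope* current = &global;
      {
        Scope with_scope{current};
        BlockState state(&current, &with_scope);
        assert(current == &with_scope);  // inner scope active here
      }
      assert(current == &global);        // destructor restored the outer scope
    }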
+
+ class FunctionState BASE_EMBEDDED {
+ public:
+ FunctionState(
+ FunctionState** function_state_stack,
+ typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope,
+ typename Traits::Type::Zone* zone = NULL);
+ ~FunctionState();
+
+ int NextMaterializedLiteralIndex() {
+ return next_materialized_literal_index_++;
+ }
+ int materialized_literal_count() {
+ return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
+ }
+
+ int NextHandlerIndex() { return next_handler_index_++; }
+ int handler_count() { return next_handler_index_; }
+
+ void AddProperty() { expected_property_count_++; }
+ int expected_property_count() { return expected_property_count_; }
+
+ void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
+ bool is_generator() const { return is_generator_; }
+
+ void set_generator_object_variable(
+ typename Traits::Type::GeneratorVariable* variable) {
+ ASSERT(variable != NULL);
+ ASSERT(!is_generator());
+ generator_object_variable_ = variable;
+ is_generator_ = true;
+ }
+ typename Traits::Type::GeneratorVariable* generator_object_variable()
+ const {
+ return generator_object_variable_;
+ }
+
+ typename Traits::Type::Factory* factory() { return &factory_; }
+
+ private:
+ // Used to assign an index to each literal that needs materialization in
+ // the function. Includes regexp literals, and boilerplate for object and
+ // array literals.
+ int next_materialized_literal_index_;
+
+ // Used to assign a per-function index to try and catch handlers.
+ int next_handler_index_;
+
+ // Properties count estimation.
+ int expected_property_count_;
+
+ // Whether the function is a generator.
+ bool is_generator_;
+  // For generators, this variable may hold the generator object. This
+  // variable is used by yield expressions and return statements. It is
+  // not necessary for generator functions to have this variable set.
+ Variable* generator_object_variable_;
+
+ FunctionState** function_state_stack_;
+ FunctionState* outer_function_state_;
+ typename Traits::Type::Scope** scope_stack_;
+ typename Traits::Type::Scope* outer_scope_;
+ int saved_ast_node_id_; // Only used by ParserTraits.
+ typename Traits::Type::Zone* extra_param_;
+ typename Traits::Type::Factory factory_;
+
+ friend class ParserTraits;
+ };
+
+ class ParsingModeScope BASE_EMBEDDED {
+ public:
+ ParsingModeScope(ParserBase* parser, Mode mode)
+ : parser_(parser),
+ old_mode_(parser->mode()) {
+ parser_->mode_ = mode;
+ }
+ ~ParsingModeScope() {
+ parser_->mode_ = old_mode_;
+ }
+
+ private:
+ ParserBase* parser_;
+ Mode old_mode_;
+ };
+
Scanner* scanner() const { return scanner_; }
int position() { return scanner_->location().beg_pos; }
int peek_position() { return scanner_->peek_location().beg_pos; }
bool stack_overflow() const { return stack_overflow_; }
void set_stack_overflow() { stack_overflow_ = true; }
+ Mode mode() const { return mode_; }
+ typename Traits::Type::Zone* zone() const { return zone_; }
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
@@ -125,20 +279,134 @@ class ParserBase {
}
}
- bool peek_any_identifier();
- void ExpectSemicolon(bool* ok);
- bool CheckContextualKeyword(Vector<const char> keyword);
- void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
+ void ExpectSemicolon(bool* ok) {
+ // Check for automatic semicolon insertion according to
+ // the rules given in ECMA-262, section 7.9, page 21.
+ Token::Value tok = peek();
+ if (tok == Token::SEMICOLON) {
+ Next();
+ return;
+ }
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
+ tok == Token::RBRACE ||
+ tok == Token::EOS) {
+ return;
+ }
+ Expect(Token::SEMICOLON, ok);
+ }
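
The automatic-semicolon-insertion rule above reduces to a small predicate over the next token and the line-terminator flag. A minimal self-contained model (token names hypothetical):

    #include <cassert>

    // A missing ';' is tolerated before '}', at end of input, or after a
    // line terminator; anything else is a syntax error.
    enum TinyToken { kSemicolon, kRightBrace, kEos, kIdentifier };

    static bool SemicolonInsertable(TinyToken next, bool newline_before_next) {
      return newline_before_next || next == kRightBrace || next == kEos;
    }

    int main() {
      assert(SemicolonInsertable(kRightBrace, false));   // "a = b }"
      assert(SemicolonInsertable(kEos, false));          // "a = b" <eof>
      assert(SemicolonInsertable(kIdentifier, true));    // "a = b \n c"
      assert(!SemicolonInsertable(kIdentifier, false));  // "a = b c": error
    }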
+
+ bool peek_any_identifier() {
+ Token::Value next = peek();
+ return next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ next == Token::YIELD;
+ }
+
+ bool CheckContextualKeyword(Vector<const char> keyword) {
+ if (peek() == Token::IDENTIFIER &&
+ scanner()->is_next_contextual_keyword(keyword)) {
+ Consume(Token::IDENTIFIER);
+ return true;
+ }
+ return false;
+ }
- // Strict mode octal literal validation.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
+ Expect(Token::IDENTIFIER, ok);
+ if (!*ok) return;
+ if (!scanner()->is_literal_contextual_keyword(keyword)) {
+ ReportUnexpectedToken(scanner()->current_token());
+ *ok = false;
+ }
+ }
+
+ // Checks whether an octal literal was last seen between beg_pos and end_pos.
+ // If so, reports an error. Only called for strict mode.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ Scanner::Location octal = scanner()->octal_position();
+ if (octal.IsValid() && beg_pos <= octal.beg_pos &&
+ octal.end_pos <= end_pos) {
+ ReportMessageAt(octal, "strict_octal_literal");
+ scanner()->clear_octal_position();
+ *ok = false;
+ }
+ }
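
The octal check is an interval-containment test on the position the scanner recorded. A minimal self-contained model (names hypothetical):

    #include <cassert>

    struct Loc { int beg, end; bool IsValid() const { return beg >= 0; } };

    // An octal literal only triggers an error if it falls inside the strict
    // span being validated.
    static bool OctalInSpan(Loc octal, int beg_pos, int end_pos) {
      return octal.IsValid() && beg_pos <= octal.beg && octal.end <= end_pos;
    }

    int main() {
      Loc octal{12, 15};                          // scanner saw "077" at [12,15)
      assert(OctalInSpan(octal, 0, 100));         // inside the strict span: error
      assert(!OctalInSpan(octal, 20, 100));       // before the span: ok
      assert(!OctalInSpan(Loc{-1, -1}, 0, 100));  // no octal seen at all
    }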
// Determine precedence of given token.
- static int Precedence(Token::Value token, bool accept_IN);
+ static int Precedence(Token::Value token, bool accept_IN) {
+ if (token == Token::IN && !accept_IN)
+ return 0; // 0 precedence will terminate binary expression parsing
+ return Token::Precedence(token);
+ }
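
Precedence() drives the precedence-climbing loop of ParseBinaryExpression, now shared through ParserBase. A minimal self-contained sketch of that loop, evaluating +, - and * over single digits (names hypothetical; the real parser works on tokens, not characters):

    #include <cassert>
    #include <string>

    static int Precedence(char op) {
      if (op == '+' || op == '-') return 1;
      if (op == '*') return 2;
      return 0;  // not a binary operator: terminates the loop
    }

    static int ParseBinary(const std::string& s, size_t* pos, int min_prec) {
      int left = s[(*pos)++] - '0';  // "primary" level: a single digit
      while (*pos < s.size() && Precedence(s[*pos]) >= min_prec) {
        char op = s[(*pos)++];
        // Operands of a higher precedence level bind tighter.
        int right = ParseBinary(s, pos, Precedence(op) + 1);
        left = (op == '+') ? left + right
             : (op == '-') ? left - right
                           : left * right;
      }
      return left;
    }

    int main() {
      size_t pos = 0;
      assert(ParseBinary("1+2*3", &pos, 1) == 7);  // '*' binds tighter than '+'
    }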
+
+ typename Traits::Type::Factory* factory() {
+ return function_state_->factory();
+ }
+
+ StrictMode strict_mode() { return scope_->strict_mode(); }
+ bool is_generator() const { return function_state_->is_generator(); }
// Report syntax errors.
- virtual void ReportUnexpectedToken(Token::Value token) = 0;
- virtual void ReportMessageAt(Scanner::Location loc, const char* type) = 0;
+ void ReportMessage(const char* message, const char* arg = NULL,
+ bool is_reference_error = false) {
+ Scanner::Location source_location = scanner()->location();
+ Traits::ReportMessageAt(source_location, message, arg, is_reference_error);
+ }
+
+ void ReportMessageAt(Scanner::Location location, const char* message,
+ bool is_reference_error = false) {
+ Traits::ReportMessageAt(location, message, NULL, is_reference_error);
+ }
+
+ void ReportUnexpectedToken(Token::Value token);
+
+ // Recursive descent functions:
+
+  // Parses an identifier that is valid for the current scope; in particular,
+  // it fails on strict mode future reserved keywords in a strict scope. If
+  // allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
+  // "arguments" as an identifier even in strict mode (this is needed in cases
+  // like "var foo = eval;").
+ IdentifierT ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier,
+ bool* ok);
+  // Parses an identifier or a strict mode future reserved word, and indicates
+  // whether it is strict mode future reserved.
+ IdentifierT ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved,
+ bool* ok);
+ IdentifierT ParseIdentifierName(bool* ok);
+ // Parses an identifier and determines whether or not it is 'get' or 'set'.
+ IdentifierT ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
+
+ ExpressionT ParseRegExpLiteral(bool seen_equal, bool* ok);
+
+ ExpressionT ParsePrimaryExpression(bool* ok);
+ ExpressionT ParseExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseArrayLiteral(bool* ok);
+ ExpressionT ParseObjectLiteral(bool* ok);
+ typename Traits::Type::ExpressionList ParseArguments(bool* ok);
+ ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseYieldExpression(bool* ok);
+ ExpressionT ParseConditionalExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
+ ExpressionT ParseUnaryExpression(bool* ok);
+ ExpressionT ParsePostfixExpression(bool* ok);
+ ExpressionT ParseLeftHandSideExpression(bool* ok);
+ ExpressionT ParseMemberWithNewPrefixesExpression(bool* ok);
+ ExpressionT ParseMemberExpression(bool* ok);
+ ExpressionT ParseMemberExpressionContinuation(ExpressionT expression,
+ bool* ok);
+
+ // Checks if the expression is a valid reference expression (e.g., on the
+ // left-hand side of assignments). Although ruled out by ECMA as early errors,
+ // we allow calls for web compatibility and rewrite them to a runtime throw.
+ ExpressionT CheckAndRewriteReferenceExpression(
+ ExpressionT expression,
+ Scanner::Location location, const char* message, bool* ok);
// Used to detect duplicates in object literals. Each of the values
// kGetterProperty, kSetterProperty and kValueProperty represents
@@ -164,10 +432,10 @@ class ParserBase {
// Validation per ECMA 262 - 11.1.5 "Object Initialiser".
class ObjectLiteralChecker {
public:
- ObjectLiteralChecker(ParserBase* parser, LanguageMode mode)
+ ObjectLiteralChecker(ParserBase* parser, StrictMode strict_mode)
: parser_(parser),
finder_(scanner()->unicode_cache()),
- language_mode_(mode) { }
+ strict_mode_(strict_mode) { }
void CheckProperty(Token::Value property, PropertyKind type, bool* ok);
@@ -191,9 +459,22 @@ class ParserBase {
ParserBase* parser_;
DuplicateFinder finder_;
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
};
+ // If true, the next (and immediately following) function literal is
+  // preceded by a parenthesis, as in an immediately invoked function
+  // expression. Heuristically that means that the function will be called
+  // immediately, so never lazily compile it.
+ bool parenthesized_function_;
+
+ typename Traits::Type::Scope* scope_; // Scope stack.
+ FunctionState* function_state_; // Function state stack.
+ v8::Extension* extension_;
+ FuncNameInferrer* fni_;
+ ParserRecorder* log_;
+ Mode mode_;
+
private:
Scanner* scanner_;
uintptr_t stack_limit_;
@@ -203,6 +484,571 @@ class ParserBase {
bool allow_natives_syntax_;
bool allow_generators_;
bool allow_for_of_;
+
+ typename Traits::Type::Zone* zone_; // Only used by Parser.
+};
+
+
+class PreParserIdentifier {
+ public:
+ PreParserIdentifier() : type_(kUnknownIdentifier) {}
+ static PreParserIdentifier Default() {
+ return PreParserIdentifier(kUnknownIdentifier);
+ }
+ static PreParserIdentifier Eval() {
+ return PreParserIdentifier(kEvalIdentifier);
+ }
+ static PreParserIdentifier Arguments() {
+ return PreParserIdentifier(kArgumentsIdentifier);
+ }
+ static PreParserIdentifier FutureReserved() {
+ return PreParserIdentifier(kFutureReservedIdentifier);
+ }
+ static PreParserIdentifier FutureStrictReserved() {
+ return PreParserIdentifier(kFutureStrictReservedIdentifier);
+ }
+ static PreParserIdentifier Yield() {
+ return PreParserIdentifier(kYieldIdentifier);
+ }
+ bool IsEval() { return type_ == kEvalIdentifier; }
+ bool IsArguments() { return type_ == kArgumentsIdentifier; }
+ bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
+ bool IsYield() { return type_ == kYieldIdentifier; }
+ bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
+ bool IsFutureStrictReserved() {
+ return type_ == kFutureStrictReservedIdentifier;
+ }
+ bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
+
+ private:
+ enum Type {
+ kUnknownIdentifier,
+ kFutureReservedIdentifier,
+ kFutureStrictReservedIdentifier,
+ kYieldIdentifier,
+ kEvalIdentifier,
+ kArgumentsIdentifier
+ };
+ explicit PreParserIdentifier(Type type) : type_(type) {}
+ Type type_;
+
+ friend class PreParserExpression;
+};
+
+
+// Bits 0 and 1 are used to identify the type of expression:
+// If bit 0 is set, it's an identifier.
+// If bit 1 is set, it's a string literal.
+// If neither is set, the expression has no particular type; having both bits
+// set is not used yet.
+class PreParserExpression {
+ public:
+ static PreParserExpression Default() {
+ return PreParserExpression(kUnknownExpression);
+ }
+
+ static PreParserExpression FromIdentifier(PreParserIdentifier id) {
+ return PreParserExpression(kIdentifierFlag |
+ (id.type_ << kIdentifierShift));
+ }
+
+ static PreParserExpression StringLiteral() {
+ return PreParserExpression(kUnknownStringLiteral);
+ }
+
+ static PreParserExpression UseStrictStringLiteral() {
+ return PreParserExpression(kUseStrictString);
+ }
+
+ static PreParserExpression This() {
+ return PreParserExpression(kThisExpression);
+ }
+
+ static PreParserExpression ThisProperty() {
+ return PreParserExpression(kThisPropertyExpression);
+ }
+
+ static PreParserExpression Property() {
+ return PreParserExpression(kPropertyExpression);
+ }
+
+ static PreParserExpression Call() {
+ return PreParserExpression(kCallExpression);
+ }
+
+ bool IsIdentifier() { return (code_ & kIdentifierFlag) != 0; }
+
+ PreParserIdentifier AsIdentifier() {
+ ASSERT(IsIdentifier());
+ return PreParserIdentifier(
+ static_cast<PreParserIdentifier::Type>(code_ >> kIdentifierShift));
+ }
+
+ bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
+
+ bool IsUseStrictLiteral() {
+ return (code_ & kStringLiteralMask) == kUseStrictString;
+ }
+
+ bool IsThis() { return code_ == kThisExpression; }
+
+ bool IsThisProperty() { return code_ == kThisPropertyExpression; }
+
+ bool IsProperty() {
+ return code_ == kPropertyExpression || code_ == kThisPropertyExpression;
+ }
+
+ bool IsCall() { return code_ == kCallExpression; }
+
+ bool IsValidReferenceExpression() {
+ return IsIdentifier() || IsProperty();
+ }
+
+ // At the moment PreParser doesn't track these expression types.
+ bool IsFunctionLiteral() const { return false; }
+ bool IsCallNew() const { return false; }
+
+ PreParserExpression AsFunctionLiteral() { return *this; }
+
+ // Dummy implementation for making expression->somefunc() work in both Parser
+ // and PreParser.
+ PreParserExpression* operator->() { return this; }
+
+ // More dummy implementations of things PreParser doesn't need to track:
+ void set_index(int index) {} // For YieldExpressions
+ void set_parenthesized() {}
+
+ private:
+ // Least significant 2 bits are used as flags. Bits 0 and 1 represent
+  // identifiers or string literals, and are mutually exclusive, but can both
+ // be absent. If the expression is an identifier or a string literal, the
+ // other bits describe the type (see PreParserIdentifier::Type and string
+ // literal constants below).
+ enum {
+ kUnknownExpression = 0,
+ // Identifiers
+ kIdentifierFlag = 1, // Used to detect labels.
+ kIdentifierShift = 3,
+
+ kStringLiteralFlag = 2, // Used to detect directive prologue.
+ kUnknownStringLiteral = kStringLiteralFlag,
+ kUseStrictString = kStringLiteralFlag | 8,
+ kStringLiteralMask = kUseStrictString,
+
+ // Below here applies if neither identifier nor string literal. Reserve the
+ // 2 least significant bits for flags.
+ kThisExpression = 1 << 2,
+ kThisPropertyExpression = 2 << 2,
+ kPropertyExpression = 3 << 2,
+ kCallExpression = 4 << 2
+ };
+
+ explicit PreParserExpression(int expression_code) : code_(expression_code) {}
+
+ int code_;
+};
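
The bit layout documented above can be checked with a small model. The constants below mirror the ones in the class but are hypothetical stand-ins, not the V8 definitions:

    #include <cassert>

    // Bit 0 flags identifiers, bit 1 flags string literals, and the payload
    // (the identifier type, or the use-strict marker) lives in higher bits.
    enum {
      kIdentifierFlag = 1,
      kIdentifierShift = 3,
      kStringLiteralFlag = 2,
      kUseStrictString = kStringLiteralFlag | 8,
    };

    struct TinyExpression {
      int code;
      bool IsIdentifier() const { return (code & kIdentifierFlag) != 0; }
      bool IsStringLiteral() const { return (code & kStringLiteralFlag) != 0; }
      int IdentifierType() const { return code >> kIdentifierShift; }
    };

    int main() {
      const int kEvalIdentifier = 4;  // hypothetical identifier type value
      TinyExpression id{kIdentifierFlag | (kEvalIdentifier << kIdentifierShift)};
      assert(id.IsIdentifier() && !id.IsStringLiteral());
      assert(id.IdentifierType() == kEvalIdentifier);

      TinyExpression use_strict{kUseStrictString};
      assert(use_strict.IsStringLiteral() && !use_strict.IsIdentifier());
    }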
+
+
+// PreParserExpressionList doesn't actually store the expressions because
+// PreParser doesn't need to.
+class PreParserExpressionList {
+ public:
+ // These functions make list->Add(some_expression) work (and do nothing).
+ PreParserExpressionList() : length_(0) {}
+ PreParserExpressionList* operator->() { return this; }
+ void Add(PreParserExpression, void*) { ++length_; }
+ int length() const { return length_; }
+ private:
+ int length_;
+};
+
+
+class PreParserStatement {
+ public:
+ static PreParserStatement Default() {
+ return PreParserStatement(kUnknownStatement);
+ }
+
+ static PreParserStatement FunctionDeclaration() {
+ return PreParserStatement(kFunctionDeclaration);
+ }
+
+  // Creates an expression statement from an expression.
+  // Preserves whether it is an unparenthesized string literal, possibly
+  // the "use strict" directive.
+ static PreParserStatement ExpressionStatement(
+ PreParserExpression expression) {
+ if (expression.IsUseStrictLiteral()) {
+ return PreParserStatement(kUseStrictExpressionStatement);
+ }
+ if (expression.IsStringLiteral()) {
+ return PreParserStatement(kStringLiteralExpressionStatement);
+ }
+ return Default();
+ }
+
+ bool IsStringLiteral() {
+ return code_ == kStringLiteralExpressionStatement;
+ }
+
+ bool IsUseStrictLiteral() {
+ return code_ == kUseStrictExpressionStatement;
+ }
+
+ bool IsFunctionDeclaration() {
+ return code_ == kFunctionDeclaration;
+ }
+
+ private:
+ enum Type {
+ kUnknownStatement,
+ kStringLiteralExpressionStatement,
+ kUseStrictExpressionStatement,
+ kFunctionDeclaration
+ };
+
+ explicit PreParserStatement(Type code) : code_(code) {}
+ Type code_;
+};
+
+
+
+// PreParserStatementList doesn't actually store the statements because
+// the PreParser does not need them.
+class PreParserStatementList {
+ public:
+  // These functions make list->Add(some_statement) work as no-ops.
+ PreParserStatementList() {}
+ PreParserStatementList* operator->() { return this; }
+ void Add(PreParserStatement, void*) {}
+};
+
+
+class PreParserScope {
+ public:
+ explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type)
+ : scope_type_(scope_type) {
+ strict_mode_ = outer_scope ? outer_scope->strict_mode() : SLOPPY;
+ }
+
+ ScopeType type() { return scope_type_; }
+ StrictMode strict_mode() const { return strict_mode_; }
+ void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
+
+ private:
+ ScopeType scope_type_;
+ StrictMode strict_mode_;
+};
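
The constructor above makes strict mode flow from the outer scope inward until a "use strict" directive flips it. A minimal self-contained model (enum values and names hypothetical):

    #include <cassert>

    enum TinyStrictMode { kSloppy, kStrict };

    // A scope starts with its outer scope's mode (sloppy at the top) and may
    // later be switched to strict by a "use strict" directive.
    struct TinyScope {
      explicit TinyScope(const TinyScope* outer)
          : strict_mode(outer ? outer->strict_mode : kSloppy) {}
      TinyStrictMode strict_mode;
    };

    int main() {
      TinyScope global(nullptr);
      TinyScope fun(&global);
      assert(fun.strict_mode == kSloppy);    // inherited from sloppy global
      fun.strict_mode = kStrict;             // body begins with "use strict"
      TinyScope inner(&fun);
      assert(inner.strict_mode == kStrict);  // nested functions inherit strict
    }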
+
+
+class PreParserFactory {
+ public:
+ explicit PreParserFactory(void* extra_param) {}
+ PreParserExpression NewLiteral(PreParserIdentifier identifier,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewNumberLiteral(double number,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
+ PreParserIdentifier js_flags,
+ int literal_index,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewArrayLiteral(PreParserExpressionList values,
+ int literal_index,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteralProperty(bool is_getter,
+ PreParserExpression value,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
+ PreParserExpression value) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
+ int literal_index,
+ int boilerplate_properties,
+ bool has_function,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewVariableProxy(void* generator_variable) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewProperty(PreParserExpression obj,
+ PreParserExpression key,
+ int pos) {
+ if (obj.IsThis()) {
+ return PreParserExpression::ThisProperty();
+ }
+ return PreParserExpression::Property();
+ }
+ PreParserExpression NewUnaryOperation(Token::Value op,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewBinaryOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCompareOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewAssignment(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewYield(PreParserExpression generator_object,
+ PreParserExpression expression,
+ Yield::Kind yield_kind,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewConditional(PreParserExpression condition,
+ PreParserExpression then_expression,
+ PreParserExpression else_expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCountOperation(Token::Value op,
+ bool is_prefix,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCall(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Call();
+ }
+ PreParserExpression NewCallNew(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+};
+
+
+class PreParser;
+
+class PreParserTraits {
+ public:
+ struct Type {
+ // TODO(marja): To be removed. The Traits object should contain all the data
+ // it needs.
+ typedef PreParser* Parser;
+
+ // Used by FunctionState and BlockState.
+ typedef PreParserScope Scope;
+ // PreParser doesn't need to store generator variables.
+ typedef void GeneratorVariable;
+ // No interaction with Zones.
+ typedef void Zone;
+
+ // Return types for traversing functions.
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserExpression YieldExpression;
+ typedef PreParserExpression FunctionLiteral;
+ typedef PreParserExpression ObjectLiteralProperty;
+ typedef PreParserExpression Literal;
+ typedef PreParserExpressionList ExpressionList;
+ typedef PreParserExpressionList PropertyList;
+ typedef PreParserStatementList StatementList;
+
+ // For constructing objects returned by the traversing functions.
+ typedef PreParserFactory Factory;
+ };
+
+ explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
+
+ // Custom operations executed when FunctionStates are created and
+ // destructed. (The PreParser doesn't need to do anything.)
+ template<typename FunctionState>
+ static void SetUpFunctionState(FunctionState* function_state, void*) {}
+ template<typename FunctionState>
+ static void TearDownFunctionState(FunctionState* function_state, void*) {}
+
+ // Helper functions for recursive descent.
+ static bool IsEvalOrArguments(PreParserIdentifier identifier) {
+ return identifier.IsEvalOrArguments();
+ }
+
+ // Returns true if the expression is of type "this.foo".
+ static bool IsThisProperty(PreParserExpression expression) {
+ return expression.IsThisProperty();
+ }
+
+ static bool IsIdentifier(PreParserExpression expression) {
+ return expression.IsIdentifier();
+ }
+
+ static PreParserIdentifier AsIdentifier(PreParserExpression expression) {
+ return expression.AsIdentifier();
+ }
+
+ static bool IsBoilerplateProperty(PreParserExpression property) {
+ // PreParser doesn't count boilerplate properties.
+ return false;
+ }
+
+ static bool IsArrayIndex(PreParserIdentifier string, uint32_t* index) {
+ return false;
+ }
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ static void PushLiteralName(FuncNameInferrer* fni, PreParserIdentifier id) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+ static void PushPropertyName(FuncNameInferrer* fni,
+ PreParserExpression expression) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+
+ static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
+ PreParserScope* scope, PreParserExpression value, bool* has_function) {}
+
+ static void CheckAssigningFunctionLiteralToProperty(
+ PreParserExpression left, PreParserExpression right) {}
+
+ // PreParser doesn't need to keep track of eval calls.
+ static void CheckPossibleEvalCall(PreParserExpression expression,
+ PreParserScope* scope) {}
+
+ static PreParserExpression MarkExpressionAsLValue(
+ PreParserExpression expression) {
+ // TODO(marja): To be able to produce the same errors, the preparser needs
+ // to start tracking which expressions are variables and which are lvalues.
+ return expression;
+ }
+
+ bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
+ PreParserExpression y,
+ Token::Value op,
+ int pos,
+ PreParserFactory* factory) {
+ return false;
+ }
+
+ PreParserExpression BuildUnaryExpression(PreParserExpression expression,
+ Token::Value op, int pos,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ PreParserExpression NewThrowReferenceError(const char* type, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewThrowSyntaxError(
+ const char* type, Handle<Object> arg, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewThrowTypeError(
+ const char* type, Handle<Object> arg, int pos) {
+ return PreParserExpression::Default();
+ }
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location location,
+ const char* message,
+ const char* arg = NULL,
+ bool is_reference_error = false);
+ void ReportMessageAt(int start_pos,
+ int end_pos,
+ const char* message,
+ const char* arg = NULL,
+ bool is_reference_error = false);
+
+ // "null" return type creators.
+ static PreParserIdentifier EmptyIdentifier() {
+ return PreParserIdentifier::Default();
+ }
+ static PreParserExpression EmptyExpression() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyLiteral() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpressionList NullExpressionList() {
+ return PreParserExpressionList();
+ }
+
+ // Odd-ball literal creators.
+ static PreParserExpression GetLiteralTheHole(int position,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ // Producing data during the recursive descent.
+ PreParserIdentifier GetSymbol(Scanner* scanner);
+ static PreParserIdentifier NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured) {
+ return PreParserIdentifier::Default();
+ }
+
+ static PreParserExpression ThisExpression(PreParserScope* scope,
+ PreParserFactory* factory) {
+ return PreParserExpression::This();
+ }
+
+ static PreParserExpression ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression ExpressionFromIdentifier(
+ PreParserIdentifier name, int pos, PreParserScope* scope,
+ PreParserFactory* factory) {
+ return PreParserExpression::FromIdentifier(name);
+ }
+
+ PreParserExpression ExpressionFromString(int pos,
+ Scanner* scanner,
+ PreParserFactory* factory = NULL);
+
+ static PreParserExpressionList NewExpressionList(int size, void* zone) {
+ return PreParserExpressionList();
+ }
+
+ static PreParserStatementList NewStatementList(int size, void* zone) {
+ return PreParserStatementList();
+ }
+
+ static PreParserExpressionList NewPropertyList(int size, void* zone) {
+ return PreParserExpressionList();
+ }
+
+ // Temporary glue; these functions will move to ParserBase.
+ PreParserExpression ParseV8Intrinsic(bool* ok);
+ PreParserExpression ParseFunctionLiteral(
+ PreParserIdentifier name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ bool* ok);
+
+ private:
+ PreParser* pre_parser_;
};
@@ -218,38 +1064,35 @@ class ParserBase {
// rather it is to speed up properly written and correct programs.
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-class PreParser : public ParserBase {
+class PreParser : public ParserBase<PreParserTraits> {
public:
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserStatement Statement;
+
enum PreParseResult {
kPreParseStackOverflow,
kPreParseSuccess
};
- PreParser(Scanner* scanner,
- ParserRecorder* log,
- uintptr_t stack_limit)
- : ParserBase(scanner, stack_limit),
- log_(log),
- scope_(NULL),
- strict_mode_violation_location_(Scanner::Location::invalid()),
- strict_mode_violation_type_(NULL),
- parenthesized_function_(false) { }
-
- ~PreParser() {}
+ PreParser(Scanner* scanner, ParserRecorder* log, uintptr_t stack_limit)
+ : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, log, NULL,
+ this) {}
// Pre-parse the program from the character stream; returns
// kPreParseSuccess on success (even if parsing failed, the pre-parse data
// successfully captured the syntax error), and kPreParseStackOverflow if a
// stack overflow happened during parsing.
PreParseResult PreParseProgram() {
- Scope top_scope(&scope_, kTopLevelScope);
+ PreParserScope scope(scope_, GLOBAL_SCOPE);
+ FunctionState top_scope(&function_state_, &scope_, &scope, NULL);
bool ok = true;
int start_position = scanner()->peek_location().beg_pos;
ParseSourceElements(Token::EOS, &ok);
if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
ReportUnexpectedToken(scanner()->current_token());
- } else if (!scope_->is_classic_mode()) {
+ } else if (scope_->strict_mode() == STRICT) {
CheckOctalLiteral(start_position, scanner()->location().end_pos, &ok);
}
return kPreParseSuccess;
@@ -263,21 +1106,18 @@ class PreParser : public ParserBase {
// keyword and parameters, and have consumed the initial '{'.
// At return, unless an error occurred, the scanner is positioned before the
// final '}'.
- PreParseResult PreParseLazyFunction(LanguageMode mode,
+ PreParseResult PreParseLazyFunction(StrictMode strict_mode,
bool is_generator,
ParserRecorder* log);
private:
+ friend class PreParserTraits;
+
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
// are either being counted in the preparser data, or are important
// for throwing the correct syntax error exceptions.
- enum ScopeType {
- kTopLevelScope,
- kFunctionScope
- };
-
enum VariableDeclarationContext {
kSourceElement,
kStatement,
@@ -290,402 +1130,1043 @@ class PreParser : public ParserBase {
kHasNoInitializers
};
- class Expression;
-
- class Identifier {
- public:
- static Identifier Default() {
- return Identifier(kUnknownIdentifier);
- }
- static Identifier Eval() {
- return Identifier(kEvalIdentifier);
- }
- static Identifier Arguments() {
- return Identifier(kArgumentsIdentifier);
- }
- static Identifier FutureReserved() {
- return Identifier(kFutureReservedIdentifier);
- }
- static Identifier FutureStrictReserved() {
- return Identifier(kFutureStrictReservedIdentifier);
- }
- static Identifier Yield() {
- return Identifier(kYieldIdentifier);
- }
- bool IsEval() { return type_ == kEvalIdentifier; }
- bool IsArguments() { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
- bool IsYield() { return type_ == kYieldIdentifier; }
- bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
- bool IsFutureStrictReserved() {
- return type_ == kFutureStrictReservedIdentifier;
- }
- bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
- private:
- enum Type {
- kUnknownIdentifier,
- kFutureReservedIdentifier,
- kFutureStrictReservedIdentifier,
- kYieldIdentifier,
- kEvalIdentifier,
- kArgumentsIdentifier
- };
- explicit Identifier(Type type) : type_(type) { }
- Type type_;
-
- friend class Expression;
+ enum SourceElements {
+ kUnknownSourceElements
};
- // Bits 0 and 1 are used to identify the type of expression:
- // If bit 0 is set, it's an identifier.
- // if bit 1 is set, it's a string literal.
- // If neither is set, it's no particular type, and both set isn't
- // used yet.
- // Bit 2 is used to mark the expression as being parenthesized,
- // so "(foo)" isn't recognized as a pure identifier (and possible label).
- class Expression {
- public:
- static Expression Default() {
- return Expression(kUnknownExpression);
- }
+ // All ParseXXX functions take as the last argument an *ok parameter
+ // which is set to false if parsing failed; it is unchanged otherwise.
+ // By making the 'exception handling' explicit, we are forced to check
+ // for failure at the call sites.
+ Statement ParseSourceElement(bool* ok);
+ SourceElements ParseSourceElements(int end_token, bool* ok);
+ Statement ParseStatement(bool* ok);
+ Statement ParseFunctionDeclaration(bool* ok);
+ Statement ParseBlock(bool* ok);
+ Statement ParseVariableStatement(VariableDeclarationContext var_context,
+ bool* ok);
+ Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
+ VariableDeclarationProperties* decl_props,
+ int* num_decl,
+ bool* ok);
+ Statement ParseExpressionOrLabelledStatement(bool* ok);
+ Statement ParseIfStatement(bool* ok);
+ Statement ParseContinueStatement(bool* ok);
+ Statement ParseBreakStatement(bool* ok);
+ Statement ParseReturnStatement(bool* ok);
+ Statement ParseWithStatement(bool* ok);
+ Statement ParseSwitchStatement(bool* ok);
+ Statement ParseDoWhileStatement(bool* ok);
+ Statement ParseWhileStatement(bool* ok);
+ Statement ParseForStatement(bool* ok);
+ Statement ParseThrowStatement(bool* ok);
+ Statement ParseTryStatement(bool* ok);
+ Statement ParseDebuggerStatement(bool* ok);
+ Expression ParseConditionalExpression(bool accept_IN, bool* ok);
+ Expression ParseObjectLiteral(bool* ok);
+ Expression ParseV8Intrinsic(bool* ok);
- static Expression FromIdentifier(Identifier id) {
- return Expression(kIdentifierFlag | (id.type_ << kIdentifierShift));
- }
+ Expression ParseFunctionLiteral(
+ Identifier name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_pos,
+ FunctionLiteral::FunctionType function_type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ bool* ok);
+ void ParseLazyFunctionLiteralBody(bool* ok);
- static Expression StringLiteral() {
- return Expression(kUnknownStringLiteral);
- }
+ bool CheckInOrOf(bool accept_OF);
+};
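+// A sketch of the explicit *ok protocol used by the ParseXXX functions
+// above (Statement::Default() stands in for whatever failure value the
+// caller returns):
+//
+//   bool ok = true;
+//   Statement statement = ParseStatement(&ok);
+//   if (!ok) return Statement::Default();  // check at every call site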
- static Expression UseStrictStringLiteral() {
- return Expression(kUseStrictString);
+template<class Traits>
+ParserBase<Traits>::FunctionState::FunctionState(
+ FunctionState** function_state_stack,
+ typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope,
+ typename Traits::Type::Zone* extra_param)
+ : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+ next_handler_index_(0),
+ expected_property_count_(0),
+ is_generator_(false),
+ generator_object_variable_(NULL),
+ function_state_stack_(function_state_stack),
+ outer_function_state_(*function_state_stack),
+ scope_stack_(scope_stack),
+ outer_scope_(*scope_stack),
+ saved_ast_node_id_(0),
+ extra_param_(extra_param),
+ factory_(extra_param) {
+ *scope_stack_ = scope;
+ *function_state_stack = this;
+ Traits::SetUpFunctionState(this, extra_param);
+}
+
+
+template<class Traits>
+ParserBase<Traits>::FunctionState::~FunctionState() {
+ *scope_stack_ = outer_scope_;
+ *function_state_stack_ = outer_function_state_;
+ Traits::TearDownFunctionState(this, extra_param_);
+}
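+// The constructor/destructor pair above makes FunctionState a RAII guard
+// over the function-state and scope stacks; a sketch of the intended
+// nesting:
+//
+//   {
+//     FunctionState outer(&function_state_, &scope_, &outer_scope, NULL);
+//     {
+//       FunctionState inner(&function_state_, &scope_, &inner_scope, NULL);
+//       // function_state_ and scope_ now refer to the inner function
+//     }  // inner's destructor restores the outer state
+//   }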
+
+
+template<class Traits>
+void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
+ Scanner::Location source_location = scanner()->location();
+
+ // Several of the tokens are treated specially
+ switch (token) {
+ case Token::EOS:
+ return ReportMessageAt(source_location, "unexpected_eos");
+ case Token::NUMBER:
+ return ReportMessageAt(source_location, "unexpected_token_number");
+ case Token::STRING:
+ return ReportMessageAt(source_location, "unexpected_token_string");
+ case Token::IDENTIFIER:
+ return ReportMessageAt(source_location, "unexpected_token_identifier");
+ case Token::FUTURE_RESERVED_WORD:
+ return ReportMessageAt(source_location, "unexpected_reserved");
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ return ReportMessageAt(source_location, strict_mode() == SLOPPY
+ ? "unexpected_token_identifier" : "unexpected_strict_reserved");
+ default:
+ const char* name = Token::String(token);
+ ASSERT(name != NULL);
+ Traits::ReportMessageAt(source_location, "unexpected_token", name);
+ }
+}
+
+
+template<class Traits>
+typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ IdentifierT name = this->GetSymbol(scanner());
+ if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
+ strict_mode() == STRICT && this->IsEvalOrArguments(name)) {
+ ReportMessage("strict_eval_arguments");
+ *ok = false;
}
+ return name;
+ } else if (strict_mode() == SLOPPY &&
+ (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !is_generator()))) {
+ return this->GetSymbol(scanner());
+ } else {
+ this->ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::IdentifierT ParserBase<
+ Traits>::ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ *is_strict_reserved = false;
+ } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !this->is_generator())) {
+ *is_strict_reserved = true;
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+ return this->GetSymbol(scanner());
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::IdentifierT
+ParserBase<Traits>::ParseIdentifierName(bool* ok) {
+ Token::Value next = Next();
+ if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
+ next != Token::FUTURE_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
+ this->ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+ return this->GetSymbol(scanner());
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::IdentifierT
+ParserBase<Traits>::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ IdentifierT result = ParseIdentifierName(ok);
+ if (!*ok) return Traits::EmptyIdentifier();
+ scanner()->IsGetOrSet(is_get, is_set);
+ return result;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
+ bool seen_equal, bool* ok) {
+ int pos = peek_position();
+ if (!scanner()->ScanRegExpPattern(seen_equal)) {
+ Next();
+ ReportMessage("unterminated_regexp");
+ *ok = false;
+ return Traits::EmptyExpression();
+ }
- static Expression This() {
- return Expression(kThisExpression);
- }
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
- static Expression ThisProperty() {
- return Expression(kThisPropertyExpression);
+ IdentifierT js_pattern = this->NextLiteralString(scanner(), TENURED);
+ if (!scanner()->ScanRegExpFlags()) {
+ Next();
+ ReportMessage("invalid_regexp_flags");
+ *ok = false;
+ return Traits::EmptyExpression();
+ }
+ IdentifierT js_flags = this->NextLiteralString(scanner(), TENURED);
+ Next();
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
+}
+
+
+#define CHECK_OK ok); \
+ if (!*ok) return this->EmptyExpression(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+// Used in functions where the return type is not ExpressionT.
+#define CHECK_OK_CUSTOM(x) ok); \
+ if (!*ok) return this->x(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
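+// How these macros expand: a call written as
+//
+//   ExpressionT expr = ParseExpression(true, CHECK_OK);
+//
+// becomes
+//
+//   ExpressionT expr = ParseExpression(true, ok);
+//   if (!*ok) return this->EmptyExpression();
+//   ((void)0);
+//
+// i.e. CHECK_OK supplies the closing parenthesis of the call itself and
+// appends the early return; CHECK_OK_CUSTOM(x) returns this->x() instead.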
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
+ // PrimaryExpression ::
+ // 'this'
+ // 'null'
+ // 'true'
+ // 'false'
+ // Identifier
+ // Number
+ // String
+ // ArrayLiteral
+ // ObjectLiteral
+ // RegExpLiteral
+ // '(' Expression ')'
+
+ int pos = peek_position();
+ ExpressionT result = this->EmptyExpression();
+ Token::Value token = peek();
+ switch (token) {
+ case Token::THIS: {
+ Consume(Token::THIS);
+ result = this->ThisExpression(scope_, factory());
+ break;
}
- static Expression StrictFunction() {
- return Expression(kStrictFunctionExpression);
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::NUMBER:
+ Next();
+ result = this->ExpressionFromLiteral(token, pos, scanner(), factory());
+ break;
+
+ case Token::IDENTIFIER:
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
+ // Using eval or arguments in this context is OK even in strict mode.
+ IdentifierT name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
+ result = this->ExpressionFromIdentifier(name, pos, scope_, factory());
+ break;
}
- bool IsIdentifier() {
- return (code_ & kIdentifierFlag) != 0;
+ case Token::STRING: {
+ Consume(Token::STRING);
+ result = this->ExpressionFromString(pos, scanner(), factory());
+ break;
}
- // Only works correctly if it is actually an identifier expression.
- PreParser::Identifier AsIdentifier() {
- return PreParser::Identifier(
- static_cast<PreParser::Identifier::Type>(code_ >> kIdentifierShift));
- }
+ case Token::ASSIGN_DIV:
+ result = this->ParseRegExpLiteral(true, CHECK_OK);
+ break;
+
+ case Token::DIV:
+ result = this->ParseRegExpLiteral(false, CHECK_OK);
+ break;
+
+ case Token::LBRACK:
+ result = this->ParseArrayLiteral(CHECK_OK);
+ break;
+
+ case Token::LBRACE:
+ result = this->ParseObjectLiteral(CHECK_OK);
+ break;
+
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ // Heuristically try to detect immediately called functions before
+ // seeing the call parentheses.
+ parenthesized_function_ = (peek() == Token::FUNCTION);
+ result = this->ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ break;
+
+ case Token::MOD:
+ if (allow_natives_syntax() || extension_ != NULL) {
+ result = this->ParseV8Intrinsic(CHECK_OK);
+ break;
+ }
+ // If we're not allowing special syntax, we fall through to the
+ // default case.
- bool IsParenthesized() {
- // If bit 0 or 1 is set, we interpret bit 2 as meaning parenthesized.
- return (code_ & 7) > 4;
+ default: {
+ Next();
+ ReportUnexpectedToken(token);
+ *ok = false;
}
+ }
- bool IsRawIdentifier() {
- return !IsParenthesized() && IsIdentifier();
+ return result;
+}
+
+// Precedence = 1
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
+ bool accept_IN, bool* ok) {
+ // Expression ::
+ // AssignmentExpression
+ // Expression ',' AssignmentExpression
+
+ ExpressionT result = this->ParseAssignmentExpression(accept_IN, CHECK_OK);
+ while (peek() == Token::COMMA) {
+ Expect(Token::COMMA, CHECK_OK);
+ int pos = position();
+ ExpressionT right = this->ParseAssignmentExpression(accept_IN, CHECK_OK);
+ result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
+ }
+ return result;
+}
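+// Example: "a, b, c" yields a left-nested comma tree,
+// BinaryOperation(COMMA, BinaryOperation(COMMA, a, b), c), so the
+// expression evaluates left to right and produces the value of c.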
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
+ bool* ok) {
+ // ArrayLiteral ::
+ // '[' Expression? (',' Expression?)* ']'
+
+ int pos = peek_position();
+ typename Traits::Type::ExpressionList values =
+ this->NewExpressionList(4, zone_);
+ Expect(Token::LBRACK, CHECK_OK);
+ while (peek() != Token::RBRACK) {
+ ExpressionT elem = this->EmptyExpression();
+ if (peek() == Token::COMMA) {
+ elem = this->GetLiteralTheHole(peek_position(), factory());
+ } else {
+ elem = this->ParseAssignmentExpression(true, CHECK_OK);
}
-
- bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
-
- bool IsRawStringLiteral() {
- return !IsParenthesized() && IsStringLiteral();
+ values->Add(elem, zone_);
+ if (peek() != Token::RBRACK) {
+ Expect(Token::COMMA, CHECK_OK);
}
+ }
+ Expect(Token::RBRACK, CHECK_OK);
- bool IsUseStrictLiteral() {
- return (code_ & kStringLiteralMask) == kUseStrictString;
- }
+ // Update the scope information before the pre-parsing bailout.
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
- bool IsThis() {
- return code_ == kThisExpression;
- }
+ return factory()->NewArrayLiteral(values, literal_index, pos);
+}
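+// Example: in "[, x, ]" the leading comma produces a hole via
+// GetLiteralTheHole and the trailing comma adds nothing, so the literal
+// has the two elements [hole, x].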
- bool IsThisProperty() {
- return code_ == kThisPropertyExpression;
- }
- bool IsStrictFunction() {
- return code_ == kStrictFunctionExpression;
- }
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
+ bool* ok) {
+ // ObjectLiteral ::
+ // '{' ((
+ // ((IdentifierName | String | Number) ':' AssignmentExpression) |
+ // (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
+ // ) ',')* '}'
+ // (Except that trailing comma is not required and not allowed.)
- Expression Parenthesize() {
- int type = code_ & 3;
- if (type != 0) {
- // Identifiers and string literals can be parenthesized.
- // They no longer work as labels or directive prologues,
- // but are still recognized in other contexts.
- return Expression(code_ | kParenthesizedExpressionFlag);
- }
- // For other types of expressions, it's not important to remember
- // the parentheses.
- return *this;
- }
+ int pos = peek_position();
+ typename Traits::Type::PropertyList properties =
+ this->NewPropertyList(4, zone_);
+ int number_of_boilerplate_properties = 0;
+ bool has_function = false;
- private:
- // First two/three bits are used as flags.
- // Bit 0 and 1 represent identifiers or strings literals, and are
- // mutually exclusive, but can both be absent.
- // If bit 0 or 1 are set, bit 2 marks that the expression has
- // been wrapped in parentheses (a string literal can no longer
- // be a directive prologue, and an identifier can no longer be
- // a label).
- enum {
- kUnknownExpression = 0,
- // Identifiers
- kIdentifierFlag = 1, // Used to detect labels.
- kIdentifierShift = 3,
-
- kStringLiteralFlag = 2, // Used to detect directive prologue.
- kUnknownStringLiteral = kStringLiteralFlag,
- kUseStrictString = kStringLiteralFlag | 8,
- kStringLiteralMask = kUseStrictString,
-
- // Only if identifier or string literal.
- kParenthesizedExpressionFlag = 4,
-
- // Below here applies if neither identifier nor string literal.
- kThisExpression = 4,
- kThisPropertyExpression = 8,
- kStrictFunctionExpression = 12
- };
-
- explicit Expression(int expression_code) : code_(expression_code) { }
-
- int code_;
- };
+ ObjectLiteralChecker checker(this, strict_mode());
- class Statement {
- public:
- static Statement Default() {
- return Statement(kUnknownStatement);
- }
+ Expect(Token::LBRACE, CHECK_OK);
- static Statement FunctionDeclaration() {
- return Statement(kFunctionDeclaration);
- }
+ while (peek() != Token::RBRACE) {
+ if (fni_ != NULL) fni_->Enter();
- // Creates expression statement from expression.
- // Preserves being an unparenthesized string literal, possibly
- // "use strict".
- static Statement ExpressionStatement(Expression expression) {
- if (!expression.IsParenthesized()) {
- if (expression.IsUseStrictLiteral()) {
- return Statement(kUseStrictExpressionStatement);
+ typename Traits::Type::Literal key = this->EmptyLiteral();
+ Token::Value next = peek();
+ int next_pos = peek_position();
+
+ switch (next) {
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::IDENTIFIER: {
+ bool is_getter = false;
+ bool is_setter = false;
+ IdentifierT id =
+ ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+ if (fni_ != NULL) this->PushLiteralName(fni_, id);
+
+ if ((is_getter || is_setter) && peek() != Token::COLON) {
+ // Special handling of getter and setter syntax:
+ // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
+ // We have already read the "get" or "set" keyword.
+ Token::Value next = Next();
+ if (next != i::Token::IDENTIFIER &&
+ next != i::Token::FUTURE_RESERVED_WORD &&
+ next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
+ next != i::Token::NUMBER &&
+ next != i::Token::STRING &&
+ !Token::IsKeyword(next)) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return this->EmptyLiteral();
+ }
+ // Validate the property.
+ PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
+ checker.CheckProperty(next, type, CHECK_OK);
+ IdentifierT name = this->GetSymbol(scanner_);
+ typename Traits::Type::FunctionLiteral value =
+ this->ParseFunctionLiteral(
+ name, scanner()->location(),
+ false, // reserved words are allowed here
+ false, // not a generator
+ RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
+ is_getter ? FunctionLiteral::GETTER_ARITY
+ : FunctionLiteral::SETTER_ARITY,
+ CHECK_OK);
+ typename Traits::Type::ObjectLiteralProperty property =
+ factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
+ if (this->IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
+ properties->Add(property, zone());
+ if (peek() != Token::RBRACE) {
+ // Need {} because of the CHECK_OK macro.
+ Expect(Token::COMMA, CHECK_OK);
+ }
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
+ continue; // restart the while
}
- if (expression.IsStringLiteral()) {
- return Statement(kStringLiteralExpressionStatement);
+ // Failed to parse as get/set property, so it's just a normal property
+ // (which might be called "get" or "set" or something else).
+ key = factory()->NewLiteral(id, next_pos);
+ break;
+ }
+ case Token::STRING: {
+ Consume(Token::STRING);
+ IdentifierT string = this->GetSymbol(scanner_);
+ if (fni_ != NULL) this->PushLiteralName(fni_, string);
+ uint32_t index;
+ if (this->IsArrayIndex(string, &index)) {
+ key = factory()->NewNumberLiteral(index, next_pos);
+ break;
}
+ key = factory()->NewLiteral(string, next_pos);
+ break;
+ }
+ case Token::NUMBER: {
+ Consume(Token::NUMBER);
+ key = this->ExpressionFromLiteral(Token::NUMBER, next_pos, scanner_,
+ factory());
+ break;
}
- return Default();
+ default:
+ if (Token::IsKeyword(next)) {
+ Consume(next);
+ IdentifierT string = this->GetSymbol(scanner_);
+ key = factory()->NewLiteral(string, next_pos);
+ } else {
+ Token::Value next = Next();
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return this->EmptyLiteral();
+ }
}
- bool IsStringLiteral() {
- return code_ == kStringLiteralExpressionStatement;
- }
+ // Validate the property
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
- bool IsUseStrictLiteral() {
- return code_ == kUseStrictExpressionStatement;
- }
+ Expect(Token::COLON, CHECK_OK);
+ ExpressionT value = this->ParseAssignmentExpression(true, CHECK_OK);
- bool IsFunctionDeclaration() {
- return code_ == kFunctionDeclaration;
- }
+ typename Traits::Type::ObjectLiteralProperty property =
+ factory()->NewObjectLiteralProperty(key, value);
- private:
- enum Type {
- kUnknownStatement,
- kStringLiteralExpressionStatement,
- kUseStrictExpressionStatement,
- kFunctionDeclaration
- };
-
- explicit Statement(Type code) : code_(code) {}
- Type code_;
- };
+ // Mark top-level object literals that contain function literals and
+ // pretenure the literal so it can be added as a constant function
+ // property. (Parser only.)
+ this->CheckFunctionLiteralInsideTopLevelObjectLiteral(scope_, value,
+ &has_function);
- enum SourceElements {
- kUnknownSourceElements
- };
+ // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
+ if (this->IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
+ properties->Add(property, zone());
- typedef int Arguments;
+ // TODO(1240767): Consider allowing trailing comma.
+ if (peek() != Token::RBRACE) {
+ // Need {} because of the CHECK_OK macro.
+ Expect(Token::COMMA, CHECK_OK);
+ }
- class Scope {
- public:
- Scope(Scope** variable, ScopeType type)
- : variable_(variable),
- prev_(*variable),
- type_(type),
- materialized_literal_count_(0),
- expected_properties_(0),
- with_nesting_count_(0),
- language_mode_(
- (prev_ != NULL) ? prev_->language_mode() : CLASSIC_MODE),
- is_generator_(false) {
- *variable = this;
- }
- ~Scope() { *variable_ = prev_; }
- void NextMaterializedLiteralIndex() { materialized_literal_count_++; }
- void AddProperty() { expected_properties_++; }
- ScopeType type() { return type_; }
- int expected_properties() { return expected_properties_; }
- int materialized_literal_count() { return materialized_literal_count_; }
- bool IsInsideWith() { return with_nesting_count_ != 0; }
- bool is_generator() { return is_generator_; }
- void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
- bool is_classic_mode() {
- return language_mode_ == CLASSIC_MODE;
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
}
- LanguageMode language_mode() {
- return language_mode_;
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+
+ // Computation of literal_index must happen before the pre-parse bailout.
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
+
+ return factory()->NewObjectLiteral(properties,
+ literal_index,
+ number_of_boilerplate_properties,
+ has_function,
+ pos);
+}
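+// Example: in "{ get foo() { return 1; }, bar: 2 }" the first entry takes
+// the accessor path above (an anonymous getter function literal with
+// GETTER_ARITY), while "bar" falls through to the plain key ':' value
+// case and is checked as a kValueProperty.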
+
+
+template <class Traits>
+typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
+ bool* ok) {
+ // Arguments ::
+ // '(' (AssignmentExpression)*[','] ')'
+
+ typename Traits::Type::ExpressionList result =
+ this->NewExpressionList(4, zone_);
+ Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullExpressionList));
+ bool done = (peek() == Token::RPAREN);
+ while (!done) {
+ ExpressionT argument = this->ParseAssignmentExpression(
+ true, CHECK_OK_CUSTOM(NullExpressionList));
+ result->Add(argument, zone_);
+ if (result->length() > Code::kMaxArguments) {
+ ReportMessage("too_many_arguments");
+ *ok = false;
+ return this->NullExpressionList();
}
- void set_language_mode(LanguageMode language_mode) {
- language_mode_ = language_mode;
+ done = (peek() == Token::RPAREN);
+ if (!done) {
+ // Need {} because of the CHECK_OK_CUSTOM macro.
+ Expect(Token::COMMA, CHECK_OK_CUSTOM(NullExpressionList));
}
+ }
+ Expect(Token::RPAREN, CHECK_OK_CUSTOM(NullExpressionList));
+ return result;
+}
+
+// Precedence = 2
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
+ // AssignmentExpression ::
+ // ConditionalExpression
+ // YieldExpression
+ // LeftHandSideExpression AssignmentOperator AssignmentExpression
+
+ Scanner::Location lhs_location = scanner()->peek_location();
+
+ if (peek() == Token::YIELD && is_generator()) {
+ return this->ParseYieldExpression(ok);
+ }
- class InsideWith {
- public:
- explicit InsideWith(Scope* scope) : scope_(scope) {
- scope->with_nesting_count_++;
- }
+ if (fni_ != NULL) fni_->Enter();
+ ExpressionT expression =
+ this->ParseConditionalExpression(accept_IN, CHECK_OK);
- ~InsideWith() { scope_->with_nesting_count_--; }
+ if (!Token::IsAssignmentOp(peek())) {
+ if (fni_ != NULL) fni_->Leave();
+ // Parsed conditional expression only (no assignment).
+ return expression;
+ }
- private:
- Scope* scope_;
- DISALLOW_COPY_AND_ASSIGN(InsideWith);
- };
+ expression = this->CheckAndRewriteReferenceExpression(
+ expression, lhs_location, "invalid_lhs_in_assignment", CHECK_OK);
+ expression = this->MarkExpressionAsLValue(expression);
+
+ Token::Value op = Next(); // Get assignment operator.
+ int pos = position();
+ ExpressionT right = this->ParseAssignmentExpression(accept_IN, CHECK_OK);
+
+ // TODO(1231235): We try to estimate the set of properties set by
+ // constructors. We define a new property whenever there is an
+ // assignment to a property of 'this'. We should probably only add
+ // properties if we haven't seen them before. Otherwise we'll
+ // probably overestimate the number of properties.
+ if (op == Token::ASSIGN && this->IsThisProperty(expression)) {
+ function_state_->AddProperty();
+ }
- private:
- Scope** const variable_;
- Scope* const prev_;
- const ScopeType type_;
- int materialized_literal_count_;
- int expected_properties_;
- int with_nesting_count_;
- LanguageMode language_mode_;
- bool is_generator_;
- };
+ this->CheckAssigningFunctionLiteralToProperty(expression, right);
+
+ if (fni_ != NULL) {
+ // Check if the right hand side is a call to avoid inferring a
+ // name if we're dealing with "a = function(){...}();"-like
+ // expressions.
+ if ((op == Token::INIT_VAR
+ || op == Token::INIT_CONST_LEGACY
+ || op == Token::ASSIGN)
+ && (!right->IsCall() && !right->IsCallNew())) {
+ fni_->Infer();
+ } else {
+ fni_->RemoveLastFunction();
+ }
+ fni_->Leave();
+ }
- // Report syntax error
- void ReportUnexpectedToken(Token::Value token);
- void ReportMessageAt(Scanner::Location location, const char* type) {
- ReportMessageAt(location, type, NULL);
+ return factory()->NewAssignment(op, expression, right, pos);
+}
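+// Note the right recursion above: "a = b = c" parses as "a = (b = c)",
+// because the right-hand side is itself parsed with
+// ParseAssignmentExpression.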
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseYieldExpression(bool* ok) {
+ // YieldExpression ::
+ // 'yield' '*'? AssignmentExpression
+ int pos = peek_position();
+ Expect(Token::YIELD, CHECK_OK);
+ Yield::Kind kind =
+ Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
+ ExpressionT generator_object =
+ factory()->NewVariableProxy(function_state_->generator_object_variable());
+ ExpressionT expression =
+ ParseAssignmentExpression(false, CHECK_OK);
+ typename Traits::Type::YieldExpression yield =
+ factory()->NewYield(generator_object, expression, kind, pos);
+ if (kind == Yield::DELEGATING) {
+ yield->set_index(function_state_->NextHandlerIndex());
}
- void ReportMessageAt(Scanner::Location location,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
+ return yield;
+}
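+// Example: "yield* gen()" takes the Yield::DELEGATING path above and is
+// assigned a handler index; a plain "yield x" is a Yield::SUSPEND.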
+
+
+// Precedence = 3
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseConditionalExpression(bool accept_IN, bool* ok) {
+ // ConditionalExpression ::
+ // LogicalOrExpression
+ // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
+
+ int pos = peek_position();
+ // We start using the binary expression parser for prec >= 4 only!
+ ExpressionT expression = this->ParseBinaryExpression(4, accept_IN, CHECK_OK);
+ if (peek() != Token::CONDITIONAL) return expression;
+ Consume(Token::CONDITIONAL);
+ // In parsing the first assignment expression in conditional
+ // expressions we always accept the 'in' keyword; see ECMA-262,
+ // section 11.12, page 58.
+ ExpressionT left = ParseAssignmentExpression(true, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
+ ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ return factory()->NewConditional(expression, left, right, pos);
+}
+
+
+// Precedence >= 4
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
+ ASSERT(prec >= 4);
+ ExpressionT x = this->ParseUnaryExpression(CHECK_OK);
+ for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
+ // prec1 >= 4
+ while (Precedence(peek(), accept_IN) == prec1) {
+ Token::Value op = Next();
+ int pos = position();
+ ExpressionT y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
+
+ if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
+ factory())) {
+ continue;
+ }
+
+ // For now we distinguish between comparisons and other binary
+ // operations. (We could combine the two and get rid of this
+ // code and AST node eventually.)
+ if (Token::IsCompareOp(op)) {
+ // We have a comparison.
+ Token::Value cmp = op;
+ switch (op) {
+ case Token::NE: cmp = Token::EQ; break;
+ case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
+ default: break;
+ }
+ x = factory()->NewCompareOperation(cmp, x, y, pos);
+ if (cmp != op) {
+ // The comparison was negated - add a NOT.
+ x = factory()->NewUnaryOperation(Token::NOT, x, pos);
+ }
+
+ } else {
+ // We have a "normal" binary operation.
+ x = factory()->NewBinaryOperation(op, x, y, pos);
+ }
+ }
}
- void ReportMessageAt(int start_pos,
- int end_pos,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(start_pos, end_pos, type, name_opt);
+ return x;
+}
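+// Examples: "1 + 2 * 3" nests by precedence as ADD(1, MUL(2, 3)), and
+// negated comparisons are rewritten, so "a != b" becomes NOT(EQ(a, b))
+// rather than getting an AST node of its own.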
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseUnaryExpression(bool* ok) {
+ // UnaryExpression ::
+ // PostfixExpression
+ // 'delete' UnaryExpression
+ // 'void' UnaryExpression
+ // 'typeof' UnaryExpression
+ // '++' UnaryExpression
+ // '--' UnaryExpression
+ // '+' UnaryExpression
+ // '-' UnaryExpression
+ // '~' UnaryExpression
+ // '!' UnaryExpression
+
+ Token::Value op = peek();
+ if (Token::IsUnaryOp(op)) {
+ op = Next();
+ int pos = position();
+ ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+
+ // "delete identifier" is a syntax error in strict mode.
+ if (op == Token::DELETE && strict_mode() == STRICT &&
+ this->IsIdentifier(expression)) {
+ ReportMessage("strict_delete");
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ // Allow Traits to rewrite the expression.
+ return this->BuildUnaryExpression(expression, op, pos, factory());
+ } else if (Token::IsCountOp(op)) {
+ op = Next();
+ Scanner::Location lhs_location = scanner()->peek_location();
+ ExpressionT expression = this->ParseUnaryExpression(CHECK_OK);
+ expression = this->CheckAndRewriteReferenceExpression(
+ expression, lhs_location, "invalid_lhs_in_prefix_op", CHECK_OK);
+ this->MarkExpressionAsLValue(expression);
+
+ return factory()->NewCountOperation(op,
+ true /* prefix */,
+ expression,
+ position());
+
+ } else {
+ return this->ParsePostfixExpression(ok);
}
+}
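+// Example: "delete foo" on a bare identifier is reported as
+// "strict_delete" in strict mode, while "delete foo.bar" is fine, since
+// the operand there is a property reference rather than an identifier.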
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites.
- Statement ParseSourceElement(bool* ok);
- SourceElements ParseSourceElements(int end_token, bool* ok);
- Statement ParseStatement(bool* ok);
- Statement ParseFunctionDeclaration(bool* ok);
- Statement ParseBlock(bool* ok);
- Statement ParseVariableStatement(VariableDeclarationContext var_context,
- bool* ok);
- Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
- VariableDeclarationProperties* decl_props,
- int* num_decl,
- bool* ok);
- Statement ParseExpressionOrLabelledStatement(bool* ok);
- Statement ParseIfStatement(bool* ok);
- Statement ParseContinueStatement(bool* ok);
- Statement ParseBreakStatement(bool* ok);
- Statement ParseReturnStatement(bool* ok);
- Statement ParseWithStatement(bool* ok);
- Statement ParseSwitchStatement(bool* ok);
- Statement ParseDoWhileStatement(bool* ok);
- Statement ParseWhileStatement(bool* ok);
- Statement ParseForStatement(bool* ok);
- Statement ParseThrowStatement(bool* ok);
- Statement ParseTryStatement(bool* ok);
- Statement ParseDebuggerStatement(bool* ok);
- Expression ParseExpression(bool accept_IN, bool* ok);
- Expression ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression ParseYieldExpression(bool* ok);
- Expression ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression ParseUnaryExpression(bool* ok);
- Expression ParsePostfixExpression(bool* ok);
- Expression ParseLeftHandSideExpression(bool* ok);
- Expression ParseNewExpression(bool* ok);
- Expression ParseMemberExpression(bool* ok);
- Expression ParseMemberWithNewPrefixesExpression(unsigned new_count, bool* ok);
- Expression ParsePrimaryExpression(bool* ok);
- Expression ParseArrayLiteral(bool* ok);
- Expression ParseObjectLiteral(bool* ok);
- Expression ParseRegExpLiteral(bool seen_equal, bool* ok);
- Expression ParseV8Intrinsic(bool* ok);
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParsePostfixExpression(bool* ok) {
+ // PostfixExpression ::
+ // LeftHandSideExpression ('++' | '--')?
- Arguments ParseArguments(bool* ok);
- Expression ParseFunctionLiteral(bool is_generator, bool* ok);
- void ParseLazyFunctionLiteralBody(bool* ok);
+ Scanner::Location lhs_location = scanner()->peek_location();
+ ExpressionT expression = this->ParseLeftHandSideExpression(CHECK_OK);
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ Token::IsCountOp(peek())) {
+ expression = this->CheckAndRewriteReferenceExpression(
+ expression, lhs_location, "invalid_lhs_in_postfix_op", CHECK_OK);
+ expression = this->MarkExpressionAsLValue(expression);
- Identifier ParseIdentifier(bool* ok);
- Identifier ParseIdentifierName(bool* ok);
- Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
+ Token::Value next = Next();
+ expression =
+ factory()->NewCountOperation(next,
+ false /* postfix */,
+ expression,
+ position());
+ }
+ return expression;
+}
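+// Example: the line-terminator check above means that in
+//
+//   a
+//   ++b
+//
+// the "++" is not attached to "a" as a postfix operator; automatic
+// semicolon insertion later turns this into the two statements "a;" and
+// "++b;".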
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseLeftHandSideExpression(bool* ok) {
+ // LeftHandSideExpression ::
+ // (NewExpression | MemberExpression) ...
+
+ ExpressionT result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+
+ while (true) {
+ switch (peek()) {
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
+ int pos = position();
+ ExpressionT index = ParseExpression(true, CHECK_OK);
+ result = factory()->NewProperty(result, index, pos);
+ Expect(Token::RBRACK, CHECK_OK);
+ break;
+ }
- // Logs the currently parsed literal as a symbol in the preparser data.
- void LogSymbol();
- // Log the currently parsed identifier.
- Identifier GetIdentifierSymbol();
- // Log the currently parsed string literal.
- Expression GetStringSymbol();
+ case Token::LPAREN: {
+ int pos;
+ if (scanner()->current_token() == Token::IDENTIFIER) {
+ // For call of an identifier we want to report position of
+ // the identifier as position of the call in the stack trace.
+ pos = position();
+ } else {
+ // For other kinds of calls we record position of the parenthesis as
+ // position of the call. Note that this is extremely important for
+ // expressions of the form function(){...}() for which call position
+ // should not point to the closing brace; otherwise it will intersect
+ // with positions recorded for the function literal and confuse the
+ // debugger.
+ pos = peek_position();
+ // Also the trailing parentheses are a hint that the function will
+ // be called immediately. If we happen to have parsed a preceding
+ // function literal eagerly, we can also compile it eagerly.
+ if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
+ result->AsFunctionLiteral()->set_parenthesized();
+ }
+ }
+ typename Traits::Type::ExpressionList args = ParseArguments(CHECK_OK);
+
+ // Keep track of eval() calls since they disable all local variable
+ // optimizations.
+ // The calls that need special treatment are the
+ // direct eval calls. These calls are all of the form eval(...), with
+ // no explicit receiver.
+ // These calls are marked as potentially direct eval calls. Whether
+ // they are actually direct calls to eval is determined at run time.
+ this->CheckPossibleEvalCall(result, scope_);
+ result = factory()->NewCall(result, args, pos);
+ if (fni_ != NULL) fni_->RemoveLastFunction();
+ break;
+ }
- void set_language_mode(LanguageMode language_mode) {
- scope_->set_language_mode(language_mode);
- }
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
+ int pos = position();
+ IdentifierT name = ParseIdentifierName(CHECK_OK);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
+ if (fni_ != NULL) this->PushLiteralName(fni_, name);
+ break;
+ }
- bool is_classic_mode() {
- return scope_->language_mode() == CLASSIC_MODE;
+ default:
+ return result;
+ }
}
-
- bool is_extended_mode() {
- return scope_->language_mode() == EXTENDED_MODE;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(bool* ok) {
+ // NewExpression ::
+ // ('new')+ MemberExpression
+
+ // The grammar for new expressions is pretty warped. We can have several 'new'
+ // keywords following each other, and then a MemberExpression. When we see '('
+ // after the MemberExpression, it's associated with the rightmost unassociated
+ // 'new' to create a NewExpression with arguments. However, a NewExpression
+ // can also occur without arguments.
+
+ // Examples of new expression:
+ // new foo.bar().baz means (new (foo.bar)()).baz
+ // new foo()() means (new foo())()
+ // new new foo()() means (new (new foo())())
+ // new new foo means new (new foo)
+ // new new foo() means new (new foo())
+ // new new foo().bar().baz means (new (new foo()).bar()).baz
+
+ if (peek() == Token::NEW) {
+ Consume(Token::NEW);
+ int new_pos = position();
+ ExpressionT result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+ if (peek() == Token::LPAREN) {
+ // NewExpression with arguments.
+ typename Traits::Type::ExpressionList args =
+ this->ParseArguments(CHECK_OK);
+ result = factory()->NewCallNew(result, args, new_pos);
+ // The expression can still continue with . or [ after the arguments.
+ result = this->ParseMemberExpressionContinuation(result, CHECK_OK);
+ return result;
+ }
+ // NewExpression without arguments.
+ return factory()->NewCallNew(result, this->NewExpressionList(0, zone_),
+ new_pos);
+ }
+ // No 'new' keyword.
+ return this->ParseMemberExpression(ok);
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseMemberExpression(bool* ok) {
+ // MemberExpression ::
+ // (PrimaryExpression | FunctionLiteral)
+ // ('[' Expression ']' | '.' Identifier | Arguments)*
+
+ // The '[' Expression ']' and '.' Identifier parts are parsed by
+ // ParseMemberExpressionContinuation, and the Arguments part is parsed by the
+ // caller.
+
+ // Parse the initial primary or function expression.
+ ExpressionT result = this->EmptyExpression();
+ if (peek() == Token::FUNCTION) {
+ Consume(Token::FUNCTION);
+ int function_token_position = position();
+ bool is_generator = allow_generators() && Check(Token::MUL);
+ IdentifierT name = this->EmptyIdentifier();
+ bool is_strict_reserved_name = false;
+ Scanner::Location function_name_location = Scanner::Location::invalid();
+ FunctionLiteral::FunctionType function_type =
+ FunctionLiteral::ANONYMOUS_EXPRESSION;
+ if (peek_any_identifier()) {
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
+ CHECK_OK);
+ function_name_location = scanner()->location();
+ function_type = FunctionLiteral::NAMED_EXPRESSION;
+ }
+ result = this->ParseFunctionLiteral(name,
+ function_name_location,
+ is_strict_reserved_name,
+ is_generator,
+ function_token_position,
+ function_type,
+ FunctionLiteral::NORMAL_ARITY,
+ CHECK_OK);
+ } else {
+ result = ParsePrimaryExpression(CHECK_OK);
}
- LanguageMode language_mode() { return scope_->language_mode(); }
+ result = ParseMemberExpressionContinuation(result, CHECK_OK);
+ return result;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseMemberExpressionContinuation(ExpressionT expression,
+ bool* ok) {
+ // Parses this part of MemberExpression:
+ // ('[' Expression ']' | '.' Identifier)*
+ while (true) {
+ switch (peek()) {
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
+ int pos = position();
+ ExpressionT index = this->ParseExpression(true, CHECK_OK);
+ expression = factory()->NewProperty(expression, index, pos);
+ if (fni_ != NULL) {
+ this->PushPropertyName(fni_, index);
+ }
+ Expect(Token::RBRACK, CHECK_OK);
+ break;
+ }
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
+ int pos = position();
+ IdentifierT name = ParseIdentifierName(CHECK_OK);
+ expression = factory()->NewProperty(
+ expression, factory()->NewLiteral(name, pos), pos);
+ if (fni_ != NULL) {
+ this->PushLiteralName(fni_, name);
+ }
+ break;
+ }
+ default:
+ return expression;
+ }
+ }
+ ASSERT(false);
+ return this->EmptyExpression();
+}
+
+
+template <typename Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::CheckAndRewriteReferenceExpression(
+ ExpressionT expression,
+ Scanner::Location location, const char* message, bool* ok) {
+ if (strict_mode() == STRICT && this->IsIdentifier(expression) &&
+ this->IsEvalOrArguments(this->AsIdentifier(expression))) {
+ this->ReportMessageAt(location, "strict_eval_arguments", false);
+ *ok = false;
+ return this->EmptyExpression();
+ } else if (expression->IsValidReferenceExpression()) {
+ return expression;
+ } else if (expression->IsCall()) {
+ // If it is a call, make it a runtime error for legacy web compatibility.
+ // Rewrite `expr' to `expr[throw ReferenceError]'.
+ int pos = location.beg_pos;
+ ExpressionT error = this->NewThrowReferenceError(message, pos);
+ return factory()->NewProperty(expression, error, pos);
+ } else {
+ this->ReportMessageAt(location, message, true);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+}
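+// Example: for legacy web compatibility "foo() = 1" is not a syntax
+// error; the call above is rewritten into "foo()[throw ReferenceError]",
+// so the assignment fails with a ReferenceError at runtime instead.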
- bool CheckInOrOf(bool accept_OF);
- void SetStrictModeViolation(Scanner::Location,
- const char* type,
- bool* ok);
+#undef CHECK_OK
+#undef CHECK_OK_CUSTOM
- void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
- void StrictModeIdentifierViolation(Scanner::Location,
- const char* eval_args_type,
- Identifier identifier,
- bool* ok);
+template <typename Traits>
+void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
+ Token::Value property,
+ PropertyKind type,
+ bool* ok) {
+ int old;
+ if (property == Token::NUMBER) {
+ old = scanner()->FindNumber(&finder_, type);
+ } else {
+ old = scanner()->FindSymbol(&finder_, type);
+ }
+ PropertyKind old_type = static_cast<PropertyKind>(old);
+ if (HasConflict(old_type, type)) {
+ if (IsDataDataConflict(old_type, type)) {
+ // Both are data properties.
+ if (strict_mode_ == SLOPPY) return;
+ parser()->ReportMessage("strict_duplicate_property");
+ } else if (IsDataAccessorConflict(old_type, type)) {
+ // Both a data and an accessor property with the same name.
+ parser()->ReportMessage("accessor_data_property");
+ } else {
+ ASSERT(IsAccessorAccessorConflict(old_type, type));
+ // Both accessors of the same type.
+ parser()->ReportMessage("accessor_get_set");
+ }
+ *ok = false;
+ }
+}
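+// Examples: "{ x: 1, x: 2 }" is rejected only in strict mode
+// ("strict_duplicate_property"); "{ get x() {}, x: 1 }" triggers
+// "accessor_data_property" and "{ get x() {}, get x() {} }" triggers
+// "accessor_get_set" in both modes.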
- ParserRecorder* log_;
- Scope* scope_;
- Scanner::Location strict_mode_violation_location_;
- const char* strict_mode_violation_type_;
- bool parenthesized_function_;
-};
} } // v8::internal
diff --git a/chromium/v8/src/prettyprinter.cc b/chromium/v8/src/prettyprinter.cc
index 4b441b9ae41..f46f6f1b4ad 100644
--- a/chromium/v8/src/prettyprinter.cc
+++ b/chromium/v8/src/prettyprinter.cc
@@ -1,48 +1,25 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdarg.h>
-#include "v8.h"
+#include "src/v8.h"
-#include "prettyprinter.h"
-#include "scopes.h"
-#include "platform.h"
+#include "src/prettyprinter.h"
+#include "src/scopes.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
#ifdef DEBUG
-PrettyPrinter::PrettyPrinter(Isolate* isolate) {
+PrettyPrinter::PrettyPrinter(Zone* zone) {
output_ = NULL;
size_ = 0;
pos_ = 0;
- InitializeAstVisitor(isolate);
+ InitializeAstVisitor(zone);
}
@@ -493,8 +470,8 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}
-void PrettyPrinter::PrintOut(Isolate* isolate, AstNode* node) {
- PrettyPrinter printer(isolate);
+void PrettyPrinter::PrintOut(Zone* zone, AstNode* node) {
+ PrettyPrinter printer(zone);
PrintF("%s", printer.Print(node));
}
@@ -515,9 +492,9 @@ void PrettyPrinter::Print(const char* format, ...) {
for (;;) {
va_list arguments;
va_start(arguments, format);
- int n = OS::VSNPrintF(Vector<char>(output_, size_) + pos_,
- format,
- arguments);
+ int n = VSNPrintF(Vector<char>(output_, size_) + pos_,
+ format,
+ arguments);
va_end(arguments);
if (n >= 0) {
@@ -529,7 +506,7 @@ void PrettyPrinter::Print(const char* format, ...) {
const int slack = 32;
int new_size = size_ + (size_ >> 1) + slack;
char* new_output = NewArray<char>(new_size);
- OS::MemCopy(new_output, output_, pos_);
+ MemCopy(new_output, output_, pos_);
DeleteArray(output_);
output_ = new_output;
size_ = new_size;
@@ -657,7 +634,7 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-AstPrinter::AstPrinter(Isolate* isolate) : PrettyPrinter(isolate), indent_(0) {
+AstPrinter::AstPrinter(Zone* zone) : PrettyPrinter(zone), indent_(0) {
}
@@ -691,9 +668,9 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
PrintLiteralIndented(info, value, true);
} else {
EmbeddedVector<char, 256> buf;
- int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
- Variable::Mode2String(var->mode()));
- OS::SNPrintF(buf + pos, ")");
+ int pos = SNPrintF(buf, "%s (mode = %s", info,
+ Variable::Mode2String(var->mode()));
+ SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
}
@@ -1056,21 +1033,21 @@ void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
Variable* var = node->var();
EmbeddedVector<char, 128> buf;
- int pos = OS::SNPrintF(buf, "VAR PROXY");
+ int pos = SNPrintF(buf, "VAR PROXY");
switch (var->location()) {
case Variable::UNALLOCATED:
break;
case Variable::PARAMETER:
- OS::SNPrintF(buf + pos, " parameter[%d]", var->index());
+ SNPrintF(buf + pos, " parameter[%d]", var->index());
break;
case Variable::LOCAL:
- OS::SNPrintF(buf + pos, " local[%d]", var->index());
+ SNPrintF(buf + pos, " local[%d]", var->index());
break;
case Variable::CONTEXT:
- OS::SNPrintF(buf + pos, " context[%d]", var->index());
+ SNPrintF(buf + pos, " context[%d]", var->index());
break;
case Variable::LOOKUP:
- OS::SNPrintF(buf + pos, " lookup");
+ SNPrintF(buf + pos, " lookup");
break;
}
PrintLiteralWithModeIndented(buf.start(), var, node->name());
@@ -1137,8 +1114,8 @@ void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
void AstPrinter::VisitCountOperation(CountOperation* node) {
EmbeddedVector<char, 128> buf;
- OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
- Token::Name(node->op()));
+ SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
+ Token::Name(node->op()));
IndentedScope indent(this, buf.start());
Visit(node->expression());
}
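
The hunks above also migrate the platform string helpers: OS::SNPrintF, OS::VSNPrintF and OS::MemCopy become free functions in v8::internal, so call sites simply drop the OS:: prefix. A minimal sketch of the resulting call-site shape, reusing names from the patch ('index' is a stand-in for var->index()):

    // Format into a stack-allocated buffer, as AstPrinter does above.
    EmbeddedVector<char, 64> buf;
    int pos = SNPrintF(buf, "VAR PROXY");      // was: OS::SNPrintF(...)
    SNPrintF(buf + pos, " local[%d]", index);  // appends after the prefix
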
diff --git a/chromium/v8/src/prettyprinter.h b/chromium/v8/src/prettyprinter.h
index b7ff2af5fa1..585734ee242 100644
--- a/chromium/v8/src/prettyprinter.h
+++ b/chromium/v8/src/prettyprinter.h
@@ -1,35 +1,12 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PRETTYPRINTER_H_
#define V8_PRETTYPRINTER_H_
-#include "allocation.h"
-#include "ast.h"
+#include "src/allocation.h"
+#include "src/ast.h"
namespace v8 {
namespace internal {
@@ -38,7 +15,7 @@ namespace internal {
class PrettyPrinter: public AstVisitor {
public:
- explicit PrettyPrinter(Isolate* isolate);
+ explicit PrettyPrinter(Zone* zone);
virtual ~PrettyPrinter();
// The following routines print a node into a string.
@@ -50,7 +27,7 @@ class PrettyPrinter: public AstVisitor {
void Print(const char* format, ...);
// Print a node to stdout.
- static void PrintOut(Isolate* isolate, AstNode* node);
+ static void PrintOut(Zone* zone, AstNode* node);
// Individual nodes
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -82,7 +59,7 @@ class PrettyPrinter: public AstVisitor {
// Prints the AST structure
class AstPrinter: public PrettyPrinter {
public:
- explicit AstPrinter(Isolate* isolate);
+ explicit AstPrinter(Zone* zone);
virtual ~AstPrinter();
const char* PrintProgram(FunctionLiteral* program);
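
Since PrettyPrinter and AstPrinter now take the Zone that owns the AST rather than an Isolate, callers migrate mechanically. A hypothetical helper showing the new shape (DumpNode is an illustrative name, not part of the patch; its body mirrors PrettyPrinter::PrintOut above):

    // Print a single AST node to stdout using the AST's zone.
    void DumpNode(AstNode* node, Zone* zone) {
      PrettyPrinter printer(zone);  // was: PrettyPrinter printer(isolate);
      PrintF("%s", printer.Print(node));
    }
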
diff --git a/chromium/v8/src/profile-generator-inl.h b/chromium/v8/src/profile-generator-inl.h
index e363f67761b..58c124fe62b 100644
--- a/chromium/v8/src/profile-generator-inl.h
+++ b/chromium/v8/src/profile-generator-inl.h
@@ -1,34 +1,11 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PROFILE_GENERATOR_INL_H_
#define V8_PROFILE_GENERATOR_INL_H_
-#include "profile-generator.h"
+#include "src/profile-generator.h"
namespace v8 {
namespace internal {
@@ -47,7 +24,7 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
line_number_(line_number),
column_number_(column_number),
shared_id_(0),
- script_id_(v8::Script::kNoScriptId),
+ script_id_(v8::UnboundScript::kNoScriptId),
no_frame_ranges_(NULL),
bailout_reason_(kEmptyBailoutReason) { }
diff --git a/chromium/v8/src/profile-generator.cc b/chromium/v8/src/profile-generator.cc
index acf54da1c7b..5c177923a0f 100644
--- a/chromium/v8/src/profile-generator.cc
+++ b/chromium/v8/src/profile-generator.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "profile-generator-inl.h"
-
-#include "compiler.h"
-#include "debug.h"
-#include "sampler.h"
-#include "global-handles.h"
-#include "scopeinfo.h"
-#include "unicode.h"
-#include "zone-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/profile-generator-inl.h"
+
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/sampler.h"
+#include "src/global-handles.h"
+#include "src/scopeinfo.h"
+#include "src/unicode.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -66,7 +43,7 @@ const char* StringsStorage::GetCopy(const char* src) {
HashMap::Entry* entry = GetEntry(src, len);
if (entry->value == NULL) {
Vector<char> dst = Vector<char>::New(len + 1);
- OS::StrNCpy(dst, src, len);
+ StrNCpy(dst, src, len);
dst[len] = '\0';
entry->key = dst.start();
entry->value = entry->key;
@@ -99,7 +76,7 @@ const char* StringsStorage::AddOrDisposeString(char* str, int len) {
const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
Vector<char> str = Vector<char>::New(1024);
- int len = OS::VSNPrintF(str, format, args);
+ int len = VSNPrintF(str, format, args);
if (len == -1) {
DeleteArray(str.start());
return GetCopy(format);
@@ -231,9 +208,9 @@ ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
void ProfileNode::Print(int indent) {
- OS::Print("%5u %*c %s%s %d #%d %s",
+ OS::Print("%5u %*s %s%s %d #%d %s",
self_ticks_,
- indent, ' ',
+ indent, "",
entry_->name_prefix(),
entry_->name(),
entry_->script_id(),
@@ -352,23 +329,24 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
-CpuProfile::CpuProfile(const char* title, unsigned uid, bool record_samples)
+CpuProfile::CpuProfile(const char* title, bool record_samples)
: title_(title),
- uid_(uid),
record_samples_(record_samples),
- start_time_(Time::NowFromSystemTime()) {
- timer_.Start();
+ start_time_(TimeTicks::HighResolutionNow()) {
}
-void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
+void CpuProfile::AddPath(TimeTicks timestamp, const Vector<CodeEntry*>& path) {
ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
- if (record_samples_) samples_.Add(top_frame_node);
+ if (record_samples_) {
+ timestamps_.Add(timestamp);
+ samples_.Add(top_frame_node);
+ }
}
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
- end_time_ = start_time_ + timer_.Elapsed();
+ end_time_ = TimeTicks::HighResolutionNow();
}
@@ -486,9 +464,8 @@ CpuProfilesCollection::~CpuProfilesCollection() {
}
-bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
+bool CpuProfilesCollection::StartProfiling(const char* title,
bool record_samples) {
- ASSERT(uid > 0);
current_profiles_semaphore_.Wait();
if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
current_profiles_semaphore_.Signal();
@@ -501,7 +478,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
return false;
}
}
- current_profiles_.Add(new CpuProfile(title, uid, record_samples));
+ current_profiles_.Add(new CpuProfile(title, record_samples));
current_profiles_semaphore_.Signal();
return true;
}
@@ -537,9 +514,8 @@ bool CpuProfilesCollection::IsLastProfile(const char* title) {
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
// Called from VM thread for a completed profile.
- unsigned uid = profile->uid();
for (int i = 0; i < finished_profiles_.length(); i++) {
- if (uid == finished_profiles_[i]->uid()) {
+ if (profile == finished_profiles_[i]) {
finished_profiles_.Remove(i);
return;
}
@@ -549,13 +525,13 @@ void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
void CpuProfilesCollection::AddPathToCurrentProfiles(
- const Vector<CodeEntry*>& path) {
+ TimeTicks timestamp, const Vector<CodeEntry*>& path) {
  // As starting / stopping profiles is relatively rare compared to this
  // method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
for (int i = 0; i < current_profiles_.length(); ++i) {
- current_profiles_[i]->AddPath(path);
+ current_profiles_[i]->AddPath(timestamp, path);
}
current_profiles_semaphore_.Signal();
}
@@ -678,7 +654,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
}
- profiles_->AddPathToCurrentProfiles(entries);
+ profiles_->AddPathToCurrentProfiles(sample.timestamp, entries);
}
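
The %*c to %*s switch in ProfileNode::Print above is a behavioral fix, not cosmetics: with width 0, "%*c" still emits its character argument, while "%*s" with an empty string emits nothing, so root nodes no longer pick up a stray leading space. A standalone illustration (not from the patch):

    #include <stdio.h>

    int main(void) {
      printf("[%*c]\n", 0, ' ');  // prints "[ ]": %c always emits one char
      printf("[%*s]\n", 0, "");   // prints "[]":  width 0 of an empty string
      return 0;
    }
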
diff --git a/chromium/v8/src/profile-generator.h b/chromium/v8/src/profile-generator.h
index 6e4758bece7..c89b41a57ba 100644
--- a/chromium/v8/src/profile-generator.h
+++ b/chromium/v8/src/profile-generator.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PROFILE_GENERATOR_H_
#define V8_PROFILE_GENERATOR_H_
-#include "allocation.h"
-#include "hashmap.h"
-#include "../include/v8-profiler.h"
+#include "src/allocation.h"
+#include "src/hashmap.h"
+#include "include/v8-profiler.h"
namespace v8 {
namespace internal {
@@ -196,21 +173,21 @@ class ProfileTree {
class CpuProfile {
public:
- CpuProfile(const char* title, unsigned uid, bool record_samples);
+ CpuProfile(const char* title, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
- void AddPath(const Vector<CodeEntry*>& path);
+ void AddPath(TimeTicks timestamp, const Vector<CodeEntry*>& path);
void CalculateTotalTicksAndSamplingRate();
const char* title() const { return title_; }
- unsigned uid() const { return uid_; }
const ProfileTree* top_down() const { return &top_down_; }
int samples_count() const { return samples_.length(); }
ProfileNode* sample(int index) const { return samples_.at(index); }
+ TimeTicks sample_timestamp(int index) const { return timestamps_.at(index); }
- Time start_time() const { return start_time_; }
- Time end_time() const { return end_time_; }
+ TimeTicks start_time() const { return start_time_; }
+ TimeTicks end_time() const { return end_time_; }
void UpdateTicksScale();
@@ -218,12 +195,11 @@ class CpuProfile {
private:
const char* title_;
- unsigned uid_;
bool record_samples_;
- Time start_time_;
- Time end_time_;
- ElapsedTimer timer_;
+ TimeTicks start_time_;
+ TimeTicks end_time_;
List<ProfileNode*> samples_;
+ List<TimeTicks> timestamps_;
ProfileTree top_down_;
DISALLOW_COPY_AND_ASSIGN(CpuProfile);
@@ -281,7 +257,7 @@ class CpuProfilesCollection {
explicit CpuProfilesCollection(Heap* heap);
~CpuProfilesCollection();
- bool StartProfiling(const char* title, unsigned uid, bool record_samples);
+ bool StartProfiling(const char* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
List<CpuProfile*>* profiles() { return &finished_profiles_; }
const char* GetName(Name* name) {
@@ -308,7 +284,8 @@ class CpuProfilesCollection {
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo);
// Called from profile generator thread.
- void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
+ void AddPathToCurrentProfiles(
+ TimeTicks timestamp, const Vector<CodeEntry*>& path);
// Limits the number of profiles that can be simultaneously collected.
static const int kMaxSimultaneousProfiles = 100;
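
Read together, the profiler hunks remove the numeric uid (RemoveProfile now compares profile pointers) and thread a TimeTicks timestamp from the sampler into every recorded sample. A rough sketch of the new flow using only names from the patch ('heap' and 'entries' are stand-ins for the collection's heap and the sampled stack):

    CpuProfilesCollection profiles(heap);
    profiles.StartProfiling("startup", true);  // record_samples; no uid argument
    // Sampler side: each path is stamped before being recorded.
    profiles.AddPathToCurrentProfiles(TimeTicks::HighResolutionNow(), entries);
    // Consumer side: samples and timestamps are parallel lists.
    CpuProfile* profile = profiles.StopProfiling("startup");
    for (int i = 0; i < profile->samples_count(); i++) {
      ProfileNode* node = profile->sample(i);
      TimeTicks when = profile->sample_timestamp(i);
      // e.g. correlate 'node' with external events via 'when'.
    }
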
diff --git a/chromium/v8/src/promise.js b/chromium/v8/src/promise.js
index 30f4f07b4b7..710abadbeb8 100644
--- a/chromium/v8/src/promise.js
+++ b/chromium/v8/src/promise.js
@@ -1,30 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
"use strict";
@@ -33,273 +9,309 @@
// var $Object = global.Object
// var $WeakMap = global.WeakMap
+// For bootstrapper.
-var $Promise = Promise;
-
-
-//-------------------------------------------------------------------
+var IsPromise;
+var PromiseCreate;
+var PromiseResolve;
+var PromiseReject;
+var PromiseChain;
+var PromiseCatch;
+var PromiseThen;
-// Core functionality.
-
-// Event queue format: [(value, [(handler, deferred)*])*]
-// I.e., a list of value/tasks pairs, where the value is a resolution value or
-// rejection reason, and the tasks are a respective list of handler/deferred
-// pairs waiting for notification of this value. Each handler is an onResolve or
-// onReject function provided to the same call of 'chain' that produced the
-// associated deferred.
-var promiseEvents = new InternalArray;
+// mirror-debugger.js currently uses builtins.promiseStatus. It would be nice
+// if we could move these property names into the closure below.
+// TODO(jkummerow/rossberg/yangguo): Find a better solution.
// Status values: 0 = pending, +1 = resolved, -1 = rejected
-var promiseStatus = NEW_PRIVATE("Promise#status");
-var promiseValue = NEW_PRIVATE("Promise#value");
-var promiseOnResolve = NEW_PRIVATE("Promise#onResolve");
-var promiseOnReject = NEW_PRIVATE("Promise#onReject");
-var promiseRaw = NEW_PRIVATE("Promise#raw");
-
-function IsPromise(x) {
- return IS_SPEC_OBJECT(x) && %HasLocalProperty(x, promiseStatus);
-}
-
-function Promise(resolver) {
- if (resolver === promiseRaw) return;
- var promise = PromiseInit(this);
- resolver(function(x) { PromiseResolve(promise, x) },
- function(r) { PromiseReject(promise, r) });
- // TODO(rossberg): current draft makes exception from this call asynchronous,
- // but that's probably a mistake.
-}
-
-function PromiseSet(promise, status, value, onResolve, onReject) {
- SET_PRIVATE(promise, promiseStatus, status);
- SET_PRIVATE(promise, promiseValue, value);
- SET_PRIVATE(promise, promiseOnResolve, onResolve);
- SET_PRIVATE(promise, promiseOnReject, onReject);
- return promise;
-}
-
-function PromiseInit(promise) {
- return PromiseSet(promise, 0, UNDEFINED, new InternalArray, new InternalArray)
-}
-
-function PromiseDone(promise, status, value, promiseQueue) {
- if (GET_PRIVATE(promise, promiseStatus) !== 0) return;
- PromiseEnqueue(value, GET_PRIVATE(promise, promiseQueue));
- PromiseSet(promise, status, value);
-}
-
-function PromiseResolve(promise, x) {
- PromiseDone(promise, +1, x, promiseOnResolve)
-}
-
-function PromiseReject(promise, r) {
- PromiseDone(promise, -1, r, promiseOnReject)
-}
-
-
-// Convenience.
-
-function PromiseDeferred() {
- if (this === $Promise) {
- // Optimized case, avoid extra closure.
- var promise = PromiseInit(new Promise(promiseRaw));
- return {
- promise: promise,
- resolve: function(x) { PromiseResolve(promise, x) },
- reject: function(r) { PromiseReject(promise, r) }
- };
- } else {
- var result = {};
- result.promise = new this(function(resolve, reject) {
- result.resolve = resolve;
- result.reject = reject;
- })
- return result;
+var promiseStatus = GLOBAL_PRIVATE("Promise#status");
+var promiseValue = GLOBAL_PRIVATE("Promise#value");
+var promiseOnResolve = GLOBAL_PRIVATE("Promise#onResolve");
+var promiseOnReject = GLOBAL_PRIVATE("Promise#onReject");
+var promiseRaw = GLOBAL_PRIVATE("Promise#raw");
+
+(function() {
+
+ var $Promise = function Promise(resolver) {
+ if (resolver === promiseRaw) return;
+ if (!%_IsConstructCall()) throw MakeTypeError('not_a_promise', [this]);
+ if (!IS_SPEC_FUNCTION(resolver))
+ throw MakeTypeError('resolver_not_a_function', [resolver]);
+ var promise = PromiseInit(this);
+ try {
+ %DebugPromiseHandlePrologue(function() { return promise });
+ resolver(function(x) { PromiseResolve(promise, x) },
+ function(r) { PromiseReject(promise, r) });
+ } catch (e) {
+ PromiseReject(promise, e);
+ } finally {
+ %DebugPromiseHandleEpilogue();
+ }
}
-}
-
-function PromiseResolved(x) {
- if (this === $Promise) {
- // Optimized case, avoid extra closure.
- return PromiseSet(new Promise(promiseRaw), +1, x);
- } else {
- return new this(function(resolve, reject) { resolve(x) });
+
+ // Core functionality.
+
+ function PromiseSet(promise, status, value, onResolve, onReject) {
+ SET_PRIVATE(promise, promiseStatus, status);
+ SET_PRIVATE(promise, promiseValue, value);
+ SET_PRIVATE(promise, promiseOnResolve, onResolve);
+ SET_PRIVATE(promise, promiseOnReject, onReject);
+ return promise;
}
-}
-
-function PromiseRejected(r) {
- if (this === $Promise) {
- // Optimized case, avoid extra closure.
- return PromiseSet(new Promise(promiseRaw), -1, r);
- } else {
- return new this(function(resolve, reject) { reject(r) });
+
+ function PromiseInit(promise) {
+ return PromiseSet(
+ promise, 0, UNDEFINED, new InternalArray, new InternalArray)
}
-}
-
-
-// Simple chaining.
-
-function PromiseIdResolveHandler(x) { return x }
-function PromiseIdRejectHandler(r) { throw r }
-
-function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
- onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
- onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
- var deferred = %_CallFunction(this.constructor, PromiseDeferred);
- switch (GET_PRIVATE(this, promiseStatus)) {
- case UNDEFINED:
- throw MakeTypeError('not_a_promise', [this]);
- case 0: // Pending
- GET_PRIVATE(this, promiseOnResolve).push(onResolve, deferred);
- GET_PRIVATE(this, promiseOnReject).push(onReject, deferred);
- break;
- case +1: // Resolved
- PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onResolve, deferred]);
- break;
- case -1: // Rejected
- PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onReject, deferred]);
- break;
+
+ function PromiseDone(promise, status, value, promiseQueue) {
+ if (GET_PRIVATE(promise, promiseStatus) === 0) {
+ PromiseEnqueue(value, GET_PRIVATE(promise, promiseQueue));
+ PromiseSet(promise, status, value);
+ }
}
- return deferred.promise;
-}
-
-function PromiseCatch(onReject) {
- return this.chain(UNDEFINED, onReject);
-}
-
-function PromiseEnqueue(value, tasks) {
- promiseEvents.push(value, tasks);
- %SetMicrotaskPending(true);
-}
-
-function PromiseMicrotaskRunner() {
- var events = promiseEvents;
- if (events.length > 0) {
- promiseEvents = new InternalArray;
- for (var i = 0; i < events.length; i += 2) {
- var value = events[i];
- var tasks = events[i + 1];
- for (var j = 0; j < tasks.length; j += 2) {
- var handler = tasks[j];
- var deferred = tasks[j + 1];
+
+ function PromiseCoerce(constructor, x) {
+ if (!IsPromise(x) && IS_SPEC_OBJECT(x)) {
+ var then;
+ try {
+ then = x.then;
+ } catch(r) {
+ return %_CallFunction(constructor, r, PromiseRejected);
+ }
+ if (IS_SPEC_FUNCTION(then)) {
+ var deferred = %_CallFunction(constructor, PromiseDeferred);
try {
- var result = handler(value);
- if (result === deferred.promise)
- throw MakeTypeError('promise_cyclic', [result]);
- else if (IsPromise(result))
- result.chain(deferred.resolve, deferred.reject);
- else
- deferred.resolve(result);
- } catch(e) {
- // TODO(rossberg): perhaps log uncaught exceptions below.
- try { deferred.reject(e) } catch(e) {}
+ %_CallFunction(x, deferred.resolve, deferred.reject, then);
+ } catch(r) {
+ deferred.reject(r);
}
+ return deferred.promise;
}
}
- }
-}
-RunMicrotasks.runners.push(PromiseMicrotaskRunner);
-
-
-// Multi-unwrapped chaining with thenable coercion.
-
-function PromiseThen(onResolve, onReject) {
- onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
- var that = this;
- var constructor = this.constructor;
- return this.chain(
- function(x) {
- x = PromiseCoerce(constructor, x);
- return x === that ? onReject(MakeTypeError('promise_cyclic', [x])) :
- IsPromise(x) ? x.then(onResolve, onReject) : onResolve(x);
- },
- onReject
- );
-}
-
-PromiseCoerce.table = new $WeakMap;
-
-function PromiseCoerce(constructor, x) {
- var then;
- if (IsPromise(x)) {
return x;
- } else if (!IS_NULL_OR_UNDEFINED(x) && %IsCallable(then = x.then)) {
- if (PromiseCoerce.table.has(x)) {
- return PromiseCoerce.table.get(x);
- } else {
- var deferred = constructor.deferred();
- PromiseCoerce.table.set(x, deferred.promise);
+ }
+
+ function PromiseHandle(value, handler, deferred) {
+ try {
+ %DebugPromiseHandlePrologue(
+ function() {
+ var queue = GET_PRIVATE(deferred.promise, promiseOnReject);
+ return (queue && queue.length == 0) ? deferred.promise : UNDEFINED;
+ });
+ var result = handler(value);
+ if (result === deferred.promise)
+ throw MakeTypeError('promise_cyclic', [result]);
+ else if (IsPromise(result))
+ %_CallFunction(result, deferred.resolve, deferred.reject, PromiseChain);
+ else
+ deferred.resolve(result);
+ } catch (exception) {
try {
- %_CallFunction(x, deferred.resolve, deferred.reject, then);
- } catch(e) {
- deferred.reject(e);
+ %DebugPromiseHandlePrologue(function() { return deferred.promise });
+ deferred.reject(exception);
+ } catch (e) { } finally {
+ %DebugPromiseHandleEpilogue();
}
- return deferred.promise;
+ } finally {
+ %DebugPromiseHandleEpilogue();
}
- } else {
- return x;
}
-}
-
-
-// Combinators.
-
-function PromiseCast(x) {
- // TODO(rossberg): cannot do better until we support @@create.
- return IsPromise(x) ? x : this.resolved(x);
-}
-
-function PromiseAll(values) {
- var deferred = this.deferred();
- var resolutions = [];
- var count = values.length;
- if (count === 0) {
- deferred.resolve(resolutions);
- } else {
- for (var i = 0; i < values.length; ++i) {
- this.cast(values[i]).chain(
- function(i, x) {
- resolutions[i] = x;
- if (--count === 0) deferred.resolve(resolutions);
- }.bind(UNDEFINED, i), // TODO(rossberg): use let loop once available
- function(r) {
- if (count > 0) { count = 0; deferred.reject(r) }
- }
- );
+
+ function PromiseEnqueue(value, tasks) {
+ %EnqueueMicrotask(function() {
+ for (var i = 0; i < tasks.length; i += 2) {
+ PromiseHandle(value, tasks[i], tasks[i + 1])
+ }
+ });
+ }
+
+ function PromiseIdResolveHandler(x) { return x }
+ function PromiseIdRejectHandler(r) { throw r }
+
+ function PromiseNopResolver() {}
+
+ // -------------------------------------------------------------------
+ // Define exported functions.
+
+ // For bootstrapper.
+
+ IsPromise = function IsPromise(x) {
+ return IS_SPEC_OBJECT(x) && HAS_PRIVATE(x, promiseStatus);
+ }
+
+ PromiseCreate = function PromiseCreate() {
+ return new $Promise(PromiseNopResolver)
+ }
+
+ PromiseResolve = function PromiseResolve(promise, x) {
+ PromiseDone(promise, +1, x, promiseOnResolve)
+ }
+
+ PromiseReject = function PromiseReject(promise, r) {
+ PromiseDone(promise, -1, r, promiseOnReject)
+ }
+
+ // Convenience.
+
+ function PromiseDeferred() {
+ if (this === $Promise) {
+ // Optimized case, avoid extra closure.
+ var promise = PromiseInit(new $Promise(promiseRaw));
+ return {
+ promise: promise,
+ resolve: function(x) { PromiseResolve(promise, x) },
+ reject: function(r) { PromiseReject(promise, r) }
+ };
+ } else {
+ var result = {};
+ result.promise = new this(function(resolve, reject) {
+ result.resolve = resolve;
+ result.reject = reject;
+ })
+ return result;
}
}
- return deferred.promise;
-}
-
-function PromiseOne(values) { // a.k.a. race
- var deferred = this.deferred();
- var done = false;
- for (var i = 0; i < values.length; ++i) {
- this.cast(values[i]).chain(
- function(x) { if (!done) { done = true; deferred.resolve(x) } },
- function(r) { if (!done) { done = true; deferred.reject(r) } }
+
+ function PromiseResolved(x) {
+ if (this === $Promise) {
+ // Optimized case, avoid extra closure.
+ return PromiseSet(new $Promise(promiseRaw), +1, x);
+ } else {
+ return new this(function(resolve, reject) { resolve(x) });
+ }
+ }
+
+ function PromiseRejected(r) {
+ if (this === $Promise) {
+ // Optimized case, avoid extra closure.
+ return PromiseSet(new $Promise(promiseRaw), -1, r);
+ } else {
+ return new this(function(resolve, reject) { reject(r) });
+ }
+ }
+
+ // Simple chaining.
+
+ PromiseChain = function PromiseChain(onResolve, onReject) { // a.k.a.
+ // flatMap
+ onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
+ onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
+ var deferred = %_CallFunction(this.constructor, PromiseDeferred);
+ switch (GET_PRIVATE(this, promiseStatus)) {
+ case UNDEFINED:
+ throw MakeTypeError('not_a_promise', [this]);
+ case 0: // Pending
+ GET_PRIVATE(this, promiseOnResolve).push(onResolve, deferred);
+ GET_PRIVATE(this, promiseOnReject).push(onReject, deferred);
+ break;
+ case +1: // Resolved
+ PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onResolve, deferred]);
+ break;
+ case -1: // Rejected
+ PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onReject, deferred]);
+ break;
+ }
+ return deferred.promise;
+ }
+
+ PromiseCatch = function PromiseCatch(onReject) {
+ return this.then(UNDEFINED, onReject);
+ }
+
+ // Multi-unwrapped chaining with thenable coercion.
+
+ PromiseThen = function PromiseThen(onResolve, onReject) {
+ onResolve = IS_SPEC_FUNCTION(onResolve) ? onResolve
+ : PromiseIdResolveHandler;
+ onReject = IS_SPEC_FUNCTION(onReject) ? onReject
+ : PromiseIdRejectHandler;
+ var that = this;
+ var constructor = this.constructor;
+ return %_CallFunction(
+ this,
+ function(x) {
+ x = PromiseCoerce(constructor, x);
+ return x === that ? onReject(MakeTypeError('promise_cyclic', [x])) :
+ IsPromise(x) ? x.then(onResolve, onReject) : onResolve(x);
+ },
+ onReject,
+ PromiseChain
);
}
- return deferred.promise;
-}
-//-------------------------------------------------------------------
+ // Combinators.
+
+ function PromiseCast(x) {
+ // TODO(rossberg): cannot do better until we support @@create.
+ return IsPromise(x) ? x : new this(function(resolve) { resolve(x) });
+ }
+
+ function PromiseAll(values) {
+ var deferred = %_CallFunction(this, PromiseDeferred);
+ var resolutions = [];
+ if (!%_IsArray(values)) {
+ deferred.reject(MakeTypeError('invalid_argument'));
+ return deferred.promise;
+ }
+ try {
+ var count = values.length;
+ if (count === 0) {
+ deferred.resolve(resolutions);
+ } else {
+ for (var i = 0; i < values.length; ++i) {
+ this.resolve(values[i]).then(
+ function(i, x) {
+ resolutions[i] = x;
+ if (--count === 0) deferred.resolve(resolutions);
+ }.bind(UNDEFINED, i), // TODO(rossberg): use let loop once
+ // available
+ function(r) { deferred.reject(r) }
+ );
+ }
+ }
+ } catch (e) {
+ deferred.reject(e)
+ }
+ return deferred.promise;
+ }
+
+ function PromiseOne(values) {
+ var deferred = %_CallFunction(this, PromiseDeferred);
+ if (!%_IsArray(values)) {
+ deferred.reject(MakeTypeError('invalid_argument'));
+ return deferred.promise;
+ }
+ try {
+ for (var i = 0; i < values.length; ++i) {
+ this.resolve(values[i]).then(
+ function(x) { deferred.resolve(x) },
+ function(r) { deferred.reject(r) }
+ );
+ }
+ } catch (e) {
+ deferred.reject(e)
+ }
+ return deferred.promise;
+ }
+
+ // -------------------------------------------------------------------
+ // Install exported functions.
-function SetUpPromise() {
- %CheckIsBootstrapping()
- global.Promise = $Promise;
+ %CheckIsBootstrapping();
+ %SetProperty(global, 'Promise', $Promise, DONT_ENUM);
InstallFunctions($Promise, DONT_ENUM, [
- "deferred", PromiseDeferred,
- "resolved", PromiseResolved,
- "rejected", PromiseRejected,
+ "defer", PromiseDeferred,
+ "accept", PromiseResolved,
+ "reject", PromiseRejected,
"all", PromiseAll,
- "one", PromiseOne,
- "cast", PromiseCast
+ "race", PromiseOne,
+ "resolve", PromiseCast
]);
InstallFunctions($Promise.prototype, DONT_ENUM, [
"chain", PromiseChain,
"then", PromiseThen,
"catch", PromiseCatch
]);
-}
-SetUpPromise();
+})();
diff --git a/chromium/v8/src/property-details-inl.h b/chromium/v8/src/property-details-inl.h
new file mode 100644
index 00000000000..eaa596f9daf
--- /dev/null
+++ b/chromium/v8/src/property-details-inl.h
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROPERTY_DETAILS_INL_H_
+#define V8_PROPERTY_DETAILS_INL_H_
+
+#include "src/conversions.h"
+#include "src/objects.h"
+#include "src/property-details.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+
+inline bool Representation::CanContainDouble(double value) {
+ if (IsDouble() || is_more_general_than(Representation::Double())) {
+ return true;
+ }
+ if (IsInt32Double(value)) {
+ if (IsInteger32()) return true;
+ if (IsSmi()) return Smi::IsValid(static_cast<int32_t>(value));
+ }
+ return false;
+}
+
+
+Representation Representation::FromType(Type* type) {
+ DisallowHeapAllocation no_allocation;
+ if (type->Is(Type::None())) return Representation::None();
+ if (type->Is(Type::SignedSmall())) return Representation::Smi();
+ if (type->Is(Type::Signed32())) return Representation::Integer32();
+ if (type->Is(Type::Number())) return Representation::Double();
+ return Representation::Tagged();
+}
+
+} } // namespace v8::internal
+
+#endif // V8_PROPERTY_DETAILS_INL_H_
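
The two new inline helpers compose naturally: FromType narrows a semantic type to the cheapest representation, and CanContainDouble asks whether a concrete double fits a representation without generalizing it. A hedged usage sketch (the Type* setup is elided; 'type' is a stand-in):

    Representation r = Representation::FromType(type);
    // Per the logic above: 3.5 only fits Double (or anything more general),
    // while 42.0 also fits Integer32 and Smi (Smi::IsValid(42) holds).
    bool fits_frac = r.CanContainDouble(3.5);
    bool fits_int  = r.CanContainDouble(42.0);
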
diff --git a/chromium/v8/src/property-details.h b/chromium/v8/src/property-details.h
index 200657f11f0..cfe257efcc4 100644
--- a/chromium/v8/src/property-details.h
+++ b/chromium/v8/src/property-details.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PROPERTY_DETAILS_H_
#define V8_PROPERTY_DETAILS_H_
-#include "../include/v8.h"
-#include "allocation.h"
-#include "utils.h"
+#include "include/v8.h"
+#include "src/allocation.h"
+#include "src/utils.h"
// Ecma-262 3rd 8.6.1
enum PropertyAttributes {
@@ -42,9 +19,12 @@ enum PropertyAttributes {
SEALED = DONT_DELETE,
FROZEN = SEALED | READ_ONLY,
- SYMBOLIC = 8, // Used to filter symbol names
- DONT_SHOW = DONT_ENUM | SYMBOLIC,
- ABSENT = 16 // Used in runtime to indicate a property is absent.
+ STRING = 8, // Used to filter symbols and string names
+ SYMBOLIC = 16,
+ PRIVATE_SYMBOL = 32,
+
+ DONT_SHOW = DONT_ENUM | SYMBOLIC | PRIVATE_SYMBOL,
+ ABSENT = 64 // Used in runtime to indicate a property is absent.
// ABSENT can never be stored in or returned from a descriptor's attributes
// bitfield. It is only used as a return value meaning the attributes of
// a non-existent property.
@@ -55,7 +35,9 @@ namespace v8 {
namespace internal {
class Smi;
-class Type;
+template<class> class TypeImpl;
+struct ZoneTypeConfig;
+typedef TypeImpl<ZoneTypeConfig> Type;
class TypeInfo;
// Type of properties.
@@ -72,9 +54,8 @@ enum PropertyType {
// Only in lookup results, not in descriptors.
HANDLER = 4,
INTERCEPTOR = 5,
- TRANSITION = 6,
// Only used as a marker in LookupResult.
- NONEXISTENT = 7
+ NONEXISTENT = 6
};
@@ -102,9 +83,7 @@ class Representation {
static Representation Integer8() { return Representation(kInteger8); }
static Representation UInteger8() { return Representation(kUInteger8); }
static Representation Integer16() { return Representation(kInteger16); }
- static Representation UInteger16() {
- return Representation(kUInteger16);
- }
+ static Representation UInteger16() { return Representation(kUInteger16); }
static Representation Smi() { return Representation(kSmi); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
@@ -113,9 +92,7 @@ class Representation {
static Representation FromKind(Kind kind) { return Representation(kind); }
- // TODO(rossberg): this should die eventually.
- static Representation FromType(TypeInfo info);
- static Representation FromType(Handle<Type> type);
+ static Representation FromType(Type* type);
bool Equals(const Representation& other) const {
return kind_ == other.kind_;
@@ -147,6 +124,8 @@ class Representation {
return other.is_more_general_than(*this) || other.Equals(*this);
}
+ bool CanContainDouble(double value);
+
Representation generalize(Representation other) {
if (other.fits_into(*this)) return *this;
if (other.is_more_general_than(*this)) return other;
@@ -232,11 +211,11 @@ class PropertyDetails BASE_EMBEDDED {
| FieldIndexField::encode(field_index);
}
- int pointer() { return DescriptorPointer::decode(value_); }
+ int pointer() const { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
- PropertyDetails CopyWithRepresentation(Representation representation) {
+ PropertyDetails CopyWithRepresentation(Representation representation) const {
return PropertyDetails(value_, representation);
}
PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) {
@@ -247,7 +226,7 @@ class PropertyDetails BASE_EMBEDDED {
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi();
+ inline Smi* AsSmi() const;
static uint8_t EncodeRepresentation(Representation representation) {
return representation.kind();
@@ -257,26 +236,26 @@ class PropertyDetails BASE_EMBEDDED {
return Representation::FromKind(static_cast<Representation::Kind>(bits));
}
- PropertyType type() { return TypeField::decode(value_); }
+ PropertyType type() const { return TypeField::decode(value_); }
PropertyAttributes attributes() const {
return AttributesField::decode(value_);
}
- int dictionary_index() {
+ int dictionary_index() const {
return DictionaryStorageField::decode(value_);
}
- Representation representation() {
+ Representation representation() const {
ASSERT(type() != NORMAL);
return DecodeRepresentation(RepresentationField::decode(value_));
}
- int field_index() {
+ int field_index() const {
return FieldIndexField::decode(value_);
}
- inline PropertyDetails AsDeleted();
+ inline PropertyDetails AsDeleted() const;
static bool IsValidIndex(int index) {
return DictionaryStorageField::is_valid(index);
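
Note the renumbering in the PropertyAttributes hunk above: STRING (8) and PRIVATE_SYMBOL (32) are new, SYMBOLIC moves from 8 to 16, ABSENT from 16 to 64, and DONT_SHOW now filters both ordinary and private symbols. An illustrative mask check under those values ('attributes' is a stand-in, not code from the patch):

    // DONT_SHOW == DONT_ENUM | SYMBOLIC | PRIVATE_SYMBOL under the new bits.
    bool hidden = (attributes & DONT_SHOW) != 0;
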
diff --git a/chromium/v8/src/property.cc b/chromium/v8/src/property.cc
index 2f72eec48ec..24b39a95921 100644
--- a/chromium/v8/src/property.cc
+++ b/chromium/v8/src/property.cc
@@ -1,36 +1,14 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/property.h"
+
+#include "src/handles-inl.h"
namespace v8 {
namespace internal {
-
void LookupResult::Iterate(ObjectVisitor* visitor) {
LookupResult* current = this; // Could be NULL.
while (current != NULL) {
@@ -51,6 +29,11 @@ void LookupResult::Print(FILE* out) {
PrintF(out, "LookupResult:\n");
PrintF(out, " -cacheable = %s\n", IsCacheable() ? "true" : "false");
PrintF(out, " -attributes = %x\n", GetAttributes());
+ if (IsTransition()) {
+ PrintF(out, " -transition target:\n");
+ GetTransitionTarget()->Print(out);
+ PrintF(out, "\n");
+ }
switch (type()) {
case NORMAL:
PrintF(out, " -type = normal\n");
@@ -64,8 +47,10 @@ void LookupResult::Print(FILE* out) {
break;
case FIELD:
PrintF(out, " -type = field\n");
- PrintF(out, " -index = %d", GetFieldIndex().field_index());
- PrintF(out, "\n");
+ PrintF(out, " -index = %d\n",
+ GetFieldIndex().property_index());
+ PrintF(out, " -field type:\n");
+ GetFieldType()->TypePrint(out);
break;
case CALLBACKS:
PrintF(out, " -type = call backs\n");
@@ -78,29 +63,6 @@ void LookupResult::Print(FILE* out) {
case INTERCEPTOR:
PrintF(out, " -type = lookup interceptor\n");
break;
- case TRANSITION:
- switch (GetTransitionDetails().type()) {
- case FIELD:
- PrintF(out, " -type = map transition\n");
- PrintF(out, " -map:\n");
- GetTransitionTarget()->Print(out);
- PrintF(out, "\n");
- return;
- case CONSTANT:
- PrintF(out, " -type = constant property transition\n");
- PrintF(out, " -map:\n");
- GetTransitionTarget()->Print(out);
- PrintF(out, "\n");
- return;
- case CALLBACKS:
- PrintF(out, " -type = callbacks transition\n");
- PrintF(out, " -callback object:\n");
- GetCallbackObject()->Print(out);
- return;
- default:
- UNREACHABLE();
- return;
- }
case NONEXISTENT:
UNREACHABLE();
break;
@@ -114,9 +76,6 @@ void Descriptor::Print(FILE* out) {
PrintF(out, " @ ");
GetValue()->ShortPrint(out);
}
-
-
#endif
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/property.h b/chromium/v8/src/property.h
index da772dc86c3..ebb43430a04 100644
--- a/chromium/v8/src/property.h
+++ b/chromium/v8/src/property.h
@@ -1,60 +1,35 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_PROPERTY_H_
#define V8_PROPERTY_H_
-#include "allocation.h"
-#include "transitions.h"
+#include "src/isolate.h"
+#include "src/factory.h"
+#include "src/field-index.h"
+#include "src/field-index-inl.h"
+#include "src/types.h"
namespace v8 {
namespace internal {
-
// Abstraction for elements in instance-descriptor arrays.
//
// Each descriptor has a key, property attributes, property type,
// property index (in the actual instance-descriptor array) and
// optionally a piece of data.
-//
-
class Descriptor BASE_EMBEDDED {
public:
- MUST_USE_RESULT MaybeObject* KeyToUniqueName() {
+ void KeyToUniqueName() {
if (!key_->IsUniqueName()) {
- MaybeObject* maybe_result =
- key_->GetIsolate()->heap()->InternalizeString(String::cast(key_));
- if (!maybe_result->To(&key_)) return maybe_result;
+ key_ = key_->GetIsolate()->factory()->InternalizeString(
+ Handle<String>::cast(key_));
}
- return key_;
}
- Name* GetKey() { return key_; }
- Object* GetValue() { return value_; }
+ Handle<Name> GetKey() { return key_; }
+ Handle<Object> GetValue() { return value_; }
PropertyDetails GetDetails() { return details_; }
#ifdef OBJECT_PRINT
@@ -64,26 +39,26 @@ class Descriptor BASE_EMBEDDED {
void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
private:
- Name* key_;
- Object* value_;
+ Handle<Name> key_;
+ Handle<Object> value_;
PropertyDetails details_;
protected:
Descriptor() : details_(Smi::FromInt(0)) {}
- void Init(Name* key, Object* value, PropertyDetails details) {
+ void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
key_ = key;
value_ = value;
details_ = details;
}
- Descriptor(Name* key, Object* value, PropertyDetails details)
+ Descriptor(Handle<Name> key, Handle<Object> value, PropertyDetails details)
: key_(key),
value_(value),
details_(details) { }
- Descriptor(Name* key,
- Object* value,
+ Descriptor(Handle<Name> key,
+ Handle<Object> value,
PropertyAttributes attributes,
PropertyType type,
Representation representation,
@@ -93,91 +68,49 @@ class Descriptor BASE_EMBEDDED {
details_(attributes, type, representation, field_index) { }
friend class DescriptorArray;
+ friend class Map;
};
-class FieldDescriptor: public Descriptor {
+class FieldDescriptor V8_FINAL : public Descriptor {
public:
- FieldDescriptor(Name* key,
+ FieldDescriptor(Handle<Name> key,
int field_index,
PropertyAttributes attributes,
Representation representation)
- : Descriptor(key, Smi::FromInt(0), attributes,
+ : Descriptor(key, HeapType::Any(key->GetIsolate()), attributes,
FIELD, representation, field_index) {}
+ FieldDescriptor(Handle<Name> key,
+ int field_index,
+ Handle<HeapType> field_type,
+ PropertyAttributes attributes,
+ Representation representation)
+ : Descriptor(key, field_type, attributes, FIELD,
+ representation, field_index) { }
};
-class ConstantDescriptor: public Descriptor {
+class ConstantDescriptor V8_FINAL : public Descriptor {
public:
- ConstantDescriptor(Name* key,
- Object* value,
+ ConstantDescriptor(Handle<Name> key,
+ Handle<Object> value,
PropertyAttributes attributes)
: Descriptor(key, value, attributes, CONSTANT,
value->OptimalRepresentation()) {}
};
-class CallbacksDescriptor: public Descriptor {
+class CallbacksDescriptor V8_FINAL : public Descriptor {
public:
- CallbacksDescriptor(Name* key,
- Object* foreign,
+ CallbacksDescriptor(Handle<Name> key,
+ Handle<Object> foreign,
PropertyAttributes attributes)
: Descriptor(key, foreign, attributes, CALLBACKS,
Representation::Tagged()) {}
};
-// Holds a property index value distinguishing if it is a field index or an
-// index inside the object header.
-class PropertyIndex {
- public:
- static PropertyIndex NewFieldIndex(int index) {
- return PropertyIndex(index, false);
- }
- static PropertyIndex NewHeaderIndex(int index) {
- return PropertyIndex(index, true);
- }
-
- bool is_field_index() { return (index_ & kHeaderIndexBit) == 0; }
- bool is_header_index() { return (index_ & kHeaderIndexBit) != 0; }
-
- int field_index() {
- ASSERT(is_field_index());
- return value();
- }
- int header_index() {
- ASSERT(is_header_index());
- return value();
- }
-
- bool is_inobject(Handle<JSObject> holder) {
- if (is_header_index()) return true;
- return field_index() < holder->map()->inobject_properties();
- }
-
- int translate(Handle<JSObject> holder) {
- if (is_header_index()) return header_index();
- int index = field_index() - holder->map()->inobject_properties();
- if (index >= 0) return index;
- return index + holder->map()->instance_size() / kPointerSize;
- }
-
- private:
- static const int kHeaderIndexBit = 1 << 31;
- static const int kIndexMask = ~kHeaderIndexBit;
-
- int value() { return index_ & kIndexMask; }
-
- PropertyIndex(int index, bool is_header_based)
- : index_(index | (is_header_based ? kHeaderIndexBit : 0)) {
- ASSERT(index <= kIndexMask);
- }
-
- int index_;
-};
-
-
-class LookupResult BASE_EMBEDDED {
+class LookupResult V8_FINAL BASE_EMBEDDED {
public:
explicit LookupResult(Isolate* isolate)
: isolate_(isolate),
@@ -187,12 +120,12 @@ class LookupResult BASE_EMBEDDED {
transition_(NULL),
cacheable_(true),
details_(NONE, NONEXISTENT, Representation::None()) {
- isolate->SetTopLookupResult(this);
+ isolate->set_top_lookup_result(this);
}
~LookupResult() {
ASSERT(isolate()->top_lookup_result() == this);
- isolate()->SetTopLookupResult(next_);
+ isolate()->set_top_lookup_result(next_);
}
Isolate* isolate() const { return isolate_; }
@@ -200,23 +133,39 @@ class LookupResult BASE_EMBEDDED {
void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
lookup_type_ = DESCRIPTOR_TYPE;
holder_ = holder;
+ transition_ = NULL;
details_ = details;
number_ = number;
- transition_ = NULL;
}
- bool CanHoldValue(Handle<Object> value) {
- if (IsNormal()) return true;
- ASSERT(!IsTransition());
- return value->FitsRepresentation(details_.representation());
+ bool CanHoldValue(Handle<Object> value) const {
+ switch (type()) {
+ case NORMAL:
+ return true;
+ case FIELD:
+ return value->FitsRepresentation(representation()) &&
+ GetFieldType()->NowContains(value);
+ case CONSTANT:
+ ASSERT(GetConstant() != *value ||
+ value->FitsRepresentation(representation()));
+ return GetConstant() == *value;
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ return true;
+ case NONEXISTENT:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return true;
}
void TransitionResult(JSObject* holder, Map* target) {
lookup_type_ = TRANSITION_TYPE;
- details_ = PropertyDetails(NONE, TRANSITION, Representation::None());
+ number_ = target->LastAdded();
+ details_ = target->instance_descriptors()->GetDetails(number_);
holder_ = holder;
transition_ = target;
- number_ = 0xAAAA;
}
void DictionaryResult(JSObject* holder, int entry) {
@@ -246,200 +195,216 @@ class LookupResult BASE_EMBEDDED {
lookup_type_ = NOT_FOUND;
details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
holder_ = NULL;
+ transition_ = NULL;
}
- JSObject* holder() {
+ JSObject* holder() const {
ASSERT(IsFound());
return JSObject::cast(holder_);
}
- JSProxy* proxy() {
+ JSProxy* proxy() const {
ASSERT(IsHandler());
return JSProxy::cast(holder_);
}
- PropertyType type() {
+ PropertyType type() const {
ASSERT(IsFound());
return details_.type();
}
- Representation representation() {
+ Representation representation() const {
ASSERT(IsFound());
- ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.representation();
}
- PropertyAttributes GetAttributes() {
- ASSERT(!IsTransition());
+ PropertyAttributes GetAttributes() const {
ASSERT(IsFound());
ASSERT(details_.type() != NONEXISTENT);
return details_.attributes();
}
- PropertyDetails GetPropertyDetails() {
- ASSERT(!IsTransition());
+ PropertyDetails GetPropertyDetails() const {
return details_;
}
- bool IsFastPropertyType() {
+ bool IsFastPropertyType() const {
ASSERT(IsFound());
return IsTransition() || type() != NORMAL;
}
  // Property callbacks do not include transitions to callbacks.
- bool IsPropertyCallbacks() {
+ bool IsPropertyCallbacks() const {
ASSERT(!(details_.type() == CALLBACKS && !IsFound()));
- return details_.type() == CALLBACKS;
+ return !IsTransition() && details_.type() == CALLBACKS;
}
- bool IsReadOnly() {
+ bool IsReadOnly() const {
ASSERT(IsFound());
- ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.IsReadOnly();
}
- bool IsField() {
+ bool IsField() const {
ASSERT(!(details_.type() == FIELD && !IsFound()));
- return details_.type() == FIELD;
+ return IsDescriptorOrDictionary() && type() == FIELD;
}
- bool IsNormal() {
+ bool IsNormal() const {
ASSERT(!(details_.type() == NORMAL && !IsFound()));
- return details_.type() == NORMAL;
+ return IsDescriptorOrDictionary() && type() == NORMAL;
}
- bool IsConstant() {
+ bool IsConstant() const {
ASSERT(!(details_.type() == CONSTANT && !IsFound()));
- return details_.type() == CONSTANT;
+ return IsDescriptorOrDictionary() && type() == CONSTANT;
}
- bool IsConstantFunction() {
- return IsConstant() && GetValue()->IsJSFunction();
+ bool IsConstantFunction() const {
+ return IsConstant() && GetConstant()->IsJSFunction();
}
- bool IsDontDelete() { return details_.IsDontDelete(); }
- bool IsDontEnum() { return details_.IsDontEnum(); }
- bool IsFound() { return lookup_type_ != NOT_FOUND; }
- bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
- bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
- bool IsInterceptor() { return lookup_type_ == INTERCEPTOR_TYPE; }
+ bool IsDontDelete() const { return details_.IsDontDelete(); }
+ bool IsDontEnum() const { return details_.IsDontEnum(); }
+ bool IsFound() const { return lookup_type_ != NOT_FOUND; }
+ bool IsDescriptorOrDictionary() const {
+ return lookup_type_ == DESCRIPTOR_TYPE || lookup_type_ == DICTIONARY_TYPE;
+ }
+ bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
+ bool IsHandler() const { return lookup_type_ == HANDLER_TYPE; }
+ bool IsInterceptor() const { return lookup_type_ == INTERCEPTOR_TYPE; }
// Is the result a property, excluding transitions and the null descriptor?
- bool IsProperty() {
+ bool IsProperty() const {
return IsFound() && !IsTransition();
}
- bool IsDataProperty() {
- switch (type()) {
- case FIELD:
- case NORMAL:
- case CONSTANT:
- return true;
- case CALLBACKS: {
- Object* callback = GetCallbackObject();
- return callback->IsAccessorInfo() || callback->IsForeign();
- }
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
+ bool IsDataProperty() const {
+ switch (lookup_type_) {
+ case NOT_FOUND:
+ case TRANSITION_TYPE:
+ case HANDLER_TYPE:
+ case INTERCEPTOR_TYPE:
return false;
+
+ case DESCRIPTOR_TYPE:
+ case DICTIONARY_TYPE:
+ switch (type()) {
+ case FIELD:
+ case NORMAL:
+ case CONSTANT:
+ return true;
+ case CALLBACKS: {
+ Object* callback = GetCallbackObject();
+ ASSERT(!callback->IsForeign());
+ return callback->IsAccessorInfo();
+ }
+ case HANDLER:
+ case INTERCEPTOR:
+ case NONEXISTENT:
+ UNREACHABLE();
+ return false;
+ }
}
UNREACHABLE();
return false;
}
- bool IsCacheable() { return cacheable_; }
+ bool IsCacheable() const { return cacheable_; }
void DisallowCaching() { cacheable_ = false; }
- Object* GetLazyValue() {
- switch (type()) {
- case FIELD:
- return holder()->RawFastPropertyAt(GetFieldIndex().field_index());
- case NORMAL: {
- Object* value;
- value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
- if (holder()->IsGlobalObject()) {
- value = PropertyCell::cast(value)->value();
- }
- return value;
- }
- case CONSTANT:
- return GetConstant();
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
+ Object* GetLazyValue() const {
+ switch (lookup_type_) {
+ case NOT_FOUND:
+ case TRANSITION_TYPE:
+ case HANDLER_TYPE:
+ case INTERCEPTOR_TYPE:
return isolate()->heap()->the_hole_value();
+
+ case DESCRIPTOR_TYPE:
+ case DICTIONARY_TYPE:
+ switch (type()) {
+ case FIELD:
+ return holder()->RawFastPropertyAt(GetFieldIndex());
+ case NORMAL: {
+ Object* value = holder()->property_dictionary()->ValueAt(
+ GetDictionaryEntry());
+ if (holder()->IsGlobalObject()) {
+ value = PropertyCell::cast(value)->value();
+ }
+ return value;
+ }
+ case CONSTANT:
+ return GetConstant();
+ case CALLBACKS:
+ return isolate()->heap()->the_hole_value();
+ case HANDLER:
+ case INTERCEPTOR:
+ case NONEXISTENT:
+ UNREACHABLE();
+ return NULL;
+ }
}
UNREACHABLE();
return NULL;
}
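  // Annotation: GetLazyValue() now dispatches on lookup_type_ before touching
  // details_, so NOT_FOUND, transition, handler and interceptor results
  // return the hole without ever asserting on a property type.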
- Map* GetTransitionTarget() {
+ Map* GetTransitionTarget() const {
+ ASSERT(IsTransition());
return transition_;
}
- PropertyDetails GetTransitionDetails() {
- return transition_->GetLastDescriptorDetails();
+ bool IsTransitionToField() const {
+ return IsTransition() && details_.type() == FIELD;
}
- bool IsTransitionToField() {
- return IsTransition() && GetTransitionDetails().type() == FIELD;
+ bool IsTransitionToConstant() const {
+ return IsTransition() && details_.type() == CONSTANT;
}
- bool IsTransitionToConstant() {
- return IsTransition() && GetTransitionDetails().type() == CONSTANT;
- }
-
- int GetTransitionIndex() {
- ASSERT(IsTransition());
- return number_;
- }
-
- int GetDescriptorIndex() {
+ int GetDescriptorIndex() const {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
return number_;
}
- PropertyIndex GetFieldIndex() {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map()));
+ FieldIndex GetFieldIndex() const {
+ ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
+ lookup_type_ == TRANSITION_TYPE);
+ return FieldIndex::ForLookupResult(this);
}
- int GetLocalFieldIndexFromMap(Map* map) {
+ int GetLocalFieldIndexFromMap(Map* map) const {
return GetFieldIndexFromMap(map) - map->inobject_properties();
}
- int GetDictionaryEntry() {
+ int GetDictionaryEntry() const {
ASSERT(lookup_type_ == DICTIONARY_TYPE);
return number_;
}
- JSFunction* GetConstantFunction() {
+ JSFunction* GetConstantFunction() const {
ASSERT(type() == CONSTANT);
return JSFunction::cast(GetValue());
}
- Object* GetConstantFromMap(Map* map) {
+ Object* GetConstantFromMap(Map* map) const {
ASSERT(type() == CONSTANT);
return GetValueFromMap(map);
}
- JSFunction* GetConstantFunctionFromMap(Map* map) {
+ JSFunction* GetConstantFunctionFromMap(Map* map) const {
return JSFunction::cast(GetConstantFromMap(map));
}
- Object* GetConstant() {
+ Object* GetConstant() const {
ASSERT(type() == CONSTANT);
return GetValue();
}
- Object* GetCallbackObject() {
- ASSERT(type() == CALLBACKS && !IsTransition());
+ Object* GetCallbackObject() const {
+ ASSERT(!IsTransition());
+ ASSERT(type() == CALLBACKS);
return GetValue();
}
@@ -447,9 +412,11 @@ class LookupResult BASE_EMBEDDED {
void Print(FILE* out);
#endif
- Object* GetValue() {
+ Object* GetValue() const {
if (lookup_type_ == DESCRIPTOR_TYPE) {
return GetValueFromMap(holder()->map());
+ } else if (lookup_type_ == TRANSITION_TYPE) {
+ return GetValueFromMap(transition_);
}
// In the dictionary case, the data is held in the value field.
ASSERT(lookup_type_ == DICTIONARY_TYPE);
@@ -457,17 +424,46 @@ class LookupResult BASE_EMBEDDED {
}
Object* GetValueFromMap(Map* map) const {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+ ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
+ lookup_type_ == TRANSITION_TYPE);
ASSERT(number_ < map->NumberOfOwnDescriptors());
return map->instance_descriptors()->GetValue(number_);
}
int GetFieldIndexFromMap(Map* map) const {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+ ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
+ lookup_type_ == TRANSITION_TYPE);
ASSERT(number_ < map->NumberOfOwnDescriptors());
return map->instance_descriptors()->GetFieldIndex(number_);
}
+ HeapType* GetFieldType() const {
+ ASSERT(type() == FIELD);
+ if (lookup_type_ == DESCRIPTOR_TYPE) {
+ return GetFieldTypeFromMap(holder()->map());
+ }
+ ASSERT(lookup_type_ == TRANSITION_TYPE);
+ return GetFieldTypeFromMap(transition_);
+ }
+
+ HeapType* GetFieldTypeFromMap(Map* map) const {
+ ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
+ lookup_type_ == TRANSITION_TYPE);
+ ASSERT(number_ < map->NumberOfOwnDescriptors());
+ return map->instance_descriptors()->GetFieldType(number_);
+ }
+
+ Map* GetFieldOwner() const {
+ return GetFieldOwnerFromMap(holder()->map());
+ }
+
+ Map* GetFieldOwnerFromMap(Map* map) const {
+ ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
+ lookup_type_ == TRANSITION_TYPE);
+ ASSERT(number_ < map->NumberOfOwnDescriptors());
+ return map->FindFieldOwner(number_);
+ }
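  // Annotation: GetFieldOwnerFromMap() returns the map that introduced the
  // descriptor. That map is presumably where a field type generalization has
  // to be recorded so that every map transitioned from it observes the wider
  // type.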
+
void Iterate(ObjectVisitor* visitor);
private:
@@ -491,7 +487,6 @@ class LookupResult BASE_EMBEDDED {
PropertyDetails details_;
};
-
} } // namespace v8::internal
#endif // V8_PROPERTY_H_
diff --git a/chromium/v8/src/proxy.js b/chromium/v8/src/proxy.js
index 4c03f215389..99f9dab9f3a 100644
--- a/chromium/v8/src/proxy.js
+++ b/chromium/v8/src/proxy.js
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
"use strict";
@@ -72,7 +49,8 @@ function ProxyCreateFunction(handler, callTrap, constructTrap) {
function SetUpProxy() {
%CheckIsBootstrapping()
- global.Proxy = $Proxy;
+ var global_receiver = %GlobalReceiver(global);
+ global_receiver.Proxy = $Proxy;
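  // Annotation: %GlobalReceiver(global) returns the global proxy, the object
  // user scripts actually observe as the global `this`, so Proxy is installed
  // where page code can see it rather than on the internal global object.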
// Set up non-enumerable properties of the Proxy object.
InstallFunctions($Proxy, DONT_ENUM, [
diff --git a/chromium/v8/src/qnx-math.h b/chromium/v8/src/qnx-math.h
new file mode 100644
index 00000000000..8cf65d208cb
--- /dev/null
+++ b/chromium/v8/src/qnx-math.h
@@ -0,0 +1,19 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_QNX_MATH_H_
+#define V8_QNX_MATH_H_
+
+#include <cmath>
+
+#undef fpclassify
+#undef isfinite
+#undef isinf
+#undef isnan
+#undef isnormal
+#undef signbit
+
+using std::lrint;
+
+#endif // V8_QNX_MATH_H_
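// Annotation: C99 allows <math.h> to define fpclassify/isnan/etc. as macros,
// and on QNX they leak through <cmath>; undefining them here restores the
// std:: overloads that V8 expects.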
diff --git a/chromium/v8/src/regexp-macro-assembler-irregexp-inl.h b/chromium/v8/src/regexp-macro-assembler-irregexp-inl.h
index a767ec0089f..a2359babcb6 100644
--- a/chromium/v8/src/regexp-macro-assembler-irregexp-inl.h
+++ b/chromium/v8/src/regexp-macro-assembler-irregexp-inl.h
@@ -1,36 +1,13 @@
// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// A light-weight assembler for the Irregexp byte code.
-#include "v8.h"
-#include "ast.h"
-#include "bytecodes-irregexp.h"
+#include "src/v8.h"
+#include "src/ast.h"
+#include "src/bytecodes-irregexp.h"
#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/chromium/v8/src/regexp-macro-assembler-irregexp.cc b/chromium/v8/src/regexp-macro-assembler-irregexp.cc
index 3b9a2f66033..92d7deeb750 100644
--- a/chromium/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/chromium/v8/src/regexp-macro-assembler-irregexp.cc
@@ -1,36 +1,13 @@
// Copyright 2008-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "ast.h"
-#include "bytecodes-irregexp.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-macro-assembler-irregexp.h"
-#include "regexp-macro-assembler-irregexp-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "src/ast.h"
+#include "src/bytecodes-irregexp.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-macro-assembler-irregexp.h"
+#include "src/regexp-macro-assembler-irregexp-inl.h"
namespace v8 {
@@ -457,7 +434,7 @@ int RegExpMacroAssemblerIrregexp::length() {
void RegExpMacroAssemblerIrregexp::Copy(Address a) {
- OS::MemCopy(a, buffer_.start(), length());
+ MemCopy(a, buffer_.start(), length());
}
@@ -466,7 +443,7 @@ void RegExpMacroAssemblerIrregexp::Expand() {
Vector<byte> old_buffer = buffer_;
buffer_ = Vector<byte>::New(old_buffer.length() * 2);
own_buffer_ = true;
- OS::MemCopy(buffer_.start(), old_buffer.start(), old_buffer.length());
+ MemCopy(buffer_.start(), old_buffer.start(), old_buffer.length());
if (old_buffer_was_our_own) {
old_buffer.Dispose();
}
diff --git a/chromium/v8/src/regexp-macro-assembler-irregexp.h b/chromium/v8/src/regexp-macro-assembler-irregexp.h
index f8a412d4f86..cdfb46ad15e 100644
--- a/chromium/v8/src/regexp-macro-assembler-irregexp.h
+++ b/chromium/v8/src/regexp-macro-assembler-irregexp.h
@@ -1,33 +1,12 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#include "src/regexp-macro-assembler.h"
+
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/regexp-macro-assembler-tracer.cc b/chromium/v8/src/regexp-macro-assembler-tracer.cc
index 1ce1fa4b24a..1e745d94a7e 100644
--- a/chromium/v8/src/regexp-macro-assembler-tracer.cc
+++ b/chromium/v8/src/regexp-macro-assembler-tracer.cc
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "ast.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-macro-assembler-tracer.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "src/ast.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-macro-assembler-tracer.h"
namespace v8 {
namespace internal {
@@ -38,8 +15,9 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler(assembler->zone()),
assembler_(assembler) {
unsigned int type = assembler->Implementation();
- ASSERT(type < 5);
- const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
+  ASSERT(type < 7);
+ const char* impl_names[] = {"IA32", "ARM", "ARM64",
+ "MIPS", "X64", "X87", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
@@ -214,7 +192,7 @@ class PrintablePrinter {
buffer_[0] = '\0';
}
return &buffer_[0];
- };
+ }
private:
uc16 character_;
@@ -427,7 +405,7 @@ RegExpMacroAssembler::IrregexpImplementation
Handle<HeapObject> RegExpMacroAssemblerTracer::GetCode(Handle<String> source) {
- PrintF(" GetCode(%s);\n", *(source->ToCString()));
+ PrintF(" GetCode(%s);\n", source->ToCString().get());
return assembler_->GetCode(source);
}
diff --git a/chromium/v8/src/regexp-macro-assembler-tracer.h b/chromium/v8/src/regexp-macro-assembler-tracer.h
index 852fb80417b..ac76cfdb067 100644
--- a/chromium/v8/src/regexp-macro-assembler-tracer.h
+++ b/chromium/v8/src/regexp-macro-assembler-tracer.h
@@ -1,29 +1,6 @@
// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
#define V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
diff --git a/chromium/v8/src/regexp-macro-assembler.cc b/chromium/v8/src/regexp-macro-assembler.cc
index 7d027f880fa..88adf978992 100644
--- a/chromium/v8/src/regexp-macro-assembler.cc
+++ b/chromium/v8/src/regexp-macro-assembler.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "ast.h"
-#include "assembler.h"
-#include "regexp-stack.h"
-#include "regexp-macro-assembler.h"
-#include "simulator.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "src/ast.h"
+#include "src/assembler.h"
+#include "src/regexp-stack.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/simulator.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/regexp-macro-assembler.h b/chromium/v8/src/regexp-macro-assembler.h
index 1ff8bd97972..f0cfc465fc6 100644
--- a/chromium/v8/src/regexp-macro-assembler.h
+++ b/chromium/v8/src/regexp-macro-assembler.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_REGEXP_MACRO_ASSEMBLER_H_
#define V8_REGEXP_MACRO_ASSEMBLER_H_
-#include "ast.h"
+#include "src/ast.h"
namespace v8 {
namespace internal {
@@ -53,8 +30,10 @@ class RegExpMacroAssembler {
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
+ kARM64Implementation,
kMIPSImplementation,
kX64Implementation,
+ kX87Implementation,
kBytecodeImplementation
};
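  // Annotation: this enum's order must stay in sync with the impl_names table
  // in regexp-macro-assembler-tracer.cc, which indexes it with the value
  // returned by assembler->Implementation().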
diff --git a/chromium/v8/src/regexp-stack.cc b/chromium/v8/src/regexp-stack.cc
index f3af490f1e4..97835cc9021 100644
--- a/chromium/v8/src/regexp-stack.cc
+++ b/chromium/v8/src/regexp-stack.cc
@@ -1,32 +1,9 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "regexp-stack.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "src/regexp-stack.h"
namespace v8 {
namespace internal {
@@ -56,7 +33,7 @@ RegExpStack::~RegExpStack() {
char* RegExpStack::ArchiveStack(char* to) {
size_t size = sizeof(thread_local_);
- OS::MemCopy(reinterpret_cast<void*>(to), &thread_local_, size);
+ MemCopy(reinterpret_cast<void*>(to), &thread_local_, size);
thread_local_ = ThreadLocal();
return to + size;
}
@@ -64,7 +41,7 @@ char* RegExpStack::ArchiveStack(char* to) {
char* RegExpStack::RestoreStack(char* from) {
size_t size = sizeof(thread_local_);
- OS::MemCopy(&thread_local_, reinterpret_cast<void*>(from), size);
+ MemCopy(&thread_local_, reinterpret_cast<void*>(from), size);
return from + size;
}
@@ -92,11 +69,10 @@ Address RegExpStack::EnsureCapacity(size_t size) {
Address new_memory = NewArray<byte>(static_cast<int>(size));
if (thread_local_.memory_size_ > 0) {
// Copy original memory into top of new memory.
- OS::MemCopy(
- reinterpret_cast<void*>(
- new_memory + size - thread_local_.memory_size_),
- reinterpret_cast<void*>(thread_local_.memory_),
- thread_local_.memory_size_);
+ MemCopy(reinterpret_cast<void*>(new_memory + size -
+ thread_local_.memory_size_),
+ reinterpret_cast<void*>(thread_local_.memory_),
+ thread_local_.memory_size_);
DeleteArray(thread_local_.memory_);
}
thread_local_.memory_ = new_memory;
diff --git a/chromium/v8/src/regexp-stack.h b/chromium/v8/src/regexp-stack.h
index 5684239f9fa..745782d73b7 100644
--- a/chromium/v8/src/regexp-stack.h
+++ b/chromium/v8/src/regexp-stack.h
@@ -1,29 +1,6 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_REGEXP_STACK_H_
#define V8_REGEXP_STACK_H_
diff --git a/chromium/v8/src/regexp.js b/chromium/v8/src/regexp.js
index 22b08775b32..8a805b0a19b 100644
--- a/chromium/v8/src/regexp.js
+++ b/chromium/v8/src/regexp.js
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This file relies on the fact that the following declaration has been made
// in runtime.js:
@@ -103,7 +80,7 @@ function RegExpConstructor(pattern, flags) {
// were called again. In SpiderMonkey, this method returns the regexp object.
// In JSC, it returns undefined. For compatibility with JSC, we match their
// behavior.
-function RegExpCompile(pattern, flags) {
+function RegExpCompileJS(pattern, flags) {
// Both JSC and SpiderMonkey treat a missing pattern argument as the
// empty subject string, and an actual undefined value passed as the
// pattern as the string 'undefined'. Note that JSC is inconsistent
@@ -131,23 +108,30 @@ function DoRegExpExec(regexp, string, index) {
}
-function BuildResultFromMatchInfo(lastMatchInfo, s) {
- var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
- var start = lastMatchInfo[CAPTURE0];
- var end = lastMatchInfo[CAPTURE1];
- var result = %_RegExpConstructResult(numResults, start, s);
- result[0] = %_SubString(s, start, end);
+// This is kind of performance sensitive, so we want to avoid unnecessary
+// type checks on inputs. But we also don't want to inline it several times
+// manually, so we use a macro :-)
+macro RETURN_NEW_RESULT_FROM_MATCH_INFO(MATCHINFO, STRING)
+ var numResults = NUMBER_OF_CAPTURES(MATCHINFO) >> 1;
+ var start = MATCHINFO[CAPTURE0];
+ var end = MATCHINFO[CAPTURE1];
+ // Calculate the substring of the first match before creating the result array
+ // to avoid an unnecessary write barrier storing the first result.
+ var first = %_SubString(STRING, start, end);
+ var result = %_RegExpConstructResult(numResults, start, STRING);
+ result[0] = first;
+ if (numResults == 1) return result;
var j = REGEXP_FIRST_CAPTURE + 2;
for (var i = 1; i < numResults; i++) {
- start = lastMatchInfo[j++];
+ start = MATCHINFO[j++];
if (start != -1) {
- end = lastMatchInfo[j];
- result[i] = %_SubString(s, start, end);
+ end = MATCHINFO[j];
+ result[i] = %_SubString(STRING, start, end);
}
j++;
}
return result;
-}
+endmacro
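// Annotation (illustrative): macros in the runtime .js files are expanded
// textually by the js2c build step, so a call site such as
//   RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string);
// is replaced by the body above with MATCHINFO and STRING substituted,
// early `return` included, which is why callers below invoke it as a bare
// statement rather than writing `return ...`.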
function RegExpExecNoTests(regexp, string, start) {
@@ -155,7 +139,7 @@ function RegExpExecNoTests(regexp, string, start) {
var matchInfo = %_RegExpExec(regexp, string, start, lastMatchInfo);
if (matchInfo !== null) {
lastMatchInfoOverride = null;
- return BuildResultFromMatchInfo(matchInfo, string);
+ RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string);
}
regexp.lastIndex = 0;
return null;
@@ -185,7 +169,6 @@ function RegExpExec(string) {
i = 0;
}
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
@@ -199,7 +182,7 @@ function RegExpExec(string) {
if (global) {
this.lastIndex = lastMatchInfo[CAPTURE1];
}
- return BuildResultFromMatchInfo(matchIndices, string);
+ RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
}
@@ -229,7 +212,6 @@ function RegExpTest(string) {
this.lastIndex = 0;
return false;
}
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
if (IS_NULL(matchIndices)) {
@@ -250,7 +232,6 @@ function RegExpTest(string) {
%_StringCharCodeAt(regexp.source, 2) != 63) { // '?'
regexp = TrimRegExp(regexp);
}
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
if (IS_NULL(matchIndices)) {
@@ -407,7 +388,7 @@ function SetUpRegExp() {
"exec", RegExpExec,
"test", RegExpTest,
"toString", RegExpToString,
- "compile", RegExpCompile
+ "compile", RegExpCompileJS
));
// The length of compile is 1 in SpiderMonkey.
diff --git a/chromium/v8/src/rewriter.cc b/chromium/v8/src/rewriter.cc
index ba35284d7f9..c92ccda730f 100644
--- a/chromium/v8/src/rewriter.cc
+++ b/chromium/v8/src/rewriter.cc
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "rewriter.h"
-
-#include "ast.h"
-#include "compiler.h"
-#include "scopes.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/rewriter.h"
+
+#include "src/ast.h"
+#include "src/compiler.h"
+#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -43,8 +20,8 @@ class Processor: public AstVisitor {
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(zone->isolate(), zone) {
- InitializeAstVisitor(zone->isolate());
+ factory_(zone) {
+ InitializeAstVisitor(zone);
}
virtual ~Processor() { }
@@ -207,11 +184,6 @@ void Processor::VisitSwitchStatement(SwitchStatement* node) {
}
-void Processor::VisitCaseClause(CaseClause* clause) {
- UNREACHABLE();
-}
-
-
void Processor::VisitContinueStatement(ContinueStatement* node) {
is_set_ = false;
}
diff --git a/chromium/v8/src/rewriter.h b/chromium/v8/src/rewriter.h
index 59914d97f91..0423802bade 100644
--- a/chromium/v8/src/rewriter.h
+++ b/chromium/v8/src/rewriter.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_REWRITER_H_
#define V8_REWRITER_H_
diff --git a/chromium/v8/src/runtime-profiler.cc b/chromium/v8/src/runtime-profiler.cc
index 691fc666419..dddcad060de 100644
--- a/chromium/v8/src/runtime-profiler.cc
+++ b/chromium/v8/src/runtime-profiler.cc
@@ -1,67 +1,26 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "runtime-profiler.h"
-
-#include "assembler.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "execution.h"
-#include "full-codegen.h"
-#include "global-handles.h"
-#include "isolate-inl.h"
-#include "mark-compact.h"
-#include "platform.h"
-#include "scopeinfo.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-namespace v8 {
-namespace internal {
-
-
-// Optimization sampler constants.
-static const int kSamplerFrameCount = 2;
-
-// Constants for statistical profiler.
-static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
+#include "src/v8.h"
-static const int kSamplerTicksBetweenThresholdAdjustment = 32;
+#include "src/runtime-profiler.h"
-static const int kSamplerThresholdInit = 3;
-static const int kSamplerThresholdMin = 1;
-static const int kSamplerThresholdDelta = 1;
+#include "src/assembler.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/compilation-cache.h"
+#include "src/execution.h"
+#include "src/full-codegen.h"
+#include "src/global-handles.h"
+#include "src/isolate-inl.h"
+#include "src/mark-compact.h"
+#include "src/platform.h"
+#include "src/scopeinfo.h"
-static const int kSamplerThresholdSizeFactorInit = 3;
-
-static const int kSizeLimit = 1500;
+namespace v8 {
+namespace internal {
-// Constants for counter based profiler.
// Number of times a function has to be seen on the stack before it is
// optimized.
@@ -84,7 +43,7 @@ static const int kOSRCodeSizeAllowanceBase =
100 * FullCodeGenerator::kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowancePerTick =
- 3 * FullCodeGenerator::kCodeSizeMultiplier;
+ 4 * FullCodeGenerator::kCodeSizeMultiplier;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
@@ -94,14 +53,7 @@ static const int kMaxSizeEarlyOpt =
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
: isolate_(isolate),
- sampler_threshold_(kSamplerThresholdInit),
- sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
- sampler_ticks_until_threshold_adjustment_(
- kSamplerTicksBetweenThresholdAdjustment),
- sampler_window_position_(0),
- any_ic_changed_(false),
- code_generated_(false) {
- ClearSampleBuffer();
+ any_ic_changed_(false) {
}
@@ -149,11 +101,11 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
// recompilation race. This goes away as soon as OSR becomes one-shot.
return;
}
- ASSERT(!function->IsInRecompileQueue());
- function->MarkForConcurrentRecompilation();
+ ASSERT(!function->IsInOptimizationQueue());
+ function->MarkForConcurrentOptimization();
} else {
// The next call to the function will trigger optimization.
- function->MarkForLazyRecompilation();
+ function->MarkForOptimization();
}
}
@@ -189,38 +141,6 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
}
-void RuntimeProfiler::ClearSampleBuffer() {
- memset(sampler_window_, 0, sizeof(sampler_window_));
- memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
-}
-
-
-int RuntimeProfiler::LookupSample(JSFunction* function) {
- int weight = 0;
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* sample = sampler_window_[i];
- if (sample != NULL) {
- bool fits = FLAG_lookup_sample_by_shared
- ? (function->shared() == JSFunction::cast(sample)->shared())
- : (function == JSFunction::cast(sample));
- if (fits) {
- weight += sampler_window_weight_[i];
- }
- }
- }
- return weight;
-}
-
-
-void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
- ASSERT(IsPowerOf2(kSamplerWindowSize));
- sampler_window_[sampler_window_position_] = function;
- sampler_window_weight_[sampler_window_position_] = weight;
- sampler_window_position_ = (sampler_window_position_ + 1) &
- (kSamplerWindowSize - 1);
-}
-
-
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
@@ -231,39 +151,29 @@ void RuntimeProfiler::OptimizeNow() {
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimization
// (eagerly or lazily).
- JSFunction* samples[kSamplerFrameCount];
- int sample_count = 0;
int frame_count = 0;
- int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
- : kSamplerFrameCount;
+ int frame_count_limit = FLAG_frame_count;
for (JavaScriptFrameIterator it(isolate_);
frame_count++ < frame_count_limit && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
JSFunction* function = frame->function();
- if (!FLAG_watch_ic_patching) {
- // Adjust threshold each time we have processed
- // a certain number of ticks.
- if (sampler_ticks_until_threshold_adjustment_ > 0) {
- sampler_ticks_until_threshold_adjustment_--;
- if (sampler_ticks_until_threshold_adjustment_ <= 0) {
- // If the threshold is not already at the minimum
- // modify and reset the ticks until next adjustment.
- if (sampler_threshold_ > kSamplerThresholdMin) {
- sampler_threshold_ -= kSamplerThresholdDelta;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
- }
- }
- }
- }
-
SharedFunctionInfo* shared = function->shared();
Code* shared_code = shared->code();
+ List<JSFunction*> functions(4);
+ frame->GetFunctions(&functions);
+ for (int i = functions.length(); --i >= 0; ) {
+ SharedFunctionInfo* shared_function_info = functions[i]->shared();
+ int ticks = shared_function_info->profiler_ticks();
+ if (ticks < Smi::kMaxValue) {
+ shared_function_info->set_profiler_ticks(ticks + 1);
+ }
+ }
+
if (shared_code->kind() != Code::FUNCTION) continue;
- if (function->IsInRecompileQueue()) continue;
+ if (function->IsInOptimizationQueue()) continue;
if (FLAG_always_osr &&
shared_code->allow_osr_at_loop_nesting_level() == 0) {
@@ -275,8 +185,8 @@ void RuntimeProfiler::OptimizeNow() {
}
// Fall through and do a normal optimized compile as well.
} else if (!frame->is_optimized() &&
- (function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForConcurrentRecompilation() ||
+ (function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized())) {
// Attempt OSR if we are still running unoptimized code even though the
// function has long been marked or even already been optimized.
@@ -322,116 +232,36 @@ void RuntimeProfiler::OptimizeNow() {
}
if (!function->IsOptimizable()) continue;
- if (FLAG_watch_ic_patching) {
- int ticks = shared_code->profiler_ticks();
+ int ticks = shared_code->profiler_ticks();
- if (ticks >= kProfilerTicksBeforeOptimization) {
- int typeinfo, total, percentage;
- GetICCounts(shared_code, &typeinfo, &total, &percentage);
- if (percentage >= FLAG_type_info_threshold) {
- // If this particular function hasn't had any ICs patched for enough
- // ticks, optimize it now.
- Optimize(function, "hot and stable");
- } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
- Optimize(function, "not much type info but very hot");
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
- if (FLAG_trace_opt_verbose) {
- PrintF("[not yet optimizing ");
- function->PrintName();
- PrintF(", not enough type info: %d/%d (%d%%)]\n",
- typeinfo, total, percentage);
- }
- }
- } else if (!any_ic_changed_ &&
- shared_code->instruction_size() < kMaxSizeEarlyOpt) {
- // If no IC was patched since the last tick and this function is very
- // small, optimistically optimize it now.
- Optimize(function, "small function");
+ if (ticks >= kProfilerTicksBeforeOptimization) {
+ int typeinfo, total, percentage;
+ GetICCounts(shared_code, &typeinfo, &total, &percentage);
+ if (percentage >= FLAG_type_info_threshold) {
+ // If this particular function hasn't had any ICs patched for enough
+ // ticks, optimize it now.
+ Optimize(function, "hot and stable");
+ } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
+ Optimize(function, "not much type info but very hot");
} else {
shared_code->set_profiler_ticks(ticks + 1);
+ if (FLAG_trace_opt_verbose) {
+ PrintF("[not yet optimizing ");
+ function->PrintName();
+ PrintF(", not enough type info: %d/%d (%d%%)]\n",
+ typeinfo, total, percentage);
+ }
}
- } else { // !FLAG_watch_ic_patching
- samples[sample_count++] = function;
-
- int function_size = function->shared()->SourceSize();
- int threshold_size_factor = (function_size > kSizeLimit)
- ? sampler_threshold_size_factor_
- : 1;
-
- int threshold = sampler_threshold_ * threshold_size_factor;
-
- if (LookupSample(function) >= threshold) {
- Optimize(function, "sampler window lookup");
- }
- }
- }
- if (FLAG_watch_ic_patching) {
- any_ic_changed_ = false;
- } else { // !FLAG_watch_ic_patching
- // Add the collected functions as samples. It's important not to do
- // this as part of collecting them because this will interfere with
- // the sample lookup in case of recursive functions.
- for (int i = 0; i < sample_count; i++) {
- AddSample(samples[i], kSamplerFrameWeight[i]);
- }
- }
-}
-
-
-void RuntimeProfiler::SetUp() {
- if (!FLAG_watch_ic_patching) {
- ClearSampleBuffer();
- }
-}
-
-
-void RuntimeProfiler::Reset() {
- if (!FLAG_watch_ic_patching) {
- sampler_threshold_ = kSamplerThresholdInit;
- sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
- }
-}
-
-
-void RuntimeProfiler::TearDown() {
- // Nothing to do.
-}
-
-
-// Update the pointers in the sampler window after a GC.
-void RuntimeProfiler::UpdateSamplesAfterScavenge() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window_[i];
- if (function != NULL && isolate_->heap()->InNewSpace(function)) {
- MapWord map_word = HeapObject::cast(function)->map_word();
- if (map_word.IsForwardingAddress()) {
- sampler_window_[i] = map_word.ToForwardingAddress();
- } else {
- sampler_window_[i] = NULL;
- }
- }
- }
-}
-
-
-void RuntimeProfiler::RemoveDeadSamples() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window_[i];
- if (function != NULL &&
- !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
- sampler_window_[i] = NULL;
+ } else if (!any_ic_changed_ &&
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+ // If no IC was patched since the last tick and this function is very
+ // small, optimistically optimize it now.
+ Optimize(function, "small function");
+ } else {
+ shared_code->set_profiler_ticks(ticks + 1);
}
}
-}
-
-
-void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- visitor->VisitPointer(&sampler_window_[i]);
- }
+ any_ic_changed_ = false;
}
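// Annotation: with the sampler gone, OptimizeNow() is purely tick driven. A
// function is optimized once it has seen kProfilerTicksBeforeOptimization
// profiler ticks with at least FLAG_type_info_threshold percent of its ICs
// carrying type information; a very hot function is optimized anyway after
// kTicksWhenNotEnoughTypeInfo ticks, and a tiny function (instruction size
// below kMaxSizeEarlyOpt) is optimized early when no IC changed since the
// last tick.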
diff --git a/chromium/v8/src/runtime-profiler.h b/chromium/v8/src/runtime-profiler.h
index 28d6d322fd5..fa8352d5e0c 100644
--- a/chromium/v8/src/runtime-profiler.h
+++ b/chromium/v8/src/runtime-profiler.h
@@ -1,35 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_RUNTIME_PROFILER_H_
#define V8_RUNTIME_PROFILER_H_
-#include "allocation.h"
-#include "atomicops.h"
+#include "src/allocation.h"
namespace v8 {
namespace internal {
@@ -45,47 +21,18 @@ class RuntimeProfiler {
void OptimizeNow();
- void SetUp();
- void Reset();
- void TearDown();
-
void NotifyICChanged() { any_ic_changed_ = true; }
- // Rate limiting support.
-
- void UpdateSamplesAfterScavenge();
- void RemoveDeadSamples();
- void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
-
void AttemptOnStackReplacement(JSFunction* function);
private:
- static const int kSamplerWindowSize = 16;
-
void Optimize(JSFunction* function, const char* reason);
- void ClearSampleBuffer();
-
- void ClearSampleBufferNewSpaceEntries();
-
- int LookupSample(JSFunction* function);
-
- void AddSample(JSFunction* function, int weight);
-
bool CodeSizeOKForOSR(Code* shared_code);
Isolate* isolate_;
- int sampler_threshold_;
- int sampler_threshold_size_factor_;
- int sampler_ticks_until_threshold_adjustment_;
-
- Object* sampler_window_[kSamplerWindowSize];
- int sampler_window_position_;
- int sampler_window_weight_[kSamplerWindowSize];
-
bool any_ic_changed_;
- bool code_generated_;
};
} } // namespace v8::internal
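
With the sampler window and its GC bookkeeping gone, the public surface reduces to OptimizeNow(), NotifyICChanged() and AttemptOnStackReplacement(). A hedged sketch of how the IC flag is meant to flow between IC patching and the tick handler (stand-in class, not the real call sites):

// IC patching sets the flag; each profiler tick reads and clears it.
class ProfilerFlagSketch {
 public:
  void NotifyICChanged() { any_ic_changed_ = true; }
  // Called once per tick: report whether any IC changed, then reset.
  bool ConsumeICChanged() {
    bool changed = any_ic_changed_;
    any_ic_changed_ = false;
    return changed;
  }

 private:
  bool any_ic_changed_ = false;
};
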
diff --git a/chromium/v8/src/runtime.cc b/chromium/v8/src/runtime.cc
index 8333380e83b..36b3177b7c0 100644
--- a/chromium/v8/src/runtime.cc
+++ b/chromium/v8/src/runtime.cc
@@ -1,74 +1,51 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdlib.h>
#include <limits>
-#include "v8.h"
-
-#include "accessors.h"
-#include "allocation-site-scopes.h"
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "compilation-cache.h"
-#include "compiler.h"
-#include "cpu.h"
-#include "cpu-profiler.h"
-#include "dateparser-inl.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "date.h"
-#include "execution.h"
-#include "full-codegen.h"
-#include "global-handles.h"
-#include "isolate-inl.h"
-#include "jsregexp.h"
-#include "jsregexp-inl.h"
-#include "json-parser.h"
-#include "json-stringifier.h"
-#include "liveedit.h"
-#include "misc-intrinsics.h"
-#include "parser.h"
-#include "platform.h"
-#include "runtime-profiler.h"
-#include "runtime.h"
-#include "scopeinfo.h"
-#include "smart-pointers.h"
-#include "string-search.h"
-#include "stub-cache.h"
-#include "uri.h"
-#include "v8conversions.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/allocation-site-scopes.h"
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/compilation-cache.h"
+#include "src/compiler.h"
+#include "src/conversions.h"
+#include "src/cpu.h"
+#include "src/cpu-profiler.h"
+#include "src/dateparser-inl.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/date.h"
+#include "src/execution.h"
+#include "src/full-codegen.h"
+#include "src/global-handles.h"
+#include "src/isolate-inl.h"
+#include "src/jsregexp.h"
+#include "src/jsregexp-inl.h"
+#include "src/json-parser.h"
+#include "src/json-stringifier.h"
+#include "src/liveedit.h"
+#include "src/misc-intrinsics.h"
+#include "src/parser.h"
+#include "src/platform.h"
+#include "src/runtime-profiler.h"
+#include "src/runtime.h"
+#include "src/scopeinfo.h"
+#include "src/smart-pointers.h"
+#include "src/string-search.h"
+#include "src/stub-cache.h"
+#include "src/uri.h"
+#include "src/v8threads.h"
+#include "src/vm-state-inl.h"
#ifdef V8_I18N_SUPPORT
-#include "i18n.h"
+#include "src/i18n.h"
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
#include "unicode/coll.h"
@@ -105,6 +82,12 @@ namespace internal {
#define RUNTIME_ASSERT(value) \
if (!(value)) return isolate->ThrowIllegalOperation();
+#define RUNTIME_ASSERT_HANDLIFIED(value, T) \
+ if (!(value)) { \
+ isolate->ThrowIllegalOperation(); \
+ return MaybeHandle<T>(); \
+ }
+
// Cast the given object to a value of the specified type and store
// it in a variable with the given name. If the object is not of the
// expected type, call IllegalOperation and return.
@@ -116,6 +99,10 @@ namespace internal {
RUNTIME_ASSERT(args[index]->Is##Type()); \
Handle<Type> name = args.at<Type>(index);
+#define CONVERT_NUMBER_ARG_HANDLE_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsNumber()); \
+ Handle<Object> name = args.at<Object>(index);
+
// Cast the given object to a boolean and store it in a variable with
// the given name. If the object is not a boolean, call IllegalOperation
// and return.
@@ -153,25 +140,13 @@ namespace internal {
PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
-// Assert that the given argument has a valid value for a StrictModeFlag
-// and store it in a StrictModeFlag variable with the given name.
+// Assert that the given argument has a valid value for a StrictMode
+// and store it in a StrictMode variable with the given name.
#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index) \
RUNTIME_ASSERT(args[index]->IsSmi()); \
- RUNTIME_ASSERT(args.smi_at(index) == kStrictMode || \
- args.smi_at(index) == kNonStrictMode); \
- StrictModeFlag name = \
- static_cast<StrictModeFlag>(args.smi_at(index));
-
-
-// Assert that the given argument has a valid value for a LanguageMode
-// and store it in a LanguageMode variable with the given name.
-#define CONVERT_LANGUAGE_MODE_ARG(name, index) \
- ASSERT(args[index]->IsSmi()); \
- ASSERT(args.smi_at(index) == CLASSIC_MODE || \
- args.smi_at(index) == STRICT_MODE || \
- args.smi_at(index) == EXTENDED_MODE); \
- LanguageMode name = \
- static_cast<LanguageMode>(args.smi_at(index));
+ RUNTIME_ASSERT(args.smi_at(index) == STRICT || \
+ args.smi_at(index) == SLOPPY); \
+ StrictMode name = static_cast<StrictMode>(args.smi_at(index));
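
The rewritten macro folds the old three-value LanguageMode check into a two-value StrictMode check. A standalone sketch of the validate-then-cast pattern it encodes (enum names and values are stand-ins):

// Reject anything that is not exactly SLOPPY or STRICT before casting.
enum StrictModeSketch { SLOPPY_SKETCH = 0, STRICT_SKETCH = 1 };

bool ConvertStrictMode(int raw, StrictModeSketch* out) {
  if (raw != SLOPPY_SKETCH && raw != STRICT_SKETCH)
    return false;  // RUNTIME_ASSERT analogue: bail on anything else
  *out = static_cast<StrictModeSketch>(raw);
  return true;
}
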
static Handle<Map> ComputeObjectLiteralMap(
@@ -221,19 +196,17 @@ static Handle<Map> ComputeObjectLiteralMap(
return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
}
*is_result_from_cache = false;
- return isolate->factory()->CopyMap(
- Handle<Map>(context->object_function()->initial_map()),
- number_of_properties);
+ return Map::Create(handle(context->object_function()), number_of_properties);
}
-static Handle<Object> CreateLiteralBoilerplate(
+MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> constant_properties);
-static Handle<Object> CreateObjectLiteralBoilerplate(
+MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> constant_properties,
@@ -259,9 +232,11 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
constant_properties,
&is_result_from_cache);
+ PretenureFlag pretenure_flag =
+ isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
+
Handle<JSObject> boilerplate =
- isolate->factory()->NewJSObjectFromMap(
- map, isolate->heap()->GetPretenureMode());
+ isolate->factory()->NewJSObjectFromMap(map, pretenure_flag);
// Normalize the elements of the boilerplate to save space if needed.
if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
@@ -270,13 +245,14 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
int length = constant_properties->length();
bool should_transform =
!is_result_from_cache && boilerplate->HasFastProperties();
- if (should_transform || has_function_literal) {
- // Normalize the properties of object to avoid n^2 behavior
- // when extending the object multiple properties. Indicate the number of
- // properties to be added.
+ bool should_normalize = should_transform || has_function_literal;
+ if (should_normalize) {
+ // TODO(verwaest): We might not want to ever normalize here.
JSObject::NormalizeProperties(
boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
}
+ Object::ValueType value_type = should_normalize
+ ? Object::FORCE_TAGGED : Object::OPTIMAL_REPRESENTATION;
// TODO(verwaest): Support tracking representations in the boilerplate.
  for (int index = 0; index < length; index += 2) {
@@ -286,28 +262,30 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
// The value contains the constant_properties of a
// simple object or array literal.
Handle<FixedArray> array = Handle<FixedArray>::cast(value);
- value = CreateLiteralBoilerplate(isolate, literals, array);
- if (value.is_null()) return value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value,
+ CreateLiteralBoilerplate(isolate, literals, array),
+ Object);
}
- Handle<Object> result;
+ MaybeHandle<Object> maybe_result;
uint32_t element_index = 0;
StoreMode mode = value->IsJSObject() ? FORCE_FIELD : ALLOW_AS_CONSTANT;
if (key->IsInternalizedString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
- result = JSObject::SetOwnElement(
- boilerplate, element_index, value, kNonStrictMode);
+ maybe_result = JSObject::SetOwnElement(
+ boilerplate, element_index, value, SLOPPY);
} else {
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
- result = JSObject::SetLocalPropertyIgnoreAttributes(
+ maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(
boilerplate, name, value, NONE,
- Object::OPTIMAL_REPRESENTATION, mode);
+ value_type, mode);
}
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
- result = JSObject::SetOwnElement(
- boilerplate, element_index, value, kNonStrictMode);
+ maybe_result = JSObject::SetOwnElement(
+ boilerplate, element_index, value, SLOPPY);
} else {
// Non-uint32 number.
ASSERT(key->IsNumber());
@@ -315,17 +293,16 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str = DoubleToCString(num, buffer);
- Handle<String> name =
- isolate->factory()->NewStringFromAscii(CStrVector(str));
- result = JSObject::SetLocalPropertyIgnoreAttributes(
+ Handle<String> name = isolate->factory()->NewStringFromAsciiChecked(str);
+ maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(
boilerplate, name, value, NONE,
- Object::OPTIMAL_REPRESENTATION, mode);
+ value_type, mode);
}
// If setting the property on the boilerplate throws an
// exception, the exception is converted to an empty handle in
// the handle based operations. In that case, we need to
// convert back to an exception.
- if (result.is_null()) return result;
+ RETURN_ON_EXCEPTION(isolate, maybe_result, Object);
}
// Transform to fast properties if necessary. For object literals with
@@ -341,25 +318,30 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
}
-MaybeObject* TransitionElements(Handle<Object> object,
- ElementsKind to_kind,
- Isolate* isolate) {
+MUST_USE_RESULT static MaybeHandle<Object> TransitionElements(
+ Handle<Object> object,
+ ElementsKind to_kind,
+ Isolate* isolate) {
HandleScope scope(isolate);
- if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
+ if (!object->IsJSObject()) {
+ isolate->ThrowIllegalOperation();
+ return MaybeHandle<Object>();
+ }
ElementsKind from_kind =
Handle<JSObject>::cast(object)->map()->elements_kind();
if (Map::IsValidElementsTransition(from_kind, to_kind)) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
- return *object;
+ return object;
}
- return isolate->ThrowIllegalOperation();
+ isolate->ThrowIllegalOperation();
+ return MaybeHandle<Object>();
}
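
TransitionElements is representative of the whole migration in this file: instead of returning a raw MaybeObject* failure, it schedules the exception on the isolate and returns an empty MaybeHandle, which callers unwrap with ASSIGN_RETURN_ON_EXCEPTION or .Check(). A self-contained sketch of that contract using std::optional as a stand-in:

#include <optional>

// Empty result means "exception already scheduled on the isolate".
template <typename T>
using MaybeHandleSketch = std::optional<T>;

MaybeHandleSketch<int> TransitionSketch(bool valid_transition, int object) {
  if (!valid_transition) {
    // The real code calls isolate->ThrowIllegalOperation() here; the empty
    // return only signals that a throw is pending.
    return std::nullopt;
  }
  return object;  // success: hand the object back unchanged
}
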
static const int kSmiLiteralMinimumLength = 1024;
-Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
+MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> elements) {
@@ -367,23 +349,25 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<JSFunction> constructor(
JSFunction::NativeContextFromLiterals(*literals)->array_function());
+ PretenureFlag pretenure_flag =
+ isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
+
Handle<JSArray> object = Handle<JSArray>::cast(
- isolate->factory()->NewJSObject(
- constructor, isolate->heap()->GetPretenureMode()));
+ isolate->factory()->NewJSObject(constructor, pretenure_flag));
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(elements->get(1)));
- ASSERT(IsFastElementsKind(constant_elements_kind));
- Context* native_context = isolate->context()->native_context();
- Object* maybe_maps_array = native_context->js_array_maps();
- ASSERT(!maybe_maps_array->IsUndefined());
- Object* maybe_map = FixedArray::cast(maybe_maps_array)->get(
- constant_elements_kind);
- ASSERT(maybe_map->IsMap());
- object->set_map(Map::cast(maybe_map));
+ { DisallowHeapAllocation no_gc;
+ ASSERT(IsFastElementsKind(constant_elements_kind));
+ Context* native_context = isolate->context()->native_context();
+ Object* maps_array = native_context->js_array_maps();
+ ASSERT(!maps_array->IsUndefined());
+ Object* map = FixedArray::cast(maps_array)->get(constant_elements_kind);
+ object->set_map(Map::cast(map));
+ }
Handle<FixedArrayBase> copied_elements_values;
if (IsFastDoubleElementsKind(constant_elements_kind)) {
@@ -411,14 +395,15 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
isolate->factory()->CopyFixedArray(fixed_array_values);
copied_elements_values = fixed_array_values_copy;
for (int i = 0; i < fixed_array_values->length(); i++) {
- Object* current = fixed_array_values->get(i);
- if (current->IsFixedArray()) {
+ if (fixed_array_values->get(i)->IsFixedArray()) {
// The value contains the constant_properties of a
// simple object or array literal.
Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
- Handle<Object> result =
- CreateLiteralBoilerplate(isolate, literals, fa);
- if (result.is_null()) return result;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ CreateLiteralBoilerplate(isolate, literals, fa),
+ Object);
fixed_array_values_copy->set(i, *result);
}
}
@@ -434,20 +419,19 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
ElementsKind elements_kind = object->GetElementsKind();
if (!IsFastObjectElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
- CHECK(!TransitionElements(object, FAST_HOLEY_ELEMENTS,
- isolate)->IsFailure());
+ TransitionElements(object, FAST_HOLEY_ELEMENTS, isolate).Check();
} else {
- CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure());
+ TransitionElements(object, FAST_ELEMENTS, isolate).Check();
}
}
}
- object->ValidateElements();
+ JSObject::ValidateElements(object);
return object;
}
-static Handle<Object> CreateLiteralBoilerplate(
+MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> array) {
@@ -471,12 +455,12 @@ static Handle<Object> CreateLiteralBoilerplate(
isolate, literals, elements);
default:
UNREACHABLE();
- return Handle<Object>::null();
+ return MaybeHandle<Object>();
}
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
+RUNTIME_FUNCTION(RuntimeHidden_CreateObjectLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -486,24 +470,29 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
+ RUNTIME_ASSERT(literals_index >= 0 && literals_index < literals->length());
+
// Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(literals->get(literals_index), isolate);
Handle<AllocationSite> site;
Handle<JSObject> boilerplate;
if (*literal_site == isolate->heap()->undefined_value()) {
- Handle<Object> raw_boilerplate = CreateObjectLiteralBoilerplate(
- isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- RETURN_IF_EMPTY_HANDLE(isolate, raw_boilerplate);
+ Handle<Object> raw_boilerplate;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, raw_boilerplate,
+ CreateObjectLiteralBoilerplate(
+ isolate,
+ literals,
+ constant_properties,
+ should_have_fast_elements,
+ has_function_literal));
boilerplate = Handle<JSObject>::cast(raw_boilerplate);
AllocationSiteCreationContext creation_context(isolate);
site = creation_context.EnterNewScope();
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::DeepWalk(boilerplate, &creation_context));
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ JSObject::DeepWalk(boilerplate, &creation_context));
creation_context.ExitScope(site, boilerplate);
// Update the functions literal and return the boilerplate.
@@ -516,14 +505,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
AllocationSiteUsageContext usage_context(isolate, site, true);
usage_context.EnterNewScope();
- Handle<Object> copy = JSObject::DeepCopy(boilerplate, &usage_context);
+ MaybeHandle<Object> maybe_copy = JSObject::DeepCopy(
+ boilerplate, &usage_context);
usage_context.ExitScope(site, boilerplate);
- RETURN_IF_EMPTY_HANDLE(isolate, copy);
+ Handle<Object> copy;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, copy, maybe_copy);
return *copy;
}
-static Handle<AllocationSite> GetLiteralAllocationSite(
+MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
Isolate* isolate,
Handle<FixedArray> literals,
int literals_index,
@@ -533,9 +524,11 @@ static Handle<AllocationSite> GetLiteralAllocationSite(
Handle<AllocationSite> site;
if (*literal_site == isolate->heap()->undefined_value()) {
ASSERT(*elements != isolate->heap()->empty_fixed_array());
- Handle<Object> boilerplate =
- Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return Handle<AllocationSite>::null();
+ Handle<Object> boilerplate;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, boilerplate,
+ Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements),
+ AllocationSite);
AllocationSiteCreationContext creation_context(isolate);
site = creation_context.EnterNewScope();
@@ -554,14 +547,18 @@ static Handle<AllocationSite> GetLiteralAllocationSite(
}
-static MaybeObject* CreateArrayLiteralImpl(Isolate* isolate,
+static MaybeHandle<JSObject> CreateArrayLiteralImpl(Isolate* isolate,
Handle<FixedArray> literals,
int literals_index,
Handle<FixedArray> elements,
int flags) {
- Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals,
- literals_index, elements);
- RETURN_IF_EMPTY_HANDLE(isolate, site);
+ RUNTIME_ASSERT_HANDLIFIED(literals_index >= 0 &&
+ literals_index < literals->length(), JSObject);
+ Handle<AllocationSite> site;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, site,
+ GetLiteralAllocationSite(isolate, literals, literals_index, elements),
+ JSObject);
bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0;
Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()));
@@ -570,15 +567,14 @@ static MaybeObject* CreateArrayLiteralImpl(Isolate* isolate,
JSObject::DeepCopyHints hints = (flags & ArrayLiteral::kShallowElements) == 0
? JSObject::kNoHints
: JSObject::kObjectIsShallowArray;
- Handle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context,
- hints);
+ MaybeHandle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context,
+ hints);
usage_context.ExitScope(site, boilerplate);
- RETURN_IF_EMPTY_HANDLE(isolate, copy);
- return *copy;
+ return copy;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
+RUNTIME_FUNCTION(RuntimeHidden_CreateArrayLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -586,50 +582,83 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
- return CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
- flags);
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
+ flags));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralStubBailout) {
+RUNTIME_FUNCTION(RuntimeHidden_CreateArrayLiteralStubBailout) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
- return CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
- ArrayLiteral::kShallowElements);
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
+ ArrayLiteral::kShallowElements));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateSymbol) {
+RUNTIME_FUNCTION(Runtime_CreateSymbol) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- Handle<Object> name(args[0], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
- Symbol* symbol;
- MaybeObject* maybe = isolate->heap()->AllocateSymbol();
- if (!maybe->To(&symbol)) return maybe;
+ Handle<Symbol> symbol = isolate->factory()->NewSymbol();
if (name->IsString()) symbol->set_name(*name);
- return symbol;
+ return *symbol;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreatePrivateSymbol) {
+RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- Handle<Object> name(args[0], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
- Symbol* symbol;
- MaybeObject* maybe = isolate->heap()->AllocatePrivateSymbol();
- if (!maybe->To(&symbol)) return maybe;
+ Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
if (name->IsString()) symbol->set_name(*name);
- return symbol;
+ return *symbol;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
+RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateSymbol) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<JSObject> registry = isolate->GetSymbolRegistry();
+ Handle<String> part = isolate->factory()->private_intern_string();
+ Handle<Object> privates;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, privates, Object::GetPropertyOrElement(registry, part));
+ Handle<Object> symbol;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, symbol, Object::GetPropertyOrElement(privates, name));
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = isolate->factory()->NewPrivateSymbol();
+ Handle<Symbol>::cast(symbol)->set_name(*name);
+ JSObject::SetProperty(Handle<JSObject>::cast(privates),
+ name, symbol, NONE, STRICT).Assert();
+ }
+ return *symbol;
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewSymbolWrapper) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Symbol, symbol, 0);
+ return *Object::ToObject(isolate, symbol).ToHandleChecked();
+}
+
+
+RUNTIME_FUNCTION(Runtime_SymbolDescription) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
@@ -637,7 +666,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolIsPrivate) {
+RUNTIME_FUNCTION(Runtime_SymbolRegistry) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ return *isolate->GetSymbolRegistry();
+}
+
+
+RUNTIME_FUNCTION(Runtime_SymbolIsPrivate) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
@@ -645,49 +681,47 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolIsPrivate) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_CreateJSProxy) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
- Object* prototype = args[1];
- Object* used_prototype =
- prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
- return isolate->heap()->AllocateJSProxy(handler, used_prototype);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
+ if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value();
+ return *isolate->factory()->NewJSProxy(handler, prototype);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_CreateJSFunctionProxy) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
- Object* call_trap = args[1];
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, call_trap, 1);
RUNTIME_ASSERT(call_trap->IsJSFunction() || call_trap->IsJSFunctionProxy());
- CONVERT_ARG_CHECKED(JSFunction, construct_trap, 2);
- Object* prototype = args[3];
- Object* used_prototype =
- prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
- return isolate->heap()->AllocateJSFunctionProxy(
- handler, call_trap, construct_trap, used_prototype);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, construct_trap, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 3);
+ if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value();
+ return *isolate->factory()->NewJSFunctionProxy(
+ handler, call_trap, construct_trap, prototype);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
+RUNTIME_FUNCTION(Runtime_IsJSProxy) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- Object* obj = args[0];
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSProxy());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) {
+RUNTIME_FUNCTION(Runtime_IsJSFunctionProxy) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- Object* obj = args[0];
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
+RUNTIME_FUNCTION(Runtime_GetHandler) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
@@ -695,7 +729,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
+RUNTIME_FUNCTION(Runtime_GetCallTrap) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
@@ -703,7 +737,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
+RUNTIME_FUNCTION(Runtime_GetConstructTrap) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
@@ -711,7 +745,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
+RUNTIME_FUNCTION(Runtime_Fix) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
@@ -731,8 +765,9 @@ void Runtime::FreeArrayBuffer(Isolate* isolate,
size_t allocated_length = NumberToSize(
isolate, phantom_array_buffer->byte_length());
- isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<int64_t>(allocated_length));
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(
+ -static_cast<int64_t>(allocated_length));
CHECK(V8::ArrayBufferAllocator() != NULL);
V8::ArrayBufferAllocator()->Free(
phantom_array_buffer->backing_store(),
@@ -777,7 +812,7 @@ bool Runtime::SetupArrayBufferAllocatingData(
data = V8::ArrayBufferAllocator()->Allocate(allocated_length);
} else {
data =
- V8::ArrayBufferAllocator()->AllocateUninitialized(allocated_length);
+ V8::ArrayBufferAllocator()->AllocateUninitialized(allocated_length);
}
if (data == NULL) return false;
} else {
@@ -786,47 +821,57 @@ bool Runtime::SetupArrayBufferAllocatingData(
SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length);
- isolate->heap()->AdjustAmountOfExternalAllocatedMemory(allocated_length);
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(allocated_length);
return true;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferInitialize) {
+void Runtime::NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer) {
+ Isolate* isolate = array_buffer->GetIsolate();
+ for (Handle<Object> view_obj(array_buffer->weak_first_view(), isolate);
+ !view_obj->IsUndefined();) {
+ Handle<JSArrayBufferView> view(JSArrayBufferView::cast(*view_obj));
+ if (view->IsJSTypedArray()) {
+ JSTypedArray::cast(*view)->Neuter();
+ } else if (view->IsJSDataView()) {
+ JSDataView::cast(*view)->Neuter();
+ } else {
+ UNREACHABLE();
+ }
+ view_obj = handle(view->weak_next(), isolate);
+ }
+ array_buffer->Neuter();
+}
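
NeuterArrayBuffer walks the buffer's intrusive weak list of views before detaching the buffer itself; freeing the backing store without first neutering every view would leave dangling element pointers. A stand-in sketch of the walk:

// Each view chains to the next through weak_next; neuter them all, then
// the buffer. Stand-in types, not the real JSArrayBufferView hierarchy.
struct ViewSketch {
  ViewSketch* weak_next = nullptr;
  bool neutered = false;
};

void NeuterAllViews(ViewSketch* first_view) {
  for (ViewSketch* view = first_view; view != nullptr; view = view->weak_next) {
    view->neutered = true;  // analogue of JSTypedArray/JSDataView::Neuter()
  }
}
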
+
+
+RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, byteLength, 1);
- size_t allocated_length;
- if (byteLength->IsSmi()) {
- allocated_length = Smi::cast(*byteLength)->value();
- } else {
- ASSERT(byteLength->IsHeapNumber());
- double value = HeapNumber::cast(*byteLength)->value();
-
- ASSERT(value >= 0);
-
- if (value > std::numeric_limits<size_t>::max()) {
- return isolate->Throw(
- *isolate->factory()->NewRangeError("invalid_array_buffer_length",
- HandleVector<Object>(NULL, 0)));
- }
-
- allocated_length = static_cast<size_t>(value);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byteLength, 1);
+ if (!holder->byte_length()->IsUndefined()) {
+ // ArrayBuffer is already initialized; probably a fuzz test.
+ return *holder;
+ }
+ size_t allocated_length = 0;
+ if (!TryNumberToSize(isolate, *byteLength, &allocated_length)) {
+ return isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_array_buffer_length",
+ HandleVector<Object>(NULL, 0)));
}
-
if (!Runtime::SetupArrayBufferAllocatingData(isolate,
holder, allocated_length)) {
- return isolate->Throw(*isolate->factory()->
- NewRangeError("invalid_array_buffer_length",
- HandleVector<Object>(NULL, 0)));
+ return isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_array_buffer_length",
+ HandleVector<Object>(NULL, 0)));
}
-
return *holder;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferGetByteLength) {
+RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
@@ -834,18 +879,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferGetByteLength) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferSliceImpl) {
+RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
- CONVERT_DOUBLE_ARG_CHECKED(first, 2);
- size_t start = static_cast<size_t>(first);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
+ RUNTIME_ASSERT(!source.is_identical_to(target));
+ size_t start = 0;
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *first, &start));
size_t target_length = NumberToSize(isolate, target->byte_length());
if (target_length == 0) return isolate->heap()->undefined_value();
- ASSERT(NumberToSize(isolate, source->byte_length()) - target_length >= start);
+ size_t source_byte_length = NumberToSize(isolate, source->byte_length());
+ RUNTIME_ASSERT(start <= source_byte_length);
+ RUNTIME_ASSERT(source_byte_length - start >= target_length);
uint8_t* source_data = reinterpret_cast<uint8_t*>(source->backing_store());
uint8_t* target_data = reinterpret_cast<uint8_t*>(target->backing_store());
CopyBytes(target_data, source_data + start, target_length);
@@ -853,105 +902,138 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferSliceImpl) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferIsView) {
+RUNTIME_FUNCTION(Runtime_ArrayBufferIsView) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(Object, object, 0);
- return object->IsJSArrayBufferView()
- ? isolate->heap()->true_value()
- : isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean(object->IsJSArrayBufferView());
+}
+
+
+RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
+ if (array_buffer->backing_store() == NULL) {
+ CHECK(Smi::FromInt(0) == array_buffer->byte_length());
+ return isolate->heap()->undefined_value();
+ }
+ ASSERT(!array_buffer->is_external());
+ void* backing_store = array_buffer->backing_store();
+ size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
+ array_buffer->set_is_external(true);
+ Runtime::NeuterArrayBuffer(array_buffer);
+ V8::ArrayBufferAllocator()->Free(backing_store, byte_length);
+ return isolate->heap()->undefined_value();
}
void Runtime::ArrayIdToTypeAndSize(
- int arrayId, ExternalArrayType* array_type, size_t* element_size) {
+ int arrayId,
+ ExternalArrayType* array_type,
+ ElementsKind* external_elements_kind,
+ ElementsKind* fixed_elements_kind,
+ size_t* element_size) {
switch (arrayId) {
- case ARRAY_ID_UINT8:
- *array_type = kExternalUnsignedByteArray;
- *element_size = 1;
- break;
- case ARRAY_ID_INT8:
- *array_type = kExternalByteArray;
- *element_size = 1;
- break;
- case ARRAY_ID_UINT16:
- *array_type = kExternalUnsignedShortArray;
- *element_size = 2;
- break;
- case ARRAY_ID_INT16:
- *array_type = kExternalShortArray;
- *element_size = 2;
- break;
- case ARRAY_ID_UINT32:
- *array_type = kExternalUnsignedIntArray;
- *element_size = 4;
- break;
- case ARRAY_ID_INT32:
- *array_type = kExternalIntArray;
- *element_size = 4;
- break;
- case ARRAY_ID_FLOAT32:
- *array_type = kExternalFloatArray;
- *element_size = 4;
- break;
- case ARRAY_ID_FLOAT64:
- *array_type = kExternalDoubleArray;
- *element_size = 8;
- break;
- case ARRAY_ID_UINT8C:
- *array_type = kExternalPixelArray;
- *element_size = 1;
+#define ARRAY_ID_CASE(Type, type, TYPE, ctype, size) \
+ case ARRAY_ID_##TYPE: \
+ *array_type = kExternal##Type##Array; \
+ *external_elements_kind = EXTERNAL_##TYPE##_ELEMENTS; \
+ *fixed_elements_kind = TYPE##_ELEMENTS; \
+ *element_size = size; \
break;
+
+ TYPED_ARRAYS(ARRAY_ID_CASE)
+#undef ARRAY_ID_CASE
+
default:
UNREACHABLE();
}
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
+RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
CONVERT_SMI_ARG_CHECKED(arrayId, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset_object, 3);
- CONVERT_ARG_HANDLE_CHECKED(Object, byte_length_object, 4);
+ CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset_object, 3);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length_object, 4);
- ASSERT(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
- for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- holder->SetInternalField(i, Smi::FromInt(0));
- }
+ RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
+ arrayId <= Runtime::ARRAY_ID_LAST);
- ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
+ ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ ElementsKind external_elements_kind =
+ EXTERNAL_INT8_ELEMENTS; // Bogus initialization.
+ ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(arrayId,
+ &array_type,
+ &external_elements_kind,
+ &fixed_elements_kind,
+ &element_size);
+ RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
- holder->set_buffer(*buffer);
- holder->set_byte_offset(*byte_offset_object);
- holder->set_byte_length(*byte_length_object);
+ size_t byte_offset = 0;
+ size_t byte_length = 0;
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset_object, &byte_offset));
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length_object, &byte_length));
+
+ if (maybe_buffer->IsJSArrayBuffer()) {
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
+ size_t array_buffer_byte_length =
+ NumberToSize(isolate, buffer->byte_length());
+ RUNTIME_ASSERT(byte_offset <= array_buffer_byte_length);
+ RUNTIME_ASSERT(array_buffer_byte_length - byte_offset >= byte_length);
+ } else {
+ RUNTIME_ASSERT(maybe_buffer->IsNull());
+ }
- size_t byte_offset = NumberToSize(isolate, *byte_offset_object);
- size_t byte_length = NumberToSize(isolate, *byte_length_object);
- ASSERT(byte_length % element_size == 0);
+ RUNTIME_ASSERT(byte_length % element_size == 0);
size_t length = byte_length / element_size;
if (length > static_cast<unsigned>(Smi::kMaxValue)) {
- return isolate->Throw(*isolate->factory()->
- NewRangeError("invalid_typed_array_length",
- HandleVector<Object>(NULL, 0)));
+ return isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_typed_array_length",
+ HandleVector<Object>(NULL, 0)));
}
+ // All checks are done, now we can modify objects.
+
+ ASSERT(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
- holder->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*holder);
+ holder->set_byte_offset(*byte_offset_object);
+ holder->set_byte_length(*byte_length_object);
- Handle<ExternalArray> elements =
- isolate->factory()->NewExternalArray(
- static_cast<int>(length), array_type,
- static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- holder->set_elements(*elements);
+ if (!maybe_buffer->IsNull()) {
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
+ holder->set_buffer(*buffer);
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ Handle<ExternalArray> elements =
+ isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ Handle<Map> map =
+ JSObject::GetElementsTransitionMap(holder, external_elements_kind);
+ JSObject::SetMapAndElements(holder, map, elements);
+ ASSERT(IsExternalArrayElementsKind(holder->map()->elements_kind()));
+ } else {
+ holder->set_buffer(Smi::FromInt(0));
+ holder->set_weak_next(isolate->heap()->undefined_value());
+ Handle<FixedTypedArrayBase> elements =
+ isolate->factory()->NewFixedTypedArray(
+ static_cast<int>(length), array_type);
+ holder->set_elements(*elements);
+ }
return isolate->heap()->undefined_value();
}
@@ -961,30 +1043,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
// initializes backing store using memmove.
//
// Returns true if backing store was initialized or false otherwise.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
+RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
CONVERT_SMI_ARG_CHECKED(arrayId, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, source, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, length_obj, 3);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 3);
- ASSERT(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
- for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- holder->SetInternalField(i, Smi::FromInt(0));
- }
+ RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
+ arrayId <= Runtime::ARRAY_ID_LAST);
- ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
+ ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ ElementsKind external_elements_kind =
+      EXTERNAL_INT8_ELEMENTS; // Bogus initialization.
+ ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(arrayId,
+ &array_type,
+ &external_elements_kind,
+ &fixed_elements_kind,
+ &element_size);
+
+ RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
if (source->IsJSTypedArray() &&
JSTypedArray::cast(*source)->type() == array_type) {
length_obj = Handle<Object>(JSTypedArray::cast(*source)->length(), isolate);
}
- size_t length = NumberToSize(isolate, *length_obj);
+ size_t length = 0;
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *length_obj, &length));
if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
(length > (kMaxInt / element_size))) {
@@ -994,6 +1083,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
}
size_t byte_length = length * element_size;
+ ASSERT(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
+
// NOTE: not initializing backing store.
// We assume that the caller of this function will initialize holder
// with the loop
@@ -1030,7 +1125,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
static_cast<uint8_t*>(buffer->backing_store()));
- holder->set_elements(*elements);
+ Handle<Map> map = JSObject::GetElementsTransitionMap(
+ holder, external_elements_kind);
+ JSObject::SetMapAndElements(holder, map, elements);
if (source->IsJSTypedArray()) {
Handle<JSTypedArray> typed_array(JSTypedArray::cast(*source));
@@ -1038,41 +1135,43 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
if (typed_array->type() == holder->type()) {
uint8_t* backing_store =
static_cast<uint8_t*>(
- JSArrayBuffer::cast(typed_array->buffer())->backing_store());
+ typed_array->GetBuffer()->backing_store());
size_t source_byte_offset =
NumberToSize(isolate, typed_array->byte_offset());
memcpy(
buffer->backing_store(),
backing_store + source_byte_offset,
byte_length);
- return *isolate->factory()->true_value();
- } else {
- return *isolate->factory()->false_value();
+ return isolate->heap()->true_value();
}
}
- return *isolate->factory()->false_value();
+ return isolate->heap()->false_value();
}
-#define TYPED_ARRAY_GETTER(getter, accessor) \
- RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGet##getter) { \
+#define BUFFER_VIEW_GETTER(Type, getter, accessor) \
+ RUNTIME_FUNCTION(Runtime_##Type##Get##getter) { \
HandleScope scope(isolate); \
ASSERT(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(Object, holder, 0); \
- if (!holder->IsJSTypedArray()) \
- return isolate->Throw(*isolate->factory()->NewTypeError( \
- "not_typed_array", HandleVector<Object>(NULL, 0))); \
- Handle<JSTypedArray> typed_array(JSTypedArray::cast(*holder)); \
- return typed_array->accessor(); \
+ CONVERT_ARG_HANDLE_CHECKED(JS##Type, holder, 0); \
+ return holder->accessor(); \
}
-TYPED_ARRAY_GETTER(Buffer, buffer)
-TYPED_ARRAY_GETTER(ByteLength, byte_length)
-TYPED_ARRAY_GETTER(ByteOffset, byte_offset)
-TYPED_ARRAY_GETTER(Length, length)
+BUFFER_VIEW_GETTER(ArrayBufferView, ByteLength, byte_length)
+BUFFER_VIEW_GETTER(ArrayBufferView, ByteOffset, byte_offset)
+BUFFER_VIEW_GETTER(TypedArray, Length, length)
+BUFFER_VIEW_GETTER(DataView, Buffer, buffer)
+
+#undef BUFFER_VIEW_GETTER
+
+RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ return *holder->GetBuffer();
+}
-#undef TYPED_ARRAY_GETTER
// Return codes for Runtime_TypedArraySetFastCases.
// Should be synchronized with typedarray.js natives.
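
The fast path below chooses between memmove for same-typed arrays, an overlap bailout, and element-wise conversion. The overlap test itself is the classic half-open interval check; a stand-in sketch (comparing pointers across distinct allocations is implementation-defined, which is tolerable here because the interesting case is two views over one backing store):

#include <cstddef>
#include <cstdint>

// True when [a, a + a_len) and [b, b + b_len) share at least one byte.
bool RangesOverlap(const uint8_t* a, size_t a_len,
                   const uint8_t* b, size_t b_len) {
  return a < b + b_len && b < a + a_len;
}
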
@@ -1089,22 +1188,24 @@ enum TypedArraySetResultCodes {
};
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
+RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(Object, target_obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, source_obj, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, offset_obj, 2);
-
- if (!target_obj->IsJSTypedArray())
+ ASSERT(args.length() == 3);
+ if (!args[0]->IsJSTypedArray())
return isolate->Throw(*isolate->factory()->NewTypeError(
"not_typed_array", HandleVector<Object>(NULL, 0)));
- if (!source_obj->IsJSTypedArray())
+ if (!args[1]->IsJSTypedArray())
return Smi::FromInt(TYPED_ARRAY_SET_NON_TYPED_ARRAY);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, source_obj, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset_obj, 2);
+
Handle<JSTypedArray> target(JSTypedArray::cast(*target_obj));
Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj));
- size_t offset = NumberToSize(isolate, *offset_obj);
+ size_t offset = 0;
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *offset_obj, &offset));
size_t target_length = NumberToSize(isolate, target->length());
size_t source_length = NumberToSize(isolate, source->length());
size_t target_byte_length = NumberToSize(isolate, target->byte_length());
@@ -1119,10 +1220,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
size_t source_offset = NumberToSize(isolate, source->byte_offset());
uint8_t* target_base =
static_cast<uint8_t*>(
- JSArrayBuffer::cast(target->buffer())->backing_store()) + target_offset;
+ target->GetBuffer()->backing_store()) + target_offset;
uint8_t* source_base =
static_cast<uint8_t*>(
- JSArrayBuffer::cast(source->buffer())->backing_store()) + source_offset;
+ source->GetBuffer()->backing_store()) + source_offset;
// Typed arrays of the same type: use memmove.
if (target->type() == source->type()) {
@@ -1138,8 +1239,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
target_base + target_byte_length > source_base)) {
// We do not support overlapping ArrayBuffers
ASSERT(
- JSArrayBuffer::cast(target->buffer())->backing_store() ==
- JSArrayBuffer::cast(source->buffer())->backing_store());
+ target->GetBuffer()->backing_store() ==
+ source->GetBuffer()->backing_store());
return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING);
} else { // Non-overlapping typed arrays
return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING);
@@ -1147,28 +1248,44 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewInitialize) {
+RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) {
+ ASSERT(args.length() == 0);
+ ASSERT_OBJECT_SIZE(
+ FLAG_typed_array_max_size_in_heap + FixedTypedArrayBase::kDataOffset);
+ return Smi::FromInt(FLAG_typed_array_max_size_in_heap);
+}
+
+
+RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, byte_length, 3);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset, 2);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length, 3);
ASSERT(holder->GetInternalFieldCount() ==
v8::ArrayBufferView::kInternalFieldCount);
for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
holder->SetInternalField(i, Smi::FromInt(0));
}
+ size_t buffer_length = 0;
+ size_t offset = 0;
+ size_t length = 0;
+ RUNTIME_ASSERT(
+ TryNumberToSize(isolate, buffer->byte_length(), &buffer_length));
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset, &offset));
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length, &length));
+
+ // TODO(jkummerow): When we have a "safe numerics" helper class, use it here.
+ // Entire range [offset, offset + length] must be in bounds.
+ RUNTIME_ASSERT(offset <= buffer_length);
+ RUNTIME_ASSERT(offset + length <= buffer_length);
+ // No overflow.
+ RUNTIME_ASSERT(offset + length >= offset);
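
The three RUNTIME_ASSERTs above pin down the whole [offset, offset + length] range, including the size_t wraparound case the TODO mentions. An equivalent overflow-safe formulation that never computes offset + length at all, as a standalone sketch:

#include <cstddef>

// Equivalent to: offset <= buffer_length && offset + length <= buffer_length
// && offset + length >= offset, but with no chance of wraparound, because
// buffer_length - offset cannot underflow once the first test passes.
bool RangeInBounds(size_t offset, size_t length, size_t buffer_length) {
  return offset <= buffer_length && length <= buffer_length - offset;
}
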
holder->set_buffer(*buffer);
- ASSERT(byte_offset->IsNumber());
- ASSERT(
- NumberToSize(isolate, buffer->byte_length()) >=
- NumberToSize(isolate, *byte_offset)
- + NumberToSize(isolate, *byte_length));
holder->set_byte_offset(*byte_offset);
- ASSERT(byte_length->IsNumber());
holder->set_byte_length(*byte_length);
holder->set_weak_next(buffer->weak_first_view());
@@ -1178,30 +1295,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewInitialize) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetBuffer) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
- return data_view->buffer();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteOffset) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
- return data_view->byte_offset();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteLength) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
- return data_view->byte_length();
-}
-
-
inline static bool NeedToFlipBytes(bool is_little_endian) {
#ifdef V8_TARGET_LITTLE_ENDIAN
return !is_little_endian;
@@ -1317,16 +1410,16 @@ static bool DataViewSetValue(
#define DATA_VIEW_GETTER(TypeName, Type, Converter) \
- RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGet##TypeName) { \
+ RUNTIME_FUNCTION(Runtime_DataViewGet##TypeName) { \
HandleScope scope(isolate); \
ASSERT(args.length() == 3); \
CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
- CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1); \
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \
CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2); \
Type result; \
if (DataViewGetValue( \
isolate, holder, offset, is_little_endian, &result)) { \
- return isolate->heap()->Converter(result); \
+ return *isolate->factory()->Converter(result); \
} else { \
return isolate->Throw(*isolate->factory()->NewRangeError( \
"invalid_data_view_accessor_offset", \
@@ -1334,14 +1427,14 @@ static bool DataViewSetValue(
} \
}
-DATA_VIEW_GETTER(Uint8, uint8_t, NumberFromUint32)
-DATA_VIEW_GETTER(Int8, int8_t, NumberFromInt32)
-DATA_VIEW_GETTER(Uint16, uint16_t, NumberFromUint32)
-DATA_VIEW_GETTER(Int16, int16_t, NumberFromInt32)
-DATA_VIEW_GETTER(Uint32, uint32_t, NumberFromUint32)
-DATA_VIEW_GETTER(Int32, int32_t, NumberFromInt32)
-DATA_VIEW_GETTER(Float32, float, NumberFromDouble)
-DATA_VIEW_GETTER(Float64, double, NumberFromDouble)
+DATA_VIEW_GETTER(Uint8, uint8_t, NewNumberFromUint)
+DATA_VIEW_GETTER(Int8, int8_t, NewNumberFromInt)
+DATA_VIEW_GETTER(Uint16, uint16_t, NewNumberFromUint)
+DATA_VIEW_GETTER(Int16, int16_t, NewNumberFromInt)
+DATA_VIEW_GETTER(Uint32, uint32_t, NewNumberFromUint)
+DATA_VIEW_GETTER(Int32, int32_t, NewNumberFromInt)
+DATA_VIEW_GETTER(Float32, float, NewNumber)
+DATA_VIEW_GETTER(Float64, double, NewNumber)
#undef DATA_VIEW_GETTER
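
Each getter funnels through DataViewGetValue, which flips bytes when the requested endianness disagrees with the target's (see NeedToFlipBytes above) and then boxes the result with the factory's NewNumber* helpers. A stand-in sketch of the flip step, not the V8 routine:

#include <algorithm>
#include <cstdint>

// Reverse the value's bytes in place; applied only when the target
// endianness and the requested endianness differ.
template <typename T>
T FlipBytes(T value) {
  uint8_t* bytes = reinterpret_cast<uint8_t*>(&value);
  std::reverse(bytes, bytes + sizeof(T));
  return value;
}
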
@@ -1399,12 +1492,12 @@ double DataViewConvertValue<double>(double value) {
#define DATA_VIEW_SETTER(TypeName, Type) \
- RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewSet##TypeName) { \
+ RUNTIME_FUNCTION(Runtime_DataViewSet##TypeName) { \
HandleScope scope(isolate); \
ASSERT(args.length() == 4); \
CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
- CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1); \
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); \
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); \
CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3); \
Type v = DataViewConvertValue<Type>(value->Number()); \
if (DataViewSetValue( \
@@ -1429,208 +1522,288 @@ DATA_VIEW_SETTER(Float64, double)
#undef DATA_VIEW_SETTER
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
+RUNTIME_FUNCTION(Runtime_SetInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<ObjectHashSet> table = isolate->factory()->NewObjectHashSet(0);
+ Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
holder->set_table(*table);
return *holder;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
+RUNTIME_FUNCTION(Runtime_SetAdd) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1], isolate);
- Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- table = ObjectHashSet::Add(table, key);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
+ table = OrderedHashSet::Add(table, key);
holder->set_table(*table);
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) {
+RUNTIME_FUNCTION(Runtime_SetHas) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1], isolate);
- Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- return isolate->heap()->ToBoolean(table->Contains(*key));
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
+ return isolate->heap()->ToBoolean(table->Contains(key));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
+RUNTIME_FUNCTION(Runtime_SetDelete) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1], isolate);
- Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- table = ObjectHashSet::Remove(table, key);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
+ bool was_present = false;
+ table = OrderedHashSet::Remove(table, key, &was_present);
+ holder->set_table(*table);
+ return isolate->heap()->ToBoolean(was_present);
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetClear) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
+ table = OrderedHashSet::Clear(table);
holder->set_table(*table);
return isolate->heap()->undefined_value();
}
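
The Set runtime entries now back JSSet with OrderedHashSet instead of ObjectHashSet, and Remove reports was_present so SetDelete can return the spec-mandated boolean. The observable difference is iteration in insertion order; a self-contained sketch of that property using the standard library (removal omitted for brevity):

#include <string>
#include <unordered_set>
#include <vector>

// Keys hash for O(1) membership, while a side vector remembers first-insertion
// order for iteration, which a plain hash table does not guarantee.
class OrderedSetSketch {
 public:
  bool Add(const std::string& key) {
    if (!seen_.insert(key).second) return false;  // already present
    order_.push_back(key);
    return true;
  }
  const std::vector<std::string>& InInsertionOrder() const { return order_; }

 private:
  std::unordered_set<std::string> seen_;
  std::vector<std::string> order_;
};
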
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetGetSize) {
+RUNTIME_FUNCTION(Runtime_SetGetSize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
return Smi::FromInt(table->NumberOfElements());
}
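
The Set functions above all route through OrderedHashSet, which (unlike the old ObjectHashSet) iterates in insertion order, and whose Remove reports whether the key was present; that boolean is exactly what Runtime_SetDelete now returns. A self-contained sketch of that contract (illustrative C++ that assumes non-empty string keys; this is not V8's OrderedHashSet):

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

class OrderedSet {
 public:
  bool Add(const std::string& key) {   // keys assumed non-empty here
    if (index_.count(key)) return false;
    index_[key] = order_.size();
    order_.push_back(key);
    return true;
  }
  bool Has(const std::string& key) const { return index_.count(key) != 0; }
  bool Remove(const std::string& key) {  // returns was_present
    auto it = index_.find(key);
    if (it == index_.end()) return false;
    order_[it->second].clear();          // tombstone, skipped on iteration
    index_.erase(it);
    return true;
  }
  void Clear() { index_.clear(); order_.clear(); }
  size_t size() const { return index_.size(); }
  // Iteration visits surviving keys in insertion order.
  void ForEach(void (*f)(const std::string&)) const {
    for (const auto& k : order_) if (!k.empty()) f(k);
  }
 private:
  std::unordered_map<std::string, size_t> index_;
  std::vector<std::string> order_;
};

int main() {
  OrderedSet s;
  s.Add("b"); s.Add("a"); s.Add("c");
  std::printf("was_present=%d size=%zu\n", s.Remove("a"), s.size());
  s.ForEach([](const std::string& k) { std::printf("%s\n", k.c_str()); });
}
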
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) {
+RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1);
+  CONVERT_SMI_ARG_CHECKED(kind, 2);

+ RUNTIME_ASSERT(kind == JSSetIterator::kKindValues ||
+ kind == JSSetIterator::kKindEntries);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
+ holder->set_table(*table);
+ holder->set_index(Smi::FromInt(0));
+ holder->set_kind(Smi::FromInt(kind));
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
+ return *JSSetIterator::Next(holder);
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
+ Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
holder->set_table(*table);
return *holder;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
+RUNTIME_FUNCTION(Runtime_MapGet) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(key), isolate);
return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapHas) {
+RUNTIME_FUNCTION(Runtime_MapHas) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(key), isolate);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) {
+RUNTIME_FUNCTION(Runtime_MapDelete) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
- Handle<ObjectHashTable> new_table =
- ObjectHashTable::Put(table, key, isolate->factory()->the_hole_value());
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ bool was_present = false;
+ Handle<OrderedHashMap> new_table =
+ OrderedHashMap::Remove(table, key, &was_present);
holder->set_table(*new_table);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+ return isolate->heap()->ToBoolean(was_present);
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapClear) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ table = OrderedHashMap::Clear(table);
+ holder->set_table(*table);
+ return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
+RUNTIME_FUNCTION(Runtime_MapSet) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ Handle<OrderedHashMap> new_table = OrderedHashMap::Put(table, key, value);
holder->set_table(*new_table);
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGetSize) {
+RUNTIME_FUNCTION(Runtime_MapGetSize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
return Smi::FromInt(table->NumberOfElements());
}
-static JSWeakCollection* WeakCollectionInitialize(Isolate* isolate,
+RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1);
+  CONVERT_SMI_ARG_CHECKED(kind, 2);
+  RUNTIME_ASSERT(kind == JSMapIterator::kKindKeys ||
+                 kind == JSMapIterator::kKindValues ||
+                 kind == JSMapIterator::kKindEntries);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
+ holder->set_table(*table);
+ holder->set_index(Smi::FromInt(0));
+ holder->set_kind(Smi::FromInt(kind));
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
+ return *JSMapIterator::Next(holder);
+}
+
+
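Both iterator pairs above share one shape: Initialize pins the backing table on the iterator together with a resumable index and a kind, and Next advances the index, yielding keys, values, or entries according to the kind. A rough standalone sketch (C++17; simplified, since the real iterators must also cope with the table being mutated mid-iteration, which is omitted here):

#include <cstdio>
#include <optional>
#include <string>
#include <utility>
#include <vector>

enum Kind { kKindKeys = 1, kKindValues = 2, kKindEntries = 3 };

struct MapIterator {
  const std::vector<std::pair<std::string, int>>* table;
  size_t index = 0;
  Kind kind;

  std::optional<std::string> Next() {
    if (index >= table->size()) return std::nullopt;  // iteration done
    const auto& [key, value] = (*table)[index++];
    switch (kind) {
      case kKindKeys:    return key;
      case kKindValues:  return std::to_string(value);
      case kKindEntries: return "[" + key + ", " + std::to_string(value) + "]";
    }
    return std::nullopt;
  }
};

int main() {
  std::vector<std::pair<std::string, int>> table = {{"a", 1}, {"b", 2}};
  MapIterator it{&table, 0, kKindEntries};
  while (auto next = it.Next()) std::printf("%s\n", next->c_str());
}
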
+static Handle<JSWeakCollection> WeakCollectionInitialize(
+ Isolate* isolate,
Handle<JSWeakCollection> weak_collection) {
ASSERT(weak_collection->map()->inobject_properties() == 0);
- Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
+ Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
weak_collection->set_table(*table);
- weak_collection->set_next(Smi::FromInt(0));
- return *weak_collection;
+ return weak_collection;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionInitialize) {
+RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- return WeakCollectionInitialize(isolate, weak_collection);
+ return *WeakCollectionInitialize(isolate, weak_collection);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionGet) {
+RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
+ RUNTIME_ASSERT(table->IsKey(*key));
+ Handle<Object> lookup(table->Lookup(key), isolate);
return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionHas) {
+RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
+ RUNTIME_ASSERT(table->IsKey(*key));
+ Handle<Object> lookup(table->Lookup(key), isolate);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionDelete) {
+RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(ObjectHashTable::cast(
weak_collection->table()));
- Handle<Object> lookup(table->Lookup(*key), isolate);
+ RUNTIME_ASSERT(table->IsKey(*key));
+ bool was_present = false;
Handle<ObjectHashTable> new_table =
- ObjectHashTable::Put(table, key, isolate->factory()->the_hole_value());
+ ObjectHashTable::Remove(table, key, &was_present);
weak_collection->set_table(*new_table);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+ return isolate->heap()->ToBoolean(was_present);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionSet) {
+RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<Object> value(args[2], isolate);
+ RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
+ RUNTIME_ASSERT(table->IsKey(*key));
Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
weak_collection->set_table(*new_table);
return isolate->heap()->undefined_value();
}
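
The RUNTIME_ASSERTs added above enforce that weak collection keys are objects or symbols, i.e. values with identity that can be held weakly. As a loose standard-C++ analogy (not how V8's ObjectHashTable works; Obj and the sweep loop below are invented for illustration), a map with weakly held keys behaves like:

#include <cstdio>
#include <map>
#include <memory>
#include <string>

struct Obj { int id; };
using WeakKeyMap = std::map<std::weak_ptr<Obj>, std::string,
                            std::owner_less<std::weak_ptr<Obj>>>;

int main() {
  WeakKeyMap wm;
  auto key = std::make_shared<Obj>(Obj{1});
  wm[key] = "payload";
  std::printf("has=%d\n", wm.count(key) != 0);  // has=1
  key.reset();  // drop the only strong reference to the key
  // Lookups can no longer reach the value; a sweep drops dead entries.
  for (auto it = wm.begin(); it != wm.end();) {
    if (it->first.expired()) it = wm.erase(it); else ++it;
  }
  std::printf("size=%zu\n", wm.size());         // size=0
}
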
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
+RUNTIME_FUNCTION(Runtime_ClassOf) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- Object* obj = args[0];
+ CONVERT_ARG_CHECKED(Object, obj, 0);
if (!obj->IsJSObject()) return isolate->heap()->null_value();
return JSObject::cast(obj)->class_name();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
+RUNTIME_FUNCTION(Runtime_GetPrototype) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
@@ -1638,45 +1811,52 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
do {
if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccessWrapper(Handle<JSObject>::cast(obj),
- isolate->factory()->proto_string(),
- v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(JSObject::cast(*obj), v8::ACCESS_GET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ !isolate->MayNamedAccess(Handle<JSObject>::cast(obj),
+ isolate->factory()->proto_string(),
+ v8::ACCESS_GET)) {
+ isolate->ReportFailedAccessCheck(Handle<JSObject>::cast(obj),
+ v8::ACCESS_GET);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
- obj = handle(obj->GetPrototype(isolate), isolate);
+ obj = Object::GetPrototype(isolate, obj);
} while (obj->IsJSObject() &&
JSObject::cast(*obj)->map()->is_hidden_prototype());
return *obj;
}
-static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
- Object* receiver) {
- Object* current = receiver->GetPrototype(isolate);
+static inline Handle<Object> GetPrototypeSkipHiddenPrototypes(
+ Isolate* isolate, Handle<Object> receiver) {
+ Handle<Object> current = Object::GetPrototype(isolate, receiver);
while (current->IsJSObject() &&
- JSObject::cast(current)->map()->is_hidden_prototype()) {
- current = current->GetPrototype(isolate);
+ JSObject::cast(*current)->map()->is_hidden_prototype()) {
+ current = Object::GetPrototype(isolate, current);
}
return current;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
+RUNTIME_FUNCTION(Runtime_SetPrototype) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- if (FLAG_harmony_observation && obj->map()->is_observed()) {
- Handle<Object> old_value(
- GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
-
- Handle<Object> result = JSObject::SetPrototype(obj, prototype, true);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ if (obj->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(
+ obj, isolate->factory()->proto_string(), v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(obj, v8::ACCESS_SET);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ return isolate->heap()->undefined_value();
+ }
+ if (obj->map()->is_observed()) {
+ Handle<Object> old_value = GetPrototypeSkipHiddenPrototypes(isolate, obj);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::SetPrototype(obj, prototype, true));
- Handle<Object> new_value(
- GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
+ Handle<Object> new_value = GetPrototypeSkipHiddenPrototypes(isolate, obj);
if (!new_value->SameValue(*old_value)) {
JSObject::EnqueueChangeRecord(obj, "setPrototype",
isolate->factory()->proto_string(),
@@ -1684,22 +1864,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
}
return *result;
}
- Handle<Object> result = JSObject::SetPrototype(obj, prototype, true);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::SetPrototype(obj, prototype, true));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_IsInPrototypeChain) {
+ HandleScope shs(isolate);
ASSERT(args.length() == 2);
// See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
- Object* O = args[0];
- Object* V = args[1];
+ CONVERT_ARG_HANDLE_CHECKED(Object, O, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, V, 1);
while (true) {
- Object* prototype = V->GetPrototype(isolate);
+ Handle<Object> prototype = Object::GetPrototype(isolate, V);
if (prototype->IsNull()) return isolate->heap()->false_value();
- if (O == prototype) return isolate->heap()->true_value();
+ if (*O == *prototype) return isolate->heap()->true_value();
V = prototype;
}
}
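
The loop is the usual chain walk: follow V's prototype links until null, answering whether O occurs along the way. The handlified rewrite keeps the same shape; a minimal standalone rendering of the logic (nullptr plays the role of null):

#include <cstdio>

struct Object { const Object* prototype; };

bool IsInPrototypeChain(const Object* O, const Object* V) {
  for (const Object* p = V->prototype; p != nullptr; p = p->prototype) {
    if (p == O) return true;
  }
  return false;
}

int main() {
  Object base{nullptr}, mid{&base}, leaf{&mid};
  std::printf("%d %d\n", IsInPrototypeChain(&base, &leaf),   // 1
                         IsInPrototypeChain(&leaf, &base));  // 0
}
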
@@ -1708,6 +1890,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
static bool CheckAccessException(Object* callback,
v8::AccessType access_type) {
DisallowHeapAllocation no_gc;
+ ASSERT(!callback->IsForeign());
if (callback->IsAccessorInfo()) {
AccessorInfo* info = AccessorInfo::cast(callback);
return
@@ -1763,22 +1946,22 @@ static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
if (name->AsArrayIndex(&index)) {
    // TODO(1095): we should traverse hidden prototype hierarchy as well.
if (CheckGenericAccess(
- obj, obj, index, access_type, &Isolate::MayIndexedAccessWrapper)) {
+ obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
return ACCESS_ALLOWED;
}
- obj->GetIsolate()->ReportFailedAccessCheck(*obj, access_type);
+ obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
return ACCESS_FORBIDDEN;
}
Isolate* isolate = obj->GetIsolate();
LookupResult lookup(isolate);
- obj->LocalLookup(*name, &lookup, true);
+ obj->LookupOwn(name, &lookup, true);
if (!lookup.IsProperty()) return ACCESS_ABSENT;
Handle<JSObject> holder(lookup.holder(), isolate);
if (CheckGenericAccess<Handle<Object> >(
- obj, holder, name, access_type, &Isolate::MayNamedAccessWrapper)) {
+ obj, holder, name, access_type, &Isolate::MayNamedAccess)) {
return ACCESS_ALLOWED;
}
@@ -1795,7 +1978,7 @@ static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
case INTERCEPTOR:
// If the object has an interceptor, try real named properties.
// Overwrite the result to fetch the correct property later.
- holder->LookupRealNamedProperty(*name, &lookup);
+ holder->LookupRealNamedProperty(name, &lookup);
if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) {
if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
return ACCESS_ALLOWED;
@@ -1806,7 +1989,7 @@ static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
break;
}
- isolate->ReportFailedAccessCheck(*obj, access_type);
+ isolate->ReportFailedAccessCheck(obj, access_type);
return ACCESS_FORBIDDEN;
}
@@ -1824,40 +2007,43 @@ enum PropertyDescriptorIndices {
};
-static Handle<Object> GetOwnProperty(Isolate* isolate,
- Handle<JSObject> obj,
- Handle<Name> name) {
+MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
+ Handle<JSObject> obj,
+ Handle<Name> name) {
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
// Due to some WebKit tests, we want to make sure that we do not log
// more than one access failure here.
AccessCheckResult access_check_result =
CheckPropertyAccess(obj, name, v8::ACCESS_HAS);
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
switch (access_check_result) {
case ACCESS_FORBIDDEN: return factory->false_value();
case ACCESS_ALLOWED: break;
case ACCESS_ABSENT: return factory->undefined_value();
}
- PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
+ PropertyAttributes attrs = JSReceiver::GetOwnPropertyAttributes(obj, name);
if (attrs == ABSENT) {
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->undefined_value();
}
ASSERT(!isolate->has_scheduled_exception());
- AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name);
- Handle<AccessorPair> accessors(raw_accessors, isolate);
+ Handle<AccessorPair> accessors;
+ bool has_accessors =
+ JSObject::GetOwnPropertyAccessorPair(obj, name).ToHandle(&accessors);
Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
- elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(raw_accessors != NULL));
+ elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(has_accessors));
- if (raw_accessors == NULL) {
+ if (!has_accessors) {
elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
- // GetProperty does access check.
- Handle<Object> value = GetProperty(isolate, obj, name);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<Object>::null());
+ // Runtime::GetObjectProperty does access check.
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, Runtime::GetObjectProperty(isolate, obj, name),
+ Object);
elms->set(VALUE_INDEX, *value);
} else {
// Access checks are performed for both accessors separately.
@@ -1869,14 +2055,14 @@ static Handle<Object> GetOwnProperty(Isolate* isolate,
ASSERT(!isolate->has_scheduled_exception());
elms->set(GETTER_INDEX, *getter);
} else {
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
if (!setter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_SET)) {
ASSERT(!isolate->has_scheduled_exception());
elms->set(SETTER_INDEX, *setter);
} else {
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
}
@@ -1891,28 +2077,30 @@ static Handle<Object> GetOwnProperty(Isolate* isolate,
// [false, value, Writeable, Enumerable, Configurable]
// if args[1] is an accessor on args[0]
// [true, GetFunction, SetFunction, Enumerable, Configurable]
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
+RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- Handle<Object> result = GetOwnProperty(isolate, obj, name);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, GetOwnProperty(isolate, obj, name));
return *result;
}
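
GetOwnProperty packs the descriptor into a flat FixedArray addressed by the PropertyDescriptorIndices enum; the enum's entries are elided in this hunk, so the ordering in this sketch is a guess, kept only to show the packing idea:

#include <cstdio>

enum PropertyDescriptorIndices {  // order assumed for illustration
  IS_ACCESSOR_INDEX,
  VALUE_INDEX,
  GETTER_INDEX,
  SETTER_INDEX,
  WRITABLE_INDEX,
  ENUMERABLE_INDEX,
  CONFIGURABLE_INDEX,
  DESCRIPTOR_SIZE
};

int main() {
  // Pack a data property {value: 42, writable: true, enumerable: false,
  // configurable: true}; the accessor slots stay unused for data properties.
  int elms[DESCRIPTOR_SIZE] = {0};
  elms[IS_ACCESSOR_INDEX] = 0;
  elms[VALUE_INDEX] = 42;
  elms[WRITABLE_INDEX] = 1;
  elms[ENUMERABLE_INDEX] = 0;
  elms[CONFIGURABLE_INDEX] = 1;
  std::printf("accessor=%d value=%d writable=%d\n", elms[IS_ACCESSOR_INDEX],
              elms[VALUE_INDEX], elms[WRITABLE_INDEX]);
}
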
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
+RUNTIME_FUNCTION(Runtime_PreventExtensions) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- Handle<Object> result = JSObject::PreventExtensions(obj);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSObject::PreventExtensions(obj));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
+RUNTIME_FUNCTION(Runtime_IsExtensible) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -1926,44 +2114,46 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
+RUNTIME_FUNCTION(Runtime_RegExpCompile) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, RegExpImpl::Compile(re, pattern, flags));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
+RUNTIME_FUNCTION(Runtime_CreateApiFunction) {
HandleScope scope(isolate);
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(FunctionTemplateInfo, data, 0);
- return *isolate->factory()->CreateApiFunction(data);
+ CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
+ return *isolate->factory()->CreateApiFunction(data, prototype);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
+RUNTIME_FUNCTION(Runtime_IsTemplate) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- Object* arg = args[0];
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg, 0);
bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
return isolate->heap()->ToBoolean(result);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
+RUNTIME_FUNCTION(Runtime_GetTemplateField) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(HeapObject, templ, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1)
+ CONVERT_SMI_ARG_CHECKED(index, 1);
int offset = index * kPointerSize + HeapObject::kHeaderSize;
InstanceType type = templ->map()->instance_type();
- RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
- type == OBJECT_TEMPLATE_INFO_TYPE);
+ RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
+ type == OBJECT_TEMPLATE_INFO_TYPE);
RUNTIME_ASSERT(offset > 0);
if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize);
@@ -1974,38 +2164,40 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_DisableAccessChecks) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(HeapObject, object, 0);
- Map* old_map = object->map();
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0);
+ Handle<Map> old_map(object->map());
bool needs_access_checks = old_map->is_access_check_needed();
if (needs_access_checks) {
    // Copy the map so it won't interfere with the constructor's initial map.
- Map* new_map;
- MaybeObject* maybe_new_map = old_map->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
+ Handle<Map> new_map = Map::Copy(old_map);
new_map->set_is_access_check_needed(false);
- object->set_map(new_map);
+ if (object->IsJSObject()) {
+ JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
+ } else {
+ object->set_map(*new_map);
+ }
}
return isolate->heap()->ToBoolean(needs_access_checks);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_EnableAccessChecks) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(HeapObject, object, 0);
- Map* old_map = object->map();
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0);
+ Handle<Map> old_map(object->map());
if (!old_map->is_access_check_needed()) {
    // Copy the map so it won't interfere with the constructor's initial map.
- Map* new_map;
- MaybeObject* maybe_new_map = old_map->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
+ Handle<Map> new_map = Map::Copy(old_map);
new_map->set_is_access_check_needed(true);
- object->set_map(new_map);
+ if (object->IsJSObject()) {
+ JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
+ } else {
+ object->set_map(*new_map);
+ }
}
return isolate->heap()->undefined_value();
}
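
Both functions above follow the same pattern: because maps are shared between objects, a per-object flag change first copies the map and flips the bit on the copy. A loose analogy in standalone C++ (the Map and HeapObject types below are stand-ins, not the V8 classes):

#include <cstdio>
#include <memory>

struct Map { bool access_check_needed; };

struct HeapObject {
  std::shared_ptr<const Map> map;
  void SetAccessChecks(bool enabled) {
    if (map->access_check_needed == enabled) return;
    auto copy = std::make_shared<Map>(*map);  // Map::Copy(old_map) analogue
    copy->access_check_needed = enabled;      // never mutate the shared map
    map = copy;
  }
};

int main() {
  auto shared_map = std::make_shared<Map>(Map{true});
  HeapObject a{shared_map}, b{shared_map};
  a.SetAccessChecks(false);                   // only 'a' gets the new map
  std::printf("a=%d b=%d\n", a.map->access_check_needed,
              b.map->access_check_needed);    // a=0 b=1
}
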
@@ -2021,7 +2213,7 @@ static Handle<Object> InstantiateAccessorComponent(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAccessorProperty) {
+RUNTIME_FUNCTION(Runtime_SetAccessorProperty) {
HandleScope scope(isolate);
ASSERT(args.length() == 6);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
@@ -2030,6 +2222,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAccessorProperty) {
CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
CONVERT_SMI_ARG_CHECKED(attribute, 4);
CONVERT_SMI_ARG_CHECKED(access_control, 5);
+ RUNTIME_ASSERT(getter->IsUndefined() || getter->IsFunctionTemplateInfo());
+ RUNTIME_ASSERT(setter->IsUndefined() || setter->IsFunctionTemplateInfo());
+ RUNTIME_ASSERT(PropertyDetails::AttributesField::is_valid(
+ static_cast<PropertyAttributes>(attribute)));
JSObject::DefineAccessor(object,
name,
InstantiateAccessorComponent(isolate, getter),
@@ -2040,26 +2236,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAccessorProperty) {
}
-static Failure* ThrowRedeclarationError(Isolate* isolate,
- const char* type,
- Handle<String> name) {
+static Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name) {
HandleScope scope(isolate);
- Handle<Object> type_handle =
- isolate->factory()->NewStringFromAscii(CStrVector(type));
- Handle<Object> args[2] = { type_handle, name };
- Handle<Object> error =
- isolate->factory()->NewTypeError("redeclaration", HandleVector(args, 2));
+ Handle<Object> args[1] = { name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "var_redeclaration", HandleVector(args, 1));
return isolate->Throw(*error);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
+RUNTIME_FUNCTION(RuntimeHidden_DeclareGlobals) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
Handle<GlobalObject> global = Handle<GlobalObject>(
isolate->context()->global_object());
- Handle<Context> context = args.at<Context>(0);
+ CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
CONVERT_SMI_ARG_CHECKED(flags, 2);
@@ -2081,19 +2273,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
if (is_var || is_const) {
// Lookup the property in the global object, and don't set the
// value of the variable if the property is already there.
- // Do the lookup locally only, see ES5 erratum.
+      // Do the lookup on own properties only; see ES5 erratum.
LookupResult lookup(isolate);
- if (FLAG_es52_globals) {
- global->LocalLookup(*name, &lookup, true);
- } else {
- global->Lookup(*name, &lookup);
- }
+ global->LookupOwn(name, &lookup, true);
if (lookup.IsFound()) {
// We found an existing property. Unless it was an interceptor
// that claims the property is absent, skip this declaration.
if (!lookup.IsInterceptor()) continue;
- PropertyAttributes attributes = global->GetPropertyAttribute(*name);
- if (attributes != ABSENT) continue;
+ if (JSReceiver::GetPropertyAttributes(global, name) != ABSENT) continue;
// Fall-through and introduce the absent property by using
// SetProperty.
}
@@ -2108,7 +2295,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
}
LookupResult lookup(isolate);
- global->LocalLookup(*name, &lookup, true);
+ global->LookupOwn(name, &lookup, true);
// Compute the property attributes. According to ECMA-262,
// the property must be non-configurable except in eval.
@@ -2122,29 +2309,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
attr |= READ_ONLY;
}
- LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
+ StrictMode strict_mode = DeclareGlobalsStrictMode::decode(flags);
if (!lookup.IsFound() || is_function) {
- // If the local property exists, check that we can reconfigure it
+ // If the own property exists, check that we can reconfigure it
// as required for function declarations.
if (lookup.IsFound() && lookup.IsDontDelete()) {
if (lookup.IsReadOnly() || lookup.IsDontEnum() ||
lookup.IsPropertyCallbacks()) {
- return ThrowRedeclarationError(isolate, "function", name);
+ return ThrowRedeclarationError(isolate, name);
}
// If the existing property is not configurable, keep its attributes.
attr = lookup.GetAttributes();
}
// Define or redefine own property.
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
global, name, value, static_cast<PropertyAttributes>(attr)));
} else {
// Do a [[Put]] on the existing (own) property.
- RETURN_IF_EMPTY_HANDLE(isolate,
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
JSObject::SetProperty(
global, name, value, static_cast<PropertyAttributes>(attr),
- language_mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode));
+ strict_mode));
}
}
@@ -2153,20 +2341,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
}
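
The declaration logic boils down to: reuse an existing own property when a var or const finds one, but throw the (now single-argument) var_redeclaration TypeError when a function declaration collides with a non-configurable accessor. A coarse sketch of that rule, with invented Property fields standing in for V8's attribute bits:

#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

struct Property { bool dont_delete; bool is_accessor; };

void DeclareGlobal(std::map<std::string, Property>& global,
                   const std::string& name, bool is_function) {
  auto it = global.find(name);
  if (it != global.end() && is_function &&
      it->second.dont_delete && it->second.is_accessor) {
    throw std::runtime_error("TypeError: var_redeclaration: " + name);
  }
  global[name] = Property{false, false};  // (re)define as a plain data slot
}

int main() {
  std::map<std::string, Property> global;
  global["x"] = Property{true, true};  // e.g. a non-configurable accessor
  DeclareGlobal(global, "y", false);   // var y: fine
  try {
    DeclareGlobal(global, "x", true);  // function x() {} over accessor x
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());
  }
}
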
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
+RUNTIME_FUNCTION(RuntimeHidden_DeclareContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
// Declarations are always made in a function or native context. In the
// case of eval code, the context passed is the context of the caller,
// which may be some nested context and not the declaration context.
- RUNTIME_ASSERT(args[0]->IsContext());
- Handle<Context> context(Context::cast(args[0])->declaration_context());
-
- Handle<String> name(String::cast(args[1]));
- PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
+ CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 0);
+ Handle<Context> context(context_arg->declaration_context());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+ CONVERT_SMI_ARG_CHECKED(mode_arg, 2);
+ PropertyAttributes mode = static_cast<PropertyAttributes>(mode_arg);
RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
- Handle<Object> initial_value(args[3], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 3);
int index;
PropertyAttributes attributes;
@@ -2182,8 +2370,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
// Functions are not read-only.
ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
- const char* type = ((attributes & READ_ONLY) != 0) ? "const" : "var";
- return ThrowRedeclarationError(isolate, type, name);
+ return ThrowRedeclarationError(isolate, name);
}
// Initialize it if necessary.
@@ -2198,10 +2385,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
// Slow case: The property is in the context extension object of a
// function context or the global object of a native context.
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- RETURN_IF_EMPTY_HANDLE(
+ RETURN_FAILURE_ON_EXCEPTION(
isolate,
- JSReceiver::SetProperty(object, name, initial_value, mode,
- kNonStrictMode));
+ JSReceiver::SetProperty(object, name, initial_value, mode, SLOPPY));
}
}
@@ -2224,7 +2410,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
// Declare the property by setting it to the initial value if provided,
// or undefined, and use the correct mode (e.g. READ_ONLY attribute for
// constant declarations).
- ASSERT(!JSReceiver::HasLocalProperty(object, name));
+ ASSERT(!JSReceiver::HasOwnProperty(object, name));
Handle<Object> value(isolate->heap()->undefined_value(), isolate);
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
@@ -2236,18 +2422,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
if (initial_value->IsTheHole() &&
!object->IsJSContextExtensionObject()) {
LookupResult lookup(isolate);
- object->Lookup(*name, &lookup);
+ object->Lookup(name, &lookup);
if (lookup.IsPropertyCallbacks()) {
- return ThrowRedeclarationError(isolate, "const", name);
+ return ThrowRedeclarationError(isolate, name);
}
}
if (object->IsJSGlobalObject()) {
// Define own property on the global object.
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(object, name, value, mode));
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(object, name, value, mode));
} else {
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSReceiver::SetProperty(object, name, value, mode, kNonStrictMode));
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSReceiver::SetProperty(object, name, value, mode, SLOPPY));
}
}
@@ -2255,7 +2441,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
+RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
HandleScope scope(isolate);
// args[0] == name
// args[1] == language_mode
@@ -2267,35 +2453,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
bool assign = args.length() == 3;
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- RUNTIME_ASSERT(args[1]->IsSmi());
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
- StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
+ CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 1);
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable.
PropertyAttributes attributes = DONT_DELETE;
- // Lookup the property locally in the global object. If it isn't
+  // Look up the property as an own property on the global object. If it isn't
// there, there is a property with this name in the prototype chain.
// We follow Safari and Firefox behavior and only set the property
- // locally if there is an explicit initialization value that we have
+ // if there is an explicit initialization value that we have
// to assign to the property.
// Note that objects can have hidden prototypes, so we need to traverse
- // the whole chain of hidden prototypes to do a 'local' lookup.
+ // the whole chain of hidden prototypes to do an 'own' lookup.
LookupResult lookup(isolate);
- isolate->context()->global_object()->LocalLookup(*name, &lookup, true);
+ isolate->context()->global_object()->LookupOwn(name, &lookup, true);
if (lookup.IsInterceptor()) {
+ Handle<JSObject> holder(lookup.holder());
PropertyAttributes intercepted =
- lookup.holder()->GetPropertyAttribute(*name);
+ JSReceiver::GetPropertyAttributes(holder, name);
if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
// Found an interceptor that's not read only.
if (assign) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- Handle<Object> result = JSObject::SetPropertyForResult(
- handle(lookup.holder()), &lookup, name, value, attributes,
- strict_mode_flag);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::SetPropertyForResult(
+ holder, &lookup, name, value, attributes, strict_mode));
return *result;
} else {
return isolate->heap()->undefined_value();
@@ -2306,23 +2491,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
if (assign) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<GlobalObject> global(isolate->context()->global_object());
- Handle<Object> result = JSReceiver::SetProperty(
- global, name, value, attributes, strict_mode_flag);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSReceiver::SetProperty(global, name, value, attributes, strict_mode));
return *result;
}
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
+RUNTIME_FUNCTION(RuntimeHidden_InitializeConstGlobal) {
SealHandleScope shs(isolate);
// All constants are declared with an initial value. The name
// of the constant is the first argument and the initial value
// is the second.
RUNTIME_ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- Handle<Object> value = args.at<Object>(1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
// Get the current global object from top.
GlobalObject* global = isolate->context()->global_object();
@@ -2332,20 +2518,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
- // Lookup the property locally in the global object. If it isn't
+  // Look up the property as an own property on the global object. If it isn't
// there, we add the property and take special precautions to always
- // add it as a local property even in case of callbacks in the
- // prototype chain (this rules out using SetProperty).
- // We use SetLocalPropertyIgnoreAttributes instead
+  // add it even in the case of callbacks in the prototype chain (this rules
+  // out using SetProperty); we use SetOwnPropertyIgnoreAttributes instead.
LookupResult lookup(isolate);
- global->LocalLookup(*name, &lookup);
+ global->LookupOwn(name, &lookup);
if (!lookup.IsFound()) {
HandleScope handle_scope(isolate);
Handle<GlobalObject> global(isolate->context()->global_object());
- RETURN_IF_EMPTY_HANDLE(
+ RETURN_FAILURE_ON_EXCEPTION(
isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
- attributes));
+ JSObject::SetOwnPropertyIgnoreAttributes(global, name, value,
+ attributes));
return *value;
}
@@ -2358,11 +2543,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// BUG 1213575: Handle the case where we have to set a read-only
// property through an interceptor and only do it if it's
// uninitialized, e.g. the hole. Nirk...
- // Passing non-strict mode because the property is writable.
- RETURN_IF_EMPTY_HANDLE(
+ // Passing sloppy mode because the property is writable.
+ RETURN_FAILURE_ON_EXCEPTION(
isolate,
- JSReceiver::SetProperty(global, name, value, attributes,
- kNonStrictMode));
+ JSReceiver::SetProperty(global, name, value, attributes, SLOPPY));
return *value;
}
@@ -2372,7 +2556,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// Strict mode handling not needed (const is disallowed in strict mode).
if (lookup.IsField()) {
FixedArray* properties = global->properties();
- int index = lookup.GetFieldIndex().field_index();
+ int index = lookup.GetFieldIndex().outobject_array_index();
if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
properties->set(index, *value);
}
@@ -2393,18 +2577,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
+RUNTIME_FUNCTION(RuntimeHidden_InitializeConstContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
- Handle<Object> value(args[0], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
ASSERT(!value->IsTheHole());
-
// Initializations are always done in a function or native context.
- RUNTIME_ASSERT(args[1]->IsContext());
- Handle<Context> context(Context::cast(args[1])->declaration_context());
-
- Handle<String> name(String::cast(args[2]));
+ CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 1);
+ Handle<Context> context(context_arg->declaration_context());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
int index;
PropertyAttributes attributes;
@@ -2430,9 +2612,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
Handle<JSObject> global = Handle<JSObject>(
isolate->context()->global_object());
// Strict mode not needed (const disallowed in strict mode).
- RETURN_IF_EMPTY_HANDLE(
+ RETURN_FAILURE_ON_EXCEPTION(
isolate,
- JSReceiver::SetProperty(global, name, value, NONE, kNonStrictMode));
+ JSReceiver::SetProperty(global, name, value, NONE, SLOPPY));
return *value;
}
@@ -2457,15 +2639,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
// Set it if it hasn't been set before. NOTE: We cannot use
// GetProperty() to get the current value as it 'unholes' the value.
LookupResult lookup(isolate);
- object->LocalLookupRealNamedProperty(*name, &lookup);
+ object->LookupOwnRealNamedProperty(name, &lookup);
ASSERT(lookup.IsFound()); // the property was declared
ASSERT(lookup.IsReadOnly()); // and it was declared as read-only
if (lookup.IsField()) {
FixedArray* properties = object->properties();
- int index = lookup.GetFieldIndex().field_index();
- if (properties->get(index)->IsTheHole()) {
- properties->set(index, *value);
+ FieldIndex index = lookup.GetFieldIndex();
+ ASSERT(!index.is_inobject());
+ if (properties->get(index.outobject_array_index())->IsTheHole()) {
+ properties->set(index.outobject_array_index(), *value);
}
} else if (lookup.IsNormal()) {
if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
@@ -2481,10 +2664,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
// read-only property.
if ((attributes & READ_ONLY) == 0) {
// Strict mode not needed (const disallowed in strict mode).
- RETURN_IF_EMPTY_HANDLE(
+ RETURN_FAILURE_ON_EXCEPTION(
isolate,
- JSReceiver::SetProperty(object, name, value, attributes,
- kNonStrictMode));
+ JSReceiver::SetProperty(object, name, value, attributes, SLOPPY));
}
}
@@ -2492,20 +2674,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
}
-RUNTIME_FUNCTION(MaybeObject*,
- Runtime_OptimizeObjectForAddingMultipleProperties) {
+RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(properties, 1);
- if (object->HasFastProperties()) {
+ // Conservative upper limit to prevent fuzz tests from going OOM.
+ RUNTIME_ASSERT(properties <= 100000);
+ if (object->HasFastProperties() && !object->IsJSGlobalProxy()) {
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
}
return *object;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
+RUNTIME_FUNCTION(RuntimeHidden_RegExpExec) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
@@ -2517,54 +2700,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
isolate->counters()->regexp_entry_runtime()->Increment();
- Handle<Object> result = RegExpImpl::Exec(regexp,
- subject,
- index,
- last_match_info);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ RegExpImpl::Exec(regexp, subject, index, last_match_info));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_RegExpConstructResult) {
+ HandleScope handle_scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_SMI_ARG_CHECKED(elements_count, 0);
- if (elements_count < 0 ||
- elements_count > FixedArray::kMaxLength ||
- !Smi::IsValid(elements_count)) {
- return isolate->ThrowIllegalOperation();
- }
- Object* new_object;
- { MaybeObject* maybe_new_object =
- isolate->heap()->AllocateFixedArray(elements_count);
- if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
- }
- FixedArray* elements = FixedArray::cast(new_object);
- { MaybeObject* maybe_new_object = isolate->heap()->AllocateRaw(
- JSRegExpResult::kSize, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
- }
- {
- DisallowHeapAllocation no_gc;
- HandleScope scope(isolate);
- reinterpret_cast<HeapObject*>(new_object)->
- set_map(isolate->native_context()->regexp_result_map());
- }
- JSArray* array = JSArray::cast(new_object);
- array->set_properties(isolate->heap()->empty_fixed_array());
- array->set_elements(elements);
- array->set_length(Smi::FromInt(elements_count));
+ CONVERT_SMI_ARG_CHECKED(size, 0);
+ RUNTIME_ASSERT(size >= 0 && size <= FixedArray::kMaxLength);
+ CONVERT_ARG_HANDLE_CHECKED(Object, index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 2);
+ Handle<FixedArray> elements = isolate->factory()->NewFixedArray(size);
+ Handle<Map> regexp_map(isolate->native_context()->regexp_result_map());
+ Handle<JSObject> object =
+ isolate->factory()->NewJSObjectFromMap(regexp_map, NOT_TENURED, false);
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ array->set_elements(*elements);
+ array->set_length(Smi::FromInt(size));
// Write in-object properties after the length of the array.
- array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, args[1]);
- array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, args[2]);
- return array;
+ array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, *index);
+ array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, *input);
+ return *array;
}
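
The handlified version builds the regexp result as an ordinary JSArray from the cached regexp_result_map and then fills the two in-object fields. Conceptually the object is just the following (illustrative struct, not the V8 object layout):

#include <cstdio>
#include <string>
#include <vector>

struct RegExpResult {
  std::vector<std::string> elements;  // the match and its capture groups
  int index;                          // match start, stored "in-object"
  std::string input;                  // the subject string
};

int main() {
  RegExpResult r{{"ab", "a"}, 3, "xyzab"};
  std::printf("matched=%s at %d in '%s'\n",
              r.elements[0].c_str(), r.index, r.input.c_str());
}
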
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
+RUNTIME_FUNCTION(Runtime_RegExpInitializeObject) {
HandleScope scope(isolate);
- DisallowHeapAllocation no_allocation;
ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
@@ -2607,24 +2773,27 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
Handle<Object> zero(Smi::FromInt(0), isolate);
Factory* factory = isolate->factory();
- CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
- regexp, factory->source_string(), source, final));
- CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
- regexp, factory->global_string(), global, final));
- CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
- regexp, factory->ignore_case_string(), ignoreCase, final));
- CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
- regexp, factory->multiline_string(), multiline, final));
- CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
- regexp, factory->last_index_string(), zero, writable));
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ regexp, factory->source_string(), source, final).Check();
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ regexp, factory->global_string(), global, final).Check();
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ regexp, factory->ignore_case_string(), ignoreCase, final).Check();
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ regexp, factory->multiline_string(), multiline, final).Check();
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ regexp, factory->last_index_string(), zero, writable).Check();
return *regexp;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
+RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
+ Object* length = prototype->length();
+ RUNTIME_ASSERT(length->IsSmi() && Smi::cast(length)->value() == 0);
+ RUNTIME_ASSERT(prototype->HasFastSmiOrObjectElements());
// This is necessary to enable fast checks for absence of elements
// on Array.prototype and below.
prototype->set_elements(isolate->heap()->empty_fixed_array());
@@ -2632,28 +2801,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
}
-static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
- Handle<JSObject> holder,
- const char* name,
- Builtins::Name builtin_name) {
+static void InstallBuiltin(Isolate* isolate,
+ Handle<JSObject> holder,
+ const char* name,
+ Builtins::Name builtin_name) {
Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
Handle<Code> code(isolate->builtins()->builtin(builtin_name));
Handle<JSFunction> optimized =
- isolate->factory()->NewFunction(key,
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- code,
- false);
+ isolate->factory()->NewFunctionWithoutPrototype(key, code);
optimized->shared()->DontAdaptArguments();
- JSReceiver::SetProperty(holder, key, optimized, NONE, kStrictMode);
- return optimized;
+ JSReceiver::SetProperty(holder, key, optimized, NONE, STRICT).Assert();
}
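
InstallBuiltin is a small helper: intern the name, wrap the builtin code in a fresh function (now created without a prototype), and define it on the holder; the return value is no longer needed, so the helper became void. The pattern, reduced to standalone C++ with invented types:

#include <cstdio>
#include <functional>
#include <map>
#include <string>

using Builtin = std::function<void()>;
using Holder = std::map<std::string, Builtin>;

void InstallBuiltin(Holder& holder, const std::string& name, Builtin code) {
  holder[name] = std::move(code);  // SetProperty(holder, key, optimized, ...)
}

int main() {
  Holder array_functions;
  InstallBuiltin(array_functions, "pop",  [] { std::printf("ArrayPop\n"); });
  InstallBuiltin(array_functions, "push", [] { std::printf("ArrayPush\n"); });
  array_functions.at("push")();
}
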
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
+RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, holder, 0);
+ ASSERT(args.length() == 0);
+ Handle<JSObject> holder =
+ isolate->factory()->NewJSObject(isolate->object_function());
InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
@@ -2667,49 +2832,43 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsCallable) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsCallable());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
+RUNTIME_FUNCTION(Runtime_IsSloppyModeFunction) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
if (!callable->IsJSFunction()) {
HandleScope scope(isolate);
- bool threw = false;
- Handle<Object> delegate = Execution::TryGetFunctionDelegate(
- isolate, Handle<JSReceiver>(callable), &threw);
- if (threw) return Failure::Exception();
+ Handle<Object> delegate;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, delegate,
+ Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable)));
callable = JSFunction::cast(*delegate);
}
JSFunction* function = JSFunction::cast(callable);
SharedFunctionInfo* shared = function->shared();
- return isolate->heap()->ToBoolean(shared->is_classic_mode());
+ return isolate->heap()->ToBoolean(shared->strict_mode() == SLOPPY);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
+RUNTIME_FUNCTION(Runtime_GetDefaultReceiver) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
if (!callable->IsJSFunction()) {
HandleScope scope(isolate);
- bool threw = false;
- Handle<Object> delegate = Execution::TryGetFunctionDelegate(
- isolate, Handle<JSReceiver>(callable), &threw);
- if (threw) return Failure::Exception();
+ Handle<Object> delegate;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, delegate,
+ Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable)));
callable = JSFunction::cast(*delegate);
}
JSFunction* function = JSFunction::cast(callable);
SharedFunctionInfo* shared = function->shared();
- if (shared->native() || !shared->is_classic_mode()) {
+ if (shared->native() || shared->strict_mode() == STRICT) {
return isolate->heap()->undefined_value();
}
// Returns undefined for strict or native functions, or
@@ -2721,13 +2880,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
+RUNTIME_FUNCTION(RuntimeHidden_MaterializeRegExpLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- int index = args.smi_at(1);
- Handle<String> pattern = args.at<String>(2);
- Handle<String> flags = args.at<String>(3);
+ CONVERT_SMI_ARG_CHECKED(index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, flags, 3);
// Get the RegExp function from the context in the literals array.
// This is the RegExp function from the context in which the
@@ -2738,20 +2897,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
Handle<JSFunction>(
JSFunction::NativeContextFromLiterals(*literals)->regexp_function());
// Compute the regular expression literal.
- bool has_pending_exception;
- Handle<Object> regexp =
- RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags,
- &has_pending_exception);
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ Handle<Object> regexp;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, regexp,
+ RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags));
literals->set(index, *regexp);
return *regexp;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
+RUNTIME_FUNCTION(Runtime_FunctionGetName) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -2760,7 +2915,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
+RUNTIME_FUNCTION(Runtime_FunctionSetName) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -2771,7 +2926,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
+RUNTIME_FUNCTION(Runtime_FunctionNameShouldPrintAsAnonymous) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2780,7 +2935,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
+RUNTIME_FUNCTION(Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2789,7 +2944,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsGenerator) {
+RUNTIME_FUNCTION(Runtime_FunctionIsGenerator) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2797,18 +2952,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsGenerator) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
+RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
- f->RemovePrototype();
+ RUNTIME_ASSERT(f->RemovePrototype());
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScript) {
+RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -2816,11 +2971,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScript) {
Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
if (!script->IsScript()) return isolate->heap()->undefined_value();
- return *GetScriptWrapper(Handle<Script>::cast(script));
+ return *Script::GetWrapper(Handle<Script>::cast(script));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
+RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -2830,7 +2985,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
+RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -2840,7 +2995,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
+RUNTIME_FUNCTION(Runtime_FunctionGetPositionForOffset) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -2854,7 +3009,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
+RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -2865,72 +3020,32 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
+RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
CONVERT_SMI_ARG_CHECKED(length, 1);
+ RUNTIME_ASSERT((length & 0xC0000000) == 0xC0000000 ||
+ (length & 0xC0000000) == 0x0);
fun->shared()->set_length(length);
return isolate->heap()->undefined_value();
}
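
The new RUNTIME_ASSERT accepts a length only when its top two bits agree, which (on my reading, stated here as an assumption) admits exactly the values representable in a signed 31-bit field, as Smi-encoded lengths must be. A worked check of that predicate:

#include <cstdint>
#include <cstdio>

bool ValidLength(int32_t length) {
  uint32_t top = static_cast<uint32_t>(length) & 0xC0000000u;
  return top == 0xC0000000u || top == 0x0u;
}

int main() {
  std::printf("%d %d %d %d\n",
              ValidLength(0),            // 1
              ValidLength(0x3FFFFFFF),   // 1: largest non-negative value
              ValidLength(-1),           // 1: negative values sign-extend
              ValidLength(0x40000000));  // 0: would need a 32nd bit
}
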
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
+RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- ASSERT(fun->should_have_prototype());
+ RUNTIME_ASSERT(fun->should_have_prototype());
Accessors::FunctionSetPrototype(fun, value);
return args[0]; // return TOS
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
- SealHandleScope shs(isolate);
- RUNTIME_ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
-
- String* name = isolate->heap()->prototype_string();
-
- if (function->HasFastProperties()) {
- // Construct a new field descriptor with updated attributes.
- DescriptorArray* instance_desc = function->map()->instance_descriptors();
-
- int index = instance_desc->SearchWithCache(name, function->map());
- ASSERT(index != DescriptorArray::kNotFound);
- PropertyDetails details = instance_desc->GetDetails(index);
-
- CallbacksDescriptor new_desc(name,
- instance_desc->GetValue(index),
- static_cast<PropertyAttributes>(details.attributes() | READ_ONLY));
-
- // Create a new map featuring the new field descriptors array.
- Map* new_map;
- MaybeObject* maybe_map =
- function->map()->CopyReplaceDescriptor(
- instance_desc, &new_desc, index, OMIT_TRANSITION);
- if (!maybe_map->To(&new_map)) return maybe_map;
-
- function->set_map(new_map);
- } else { // Dictionary properties.
- // Directly manipulate the property details.
- int entry = function->property_dictionary()->FindEntry(name);
- ASSERT(entry != NameDictionary::kNotFound);
- PropertyDetails details = function->property_dictionary()->DetailsAt(entry);
- PropertyDetails new_details(
- static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
- details.type(),
- details.dictionary_index());
- function->property_dictionary()->DetailsAtPut(entry, new_details);
- }
- return function;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
+RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -2939,7 +3054,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
+RUNTIME_FUNCTION(Runtime_FunctionIsBuiltin) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -2948,21 +3063,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
+RUNTIME_FUNCTION(Runtime_SetCode) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
- Handle<Object> code = args.at<Object>(1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, source, 1);
- if (code->IsNull()) return *target;
- RUNTIME_ASSERT(code->IsJSFunction());
- Handle<JSFunction> source = Handle<JSFunction>::cast(code);
Handle<SharedFunctionInfo> target_shared(target->shared());
Handle<SharedFunctionInfo> source_shared(source->shared());
+ RUNTIME_ASSERT(!source_shared->bound());
- if (!JSFunction::EnsureCompiled(source, KEEP_EXCEPTION)) {
- return Failure::Exception();
+ if (!Compiler::EnsureCompiled(source, KEEP_EXCEPTION)) {
+ return isolate->heap()->exception();
}
// Mark both the source and the target as un-flushable because the
@@ -2977,6 +3090,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
target_shared->ReplaceCode(source_shared->code());
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
+ target_shared->set_feedback_vector(source_shared->feedback_vector());
target_shared->set_formal_parameter_count(
source_shared->formal_parameter_count());
target_shared->set_script(source_shared->script());
@@ -2986,6 +3100,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
bool was_native = target_shared->native();
target_shared->set_compiler_hints(source_shared->compiler_hints());
target_shared->set_native(was_native);
+ target_shared->set_profiler_ticks(source_shared->profiler_ticks());
// Set the code of the target function.
target->ReplaceCode(source_shared->code());
@@ -3014,32 +3129,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
}
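Throughout this hunk, Failure::Exception() gives way to isolate->heap()->exception(), a single sentinel object that signals a pending exception. A minimal sketch of that convention; the names below are illustrative, not V8's:

#include <cstdio>

// Illustrative names only: one distinguished object plays the role of
// isolate->heap()->exception(), and callers compare against it instead of
// unwrapping a MaybeObject/Failure.
struct Value { int payload; };
static Value kException{-1};

Value* EnsureCompiled(bool ok) {
  static Value compiled{42};
  return ok ? &compiled : &kException;  // sentinel instead of Failure object
}

int main() {
  Value* v = EnsureCompiled(false);
  if (v == &kException) std::puts("pending exception: unwind to caller");
  else std::printf("compiled: %d\n", v->payload);
}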
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- CONVERT_SMI_ARG_CHECKED(num, 1);
- RUNTIME_ASSERT(num >= 0);
- // If objects constructed from this function exist then changing
- // 'estimated_nof_properties' is dangerous since the previous value might
- // have been compiled into the fast construct stub. Moreover, the inobject
- // slack tracking logic might have adjusted the previous value, so even
- // passing the same value is risky.
- if (!func->shared()->live_objects_may_exist()) {
- func->shared()->set_expected_nof_properties(num);
- if (func->has_initial_map()) {
- Handle<Map> new_initial_map =
- func->GetIsolate()->factory()->CopyMap(
- Handle<Map>(func->initial_map()));
- new_initial_map->set_unused_property_fields(num);
- func->set_initial_map(*new_initial_map);
- }
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
+RUNTIME_FUNCTION(RuntimeHidden_CreateJSGeneratorObject) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
@@ -3065,10 +3155,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_SuspendJSGeneratorObject) {
+ HandleScope handle_scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
JavaScriptFrameIterator stack_iterator(isolate);
JavaScriptFrame* frame = stack_iterator.frame();
@@ -3097,11 +3187,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
ASSERT(!frame->HasHandler());
} else {
int stack_handler_index = -1;
- MaybeObject* alloc = isolate->heap()->AllocateFixedArray(operands_count);
- FixedArray* operand_stack;
- if (!alloc->To(&operand_stack)) return alloc;
- frame->SaveOperandStack(operand_stack, &stack_handler_index);
- generator_object->set_operand_stack(operand_stack);
+ Handle<FixedArray> operand_stack =
+ isolate->factory()->NewFixedArray(operands_count);
+ frame->SaveOperandStack(*operand_stack, &stack_handler_index);
+ generator_object->set_operand_stack(*operand_stack);
generator_object->set_stack_handler_index(stack_handler_index);
}
@@ -3116,7 +3205,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
// inlined into GeneratorNext and GeneratorThrow. EmitGeneratorResume is
// called in any case, as it needs to reconstruct the stack frame and make space
// for arguments and operands.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
+RUNTIME_FUNCTION(RuntimeHidden_ResumeJSGeneratorObject) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
@@ -3128,13 +3217,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
ASSERT_EQ(frame->function(), generator_object->function());
ASSERT(frame->function()->is_compiled());
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
Address pc = generator_object->function()->code()->instruction_start();
int offset = generator_object->continuation();
ASSERT(offset > 0);
frame->set_pc(pc + offset);
+ if (FLAG_enable_ool_constant_pool) {
+ frame->set_constant_pool(
+ generator_object->function()->code()->constant_pool());
+ }
generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
FixedArray* operand_stack = generator_object->operand_stack();
@@ -3160,7 +3253,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
}
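The corrected STATIC_ASSERTs pin down the continuation encoding that the resume path relies on: a positive value is a resumable code offset, zero means closed, and a negative value means currently executing. A small model of that dispatch (constant values assumed, matching the asserts):

#include <cstdio>

// Values assumed from the asserts: executing is negative, closed is zero,
// any positive value is the offset to resume at.
enum : int { kGeneratorExecuting = -1, kGeneratorClosed = 0 };

const char* Describe(int continuation) {
  if (continuation > 0) return "suspended at code offset";
  if (continuation == kGeneratorClosed) return "closed";
  return "executing";
}

int main() {
  std::printf("%s / %s / %s\n", Describe(17), Describe(kGeneratorClosed),
              Describe(kGeneratorExecuting));
}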
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) {
+RUNTIME_FUNCTION(RuntimeHidden_ThrowGeneratorStateError) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
@@ -3173,41 +3266,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectFreeze) {
+RUNTIME_FUNCTION(Runtime_ObjectFreeze) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- Handle<Object> result = JSObject::Freeze(object);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
-}
+ // %ObjectFreeze is a fast path and these cases are handled elsewhere.
+ RUNTIME_ASSERT(!object->HasSloppyArgumentsElements() &&
+ !object->map()->is_observed() &&
+ !object->IsJSProxy());
-MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
- Object* char_code) {
- if (char_code->IsNumber()) {
- return isolate->heap()->LookupSingleCharacterStringFromCode(
- NumberToUint32(char_code) & 0xffff);
- }
- return isolate->heap()->empty_string();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, JSObject::Freeze(object));
+ return *result;
}
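ASSIGN_RETURN_FAILURE_ON_EXCEPTION is the recurring unwrap-or-propagate step for the new MaybeHandle results: either bind the value or return the exception sentinel. A sketch of the same shape using std::optional as a stand-in for MaybeHandle:

#include <cstdio>
#include <optional>
#include <string>

// std::optional stands in for MaybeHandle: empty means an exception is
// already pending on the isolate and must be propagated, not inspected.
std::optional<std::string> Freeze(bool throws) {
  if (throws) return std::nullopt;  // exception pending
  return std::string("frozen");
}

int main() {
  if (auto result = Freeze(false)) std::printf("%s\n", result->c_str());
  if (!Freeze(true)) std::puts("propagate the pending exception to caller");
}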
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_StringCharCodeAt) {
+ HandleScope handle_scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, subject, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
- Object* flat;
- { MaybeObject* maybe_flat = subject->TryFlatten();
- if (!maybe_flat->ToObject(&flat)) return maybe_flat;
- }
- subject = String::cast(flat);
+ subject = String::Flatten(subject);
if (i >= static_cast<uint32_t>(subject->length())) {
return isolate->heap()->nan_value();
@@ -3217,10 +3302,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_CharFromCode) {
+ HandleScope handlescope(isolate);
ASSERT(args.length() == 1);
- return CharFromCode(isolate, args[0]);
+ if (args[0]->IsNumber()) {
+ CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
+ code &= 0xffff;
+ return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
+ }
+ return isolate->heap()->empty_string();
}
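The inlined fast path keeps CharFromCode's semantics: the numeric argument is truncated to a single UTF-16 code unit (ToUint16), so values outside the BMP wrap. For example:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t code = 0x1F600;                // a non-BMP code point
  std::printf("0x%X\n", code & 0xffff);   // prints 0xF600: one UTF-16 unit
}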
@@ -3293,8 +3383,7 @@ class FixedArrayBuilder {
}
Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- Factory* factory = target_array->GetIsolate()->factory();
- factory->SetContent(target_array, array_);
+ JSArray::SetContent(target_array, array_);
target_array->set_length(Smi::FromInt(length_));
return target_array;
}
@@ -3381,14 +3470,20 @@ class ReplacementStringBuilder {
}
- Handle<String> ToString() {
+ MaybeHandle<String> ToString() {
+ Isolate* isolate = heap_->isolate();
if (array_builder_.length() == 0) {
- return heap_->isolate()->factory()->empty_string();
+ return isolate->factory()->empty_string();
}
Handle<String> joined_string;
if (is_ascii_) {
- Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
+ Handle<SeqOneByteString> seq;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, seq,
+ isolate->factory()->NewRawOneByteString(character_count_),
+ String);
+
DisallowHeapAllocation no_gc;
uint8_t* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
@@ -3398,7 +3493,12 @@ class ReplacementStringBuilder {
joined_string = Handle<String>::cast(seq);
} else {
// Non-ASCII.
- Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
+ Handle<SeqTwoByteString> seq;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, seq,
+ isolate->factory()->NewRawTwoByteString(character_count_),
+ String);
+
DisallowHeapAllocation no_gc;
uc16* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
@@ -3413,22 +3513,14 @@ class ReplacementStringBuilder {
void IncrementCharacterCount(int by) {
if (character_count_ > String::kMaxLength - by) {
- V8::FatalProcessOutOfMemory("String.replace result too large.");
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ character_count_ = kMaxInt;
+ } else {
+ character_count_ += by;
}
- character_count_ += by;
}
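Saturating at kMaxInt instead of calling V8::FatalProcessOutOfMemory turns an oversized String.replace result into a later, catchable allocation failure rather than a process abort. A sketch of the saturation, assuming a non-negative by and an illustrative kMaxLength (not V8's exact constant):

#include <climits>
#include <cstdio>

// kMaxLength here is an illustrative stand-in for String::kMaxLength, and
// by is assumed non-negative, as in the builder.
const int kMaxLength = (1 << 28) - 16;

int SaturatingAdd(int count, int by) {
  return (count > kMaxLength - by) ? INT_MAX : count + by;
}

int main() {
  std::printf("%d\n", SaturatingAdd(kMaxLength, 100));   // INT_MAX
  std::printf("%d\n", SaturatingAdd(10, 20));            // 30
}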
private:
- Handle<SeqOneByteString> NewRawOneByteString(int length) {
- return heap_->isolate()->factory()->NewRawOneByteString(length);
- }
-
-
- Handle<SeqTwoByteString> NewRawTwoByteString(int length) {
- return heap_->isolate()->factory()->NewRawTwoByteString(length);
- }
-
-
void AddElement(Object* element) {
ASSERT(element->IsSmi() || element->IsString());
ASSERT(array_builder_.capacity() > array_builder_.length());
@@ -3866,7 +3958,7 @@ void FindStringIndicesDispatch(Isolate* isolate,
template<typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
+MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> pattern_regexp,
@@ -3896,20 +3988,26 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
static_cast<int64_t>(pattern_len)) *
static_cast<int64_t>(matches) +
static_cast<int64_t>(subject_len);
- if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException(0x11);
- int result_len = static_cast<int>(result_len_64);
+ int result_len;
+ if (result_len_64 > static_cast<int64_t>(String::kMaxLength)) {
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ result_len = kMaxInt; // Provoke exception.
+ } else {
+ result_len = static_cast<int>(result_len_64);
+ }
int subject_pos = 0;
int result_pos = 0;
- Handle<ResultSeqString> result;
+ MaybeHandle<SeqString> maybe_res;
if (ResultSeqString::kHasAsciiEncoding) {
- result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawOneByteString(result_len));
+ maybe_res = isolate->factory()->NewRawOneByteString(result_len);
} else {
- result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawTwoByteString(result_len));
+ maybe_res = isolate->factory()->NewRawTwoByteString(result_len);
}
+ Handle<SeqString> untyped_res;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, untyped_res, maybe_res);
+ Handle<ResultSeqString> result = Handle<ResultSeqString>::cast(untyped_res);
for (int i = 0; i < matches; i++) {
// Copy non-matched subject content.
@@ -3948,7 +4046,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
}
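The length computation above stays in 64-bit arithmetic so the (replacement - pattern) * matches term cannot overflow before the range check, and oversize results are clamped to provoke a failing allocation. A self-contained version of the same computation:

#include <cstdint>
#include <cstdio>

// Widen to 64 bits before multiplying, clamp oversize results so the later
// string allocation fails with a catchable exception.
int ComputeResultLen(int subject_len, int pattern_len, int replacement_len,
                     int matches, int max_len) {
  int64_t len64 =
      (static_cast<int64_t>(replacement_len) - pattern_len) *
          static_cast<int64_t>(matches) +
      static_cast<int64_t>(subject_len);
  return len64 > max_len ? INT32_MAX : static_cast<int>(len64);
}

int main() {
  // ~5e9 characters would overflow int; the clamp provokes a failure.
  std::printf("%d\n", ComputeResultLen(10, 1, 1000000, 5000, (1 << 28) - 16));
}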
-MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
+MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
@@ -3980,11 +4078,11 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
}
RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
int32_t* current_match = global_cache.FetchNext();
if (current_match == NULL) {
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
return *subject;
}
@@ -4026,7 +4124,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
current_match = global_cache.FetchNext();
} while (current_match != NULL);
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
if (prev < subject_length) {
builder.EnsureCapacity(2);
@@ -4038,12 +4136,14 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
capture_count,
global_cache.LastSuccessfulMatch());
- return *(builder.ToString());
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, builder.ToString());
+ return *result;
}
template <typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
+MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
@@ -4063,11 +4163,11 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
}
RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
int32_t* current_match = global_cache.FetchNext();
if (current_match == NULL) {
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
return *subject;
}
@@ -4082,10 +4182,10 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawOneByteString(new_length));
+ isolate->factory()->NewRawOneByteString(new_length).ToHandleChecked());
} else {
answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawTwoByteString(new_length));
+ isolate->factory()->NewRawTwoByteString(new_length).ToHandleChecked());
}
int prev = 0;
@@ -4104,7 +4204,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
current_match = global_cache.FetchNext();
} while (current_match != NULL);
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
RegExpImpl::SetLastMatchInfo(last_match_info,
subject,
@@ -4129,16 +4229,19 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
if (delta == 0) return *answer;
Address end_of_string = answer->address() + string_size;
- isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
- MemoryChunk::IncrementLiveBytesFromMutator(answer->address(), -delta);
- }
+ Heap* heap = isolate->heap();
+ // The trimming is performed on a newly allocated object, which is on a
+ // freshly allocated page or on an already swept page. Hence, the sweeper
+ // thread cannot get confused with the filler creation. No synchronization
+ // needed.
+ heap->CreateFillerObjectAt(end_of_string, delta);
+ heap->AdjustLiveBytes(answer->address(), -delta, Heap::FROM_MUTATOR);
return *answer;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceGlobalRegExpWithString) {
+RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
@@ -4147,9 +4250,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceGlobalRegExpWithString) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- ASSERT(regexp->GetFlags().is_global());
+ RUNTIME_ASSERT(regexp->GetFlags().is_global());
+ RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
- if (!subject->IsFlat()) subject = FlattenGetString(subject);
+ subject = String::Flatten(subject);
if (replacement->length() == 0) {
if (subject->HasOnlyOneByteChars()) {
@@ -4161,43 +4265,45 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceGlobalRegExpWithString) {
}
}
- if (!replacement->IsFlat()) replacement = FlattenGetString(replacement);
+ replacement = String::Flatten(replacement);
return StringReplaceGlobalRegExpWithString(
isolate, subject, regexp, replacement, last_match_info);
}
-Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
- Handle<String> subject,
- Handle<String> search,
- Handle<String> replace,
- bool* found,
- int recursion_limit) {
- if (recursion_limit == 0) return Handle<String>::null();
+// This may return an empty MaybeHandle if an exception is thrown or
+// we abort due to reaching the recursion limit.
+MaybeHandle<String> StringReplaceOneCharWithString(Isolate* isolate,
+ Handle<String> subject,
+ Handle<String> search,
+ Handle<String> replace,
+ bool* found,
+ int recursion_limit) {
+ StackLimitCheck stackLimitCheck(isolate);
+ if (stackLimitCheck.HasOverflowed() || (recursion_limit == 0)) {
+ return MaybeHandle<String>();
+ }
+ recursion_limit--;
if (subject->IsConsString()) {
ConsString* cons = ConsString::cast(*subject);
Handle<String> first = Handle<String>(cons->first());
Handle<String> second = Handle<String>(cons->second());
- Handle<String> new_first =
- StringReplaceOneCharWithString(isolate,
- first,
- search,
- replace,
- found,
- recursion_limit - 1);
+ Handle<String> new_first;
+ if (!StringReplaceOneCharWithString(
+ isolate, first, search, replace, found, recursion_limit)
+ .ToHandle(&new_first)) {
+ return MaybeHandle<String>();
+ }
if (*found) return isolate->factory()->NewConsString(new_first, second);
- if (new_first.is_null()) return new_first;
-
- Handle<String> new_second =
- StringReplaceOneCharWithString(isolate,
- second,
- search,
- replace,
- found,
- recursion_limit - 1);
+
+ Handle<String> new_second;
+ if (!StringReplaceOneCharWithString(
+ isolate, second, search, replace, found, recursion_limit)
+ .ToHandle(&new_second)) {
+ return MaybeHandle<String>();
+ }
if (*found) return isolate->factory()->NewConsString(first, new_second);
- if (new_second.is_null()) return new_second;
return subject;
} else {
@@ -4205,7 +4311,11 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
if (index == -1) return subject;
*found = true;
Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
- Handle<String> cons1 = isolate->factory()->NewConsString(first, replace);
+ Handle<String> cons1;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, cons1,
+ isolate->factory()->NewConsString(first, replace),
+ String);
Handle<String> second =
isolate->factory()->NewSubString(subject, index + 1, subject->length());
return isolate->factory()->NewConsString(cons1, second);
@@ -4213,7 +4323,7 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
+RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
@@ -4224,19 +4334,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
// retry with a flattened subject string.
const int kRecursionLimit = 0x1000;
bool found = false;
- Handle<String> result = StringReplaceOneCharWithString(isolate,
- subject,
- search,
- replace,
- &found,
- kRecursionLimit);
- if (!result.is_null()) return *result;
- return *StringReplaceOneCharWithString(isolate,
- FlattenGetString(subject),
- search,
- replace,
- &found,
- kRecursionLimit);
+ Handle<String> result;
+ if (StringReplaceOneCharWithString(
+ isolate, subject, search, replace, &found, kRecursionLimit)
+ .ToHandle(&result)) {
+ return *result;
+ }
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+
+ subject = String::Flatten(subject);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ StringReplaceOneCharWithString(
+ isolate, subject, search, replace, &found, kRecursionLimit));
+ return *result;
}
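The retry logic distinguishes two empty-result cases: a bailout (failed stack check or exhausted recursion limit) triggers one retry on the flattened subject, while a genuine pending exception is propagated immediately. A simplified model with std::optional standing in for MaybeHandle:

#include <cstdio>
#include <optional>
#include <string>

// Empty result = bailout (recursion/stack limit), worth one retry on a
// flattened subject; a real exception would be checked separately.
std::optional<std::string> TryReplace(const std::string& subject,
                                      bool bail_out) {
  if (bail_out) return std::nullopt;
  return subject + " (replaced)";
}

int main() {
  auto first = TryReplace("deep cons string", /*bail_out=*/true);
  if (!first) {
    auto retry = TryReplace("flattened copy", /*bail_out=*/false);
    if (retry) std::printf("%s\n", retry->c_str());
  }
}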
@@ -4256,8 +4367,8 @@ int Runtime::StringMatch(Isolate* isolate,
int subject_length = sub->length();
if (start_index + pattern_length > subject_length) return -1;
- if (!sub->IsFlat()) FlattenString(sub);
- if (!pat->IsFlat()) FlattenString(pat);
+ sub = String::Flatten(sub);
+ pat = String::Flatten(pat);
DisallowHeapAllocation no_gc; // ensure vectors stay valid
// Extract flattened substrings of cons strings before determining asciiness.
@@ -4292,20 +4403,19 @@ int Runtime::StringMatch(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) {
+RUNTIME_FUNCTION(Runtime_StringIndexOf) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
- Object* index = args[2];
uint32_t start_index;
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
- int position =
- Runtime::StringMatch(isolate, sub, pat, start_index);
+ int position = Runtime::StringMatch(isolate, sub, pat, start_index);
return Smi::FromInt(position);
}
@@ -4345,14 +4455,14 @@ static int StringMatchBackwards(Vector<const schar> subject,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
+RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
- Object* index = args[2];
uint32_t start_index;
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
@@ -4367,8 +4477,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
return Smi::FromInt(start_index);
}
- if (!sub->IsFlat()) FlattenString(sub);
- if (!pat->IsFlat()) FlattenString(pat);
+ sub = String::Flatten(sub);
+ pat = String::Flatten(pat);
int position = -1;
DisallowHeapAllocation no_gc; // ensure vectors stay valid
@@ -4404,14 +4514,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_StringLocaleCompare) {
+ HandleScope handle_scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, str1, 0);
- CONVERT_ARG_CHECKED(String, str2, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
- if (str1 == str2) return Smi::FromInt(0); // Equal.
+ if (str1.is_identical_to(str2)) return Smi::FromInt(0); // Equal.
int str1_length = str1->length();
int str2_length = str2->length();
@@ -4431,32 +4541,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
int d = str1->Get(0) - str2->Get(0);
if (d != 0) return Smi::FromInt(d);
- str1->TryFlatten();
- str2->TryFlatten();
+ str1 = String::Flatten(str1);
+ str2 = String::Flatten(str2);
- ConsStringIteratorOp* op1 =
- isolate->runtime_state()->string_locale_compare_it1();
- ConsStringIteratorOp* op2 =
- isolate->runtime_state()->string_locale_compare_it2();
- // TODO(dcarney) Can do array compares here more efficiently.
- StringCharacterStream stream1(str1, op1);
- StringCharacterStream stream2(str2, op2);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat1 = str1->GetFlatContent();
+ String::FlatContent flat2 = str2->GetFlatContent();
for (int i = 0; i < end; i++) {
- uint16_t char1 = stream1.GetNext();
- uint16_t char2 = stream2.GetNext();
- if (char1 != char2) return Smi::FromInt(char1 - char2);
+ if (flat1.Get(i) != flat2.Get(i)) {
+ return Smi::FromInt(flat1.Get(i) - flat2.Get(i));
+ }
}
return Smi::FromInt(str1_length - str2_length);
}
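Flattening both strings once and comparing through FlatContent replaces the per-character cons-string streams. The comparison contract is unchanged: the difference of the first mismatching code units, else the difference of the lengths. A plain-C++ equivalent:

#include <cstdio>
#include <string>

int Compare(const std::u16string& a, const std::u16string& b) {
  size_t end = a.size() < b.size() ? a.size() : b.size();
  for (size_t i = 0; i < end; i++) {
    if (a[i] != b[i]) return a[i] - b[i];  // first differing code units
  }
  return static_cast<int>(a.size()) - static_cast<int>(b.size());
}

int main() {
  std::printf("%d %d\n", Compare(u"abc", u"abd"), Compare(u"ab", u"abc"));
}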
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_SubString) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(String, value, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
int start, end;
// We have a fast integer-only case here to avoid a conversion to double in
// the common case where from and to are Smis.
@@ -4473,22 +4579,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
}
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
- RUNTIME_ASSERT(end <= value->length());
+ RUNTIME_ASSERT(end <= string->length());
isolate->counters()->sub_string_runtime()->Increment();
- return value->SubString(start, end);
+
+ return *isolate->factory()->NewSubString(string, start, end);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
+RUNTIME_FUNCTION(Runtime_StringMatch) {
HandleScope handles(isolate);
- ASSERT_EQ(3, args.length());
+ ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
+ RUNTIME_ASSERT(regexp_info->HasFastObjectElements());
+
RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
int capture_count = regexp->CaptureCount();
@@ -4502,7 +4611,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
offsets.Add(match[1], zone_scope.zone()); // end
}
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
if (offsets.length() == 0) {
// Not a single match.
@@ -4536,7 +4645,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
// separate last match info. See comment on that function.
template<bool has_capture>
-static MaybeObject* SearchRegExpMultiple(
+static Object* SearchRegExpMultiple(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
@@ -4560,7 +4669,7 @@ static MaybeObject* SearchRegExpMultiple(
Handle<FixedArray> cached_fixed_array =
Handle<FixedArray>(FixedArray::cast(*cached_answer));
// The cache FixedArray is a COW-array and can therefore be reused.
- isolate->factory()->SetContent(result_array, cached_fixed_array);
+ JSArray::SetContent(result_array, cached_fixed_array);
// The actual length of the result array is stored in the last element of
// the backing store (the backing FixedArray may have a larger capacity).
Object* cached_fixed_array_last_element =
@@ -4574,14 +4683,13 @@ static MaybeObject* SearchRegExpMultiple(
}
RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
- Handle<FixedArray> result_elements;
- if (result_array->HasFastObjectElements()) {
- result_elements =
- Handle<FixedArray>(FixedArray::cast(result_array->elements()));
- }
- if (result_elements.is_null() || result_elements->length() < 16) {
+ // Ensured in Runtime_RegExpExecMultiple.
+ ASSERT(result_array->HasFastObjectElements());
+ Handle<FixedArray> result_elements(
+ FixedArray::cast(result_array->elements()));
+ if (result_elements->length() < 16) {
result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
}
@@ -4650,7 +4758,7 @@ static MaybeObject* SearchRegExpMultiple(
}
}
- if (global_cache.HasException()) return Failure::Exception();
+ if (global_cache.HasException()) return isolate->heap()->exception();
if (match_start >= 0) {
// Finished matching, with at least one match.
@@ -4671,10 +4779,10 @@ static MaybeObject* SearchRegExpMultiple(
fixed_array->set(fixed_array->length() - 1,
Smi::FromInt(builder.length()));
// Cache the result and turn the FixedArray into a COW array.
- RegExpResultsCache::Enter(isolate->heap(),
- *subject,
- regexp->data(),
- *fixed_array,
+ RegExpResultsCache::Enter(isolate,
+ subject,
+ handle(regexp->data(), isolate),
+ fixed_array,
RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
}
return *builder.ToJSArray(result_array);
@@ -4687,17 +4795,19 @@ static MaybeObject* SearchRegExpMultiple(
// This is only called for StringReplaceGlobalRegExpWithFunction. This sets
// lastMatchInfoOverride to maintain the last match info, so we don't need to
// set any other last match array info.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
+RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
HandleScope handles(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- if (!subject->IsFlat()) FlattenString(subject);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
+ RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
+ RUNTIME_ASSERT(result_array->HasFastObjectElements());
- ASSERT(regexp->GetFlags().is_global());
+ subject = String::Flatten(subject);
+ RUNTIME_ASSERT(regexp->GetFlags().is_global());
if (regexp->CaptureCount() == 0) {
return SearchRegExpMultiple<false>(
@@ -4709,8 +4819,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberToRadixString) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(radix, 1);
RUNTIME_ASSERT(2 <= radix && radix <= 36);
@@ -4721,7 +4831,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
if (value >= 0 && value < radix) {
// Character array used for conversion.
static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- return isolate->heap()->
+ return *isolate->factory()->
LookupSingleCharacterStringFromCode(kCharTable[value]);
}
}
@@ -4729,80 +4839,76 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
// Slow case.
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (std::isnan(value)) {
- return *isolate->factory()->nan_string();
+ return isolate->heap()->nan_string();
}
if (std::isinf(value)) {
if (value < 0) {
- return *isolate->factory()->minus_infinity_string();
+ return isolate->heap()->minus_infinity_string();
}
- return *isolate->factory()->infinity_string();
+ return isolate->heap()->infinity_string();
}
char* str = DoubleToRadixCString(value, radix);
- MaybeObject* result =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
DeleteArray(str);
- return result;
+ return *result;
}
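The single-digit fast path above serves any 0 <= value < radix straight from the one-character string table via kCharTable, skipping DoubleToRadixCString entirely:

#include <cstdio>

int main() {
  static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
  int value = 11, radix = 16;
  if (value >= 0 && value < radix) {
    std::printf("%c\n", kCharTable[value]);  // prints 'b'
  }
}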
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberToFixed) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
- RUNTIME_ASSERT(f >= 0);
+ // See DoubleToFixedCString for these constants:
+ RUNTIME_ASSERT(f >= 0 && f <= 20);
+ RUNTIME_ASSERT(!Double(value).IsSpecial());
char* str = DoubleToFixedCString(value, f);
- MaybeObject* res =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
DeleteArray(str);
- return res;
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberToExponential) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= -1 && f <= 20);
+ RUNTIME_ASSERT(!Double(value).IsSpecial());
char* str = DoubleToExponentialCString(value, f);
- MaybeObject* res =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
DeleteArray(str);
- return res;
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberToPrecision) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 1 && f <= 21);
+ RUNTIME_ASSERT(!Double(value).IsSpecial());
char* str = DoubleToPrecisionCString(value, f);
- MaybeObject* res =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
DeleteArray(str);
- return res;
+ return *result;
}
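The RUNTIME_ASSERT(!Double(value).IsSpecial()) lines added to these three converters reject NaN and infinities before the DoubleTo*CString calls, which only handle finite doubles. A portable approximation of that predicate:

#include <cmath>
#include <cstdio>
#include <limits>

// Approximation of Double::IsSpecial: exponent bits all set, i.e. NaN or
// an infinity.
bool IsSpecial(double d) { return std::isnan(d) || std::isinf(d); }

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  double inf = std::numeric_limits<double>::infinity();
  std::printf("%d %d %d\n", IsSpecial(1.5), IsSpecial(nan), IsSpecial(inf));
}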
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsValidSmi) {
- HandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_IsValidSmi) {
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
- if (Smi::IsValid(number)) {
- return isolate->heap()->true_value();
- } else {
- return isolate->heap()->false_value();
- }
+ return isolate->heap()->ToBoolean(Smi::IsValid(number));
}
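Smi::IsValid is the same representability question as the mask test in Runtime_FunctionSetLength: does the value fit the tagged small-integer range. A sketch assuming 31-bit smis (64-bit V8 uses a wider range):

#include <cstdint>
#include <cstdio>

bool IsValidSmi31(int32_t v) {
  return v >= -(1 << 30) && v < (1 << 30);
}

int main() {
  std::printf("%d %d\n", IsValidSmi31((1 << 30) - 1),  // 1: largest smi
              IsValidSmi31(1 << 30));                  // 0: one past the end
}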
@@ -4810,30 +4916,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsValidSmi) {
// string->Get(index).
static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
if (index < static_cast<uint32_t>(string->length())) {
- string->TryFlatten();
- return LookupSingleCharacterStringFromCode(
- string->GetIsolate(),
- string->Get(index));
+ Factory* factory = string->GetIsolate()->factory();
+ return factory->LookupSingleCharacterStringFromCode(
+ String::Flatten(string)->Get(index));
}
return Execution::CharAt(string, index);
}
-MaybeObject* Runtime::GetElementOrCharAtOrFail(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate,
- GetElementOrCharAt(isolate, object, index));
-}
-
-
-MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
+MaybeHandle<Object> Runtime::GetElementOrCharAt(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
// Handle [] indexing on Strings
if (object->IsString()) {
Handle<Object> result = GetCharAt(Handle<String>::cast(object), index);
- if (!result->IsUndefined()) return *result;
+ if (!result->IsUndefined()) return result;
}
// Handle [] indexing on String objects
@@ -4841,67 +4938,57 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
Handle<JSValue> js_value = Handle<JSValue>::cast(object);
Handle<Object> result =
GetCharAt(Handle<String>(String::cast(js_value->value())), index);
- if (!result->IsUndefined()) return *result;
+ if (!result->IsUndefined()) return result;
}
+ Handle<Object> result;
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- return object->GetPrototype(isolate)->GetElement(isolate, index);
+ Handle<Object> proto(object->GetPrototype(isolate), isolate);
+ return Object::GetElement(isolate, proto, index);
+ } else {
+ return Object::GetElement(isolate, object, index);
}
-
- return object->GetElement(isolate, index);
}
-static Handle<Name> ToName(Isolate* isolate, Handle<Object> key) {
+MUST_USE_RESULT
+static MaybeHandle<Name> ToName(Isolate* isolate, Handle<Object> key) {
if (key->IsName()) {
return Handle<Name>::cast(key);
} else {
- bool has_pending_exception = false;
- Handle<Object> converted =
- Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Handle<Name>();
+ Handle<Object> converted;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, converted, Execution::ToString(isolate, key), Name);
return Handle<Name>::cast(converted);
}
}
-MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Object> key) {
- HandleScope scope(isolate);
-
+MaybeHandle<Object> Runtime::HasObjectProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key) {
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
- return isolate->heap()->ToBoolean(JSReceiver::HasElement(object, index));
+ return isolate->factory()->ToBoolean(JSReceiver::HasElement(object, index));
}
// Convert the key to a name - possibly by calling back into JavaScript.
- Handle<Name> name = ToName(isolate, key);
- RETURN_IF_EMPTY_HANDLE(isolate, name);
-
- return isolate->heap()->ToBoolean(JSReceiver::HasProperty(object, name));
-}
+ Handle<Name> name;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
-MaybeObject* Runtime::GetObjectPropertyOrFail(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key) {
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate,
- GetObjectProperty(isolate, object, key));
+ return isolate->factory()->ToBoolean(JSReceiver::HasProperty(object, name));
}
-MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key) {
- HandleScope scope(isolate);
+MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key) {
if (object->IsUndefined() || object->IsNull()) {
Handle<Object> args[2] = { key, object };
- Handle<Object> error =
+ return isolate->Throw<Object>(
isolate->factory()->NewTypeError("non_object_property_load",
- HandleVector(args, 2));
- return isolate->Throw(*error);
+ HandleVector(args, 2)));
}
// Check if the given key is an array index.
@@ -4911,60 +4998,68 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
}
// Convert the key to a name - possibly by calling back into JavaScript.
- Handle<Name> name = ToName(isolate, key);
- RETURN_IF_EMPTY_HANDLE(isolate, name);
+ Handle<Name> name;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
// Check if the name is trivially convertible to an index and get
// the element if so.
if (name->AsArrayIndex(&index)) {
return GetElementOrCharAt(isolate, object, index);
} else {
- return object->GetProperty(*name);
+ return Object::GetProperty(object, name);
}
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_GetProperty) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
-
- return Runtime::GetObjectProperty(isolate, object, key);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::GetObjectProperty(isolate, object, key));
+ return *result;
}
// KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
+
// Fast cases for getting named properties of the receiver JSObject
// itself.
//
- // The global proxy objects has to be excluded since LocalLookup on
+ // The global proxy object has to be excluded since LookupOwn on
// the global proxy object can return a valid result even though the
// global proxy object never has properties. This is the case
// because the global proxy object forwards everything to its hidden
- // prototype including local lookups.
+ // prototype including own lookups.
//
// Additionally, we need to make sure that we do not cache results
// for objects that require access checks.
- if (args[0]->IsJSObject()) {
- if (!args[0]->IsJSGlobalProxy() &&
- !args[0]->IsAccessCheckNeeded() &&
- args[1]->IsName()) {
- JSObject* receiver = JSObject::cast(args[0]);
- Name* key = Name::cast(args[1]);
+ if (receiver_obj->IsJSObject()) {
+ if (!receiver_obj->IsJSGlobalProxy() &&
+ !receiver_obj->IsAccessCheckNeeded() &&
+ key_obj->IsName()) {
+ DisallowHeapAllocation no_allocation;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
+ Handle<Name> key = Handle<Name>::cast(key_obj);
if (receiver->HasFastProperties()) {
// Attempt to use lookup cache.
- Map* receiver_map = receiver->map();
+ Handle<Map> receiver_map(receiver->map(), isolate);
KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
- int offset = keyed_lookup_cache->Lookup(receiver_map, key);
- if (offset != -1) {
+ int index = keyed_lookup_cache->Lookup(receiver_map, key);
+ if (index != -1) {
// Doubles are not cached, so raw read the value.
- Object* value = receiver->RawFastPropertyAt(offset);
+ Object* value = receiver->RawFastPropertyAt(
+ FieldIndex::ForKeyedLookupCacheIndex(*receiver_map, index));
return value->IsTheHole()
? isolate->heap()->undefined_value()
: value;
@@ -4972,16 +5067,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// Lookup cache miss. Perform lookup and update the cache if
// appropriate.
LookupResult result(isolate);
- receiver->LocalLookup(key, &result);
+ receiver->LookupOwn(key, &result);
if (result.IsField()) {
- int offset = result.GetFieldIndex().field_index();
+ FieldIndex field_index = result.GetFieldIndex();
// Do not track double fields in the keyed lookup cache. Reading
// double values requires boxing.
- if (!FLAG_track_double_fields ||
- !result.representation().IsDouble()) {
- keyed_lookup_cache->Update(receiver_map, key, offset);
+ if (!result.representation().IsDouble()) {
+ keyed_lookup_cache->Update(receiver_map, key,
+ field_index.GetKeyedLookupCacheIndex());
}
- return receiver->FastPropertyAt(result.representation(), offset);
+ AllowHeapAllocation allow_allocation;
+ return *JSObject::FastPropertyAt(receiver, result.representation(),
+ field_index);
}
} else {
// Attempt dictionary lookup.
@@ -4996,48 +5093,46 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// If value is the hole do the general lookup.
}
}
- } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) {
+ } else if (FLAG_smi_only_arrays && key_obj->IsSmi()) {
// JSObject without a name key. If the key is a Smi, check for a
// definite out-of-bounds access to elements, which is a strong indicator
// that subsequent accesses will also call the runtime. Proactively
// transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
// doubles for those future calls in the case that the elements would
// become FAST_DOUBLE_ELEMENTS.
- Handle<JSObject> js_object(args.at<JSObject>(0));
+ Handle<JSObject> js_object = Handle<JSObject>::cast(receiver_obj);
ElementsKind elements_kind = js_object->GetElementsKind();
if (IsFastDoubleElementsKind(elements_kind)) {
- FixedArrayBase* elements = js_object->elements();
- if (args.at<Smi>(1)->value() >= elements->length()) {
+ Handle<Smi> key = Handle<Smi>::cast(key_obj);
+ if (key->value() >= js_object->elements()->length()) {
if (IsFastHoleyElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
} else {
elements_kind = FAST_ELEMENTS;
}
- MaybeObject* maybe_object = TransitionElements(js_object,
- elements_kind,
- isolate);
- if (maybe_object->IsFailure()) return maybe_object;
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, TransitionElements(js_object, elements_kind, isolate));
}
} else {
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
!IsFastElementsKind(elements_kind));
}
}
- } else if (args[0]->IsString() && args[1]->IsSmi()) {
+ } else if (receiver_obj->IsString() && key_obj->IsSmi()) {
// Fast case for string indexing using [] with a smi index.
- HandleScope scope(isolate);
- Handle<String> str = args.at<String>(0);
+ Handle<String> str = Handle<String>::cast(receiver_obj);
int index = args.smi_at(1);
if (index >= 0 && index < str->length()) {
- Handle<Object> result = GetCharAt(str, index);
- return *result;
+ return *GetCharAt(str, index);
}
}
// Fall back to GetObjectProperty.
- return Runtime::GetObjectProperty(isolate,
- args.at<Object>(0),
- args.at<Object>(1));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::GetObjectProperty(isolate, receiver_obj, key_obj));
+ return *result;
}
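The KeyedLookupCache used above memoizes (map, name) -> field index so repeated keyed loads on objects of the same shape skip the full property lookup; the change stores an opaque cache index instead of a raw field offset. A toy model (real V8 uses a fixed-size hash table, not std::map):

#include <cstdio>
#include <map>
#include <string>
#include <utility>

int main() {
  // (hidden class id, property name) -> cached field index.
  std::map<std::pair<int, std::string>, int> cache;
  cache[{7, "x"}] = 0;                       // filled on the first slow miss
  auto it = cache.find({7, "x"});
  if (it != cache.end()) std::printf("cache hit: field %d\n", it->second);
}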
@@ -5052,7 +5147,7 @@ static bool IsValidAccessor(Handle<Object> obj) {
// Steps 9c & 12 - replace an existing data property with an accessor property.
// Step 12 - update an existing accessor property with an accessor or generic
// descriptor.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
+RUNTIME_FUNCTION(Runtime_DefineOrRedefineAccessorProperty) {
HandleScope scope(isolate);
ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
@@ -5067,8 +5162,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
bool fast = obj->HasFastProperties();
+ // DefineAccessor checks access rights.
JSObject::DefineAccessor(obj, name, getter, setter, attr);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
if (fast) JSObject::TransformToFastProperties(obj, 0);
return isolate->heap()->undefined_value();
}
@@ -5080,7 +5176,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
// Steps 9b & 12 - replace an existing accessor property with a data property.
// Step 12 - update an existing data property with a data or generic
// descriptor.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
+RUNTIME_FUNCTION(Runtime_DefineOrRedefineDataProperty) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
@@ -5090,35 +5186,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
- LookupResult lookup(isolate);
- js_object->LocalLookupRealNamedProperty(*name, &lookup);
-
- // Special case for callback properties.
- if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetCallbackObject(), isolate);
- // To be compatible with Safari we do not change the value on API objects
- // in Object.defineProperty(). Firefox disagrees here, and actually changes
- // the value.
- if (callback->IsAccessorInfo()) {
- return isolate->heap()->undefined_value();
- }
- // Avoid redefining foreign callback as data property, just use the stored
- // setter to update the value instead.
- // TODO(mstarzinger): So far this only works if property attributes don't
- // change, this should be fixed once we cleanup the underlying code.
- if (callback->IsForeign() && lookup.GetAttributes() == attr) {
- Handle<Object> result_object =
- JSObject::SetPropertyWithCallback(js_object,
- callback,
- name,
- obj_value,
- handle(lookup.holder()),
- kStrictMode);
- RETURN_IF_EMPTY_HANDLE(isolate, result_object);
- return *result_object;
- }
+ // Check access rights if needed.
+ if (js_object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(js_object, name, v8::ACCESS_SET)) {
+ return isolate->heap()->undefined_value();
}
+ LookupResult lookup(isolate);
+ js_object->LookupOwnRealNamedProperty(name, &lookup);
+
// Take special care when attributes are different and there is already
// a property. For simplicity we normalize the property which enables us
// to not worry about changing the instance_descriptor and creating a new
@@ -5133,60 +5209,54 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// we don't have to check for null.
js_object = Handle<JSObject>(JSObject::cast(js_object->GetPrototype()));
}
- JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+
+ if (attr != lookup.GetAttributes() ||
+ (lookup.IsPropertyCallbacks() &&
+ !lookup.GetCallbackObject()->IsAccessorInfo())) {
+ JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+ }
+
// Use IgnoreAttributes version since a readonly property may be
// overridden and SetProperty does not allow this.
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
- js_object, name, obj_value, attr);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ js_object, name, obj_value, attr,
+ Object::OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ JSReceiver::PERFORM_EXTENSIBILITY_CHECK,
+ JSReceiver::MAY_BE_STORE_FROM_KEYED,
+ JSObject::DONT_FORCE_FIELD));
return *result;
}
- Handle<Object> result = Runtime::ForceSetObjectProperty(isolate, js_object,
- name,
- obj_value,
- attr);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::ForceSetObjectProperty(
+ js_object, name, obj_value, attr,
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED));
return *result;
}
// Return property without being observable by accessors or interceptors.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_GetDataProperty) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- LookupResult lookup(isolate);
- object->LookupRealNamedProperty(*key, &lookup);
- if (!lookup.IsFound()) return isolate->heap()->undefined_value();
- switch (lookup.type()) {
- case NORMAL:
- return lookup.holder()->GetNormalizedProperty(&lookup);
- case FIELD:
- return lookup.holder()->FastPropertyAt(
- lookup.representation(),
- lookup.GetFieldIndex().field_index());
- case CONSTANT:
- return lookup.GetConstant();
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- return isolate->heap()->undefined_value();
- case NONEXISTENT:
- UNREACHABLE();
- }
- return isolate->heap()->undefined_value();
+ return *JSObject::GetDataProperty(object, key);
}
-Handle<Object> Runtime::SetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode) {
+MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr,
+ StrictMode strict_mode) {
SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
if (object->IsUndefined() || object->IsNull()) {
@@ -5194,15 +5264,17 @@ Handle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> error =
isolate->factory()->NewTypeError("non_object_property_store",
HandleVector(args, 2));
- isolate->Throw(*error);
- return Handle<Object>();
+ return isolate->Throw<Object>(error);
}
if (object->IsJSProxy()) {
- bool has_pending_exception = false;
- Handle<Object> name_object = key->IsSymbol()
- ? key : Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Handle<Object>(); // exception
+ Handle<Object> name_object;
+ if (key->IsSymbol()) {
+ name_object = key;
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, name_object, Execution::ToString(isolate, key), Object);
+ }
Handle<Name> name = Handle<Name>::cast(name_object);
return JSReceiver::SetProperty(Handle<JSProxy>::cast(object), name, value,
attr,
@@ -5228,21 +5300,19 @@ Handle<Object> Runtime::SetObjectProperty(Isolate* isolate,
return value;
}
- js_object->ValidateElements();
- if (js_object->HasExternalArrayElements()) {
+ JSObject::ValidateElements(js_object);
+ if (js_object->HasExternalArrayElements() ||
+ js_object->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
- bool has_exception;
- Handle<Object> number =
- Execution::ToNumber(isolate, value, &has_exception);
- if (has_exception) return Handle<Object>(); // exception
- value = number;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, Execution::ToNumber(isolate, value), Object);
}
}
- Handle<Object> result = JSObject::SetElement(js_object, index, value, attr,
- strict_mode,
- true,
- set_mode);
- js_object->ValidateElements();
+
+ MaybeHandle<Object> result = JSObject::SetElement(
+ js_object, index, value, attr, strict_mode, true, set_mode);
+ JSObject::ValidateElements(js_object);
+
return result.is_null() ? result : value;
}
@@ -5251,44 +5321,40 @@ Handle<Object> Runtime::SetObjectProperty(Isolate* isolate,
if (name->AsArrayIndex(&index)) {
if (js_object->HasExternalArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
- bool has_exception;
- Handle<Object> number =
- Execution::ToNumber(isolate, value, &has_exception);
- if (has_exception) return Handle<Object>(); // exception
- value = number;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, Execution::ToNumber(isolate, value), Object);
}
}
- return JSObject::SetElement(js_object, index, value, attr, strict_mode,
- true,
- set_mode);
+ return JSObject::SetElement(js_object, index, value, attr,
+ strict_mode, true, set_mode);
} else {
- if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
+ if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
}
}
// Call-back into JavaScript to convert the key to a string.
- bool has_pending_exception = false;
- Handle<Object> converted =
- Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Handle<Object>(); // exception
+ Handle<Object> converted;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, converted, Execution::ToString(isolate, key), Object);
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr, strict_mode,
- true,
- set_mode);
+ return JSObject::SetElement(js_object, index, value, attr,
+ strict_mode, true, set_mode);
} else {
return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
}
}
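SetObjectProperty, ForceSetObjectProperty, and DeleteObjectProperty all normalize the key the same way: try it as an array index, then as a Name, and only then call back into JavaScript via ToString and re-check for an index. A simplified router for the first step (real array-index parsing also rejects leading zeros and values at or above 2^32 - 1):

#include <cstdint>
#include <cstdio>
#include <string>

bool AsArrayIndex(const std::string& key, uint32_t* index) {
  if (key.empty() || key.size() > 9) return false;  // avoid overflow
  uint32_t value = 0;
  for (char c : key) {
    if (c < '0' || c > '9') return false;
    value = value * 10 + static_cast<uint32_t>(c - '0');
  }
  *index = value;
  return true;
}

int main() {
  uint32_t index;
  std::string key = "42";
  if (AsArrayIndex(key, &index)) std::printf("element store at %u\n", index);
  else std::printf("named store of '%s'\n", key.c_str());
}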
-Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
- Handle<JSObject> js_object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr) {
+MaybeHandle<Object> Runtime::ForceSetObjectProperty(
+ Handle<JSObject> js_object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr,
+ JSReceiver::StoreFromKeyed store_from_keyed) {
+ Isolate* isolate = js_object->GetIsolate();
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
@@ -5303,48 +5369,46 @@ Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
return value;
}
- return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
- false,
- DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr,
+ SLOPPY, false, DEFINE_PROPERTY);
}
if (key->IsName()) {
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
- false,
- DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr,
+ SLOPPY, false, DEFINE_PROPERTY);
} else {
- if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- return JSObject::SetLocalPropertyIgnoreAttributes(js_object, name,
- value, attr);
+ if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
+ return JSObject::SetOwnPropertyIgnoreAttributes(
+ js_object, name, value, attr, Object::OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT, JSReceiver::PERFORM_EXTENSIBILITY_CHECK,
+ store_from_keyed);
}
}
  // Call back into JavaScript to convert the key to a string.
- bool has_pending_exception = false;
- Handle<Object> converted =
- Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Handle<Object>(); // exception
+ Handle<Object> converted;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, converted, Execution::ToString(isolate, key), Object);
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
- false,
- DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr,
+ SLOPPY, false, DEFINE_PROPERTY);
} else {
- return JSObject::SetLocalPropertyIgnoreAttributes(js_object, name, value,
- attr);
+ return JSObject::SetOwnPropertyIgnoreAttributes(
+ js_object, name, value, attr, Object::OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT, JSReceiver::PERFORM_EXTENSIBILITY_CHECK,
+ store_from_keyed);
}
}
-MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<Object> key,
- JSReceiver::DeleteMode mode) {
- HandleScope scope(isolate);
-
+MaybeHandle<Object> Runtime::DeleteObjectProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<Object> key,
+ JSReceiver::DeleteMode mode) {
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
@@ -5355,12 +5419,10 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
// underlying string does nothing with the deletion, we can ignore
// such deletions.
if (receiver->IsStringObjectWithCharacterAt(index)) {
- return isolate->heap()->true_value();
+ return isolate->factory()->true_value();
}
- Handle<Object> result = JSReceiver::DeleteElement(receiver, index, mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ return JSReceiver::DeleteElement(receiver, index, mode);
}
Handle<Name> name;
@@ -5368,21 +5430,30 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
name = Handle<Name>::cast(key);
} else {
    // Call back into JavaScript to convert the key to a string.
- bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(
- isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ Handle<Object> converted;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, converted, Execution::ToString(isolate, key), Object);
name = Handle<String>::cast(converted);
}
- if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- Handle<Object> result = JSReceiver::DeleteProperty(receiver, name, mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
+ return JSReceiver::DeleteProperty(receiver, name, mode);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
+RUNTIME_FUNCTION(Runtime_SetHiddenProperty) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ RUNTIME_ASSERT(key->IsUniqueName());
+ return *JSObject::SetHiddenProperty(object, key, value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetProperty) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
@@ -5396,22 +5467,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
PropertyAttributes attributes =
static_cast<PropertyAttributes>(unchecked_attributes);
- StrictModeFlag strict_mode = kNonStrictMode;
+ StrictMode strict_mode = SLOPPY;
if (args.length() == 5) {
- CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_flag, 4);
- strict_mode = strict_mode_flag;
+ CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_arg, 4);
+ strict_mode = strict_mode_arg;
}
- Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
- value,
- attributes,
- strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::SetObjectProperty(
+ isolate, object, key, value, attributes, strict_mode));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsKind) {
+RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
@@ -5424,7 +5495,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsKind) {
// Set the native flag on the function.
// This is used to decide if we should transform null and undefined
// into the global object when doing call and apply.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
+RUNTIME_FUNCTION(Runtime_SetNativeFlag) {
SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
@@ -5438,11 +5509,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInlineBuiltinFlag) {
+RUNTIME_FUNCTION(Runtime_SetInlineBuiltinFlag) {
SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
-
- Handle<Object> object = args.at<Object>(0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(*object);
@@ -5452,12 +5522,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInlineBuiltinFlag) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
+RUNTIME_FUNCTION(Runtime_StoreArrayLiteralElement) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(store_index, 1);
- Handle<Object> value = args.at<Object>(2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3);
CONVERT_SMI_ARG_CHECKED(literal_index, 4);
@@ -5491,16 +5561,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
HeapNumber* number = HeapNumber::cast(*value);
double_array->set(store_index, number->Number());
} else {
- ASSERT(IsFastSmiElementsKind(elements_kind) ||
- IsFastDoubleElementsKind(elements_kind));
- ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
- JSObject::TransitionElementsKind(object, transitioned_kind);
- if (IsMoreGeneralElementsKindTransition(
- boilerplate_object->GetElementsKind(),
- transitioned_kind)) {
- JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
+ if (!IsFastObjectElementsKind(elements_kind)) {
+ ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ JSObject::TransitionElementsKind(object, transitioned_kind);
+ ElementsKind boilerplate_elements_kind =
+ boilerplate_object->GetElementsKind();
+ if (IsMoreGeneralElementsKindTransition(boilerplate_elements_kind,
+ transitioned_kind)) {
+ JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
+ }
}
FixedArray* object_array = FixedArray::cast(object->elements());
object_array->set(store_index, *value);
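
For context, the branch above only ever generalizes toward the most general
fast kinds. A rough sketch of the fast elements-kind ordering that
IsMoreGeneralElementsKindTransition encodes (simplified; the HOLEY variants
form a parallel track):

//   FAST_SMI_ELEMENTS       -> FAST_DOUBLE_ELEMENTS       -> FAST_ELEMENTS
//   FAST_HOLEY_SMI_ELEMENTS -> FAST_HOLEY_DOUBLE_ELEMENTS -> FAST_HOLEY_ELEMENTS
// Packed kinds may additionally transition to their HOLEY counterparts.
// "More general" means further along these arrows, so the boilerplate's
// kind is only ever moved forward, never back.
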
@@ -5511,29 +5582,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
// Check whether the debugger is about to step into the callback that is passed
// to a built-in function such as Array.forEach.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
- SealHandleScope shs(isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate->IsDebuggerActive() || !isolate->debug()->StepInActive()) {
+RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
+ ASSERT(args.length() == 1);
+ if (!isolate->debug()->is_active() || !isolate->debug()->StepInActive()) {
return isolate->heap()->false_value();
}
CONVERT_ARG_CHECKED(Object, callback, 0);
// We do not step into the callback if it's a builtin or not even a function.
- if (!callback->IsJSFunction() || JSFunction::cast(callback)->IsBuiltin()) {
- return isolate->heap()->false_value();
- }
- return isolate->heap()->true_value();
-#else
- return isolate->heap()->false_value();
-#endif // ENABLE_DEBUGGER_SUPPORT
+ return isolate->heap()->ToBoolean(
+ callback->IsJSFunction() && !JSFunction::cast(callback)->IsBuiltin());
}
// Set one-shot breakpoints for the callback function that is passed to a
// built-in function such as Array.forEach to enable stepping into the callback.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
- SealHandleScope shs(isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
+RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
+ ASSERT(args.length() == 1);
Debug* debug = isolate->debug();
if (!debug->IsStepping()) return isolate->heap()->undefined_value();
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
@@ -5543,14 +5607,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
// again, we need to clear the step out at this point.
debug->ClearStepOut();
debug->FloodWithOneShot(callback);
-#endif // ENABLE_DEBUGGER_SUPPORT
return isolate->heap()->undefined_value();
}
-// Set a local property, even if it is READ_ONLY. If the property does not
+// The argument is a closure that is kept until the epilogue is called.
+// On exception, the closure is called, which returns the promise if the
+// exception is considered uncaught, or undefined otherwise.
+RUNTIME_FUNCTION(Runtime_DebugPromiseHandlePrologue) {
+ ASSERT(args.length() == 1);
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, promise_getter, 0);
+ isolate->debug()->PromiseHandlePrologue(promise_getter);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_DebugPromiseHandleEpilogue) {
+ ASSERT(args.length() == 0);
+ SealHandleScope shs(isolate);
+ isolate->debug()->PromiseHandleEpilogue();
+ return isolate->heap()->undefined_value();
+}
+
+
+// Set an own property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
+RUNTIME_FUNCTION(Runtime_IgnoreAttributesAndSetProperty) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
@@ -5565,31 +5648,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
(unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
attributes = static_cast<PropertyAttributes>(unchecked_value);
}
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
- object, name, value, attributes);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ object, name, value, attributes));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
+RUNTIME_FUNCTION(Runtime_DeleteProperty) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
- JSReceiver::DeleteMode delete_mode = (strict_mode == kStrictMode)
+ JSReceiver::DeleteMode delete_mode = strict_mode == STRICT
? JSReceiver::STRICT_DELETION : JSReceiver::NORMAL_DELETION;
- Handle<Object> result = JSReceiver::DeleteProperty(object, key, delete_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSReceiver::DeleteProperty(object, key, delete_mode));
return *result;
}
-static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
- Handle<JSObject> object,
- Handle<Name> key) {
- if (JSReceiver::HasLocalProperty(object, key)) {
+static Object* HasOwnPropertyImplementation(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Name> key) {
+ if (JSReceiver::HasOwnProperty(object, key)) {
return isolate->heap()->true_value();
}
// Handle hidden prototypes. If there's a hidden prototype above this thing
@@ -5598,20 +5685,20 @@ static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsJSObject() &&
Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
- return HasLocalPropertyImplementation(isolate,
- Handle<JSObject>::cast(proto),
- key);
+ return HasOwnPropertyImplementation(isolate,
+ Handle<JSObject>::cast(proto),
+ key);
}
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
+RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0)
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- Handle<Object> object = args.at<Object>(0);
uint32_t index;
const bool key_is_array_index = key->AsArrayIndex(&index);
@@ -5626,7 +5713,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
ASSERT(!isolate->has_scheduled_exception());
return isolate->heap()->true_value();
} else {
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
}
Map* map = js_obj->map();
if (!key_is_array_index &&
@@ -5635,9 +5722,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
return isolate->heap()->false_value();
}
// Slow case.
- return HasLocalPropertyImplementation(isolate,
- Handle<JSObject>(js_obj),
- Handle<Name>(key));
+ return HasOwnPropertyImplementation(isolate,
+ Handle<JSObject>(js_obj),
+ Handle<Name>(key));
} else if (object->IsString() && key_is_array_index) {
// Well, there is one exception: Handle [] on strings.
Handle<String> string = Handle<String>::cast(object);
@@ -5649,42 +5736,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
+RUNTIME_FUNCTION(Runtime_HasProperty) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
bool result = JSReceiver::HasProperty(receiver, key);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(result);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
+RUNTIME_FUNCTION(Runtime_HasElement) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
bool result = JSReceiver::HasElement(receiver, index);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->ToBoolean(result);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_IsPropertyEnumerable) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_ARG_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- PropertyAttributes att = object->GetLocalPropertyAttribute(key);
+ PropertyAttributes att = JSReceiver::GetOwnPropertyAttributes(object, key);
if (att == ABSENT || (att & DONT_ENUM) != 0) {
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
}
ASSERT(!isolate->has_scheduled_exception());
@@ -5692,14 +5778,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
+RUNTIME_FUNCTION(Runtime_GetPropertyNames) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- bool threw = false;
- Handle<JSArray> result = GetKeysFor(object, &threw);
- if (threw) return Failure::Exception();
- return *result;
+ Handle<JSArray> result;
+
+ isolate->counters()->for_in()->Increment();
+ Handle<FixedArray> elements;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, elements,
+ JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS));
+ return *isolate->factory()->NewJSArrayWithElements(elements);
}
@@ -5708,7 +5798,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
// all enumerable properties of the object and its prototypes
// have none, the map of the object. This is used to speed up
// the check for deletions during a for-in.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
+RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -5718,10 +5808,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
HandleScope scope(isolate);
Handle<JSReceiver> object(raw_object);
- bool threw = false;
- Handle<FixedArray> content =
- GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, &threw);
- if (threw) return Failure::Exception();
+ Handle<FixedArray> content;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, content,
+ JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS));
// Test again, since cache may have been built by preceding call.
if (object->IsSimpleEnum()) return object->map();
@@ -5730,10 +5820,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
}
-// Find the length of the prototype chain that is to to handled as one. If a
+// Find the length of the prototype chain that is to be handled as one. If a
// prototype object is hidden it is to be viewed as part of the object it
// is prototype for.
-static int LocalPrototypeChainLength(JSObject* obj) {
+static int OwnPrototypeChainLength(JSObject* obj) {
int count = 1;
Object* proto = obj->GetPrototype();
while (proto->IsJSObject() &&
@@ -5745,53 +5835,52 @@ static int LocalPrototypeChainLength(JSObject* obj) {
}
-// Return the names of the local named properties.
+// Return the names of the own named properties.
// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
+// args[1]: PropertyAttributes as int
+RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
if (!args[0]->IsJSObject()) {
return isolate->heap()->undefined_value();
}
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(include_symbols, 1);
- PropertyAttributes filter = include_symbols ? NONE : SYMBOLIC;
+ CONVERT_SMI_ARG_CHECKED(filter_value, 1);
+ PropertyAttributes filter = static_cast<PropertyAttributes>(filter_value);
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
if (obj->IsJSGlobalProxy()) {
// Only collect names if access is permitted.
if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*obj,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ !isolate->MayNamedAccess(
+ obj, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(obj, v8::ACCESS_KEYS);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
}
// Find the number of objects making up this.
- int length = LocalPrototypeChainLength(*obj);
+ int length = OwnPrototypeChainLength(*obj);
- // Find the number of local properties for each of the objects.
- ScopedVector<int> local_property_count(length);
+ // Find the number of own properties for each of the objects.
+ ScopedVector<int> own_property_count(length);
int total_property_count = 0;
Handle<JSObject> jsproto = obj;
for (int i = 0; i < length; i++) {
// Only collect names if access is permitted.
if (jsproto->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*jsproto,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ !isolate->MayNamedAccess(
+ jsproto, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(jsproto, v8::ACCESS_KEYS);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
int n;
- n = jsproto->NumberOfLocalProperties(filter);
- local_property_count[i] = n;
+ n = jsproto->NumberOfOwnProperties(filter);
+ own_property_count[i] = n;
total_property_count += n;
if (i < length - 1) {
jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
@@ -5804,41 +5893,66 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
// Get the property names.
jsproto = obj;
- int proto_with_hidden_properties = 0;
int next_copy_index = 0;
+ int hidden_strings = 0;
for (int i = 0; i < length; i++) {
- jsproto->GetLocalPropertyNames(*names, next_copy_index, filter);
- next_copy_index += local_property_count[i];
- if (jsproto->HasHiddenProperties()) {
- proto_with_hidden_properties++;
+ jsproto->GetOwnPropertyNames(*names, next_copy_index, filter);
+ if (i > 0) {
+ // Names from hidden prototypes may already have been added
+ // for inherited function template instances. Count the duplicates
+ // and stub them out; the final copy pass at the end ignores holes.
+ for (int j = next_copy_index;
+ j < next_copy_index + own_property_count[i];
+ j++) {
+ Object* name_from_hidden_proto = names->get(j);
+ for (int k = 0; k < next_copy_index; k++) {
+ if (names->get(k) != isolate->heap()->hidden_string()) {
+ Object* name = names->get(k);
+ if (name_from_hidden_proto == name) {
+ names->set(j, isolate->heap()->hidden_string());
+ hidden_strings++;
+ break;
+ }
+ }
+ }
+ }
+ }
+ next_copy_index += own_property_count[i];
+
+ // Hidden properties only show up if the filter does not skip strings.
+ if ((filter & STRING) == 0 && JSObject::HasHiddenProperties(jsproto)) {
+ hidden_strings++;
}
if (i < length - 1) {
jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
}
}
- // Filter out name of hidden properties object.
- if (proto_with_hidden_properties > 0) {
+  // Filter out the name of the hidden properties object and
+ // hidden prototype duplicates.
+ if (hidden_strings > 0) {
Handle<FixedArray> old_names = names;
names = isolate->factory()->NewFixedArray(
- names->length() - proto_with_hidden_properties);
+ names->length() - hidden_strings);
int dest_pos = 0;
for (int i = 0; i < total_property_count; i++) {
Object* name = old_names->get(i);
if (name == isolate->heap()->hidden_string()) {
+ hidden_strings--;
continue;
}
names->set(dest_pos++, name);
}
+ ASSERT_EQ(0, hidden_strings);
}
return *isolate->factory()->NewJSArrayWithElements(names);
}
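
The stub-and-compact scheme above (mark duplicates with the hidden string,
then skip the holes in one final copy) generalizes beyond FixedArray. A
self-contained sketch of the same idea with standard containers; kHole,
StubOutDuplicates, and Compact are illustrative names, not V8 helpers:

#include <string>
#include <vector>

static const std::string kHole = "<hole>";  // stand-in for hidden_string

// Mark every name at or beyond prefix_end that already occurs before
// prefix_end; mirrors the duplicate scan over names from hidden prototypes.
void StubOutDuplicates(std::vector<std::string>* names, size_t prefix_end) {
  for (size_t j = prefix_end; j < names->size(); ++j) {
    for (size_t k = 0; k < prefix_end; ++k) {
      if ((*names)[k] != kHole && (*names)[k] == (*names)[j]) {
        (*names)[j] = kHole;
        break;
      }
    }
  }
}

// The final copy pass: keep everything that is not a hole.
std::vector<std::string> Compact(const std::vector<std::string>& names) {
  std::vector<std::string> out;
  for (const std::string& name : names) {
    if (name != kHole) out.push_back(name);
  }
  return out;
}
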
-// Return the names of the local indexed properties.
+// Return the names of the own indexed properties.
// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalElementNames) {
+RUNTIME_FUNCTION(Runtime_GetOwnElementNames) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSObject()) {
@@ -5846,16 +5960,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalElementNames) {
}
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
+ int n = obj->NumberOfOwnElements(static_cast<PropertyAttributes>(NONE));
Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
- obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
+ obj->GetOwnElementKeys(*names, static_cast<PropertyAttributes>(NONE));
return *isolate->factory()->NewJSArrayWithElements(names);
}
// Return information on whether an object has a named or indexed interceptor.
// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetInterceptorInfo) {
+RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSObject()) {
@@ -5873,14 +5987,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetInterceptorInfo) {
// Return property names from named interceptor.
// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
+RUNTIME_FUNCTION(Runtime_GetNamedInterceptorPropertyNames) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
if (obj->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ Handle<JSObject> result;
+ if (JSObject::GetKeysForNamedInterceptor(obj, obj).ToHandle(&result)) {
+ return *result;
+ }
}
return isolate->heap()->undefined_value();
}
@@ -5888,32 +6004,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
// Return element names from indexed interceptor.
// args[0]: object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
+RUNTIME_FUNCTION(Runtime_GetIndexedInterceptorElementNames) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
if (obj->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ Handle<JSObject> result;
+ if (JSObject::GetKeysForIndexedInterceptor(obj, obj).ToHandle(&result)) {
+ return *result;
+ }
}
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
+RUNTIME_FUNCTION(Runtime_OwnKeys) {
HandleScope scope(isolate);
- ASSERT_EQ(args.length(), 1);
+ ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
Handle<JSObject> object(raw_object);
if (object->IsJSGlobalProxy()) {
// Do access checks before going to the global object.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ !isolate->MayNamedAccess(
+ object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -5923,10 +6041,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
object = Handle<JSObject>::cast(proto);
}
- bool threw = false;
- Handle<FixedArray> contents =
- GetKeysInFixedArrayFor(object, LOCAL_ONLY, &threw);
- if (threw) return Failure::Exception();
+ Handle<FixedArray> contents;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, contents,
+ JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY));
// Some fast paths through GetKeysInFixedArrayFor reuse a cached
// property array and since the result is mutable we have to create
@@ -5950,9 +6068,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
+RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, raw_key, 0);
// Compute the frame holding the arguments.
JavaScriptFrameIterator it(isolate);
@@ -5965,22 +6084,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
  // Try to convert the key to an index. If successful and within
  // range, return the argument from the frame.
uint32_t index;
- if (args[0]->ToArrayIndex(&index) && index < n) {
+ if (raw_key->ToArrayIndex(&index) && index < n) {
return frame->GetParameter(index);
}
- if (args[0]->IsSymbol()) {
+ HandleScope scope(isolate);
+ if (raw_key->IsSymbol()) {
    // Look up the property in the initial Object.prototype object.
- return isolate->initial_object_prototype()->GetProperty(
- Symbol::cast(args[0]));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Object::GetProperty(isolate->initial_object_prototype(),
+ Handle<Symbol>::cast(raw_key)));
+ return *result;
}
// Convert the key to a string.
- HandleScope scope(isolate);
- bool exception = false;
- Handle<Object> converted =
- Execution::ToString(isolate, args.at<Object>(0), &exception);
- if (exception) return Failure::Exception();
+ Handle<Object> converted;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, converted, Execution::ToString(isolate, raw_key));
Handle<String> key = Handle<String>::cast(converted);
// Try to convert the string key into an array index.
@@ -5988,15 +6110,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
if (index < n) {
return frame->GetParameter(index);
} else {
- return isolate->initial_object_prototype()->GetElement(isolate, index);
+ Handle<Object> initial_prototype(isolate->initial_object_prototype());
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Object::GetElement(isolate, initial_prototype, index));
+ return *result;
}
}
// Handle special arguments properties.
- if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n);
- if (key->Equals(isolate->heap()->callee_string())) {
+ if (String::Equals(isolate->factory()->length_string(), key)) {
+ return Smi::FromInt(n);
+ }
+ if (String::Equals(isolate->factory()->callee_string(), key)) {
JSFunction* function = frame->function();
- if (!function->shared()->is_classic_mode()) {
+ if (function->shared()->strict_mode() == STRICT) {
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_arguments_callee", HandleVector<Object>(NULL, 0)));
}
@@ -6004,11 +6133,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
}
  // Look up the property in the initial Object.prototype object.
- return isolate->initial_object_prototype()->GetProperty(*key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Object::GetProperty(isolate->initial_object_prototype(), key));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
+RUNTIME_FUNCTION(Runtime_ToFastProperties) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
@@ -6019,20 +6152,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
+RUNTIME_FUNCTION(Runtime_ToBool) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, object, 0);
- return isolate->heap()->ToBoolean(args[0]->BooleanValue());
+ return isolate->heap()->ToBoolean(object->BooleanValue());
}
// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
// Possible optimizations: put the type string into the oddballs.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
+RUNTIME_FUNCTION(Runtime_Typeof) {
SealHandleScope shs(isolate);
-
- Object* obj = args[0];
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, obj, 0);
if (obj->IsNumber()) return isolate->heap()->number_string();
HeapObject* heap_obj = HeapObject::cast(obj);
@@ -6093,18 +6227,19 @@ static int ParseDecimalInteger(const uint8_t*s, int from, int to) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_StringToNumber) {
+ HandleScope handle_scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(String, subject, 0);
- subject->TryFlatten();
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ subject = String::Flatten(subject);
  // Fast case: a short integer or certain kinds of junk values.
- int len = subject->length();
if (subject->IsSeqOneByteString()) {
+ int len = subject->length();
if (len == 0) return Smi::FromInt(0);
- uint8_t const* data = SeqOneByteString::cast(subject)->GetChars();
+ DisallowHeapAllocation no_gc;
+ uint8_t const* data = Handle<SeqOneByteString>::cast(subject)->GetChars();
bool minus = (data[0] == '-');
int start_pos = (minus ? 1 : 0);
@@ -6112,15 +6247,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
return isolate->heap()->nan_value();
} else if (data[start_pos] > '9') {
      // Fast check for a junk value. A valid string may start with
- // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit or
- // the 'I' character ('Infinity'). All of that have codes not greater than
- // '9' except 'I' and &nbsp;.
+ // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit
+      // or the 'I' character ('Infinity'). All of those have codes not greater
+      // than '9' except 'I' and the non-breaking space (0xA0).
if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
return isolate->heap()->nan_value();
}
} else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
- // The maximal/minimal smi has 10 digits. If the string has less digits we
- // know it will fit into the smi-data type.
+      // The maximal/minimal smi has 10 digits. If the string has fewer digits,
+      // we know it will fit into the smi-data type.
int d = ParseDecimalInteger(data, start_pos, len);
if (minus) {
if (d == 0) return isolate->heap()->minus_zero_value();
@@ -6149,144 +6284,173 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
// Type", https://bugs.ecmascript.org/show_bug.cgi?id=1584
flags |= ALLOW_OCTAL | ALLOW_BINARY;
}
- return isolate->heap()->NumberFromDouble(
- StringToDouble(isolate->unicode_cache(), subject, flags));
+
+ return *isolate->factory()->NewNumber(StringToDouble(
+ isolate->unicode_cache(), *subject, flags));
}
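
The fast path above accepts at most nine digits (len - start_pos < 10). The
bound comes from the 31-bit Smi payload of 32-bit builds, where Smi::kMaxValue
is 1073741823, itself a ten-digit number, so any nine-digit integer is
guaranteed to fit; a quick sanity check:

// Largest nine-digit integer vs. the 32-bit Smi maximum.
STATIC_ASSERT(999999999 < 1073741823);
// A ten-digit input such as "1999999999" could overflow a Smi, so it
// falls through to the general StringToDouble path instead.
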
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NewString) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(length, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
if (length == 0) return isolate->heap()->empty_string();
+ Handle<String> result;
if (is_one_byte) {
- return isolate->heap()->AllocateRawOneByteString(length);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawOneByteString(length));
} else {
- return isolate->heap()->AllocateRawTwoByteString(length);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(length));
}
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TruncateString) {
+RUNTIME_FUNCTION(Runtime_TruncateString) {
HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(SeqString, string, 0);
CONVERT_SMI_ARG_CHECKED(new_length, 1);
+ RUNTIME_ASSERT(new_length >= 0);
return *SeqString::Truncate(string, new_length);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
+RUNTIME_FUNCTION(Runtime_URIEscape) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Handle<String> string = FlattenGetString(source);
+ Handle<String> string = String::Flatten(source);
ASSERT(string->IsFlat());
- Handle<String> result = string->IsOneByteRepresentationUnderneath()
- ? URIEscape::Escape<uint8_t>(isolate, source)
- : URIEscape::Escape<uc16>(isolate, source);
- if (result.is_null()) return Failure::OutOfMemoryException(0x12);
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ string->IsOneByteRepresentationUnderneath()
+ ? URIEscape::Escape<uint8_t>(isolate, source)
+ : URIEscape::Escape<uc16>(isolate, source));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
+RUNTIME_FUNCTION(Runtime_URIUnescape) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Handle<String> string = FlattenGetString(source);
+ Handle<String> string = String::Flatten(source);
ASSERT(string->IsFlat());
- return string->IsOneByteRepresentationUnderneath()
- ? *URIUnescape::Unescape<uint8_t>(isolate, source)
- : *URIUnescape::Unescape<uc16>(isolate, source);
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ string->IsOneByteRepresentationUnderneath()
+ ? URIUnescape::Unescape<uint8_t>(isolate, source)
+ : URIUnescape::Unescape<uc16>(isolate, source));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
+RUNTIME_FUNCTION(Runtime_QuoteJSONString) {
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
ASSERT(args.length() == 1);
- return BasicJsonStringifier::StringifyString(isolate, string);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, BasicJsonStringifier::StringifyString(isolate, string));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BasicJSONStringify) {
+RUNTIME_FUNCTION(Runtime_BasicJSONStringify) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
BasicJsonStringifier stringifier(isolate);
- return stringifier.Stringify(Handle<Object>(args[0], isolate));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, stringifier.Stringify(object));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_StringParseInt) {
+ HandleScope handle_scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
+ RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
- CONVERT_ARG_CHECKED(String, s, 0);
- CONVERT_SMI_ARG_CHECKED(radix, 1);
+ subject = String::Flatten(subject);
+ double value;
- s->TryFlatten();
+ { DisallowHeapAllocation no_gc;
+ String::FlatContent flat = subject->GetFlatContent();
- RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
- double value = StringToInt(isolate->unicode_cache(), s, radix);
- return isolate->heap()->NumberFromDouble(value);
+    // ECMA-262 section 15.1.2.2 (parseInt): the empty string parses to NaN.
+ if (flat.IsAscii()) {
+ value = StringToInt(
+ isolate->unicode_cache(), flat.ToOneByteVector(), radix);
+ } else {
+ value = StringToInt(
+ isolate->unicode_cache(), flat.ToUC16Vector(), radix);
+ }
+ }
+
+ return *isolate->factory()->NewNumber(value);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
- SealHandleScope shs(isolate);
- CONVERT_ARG_CHECKED(String, str, 0);
+RUNTIME_FUNCTION(Runtime_StringParseFloat) {
+ HandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- // ECMA-262 section 15.1.2.3, empty string is NaN
- double value = StringToDouble(isolate->unicode_cache(),
- str, ALLOW_TRAILING_JUNK, OS::nan_value());
+ subject = String::Flatten(subject);
+ double value = StringToDouble(
+ isolate->unicode_cache(), *subject, ALLOW_TRAILING_JUNK, OS::nan_value());
- // Create a number object from the value.
- return isolate->heap()->NumberFromDouble(value);
+ return *isolate->factory()->NewNumber(value);
+}
+
+
+static inline bool ToUpperOverflows(uc32 character) {
+  // y with umlaut ('ÿ') and the micro sign ('µ') are the only characters that
+  // stop fitting into one byte when converted to uppercase.
+ static const uc32 yuml_code = 0xff;
+ static const uc32 micro_code = 0xb5;
+ return (character == yuml_code || character == micro_code);
}
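
Concretely, these are the only Latin-1 code points whose uppercase forms leave
the one-byte range, which is what the helper's name refers to:

// U+00FF 'ÿ' (LATIN SMALL LETTER Y WITH DIAERESIS) uppercases to U+0178 'Ÿ'.
// U+00B5 'µ' (MICRO SIGN) uppercases to U+039C 'Μ' (GREEK CAPITAL LETTER MU).
// Both targets exceed 0xFF, so a one-byte result string cannot hold them and
// the conversion must fall back to a two-byte string.
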
template <class Converter>
-MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
+MUST_USE_RESULT static Object* ConvertCaseHelper(
Isolate* isolate,
- String* s,
- String::Encoding result_encoding,
- int length,
- int input_string_length,
+ String* string,
+ SeqString* result,
+ int result_length,
unibrow::Mapping<Converter, 128>* mapping) {
+ DisallowHeapAllocation no_gc;
// We try this twice, once with the assumption that the result is no longer
// than the input and, if that assumption breaks, again with the exact
// length. This may not be pretty, but it is nicer than what was here before
  // and I hereby claim my vaffel-is (waffle ice cream).
//
- // Allocate the resulting string.
- //
// NOTE: This assumes that the upper/lower case of an ASCII
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- Object* o;
- { MaybeObject* maybe_o = result_encoding == String::ONE_BYTE_ENCODING
- ? isolate->heap()->AllocateRawOneByteString(length)
- : isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* result = String::cast(o);
bool has_changed_character = false;
- DisallowHeapAllocation no_gc;
-
  // Convert all characters to the target case, assuming that they will fit
  // in the buffer.
Access<ConsStringIteratorOp> op(
isolate->runtime_state()->string_iterator());
- StringCharacterStream stream(s, op.value());
+ StringCharacterStream stream(string, op.value());
unibrow::uchar chars[Converter::kMaxWidth];
// We can assume that the string is not empty
uc32 current = stream.GetNext();
- // y with umlauts is the only character that stops fitting into one-byte
- // when converting to uppercase.
- static const uc32 yuml_code = 0xff;
- bool ignore_yuml = result->IsSeqTwoByteString() || Converter::kIsToLower;
- for (int i = 0; i < length;) {
+ bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
+ for (int i = 0; i < result_length;) {
bool has_next = stream.HasMore();
uc32 next = has_next ? stream.GetNext() : 0;
int char_length = mapping->get(current, next, chars);
@@ -6294,14 +6458,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// The case conversion of this character is the character itself.
result->Set(i, current);
i++;
- } else if (char_length == 1 && (ignore_yuml || current != yuml_code)) {
+ } else if (char_length == 1 &&
+ (ignore_overflow || !ToUpperOverflows(current))) {
// Common case: converting the letter resulted in one character.
ASSERT(static_cast<uc32>(chars[0]) != current);
result->Set(i, chars[0]);
has_changed_character = true;
i++;
- } else if (length == input_string_length) {
- bool found_yuml = (current == yuml_code);
+ } else if (result_length == string->length()) {
+ bool overflows = ToUpperOverflows(current);
// We've assumed that the result would be as long as the
// input but here is a character that converts to several
// characters. No matter, we calculate the exact length
@@ -6321,7 +6486,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
int current_length = i + char_length + next_length;
while (stream.HasMore()) {
current = stream.GetNext();
- found_yuml |= (current == yuml_code);
+ overflows |= ToUpperOverflows(current);
// NOTE: we use 0 as the next character here because, while
// the next character may affect what a character converts to,
        // it does not in any case affect the length of what it converts
@@ -6329,15 +6494,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
int char_length = mapping->get(current, 0, chars);
if (char_length == 0) char_length = 1;
current_length += char_length;
- if (current_length > Smi::kMaxValue) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x13);
+ if (current_length > String::kMaxLength) {
+ AllowHeapAllocation allocate_error_and_return;
+ return isolate->ThrowInvalidStringLength();
}
}
      // Try again with the real length. Return a negative length if we need
- // to allocate a two-byte string for y-umlaut to uppercase.
- return (found_yuml && !ignore_yuml) ? Smi::FromInt(-current_length)
- : Smi::FromInt(current_length);
+      // to allocate a two-byte string for the conversion to uppercase.
+ return (overflows && !ignore_overflow) ? Smi::FromInt(-current_length)
+ : Smi::FromInt(current_length);
} else {
for (int j = 0; j < char_length; j++) {
result->Set(i, chars[j]);
@@ -6354,7 +6519,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
    // we simply return the result and let the converted string
// become garbage; there is no reason to keep two identical strings
// alive.
- return s;
+ return string;
}
}
@@ -6385,7 +6550,7 @@ static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
#ifdef DEBUG
static bool CheckFastAsciiConvert(char* dst,
- char* src,
+ const char* src,
int length,
bool changed,
bool is_to_lower) {
@@ -6408,12 +6573,12 @@ static bool CheckFastAsciiConvert(char* dst,
template<class Converter>
static bool FastAsciiConvert(char* dst,
- char* src,
+ const char* src,
int length,
bool* changed_out) {
#ifdef DEBUG
char* saved_dst = dst;
- char* saved_src = src;
+ const char* saved_src = src;
#endif
DisallowHeapAllocation no_gc;
// We rely on the distance between upper and lower case letters
@@ -6424,12 +6589,12 @@ static bool FastAsciiConvert(char* dst,
static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1;
bool changed = false;
uintptr_t or_acc = 0;
- char* const limit = src + length;
+ const char* const limit = src + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
// Process the prefix of the input that requires no conversion one
// (machine) word at a time.
while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
or_acc |= w;
if (AsciiRangeMask(w, lo, hi) != 0) {
changed = true;
@@ -6442,7 +6607,7 @@ static bool FastAsciiConvert(char* dst,
// Process the remainder of the input performing conversion when
// required one word at a time.
while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
or_acc |= w;
uintptr_t m = AsciiRangeMask(w, lo, hi);
// The mask has high (7th) bit set in every byte that needs
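
For reference, AsciiRangeMask is the classic SWAR (SIMD within a register)
byte-range test. A sketch of the idea, assuming every byte of w is ASCII
(< 0x80), an inclusive range, and 0 < m <= n < 0x80; the boundary conventions
of the actual helper may differ:

static inline uintptr_t AsciiRangeMaskSketch(uintptr_t w, char m, char n) {
  const uintptr_t kOnes = ~static_cast<uintptr_t>(0) / 0xff;  // 0x01 per byte
  uintptr_t ge = w + kOnes * (0x80 - m);  // high bit set where byte >= m
  uintptr_t le = kOnes * (0x80 + n) - w;  // high bit set where byte <= n
  return ge & le & (kOnes * 0x80);        // both true => byte needs conversion
}
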
@@ -6481,17 +6646,14 @@ static bool FastAsciiConvert(char* dst,
template <class Converter>
-MUST_USE_RESULT static MaybeObject* ConvertCase(
- Arguments args,
+MUST_USE_RESULT static Object* ConvertCase(
+ Handle<String> s,
Isolate* isolate,
unibrow::Mapping<Converter, 128>* mapping) {
- SealHandleScope shs(isolate);
- CONVERT_ARG_CHECKED(String, s, 0);
- s = s->TryFlattenGetString();
-
- const int length = s->length();
+ s = String::Flatten(s);
+ int length = s->length();
// Assume that the string is not empty; we need this assumption later
- if (length == 0) return s;
+ if (length == 0) return *s;
// Simpler handling of ASCII strings.
//
@@ -6499,96 +6661,105 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- if (s->IsSeqOneByteString()) {
- Object* o;
- { MaybeObject* maybe_o = isolate->heap()->AllocateRawOneByteString(length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- SeqOneByteString* result = SeqOneByteString::cast(o);
- bool has_changed_character;
+ if (s->IsOneByteRepresentationUnderneath()) {
+ // Same length as input.
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat_content = s->GetFlatContent();
+ ASSERT(flat_content.IsFlat());
+ bool has_changed_character = false;
bool is_ascii = FastAsciiConvert<Converter>(
reinterpret_cast<char*>(result->GetChars()),
- reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
+ reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
length,
&has_changed_character);
// If not ASCII, we discard the result and take the 2 byte path.
- if (is_ascii) {
- return has_changed_character ? result : s;
- }
+ if (is_ascii) return has_changed_character ? *result : *s;
}
- String::Encoding result_encoding = s->IsOneByteRepresentation()
- ? String::ONE_BYTE_ENCODING : String::TWO_BYTE_ENCODING;
- Object* answer;
- { MaybeObject* maybe_answer = ConvertCaseHelper(
- isolate, s, result_encoding, length, length, mapping);
- if (!maybe_answer->ToObject(&answer)) return maybe_answer;
- }
- if (answer->IsSmi()) {
- int new_length = Smi::cast(answer)->value();
- if (new_length < 0) {
- result_encoding = String::TWO_BYTE_ENCODING;
- new_length = -new_length;
- }
- MaybeObject* maybe_answer = ConvertCaseHelper(
- isolate, s, result_encoding, new_length, length, mapping);
- if (!maybe_answer->ToObject(&answer)) return maybe_answer;
+ Handle<SeqString> result; // Same length as input.
+ if (s->IsOneByteRepresentation()) {
+ result = isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+ } else {
+ result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
}
- return answer;
-}
+ Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
+ if (answer->IsException() || answer->IsString()) return answer;
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToLowerCase) {
- return ConvertCase(
- args, isolate, isolate->runtime_state()->to_lower_mapping());
+ ASSERT(answer->IsSmi());
+ length = Smi::cast(answer)->value();
+ if (s->IsOneByteRepresentation() && length > 0) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawOneByteString(length));
+ } else {
+ if (length < 0) length = -length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(length));
+ }
+ return ConvertCaseHelper(isolate, *s, *result, length, mapping);
}
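
ConvertCase and ConvertCaseHelper communicate through the return value. The
protocol that the retry logic above relies on, summarized:

// ConvertCaseHelper result protocol (as consumed above):
//   a String     -> done: the converted string, or the input itself when
//                   nothing changed;
//   a Smi k > 0  -> the first-pass guess was too short; retry with exactly
//                   k characters;
//   a Smi k < 0  -> retry with -k characters in a two-byte string (an
//                   overflowing character such as 'ÿ' was encountered);
//   an exception -> e.g. invalid string length; propagate to the caller.
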
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
+RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
return ConvertCase(
- args, isolate, isolate->runtime_state()->to_upper_mapping());
+ s, isolate, isolate->runtime_state()->to_lower_mapping());
}
-static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
- return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
+RUNTIME_FUNCTION(Runtime_StringToUpperCase) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ return ConvertCase(
+ s, isolate, isolate->runtime_state()->to_upper_mapping());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_StringTrim) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(String, s, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
- s->TryFlatten();
- int length = s->length();
+ string = String::Flatten(string);
+ int length = string->length();
int left = 0;
+ UnicodeCache* unicode_cache = isolate->unicode_cache();
if (trimLeft) {
- while (left < length && IsTrimWhiteSpace(s->Get(left))) {
+ while (left < length &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
left++;
}
}
int right = length;
if (trimRight) {
- while (right > left && IsTrimWhiteSpace(s->Get(right - 1))) {
+ while (right > left &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(
+ string->Get(right - 1))) {
right--;
}
}
- return s->SubString(left, right);
+
+ return *isolate->factory()->NewSubString(string, left, right);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
+RUNTIME_FUNCTION(Runtime_StringSplit) {
HandleScope handle_scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
+ RUNTIME_ASSERT(limit > 0);
int subject_length = subject->length();
int pattern_length = pattern->length();
@@ -6614,7 +6785,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
// isn't empty, we can never create more parts than ~half the length
// of the subject.
- if (!subject->IsFlat()) FlattenString(subject);
+ subject = String::Flatten(subject);
+ pattern = String::Flatten(pattern);
static const int kMaxInitialListCapacity = 16;
@@ -6623,7 +6795,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
ZoneList<int> indices(initial_capacity, zone_scope.zone());
- if (!pattern->IsFlat()) FlattenString(pattern);
FindStringIndicesDispatch(isolate, *subject, *pattern,
&indices, limit, zone_scope.zone());
@@ -6661,10 +6832,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
if (limit == 0xffffffffu) {
if (result->HasFastObjectElements()) {
- RegExpResultsCache::Enter(isolate->heap(),
- *subject,
- *pattern,
- *elements,
+ RegExpResultsCache::Enter(isolate,
+ subject,
+ pattern,
+ elements,
RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
}
}
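
Once FindStringIndicesDispatch has filled indices with the match positions,
assembling the parts (in the code preceding this hunk) is a single scan. A
conceptual sketch of that loop; part_start and part_end are illustrative
names, not the function's actual locals:

// indices holds the start offset of each separator match, in order;
// part i spans [part_start, indices[i]) and the last part runs to the end.
int part_start = 0;
for (int i = 0; i < indices.length(); i++) {
  int part_end = indices.at(i);
  Handle<String> part =
      isolate->factory()->NewSubString(subject, part_start, part_end);
  elements->set(i, *part);
  part_start = part_end + pattern_length;
}
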
@@ -6708,25 +6879,21 @@ static int CopyCachedAsciiCharsToArray(Heap* heap,
// Converts a String to JSArray.
// For example, "foo" => ["f", "o", "o"].
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
+RUNTIME_FUNCTION(Runtime_StringToArray) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
- s = FlattenGetString(s);
+ s = String::Flatten(s);
const int length = static_cast<int>(Min<uint32_t>(s->length(), limit));
Handle<FixedArray> elements;
int position = 0;
if (s->IsFlat() && s->IsOneByteRepresentation()) {
// Try using cached chars where possible.
- Object* obj;
- { MaybeObject* maybe_obj =
- isolate->heap()->AllocateUninitializedFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
+ elements = isolate->factory()->NewUninitializedFixedArray(length);
+
DisallowHeapAllocation no_gc;
String::FlatContent content = s->GetFlatContent();
if (content.IsAscii()) {
@@ -6747,7 +6914,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
}
for (int i = position; i < length; ++i) {
Handle<Object> str =
- LookupSingleCharacterStringFromCode(isolate, s->Get(i));
+ isolate->factory()->LookupSingleCharacterStringFromCode(s->Get(i));
elements->set(i, *str);
}
@@ -6761,11 +6928,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NewStringWrapper) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(String, value, 0);
- return value->ToObject(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(String, value, 0);
+ return *Object::ToObject(isolate, value).ToHandleChecked();
}
@@ -6776,110 +6943,70 @@ bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
-
- Object* number = args[0];
- RUNTIME_ASSERT(number->IsNumber());
-
- return isolate->heap()->NumberToString(number);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_NumberToString) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
- Object* number = args[0];
- RUNTIME_ASSERT(number->IsNumber());
-
- return isolate->heap()->NumberToString(
- number, false, isolate->heap()->GetPretenureMode());
+ return *isolate->factory()->NumberToString(number);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_NumberToStringSkipCache) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
- return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
+ return *isolate->factory()->NumberToString(number, false);
}
-// ES6 draft 9.1.11
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPositiveInteger) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberToInteger) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
- if (number <= 0) {
- return Smi::FromInt(0);
- }
- return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
+ return *isolate->factory()->NewNumber(DoubleToInteger(number));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
-
double double_value = DoubleToInteger(number);
// Map both -0 and +0 to +0.
if (double_value == 0) double_value = 0;
- return isolate->heap()->NumberFromDouble(double_value);
+ return *isolate->factory()->NewNumber(double_value);
}
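
The "if (double_value == 0) double_value = 0;" line above only looks like a no-op: IEEE 754 compares -0.0 equal to 0.0, so the assignment is exactly what canonicalizes the sign. A standalone check:

  #include <cassert>
  #include <cmath>

  int main() {
    double v = -0.0;
    assert(std::signbit(v));  // negative zero keeps its sign bit
    if (v == 0) v = 0;        // -0.0 == 0.0 is true, so this replaces
                              // -0 with +0
    assert(!std::signbit(v));
  }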
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberToJSUint32) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
- return isolate->heap()->NumberFromUint32(number);
+ return *isolate->factory()->NewNumberFromUint(number);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberToJSInt32) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
- return isolate->heap()->NumberFromInt32(DoubleToInt32(number));
+ return *isolate->factory()->NewNumberFromInt(DoubleToInt32(number));
}
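
DoubleToInt32 is V8's implementation of the ECMAScript ToInt32 conversion: truncate toward zero, reduce modulo 2^32, reinterpret the low 32 bits as signed. A standalone rendering of the same arithmetic (illustrative only, not V8's actual code path):

  #include <cmath>
  #include <cstdint>

  int32_t ToInt32Sketch(double d) {
    if (!std::isfinite(d)) return 0;  // NaN and infinities map to 0
    const double kTwo32 = 4294967296.0;
    double m = std::fmod(std::trunc(d), kTwo32);
    if (m < 0) m += kTwo32;           // now in [0, 2^32)
    uint32_t u = static_cast<uint32_t>(m);
    return static_cast<int32_t>(u);   // two's-complement wrap
  }

The final cast is a well-defined modular wrap since C++20; older compilers implement the same behavior in practice.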
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
+RUNTIME_FUNCTION(RuntimeHidden_NumberToSmi) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
-
- Object* obj = args[0];
+ CONVERT_ARG_CHECKED(Object, obj, 0);
if (obj->IsSmi()) {
return obj;
}
@@ -6894,100 +7021,92 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_AllocateHeapNumber) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
- return isolate->heap()->AllocateHeapNumber(0);
+ return *isolate->factory()->NewHeapNumber(0);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberAdd) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->NumberFromDouble(x + y);
+ return *isolate->factory()->NewNumber(x + y);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberSub) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->NumberFromDouble(x - y);
+ return *isolate->factory()->NewNumber(x - y);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberMul) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->NumberFromDouble(x * y);
+ return *isolate->factory()->NewNumber(x * y);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberUnaryMinus) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->NumberFromDouble(-x);
+ return *isolate->factory()->NewNumber(-x);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 0);
-
- return isolate->heap()->NumberFromDouble(9876543210.0);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberDiv) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->NumberFromDouble(x / y);
+ return *isolate->factory()->NewNumber(x / y);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberMod) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
-
- x = modulo(x, y);
- // NumberFromDouble may return a Smi instead of a Number object
- return isolate->heap()->NumberFromDouble(x);
+ return *isolate->factory()->NewNumber(modulo(x, y));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberImul) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x * y);
+ return *isolate->factory()->NewNumberFromInt(x * y);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_StringAdd) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, str1, 0);
- CONVERT_ARG_CHECKED(String, str2, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
isolate->counters()->string_add_runtime()->Increment();
- return isolate->heap()->AllocateConsString(str1, str2);
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewConsString(str1, str2));
+ return *result;
}
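
ASSIGN_RETURN_FAILURE_ON_EXCEPTION, as used here, evaluates a call that may fail, assigns the resulting handle on success, and returns the failure sentinel early otherwise. A toy stand-in for that control flow using std::optional (all names hypothetical; this is not the V8 macro):

  #include <optional>
  #include <string>

  #define ASSIGN_OR_BAIL(dst, call)                    \
    do {                                               \
      auto maybe_ = (call);                            \
      if (!maybe_) return std::string("<exception>");  \
      dst = *maybe_;                                   \
    } while (false)

  // Fails (returns nullopt) when the result would be "too long".
  std::optional<std::string> Concat(const std::string& a,
                                    const std::string& b) {
    if (a.size() + b.size() > 64) return std::nullopt;
    return a + b;
  }

  std::string Add(const std::string& a, const std::string& b) {
    std::string result;
    ASSIGN_OR_BAIL(result, Concat(a, b));
    return result;
  }

  int main() { return Add("a", "b") == "ab" ? 0 : 1; }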
@@ -6996,6 +7115,7 @@ static inline void StringBuilderConcatHelper(String* special,
sinkchar* sink,
FixedArray* fixed_array,
int array_length) {
+ DisallowHeapAllocation no_gc;
int position = 0;
for (int i = 0; i < array_length; i++) {
Object* element = fixed_array->get(i);
@@ -7030,39 +7150,13 @@ static inline void StringBuilderConcatHelper(String* special,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- if (!args[1]->IsSmi()) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x14);
- }
- int array_length = args.smi_at(1);
- CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
-
- // This assumption is used by the slice encoding in one or two smis.
- ASSERT(Smi::kMaxValue >= String::kMaxLength);
-
- JSObject::EnsureCanContainHeapObjectElements(array);
-
- int special_length = special->length();
- if (!array->HasFastObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
- FixedArray* fixed_array = FixedArray::cast(array->elements());
- if (fixed_array->length() < array_length) {
- array_length = fixed_array->length();
- }
-
- if (array_length == 0) {
- return isolate->heap()->empty_string();
- } else if (array_length == 1) {
- Object* first = fixed_array->get(0);
- if (first->IsString()) return first;
- }
-
- bool one_byte = special->HasOnlyOneByteChars();
+// Returns the result length of the concatenation.
+// On illegal argument, -1 is returned.
+static inline int StringBuilderConcatLength(int special_length,
+ FixedArray* fixed_array,
+ int array_length,
+ bool* one_byte) {
+ DisallowHeapAllocation no_gc;
int position = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
@@ -7081,86 +7175,118 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
len = -smi_value;
// Get the position and check that it is a positive smi.
i++;
- if (i >= array_length) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
+ if (i >= array_length) return -1;
Object* next_smi = fixed_array->get(i);
- if (!next_smi->IsSmi()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
+ if (!next_smi->IsSmi()) return -1;
pos = Smi::cast(next_smi)->value();
- if (pos < 0) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
+ if (pos < 0) return -1;
}
ASSERT(pos >= 0);
ASSERT(len >= 0);
- if (pos > special_length || len > special_length - pos) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
+ if (pos > special_length || len > special_length - pos) return -1;
increment = len;
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
increment = element_length;
- if (one_byte && !element->HasOnlyOneByteChars()) {
- one_byte = false;
+ if (*one_byte && !element->HasOnlyOneByteChars()) {
+ *one_byte = false;
}
} else {
- ASSERT(!elt->IsTheHole());
- return isolate->Throw(isolate->heap()->illegal_argument_string());
+ return -1;
}
if (increment > String::kMaxLength - position) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x15);
+ return kMaxInt; // Provoke throw on allocation.
}
position += increment;
}
+ return position;
+}
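
Two conventions in the helper above: -1 signals an illegal element, while an over-length result is clamped to kMaxInt so the later string allocation throws instead. The guard tests headroom by subtraction (increment > String::kMaxLength - position) so the running int can never wrap. The same accumulation shape, standalone, with a hypothetical cap standing in for String::kMaxLength:

  #include <vector>

  const int kMaxLen = (1 << 28) - 16;  // hypothetical cap

  // Returns -1 on a bad element or when the cap would be exceeded.
  int SafeTotalLength(const std::vector<int>& piece_lengths) {
    int total = 0;
    for (int len : piece_lengths) {
      if (len < 0) return -1;                // illegal element
      if (len > kMaxLen - total) return -1;  // would exceed the cap
      total += len;
    }
    return total;
  }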
- int length = position;
- Object* object;
- if (one_byte) {
- { MaybeObject* maybe_object =
- isolate->heap()->AllocateRawOneByteString(length);
- if (!maybe_object->ToObject(&object)) return maybe_object;
+RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
+ CONVERT_SMI_ARG_CHECKED(array_length, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
+
+ size_t actual_array_length = 0;
+ RUNTIME_ASSERT(
+ TryNumberToSize(isolate, array->length(), &actual_array_length));
+ RUNTIME_ASSERT(array_length >= 0);
+ RUNTIME_ASSERT(static_cast<size_t>(array_length) <= actual_array_length);
+
+ // This assumption is used by the slice encoding in one or two smis.
+ ASSERT(Smi::kMaxValue >= String::kMaxLength);
+
+ RUNTIME_ASSERT(array->HasFastElements());
+ JSObject::EnsureCanContainHeapObjectElements(array);
+
+ int special_length = special->length();
+ if (!array->HasFastObjectElements()) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ int length;
+ bool one_byte = special->HasOnlyOneByteChars();
+
+ { DisallowHeapAllocation no_gc;
+ FixedArray* fixed_array = FixedArray::cast(array->elements());
+ if (fixed_array->length() < array_length) {
+ array_length = fixed_array->length();
}
- SeqOneByteString* answer = SeqOneByteString::cast(object);
+
+ if (array_length == 0) {
+ return isolate->heap()->empty_string();
+ } else if (array_length == 1) {
+ Object* first = fixed_array->get(0);
+ if (first->IsString()) return first;
+ }
+ length = StringBuilderConcatLength(
+ special_length, fixed_array, array_length, &one_byte);
+ }
+
+ if (length == -1) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ if (one_byte) {
+ Handle<SeqOneByteString> answer;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, answer,
+ isolate->factory()->NewRawOneByteString(length));
StringBuilderConcatHelper(*special,
answer->GetChars(),
- fixed_array,
+ FixedArray::cast(array->elements()),
array_length);
- return answer;
+ return *answer;
} else {
- { MaybeObject* maybe_object =
- isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- SeqTwoByteString* answer = SeqTwoByteString::cast(object);
+ Handle<SeqTwoByteString> answer;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, answer,
+ isolate->factory()->NewRawTwoByteString(length));
StringBuilderConcatHelper(*special,
answer->GetChars(),
- fixed_array,
+ FixedArray::cast(array->elements()),
array_length);
- return answer;
+ return *answer;
}
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
- if (!args[1]->IsSmi()) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x16);
- }
- int array_length = args.smi_at(1);
- CONVERT_ARG_CHECKED(String, separator, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
+ CONVERT_SMI_ARG_CHECKED(array_length, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
+ RUNTIME_ASSERT(array->HasFastObjectElements());
+ RUNTIME_ASSERT(array_length >= 0);
- if (!array->HasFastObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
- FixedArray* fixed_array = FixedArray::cast(array->elements());
+ Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
if (fixed_array->length() < array_length) {
array_length = fixed_array->length();
}
@@ -7169,54 +7295,56 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
return isolate->heap()->empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
- if (first->IsString()) return first;
+ RUNTIME_ASSERT(first->IsString());
+ return first;
}
int separator_length = separator->length();
+ RUNTIME_ASSERT(separator_length > 0);
int max_nof_separators =
(String::kMaxLength + separator_length - 1) / separator_length;
if (max_nof_separators < (array_length - 1)) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x17);
+ return isolate->ThrowInvalidStringLength();
}
int length = (array_length - 1) * separator_length;
for (int i = 0; i < array_length; i++) {
Object* element_obj = fixed_array->get(i);
- if (!element_obj->IsString()) {
- // TODO(1161): handle this case.
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
+ RUNTIME_ASSERT(element_obj->IsString());
String* element = String::cast(element_obj);
int increment = element->length();
if (increment > String::kMaxLength - length) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x18);
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ length = kMaxInt; // Provoke exception.
+ break;
}
length += increment;
}
- Object* object;
- { MaybeObject* maybe_object =
- isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- SeqTwoByteString* answer = SeqTwoByteString::cast(object);
+ Handle<SeqTwoByteString> answer;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, answer,
+ isolate->factory()->NewRawTwoByteString(length));
+
+ DisallowHeapAllocation no_gc;
uc16* sink = answer->GetChars();
#ifdef DEBUG
uc16* end = sink + length;
#endif
+ RUNTIME_ASSERT(fixed_array->get(0)->IsString());
String* first = String::cast(fixed_array->get(0));
+ String* separator_raw = *separator;
int first_length = first->length();
String::WriteToFlat(first, sink, 0, first_length);
sink += first_length;
for (int i = 1; i < array_length; i++) {
ASSERT(sink + separator_length <= end);
- String::WriteToFlat(separator, sink, 0, separator_length);
+ String::WriteToFlat(separator_raw, sink, 0, separator_length);
sink += separator_length;
+ RUNTIME_ASSERT(fixed_array->get(i)->IsString());
String* element = String::cast(fixed_array->get(i));
int element_length = element->length();
ASSERT(sink + element_length <= end);
@@ -7227,7 +7355,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
// Use %_FastAsciiArrayJoin instead.
ASSERT(!answer->IsOneByteRepresentation());
- return answer;
+ return *answer;
}
template <typename Char>
@@ -7236,6 +7364,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
uint32_t array_length,
String* separator,
Vector<Char> buffer) {
+ DisallowHeapAllocation no_gc;
int previous_separator_position = 0;
int separator_length = separator->length();
int cursor = 0;
@@ -7271,53 +7400,55 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
- RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, elements_array, 0);
CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
- CONVERT_ARG_CHECKED(String, separator, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
// elements_array is a fast-mode JSArray of alternating positions
// (increasing order) and strings.
+ RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
// array_length is the length of the original array (used to add separators);
// separator is the string to put between elements. Assumed to be non-empty.
+ RUNTIME_ASSERT(array_length > 0);
// Find total length of join result.
int string_length = 0;
bool is_ascii = separator->IsOneByteRepresentation();
- int max_string_length;
- if (is_ascii) {
- max_string_length = SeqOneByteString::kMaxLength;
- } else {
- max_string_length = SeqTwoByteString::kMaxLength;
- }
bool overflow = false;
- CONVERT_NUMBER_CHECKED(int, elements_length,
- Int32, elements_array->length());
+ CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length());
+ RUNTIME_ASSERT(elements_length <= elements_array->elements()->length());
RUNTIME_ASSERT((elements_length & 1) == 0); // Even length.
FixedArray* elements = FixedArray::cast(elements_array->elements());
for (int i = 0; i < elements_length; i += 2) {
RUNTIME_ASSERT(elements->get(i)->IsNumber());
+ CONVERT_NUMBER_CHECKED(uint32_t, position, Uint32, elements->get(i));
+ RUNTIME_ASSERT(position < array_length);
RUNTIME_ASSERT(elements->get(i + 1)->IsString());
- String* string = String::cast(elements->get(i + 1));
- int length = string->length();
- if (is_ascii && !string->IsOneByteRepresentation()) {
- is_ascii = false;
- max_string_length = SeqTwoByteString::kMaxLength;
- }
- if (length > max_string_length ||
- max_string_length - length < string_length) {
- overflow = true;
- break;
+ }
+
+ { DisallowHeapAllocation no_gc;
+ for (int i = 0; i < elements_length; i += 2) {
+ String* string = String::cast(elements->get(i + 1));
+ int length = string->length();
+ if (is_ascii && !string->IsOneByteRepresentation()) {
+ is_ascii = false;
+ }
+ if (length > String::kMaxLength ||
+ String::kMaxLength - length < string_length) {
+ overflow = true;
+ break;
+ }
+ string_length += length;
}
- string_length += length;
}
+
int separator_length = separator->length();
if (!overflow && separator_length > 0) {
if (array_length <= 0x7fffffffu) {
int separator_count = static_cast<int>(array_length) - 1;
- int remaining_length = max_string_length - string_length;
+ int remaining_length = String::kMaxLength - string_length;
if ((remaining_length / separator_length) >= separator_count) {
string_length += separator_length * (array_length - 1);
} else {
@@ -7332,102 +7463,98 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
}
}
if (overflow) {
- // Throw OutOfMemory exception for creating too large a string.
- V8::FatalProcessOutOfMemory("Array join result too large.");
+ // Throw an exception if the resulting string is too large. See
+ // https://code.google.com/p/chromium/issues/detail?id=336820
+ // for details.
+ return isolate->ThrowInvalidStringLength();
}
if (is_ascii) {
- MaybeObject* result_allocation =
- isolate->heap()->AllocateRawOneByteString(string_length);
- if (result_allocation->IsFailure()) return result_allocation;
- SeqOneByteString* result_string =
- SeqOneByteString::cast(result_allocation->ToObjectUnchecked());
- JoinSparseArrayWithSeparator<uint8_t>(elements,
- elements_length,
- array_length,
- separator,
- Vector<uint8_t>(
- result_string->GetChars(),
- string_length));
- return result_string;
+ Handle<SeqOneByteString> result = isolate->factory()->NewRawOneByteString(
+ string_length).ToHandleChecked();
+ JoinSparseArrayWithSeparator<uint8_t>(
+ FixedArray::cast(elements_array->elements()),
+ elements_length,
+ array_length,
+ *separator,
+ Vector<uint8_t>(result->GetChars(), string_length));
+ return *result;
} else {
- MaybeObject* result_allocation =
- isolate->heap()->AllocateRawTwoByteString(string_length);
- if (result_allocation->IsFailure()) return result_allocation;
- SeqTwoByteString* result_string =
- SeqTwoByteString::cast(result_allocation->ToObjectUnchecked());
- JoinSparseArrayWithSeparator<uc16>(elements,
- elements_length,
- array_length,
- separator,
- Vector<uc16>(result_string->GetChars(),
- string_length));
- return result_string;
+ Handle<SeqTwoByteString> result = isolate->factory()->NewRawTwoByteString(
+ string_length).ToHandleChecked();
+ JoinSparseArrayWithSeparator<uc16>(
+ FixedArray::cast(elements_array->elements()),
+ elements_length,
+ array_length,
+ *separator,
+ Vector<uc16>(result->GetChars(), string_length));
+ return *result;
}
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberOr) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x | y);
+ return *isolate->factory()->NewNumberFromInt(x | y);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberAnd) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x & y);
+ return *isolate->factory()->NewNumberFromInt(x & y);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberXor) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x ^ y);
+ return *isolate->factory()->NewNumberFromInt(x ^ y);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberShl) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(x << (y & 0x1f));
+ return *isolate->factory()->NewNumberFromInt(x << (y & 0x1f));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberShr) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromUint32(x >> (y & 0x1f));
+ return *isolate->factory()->NewNumberFromUint(x >> (y & 0x1f));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_NumberSar) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return isolate->heap()->NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f));
+ return *isolate->factory()->NewNumberFromInt(
+ ArithmeticShiftRight(x, y & 0x1f));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
+RUNTIME_FUNCTION(Runtime_NumberEquals) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -7446,31 +7573,32 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_StringEquals) {
+ HandleScope handle_scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, x, 0);
- CONVERT_ARG_CHECKED(String, y, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- bool not_equal = !x->Equals(y);
+ bool not_equal = !String::Equals(x, y);
// This is slightly convoluted because the value that signifies
// equality is 0 and inequality is 1 so we have to negate the result
// from String::Equals.
ASSERT(not_equal == 0 || not_equal == 1);
- STATIC_CHECK(EQUAL == 0);
- STATIC_CHECK(NOT_EQUAL == 1);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(NOT_EQUAL == 1);
return Smi::FromInt(not_equal);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
+RUNTIME_FUNCTION(Runtime_NumberCompare) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (std::isnan(x) || std::isnan(y)) return args[2];
+ CONVERT_ARG_HANDLE_CHECKED(Object, uncomparable_result, 2);
+ if (std::isnan(x) || std::isnan(y)) return *uncomparable_result;
if (x == y) return Smi::FromInt(EQUAL);
if (isless(x, y)) return Smi::FromInt(LESS);
return Smi::FromInt(GREATER);
@@ -7479,7 +7607,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
// Compare two Smis as if they were converted to strings and then
// compared lexicographically.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
+RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(x_value, 0);
@@ -7554,27 +7682,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
}
-static Object* StringCharacterStreamCompare(RuntimeState* state,
- String* x,
- String* y) {
- StringCharacterStream stream_x(x, state->string_iterator_compare_x());
- StringCharacterStream stream_y(y, state->string_iterator_compare_y());
- while (stream_x.HasMore() && stream_y.HasMore()) {
- int d = stream_x.GetNext() - stream_y.GetNext();
- if (d < 0) return Smi::FromInt(LESS);
- else if (d > 0) return Smi::FromInt(GREATER);
+RUNTIME_FUNCTION(RuntimeHidden_StringCompare) {
+ HandleScope handle_scope(isolate);
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+
+ isolate->counters()->string_compare_runtime()->Increment();
+
+ // A few fast case tests before we flatten.
+ if (x.is_identical_to(y)) return Smi::FromInt(EQUAL);
+ if (y->length() == 0) {
+ if (x->length() == 0) return Smi::FromInt(EQUAL);
+ return Smi::FromInt(GREATER);
+ } else if (x->length() == 0) {
+ return Smi::FromInt(LESS);
}
- // x is (non-trivial) prefix of y:
- if (stream_y.HasMore()) return Smi::FromInt(LESS);
- // y is prefix of x:
- return Smi::FromInt(stream_x.HasMore() ? GREATER : EQUAL);
-}
+ int d = x->Get(0) - y->Get(0);
+ if (d < 0) return Smi::FromInt(LESS);
+ else if (d > 0) return Smi::FromInt(GREATER);
+ // Slow case.
+ x = String::Flatten(x);
+ y = String::Flatten(y);
-static Object* FlatStringCompare(String* x, String* y) {
- ASSERT(x->IsFlat());
- ASSERT(y->IsFlat());
+ DisallowHeapAllocation no_gc;
Object* equal_prefix_result = Smi::FromInt(EQUAL);
int prefix_length = x->length();
if (y->length() < prefix_length) {
@@ -7584,7 +7718,6 @@ static Object* FlatStringCompare(String* x, String* y) {
equal_prefix_result = Smi::FromInt(LESS);
}
int r;
- DisallowHeapAllocation no_gc;
String::FlatContent x_content = x->GetFlatContent();
String::FlatContent y_content = y->GetFlatContent();
if (x_content.IsAscii()) {
@@ -7612,82 +7745,60 @@ static Object* FlatStringCompare(String* x, String* y) {
} else {
result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
}
- ASSERT(result ==
- StringCharacterStreamCompare(x->GetIsolate()->runtime_state(), x, y));
return result;
}
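
The flat-compare core that survives into RuntimeHidden_StringCompare is ordinary prefix ordering: compare the common prefix, and on a tie the shorter string sorts first. The same logic over plain bytes:

  #include <algorithm>
  #include <cassert>
  #include <cstring>

  int CompareFlat(const char* x, int xlen, const char* y, int ylen) {
    int prefix = std::min(xlen, ylen);
    int r = std::memcmp(x, y, prefix);  // compare the common prefix
    if (r != 0) return r < 0 ? -1 : 1;
    if (xlen == ylen) return 0;
    return xlen < ylen ? -1 : 1;        // shorter prefix sorts first
  }

  int main() {
    assert(CompareFlat("abc", 3, "abd", 3) < 0);
    assert(CompareFlat("ab", 2, "abc", 3) < 0);  // prefix case
  }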
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 2);
-
- CONVERT_ARG_CHECKED(String, x, 0);
- CONVERT_ARG_CHECKED(String, y, 1);
-
- isolate->counters()->string_compare_runtime()->Increment();
-
- // A few fast case tests before we flatten.
- if (x == y) return Smi::FromInt(EQUAL);
- if (y->length() == 0) {
- if (x->length() == 0) return Smi::FromInt(EQUAL);
- return Smi::FromInt(GREATER);
- } else if (x->length() == 0) {
- return Smi::FromInt(LESS);
- }
-
- int d = x->Get(0) - y->Get(0);
- if (d < 0) return Smi::FromInt(LESS);
- else if (d > 0) return Smi::FromInt(GREATER);
-
- Object* obj;
- { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(x);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(y);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
- return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
- : StringCharacterStreamCompare(isolate->runtime_state(), x, y);
+#define RUNTIME_UNARY_MATH(Name, name) \
+RUNTIME_FUNCTION(Runtime_Math##Name) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 1); \
+ isolate->counters()->math_##name()->Increment(); \
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0); \
+ return *isolate->factory()->NewHeapNumber(std::name(x)); \
}
+RUNTIME_UNARY_MATH(Acos, acos)
+RUNTIME_UNARY_MATH(Asin, asin)
+RUNTIME_UNARY_MATH(Atan, atan)
+RUNTIME_UNARY_MATH(LogRT, log)
+#undef RUNTIME_UNARY_MATH
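
RUNTIME_UNARY_MATH is a plain X-macro: one definition stamps out four runtime entries that differ only in the counter they bump and the <cmath> function they call (std::name expands to std::acos, std::asin, and so on). The trick reduced to standard C++:

  #include <cmath>
  #include <cstdio>

  #define UNARY_MATH(Name, name) \
    double Name(double x) { return std::name(x); }
  UNARY_MATH(Acos, acos)
  UNARY_MATH(Asin, asin)
  UNARY_MATH(Atan, atan)
  #undef UNARY_MATH

  int main() { std::printf("%f\n", Atan(1.0)); }  // ~0.785398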
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_acos()->Increment();
+RUNTIME_FUNCTION(Runtime_DoubleHi) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ACOS, x);
+ uint64_t integer = double_to_uint64(x);
+ integer = (integer >> 32) & 0xFFFFFFFFu;
+ return *isolate->factory()->NewNumber(static_cast<int32_t>(integer));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_DoubleLo) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- isolate->counters()->math_asin()->Increment();
-
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ASIN, x);
+ return *isolate->factory()->NewNumber(
+ static_cast<int32_t>(double_to_uint64(x) & 0xFFFFFFFFu));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_atan()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ATAN, x);
+RUNTIME_FUNCTION(Runtime_ConstructDouble) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
+ CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
+ uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
+ return *isolate->factory()->NewNumber(uint64_to_double(result));
}
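
DoubleHi, DoubleLo, and ConstructDouble round-trip a double through its two 32-bit halves; double_to_uint64 and uint64_to_double are V8's bit-cast helpers. The same lossless split and rebuild in portable C++, with memcpy as the sanctioned bit cast:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  uint64_t DoubleBits(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return bits;
  }

  double DoubleFromWords(uint32_t hi, uint32_t lo) {
    uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
  }

  int main() {
    double x = -123.456;
    uint32_t hi = static_cast<uint32_t>(DoubleBits(x) >> 32);
    uint32_t lo = static_cast<uint32_t>(DoubleBits(x) & 0xFFFFFFFFu);
    assert(DoubleFromWords(hi, lo) == x);  // lossless round trip
  }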
static const double kPiDividedBy4 = 0.78539816339744830962;
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_MathAtan2) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_atan2()->Increment();
@@ -7703,57 +7814,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
if (y < 0) multiplier *= 3;
result = multiplier * kPiDividedBy4;
} else {
- result = atan2(x, y);
+ result = std::atan2(x, y);
}
- return isolate->heap()->AllocateHeapNumber(result);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_cos()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::COS, x);
+ return *isolate->factory()->NewNumber(result);
}
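
The multiplier arithmetic above pins atan2 at the four (+/-inf, +/-inf) corners to the odd multiples of pi/4, independent of what the platform libm does there. On a C99 Annex F-conforming libm, std::atan2 already agrees, as this standalone table shows:

  #include <cmath>
  #include <cstdio>

  int main() {
    const double kPiDividedBy4 = 0.78539816339744830962;
    const double inf = INFINITY;
    // (+inf,+inf) -> pi/4    (+inf,-inf) -> 3*pi/4
    // (-inf,+inf) -> -pi/4   (-inf,-inf) -> -3*pi/4
    std::printf("%+f %+f\n", std::atan2(inf, inf), kPiDividedBy4);
    std::printf("%+f %+f\n", std::atan2(inf, -inf), 3 * kPiDividedBy4);
    std::printf("%+f %+f\n", std::atan2(-inf, inf), -kPiDividedBy4);
    std::printf("%+f %+f\n", std::atan2(-inf, -inf), -3 * kPiDividedBy4);
  }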
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_MathExpRT) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_exp()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
lazily_initialize_fast_exp();
- return isolate->heap()->NumberFromDouble(fast_exp(x));
+ return *isolate->factory()->NewNumber(fast_exp(x));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_MathFloorRT) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_floor()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->NumberFromDouble(floor(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_log()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
+ return *isolate->factory()->NewNumber(std::floor(x));
}
// Slow version of Math.pow. We check for fast paths for special cases.
-// Used if SSE2/VFP3 is not available.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
- SealHandleScope shs(isolate);
+// Used if VFP3 is not available.
+RUNTIME_FUNCTION(RuntimeHidden_MathPowSlow) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
@@ -7763,20 +7854,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
// custom powi() function than the generic pow().
if (args[1]->IsSmi()) {
int y = args.smi_at(1);
- return isolate->heap()->NumberFromDouble(power_double_int(x, y));
+ return *isolate->factory()->NewNumber(power_double_int(x, y));
}
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
double result = power_helper(x, y);
if (std::isnan(result)) return isolate->heap()->nan_value();
- return isolate->heap()->AllocateHeapNumber(result);
+ return *isolate->factory()->NewNumber(result);
}
// Fast version of Math.pow if we know that y is not an integer and y is not
// -0.5 or 0.5. Used as slow case from full codegen.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_MathPow) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
@@ -7787,23 +7878,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
} else {
double result = power_double_double(x, y);
if (std::isnan(result)) return isolate->heap()->nan_value();
- return isolate->heap()->AllocateHeapNumber(result);
+ return *isolate->factory()->NewNumber(result);
}
}
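
power_double_int exists because an integer exponent admits a far cheaper loop than generic pow(). A standalone square-and-multiply sketch of that idea (illustrative; V8's helper differs in its corner-case handling):

  #include <cassert>

  double PowInt(double x, int n) {
    bool negative = n < 0;
    unsigned m = negative ? 0u - static_cast<unsigned>(n)
                          : static_cast<unsigned>(n);
    double result = 1.0;
    while (m != 0) {         // O(log n) multiplications
      if (m & 1) result *= x;
      x *= x;
      m >>= 1;
    }
    return negative ? 1.0 / result : result;
  }

  int main() { assert(PowInt(2.0, 10) == 1024.0); }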
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_RoundNumber) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(input, 0);
isolate->counters()->math_round()->Increment();
- if (!args[0]->IsHeapNumber()) {
- // Must be smi. Return the argument unchanged for all the other types
- // to make fuzz-natives test happy.
- return args[0];
+ if (!input->IsHeapNumber()) {
+ ASSERT(input->IsSmi());
+ return *input;
}
- HeapNumber* number = reinterpret_cast<HeapNumber*>(args[0]);
+ Handle<HeapNumber> number = Handle<HeapNumber>::cast(input);
double value = number->value();
int exponent = number->get_exponent();
@@ -7825,58 +7916,50 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
// If the magnitude is big enough, there's no place for fraction part. If we
// try to add 0.5 to this number, 1.0 will be added instead.
if (exponent >= 52) {
- return number;
+ return *number;
}
if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
// Do not call NumberFromDouble() to avoid extra checks.
- return isolate->heap()->AllocateHeapNumber(floor(value + 0.5));
+ return *isolate->factory()->NewNumber(std::floor(value + 0.5));
}
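
The exponent >= 52 early return rests on a property of IEEE doubles: once the unbiased exponent reaches 52 the mantissa has no fraction bits left, so the value is already an integer, and the "1.0 will be added instead" remark is exactly what ties-to-even rounding does at that magnitude:

  #include <cassert>
  #include <cmath>

  int main() {
    double big = std::ldexp(1.0, 52) + 1.0;  // 2^52 + 1; spacing is 1.0
    assert(big == std::floor(big));          // no fractional part possible
    assert(big + 0.5 == big + 1.0);          // the tie rounds to the even
                                             // neighbor, i.e. adds 1.0
  }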
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_sin()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::SIN, x);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_MathSqrtRT) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_sqrt()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(fast_sqrt(x));
+ return *isolate->factory()->NewNumber(fast_sqrt(x));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_MathFround) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- isolate->counters()->math_tan()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::TAN, x);
+ float xf = static_cast<float>(x);
+ return *isolate->factory()->NewNumber(xf);
}
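
Runtime_MathFround is the whole of Math.fround: round the double to the nearest float, then widen back. The observable effect, standalone:

  #include <cassert>

  int main() {
    double x = 0.1;  // not exactly representable in float or double
    double fr = static_cast<double>(static_cast<float>(x));
    assert(fr != x);  // float rounding changed the value...
    assert(static_cast<float>(fr) == static_cast<float>(x));  // ...stably
  }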
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
+RUNTIME_FUNCTION(Runtime_DateMakeDay) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(year, 0);
CONVERT_SMI_ARG_CHECKED(month, 1);
- return Smi::FromInt(isolate->date_cache()->DaysFromYearMonth(year, month));
+ int days = isolate->date_cache()->DaysFromYearMonth(year, month);
+ RUNTIME_ASSERT(Smi::IsValid(days));
+ return Smi::FromInt(days);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) {
+RUNTIME_FUNCTION(Runtime_DateSetValue) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -7886,40 +7969,38 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) {
DateCache* date_cache = isolate->date_cache();
- Object* value = NULL;
+ Handle<Object> value;
bool is_value_nan = false;
if (std::isnan(time)) {
- value = isolate->heap()->nan_value();
+ value = isolate->factory()->nan_value();
is_value_nan = true;
} else if (!is_utc &&
(time < -DateCache::kMaxTimeBeforeUTCInMs ||
time > DateCache::kMaxTimeBeforeUTCInMs)) {
- value = isolate->heap()->nan_value();
+ value = isolate->factory()->nan_value();
is_value_nan = true;
} else {
time = is_utc ? time : date_cache->ToUTC(static_cast<int64_t>(time));
if (time < -DateCache::kMaxTimeInMs ||
time > DateCache::kMaxTimeInMs) {
- value = isolate->heap()->nan_value();
+ value = isolate->factory()->nan_value();
is_value_nan = true;
} else {
- MaybeObject* maybe_result =
- isolate->heap()->AllocateHeapNumber(DoubleToInteger(time));
- if (!maybe_result->ToObject(&value)) return maybe_result;
+ value = isolate->factory()->NewNumber(DoubleToInteger(time));
}
}
- date->SetValue(value, is_value_nan);
- return value;
+ date->SetValue(*value, is_value_nan);
+ return *value;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
+RUNTIME_FUNCTION(RuntimeHidden_NewSloppyArguments) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
- Handle<JSFunction> callee = args.at<JSFunction>(0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
Object** parameters = reinterpret_cast<Object**>(args[1]);
- const int argument_count = Smi::cast(args[2])->value();
+ CONVERT_SMI_ARG_CHECKED(argument_count, 2);
Handle<JSObject> result =
isolate->factory()->NewArgumentsObject(callee, argument_count);
@@ -7931,13 +8012,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
Handle<FixedArray> parameter_map =
isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
parameter_map->set_map(
- isolate->heap()->non_strict_arguments_elements_map());
+ isolate->heap()->sloppy_arguments_elements_map());
- Handle<Map> old_map(result->map());
- Handle<Map> new_map = isolate->factory()->CopyMap(old_map);
- new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
+ Handle<Map> map = Map::Copy(handle(result->map()));
+ map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
- result->set_map(*new_map);
+ result->set_map(*map);
result->set_elements(*parameter_map);
// Store the context and the arguments array at the beginning of the
@@ -8008,54 +8088,42 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_NewStrictArguments) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
-
- JSFunction* callee = JSFunction::cast(args[0]);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
Object** parameters = reinterpret_cast<Object**>(args[1]);
- const int length = args.smi_at(2);
+ CONVERT_SMI_ARG_CHECKED(length, 2);
- Object* result;
- { MaybeObject* maybe_result =
- isolate->heap()->AllocateArgumentsObject(callee, length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Allocate the elements if needed.
- if (length > 0) {
- // Allocate the fixed array.
- FixedArray* array;
- { MaybeObject* maybe_obj =
- isolate->heap()->AllocateUninitializedFixedArray(length);
- if (!maybe_obj->To(&array)) return maybe_obj;
- }
+ Handle<JSObject> result =
+ isolate->factory()->NewArgumentsObject(callee, length);
+ if (length > 0) {
+ Handle<FixedArray> array =
+ isolate->factory()->NewUninitializedFixedArray(length);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, *--parameters, mode);
}
- JSObject::cast(result)->set_elements(array);
+ result->set_elements(*array);
}
- return result;
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosureFromStubFailure) {
+RUNTIME_FUNCTION(RuntimeHidden_NewClosureFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
Handle<Context> context(isolate->context());
PretenureFlag pretenure_flag = NOT_TENURED;
- Handle<JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context,
- pretenure_flag);
- return *result;
+ return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, context, pretenure_flag);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
+RUNTIME_FUNCTION(RuntimeHidden_NewClosure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
@@ -8065,11 +8133,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
// The caller ensures that we pretenure closures that are assigned
// directly to properties.
PretenureFlag pretenure_flag = pretenure ? TENURED : NOT_TENURED;
- Handle<JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context,
- pretenure_flag);
- return *result;
+ return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, context, pretenure_flag);
}
@@ -8088,23 +8153,22 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
if (functions.length() > 1) {
int inlined_jsframe_index = functions.length() - 1;
JSFunction* inlined_function = functions[inlined_jsframe_index];
- Vector<SlotRef> args_slots =
- SlotRef::ComputeSlotMappingForArguments(
- frame,
- inlined_jsframe_index,
- inlined_function->shared()->formal_parameter_count());
+ SlotRefValueBuilder slot_refs(
+ frame,
+ inlined_jsframe_index,
+ inlined_function->shared()->formal_parameter_count());
- int args_count = args_slots.length();
+ int args_count = slot_refs.args_length();
*total_argc = prefix_argc + args_count;
SmartArrayPointer<Handle<Object> > param_data(
NewArray<Handle<Object> >(*total_argc));
+ slot_refs.Prepare(isolate);
for (int i = 0; i < args_count; i++) {
- Handle<Object> val = args_slots[i].GetValue(isolate);
+ Handle<Object> val = slot_refs.GetNext(isolate, 0);
param_data[prefix_argc + i] = val;
}
-
- args_slots.Dispose();
+ slot_refs.Finish(isolate);
return param_data;
} else {
@@ -8124,12 +8188,13 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
+RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0);
- RUNTIME_ASSERT(args[3]->IsNumber());
- Handle<Object> bindee = args.at<Object>(1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, bindee, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, this_object, 2);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(new_length, 3);
// TODO(lrn): Create bound function in C++ code from premade shared info.
bound_function->shared()->set_bound(true);
@@ -8139,10 +8204,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
GetCallerArguments(isolate, 0, &argc);
// Don't count the this-arg.
if (argc > 0) {
- ASSERT(*arguments[0] == args[2]);
+ RUNTIME_ASSERT(arguments[0].is_identical_to(this_object));
argc--;
} else {
- ASSERT(args[2]->IsUndefined());
+ RUNTIME_ASSERT(this_object->IsUndefined());
}
// Initialize array of bindings (function, this, and any existing arguments
// if the function was already bound).
@@ -8151,6 +8216,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) {
Handle<FixedArray> old_bindings(
JSFunction::cast(*bindee)->function_bindings());
+ RUNTIME_ASSERT(old_bindings->length() > JSFunction::kBoundFunctionIndex);
new_bindings =
isolate->factory()->NewFixedArray(old_bindings->length() + argc);
bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex),
@@ -8163,7 +8229,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
int array_size = JSFunction::kBoundArgumentsStartIndex + argc;
new_bindings = isolate->factory()->NewFixedArray(array_size);
new_bindings->set(JSFunction::kBoundFunctionIndex, *bindee);
- new_bindings->set(JSFunction::kBoundThisIndex, args[2]);
+ new_bindings->set(JSFunction::kBoundThisIndex, *this_object);
i = 2;
}
// Copy arguments, skipping the first which is "this_arg".
@@ -8174,17 +8240,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
isolate->heap()->fixed_cow_array_map());
bound_function->set_function_bindings(*new_bindings);
- // Update length.
+ // Update length. Have to remove the prototype first so that map migration
+ // is happy about the number of fields.
+ RUNTIME_ASSERT(bound_function->RemovePrototype());
+ Handle<Map> bound_function_map(
+ isolate->native_context()->bound_function_map());
+ JSObject::MigrateToMap(bound_function, bound_function_map);
Handle<String> length_string = isolate->factory()->length_string();
- Handle<Object> new_length(args.at<Object>(3));
PropertyAttributes attr =
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
- ForceSetProperty(bound_function, length_string, new_length, attr);
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(bound_function, length_string,
+ new_length, attr));
return *bound_function;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) {
+RUNTIME_FUNCTION(Runtime_BoundFunctionGetBindings) {
HandleScope handles(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callable, 0);
@@ -8192,7 +8265,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) {
Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
if (function->shared()->bound()) {
Handle<FixedArray> bindings(function->function_bindings());
- ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map());
+ RUNTIME_ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map());
return *isolate->factory()->NewJSArrayWithElements(bindings);
}
}
@@ -8200,7 +8273,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
+RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
// First argument is a function to use as a constructor.
@@ -8227,32 +8300,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
}
if (!bound_function->IsJSFunction()) {
- bool exception_thrown;
- bound_function = Execution::TryGetConstructorDelegate(isolate,
- bound_function,
- &exception_thrown);
- if (exception_thrown) return Failure::Exception();
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, bound_function,
+ Execution::TryGetConstructorDelegate(isolate, bound_function));
}
ASSERT(bound_function->IsJSFunction());
- bool exception = false;
- Handle<Object> result =
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
Execution::New(Handle<JSFunction>::cast(bound_function),
- total_argc, *param_data, &exception);
- if (exception) {
- return Failure::Exception();
- }
- ASSERT(!result.is_null());
+ total_argc, param_data.get()));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- Handle<Object> constructor = args.at<Object>(0);
-
+static Object* Runtime_NewObjectHelper(Isolate* isolate,
+ Handle<Object> constructor,
+ Handle<AllocationSite> site) {
// If the constructor isn't a proper function we throw a type error.
if (!constructor->IsJSFunction()) {
Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
@@ -8272,13 +8337,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
return isolate->Throw(*type_error);
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
// Handle stepping into constructors if step into is active.
if (debug->StepInActive()) {
debug->HandleStepIn(function, Handle<Object>::null(), 0, true);
}
-#endif
if (function->has_initial_map()) {
if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
@@ -8299,20 +8362,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
// The function should be compiled for the optimization hints to be
// available.
- JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION);
+ Compiler::EnsureCompiled(function, CLEAR_EXCEPTION);
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- if (!function->has_initial_map() &&
- shared->IsInobjectSlackTrackingInProgress()) {
- // The tracking is already in progress for another function. We can only
- // track one initial_map at a time, so we force the completion before the
- // function is called as a constructor for the first time.
- shared->CompleteInobjectSlackTracking();
+ Handle<JSObject> result;
+ if (site.is_null()) {
+ result = isolate->factory()->NewJSObject(function);
+ } else {
+ result = isolate->factory()->NewJSObjectWithMemento(function, site);
}
- Handle<JSObject> result = isolate->factory()->NewJSObject(function);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
-
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
@@ -8320,53 +8378,88 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
+RUNTIME_FUNCTION(RuntimeHidden_NewObject) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0);
+ return Runtime_NewObjectHelper(isolate,
+ constructor,
+ Handle<AllocationSite>::null());
+}
+
+
+RUNTIME_FUNCTION(RuntimeHidden_NewObjectWithAllocationSite) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, feedback, 0);
+ Handle<AllocationSite> site;
+ if (feedback->IsAllocationSite()) {
+ // The feedback can be an AllocationSite or undefined.
+ site = Handle<AllocationSite>::cast(feedback);
+ }
+ return Runtime_NewObjectHelper(isolate, constructor, site);
+}
+
+
+RUNTIME_FUNCTION(RuntimeHidden_FinalizeInstanceSize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- function->shared()->CompleteInobjectSlackTracking();
+ function->CompleteInobjectSlackTracking();
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
+RUNTIME_FUNCTION(RuntimeHidden_CompileUnoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
-
- Handle<JSFunction> function = args.at<JSFunction>(0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
#ifdef DEBUG
if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
- PrintF("[lazy: ");
+ PrintF("[unoptimized: ");
function->PrintName();
PrintF("]\n");
}
#endif
// Compile the target function.
- ASSERT(!function->is_compiled());
- if (!JSFunction::CompileLazy(function, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
+ ASSERT(function->shared()->allows_lazy_compilation());
+
+ Handle<Code> code;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, code,
+ Compiler::GetUnoptimizedCode(function));
+ function->ReplaceCode(*code);
// All done. Return the compiled code.
ASSERT(function->is_compiled());
- return function->code();
+ ASSERT(function->code()->kind() == Code::FUNCTION ||
+ (FLAG_always_opt &&
+ function->code()->kind() == Code::OPTIMIZED_FUNCTION));
+ return *code;
}
-bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) {
- // If the function is not compiled ignore the lazy
- // recompilation. This can happen if the debugger is activated and
- // the function is returned to the not compiled state.
- if (!function->shared()->is_compiled()) return false;
-
- // If the function is not optimizable or debugger is active continue using the
- // code from the full compiler.
- if (!isolate->use_crankshaft() ||
- function->shared()->optimization_disabled() ||
- isolate->DebuggerHasBreakPoints()) {
+RUNTIME_FUNCTION(RuntimeHidden_CompileOptimized) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
+
+ Handle<Code> unoptimized(function->shared()->code());
+ if (!function->shared()->is_compiled()) {
+ // If the function is not compiled, do not optimize.
+ // This can happen if the debugger is activated and
+ // the function is returned to the not compiled state.
+ // TODO(yangguo): reconsider this.
+ function->ReplaceCode(function->shared()->code());
+ } else if (!isolate->use_crankshaft() ||
+ function->shared()->optimization_disabled() ||
+ isolate->DebuggerHasBreakPoints()) {
+ // If the function is not optimizable or the debugger is active, continue
+ // using the code from the full compiler.
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
function->PrintName();
@@ -8374,52 +8467,26 @@ bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) {
function->shared()->optimization_disabled() ? "F" : "T",
isolate->DebuggerHasBreakPoints() ? "T" : "F");
}
- return false;
+ function->ReplaceCode(*unoptimized);
+ } else {
+ Compiler::ConcurrencyMode mode = concurrent ? Compiler::CONCURRENT
+ : Compiler::NOT_CONCURRENT;
+ Handle<Code> code;
+ if (Compiler::GetOptimizedCode(
+ function, unoptimized, mode).ToHandle(&code)) {
+ function->ReplaceCode(*code);
+ } else {
+ function->ReplaceCode(*unoptimized);
+ }
}
- return true;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- Handle<JSFunction> function = args.at<JSFunction>(0);
- if (!AllowOptimization(isolate, function)) {
- function->ReplaceCode(function->shared()->code());
- return function->code();
- }
- function->shared()->code()->set_profiler_ticks(0);
- if (JSFunction::CompileOptimized(function, CLEAR_EXCEPTION)) {
- return function->code();
- }
- if (FLAG_trace_opt) {
- PrintF("[failed to optimize ");
- function->PrintName();
- PrintF(": optimized compilation failed]\n");
- }
- function->ReplaceCode(function->shared()->code());
+ ASSERT(function->code()->kind() == Code::FUNCTION ||
+ function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
+ function->IsInOptimizationQueue());
return function->code();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ConcurrentRecompile) {
- HandleScope handle_scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (!AllowOptimization(isolate, function)) {
- function->ReplaceCode(function->shared()->code());
- return isolate->heap()->undefined_value();
- }
- function->shared()->code()->set_profiler_ticks(0);
- ASSERT(isolate->concurrent_recompilation_enabled());
- if (!Compiler::RecompileConcurrent(function)) {
- function->ReplaceCode(function->shared()->code());
- }
- return isolate->heap()->undefined_value();
-}
-
-
class ActivationsFinder : public ThreadVisitor {
public:
Code* code_;
@@ -8443,7 +8510,7 @@ class ActivationsFinder : public ThreadVisitor {
};
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
+RUNTIME_FUNCTION(RuntimeHidden_NotifyStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
@@ -8453,12 +8520,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
+RUNTIME_FUNCTION(RuntimeHidden_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- RUNTIME_ASSERT(args[0]->IsSmi());
+ CONVERT_SMI_ARG_CHECKED(type_arg, 0);
Deoptimizer::BailoutType type =
- static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
+ static_cast<Deoptimizer::BailoutType>(type_arg);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(AllowHeapAllocation::IsAllowed());
@@ -8496,6 +8563,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
PrintF("]\n");
}
function->ReplaceCode(function->shared()->code());
+ // Evict optimized code for this function from the cache so that it
+ // doesn't get used for new closures.
+ function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
+ "notify deoptimized");
}
} else {
// TODO(titzer): we should probably do DeoptimizeCodeList(code)
@@ -8503,16 +8574,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
// If there is an index by shared function info, all the better.
Deoptimizer::DeoptimizeFunction(*function);
}
- // Evict optimized code for this function from the cache so that it doesn't
- // get used for new closures.
- function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
- "notify deoptimized");
return isolate->heap()->undefined_value();
}
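
NotifyDeoptimized now decodes its bailout type with CONVERT_SMI_ARG_CHECKED instead of a raw RUNTIME_ASSERT plus args.smi_at(0). A sketch of the underlying round-trip, assuming the classic smi tagging where the payload is shifted left one bit and the low tag bit stays clear (the names and the exact tagging shown are illustrative):

    #include <cassert>
    #include <cstdint>

    enum BailoutType { EAGER = 0, LAZY = 1, SOFT = 2 };

    // Tag a small integer as a "smi": shift left, low bit stays 0.
    intptr_t ToSmi(int value) { return static_cast<intptr_t>(value) << 1; }

    // Untag: verify the tag bit, shift back, cast to the enum -- the
    // shape of CONVERT_SMI_ARG_CHECKED followed by static_cast above.
    BailoutType SmiToBailoutType(intptr_t tagged) {
      assert((tagged & 1) == 0 && "argument is not a smi");
      return static_cast<BailoutType>(tagged >> 1);
    }

    int main() {
      intptr_t arg = ToSmi(LAZY);
      assert(SmiToBailoutType(arg) == LAZY);
      return 0;
    }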
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
+RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -8524,21 +8591,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
+RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ function->shared()->ClearTypeFeedbackInfo();
Code* unoptimized = function->shared()->code();
if (unoptimized->kind() == Code::FUNCTION) {
unoptimized->ClearInlineCaches();
- unoptimized->ClearTypeFeedbackCells(isolate->heap());
}
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
+RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
#if defined(USE_SIMULATOR)
return isolate->heap()->true_value();
#else
@@ -8547,20 +8615,26 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConcurrentRecompilationSupported) {
- HandleScope scope(isolate);
- return isolate->concurrent_recompilation_enabled()
- ? isolate->heap()->true_value() : isolate->heap()->false_value();
+RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
+ return isolate->heap()->ToBoolean(
+ isolate->concurrent_recompilation_enabled());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
+RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (!function->IsOptimizable()) return isolate->heap()->undefined_value();
- function->MarkForLazyRecompilation();
+ if (!function->IsOptimizable() &&
+ !function->IsMarkedForConcurrentOptimization() &&
+ !function->IsInOptimizationQueue()) {
+ return isolate->heap()->undefined_value();
+ }
+
+ function->MarkForOptimization();
Code* unoptimized = function->shared()->code();
if (args.length() == 2 &&
@@ -8570,12 +8644,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
// Start patching from the currently patched loop nesting level.
int current_level = unoptimized->allow_osr_at_loop_nesting_level();
ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
- for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
- unoptimized->set_allow_osr_at_loop_nesting_level(i);
- isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
+ if (FLAG_use_osr) {
+ for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
+ unoptimized->set_allow_osr_at_loop_nesting_level(i);
+ isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
+ }
}
- } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent"))) {
- function->MarkForConcurrentRecompilation();
+ } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent")) &&
+ isolate->concurrent_recompilation_enabled()) {
+ function->MarkForConcurrentOptimization();
}
}
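
The "osr" branch above now only patches back edges when --use-osr is enabled, arming one loop-nesting level at a time from the currently patched level up to Code::kMaxLoopNestingMarker. A toy model of that progressive arming (the constant and types are invented for illustration):

    #include <iostream>

    const int kMaxLoopNestingMarker = 6;  // illustrative, not V8's value

    struct Code {
      int allow_osr_at_loop_nesting_level;
    };

    // Arm OSR checks level by level, starting just above the level that
    // is already patched -- the same loop shape as the hunk above.
    void ArmBackEdges(Code* code, bool use_osr) {
      if (!use_osr) return;
      int current = code->allow_osr_at_loop_nesting_level;
      for (int i = current + 1; i <= kMaxLoopNestingMarker; i++) {
        code->allow_osr_at_loop_nesting_level = i;
        std::cout << "attempting OSR at nesting level " << i << "\n";
      }
    }

    int main() {
      Code code = { 2 };          // levels up to 2 already patched
      ArmBackEdges(&code, true);  // arms levels 3..6
    }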
@@ -8583,17 +8660,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NeverOptimizeFunction) {
+RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- ASSERT(!function->IsOptimized());
function->shared()->set_optimization_disabled(true);
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
+RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
if (!isolate->use_crankshaft()) {
@@ -8609,7 +8685,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (isolate->concurrent_recompilation_enabled() &&
sync_with_compiler_thread) {
- while (function->IsInRecompileQueue()) {
+ while (function->IsInOptimizationQueue()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
}
@@ -8628,14 +8704,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_UnblockConcurrentRecompilation) {
+RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
+ ASSERT(args.length() == 0);
RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
+ RUNTIME_ASSERT(isolate->concurrent_recompilation_enabled());
isolate->optimizing_compiler_thread()->Unblock();
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
+RUNTIME_FUNCTION(Runtime_GetOptimizationCount) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -8645,9 +8723,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
static bool IsSuitableForOnStackReplacement(Isolate* isolate,
Handle<JSFunction> function,
- Handle<Code> unoptimized) {
+ Handle<Code> current_code) {
// Keep track of whether we've succeeded in optimizing.
- if (!isolate->use_crankshaft() || !unoptimized->optimizable()) return false;
+ if (!isolate->use_crankshaft() || !current_code->optimizable()) return false;
// If we are trying to do OSR when there are already optimized
// activations of the function, it means (a) the function is directly or
// indirectly recursive and (b) an optimized invocation has been
@@ -8662,76 +8740,88 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
+RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
HandleScope scope(isolate);
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, pc_offset, Uint32, args[1]);
- Handle<Code> unoptimized(function->shared()->code(), isolate);
+ Handle<Code> caller_code(function->shared()->code());
-#ifdef DEBUG
+ // We're not prepared to handle a function with arguments object.
+ ASSERT(!function->shared()->uses_arguments());
+
+ RUNTIME_ASSERT(FLAG_use_osr);
+
+  // Passing the PC in the JavaScript frame from the caller directly is
+  // not GC-safe, so we walk the stack to get it.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- ASSERT_EQ(frame->function(), *function);
- ASSERT_EQ(frame->LookupCode(), *unoptimized);
- ASSERT(unoptimized->contains(frame->pc()));
+ if (!caller_code->contains(frame->pc())) {
+ // Code on the stack may not be the code object referenced by the shared
+ // function info. It may have been replaced to include deoptimization data.
+ caller_code = Handle<Code>(frame->LookupCode());
+ }
+
+ uint32_t pc_offset = static_cast<uint32_t>(
+ frame->pc() - caller_code->instruction_start());
- ASSERT(pc_offset ==
- static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start()));
+#ifdef DEBUG
+ ASSERT_EQ(frame->function(), *function);
+ ASSERT_EQ(frame->LookupCode(), *caller_code);
+ ASSERT(caller_code->contains(frame->pc()));
#endif // DEBUG
- // We're not prepared to handle a function with arguments object.
- ASSERT(!function->shared()->uses_arguments());
+ BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
+ ASSERT(!ast_id.IsNone());
+
+ Compiler::ConcurrencyMode mode =
+ isolate->concurrent_osr_enabled() &&
+ (function->shared()->ast_node_count() > 512) ? Compiler::CONCURRENT
+ : Compiler::NOT_CONCURRENT;
Handle<Code> result = Handle<Code>::null();
- BailoutId ast_id = BailoutId::None();
- if (isolate->concurrent_osr_enabled()) {
- if (isolate->optimizing_compiler_thread()->
- IsQueuedForOSR(function, pc_offset)) {
- // Still waiting for the optimizing compiler thread to finish. Carry on.
+ OptimizedCompileJob* job = NULL;
+ if (mode == Compiler::CONCURRENT) {
+ // Gate the OSR entry with a stack check.
+ BackEdgeTable::AddStackCheck(caller_code, pc_offset);
+ // Poll already queued compilation jobs.
+ OptimizingCompilerThread* thread = isolate->optimizing_compiler_thread();
+ if (thread->IsQueuedForOSR(function, ast_id)) {
if (FLAG_trace_osr) {
- PrintF("[COSR - polling recompile tasks for ");
+ PrintF("[OSR - Still waiting for queued: ");
function->PrintName();
- PrintF("]\n");
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
}
return NULL;
}
- RecompileJob* job = isolate->optimizing_compiler_thread()->
- FindReadyOSRCandidate(function, pc_offset);
+ job = thread->FindReadyOSRCandidate(function, ast_id);
+ }
- if (job == NULL) {
- if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
- Compiler::RecompileConcurrent(function, pc_offset)) {
- if (function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForConcurrentRecompilation()) {
- // Prevent regular recompilation if we queue this for OSR.
- // TODO(yangguo): remove this as soon as OSR becomes one-shot.
- function->ReplaceCode(*unoptimized);
- }
- return NULL;
- }
- // Fall through to the end in case of failure.
- } else {
- // TODO(titzer): don't install the OSR code into the function.
- ast_id = job->info()->osr_ast_id();
- result = Compiler::InstallOptimizedCode(job);
+ if (job != NULL) {
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - Found ready: ");
+ function->PrintName();
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- } else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
- ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
- ASSERT(!ast_id.IsNone());
+ result = Compiler::GetConcurrentlyOptimizedCode(job);
+ } else if (IsSuitableForOnStackReplacement(isolate, function, caller_code)) {
if (FLAG_trace_osr) {
- PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
+ PrintF("[OSR - Compiling: ");
function->PrintName();
- PrintF("]\n");
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
+ }
+ MaybeHandle<Code> maybe_result = Compiler::GetOptimizedCode(
+ function, caller_code, mode, ast_id);
+ if (maybe_result.ToHandle(&result) &&
+ result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
+ // Optimization is queued. Return to check later.
+ return NULL;
}
- // Attempt OSR compilation.
- result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
}
// Revert the patched back edge table, regardless of whether OSR succeeds.
- BackEdgeTable::Revert(isolate, *unoptimized);
+ BackEdgeTable::Revert(isolate, *caller_code);
// Check whether we ended up with usable optimized code.
if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
@@ -8741,58 +8831,72 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
if (data->OsrPcOffset()->value() >= 0) {
ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
if (FLAG_trace_osr) {
- PrintF("[OSR - entry at AST id %d, offset %d in optimized code]\n",
+ PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n",
ast_id.ToInt(), data->OsrPcOffset()->value());
}
// TODO(titzer): this is a massive hack to make the deopt counts
// match. Fix heuristics for reenabling optimizations!
function->shared()->increment_deopt_count();
+
+ // TODO(titzer): Do not install code into the function.
+ function->ReplaceCode(*result);
return *result;
}
}
+ // Failed.
if (FLAG_trace_osr) {
- PrintF("[OSR - optimization failed for ");
+ PrintF("[OSR - Failed: ");
function->PrintName();
- PrintF("]\n");
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- if (function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForConcurrentRecompilation()) {
+ if (!function->IsOptimized()) {
function->ReplaceCode(function->shared()->code());
}
return NULL;
}
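
Two decisions drive the rewritten OSR entry above: the pc offset is recovered by walking to the topmost JavaScript frame rather than trusting a passed-in value, and concurrent OSR is chosen only for large functions (more than 512 AST nodes in this patch). Both in isolation, with stand-in types:

    #include <cstdint>
    #include <iostream>

    enum class Mode { kNotConcurrent, kConcurrent };

    // Offset of the return address within the caller's code object, as
    // computed above from frame->pc() - instruction_start().
    uint32_t PcOffset(const uint8_t* pc, const uint8_t* instruction_start) {
      return static_cast<uint32_t>(pc - instruction_start);
    }

    // Large functions take the concurrent OSR path; for small ones the
    // queue round-trip would cost more than compiling synchronously.
    Mode PickOsrMode(bool concurrent_osr_enabled, int ast_node_count) {
      return (concurrent_osr_enabled && ast_node_count > 512)
                 ? Mode::kConcurrent
                 : Mode::kNotConcurrent;
    }

    int main() {
      uint8_t code[64];
      std::cout << PcOffset(code + 40, code) << "\n";  // prints 40
      std::cout << (PickOsrMode(true, 1000) == Mode::kConcurrent) << "\n";
    }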
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAllocationTimeout) {
+RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 2 || args.length() == 3);
#ifdef DEBUG
CONVERT_SMI_ARG_CHECKED(interval, 0);
CONVERT_SMI_ARG_CHECKED(timeout, 1);
isolate->heap()->set_allocation_timeout(timeout);
FLAG_gc_interval = interval;
+ if (args.length() == 3) {
+ // Enable/disable inline allocation if requested.
+ CONVERT_BOOLEAN_ARG_CHECKED(inline_allocation, 2);
+ if (inline_allocation) {
+ isolate->heap()->EnableInlineAllocation();
+ } else {
+ isolate->heap()->DisableInlineAllocation();
+ }
+ }
#endif
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
+RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetRootNaN) {
+RUNTIME_FUNCTION(Runtime_GetRootNaN) {
SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
return isolate->heap()->nan_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
+RUNTIME_FUNCTION(Runtime_Call) {
HandleScope scope(isolate);
ASSERT(args.length() >= 2);
int argc = args.length() - 2;
@@ -8811,33 +8915,32 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
}
for (int i = 0; i < argc; ++i) {
- MaybeObject* maybe = args[1 + i];
- Object* object;
- if (!maybe->To<Object>(&object)) return maybe;
- argv[i] = Handle<Object>(object, isolate);
+ argv[i] = Handle<Object>(args[1 + i], isolate);
}
- bool threw;
Handle<JSReceiver> hfun(fun);
Handle<Object> hreceiver(receiver, isolate);
- Handle<Object> result = Execution::Call(
- isolate, hfun, hreceiver, argc, argv, &threw, true);
-
- if (threw) return Failure::Exception();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, hfun, hreceiver, argc, argv, true));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
+RUNTIME_FUNCTION(Runtime_Apply) {
HandleScope scope(isolate);
ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0);
- Handle<Object> receiver = args.at<Object>(1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
CONVERT_SMI_ARG_CHECKED(offset, 3);
CONVERT_SMI_ARG_CHECKED(argc, 4);
RUNTIME_ASSERT(offset >= 0);
- RUNTIME_ASSERT(argc >= 0);
+ // Loose upper bound to allow fuzzing. We'll most likely run out of
+ // stack space before hitting this limit.
+ static int kMaxArgc = 1000000;
+ RUNTIME_ASSERT(argc >= 0 && argc <= kMaxArgc);
// If there are too many arguments, allocate argv via malloc.
const int argv_small_size = 10;
@@ -8851,174 +8954,152 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
}
for (int i = 0; i < argc; ++i) {
- argv[i] = Object::GetElement(isolate, arguments, offset + i);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, argv[i],
+ Object::GetElement(isolate, arguments, offset + i));
}
- bool threw;
- Handle<Object> result = Execution::Call(
- isolate, fun, receiver, argc, argv, &threw, true);
-
- if (threw) return Failure::Exception();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, fun, receiver, argc, argv, true));
return *result;
}
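
Runtime_Apply keeps the small-argv optimization: up to argv_small_size arguments are staged in a stack buffer, larger counts go to the heap, and the new kMaxArgc bound stops fuzzers from requesting absurd sizes. The pattern in isolation (the buffer size and limit match the diff; everything else is illustrative):

    #include <cassert>
    #include <memory>

    static const int kMaxArgc = 1000000;  // loose upper bound, as above

    // Sum `argc` values, staging them in a stack buffer when small and
    // falling back to the heap otherwise -- Runtime_Apply's argv pattern.
    long SumArgs(const int* args, int argc) {
      assert(argc >= 0 && argc <= kMaxArgc);
      const int argv_small_size = 10;
      int argv_small_buffer[argv_small_size];
      std::unique_ptr<int[]> argv_large;
      int* argv = argv_small_buffer;
      if (argc > argv_small_size) {
        argv_large.reset(new int[argc]);
        argv = argv_large.get();
      }
      long sum = 0;
      for (int i = 0; i < argc; ++i) {
        argv[i] = args[i];  // stand-in for Object::GetElement
        sum += argv[i];
      }
      return sum;
    }

    int main() {
      int values[3] = {1, 2, 3};
      assert(SumArgs(values, 3) == 6);
      return 0;
    }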
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
+RUNTIME_FUNCTION(Runtime_GetFunctionDelegate) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetFunctionDelegate(isolate, args.at<Object>(0));
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ RUNTIME_ASSERT(!object->IsJSFunction());
+ return *Execution::GetFunctionDelegate(isolate, object);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
+RUNTIME_FUNCTION(Runtime_GetConstructorDelegate) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetConstructorDelegate(isolate, args.at<Object>(0));
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ RUNTIME_ASSERT(!object->IsJSFunction());
+ return *Execution::GetConstructorDelegate(isolate, object);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_NewGlobalContext) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- CONVERT_ARG_CHECKED(ScopeInfo, scope_info, 1);
- Context* result;
- MaybeObject* maybe_result =
- isolate->heap()->AllocateGlobalContext(function, scope_info);
- if (!maybe_result->To(&result)) return maybe_result;
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
+ Handle<Context> result =
+ isolate->factory()->NewGlobalContext(function, scope_info);
ASSERT(function->context() == isolate->context());
ASSERT(function->context()->global_object() == result->global_object());
- isolate->set_context(result);
- result->global_object()->set_global_context(result);
-
- return result; // non-failure
+ result->global_object()->set_global_context(*result);
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_NewFunctionContext) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
int length = function->shared()->scope_info()->ContextLength();
- Context* result;
- MaybeObject* maybe_result =
- isolate->heap()->AllocateFunctionContext(length, function);
- if (!maybe_result->To(&result)) return maybe_result;
-
- isolate->set_context(result);
-
- return result; // non-failure
+ return *isolate->factory()->NewFunctionContext(length, function);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_PushWithContext) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- JSReceiver* extension_object;
+ Handle<JSReceiver> extension_object;
if (args[0]->IsJSReceiver()) {
- extension_object = JSReceiver::cast(args[0]);
+ extension_object = args.at<JSReceiver>(0);
} else {
- // Convert the object to a proper JavaScript object.
- MaybeObject* maybe_js_object = args[0]->ToObject(isolate);
- if (!maybe_js_object->To(&extension_object)) {
- if (Failure::cast(maybe_js_object)->IsInternalError()) {
- HandleScope scope(isolate);
- Handle<Object> handle = args.at<Object>(0);
- Handle<Object> result =
- isolate->factory()->NewTypeError("with_expression",
- HandleVector(&handle, 1));
- return isolate->Throw(*result);
- } else {
- return maybe_js_object;
- }
+ // Try to convert the object to a proper JavaScript object.
+ MaybeHandle<JSReceiver> maybe_object =
+ Object::ToObject(isolate, args.at<Object>(0));
+ if (!maybe_object.ToHandle(&extension_object)) {
+ Handle<Object> handle = args.at<Object>(0);
+ Handle<Object> result =
+ isolate->factory()->NewTypeError("with_expression",
+ HandleVector(&handle, 1));
+ return isolate->Throw(*result);
}
}
- JSFunction* function;
+ Handle<JSFunction> function;
if (args[1]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
// than some function. There is a canonical empty function that can be
// gotten from the native context.
- function = isolate->context()->native_context()->closure();
+ function = handle(isolate->context()->native_context()->closure());
} else {
- function = JSFunction::cast(args[1]);
+ function = args.at<JSFunction>(1);
}
- Context* context;
- MaybeObject* maybe_context =
- isolate->heap()->AllocateWithContext(function,
- isolate->context(),
- extension_object);
- if (!maybe_context->To(&context)) return maybe_context;
- isolate->set_context(context);
- return context;
+ Handle<Context> current(isolate->context());
+ Handle<Context> context = isolate->factory()->NewWithContext(
+ function, current, extension_object);
+ isolate->set_context(*context);
+ return *context;
}
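
PushWithContext, like the catch and block variants below, accepts either a real function or a smi sentinel meaning "context nested in global code: use the canonical empty closure". One way to model an argument that is either a small-integer marker or an object, sketched here with std::variant (purely illustrative, not how V8 represents it):

    #include <iostream>
    #include <string>
    #include <variant>

    struct Closure { std::string name; };

    const Closure kEmptyClosure = { "native_context_closure" };

    // Either a smi sentinel (global code) or an actual closure.
    using ClosureArg = std::variant<int, const Closure*>;

    const Closure* ResolveClosure(ClosureArg arg) {
      if (std::holds_alternative<int>(arg)) {
        // Smi sentinel: fall back to the canonical empty function.
        return &kEmptyClosure;
      }
      return std::get<const Closure*>(arg);
    }

    int main() {
      Closure f = { "outer" };
      std::cout << ResolveClosure(&f)->name << "\n";       // outer
      ClosureArg sentinel = 42;                            // any smi works
      std::cout << ResolveClosure(sentinel)->name << "\n";
    }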
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_PushCatchContext) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- String* name = String::cast(args[0]);
- Object* thrown_object = args[1];
- JSFunction* function;
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, thrown_object, 1);
+ Handle<JSFunction> function;
if (args[2]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
// than some function. There is a canonical empty function that can be
// gotten from the native context.
- function = isolate->context()->native_context()->closure();
+ function = handle(isolate->context()->native_context()->closure());
} else {
- function = JSFunction::cast(args[2]);
+ function = args.at<JSFunction>(2);
}
- Context* context;
- MaybeObject* maybe_context =
- isolate->heap()->AllocateCatchContext(function,
- isolate->context(),
- name,
- thrown_object);
- if (!maybe_context->To(&context)) return maybe_context;
- isolate->set_context(context);
- return context;
+ Handle<Context> current(isolate->context());
+ Handle<Context> context = isolate->factory()->NewCatchContext(
+ function, current, name, thrown_object);
+ isolate->set_context(*context);
+ return *context;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_PushBlockContext) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
- JSFunction* function;
+ CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0);
+ Handle<JSFunction> function;
if (args[1]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
// than some function. There is a canonical empty function that can be
// gotten from the native context.
- function = isolate->context()->native_context()->closure();
+ function = handle(isolate->context()->native_context()->closure());
} else {
- function = JSFunction::cast(args[1]);
+ function = args.at<JSFunction>(1);
}
- Context* context;
- MaybeObject* maybe_context =
- isolate->heap()->AllocateBlockContext(function,
- isolate->context(),
- scope_info);
- if (!maybe_context->To(&context)) return maybe_context;
- isolate->set_context(context);
- return context;
+ Handle<Context> current(isolate->context());
+ Handle<Context> context = isolate->factory()->NewBlockContext(
+ function, current, scope_info);
+ isolate->set_context(*context);
+ return *context;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
+RUNTIME_FUNCTION(Runtime_IsJSModule) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- Object* obj = args[0];
+ CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSModule());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
+RUNTIME_FUNCTION(RuntimeHidden_PushModuleContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(index, 0);
@@ -9053,7 +9134,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
+RUNTIME_FUNCTION(RuntimeHidden_DeclareModules) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0);
@@ -9073,20 +9154,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
case VAR:
case LET:
case CONST:
- case CONST_HARMONY: {
+ case CONST_LEGACY: {
PropertyAttributes attr =
IsImmutableVariableMode(mode) ? FROZEN : SEALED;
Handle<AccessorInfo> info =
Accessors::MakeModuleExport(name, index, attr);
- Handle<Object> result = JSObject::SetAccessor(module, info);
- ASSERT(!(result.is_null() || result->IsUndefined()));
+ Handle<Object> result =
+ JSObject::SetAccessor(module, info).ToHandleChecked();
+ ASSERT(!result->IsUndefined());
USE(result);
break;
}
case MODULE: {
Object* referenced_context = Context::cast(host_context)->get(index);
Handle<JSModule> value(Context::cast(referenced_context)->module());
- JSReceiver::SetProperty(module, name, value, FROZEN, kStrictMode);
+ JSReceiver::SetProperty(module, name, value, FROZEN, STRICT).Assert();
break;
}
case INTERNAL:
@@ -9098,7 +9180,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
}
}
- JSObject::PreventExtensions(module);
+ JSObject::PreventExtensions(module).Assert();
}
ASSERT(!isolate->has_pending_exception());
@@ -9106,7 +9188,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
+RUNTIME_FUNCTION(RuntimeHidden_DeleteContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -9137,8 +9219,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
// the global object, or the subject of a with. Try to delete it
// (respecting DONT_DELETE).
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- Handle<Object> result = JSReceiver::DeleteProperty(object, name);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSReceiver::DeleteProperty(object, name));
return *result;
}
@@ -9153,12 +9237,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
// allocated by the caller, and passed as a pointer in a hidden first parameter.
#ifdef V8_HOST_ARCH_64_BIT
struct ObjectPair {
- MaybeObject* x;
- MaybeObject* y;
+ Object* x;
+ Object* y;
};
-static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
+static inline ObjectPair MakePair(Object* x, Object* y) {
ObjectPair result = {x, y};
// Pointers x and y returned in rax and rdx, in AMD-x64-abi.
// In Win64 they are assigned to a hidden first argument.
@@ -9166,20 +9250,18 @@ static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
}
#else
typedef uint64_t ObjectPair;
-static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
+static inline ObjectPair MakePair(Object* x, Object* y) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
return reinterpret_cast<uint32_t>(x) |
(reinterpret_cast<ObjectPair>(y) << 32);
-}
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ return reinterpret_cast<uint32_t>(y) |
+ (reinterpret_cast<ObjectPair>(x) << 32);
+#else
+#error Unknown endianness
#endif
-
-
-static inline MaybeObject* Unhole(Heap* heap,
- MaybeObject* x,
- PropertyAttributes attributes) {
- ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0);
- USE(attributes);
- return x->IsTheHole() ? heap->undefined_value() : x;
}
+#endif
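
On 32-bit targets ObjectPair is a plain uint64_t, and the hunk above makes the packing endian-aware so each tagged pointer lands in the half that the generated calling sequence reads back. A self-contained model of the packing, using uint32_t values in place of Object* (selecting between the two variants is what V8_TARGET_LITTLE_ENDIAN/V8_TARGET_BIG_ENDIAN does at compile time):

    #include <cassert>
    #include <cstdint>

    typedef uint64_t ObjectPair;

    // Little-endian layout: x occupies the low 32 bits.
    ObjectPair MakePairLittle(uint32_t x, uint32_t y) {
      return static_cast<ObjectPair>(x) | (static_cast<ObjectPair>(y) << 32);
    }

    // Big-endian layout: x occupies the high 32 bits.
    ObjectPair MakePairBig(uint32_t x, uint32_t y) {
      return static_cast<ObjectPair>(y) | (static_cast<ObjectPair>(x) << 32);
    }

    int main() {
      ObjectPair little = MakePairLittle(0x1111, 0x2222);
      assert(static_cast<uint32_t>(little) == 0x1111);        // low half: x
      assert(static_cast<uint32_t>(little >> 32) == 0x2222);  // high half: y

      ObjectPair big = MakePairBig(0x1111, 0x2222);
      assert(static_cast<uint32_t>(big >> 32) == 0x1111);     // high half: x
      return 0;
    }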
static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
@@ -9197,9 +9279,8 @@ static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
if (constructor != context_extension_function) return holder;
// Fall back to using the global object as the implicit receiver if
// the property turns out to be a local variable allocated in a
- // context extension object - introduced via eval. Implicit global
- // receivers are indicated with the hole value.
- return isolate->heap()->the_hole_value();
+ // context extension object - introduced via eval.
+ return isolate->heap()->undefined_value();
}
@@ -9225,7 +9306,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
&attributes,
&binding_flags);
if (isolate->has_pending_exception()) {
- return MakePair(Failure::Exception(), NULL);
+ return MakePair(isolate->heap()->exception(), NULL);
}
// If the index is non-negative, the slot has been found in a context.
@@ -9233,11 +9314,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
ASSERT(holder->IsContext());
// If the "property" we were looking for is a local variable, the
// receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
- //
- // Use the hole as the receiver to signal that the receiver is implicit
- // and that the global receiver should be used (as distinguished from an
- // explicit receiver that happens to be a global object).
- Handle<Object> receiver = isolate->factory()->the_hole_value();
+ Handle<Object> receiver = isolate->factory()->undefined_value();
Object* value = Context::cast(*holder)->get(index);
// Check for uninitialized bindings.
switch (binding_flags) {
@@ -9256,7 +9333,11 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
ASSERT(!value->IsTheHole());
return MakePair(value, *receiver);
case IMMUTABLE_CHECK_INITIALIZED:
- return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
+ if (value->IsTheHole()) {
+ ASSERT((attributes & READ_ONLY) != 0);
+ value = isolate->heap()->undefined_value();
+ }
+ return MakePair(value, *receiver);
case MISSING_BINDING:
UNREACHABLE();
return MakePair(NULL, NULL);
@@ -9272,15 +9353,19 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
// GetProperty below can cause GC.
Handle<Object> receiver_handle(
object->IsGlobalObject()
- ? GlobalObject::cast(*object)->global_receiver()
+ ? Object::cast(isolate->heap()->undefined_value())
: object->IsJSProxy() ? static_cast<Object*>(*object)
: ComputeReceiverForNonGlobal(isolate, JSObject::cast(*object)),
isolate);
// No need to unhole the value here. This is taken care of by the
// GetProperty function.
- MaybeObject* value = object->GetProperty(*name);
- return MakePair(value, *receiver_handle);
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetProperty(object, name),
+ MakePair(isolate->heap()->exception(), NULL));
+ return MakePair(*value, *receiver_handle);
}
if (throw_error) {
@@ -9297,26 +9382,24 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
}
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlot) {
+RUNTIME_FUNCTION_RETURN_PAIR(RuntimeHidden_LoadContextSlot) {
return LoadContextSlotHelper(args, isolate, true);
}
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlotNoReferenceError) {
+RUNTIME_FUNCTION_RETURN_PAIR(RuntimeHidden_LoadContextSlotNoReferenceError) {
return LoadContextSlotHelper(args, isolate, false);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
+RUNTIME_FUNCTION(RuntimeHidden_StoreContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
- Handle<Object> value(args[0], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
- StrictModeFlag strict_mode = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
+ CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 3);
int index;
PropertyAttributes attributes;
@@ -9327,7 +9410,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
&index,
&attributes,
&binding_flags);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
if (index >= 0) {
// The property was found in a context slot.
@@ -9343,7 +9426,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
if ((attributes & READ_ONLY) == 0) {
// Context is a fixed array and set cannot fail.
context->set(index, *value);
- } else if (strict_mode == kStrictMode) {
+ } else if (strict_mode == STRICT) {
// Setting read only property in strict mode.
Handle<Object> error =
isolate->factory()->NewTypeError("strict_cannot_assign",
@@ -9365,25 +9448,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
// The property was not found.
ASSERT(attributes == ABSENT);
- if (strict_mode == kStrictMode) {
+ if (strict_mode == STRICT) {
// Throw in strict mode (assignment to undefined variable).
Handle<Object> error =
isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
return isolate->Throw(*error);
}
- // In non-strict mode, the property is added to the global object.
+ // In sloppy mode, the property is added to the global object.
attributes = NONE;
object = Handle<JSReceiver>(isolate->context()->global_object());
}
// Set the property if it's not read only or doesn't yet exist.
if ((attributes & READ_ONLY) == 0 ||
- (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
- RETURN_IF_EMPTY_HANDLE(
+ (JSReceiver::GetOwnPropertyAttributes(object, name) == ABSENT)) {
+ RETURN_FAILURE_ON_EXCEPTION(
isolate,
JSReceiver::SetProperty(object, name, value, NONE, strict_mode));
- } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
+ } else if (strict_mode == STRICT && (attributes & READ_ONLY) != 0) {
// Setting read only property in strict mode.
Handle<Object> error =
isolate->factory()->NewTypeError(
@@ -9394,7 +9477,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
+RUNTIME_FUNCTION(RuntimeHidden_Throw) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -9402,7 +9485,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
+RUNTIME_FUNCTION(RuntimeHidden_ReThrow) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -9410,18 +9493,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
+RUNTIME_FUNCTION(RuntimeHidden_PromoteScheduledException) {
SealHandleScope shs(isolate);
- ASSERT_EQ(0, args.length());
+ ASSERT(args.length() == 0);
return isolate->PromoteScheduledException();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
+RUNTIME_FUNCTION(RuntimeHidden_ThrowReferenceError) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
-
- Handle<Object> name(args[0], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
Handle<Object> reference_error =
isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1));
@@ -9429,7 +9511,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
+RUNTIME_FUNCTION(RuntimeHidden_ThrowNotDateError) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
return isolate->Throw(*isolate->factory()->NewTypeError(
@@ -9437,38 +9519,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowMessage) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_SMI_ARG_CHECKED(message_id, 0);
- const char* message = GetBailoutReason(
- static_cast<BailoutReason>(message_id));
- Handle<Name> message_handle =
- isolate->factory()->NewStringFromAscii(CStrVector(message));
- return isolate->Throw(*message_handle);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
+RUNTIME_FUNCTION(RuntimeHidden_StackGuard) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
// First check if this is a real stack overflow.
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
return isolate->StackOverflow();
}
- return Execution::HandleStackGuardInterrupt(isolate);
+ return isolate->stack_guard()->HandleInterrupts();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallRecompiledCode) {
+RUNTIME_FUNCTION(RuntimeHidden_TryInstallOptimizedCode) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
// First check if this is a real stack overflow.
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
SealHandleScope shs(isolate);
return isolate->StackOverflow();
}
@@ -9479,10 +9551,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallRecompiledCode) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
+RUNTIME_FUNCTION(RuntimeHidden_Interrupt) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- return Execution::HandleStackGuardInterrupt(isolate);
+ return isolate->stack_guard()->HandleInterrupts();
}
@@ -9515,7 +9587,7 @@ static void PrintTransition(Isolate* isolate, Object* result) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
+RUNTIME_FUNCTION(Runtime_TraceEnter) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
PrintTransition(isolate, NULL);
@@ -9523,14 +9595,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
+RUNTIME_FUNCTION(Runtime_TraceExit) {
SealHandleScope shs(isolate);
- PrintTransition(isolate, args[0]);
- return args[0]; // return TOS
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ PrintTransition(isolate, obj);
+ return obj; // return TOS
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
+RUNTIME_FUNCTION(Runtime_DebugPrint) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -9561,7 +9635,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
+RUNTIME_FUNCTION(Runtime_DebugTrace) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
isolate->PrintStack(stdout);
@@ -9569,45 +9643,45 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
+ if (FLAG_log_timer_events) LOG(isolate, CurrentTimeEvent());
// According to ECMA-262, section 15.9.1, page 117, the precision of
// the number in a Date object representing a particular instant in
// time is milliseconds. Therefore, we floor the result of getting
// the OS time.
- double millis = floor(OS::TimeCurrentMillis());
- return isolate->heap()->NumberFromDouble(millis);
+ double millis = std::floor(OS::TimeCurrentMillis());
+ return *isolate->factory()->NewNumber(millis);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
+RUNTIME_FUNCTION(Runtime_DateParseString) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
-
CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
- FlattenString(str);
-
CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
+ RUNTIME_ASSERT(output->HasFastElements());
JSObject::EnsureCanContainHeapObjectElements(output);
RUNTIME_ASSERT(output->HasFastObjectElements());
+ Handle<FixedArray> output_array(FixedArray::cast(output->elements()));
+ RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
+ str = String::Flatten(str);
DisallowHeapAllocation no_gc;
- FixedArray* output_array = FixedArray::cast(output->elements());
- RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
bool result;
String::FlatContent str_content = str->GetFlatContent();
if (str_content.IsAscii()) {
result = DateParser::Parse(str_content.ToOneByteVector(),
- output_array,
+ *output_array,
isolate->unicode_cache());
} else {
ASSERT(str_content.IsTwoByte());
result = DateParser::Parse(str_content.ToUC16Vector(),
- output_array,
+ *output_array,
isolate->unicode_cache());
}
@@ -9619,65 +9693,86 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_DateLocalTimezone) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- int64_t time = isolate->date_cache()->EquivalentTime(static_cast<int64_t>(x));
- const char* zone = OS::LocalTimezone(static_cast<double>(time));
- return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
+ RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ x <= DateCache::kMaxTimeBeforeUTCInMs);
+ const char* zone =
+ isolate->date_cache()->LocalTimezone(static_cast<int64_t>(x));
+ Handle<String> result = isolate->factory()->NewStringFromUtf8(
+ CStrVector(zone)).ToHandleChecked();
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_DateToUTC) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ x <= DateCache::kMaxTimeBeforeUTCInMs);
int64_t time = isolate->date_cache()->ToUTC(static_cast<int64_t>(x));
- return isolate->heap()->NumberFromDouble(static_cast<double>(time));
+ return *isolate->factory()->NewNumber(static_cast<double>(time));
+}
+
+
+RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
+ HandleScope hs(isolate);
+ ASSERT(args.length() == 0);
+ if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
+ Handle<FixedArray> date_cache_version =
+ isolate->factory()->NewFixedArray(1, TENURED);
+ date_cache_version->set(0, Smi::FromInt(0));
+ isolate->eternal_handles()->CreateSingleton(
+ isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
+ }
+ Handle<FixedArray> date_cache_version =
+ Handle<FixedArray>::cast(isolate->eternal_handles()->GetSingleton(
+ EternalHandles::DATE_CACHE_VERSION));
+ // Return result as a JS array.
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObject(isolate->array_function());
+ JSArray::SetContent(Handle<JSArray>::cast(result), date_cache_version);
+ return *result;
}
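
Runtime_DateCacheVersion lazily creates a single eternal FixedArray holding a version counter; callers can cache timezone-dependent results next to the version they saw and recompute only when the number changes. The invalidation idea in miniature (all names invented):

    #include <iostream>
    #include <string>

    // One process-wide version cell, bumped when the date cache resets.
    static int date_cache_version = 0;

    void ResetDateCache() { ++date_cache_version; }

    // A consumer memoizes a derived value together with the version.
    std::string LocalTimezoneName() {
      static int cached_version = -1;
      static std::string cached_name;
      if (cached_version != date_cache_version) {
        cached_name = "UTC+0 (recomputed)";  // stand-in for a real lookup
        cached_version = date_cache_version;
      }
      return cached_name;
    }

    int main() {
      std::cout << LocalTimezoneName() << "\n";  // computes
      std::cout << LocalTimezoneName() << "\n";  // served from cache
      ResetDateCache();
      std::cout << LocalTimezoneName() << "\n";  // recomputes
    }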
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
+RUNTIME_FUNCTION(Runtime_GlobalReceiver) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- Object* global = args[0];
+ CONVERT_ARG_CHECKED(Object, global, 0);
if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
return JSGlobalObject::cast(global)->global_receiver();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAttachedGlobal) {
+RUNTIME_FUNCTION(Runtime_IsAttachedGlobal) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- Object* global = args[0];
+ CONVERT_ARG_CHECKED(Object, global, 0);
if (!global->IsJSGlobalObject()) return isolate->heap()->false_value();
return isolate->heap()->ToBoolean(
!JSGlobalObject::cast(global)->IsDetached());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
+RUNTIME_FUNCTION(Runtime_ParseJson) {
HandleScope scope(isolate);
- ASSERT_EQ(1, args.length());
+ ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- source = Handle<String>(FlattenGetString(source));
+ source = String::Flatten(source);
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
- if (source->IsSeqOneByteString()) {
- result = JsonParser<true>::Parse(source);
- } else {
- result = JsonParser<false>::Parse(source);
- }
- if (result.is_null()) {
- // Syntax error or stack overflow in scanner.
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ source->IsSeqOneByteString() ? JsonParser<true>::Parse(source)
+ : JsonParser<false>::Parse(source));
return *result;
}
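
Runtime_ParseJson still dispatches to JsonParser<true> or JsonParser<false> depending on whether the flattened source is a sequential one-byte string, so the scanner's hot loop is specialized per character width at compile time. The shape of that split, with an invented parser body:

    #include <cstdint>
    #include <iostream>
    #include <type_traits>

    // One template, two instantiations: the bool picks the char width, so
    // each loop compiles without per-character width checks.
    template <bool seq_one_byte>
    struct JsonParser {
      typedef typename std::conditional<seq_one_byte, uint8_t,
                                        uint16_t>::type Char;
      static size_t Parse(const Char* p, size_t length) {
        size_t digits = 0;
        for (size_t i = 0; i < length; i++) {
          if (p[i] >= '0' && p[i] <= '9') digits++;  // stand-in for real work
        }
        return digits;
      }
    };

    int main() {
      const uint8_t ascii[] = "[1,2,3]";
      std::cout << JsonParser<true>::Parse(ascii, 7) << "\n";      // 3
      const uint16_t two_byte[] = { '[', '4', ']' };
      std::cout << JsonParser<false>::Parse(two_byte, 3) << "\n";  // 1
    }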
@@ -9699,15 +9794,73 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
+// Walk up the stack expecting:
+// - Runtime_CompileString
+// - JSFunction callee (eval, Function constructor, etc.)
+// - call() (maybe)
+// - apply() (maybe)
+// - bind() (maybe)
+// - JSFunction caller (maybe)
+//
+// Returns true if the caller has the same security token as the callee,
+// or if an exit frame was hit, in which case we allow it through, as the
+// call could have come through the API.
+static bool TokensMatchForCompileString(Isolate* isolate) {
+ MaybeHandle<JSFunction> callee;
+ bool exit_handled = true;
+ bool tokens_match = true;
+ bool done = false;
+ for (StackFrameIterator it(isolate); !it.done() && !done; it.Advance()) {
+ StackFrame* raw_frame = it.frame();
+ if (!raw_frame->is_java_script()) {
+ if (raw_frame->is_exit()) exit_handled = false;
+ continue;
+ }
+ JavaScriptFrame* outer_frame = JavaScriptFrame::cast(raw_frame);
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ outer_frame->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0 && !done; --i) {
+ FrameSummary& frame = frames[i];
+ Handle<JSFunction> fun = frame.function();
+ // Capture the callee function.
+ if (callee.is_null()) {
+ callee = fun;
+ exit_handled = true;
+ continue;
+ }
+ // Exit condition.
+ Handle<Context> context(callee.ToHandleChecked()->context());
+ if (!fun->context()->HasSameSecurityTokenAs(*context)) {
+ tokens_match = false;
+ done = true;
+ continue;
+ }
+ // Skip bound functions in correct origin.
+ if (fun->shared()->bound()) {
+ exit_handled = true;
+ continue;
+ }
+ done = true;
+ }
+ }
+ return !exit_handled || tokens_match;
+}
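
The helper above walks the physical stack, captures the first JavaScript function as the callee, skips bound wrappers, and compares security tokens; hitting an exit frame lets the call through since it may have entered via the API. A simplified model over a vector of frames (all types are stand-ins for V8's frame machinery, and the bookkeeping is reduced):

    #include <cassert>
    #include <vector>

    struct Frame {
      int security_token;
      bool is_exit;   // non-JavaScript exit frame (e.g. an API call)
      bool is_bound;  // bound function in the correct origin: skip it
    };

    // True if the first real caller shares the callee's security token,
    // or if we ran into an exit frame before finding one.
    bool TokensMatch(const std::vector<Frame>& stack) {
      bool have_callee = false;
      int callee_token = 0;
      for (const Frame& frame : stack) {
        if (frame.is_exit) return true;  // could have come through the API
        if (!have_callee) {              // first JS frame is the callee
          callee_token = frame.security_token;
          have_callee = true;
          continue;
        }
        if (frame.is_bound) continue;    // skip bound wrappers
        return frame.security_token == callee_token;
      }
      return true;  // no caller found: allow
    }

    int main() {
      std::vector<Frame> same = { {1, false, false}, {1, false, false} };
      std::vector<Frame> cross = { {1, false, false}, {2, false, false} };
      assert(TokensMatch(same));
      assert(!TokensMatch(cross));
      return 0;
    }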
+
+
+RUNTIME_FUNCTION(Runtime_CompileString) {
HandleScope scope(isolate);
- ASSERT_EQ(2, args.length());
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1);
// Extract native context.
Handle<Context> context(isolate->context()->native_context());
+ // Filter cross security context calls.
+ if (!TokensMatchForCompileString(isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+
// Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
if (context->allow_code_gen_from_strings()->IsFalse() &&
@@ -9721,13 +9874,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
// Compile source string in the native context.
ParseRestriction restriction = function_literal_only
? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION;
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source, context, true, CLASSIC_MODE, restriction, RelocInfo::kNoPosition);
- RETURN_IF_EMPTY_HANDLE(isolate, shared);
- Handle<JSFunction> fun =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context,
- NOT_TENURED);
+ Handle<JSFunction> fun;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, fun,
+ Compiler::GetFunctionFromEval(
+ source, context, SLOPPY, restriction, RelocInfo::kNoPosition));
return *fun;
}
@@ -9735,7 +9886,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
static ObjectPair CompileGlobalEval(Isolate* isolate,
Handle<String> source,
Handle<Object> receiver,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
Handle<Context> context = Handle<Context>(isolate->context());
Handle<Context> native_context = Handle<Context>(context->native_context());
@@ -9748,28 +9899,23 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
native_context->ErrorMessageForCodeGenerationFromStrings();
isolate->Throw(*isolate->factory()->NewEvalError(
"code_gen_from_strings", HandleVector<Object>(&error_message, 1)));
- return MakePair(Failure::Exception(), NULL);
+ return MakePair(isolate->heap()->exception(), NULL);
}
// Deal with a normal eval call with a string argument. Compile it
// and return the compiled function bound in the local context.
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source,
- context,
- context->IsNativeContext(),
- language_mode,
- NO_PARSE_RESTRICTION,
- scope_position);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, shared,
- MakePair(Failure::Exception(), NULL));
- Handle<JSFunction> compiled =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, NOT_TENURED);
+ static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
+ Handle<JSFunction> compiled;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, compiled,
+ Compiler::GetFunctionFromEval(
+ source, context, strict_mode, restriction, scope_position),
+ MakePair(isolate->heap()->exception(), NULL));
return MakePair(*compiled, *receiver);
}
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
+RUNTIME_FUNCTION_RETURN_PAIR(RuntimeHidden_ResolvePossiblyDirectEval) {
HandleScope scope(isolate);
ASSERT(args.length() == 5);
@@ -9782,65 +9928,50 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
// the first argument without doing anything).
if (*callee != isolate->native_context()->global_eval_fun() ||
!args[1]->IsString()) {
- return MakePair(*callee, isolate->heap()->the_hole_value());
+ return MakePair(*callee, isolate->heap()->undefined_value());
}
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
+ ASSERT(args[3]->IsSmi());
+ ASSERT(args.smi_at(3) == SLOPPY || args.smi_at(3) == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(3));
ASSERT(args[4]->IsSmi());
return CompileGlobalEval(isolate,
args.at<String>(1),
args.at<Object>(2),
- language_mode,
+ strict_mode,
args.smi_at(4));
}
-// Allocate a block of memory in the given space (filled with a filler).
-// Used as a fall-back for generated code when the space is full.
-static MaybeObject* Allocate(Isolate* isolate,
- int size,
- bool double_align,
- AllocationSpace space) {
- Heap* heap = isolate->heap();
- RUNTIME_ASSERT(IsAligned(size, kPointerSize));
- RUNTIME_ASSERT(size > 0);
- RUNTIME_ASSERT(size <= heap->MaxRegularSpaceAllocationSize());
- HeapObject* allocation;
- { MaybeObject* maybe_allocation = heap->AllocateRaw(size, space, space);
- if (!maybe_allocation->To(&allocation)) return maybe_allocation;
- }
-#ifdef DEBUG
- MemoryChunk* chunk = MemoryChunk::FromAddress(allocation->address());
- ASSERT(chunk->owner()->identity() == space);
-#endif
- heap->CreateFillerObjectAt(allocation->address(), size);
- return allocation;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_AllocateInNewSpace) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_SMI_ARG_CHECKED(size, 0);
- return Allocate(isolate, size, false, NEW_SPACE);
+ RUNTIME_ASSERT(IsAligned(size, kPointerSize));
+ RUNTIME_ASSERT(size > 0);
+ RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
+ return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInTargetSpace) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(RuntimeHidden_AllocateInTargetSpace) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
+ RUNTIME_ASSERT(IsAligned(size, kPointerSize));
+ RUNTIME_ASSERT(size > 0);
+ RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationSpace space = AllocateTargetSpace::decode(flags);
- return Allocate(isolate, size, double_align, space);
+ return *isolate->factory()->NewFillerObject(size, double_align, space);
}
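
Both filler-object allocators now validate their size argument before touching the heap: pointer-aligned, positive, and no larger than a regular heap object. Those three RUNTIME_ASSERTs in isolation (the limit here is a placeholder; kPointerSize is taken as sizeof(void*)):

    #include <cassert>
    #include <cstddef>

    const size_t kPointerSize = sizeof(void*);
    const size_t kMaxRegularHeapObjectSize = 64 * 1024;  // illustrative

    // Power-of-two alignment check, as IsAligned does.
    bool IsAligned(size_t value, size_t alignment) {
      return (value & (alignment - 1)) == 0;
    }

    // Mirrors the guards in front of NewFillerObject above.
    bool IsValidFillerSize(size_t size) {
      return IsAligned(size, kPointerSize) && size > 0 &&
             size <= kMaxRegularHeapObjectSize;
    }

    int main() {
      assert(IsValidFillerSize(kPointerSize * 4));
      assert(!IsValidFillerSize(3));  // misaligned
      assert(!IsValidFillerSize(0));  // empty
      assert(!IsValidFillerSize(kMaxRegularHeapObjectSize + kPointerSize));
      return 0;
    }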
// Push an object onto an array of objects if it is not already in the
// array. Returns true if the element was pushed onto the array and
// false otherwise.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
+RUNTIME_FUNCTION(Runtime_PushIfAbsent) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
@@ -9853,10 +9984,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
}
// Strict not needed. Used for cycle detection in Array join implementation.
- RETURN_IF_EMPTY_HANDLE(isolate, JSObject::SetFastElement(array, length,
- element,
- kNonStrictMode,
- true));
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ JSObject::SetFastElement(array, length, element, SLOPPY, true));
return isolate->heap()->true_value();
}
@@ -9904,14 +10034,14 @@ class ArrayConcatVisitor {
// getters on the arrays increasing the length of later arrays
// during iteration.
// This shouldn't happen in anything but pathological cases.
- SetDictionaryMode(index);
+ SetDictionaryMode();
// Fall-through to dictionary mode.
}
ASSERT(!fast_elements_);
Handle<SeededNumberDictionary> dict(
SeededNumberDictionary::cast(*storage_));
Handle<SeededNumberDictionary> result =
- isolate_->factory()->DictionaryAtNumberPut(dict, index, elm);
+ SeededNumberDictionary::AtNumberPut(dict, index, elm);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
@@ -9925,6 +10055,14 @@ class ArrayConcatVisitor {
} else {
index_offset_ += delta;
}
+ // If the initial length estimate was off (see special case in visit()),
+ // but the array blowing the limit didn't contain elements beyond the
+ // provided-for index range, go to dictionary mode now.
+ if (fast_elements_ &&
+ index_offset_ >
+ static_cast<uint32_t>(FixedArrayBase::cast(*storage_)->length())) {
+ SetDictionaryMode();
+ }
}
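
With the added check, ArrayConcatVisitor flips to dictionary mode not only when a store lands past the fast storage but also when index_offset_ itself outruns the initial length estimate. The fast-array-with-sparse-fallback idea, modeled with a vector plus a map (illustrative only; V8 uses a SeededNumberDictionary):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <vector>

    class ConcatStorage {
     public:
      explicit ConcatStorage(size_t estimate)
          : fast_(true), dense_(estimate, 0) {}

      void Visit(uint32_t index, int value) {
        if (fast_ && index >= dense_.size()) SetDictionaryMode();
        if (fast_) dense_[index] = value;
        else       sparse_[index] = value;  // dictionary: index -> value
      }

      bool fast() const { return fast_; }

     private:
      // Copy the existing dense elements into the map, as the visitor
      // copies its FixedArray into the dictionary.
      void SetDictionaryMode() {
        for (uint32_t i = 0; i < dense_.size(); i++) {
          if (dense_[i] != 0) sparse_[i] = dense_[i];  // 0 plays "the hole"
        }
        dense_.clear();
        fast_ = false;
      }

      bool fast_;
      std::vector<int> dense_;
      std::map<uint32_t, int> sparse_;
    };

    int main() {
      ConcatStorage storage(4);  // initial length estimate
      storage.Visit(1, 10);      // fast path
      storage.Visit(100, 20);    // beyond the estimate: go to dictionary
      std::cout << (storage.fast() ? "fast" : "dictionary") << "\n";
    }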
bool exceeds_array_limit() {
@@ -9935,14 +10073,9 @@ class ArrayConcatVisitor {
Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
Handle<Object> length =
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
- Handle<Map> map;
- if (fast_elements_) {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- FAST_HOLEY_ELEMENTS);
- } else {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- DICTIONARY_ELEMENTS);
- }
+ Handle<Map> map = JSObject::GetElementsTransitionMap(
+ array,
+ fast_elements_ ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
array->set_map(*map);
array->set_length(*length);
array->set_elements(*storage_);
@@ -9951,19 +10084,18 @@ class ArrayConcatVisitor {
private:
// Convert storage to dictionary mode.
- void SetDictionaryMode(uint32_t index) {
+ void SetDictionaryMode() {
ASSERT(fast_elements_);
Handle<FixedArray> current_storage(*storage_);
Handle<SeededNumberDictionary> slow_storage(
- isolate_->factory()->NewSeededNumberDictionary(
- current_storage->length()));
+ SeededNumberDictionary::New(isolate_, current_storage->length()));
uint32_t current_length = static_cast<uint32_t>(current_storage->length());
for (uint32_t i = 0; i < current_length; i++) {
HandleScope loop_scope(isolate_);
Handle<Object> element(current_storage->get(i), isolate_);
if (!element->IsTheHole()) {
Handle<SeededNumberDictionary> new_storage =
- isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
+ SeededNumberDictionary::AtNumberPut(slow_storage, i, element);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
@@ -9975,8 +10107,7 @@ class ArrayConcatVisitor {
}
inline void clear_storage() {
- isolate_->global_handles()->Destroy(
- Handle<Object>::cast(storage_).location());
+ GlobalHandles::Destroy(Handle<Object>::cast(storage_).location());
}
inline void set_storage(FixedArray* storage) {
@@ -10041,16 +10172,13 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
}
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
// External arrays are always dense.
return length;
}
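
The nine hand-written external-array cases collapse into a single X-macro: TYPED_ARRAYS invokes the caller-supplied TYPED_ARRAY_CASE once per (Type, type, TYPE, ctype, size) tuple, so every switch stays in sync with the master list. A reduced example of the technique, with a three-entry subset and a four-field tuple:

    #include <iostream>

    // The master list: one entry per typed-array kind (subset shown).
    #define TYPED_ARRAYS(V)        \
      V(Uint8, uint8, UINT8, 1)    \
      V(Int32, int32, INT32, 4)    \
      V(Float64, float64, FLOAT64, 8)

    // First expansion site: generate the enumerators.
    enum ElementsKind {
    #define TYPED_ARRAY_CASE(Type, type, TYPE, size) EXTERNAL_##TYPE##_ELEMENTS,
      TYPED_ARRAYS(TYPED_ARRAY_CASE)
    #undef TYPED_ARRAY_CASE
    };

    // Second expansion site: the same list stamps out switch cases.
    int ElementSize(ElementsKind kind) {
      switch (kind) {
    #define TYPED_ARRAY_CASE(Type, type, TYPE, size) \
      case EXTERNAL_##TYPE##_ELEMENTS:               \
        return size;
        TYPED_ARRAYS(TYPED_ARRAY_CASE)
    #undef TYPED_ARRAY_CASE
      }
      return 0;
    }

    int main() {
      std::cout << ElementSize(EXTERNAL_INT32_ELEMENTS) << "\n";  // prints 4
    }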
@@ -10158,51 +10286,16 @@ static void CollectElementIndices(Handle<JSObject> object,
default: {
int dense_elements_length;
switch (kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- dense_elements_length =
- ExternalPixelArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalByteArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedByteArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalShortArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedShortArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_INT_ELEMENTS: {
- dense_elements_length =
- ExternalIntArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedIntArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- dense_elements_length =
- ExternalFloatArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_DOUBLE_ELEMENTS: {
- dense_elements_length =
- ExternalDoubleArray::cast(object->elements())->length();
- break;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: { \
+ dense_elements_length = \
+ External##Type##Array::cast(object->elements())->length(); \
+ break; \
}
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
default:
UNREACHABLE();
dense_elements_length = 0;
@@ -10264,8 +10357,10 @@ static bool IterateElements(Isolate* isolate,
} else if (JSReceiver::HasElement(receiver, j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
- element_value = Object::GetElement(isolate, receiver, j);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_value,
+ Object::GetElement(isolate, receiver, j),
+ false);
visitor->visit(j, element_value);
}
}
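Throughout these hunks, RETURN_IF_EMPTY_HANDLE_VALUE checks give way to ASSIGN_RETURN_ON_EXCEPTION_VALUE, which assigns from a MaybeHandle and bails out with a caller-chosen value when an exception is pending. A simplified stand-alone model of that idiom (the real V8 macro also takes the isolate, and MaybeHandle wraps GC-safe handles, not raw values):

#include <cstdio>

template <typename T>
class MaybeHandle {
 public:
  MaybeHandle() : has_value_(false), value_() {}          // "exception" state
  explicit MaybeHandle(T value) : has_value_(true), value_(value) {}
  bool ToHandle(T* out) const {                           // false on exception
    if (has_value_) *out = value_;
    return has_value_;
  }
 private:
  bool has_value_;
  T value_;
};

// Evaluate `call`; if it signalled an exception, leave the enclosing function
// with `retval` instead of continuing with an unusable value.
#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(dst, call, retval) \
  do {                                                      \
    if (!(call).ToHandle(&dst)) return (retval);            \
  } while (false)

static MaybeHandle<int> GetElement(int index) {
  if (index < 0) return MaybeHandle<int>();  // pretend an exception was thrown
  return MaybeHandle<int>(index * 10);
}

static bool VisitElement(int index) {
  int element_value = 0;
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(element_value, GetElement(index), false);
  std::printf("visit %d -> %d\n", index, element_value);
  return true;
}

int main() {
  bool ok = VisitElement(3);       // prints "visit 3 -> 30"
  bool failed = VisitElement(-1);  // bails out before visiting
  std::printf("%d %d\n", ok, failed);  // 1 0
  return 0;
}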
@@ -10273,8 +10368,14 @@ static bool IterateElements(Isolate* isolate,
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
+ // Empty array is FixedArray but not FixedDoubleArray.
+ if (length == 0) break;
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
+ if (receiver->elements()->IsFixedArray()) {
+ ASSERT(receiver->elements()->length() == 0);
+ break;
+ }
Handle<FixedDoubleArray> elements(
FixedDoubleArray::cast(receiver->elements()));
int fast_length = static_cast<int>(length);
@@ -10289,9 +10390,11 @@ static bool IterateElements(Isolate* isolate,
} else if (JSReceiver::HasElement(receiver, j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
- Handle<Object> element_value =
- Object::GetElement(isolate, receiver, j);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
+ Handle<Object> element_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_value,
+ Object::GetElement(isolate, receiver, j),
+ false);
visitor->visit(j, element_value);
}
}
@@ -10309,8 +10412,11 @@ static bool IterateElements(Isolate* isolate,
while (j < n) {
HandleScope loop_scope(isolate);
uint32_t index = indices[j];
- Handle<Object> element = Object::GetElement(isolate, receiver, index);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
+ Handle<Object> element;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element,
+ Object::GetElement(isolate, receiver, index),
+ false);
visitor->visit(index, element);
// Skip to next different index (i.e., omit duplicates).
do {
@@ -10319,8 +10425,8 @@ static bool IterateElements(Isolate* isolate,
}
break;
}
- case EXTERNAL_PIXEL_ELEMENTS: {
- Handle<ExternalPixelArray> pixels(ExternalPixelArray::cast(
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS: {
+ Handle<ExternalUint8ClampedArray> pixels(ExternalUint8ClampedArray::cast(
receiver->elements()));
for (uint32_t j = 0; j < length; j++) {
Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
@@ -10328,43 +10434,43 @@ static bool IterateElements(Isolate* isolate,
}
break;
}
- case EXTERNAL_BYTE_ELEMENTS: {
- IterateExternalArrayElements<ExternalByteArray, int8_t>(
+ case EXTERNAL_INT8_ELEMENTS: {
+ IterateExternalArrayElements<ExternalInt8Array, int8_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
+ case EXTERNAL_UINT8_ELEMENTS: {
+ IterateExternalArrayElements<ExternalUint8Array, uint8_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_SHORT_ELEMENTS: {
- IterateExternalArrayElements<ExternalShortArray, int16_t>(
+ case EXTERNAL_INT16_ELEMENTS: {
+ IterateExternalArrayElements<ExternalInt16Array, int16_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
+ case EXTERNAL_UINT16_ELEMENTS: {
+ IterateExternalArrayElements<ExternalUint16Array, uint16_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_INT_ELEMENTS: {
- IterateExternalArrayElements<ExternalIntArray, int32_t>(
+ case EXTERNAL_INT32_ELEMENTS: {
+ IterateExternalArrayElements<ExternalInt32Array, int32_t>(
isolate, receiver, true, false, visitor);
break;
}
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
+ case EXTERNAL_UINT32_ELEMENTS: {
+ IterateExternalArrayElements<ExternalUint32Array, uint32_t>(
isolate, receiver, true, false, visitor);
break;
}
- case EXTERNAL_FLOAT_ELEMENTS: {
- IterateExternalArrayElements<ExternalFloatArray, float>(
+ case EXTERNAL_FLOAT32_ELEMENTS: {
+ IterateExternalArrayElements<ExternalFloat32Array, float>(
isolate, receiver, false, false, visitor);
break;
}
- case EXTERNAL_DOUBLE_ELEMENTS: {
- IterateExternalArrayElements<ExternalDoubleArray, double>(
+ case EXTERNAL_FLOAT64_ELEMENTS: {
+ IterateExternalArrayElements<ExternalFloat64Array, double>(
isolate, receiver, false, false, visitor);
break;
}
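The renamed cases all funnel into the same templated walker, parameterized on the backing-store class and its C element type, so only the type names change in this hunk. A rough stand-in for the shape of that dispatch, using std::vector in place of V8's external array classes (the real IterateExternalArrayElements has additional flags and a visitor object):

#include <cstdint>
#include <cstdio>
#include <vector>

template <typename ExternalArray, typename ElementType>
void IterateExternalArrayElements(const ExternalArray& array,
                                  void (*visit)(uint32_t, ElementType)) {
  // Dense external storage: every index is present, no hole checks needed.
  for (uint32_t j = 0; j < array.size(); j++) visit(j, array[j]);
}

static void PrintInt(uint32_t index, int32_t value) {
  std::printf("[%u] = %d\n", index, value);
}

int main() {
  std::vector<int32_t> int32_elements = {1, -2, 3};
  IterateExternalArrayElements<std::vector<int32_t>, int32_t>(int32_elements,
                                                              PrintInt);
  return 0;
}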
@@ -10383,7 +10489,7 @@ static bool IterateElements(Isolate* isolate,
* TODO(581): Fix non-compliance for very large concatenations and update to
* following the ECMAScript 5 specification.
*/
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
+RUNTIME_FUNCTION(Runtime_ArrayConcat) {
HandleScope handle_scope(isolate);
ASSERT(args.length() == 1);
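From here on, every runtime entry point drops the explicit MaybeObject* return: the RUNTIME_FUNCTION macro now hides the Object* return type, and failure is signalled with a distinguished exception sentinel rather than a Failure wrapper. A toy model of that calling convention (all names are stand-ins, not V8's actual macro or heap):

#include <cstdio>

struct Object {};
static Object the_exception_sentinel;  // plays the role of heap()->exception()
static Object some_result;

#define RUNTIME_FUNCTION(Name) static Object* Name(int argc)

RUNTIME_FUNCTION(Runtime_Demo) {
  if (argc != 1) return &the_exception_sentinel;  // "throw"
  return &some_result;                            // normal completion
}

int main() {
  bool ok = Runtime_Demo(1) == &some_result;
  bool threw = Runtime_Demo(2) == &the_exception_sentinel;
  std::printf("%d %d\n", ok, threw);  // 1 1
  return 0;
}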
@@ -10450,12 +10556,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
// dictionary.
bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length;
- Handle<FixedArray> storage;
- if (fast_case) {
- if (kind == FAST_DOUBLE_ELEMENTS) {
+ if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
+ Handle<FixedArrayBase> storage =
+ isolate->factory()->NewFixedDoubleArray(estimate_result_length);
+ int j = 0;
+ if (estimate_result_length > 0) {
Handle<FixedDoubleArray> double_storage =
- isolate->factory()->NewFixedDoubleArray(estimate_result_length);
- int j = 0;
+ Handle<FixedDoubleArray>::cast(storage);
bool failure = false;
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj(elements->get(i), isolate);
@@ -10471,8 +10578,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
switch (array->map()->elements_kind()) {
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- // Empty fixed array indicates that there are no elements.
- if (array->elements()->IsFixedArray()) break;
+ // Empty array is FixedArray but not FixedDoubleArray.
+ if (length == 0) break;
FixedDoubleArray* elements =
FixedDoubleArray::cast(array->elements());
for (uint32_t i = 0; i < length; i++) {
@@ -10511,15 +10618,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
}
if (failure) break;
}
- Handle<JSArray> array = isolate->factory()->NewJSArray(0);
- Smi* length = Smi::FromInt(j);
- Handle<Map> map;
- map = isolate->factory()->GetElementsTransitionMap(array, kind);
- array->set_map(*map);
- array->set_length(length);
- array->set_elements(*double_storage);
- return *array;
}
+ Handle<JSArray> array = isolate->factory()->NewJSArray(0);
+ Smi* length = Smi::FromInt(j);
+ Handle<Map> map;
+ map = JSObject::GetElementsTransitionMap(array, kind);
+ array->set_map(*map);
+ array->set_length(length);
+ array->set_elements(*storage);
+ return *array;
+ }
+
+ Handle<FixedArray> storage;
+ if (fast_case) {
// The backing storage array must have non-existing elements to preserve
// holes across concat operations.
storage = isolate->factory()->NewFixedArrayWithHoles(
@@ -10529,7 +10640,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
- isolate->factory()->NewSeededNumberDictionary(at_least_space_for));
+ SeededNumberDictionary::New(isolate, at_least_space_for));
}
ArrayConcatVisitor visitor(isolate, storage, fast_case);
@@ -10539,7 +10650,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
if (obj->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(obj);
if (!IterateElements(isolate, array, &visitor)) {
- return Failure::Exception();
+ return isolate->heap()->exception();
}
} else {
visitor.visit(0, obj);
@@ -10558,7 +10669,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
// This will not allocate (flatten the string), but it may run
// very slowly for very deeply nested ConsStrings. For debugging use only.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
+RUNTIME_FUNCTION(Runtime_GlobalPrint) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -10578,7 +10689,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
// and are followed by non-existing element. Does not change the length
// property.
// Returns the number of non-undefined elements collected.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
+// Returns -1 if hole removal is not supported by this method.
+RUNTIME_FUNCTION(Runtime_RemoveArrayHoles) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
@@ -10588,44 +10700,39 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
// Move contents of argument 0 (an array) to argument 1 (an array)
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSArray, from, 0);
- CONVERT_ARG_CHECKED(JSArray, to, 1);
- from->ValidateElements();
- to->ValidateElements();
- FixedArrayBase* new_elements = from->elements();
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, from, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, to, 1);
+ JSObject::ValidateElements(from);
+ JSObject::ValidateElements(to);
+
+ Handle<FixedArrayBase> new_elements(from->elements());
ElementsKind from_kind = from->GetElementsKind();
- MaybeObject* maybe_new_map;
- maybe_new_map = to->GetElementsTransitionMap(isolate, from_kind);
- Object* new_map;
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- to->set_map_and_elements(Map::cast(new_map), new_elements);
+ Handle<Map> new_map = JSObject::GetElementsTransitionMap(to, from_kind);
+ JSObject::SetMapAndElements(to, new_map, new_elements);
to->set_length(from->length());
- Object* obj;
- { MaybeObject* maybe_obj = from->ResetElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+
+ JSObject::ResetElements(from);
from->set_length(Smi::FromInt(0));
- to->ValidateElements();
- return to;
+
+ JSObject::ValidateElements(to);
+ return *to;
}
// How many elements does this object/array have?
-RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
+RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_CHECKED(JSArray, object, 0);
HeapObject* elements = object->elements();
if (elements->IsDictionary()) {
int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
return Smi::FromInt(result);
- } else if (object->IsJSArray()) {
- return JSArray::cast(object)->length();
} else {
- return Smi::FromInt(FixedArray::cast(elements)->length());
+ return object->length();
}
}
@@ -10635,7 +10742,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
// or undefined) or a number representing the positive length of an interval
// starting at index 0.
// Intervals can span over some keys that are not in the object.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
+RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
@@ -10652,10 +10759,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
}
Handle<JSObject> current = Handle<JSObject>::cast(p);
Handle<FixedArray> current_keys =
- isolate->factory()->NewFixedArray(
- current->NumberOfLocalElements(NONE));
- current->GetLocalElementKeys(*current_keys, NONE);
- keys = UnionOfKeys(keys, current_keys);
+ isolate->factory()->NewFixedArray(current->NumberOfOwnElements(NONE));
+ current->GetOwnElementKeys(*current_keys, NONE);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, FixedArray::UnionOfKeys(keys, current_keys));
}
// Erase any keys >= length.
// TODO(adamk): Remove this step when the contract of %GetArrayKeys
@@ -10665,15 +10772,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
}
return *isolate->factory()->NewJSArrayWithElements(keys);
} else {
- ASSERT(array->HasFastSmiOrObjectElements() ||
- array->HasFastDoubleElements());
+ RUNTIME_ASSERT(array->HasFastSmiOrObjectElements() ||
+ array->HasFastDoubleElements());
uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
}
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
+RUNTIME_FUNCTION(Runtime_LookupAccessor) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
@@ -10681,18 +10788,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
CONVERT_SMI_ARG_CHECKED(flag, 2);
AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
if (!receiver->IsJSObject()) return isolate->heap()->undefined_value();
- Handle<Object> result =
- JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component));
return *result;
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
+RUNTIME_FUNCTION(Runtime_DebugBreak) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- return Execution::DebugBreakHelper(isolate);
+ isolate->debug()->HandleDebugBreak();
+ return isolate->heap()->undefined_value();
}
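Note the split visible here and in the next hunk: Runtime_DebugBreak now handles the break synchronously, while Runtime_Break merely requests one via a stack-guard interrupt flag that is polled at the next safe point. A stand-alone sketch of the request/poll half, assuming a toy StackGuard rather than V8's:

#include <atomic>
#include <cstdio>

class StackGuard {
 public:
  void RequestDebugBreak() { debug_break_requested_.store(true); }
  bool CheckAndClearDebugBreak() {
    return debug_break_requested_.exchange(false);
  }
 private:
  std::atomic<bool> debug_break_requested_{false};
};

int main() {
  StackGuard guard;
  guard.RequestDebugBreak();  // Runtime_Break: just set a flag and return
  // ... execution continues until the next interrupt check ...
  if (guard.CheckAndClearDebugBreak()) {
    std::printf("deferred break handled here\n");
  }
  return 0;
}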
@@ -10712,86 +10820,72 @@ static StackFrame::Id UnwrapFrameId(int wrapped) {
// args[0]: debug event listener function to set or null or undefined for
// clearing the event listener function
// args[1]: object supplied during callback
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
+RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
RUNTIME_ASSERT(args[0]->IsJSFunction() ||
args[0]->IsUndefined() ||
args[0]->IsNull());
- Handle<Object> callback = args.at<Object>(0);
- Handle<Object> data = args.at<Object>(1);
- isolate->debugger()->SetEventListener(callback, data);
+ CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, data, 1);
+ isolate->debug()->SetEventListener(callback, data);
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
+RUNTIME_FUNCTION(Runtime_Break) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- isolate->stack_guard()->DebugBreak();
+ isolate->stack_guard()->RequestDebugBreak();
return isolate->heap()->undefined_value();
}
-static MaybeObject* DebugLookupResultValue(Heap* heap,
- Object* receiver,
- Name* name,
- LookupResult* result,
- bool* caught_exception) {
- Object* value;
+static Handle<Object> DebugLookupResultValue(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ LookupResult* result,
+ bool* has_caught = NULL) {
+ Handle<Object> value = isolate->factory()->undefined_value();
+ if (!result->IsFound()) return value;
switch (result->type()) {
case NORMAL:
- value = result->holder()->GetNormalizedProperty(result);
- if (value->IsTheHole()) {
- return heap->undefined_value();
- }
- return value;
- case FIELD: {
- Object* value;
- MaybeObject* maybe_value =
- JSObject::cast(result->holder())->FastPropertyAt(
- result->representation(),
- result->GetFieldIndex().field_index());
- if (!maybe_value->To(&value)) return maybe_value;
- if (value->IsTheHole()) {
- return heap->undefined_value();
- }
- return value;
- }
+ value = JSObject::GetNormalizedProperty(
+ handle(result->holder(), isolate), result);
+ break;
+ case FIELD:
+ value = JSObject::FastPropertyAt(handle(result->holder(), isolate),
+ result->representation(),
+ result->GetFieldIndex());
+ break;
case CONSTANT:
- return result->GetConstant();
+ return handle(result->GetConstant(), isolate);
case CALLBACKS: {
- Object* structure = result->GetCallbackObject();
- if (structure->IsForeign() || structure->IsAccessorInfo()) {
- Isolate* isolate = heap->isolate();
- HandleScope scope(isolate);
- Handle<Object> value = JSObject::GetPropertyWithCallback(
- handle(result->holder(), isolate),
- handle(receiver, isolate),
- handle(structure, isolate),
- handle(name, isolate));
- if (value.is_null()) {
- MaybeObject* exception = heap->isolate()->pending_exception();
- heap->isolate()->clear_pending_exception();
- if (caught_exception != NULL) *caught_exception = true;
- return exception;
+ Handle<Object> structure(result->GetCallbackObject(), isolate);
+ ASSERT(!structure->IsForeign());
+ if (structure->IsAccessorInfo()) {
+ MaybeHandle<Object> obj = JSObject::GetPropertyWithAccessor(
+ receiver, name, handle(result->holder(), isolate), structure);
+ if (!obj.ToHandle(&value)) {
+ value = handle(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
+ if (has_caught != NULL) *has_caught = true;
+ return value;
}
- return *value;
- } else {
- return heap->undefined_value();
}
+ break;
}
case INTERCEPTOR:
- case TRANSITION:
- return heap->undefined_value();
case HANDLER:
+ break;
case NONEXISTENT:
UNREACHABLE();
- return heap->undefined_value();
+ break;
}
- UNREACHABLE(); // keep the compiler happy
- return heap->undefined_value();
+ ASSERT(!value->IsTheHole() || result->IsReadOnly());
+ return value->IsTheHole()
+ ? Handle<Object>::cast(isolate->factory()->undefined_value()) : value;
}
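DebugLookupResultValue is now fully handlified: it always returns a usable Handle<Object> (the exception object itself when an accessor throws) and reports that case through an optional out parameter callers may omit. A plain C++ sketch of that contract with stand-in types:

#include <cstdio>

struct Value { int v; };

static Value LookupValue(int key, bool* has_caught = nullptr) {
  if (key < 0) {                        // pretend the getter threw
    if (has_caught != nullptr) *has_caught = true;
    return Value{-1};                   // the "exception object" is returned
  }
  return Value{key * 2};
}

int main() {
  bool has_caught = false;
  Value ok = LookupValue(21);             // callers may ignore the error flag
  Value err = LookupValue(-5, &has_caught);
  std::printf("%d %d caught=%d\n", ok.v, err.v, has_caught);  // 42 -1 caught=1
  return 0;
}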
@@ -10807,7 +10901,7 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
// 4: Setter function if defined
// Items 2-4 are only filled if the property has either a getter or a setter
// defined through __defineGetter__ and/or __defineSetter__.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
+RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -10822,7 +10916,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
// could have the assumption that its own native context is the current
// context and not some internal debugger context.
SaveContext save(isolate);
- if (isolate->debug()->InDebugger()) {
+ if (isolate->debug()->in_debug_scope()) {
isolate->set_context(*isolate->debug()->debugger_entry()->GetContext());
}
@@ -10838,27 +10932,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
- Object* element_or_char;
- { MaybeObject* maybe_element_or_char =
- Runtime::GetElementOrCharAt(isolate, obj, index);
- if (!maybe_element_or_char->ToObject(&element_or_char)) {
- return maybe_element_or_char;
- }
- }
- details->set(0, element_or_char);
+ Handle<Object> element_or_char;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, element_or_char,
+ Runtime::GetElementOrCharAt(isolate, obj, index));
+ details->set(0, *element_or_char);
details->set(
1, PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
}
// Find the number of objects making up this.
- int length = LocalPrototypeChainLength(*obj);
+ int length = OwnPrototypeChainLength(*obj);
- // Try local lookup on each of the objects.
+ // Try own lookup on each of the objects.
Handle<JSObject> jsproto = obj;
for (int i = 0; i < length; i++) {
LookupResult result(isolate);
- jsproto->LocalLookup(*name, &result);
+ jsproto->LookupOwn(name, &result);
if (result.IsFound()) {
// LookupResult is not GC safe as it holds raw object pointers.
// GC can happen later in this code so put the required fields into
@@ -10868,29 +10959,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
result_callback_obj = Handle<Object>(result.GetCallbackObject(),
isolate);
}
- Smi* property_details = result.GetPropertyDetails().AsSmi();
- // DebugLookupResultValue can cause GC so details from LookupResult needs
- // to be copied to handles before this.
- bool caught_exception = false;
- Object* raw_value;
- { MaybeObject* maybe_raw_value =
- DebugLookupResultValue(isolate->heap(), *obj, *name,
- &result, &caught_exception);
- if (!maybe_raw_value->ToObject(&raw_value)) return maybe_raw_value;
- }
- Handle<Object> value(raw_value, isolate);
+
+
+ bool has_caught = false;
+ Handle<Object> value = DebugLookupResultValue(
+ isolate, obj, name, &result, &has_caught);
// If the callback object is a fixed array then it contains JavaScript
// getter and/or setter.
- bool hasJavaScriptAccessors = result.IsPropertyCallbacks() &&
- result_callback_obj->IsAccessorPair();
+ bool has_js_accessors = result.IsPropertyCallbacks() &&
+ result_callback_obj->IsAccessorPair();
Handle<FixedArray> details =
- isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
+ isolate->factory()->NewFixedArray(has_js_accessors ? 5 : 2);
details->set(0, *value);
- details->set(1, property_details);
- if (hasJavaScriptAccessors) {
+ details->set(1, result.GetPropertyDetails().AsSmi());
+ if (has_js_accessors) {
AccessorPair* accessors = AccessorPair::cast(*result_callback_obj);
- details->set(2, isolate->heap()->ToBoolean(caught_exception));
+ details->set(2, isolate->heap()->ToBoolean(has_caught));
details->set(3, accessors->GetComponent(ACCESSOR_GETTER));
details->set(4, accessors->GetComponent(ACCESSOR_SETTER));
}
@@ -10906,7 +10991,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
+RUNTIME_FUNCTION(Runtime_DebugGetProperty) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -10915,17 +11000,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
LookupResult result(isolate);
- obj->Lookup(*name, &result);
- if (result.IsFound()) {
- return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
- }
- return isolate->heap()->undefined_value();
+ obj->Lookup(name, &result);
+ return *DebugLookupResultValue(isolate, obj, name, &result);
}
// Return the property type calculated from the property details.
// args[0]: smi with property details.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
+RUNTIME_FUNCTION(Runtime_DebugPropertyTypeFromDetails) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
@@ -10935,7 +11017,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
// Return the property attribute calculated from the property details.
// args[0]: smi with property details.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
+RUNTIME_FUNCTION(Runtime_DebugPropertyAttributesFromDetails) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
@@ -10945,7 +11027,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
// Return the property insertion index calculated from the property details.
// args[0]: smi with property details.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
+RUNTIME_FUNCTION(Runtime_DebugPropertyIndexFromDetails) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
@@ -10957,17 +11039,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
// Return property value from named interceptor.
// args[0]: object
// args[1]: property name
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
+RUNTIME_FUNCTION(Runtime_DebugNamedInterceptorPropertyValue) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(obj->HasNamedInterceptor());
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- PropertyAttributes attributes;
- Handle<Object> result =
- JSObject::GetPropertyWithInterceptor(obj, obj, name, &attributes);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ LookupIterator it(obj, name, obj);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSObject::GetProperty(&it));
return *result;
}
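The one-shot GetPropertyWithInterceptor call is replaced by a LookupIterator, a state machine that walks the possible sources of a property and stops at the first hit. A loose, illustrative model of the idea (V8's real iterator has many more states; here interceptor results are simply consulted before own properties):

#include <cstdio>
#include <map>
#include <string>
#include <utility>

class LookupIterator {
 public:
  LookupIterator(const std::map<std::string, int>& own,
                 const std::map<std::string, int>& interceptor,
                 std::string name)
      : own_(own), interceptor_(interceptor), name_(std::move(name)) {}

  bool GetProperty(int* out) const {
    auto it = interceptor_.find(name_);  // interceptors are consulted...
    if (it != interceptor_.end()) { *out = it->second; return true; }
    it = own_.find(name_);               // ...before own properties
    if (it != own_.end()) { *out = it->second; return true; }
    return false;                        // not found anywhere
  }

 private:
  const std::map<std::string, int>& own_;
  const std::map<std::string, int>& interceptor_;
  std::string name_;
};

int main() {
  std::map<std::string, int> own = {{"x", 1}};
  std::map<std::string, int> interceptor = {{"y", 2}};
  int value = 0;
  LookupIterator it(own, interceptor, "y");
  if (it.GetProperty(&value)) std::printf("y = %d\n", value);  // y = 2
  return 0;
}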
@@ -10975,42 +11057,40 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
// Return element value from indexed interceptor.
// args[0]: object
// args[1]: index
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
+RUNTIME_FUNCTION(Runtime_DebugIndexedInterceptorElementValue) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(obj->HasIndexedInterceptor());
CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSObject::GetElementWithInterceptor(obj, obj, index));
+ return *result;
+}
+
- return obj->GetElementWithInterceptor(*obj, index);
+static bool CheckExecutionState(Isolate* isolate, int break_id) {
+ return !isolate->debug()->debug_context().is_null() &&
+ isolate->debug()->break_id() != 0 &&
+ isolate->debug()->break_id() == break_id;
}
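Break-id validation moves into the shared predicate above, so each debug runtime entry below asserts CheckExecutionState directly instead of re-invoking Runtime_CheckExecutionState with the whole argument array. A compact model of the consolidated pattern (RUNTIME_ASSERT here is a stand-in that returns false rather than throwing):

#include <cstdio>

struct Debug { int break_id; };

static bool CheckExecutionState(const Debug& debug, int break_id) {
  return debug.break_id != 0 && debug.break_id == break_id;
}

#define RUNTIME_ASSERT(cond) \
  do { if (!(cond)) return false; } while (false)

static bool Runtime_GetFrameCount(const Debug& debug, int break_id) {
  RUNTIME_ASSERT(CheckExecutionState(debug, break_id));
  std::printf("counting frames for break %d\n", break_id);
  return true;
}

int main() {
  Debug debug{7};
  bool ok = Runtime_GetFrameCount(debug, 7);     // valid break id
  bool stale = Runtime_GetFrameCount(debug, 8);  // rejected: stale break id
  std::printf("%d %d\n", ok, stale);  // 1 0
  return 0;
}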
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
+RUNTIME_FUNCTION(Runtime_CheckExecutionState) {
SealHandleScope shs(isolate);
- ASSERT(args.length() >= 1);
+ ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- // Check that the break id is valid.
- if (isolate->debug()->break_id() == 0 ||
- break_id != isolate->debug()->break_id()) {
- return isolate->Throw(
- isolate->heap()->illegal_execution_state_string());
- }
-
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
return isolate->heap()->true_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) {
+RUNTIME_FUNCTION(Runtime_GetFrameCount) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
-
- // Check arguments.
- Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
// Count all frames which are relevant to debugging stack trace.
int n = 0;
@@ -11144,16 +11224,12 @@ static SaveContext* FindSavedContextForFrame(Isolate* isolate,
// Arguments name, value
// Locals name, value
// Return value if any
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
+RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
Heap* heap = isolate->heap();
@@ -11201,31 +11277,44 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
ASSERT(*scope_info != ScopeInfo::Empty(isolate));
// Get the locals names and values into a temporary array.
- //
- // TODO(1240907): Hide compiler-introduced stack variables
- // (e.g. .result)? For users of the debugger, they will probably be
- // confusing.
+ int local_count = scope_info->LocalCount();
+ for (int slot = 0; slot < scope_info->LocalCount(); ++slot) {
+ // Hide compiler-introduced temporary variables, whether on the stack or on
+ // the context.
+ if (scope_info->LocalIsSynthetic(slot))
+ local_count--;
+ }
+
Handle<FixedArray> locals =
- isolate->factory()->NewFixedArray(scope_info->LocalCount() * 2);
+ isolate->factory()->NewFixedArray(local_count * 2);
// Fill in the values of the locals.
+ int local = 0;
int i = 0;
for (; i < scope_info->StackLocalCount(); ++i) {
// Use the value from the stack.
- locals->set(i * 2, scope_info->LocalName(i));
- locals->set(i * 2 + 1, frame_inspector.GetExpression(i));
+ if (scope_info->LocalIsSynthetic(i))
+ continue;
+ locals->set(local * 2, scope_info->LocalName(i));
+ locals->set(local * 2 + 1, frame_inspector.GetExpression(i));
+ local++;
}
- if (i < scope_info->LocalCount()) {
+ if (local < local_count) {
// Get the context containing declarations.
Handle<Context> context(
Context::cast(it.frame()->context())->declaration_context());
for (; i < scope_info->LocalCount(); ++i) {
+ if (scope_info->LocalIsSynthetic(i))
+ continue;
Handle<String> name(scope_info->LocalName(i));
VariableMode mode;
InitializationFlag init_flag;
- locals->set(i * 2, *name);
- locals->set(i * 2 + 1, context->get(
- scope_info->ContextSlotIndex(*name, &mode, &init_flag)));
+ locals->set(local * 2, *name);
+ int context_slot_index =
+ ScopeInfo::ContextSlotIndex(scope_info, name, &mode, &init_flag);
+ Object* value = context->get(context_slot_index);
+ locals->set(local * 2 + 1, value);
+ local++;
}
}
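The materialization above walks every slot with one index but writes name/value pairs with a second compaction index, so hidden synthetic locals such as ".result" leave no holes in the output array. The same two-index scheme in a self-contained form:

#include <cstdio>
#include <string>
#include <vector>

struct Slot { std::string name; int value; bool synthetic; };

int main() {
  std::vector<Slot> slots = {
      {"a", 1, false}, {".result", 2, true}, {"b", 3, false}};

  // First pass: count only the locals the debugger should see.
  int local_count = 0;
  for (const Slot& s : slots)
    if (!s.synthetic) local_count++;

  // Second pass: iterate with i, write compacted entries with local.
  std::vector<std::string> names(local_count);
  std::vector<int> values(local_count);
  int local = 0;
  for (std::size_t i = 0; i < slots.size(); ++i) {
    if (slots[i].synthetic) continue;  // hidden from the debugger
    names[local] = slots[i].name;
    values[local] = slots[i].value;
    local++;
  }

  for (int i = 0; i < local_count; ++i)
    std::printf("%s = %d\n", names[i].c_str(), values[i]);  // a = 1, b = 3
  return 0;
}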
@@ -11286,7 +11375,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Calculate the size of the result.
int details_size = kFrameDetailsFirstDynamicIndex +
- 2 * (argument_count + scope_info->LocalCount()) +
+ 2 * (argument_count + local_count) +
(at_return ? 1 : 0);
Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
@@ -11301,7 +11390,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Add the locals count
details->set(kFrameDetailsLocalCountIndex,
- Smi::FromInt(scope_info->LocalCount()));
+ Smi::FromInt(local_count));
// Add the source position.
if (position != RelocInfo::kNoPosition) {
@@ -11352,7 +11441,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
// Add locals name and value from the temporary copy from the function frame.
- for (int i = 0; i < scope_info->LocalCount() * 2; i++) {
+ for (int i = 0; i < local_count * 2; i++) {
details->set(details_index++, locals->get(i));
}
@@ -11366,7 +11455,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
if (!receiver->IsJSObject() &&
- shared->is_classic_mode() &&
+ shared->strict_mode() == SLOPPY &&
!function->IsBuiltin()) {
// If the receiver is not a JSObject and the function is not a
// builtin or strict-mode we have hit an optimization where a
@@ -11375,11 +11464,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// by creating correct wrapper object based on the calling frame's
// native context.
it.Advance();
- Handle<Context> calling_frames_native_context(
- Context::cast(Context::cast(it.frame()->context())->native_context()));
- ASSERT(!receiver->IsUndefined() && !receiver->IsNull());
- receiver =
- isolate->factory()->ToObject(receiver, calling_frames_native_context);
+ if (receiver->IsUndefined()) {
+ Context* context = function->context();
+ receiver = handle(context->global_object()->global_receiver());
+ } else {
+ ASSERT(!receiver->IsNull());
+ Context* context = Context::cast(it.frame()->context());
+ Handle<Context> native_context(Context::cast(context->native_context()));
+ receiver = Object::ToObject(
+ isolate, receiver, native_context).ToHandleChecked();
+ }
}
details->set(kFrameDetailsReceiverIndex, *receiver);
@@ -11388,9 +11482,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
+static bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
+ Handle<String> parameter_name) {
+ VariableMode mode;
+ InitializationFlag flag;
+ return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &flag) != -1;
+}
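A parameter is skipped when a context local of the same name exists, because in that case the context slot, not the stack slot, holds the live value. A small stand-alone version of the shadowing test, with a map standing in for the ScopeInfo context-slot lookup (which returns -1 for "not found"):

#include <cstdio>
#include <map>
#include <string>

static std::map<std::string, int> context_slot_index = {{"x", 0}};

static bool ParameterIsShadowedByContextLocal(const std::string& name) {
  return context_slot_index.count(name) != 0;
}

int main() {
  const char* params[] = {"x", "y"};
  for (const char* p : params) {
    if (ParameterIsShadowedByContextLocal(p)) continue;  // don't materialize
    std::printf("materialize parameter %s\n", p);        // only "y"
  }
  return 0;
}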
+
+
// Create a plain JSObject which materializes the local scope for the specified
// frame.
-static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
+MUST_USE_RESULT
+static MaybeHandle<JSObject> MaterializeStackLocalsWithFrameInspector(
Isolate* isolate,
Handle<JSObject> target,
Handle<JSFunction> function,
@@ -11400,38 +11503,34 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ // Do not materialize the parameter if it is shadowed by a context local.
+ Handle<String> name(scope_info->ParameterName(i));
+ if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
+
+ HandleScope scope(isolate);
Handle<Object> value(i < frame_inspector->GetParametersCount()
? frame_inspector->GetParameter(i)
: isolate->heap()->undefined_value(),
isolate);
ASSERT(!value->IsTheHole());
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ RETURN_ON_EXCEPTION(
isolate,
- Runtime::SetObjectProperty(isolate,
- target,
- Handle<String>(scope_info->ParameterName(i)),
- value,
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
+ Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY),
+ JSObject);
}
// Second fill all stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ if (scope_info->LocalIsSynthetic(i)) continue;
+ Handle<String> name(scope_info->StackLocalName(i));
Handle<Object> value(frame_inspector->GetExpression(i), isolate);
if (value->IsTheHole()) continue;
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ RETURN_ON_EXCEPTION(
isolate,
- Runtime::SetObjectProperty(
- isolate,
- target,
- Handle<String>(scope_info->StackLocalName(i)),
- value,
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
+ Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY),
+ JSObject);
}
return target;
@@ -11455,28 +11554,35 @@ static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
// Parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ // Shadowed parameters were not materialized.
+ Handle<String> name(scope_info->ParameterName(i));
+ if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
+
ASSERT(!frame->GetParameter(i)->IsTheHole());
HandleScope scope(isolate);
- Handle<Object> value = GetProperty(
- isolate, target, Handle<String>(scope_info->ParameterName(i)));
+ Handle<Object> value =
+ Object::GetPropertyOrElement(target, name).ToHandleChecked();
frame->SetParameterValue(i, *value);
}
// Stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ if (scope_info->LocalIsSynthetic(i)) continue;
if (frame->GetExpression(i)->IsTheHole()) continue;
HandleScope scope(isolate);
- Handle<Object> value = GetProperty(
- isolate, target, Handle<String>(scope_info->StackLocalName(i)));
+ Handle<Object> value = Object::GetPropertyOrElement(
+ target,
+ handle(scope_info->StackLocalName(i), isolate)).ToHandleChecked();
frame->SetExpression(i, *value);
}
}
-static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
- Handle<JSObject> target,
- Handle<JSFunction> function,
- JavaScriptFrame* frame) {
+MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext(
+ Isolate* isolate,
+ Handle<JSObject> target,
+ Handle<JSFunction> function,
+ JavaScriptFrame* frame) {
HandleScope scope(isolate);
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
@@ -11488,7 +11594,7 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
Handle<Context> function_context(frame_context->declaration_context());
if (!ScopeInfo::CopyContextLocalsToScopeObject(
scope_info, function_context, target)) {
- return Handle<JSObject>();
+ return MaybeHandle<JSObject>();
}
// Finally copy any properties from the function context extension.
@@ -11497,24 +11603,24 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
if (function_context->has_extension() &&
!function_context->IsNativeContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- bool threw = false;
- Handle<FixedArray> keys =
- GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
- if (threw) return Handle<JSObject>();
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, keys,
+ JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS),
+ JSObject);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, Object::GetPropertyOrElement(ext, key), JSObject);
+ RETURN_ON_EXCEPTION(
isolate,
- Runtime::SetObjectProperty(isolate,
- target,
- key,
- GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
+ Runtime::SetObjectProperty(
+ isolate, target, key, value, NONE, SLOPPY),
+ JSObject);
}
}
}
@@ -11523,7 +11629,7 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
}
-static Handle<JSObject> MaterializeLocalScope(
+MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalScope(
Isolate* isolate,
JavaScriptFrame* frame,
int inlined_jsframe_index) {
@@ -11532,9 +11638,11 @@ static Handle<JSObject> MaterializeLocalScope(
Handle<JSObject> local_scope =
isolate->factory()->NewJSObject(isolate->object_function());
- local_scope = MaterializeStackLocalsWithFrameInspector(
- isolate, local_scope, function, &frame_inspector);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, local_scope, Handle<JSObject>());
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, local_scope,
+ MaterializeStackLocalsWithFrameInspector(
+ isolate, local_scope, function, &frame_inspector),
+ JSObject);
return MaterializeLocalContext(isolate, local_scope, function, frame);
}
@@ -11548,11 +11656,11 @@ static bool SetContextLocalValue(Isolate* isolate,
Handle<Object> new_value) {
for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
Handle<String> next_name(scope_info->ContextLocalName(i));
- if (variable_name->Equals(*next_name)) {
+ if (String::Equals(variable_name, next_name)) {
VariableMode mode;
InitializationFlag init_flag;
int context_index =
- scope_info->ContextSlotIndex(*next_name, &mode, &init_flag);
+ ScopeInfo::ContextSlotIndex(scope_info, next_name, &mode, &init_flag);
context->set(context_index, *new_value);
return true;
}
@@ -11580,7 +11688,8 @@ static bool SetLocalVariableValue(Isolate* isolate,
// Parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- if (scope_info->ParameterName(i)->Equals(*variable_name)) {
+ HandleScope scope(isolate);
+ if (String::Equals(handle(scope_info->ParameterName(i)), variable_name)) {
frame->SetParameterValue(i, *new_value);
// Argument might be shadowed in heap context, don't stop here.
default_result = true;
@@ -11589,7 +11698,8 @@ static bool SetLocalVariableValue(Isolate* isolate,
// Stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- if (scope_info->StackLocalName(i)->Equals(*variable_name)) {
+ HandleScope scope(isolate);
+ if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
frame->SetExpression(i, *new_value);
return true;
}
@@ -11614,8 +11724,7 @@ static bool SetLocalVariableValue(Isolate* isolate,
// We don't expect this to do anything except replacing
// property value.
Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
- NONE,
- kNonStrictMode);
+ NONE, SLOPPY).Assert();
return true;
}
}
@@ -11628,8 +11737,9 @@ static bool SetLocalVariableValue(Isolate* isolate,
// Create a plain JSObject which materializes the closure content for the
// context.
-static Handle<JSObject> MaterializeClosure(Isolate* isolate,
- Handle<Context> context) {
+MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure(
+ Isolate* isolate,
+ Handle<Context> context) {
ASSERT(context->IsFunctionContext());
Handle<SharedFunctionInfo> shared(context->closure()->shared());
@@ -11643,29 +11753,31 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
// Fill all context locals to the context extension.
if (!ScopeInfo::CopyContextLocalsToScopeObject(
scope_info, context, closure_scope)) {
- return Handle<JSObject>();
+ return MaybeHandle<JSObject>();
}
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- bool threw = false;
- Handle<FixedArray> keys =
- GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
- if (threw) return Handle<JSObject>();
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, keys,
+ JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS), JSObject);
for (int i = 0; i < keys->length(); i++) {
+ HandleScope scope(isolate);
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, Object::GetPropertyOrElement(ext, key), JSObject);
+ RETURN_ON_EXCEPTION(
isolate,
- Runtime::SetObjectProperty(isolate, closure_scope, key,
- GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
+ Runtime::SetObjectProperty(
+ isolate, closure_scope, key, value, NONE, SLOPPY),
+ JSObject);
}
}
@@ -11696,8 +11808,7 @@ static bool SetClosureVariableValue(Isolate* isolate,
if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing property value.
Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
- NONE,
- kNonStrictMode);
+ NONE, SLOPPY).Assert();
return true;
}
}
@@ -11708,20 +11819,20 @@ static bool SetClosureVariableValue(Isolate* isolate,
// Create a plain JSObject which materializes the scope for the specified
// catch context.
-static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
- Handle<Context> context) {
+MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeCatchScope(
+ Isolate* isolate,
+ Handle<Context> context) {
ASSERT(context->IsCatchContext());
Handle<String> name(String::cast(context->extension()));
Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
isolate);
Handle<JSObject> catch_scope =
isolate->factory()->NewJSObject(isolate->object_function());
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ RETURN_ON_EXCEPTION(
isolate,
Runtime::SetObjectProperty(isolate, catch_scope, name, thrown_object,
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
+ NONE, SLOPPY),
+ JSObject);
return catch_scope;
}
@@ -11732,7 +11843,7 @@ static bool SetCatchVariableValue(Isolate* isolate,
Handle<Object> new_value) {
ASSERT(context->IsCatchContext());
Handle<String> name(String::cast(context->extension()));
- if (!name->Equals(*variable_name)) {
+ if (!String::Equals(name, variable_name)) {
return false;
}
context->set(Context::THROWN_OBJECT_INDEX, *new_value);
@@ -11742,7 +11853,7 @@ static bool SetCatchVariableValue(Isolate* isolate,
// Create a plain JSObject which materializes the block scope for the specified
// block context.
-static Handle<JSObject> MaterializeBlockScope(
+MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeBlockScope(
Isolate* isolate,
Handle<Context> context) {
ASSERT(context->IsBlockContext());
@@ -11756,7 +11867,7 @@ static Handle<JSObject> MaterializeBlockScope(
// Fill all context locals.
if (!ScopeInfo::CopyContextLocalsToScopeObject(
scope_info, context, block_scope)) {
- return Handle<JSObject>();
+ return MaybeHandle<JSObject>();
}
return block_scope;
@@ -11765,7 +11876,7 @@ static Handle<JSObject> MaterializeBlockScope(
// Create a plain JSObject which materializes the module scope for the specified
// module context.
-static Handle<JSObject> MaterializeModuleScope(
+MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeModuleScope(
Isolate* isolate,
Handle<Context> context) {
ASSERT(context->IsModuleContext());
@@ -11779,7 +11890,7 @@ static Handle<JSObject> MaterializeModuleScope(
// Fill all context locals.
if (!ScopeInfo::CopyContextLocalsToScopeObject(
scope_info, context, module_scope)) {
- return Handle<JSObject>();
+ return MaybeHandle<JSObject>();
}
return module_scope;
@@ -11804,7 +11915,8 @@ class ScopeIterator {
ScopeIterator(Isolate* isolate,
JavaScriptFrame* frame,
- int inlined_jsframe_index)
+ int inlined_jsframe_index,
+ bool ignore_nested_scopes = false)
: isolate_(isolate),
frame_(frame),
inlined_jsframe_index_(inlined_jsframe_index),
@@ -11828,19 +11940,31 @@ class ScopeIterator {
// Return if ensuring debug info failed.
return;
}
- Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
- // Find the break point where execution has stopped.
- BreakLocationIterator break_location_iterator(debug_info,
- ALL_BREAK_LOCATIONS);
- // pc points to the instruction after the current one, possibly a break
- // location as well. So the "- 1" to exclude it from the search.
- break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
- if (break_location_iterator.IsExit()) {
- // We are within the return sequence. At the momemt it is not possible to
+ // Currently it takes too much time to find nested scopes due to script
+ // parsing. Sometimes we want to run the ScopeIterator as fast as possible
+ // (for example, while collecting async call stacks on every
+ // addEventListener call), even if we drop some nested scopes.
+ // Later we may optimize getting the nested scopes (cache the result?)
+ // and include nested scopes into the "fast" iteration case as well.
+ if (!ignore_nested_scopes) {
+ Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
+
+ // Find the break point where execution has stopped.
+ BreakLocationIterator break_location_iterator(debug_info,
+ ALL_BREAK_LOCATIONS);
+ // pc points to the instruction after the current one, possibly a break
+ // location as well. So the "- 1" to exclude it from the search.
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
+
+ // Within the return sequence at the moment it is not possible to
// get a source position which is consistent with the current scope chain.
// Thus all nested with, catch and block contexts are skipped and we only
// provide the function scope.
+ ignore_nested_scopes = break_location_iterator.IsExit();
+ }
+
+ if (ignore_nested_scopes) {
if (scope_info->HasContext()) {
context_ = Handle<Context>(context_->declaration_context(), isolate_);
} else {
@@ -11848,7 +11972,7 @@ class ScopeIterator {
context_ = Handle<Context>(context_->previous(), isolate_);
}
}
- if (scope_info->scope_type() != EVAL_SCOPE) {
+ if (scope_info->scope_type() == FUNCTION_SCOPE) {
nested_scope_chain_.Add(scope_info);
}
} else {
@@ -11976,7 +12100,7 @@ class ScopeIterator {
}
// Return the JavaScript object with the content of the current scope.
- Handle<JSObject> ScopeObject() {
+ MaybeHandle<JSObject> ScopeObject() {
ASSERT(!failed_);
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
@@ -12139,16 +12263,12 @@ class ScopeIterator {
};
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
+RUNTIME_FUNCTION(Runtime_GetScopeCount) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
// Get the frame where the debugging is performed.
@@ -12171,16 +12291,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
// Returns the list of step-in positions (text offset) in a function of the
// stack frame in a range from the current debug break position to the end
// of the corresponding statement.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
+RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
// Get the frame where the debugging is performed.
@@ -12231,9 +12347,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
if (accept) {
if (break_location_iterator.IsStepInLocation(isolate)) {
Smi* position_value = Smi::FromInt(break_location_iterator.position());
- JSObject::SetElement(array, len,
- Handle<Object>(position_value, isolate),
- NONE, kNonStrictMode);
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ JSObject::SetElement(array, len,
+ Handle<Object>(position_value, isolate),
+ NONE, SLOPPY));
len++;
}
}
@@ -12253,7 +12371,8 @@ static const int kScopeDetailsObjectIndex = 1;
static const int kScopeDetailsSize = 2;
-static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
+MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeScopeDetails(
+ Isolate* isolate,
ScopeIterator* it) {
// Calculate the size of the result.
int details_size = kScopeDetailsSize;
@@ -12261,11 +12380,12 @@ static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(it->Type()));
- Handle<JSObject> scope_object = it->ScopeObject();
- RETURN_IF_EMPTY_HANDLE(isolate, scope_object);
+ Handle<JSObject> scope_object;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, scope_object, it->ScopeObject(), JSObject);
details->set(kScopeDetailsObjectIndex, *scope_object);
- return *isolate->factory()->NewJSArrayWithElements(details);
+ return isolate->factory()->NewJSArrayWithElements(details);
}
@@ -12278,16 +12398,12 @@ static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
// The array returned contains the following information:
// 0: Scope type
// 1: Scope object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
+RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
@@ -12306,11 +12422,60 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
if (it.Done()) {
return isolate->heap()->undefined_value();
}
- return MaterializeScopeDetails(isolate, &it);
+ Handle<JSObject> details;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, details, MaterializeScopeDetails(isolate, &it));
+ return *details;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeCount) {
+// Return an array of scope details
+// args[0]: number: break id
+// args[1]: number: frame index
+// args[2]: number: inlined frame index
+// args[3]: boolean: ignore nested scopes
+//
+// The array returned contains arrays with the following information:
+// 0: Scope type
+// 1: Scope object
+RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3 || args.length() == 4);
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
+
+ CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+ CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
+
+ bool ignore_nested_scopes = false;
+ if (args.length() == 4) {
+ CONVERT_BOOLEAN_ARG_CHECKED(flag, 3);
+ ignore_nested_scopes = flag;
+ }
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator frame_it(isolate, id);
+ JavaScriptFrame* frame = frame_it.frame();
+
+ List<Handle<JSObject> > result(4);
+ ScopeIterator it(isolate, frame, inlined_jsframe_index, ignore_nested_scopes);
+ for (; !it.Done(); it.Next()) {
+ Handle<JSObject> details;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, details, MaterializeScopeDetails(isolate, &it));
+ result.Add(details);
+ }
+
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(result.length());
+ for (int i = 0; i < result.length(); ++i) {
+ array->set(i, *result[i]);
+ }
+ return *isolate->factory()->NewJSArrayWithElements(array);
+}
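Runtime_GetAllScopesDetails materializes every scope in one call, gathering results in a growable list first (the scope count is unknown up front) and copying them into an exactly-sized array at the end. The same collect-then-copy shape in miniature:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> result;                  // List<Handle<JSObject> > result(4)
  for (int scope_index = 0; scope_index < 3; ++scope_index) {
    result.push_back(scope_index * 100);    // one materialized scope each
  }
  // NewFixedArray(result.length()) plus the element-wise copy:
  std::vector<int> array(result.size());
  for (std::size_t i = 0; i < result.size(); ++i) array[i] = result[i];
  for (int v : array) std::printf("%d\n", v);  // 0 100 200
  return 0;
}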
+
+
+RUNTIME_FUNCTION(Runtime_GetFunctionScopeCount) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -12327,7 +12492,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeCount) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) {
+RUNTIME_FUNCTION(Runtime_GetFunctionScopeDetails) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -12345,7 +12510,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) {
return isolate->heap()->undefined_value();
}
- return MaterializeScopeDetails(isolate, &it);
+ Handle<JSObject> details;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, details, MaterializeScopeDetails(isolate, &it));
+ return *details;
}
@@ -12371,22 +12539,20 @@ static bool SetScopeVariableValue(ScopeIterator* it, int index,
// args[5]: object: new value
//
// Return true if success and false otherwise
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScopeVariableValue) {
+RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
HandleScope scope(isolate);
ASSERT(args.length() == 6);
// Check arguments.
CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 4);
- Handle<Object> new_value = args.at<Object>(5);
+ CONVERT_ARG_HANDLE_CHECKED(Object, new_value, 5);
bool res;
if (args[0]->IsNumber()) {
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
+
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
@@ -12407,7 +12573,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScopeVariableValue) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
+RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
@@ -12425,16 +12591,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadCount) {
+RUNTIME_FUNCTION(Runtime_GetThreadCount) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
-
- // Check arguments.
- Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
// Count all archived V8 threads.
int n = 0;
@@ -12461,16 +12622,12 @@ static const int kThreadDetailsSize = 2;
// The array returned contains the following information:
// 0: Is current thread?
// 1: Thread id
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadDetails) {
+RUNTIME_FUNCTION(Runtime_GetThreadDetails) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
// Allocate array for result.
@@ -12511,7 +12668,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadDetails) {
// Sets the disable break state
// args[0]: disable break state
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
+RUNTIME_FUNCTION(Runtime_SetDisableBreak) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0);
@@ -12525,7 +12682,7 @@ static bool IsPositionAlignmentCodeCorrect(int alignment) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
+RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -12553,17 +12710,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
// args[0]: function
// args[1]: number: break source position (within the function source)
// args[2]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
+RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- RUNTIME_ASSERT(source_position >= 0);
- Handle<Object> break_point_object_arg = args.at<Object>(2);
+ RUNTIME_ASSERT(source_position >= function->shared()->start_position() &&
+ source_position <= function->shared()->end_position());
+ CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 2);
// Set break point.
- isolate->debug()->SetBreakPoint(function, break_point_object_arg,
- &source_position);
+ RUNTIME_ASSERT(isolate->debug()->SetBreakPoint(
+ function, break_point_object_arg, &source_position));
return Smi::FromInt(source_position);
}
@@ -12576,14 +12734,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
// args[1]: number: break source position (within the script source)
// args[2]: number, breakpoint position alignment
// args[3]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
+RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= 0);
CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[2]);
- Handle<Object> break_point_object_arg = args.at<Object>(3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 3);
if (!IsPositionAlignmentCodeCorrect(statement_aligned_code)) {
return isolate->ThrowIllegalOperation();
@@ -12599,7 +12757,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
&source_position,
alignment)) {
- return isolate->heap()->undefined_value();
+ return isolate->heap()->undefined_value();
}
return Smi::FromInt(source_position);
@@ -12608,10 +12766,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
// Clear a break point
// args[0]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearBreakPoint) {
+RUNTIME_FUNCTION(Runtime_ClearBreakPoint) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- Handle<Object> break_point_object_arg = args.at<Object>(0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 0);
// Clear break point.
isolate->debug()->ClearBreakPoint(break_point_object_arg);
@@ -12623,16 +12781,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearBreakPoint) {
// Change the state of break on exceptions.
// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
// args[1]: Boolean indicating on/off.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ChangeBreakOnException) {
+RUNTIME_FUNCTION(Runtime_ChangeBreakOnException) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- RUNTIME_ASSERT(args[0]->IsNumber());
+ CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
CONVERT_BOOLEAN_ARG_CHECKED(enable, 1);
// If the number doesn't match an enum value, the ChangeBreakOnException
// function will default to affecting caught exceptions.
- ExceptionBreakType type =
- static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
+ ExceptionBreakType type = static_cast<ExceptionBreakType>(type_arg);
// Update break point state.
isolate->debug()->ChangeBreakOnException(type, enable);
return isolate->heap()->undefined_value();
@@ -12641,13 +12798,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ChangeBreakOnException) {
// Returns the state of break on exceptions
// args[0]: boolean indicating uncaught exceptions
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
+RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- RUNTIME_ASSERT(args[0]->IsNumber());
+ CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
- ExceptionBreakType type =
- static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
+ ExceptionBreakType type = static_cast<ExceptionBreakType>(type_arg);
bool result = isolate->debug()->IsBreakOnException(type);
return Smi::FromInt(result);
}
@@ -12658,15 +12814,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
// args[1]: step action from the enumeration StepAction
// args[2]: number of times to perform the step, for step out it is the number
// of frames to step down.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
+RUNTIME_FUNCTION(Runtime_PrepareStep) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
+
if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
@@ -12713,7 +12866,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
// Clear all stepping set by PrepareStep.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
+RUNTIME_FUNCTION(Runtime_ClearStepping) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
isolate->debug()->ClearStepping();
@@ -12723,59 +12876,58 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
// Helper function to find or create the arguments object for
// Runtime_DebugEvaluate.
-static Handle<JSObject> MaterializeArgumentsObject(
+MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeArgumentsObject(
Isolate* isolate,
Handle<JSObject> target,
Handle<JSFunction> function) {
// Do not materialize the arguments object for eval or top-level code.
// Skip if "arguments" is already taken.
if (!function->shared()->is_function() ||
- JSReceiver::HasLocalProperty(target,
- isolate->factory()->arguments_string())) {
+ JSReceiver::HasOwnProperty(
+ target, isolate->factory()->arguments_string())) {
return target;
}
// FunctionGetArguments can't throw an exception.
Handle<JSObject> arguments = Handle<JSObject>::cast(
Accessors::FunctionGetArguments(function));
- Runtime::SetObjectProperty(isolate, target,
- isolate->factory()->arguments_string(),
- arguments,
- ::NONE,
- kNonStrictMode);
+ Handle<String> arguments_str = isolate->factory()->arguments_string();
+ RETURN_ON_EXCEPTION(
+ isolate,
+ Runtime::SetObjectProperty(
+ isolate, target, arguments_str, arguments, ::NONE, SLOPPY),
+ JSObject);
return target;
}
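
MaterializeArgumentsObject now advertises failure in its type: it returns
MaybeHandle<JSObject> and is marked MUST_USE_RESULT, so callers must unwrap
the result explicitly rather than testing for an empty handle after the fact.
A sketch of the caller-side idiom (names other than ToHandle are assumptions):

  MaybeHandle<JSObject> maybe =
      MaterializeArgumentsObject(isolate, target, function);
  Handle<JSObject> materialized;
  if (!maybe.ToHandle(&materialized)) {
    // An exception is already pending on the isolate; propagate the
    // empty handle to the caller.
    return MaybeHandle<JSObject>();
  }

RETURN_ON_EXCEPTION, used in the body above, packages exactly this
check-and-bail step for functions that themselves return a MaybeHandle.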
// Compile and evaluate source for the given context.
-static MaybeObject* DebugEvaluate(Isolate* isolate,
- Handle<Context> context,
- Handle<Object> context_extension,
- Handle<Object> receiver,
- Handle<String> source) {
+static MaybeHandle<Object> DebugEvaluate(Isolate* isolate,
+ Handle<Context> context,
+ Handle<Object> context_extension,
+ Handle<Object> receiver,
+ Handle<String> source) {
if (context_extension->IsJSObject()) {
Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
Handle<JSFunction> closure(context->closure(), isolate);
context = isolate->factory()->NewWithContext(closure, context, extension);
}
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source,
- context,
- context->IsNativeContext(),
- CLASSIC_MODE,
- NO_PARSE_RESTRICTION,
- RelocInfo::kNoPosition);
- RETURN_IF_EMPTY_HANDLE(isolate, shared);
-
- Handle<JSFunction> eval_fun =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, NOT_TENURED);
- bool pending_exception;
- Handle<Object> result = Execution::Call(
- isolate, eval_fun, receiver, 0, NULL, &pending_exception);
+ Handle<JSFunction> eval_fun;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, eval_fun,
+ Compiler::GetFunctionFromEval(source,
+ context,
+ SLOPPY,
+ NO_PARSE_RESTRICTION,
+ RelocInfo::kNoPosition),
+ Object);
- if (pending_exception) return Failure::Exception();
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, eval_fun, receiver, 0, NULL),
+ Object);
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
@@ -12785,7 +12937,7 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
// Clear the oneshot breakpoints so that the debugger does not step further.
isolate->debug()->ClearStepping();
- return *result;
+ return result;
}
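
Runtime entry points still return a raw Object*, so
ASSIGN_RETURN_FAILURE_ON_EXCEPTION bridges the MaybeHandle world back to the
tagged-pointer world: it unwraps the handle on success and returns the heap's
exception sentinel on failure. An approximate expansion, for orientation only
(the authoritative definition lives in isolate.h):

  #define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)  \
    do {                                                          \
      if (!(call).ToHandle(&dst)) {                               \
        ASSERT((isolate)->has_pending_exception());               \
        return (isolate)->heap()->exception();                    \
      }                                                           \
    } while (false)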
@@ -12794,25 +12946,23 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
// - Parameters and stack-allocated locals need to be materialized. Altered
// values need to be written back to the stack afterwards.
// - The arguments object needs to be materialized.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
+RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
HandleScope scope(isolate);
// Check the execution state and decode arguments frame and source to be
// evaluated.
ASSERT(args.length() == 6);
- Object* check_result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_result->ToObject(&check_result)) return maybe_result;
- }
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
+
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
- Handle<Object> context_extension(args[5], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 5);
// Handle the processing of break.
- DisableBreak disable_break_save(isolate, disable_break);
+ DisableBreak disable_break_scope(isolate->debug(), disable_break);
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
@@ -12836,24 +12986,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<JSObject> materialized =
isolate->factory()->NewJSObject(isolate->object_function());
- materialized = MaterializeStackLocalsWithFrameInspector(
- isolate, materialized, function, &frame_inspector);
- RETURN_IF_EMPTY_HANDLE(isolate, materialized);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, materialized,
+ MaterializeStackLocalsWithFrameInspector(
+ isolate, materialized, function, &frame_inspector));
- materialized = MaterializeArgumentsObject(isolate, materialized, function);
- RETURN_IF_EMPTY_HANDLE(isolate, materialized);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, materialized,
+ MaterializeArgumentsObject(isolate, materialized, function));
// Add the materialized object in a with-scope to shadow the stack locals.
context = isolate->factory()->NewWithContext(function, context, materialized);
Handle<Object> receiver(frame->receiver(), isolate);
- Object* evaluate_result_object;
- { MaybeObject* maybe_result =
- DebugEvaluate(isolate, context, context_extension, receiver, source);
- if (!maybe_result->ToObject(&evaluate_result_object)) return maybe_result;
- }
-
- Handle<Object> result(evaluate_result_object, isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ DebugEvaluate(isolate, context, context_extension, receiver, source));
// Write back potential changes to materialized stack locals to the stack.
UpdateStackLocalsFromMaterializedObject(
@@ -12863,23 +13012,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
+RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
HandleScope scope(isolate);
// Check the execution state and decode arguments frame and source to be
// evaluated.
ASSERT(args.length() == 4);
- Object* check_result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_result->ToObject(&check_result)) return maybe_result;
- }
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
+
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
- Handle<Object> context_extension(args[3], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 3);
// Handle the processing of break.
- DisableBreak disable_break_save(isolate, disable_break);
+ DisableBreak disable_break_scope(isolate->debug(), disable_break);
// Enter the top context from before the debugger was invoked.
SaveContext save(isolate);
@@ -12895,11 +13042,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
// debugger was invoked.
Handle<Context> context = isolate->native_context();
Handle<Object> receiver = isolate->global_object();
- return DebugEvaluate(isolate, context, context_extension, receiver, source);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ DebugEvaluate(isolate, context, context_extension, receiver, source));
+ return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
+RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
@@ -12914,14 +13065,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
// instances->set(i, *GetScriptWrapper(script))
// is unsafe as GetScriptWrapper might call GC and the C++ compiler might
// already have dereferenced the instances handle.
- Handle<JSValue> wrapper = GetScriptWrapper(script);
+ Handle<JSObject> wrapper = Script::GetWrapper(script);
instances->set(i, *wrapper);
}
// Return result as a JS array.
Handle<JSObject> result =
isolate->factory()->NewJSObject(isolate->array_function());
- isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
+ JSArray::SetContent(Handle<JSArray>::cast(result), instances);
return *result;
}
@@ -13001,21 +13152,13 @@ static int DebugReferencedBy(HeapIterator* iterator,
// args[0]: the object to find references to
// args[1]: constructor function for instances to exclude (Mirror)
// args[2]: the maximum number of objects to return
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- // First perform a full GC in order to avoid references from dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "%DebugReferencedBy");
- // The heap iterator reserves the right to do a GC to make the heap iterable.
- // Due to the GC above we know it won't need to do that, but it seems cleaner
- // to get the heap iterator constructed before we start having unprotected
- // Object* locals that are not protected by handles.
-
// Check parameters.
- CONVERT_ARG_CHECKED(JSObject, target, 0);
- Object* instance_filter = args[1];
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, instance_filter, 1);
RUNTIME_ASSERT(instance_filter->IsUndefined() ||
instance_filter->IsJSObject());
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
@@ -13023,40 +13166,42 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Get the constructor function for context extension and arguments array.
- JSObject* arguments_boilerplate =
- isolate->context()->native_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
+ Handle<JSObject> arguments_boilerplate(
+ isolate->context()->native_context()->sloppy_arguments_boilerplate());
+ Handle<JSFunction> arguments_function(
+ JSFunction::cast(arguments_boilerplate->map()->constructor()));
// Get the number of referencing objects.
int count;
+ // First perform a full GC in order to avoid dead objects and to make the heap
+ // iterable.
Heap* heap = isolate->heap();
- HeapIterator heap_iterator(heap);
- count = DebugReferencedBy(&heap_iterator,
- target, instance_filter, max_references,
- NULL, 0, arguments_function);
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
+ {
+ HeapIterator heap_iterator(heap);
+ count = DebugReferencedBy(&heap_iterator,
+ *target, *instance_filter, max_references,
+ NULL, 0, *arguments_function);
+ }
// Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
+ Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count);
// Fill the referencing objects.
- // AllocateFixedArray above does not make the heap non-iterable.
- ASSERT(heap->IsHeapIterable());
- HeapIterator heap_iterator2(heap);
- count = DebugReferencedBy(&heap_iterator2,
- target, instance_filter, max_references,
- instances, count, arguments_function);
+ {
+ HeapIterator heap_iterator(heap);
+ count = DebugReferencedBy(&heap_iterator,
+ *target, *instance_filter, max_references,
+ *instances, count, *arguments_function);
+ }
// Return result as JS array.
- Object* result;
- MaybeObject* maybe_result = heap->AllocateJSObject(
+ Handle<JSFunction> constructor(
isolate->context()->native_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- return JSArray::cast(result)->SetContent(instances);
+
+ Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
+ JSArray::SetContent(Handle<JSArray>::cast(result), instances);
+ return *result;
}
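
The rewritten heap scan keeps its two-pass shape, but each HeapIterator now
lives in its own block: the factory allocation between the counting pass and
the filling pass may trigger GC, which an open iterator cannot tolerate.
Distilled, with CountMatches/FillMatches as stand-ins for DebugReferencedBy:

  int count;
  {
    HeapIterator it(heap);           // pass 1: count matching objects
    count = CountMatches(&it);
  }
  Handle<FixedArray> out = isolate->factory()->NewFixedArray(count);
  {
    HeapIterator it(heap);           // pass 2: fill the pre-sized array
    FillMatches(&it, *out, count);
  }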
@@ -13095,66 +13240,65 @@ static int DebugConstructedBy(HeapIterator* iterator,
// Scan the heap for objects constructed by a specific function.
// args[0]: the constructor to find instances of
// args[1]: the maximum number of objects to return
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- // First perform a full GC in order to avoid dead objects.
- Heap* heap = isolate->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
// Check parameters.
- CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
RUNTIME_ASSERT(max_references >= 0);
// Get the number of referencing objects.
int count;
- HeapIterator heap_iterator(heap);
- count = DebugConstructedBy(&heap_iterator,
- constructor,
- max_references,
- NULL,
- 0);
+ // First perform a full GC in order to avoid dead objects and to make the heap
+ // iterable.
+ Heap* heap = isolate->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
+ {
+ HeapIterator heap_iterator(heap);
+ count = DebugConstructedBy(&heap_iterator,
+ *constructor,
+ max_references,
+ NULL,
+ 0);
+ }
// Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
+ Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count);
- ASSERT(isolate->heap()->IsHeapIterable());
// Fill the referencing objects.
- HeapIterator heap_iterator2(heap);
- count = DebugConstructedBy(&heap_iterator2,
- constructor,
- max_references,
- instances,
- count);
+ {
+ HeapIterator heap_iterator2(heap);
+ count = DebugConstructedBy(&heap_iterator2,
+ *constructor,
+ max_references,
+ *instances,
+ count);
+ }
// Return result as JS array.
- Object* result;
- { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ Handle<JSFunction> array_function(
isolate->context()->native_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return JSArray::cast(result)->SetContent(instances);
+ Handle<JSObject> result = isolate->factory()->NewJSObject(array_function);
+ JSArray::SetContent(Handle<JSArray>::cast(result), instances);
+ return *result;
}
// Find the effective prototype object as returned by __proto__.
// args[0]: the object to find the prototype for.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- return GetPrototypeSkipHiddenPrototypes(isolate, obj);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ return *GetPrototypeSkipHiddenPrototypes(isolate, obj);
}
// Patches script source (should be called upon BeforeCompile event).
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
+RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -13172,7 +13316,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
+RUNTIME_FUNCTION(Runtime_SystemBreak) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
OS::DebugBreak();
@@ -13180,14 +13324,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
+RUNTIME_FUNCTION(Runtime_DebugDisassembleFunction) {
HandleScope scope(isolate);
#ifdef DEBUG
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
- return Failure::Exception();
+ if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
+ return isolate->heap()->exception();
}
func->code()->PrintLn();
#endif // DEBUG
@@ -13195,14 +13339,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
+RUNTIME_FUNCTION(Runtime_DebugDisassembleConstructor) {
HandleScope scope(isolate);
#ifdef DEBUG
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
- return Failure::Exception();
+ if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
+ return isolate->heap()->exception();
}
func->shared()->construct_stub()->PrintLn();
#endif // DEBUG
@@ -13210,7 +13354,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
+RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -13248,10 +13392,9 @@ static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
// For a script, finds all SharedFunctionInfos in the heap that point
// to this script. Returns a JSArray of SharedFunctionInfos wrapped
// in OpaqueReferences.
-RUNTIME_FUNCTION(MaybeObject*,
- Runtime_LiveEditFindSharedFunctionInfosForScript) {
+RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSValue, script_value, 0);
@@ -13265,8 +13408,6 @@ RUNTIME_FUNCTION(MaybeObject*,
int number;
Heap* heap = isolate->heap();
{
- heap->EnsureHeapIsIterable();
- DisallowHeapAllocation no_allocation;
HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
@@ -13274,8 +13415,6 @@ RUNTIME_FUNCTION(MaybeObject*,
}
if (number > kBufferSize) {
array = isolate->factory()->NewFixedArray(number);
- heap->EnsureHeapIsIterable();
- DisallowHeapAllocation no_allocation;
HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
@@ -13298,9 +13437,9 @@ RUNTIME_FUNCTION(MaybeObject*,
// Returns a JSArray of compilation infos. The array is ordered so that
// each function with all its descendants is always stored in a contiguous range
// with the function itself going first. The root function is a script function.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
+RUNTIME_FUNCTION(Runtime_LiveEditGatherCompileInfo) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
@@ -13308,71 +13447,72 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
RUNTIME_ASSERT(script->value()->IsScript());
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
- JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source);
-
- if (isolate->has_pending_exception()) {
- return Failure::Exception();
- }
-
- return result;
+ Handle<JSArray> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, LiveEdit::GatherCompileInfo(script_handle, source));
+ return *result;
}
// Changes the source of the script to a new_source.
// If old_script_name is provided (i.e. is a String), also creates a copy of
// the script with its original source and sends notification to debugger.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
+RUNTIME_FUNCTION(Runtime_LiveEditReplaceScript) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
- Handle<Object> old_script_name(args[2], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, old_script_name, 2);
RUNTIME_ASSERT(original_script_value->value()->IsScript());
Handle<Script> original_script(Script::cast(original_script_value->value()));
- Object* old_script = LiveEdit::ChangeScriptSource(original_script,
- new_source,
- old_script_name);
+ Handle<Object> old_script = LiveEdit::ChangeScriptSource(
+ original_script, new_source, old_script_name);
if (old_script->IsScript()) {
- Handle<Script> script_handle(Script::cast(old_script));
- return *(GetScriptWrapper(script_handle));
+ Handle<Script> script_handle = Handle<Script>::cast(old_script);
+ return *Script::GetWrapper(script_handle);
} else {
return isolate->heap()->null_value();
}
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
+RUNTIME_FUNCTION(Runtime_LiveEditFunctionSourceUpdated) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
- return LiveEdit::FunctionSourceUpdated(shared_info);
+ RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_info));
+
+ LiveEdit::FunctionSourceUpdated(shared_info);
+ return isolate->heap()->undefined_value();
}
// Replaces code of SharedFunctionInfo with a new one.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
+RUNTIME_FUNCTION(Runtime_LiveEditReplaceFunctionCode) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
+ RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_info));
- return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
+ LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
+ return isolate->heap()->undefined_value();
}
// Connects SharedFunctionInfo to another script.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
+RUNTIME_FUNCTION(Runtime_LiveEditFunctionSetScript) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 2);
- Handle<Object> function_object(args[0], isolate);
- Handle<Object> script_object(args[1], isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, script_object, 1);
if (function_object->IsJSValue()) {
Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
@@ -13381,7 +13521,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
Script* script = Script::cast(JSValue::cast(*script_object)->value());
script_object = Handle<Object>(script, isolate);
}
-
+ RUNTIME_ASSERT(function_wrapper->value()->IsSharedFunctionInfo());
LiveEdit::SetFunctionScript(function_wrapper, script_object);
} else {
// Just ignore this. We may not have a SharedFunctionInfo for some functions
@@ -13394,18 +13534,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
// In the code of a parent function, replaces the original function as an
// embedded object with a substitution one.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
+RUNTIME_FUNCTION(Runtime_LiveEditReplaceRefToNestedFunction) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
CONVERT_ARG_HANDLE_CHECKED(JSValue, subst_wrapper, 2);
+ RUNTIME_ASSERT(parent_wrapper->value()->IsSharedFunctionInfo());
+ RUNTIME_ASSERT(orig_wrapper->value()->IsSharedFunctionInfo());
+ RUNTIME_ASSERT(subst_wrapper->value()->IsSharedFunctionInfo());
- LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
- subst_wrapper);
-
+ LiveEdit::ReplaceRefToNestedFunction(
+ parent_wrapper, orig_wrapper, subst_wrapper);
return isolate->heap()->undefined_value();
}
@@ -13415,14 +13557,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
// array of groups of 3 numbers:
// (change_begin, change_end, change_end_new_position).
// Each group describes a change in text; groups are sorted by change_begin.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
+RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
+ RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_array));
- return LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
+ LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
+ return isolate->heap()->undefined_value();
}
@@ -13430,12 +13574,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
// checks that none of them have activations on stacks (of any thread).
// Returns an array of the same length with corresponding results of
// LiveEdit::FunctionPatchabilityStatus type.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
+RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
+ RUNTIME_ASSERT(shared_array->length()->IsSmi());
+ int array_length = Smi::cast(shared_array->length())->value();
+ for (int i = 0; i < array_length; i++) {
+ Handle<Object> element =
+ Object::GetElement(isolate, shared_array, i).ToHandleChecked();
+ RUNTIME_ASSERT(
+ element->IsJSValue() &&
+ Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo());
+ }
return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
}
@@ -13444,9 +13597,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
// Compares 2 strings line-by-line, then token-wise and returns diff in form
// of JSArray of triplets (pos1, pos1_end, pos2_end) describing list
// of diff chunks.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
+RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
@@ -13457,17 +13610,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
// Restarts a call frame and completely drops all frames above.
// Returns true if successful. Otherwise returns undefined or an error message.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
+RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 2);
+ CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
+ RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
- // Check arguments.
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
Heap* heap = isolate->heap();
@@ -13496,9 +13645,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
// A testing entry. Returns the statement position which is closest to
// source_position.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
+RUNTIME_FUNCTION(Runtime_GetFunctionCodePositionFromSource) {
HandleScope scope(isolate);
- CHECK(isolate->debugger()->live_edit_enabled());
+ CHECK(isolate->debug()->live_edit_enabled());
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
@@ -13534,63 +13683,59 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
// Calls specified function with or without entering the debugger.
// This is used in unit tests to run code as if the debugger were entered or
// simply to have a stack with a C++ frame in the middle.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
+RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1);
- Handle<Object> result;
- bool pending_exception;
- {
- if (without_debugger) {
- result = Execution::Call(isolate,
- function,
- isolate->global_object(),
- 0,
- NULL,
- &pending_exception);
- } else {
- EnterDebugger enter_debugger(isolate);
- result = Execution::Call(isolate,
- function,
- isolate->global_object(),
- 0,
- NULL,
- &pending_exception);
- }
- }
- if (!pending_exception) {
- return *result;
+ MaybeHandle<Object> maybe_result;
+ if (without_debugger) {
+ maybe_result = Execution::Call(isolate,
+ function,
+ isolate->global_object(),
+ 0,
+ NULL);
} else {
- return Failure::Exception();
+ DebugScope debug_scope(isolate->debug());
+ maybe_result = Execution::Call(isolate,
+ function,
+ isolate->global_object(),
+ 0,
+ NULL);
}
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, maybe_result);
+ return *result;
}
// Sets a v8 flag.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
+RUNTIME_FUNCTION(Runtime_SetFlags) {
SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, arg, 0);
SmartArrayPointer<char> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- FlagList::SetFlagsFromString(*flags, StrLength(*flags));
+ FlagList::SetFlagsFromString(flags.get(), StrLength(flags.get()));
return isolate->heap()->undefined_value();
}
// Performs a GC.
// Presently, it only does a full GC.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
+RUNTIME_FUNCTION(Runtime_CollectGarbage) {
SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
return isolate->heap()->undefined_value();
}
// Gets the current heap usage.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
+RUNTIME_FUNCTION(Runtime_GetHeapUsage) {
SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
if (!Smi::IsValid(usage)) {
return *isolate->factory()->NewNumberFromInt(usage);
@@ -13598,12 +13743,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
return Smi::FromInt(usage);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
#ifdef V8_I18N_SUPPORT
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CanonicalizeLanguageTag) {
+RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0);
@@ -13620,7 +13764,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CanonicalizeLanguageTag) {
uloc_forLanguageTag(*locale_id, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &error);
if (U_FAILURE(error) || icu_length == 0) {
- return isolate->heap()->AllocateStringFromOneByte(CStrVector(kInvalidTag));
+ return *factory->NewStringFromAsciiChecked(kInvalidTag);
}
char result[ULOC_FULLNAME_CAPACITY];
@@ -13629,15 +13773,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CanonicalizeLanguageTag) {
uloc_toLanguageTag(icu_result, result, ULOC_FULLNAME_CAPACITY, TRUE, &error);
if (U_FAILURE(error)) {
- return isolate->heap()->AllocateStringFromOneByte(CStrVector(kInvalidTag));
+ return *factory->NewStringFromAsciiChecked(kInvalidTag);
}
- return isolate->heap()->AllocateStringFromOneByte(CStrVector(result));
+ return *factory->NewStringFromAsciiChecked(result);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AvailableLocalesOf) {
+RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
@@ -13658,7 +13803,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AvailableLocalesOf) {
UErrorCode error = U_ZERO_ERROR;
char result[ULOC_FULLNAME_CAPACITY];
Handle<JSObject> locales =
- isolate->factory()->NewJSObject(isolate->object_function());
+ factory->NewJSObject(isolate->object_function());
for (int32_t i = 0; i < count; ++i) {
const char* icu_name = available_locales[i].getName();
@@ -13671,11 +13816,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AvailableLocalesOf) {
continue;
}
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
locales,
- isolate->factory()->NewStringFromAscii(CStrVector(result)),
- isolate->factory()->NewNumber(i),
+ factory->NewStringFromAsciiChecked(result),
+ factory->NewNumber(i),
NONE));
}
@@ -13683,8 +13828,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AvailableLocalesOf) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultICULocale) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
ASSERT(args.length() == 0);
@@ -13696,35 +13842,38 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultICULocale) {
uloc_toLanguageTag(
default_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
- return isolate->heap()->AllocateStringFromOneByte(CStrVector(result));
+ return *factory->NewStringFromAsciiChecked(result);
}
- return isolate->heap()->AllocateStringFromOneByte(CStrVector("und"));
+ return *factory->NewStringFromStaticAscii("und");
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLanguageTagVariants) {
+RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) {
HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, input, 0);
uint32_t length = static_cast<uint32_t>(input->length()->Number());
- Handle<FixedArray> output = isolate->factory()->NewFixedArray(length);
- Handle<Name> maximized =
- isolate->factory()->NewStringFromAscii(CStrVector("maximized"));
- Handle<Name> base =
- isolate->factory()->NewStringFromAscii(CStrVector("base"));
+ // Set some limit to prevent fuzz tests from going OOM.
+ // Can be bumped when callers' requirements change.
+ RUNTIME_ASSERT(length < 100);
+ Handle<FixedArray> output = factory->NewFixedArray(length);
+ Handle<Name> maximized = factory->NewStringFromStaticAscii("maximized");
+ Handle<Name> base = factory->NewStringFromStaticAscii("base");
for (unsigned int i = 0; i < length; ++i) {
- MaybeObject* maybe_string = input->GetElement(isolate, i);
- Object* locale_id;
- if (!maybe_string->ToObject(&locale_id) || !locale_id->IsString()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
+ Handle<Object> locale_id;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, locale_id, Object::GetElement(isolate, input, i));
+ if (!locale_id->IsString()) {
+ return isolate->Throw(*factory->illegal_argument_string());
}
v8::String::Utf8Value utf8_locale_id(
- v8::Utils::ToLocal(Handle<String>(String::cast(locale_id))));
+ v8::Utils::ToLocal(Handle<String>::cast(locale_id)));
UErrorCode error = U_ZERO_ERROR;
@@ -13735,7 +13884,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLanguageTagVariants) {
uloc_forLanguageTag(*utf8_locale_id, icu_locale, ULOC_FULLNAME_CAPACITY,
&icu_locale_length, &error);
if (U_FAILURE(error) || icu_locale_length == 0) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
+ return isolate->Throw(*factory->illegal_argument_string());
}
// Maximize the locale.
@@ -13768,33 +13917,113 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLanguageTagVariants) {
icu_base_locale, base_locale, ULOC_FULLNAME_CAPACITY, FALSE, &error);
if (U_FAILURE(error)) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
+ return isolate->Throw(*factory->illegal_argument_string());
}
- Handle<JSObject> result =
- isolate->factory()->NewJSObject(isolate->object_function());
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
+ Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
result,
maximized,
- isolate->factory()->NewStringFromAscii(CStrVector(base_max_locale)),
+ factory->NewStringFromAsciiChecked(base_max_locale),
NONE));
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
result,
base,
- isolate->factory()->NewStringFromAscii(CStrVector(base_locale)),
+ factory->NewStringFromAsciiChecked(base_locale),
NONE));
output->set(i, *result);
}
- Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(output);
+ Handle<JSArray> result = factory->NewJSArrayWithElements(output);
result->set_length(Smi::FromInt(length));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateDateTimeFormat) {
+RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+
+ if (!input->IsJSObject()) return isolate->heap()->false_value();
+ Handle<JSObject> obj = Handle<JSObject>::cast(input);
+
+ Handle<String> marker = isolate->factory()->intl_initialized_marker_string();
+ Handle<Object> tag(obj->GetHiddenProperty(marker), isolate);
+ return isolate->heap()->ToBoolean(!tag->IsTheHole());
+}
+
+
+RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, expected_type, 1);
+
+ if (!input->IsJSObject()) return isolate->heap()->false_value();
+ Handle<JSObject> obj = Handle<JSObject>::cast(input);
+
+ Handle<String> marker = isolate->factory()->intl_initialized_marker_string();
+ Handle<Object> tag(obj->GetHiddenProperty(marker), isolate);
+ return isolate->heap()->ToBoolean(
+ tag->IsString() && String::cast(*tag)->Equals(*expected_type));
+}
+
+
+RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, impl, 2);
+
+ Handle<String> marker = isolate->factory()->intl_initialized_marker_string();
+ JSObject::SetHiddenProperty(input, marker, type);
+
+ marker = isolate->factory()->intl_impl_object_string();
+ JSObject::SetHiddenProperty(input, marker, impl);
+
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+
+ if (!input->IsJSObject()) {
+ Vector< Handle<Object> > arguments = HandleVector(&input, 1);
+ Handle<Object> type_error =
+ isolate->factory()->NewTypeError("not_intl_object", arguments);
+ return isolate->Throw(*type_error);
+ }
+
+ Handle<JSObject> obj = Handle<JSObject>::cast(input);
+
+ Handle<String> marker = isolate->factory()->intl_impl_object_string();
+ Handle<Object> impl(obj->GetHiddenProperty(marker), isolate);
+ if (impl->IsTheHole()) {
+ Vector< Handle<Object> > arguments = HandleVector(&obj, 1);
+ Handle<Object> type_error =
+ isolate->factory()->NewTypeError("not_intl_object", arguments);
+ return isolate->Throw(*type_error);
+ }
+ return *impl;
+}
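+
+// The four Intl helpers above share one convention: both the initialization
+// marker and the implementation object are stored as hidden properties, so
+// they stay invisible to user script while living on the object itself.
+// The round trip, condensed from the code above:
+//
+//   // Write side (MarkAsInitializedIntlObjectOfType):
+//   Handle<String> marker =
+//       isolate->factory()->intl_initialized_marker_string();
+//   JSObject::SetHiddenProperty(input, marker, type);
+//
+//   // Read side (IsInitializedIntlObject): an absent hidden property reads
+//   // back as the hole, hence the !tag->IsTheHole() test.
+//   Handle<Object> tag(obj->GetHiddenProperty(marker), isolate);
+//   bool initialized = !tag->IsTheHole();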
+
+
+RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -13807,13 +14036,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateDateTimeFormat) {
I18N::GetTemplate(isolate);
// Create an empty object wrapper.
- bool has_pending_exception = false;
- Handle<JSObject> local_object = Execution::InstantiateObject(
- date_format_template, &has_pending_exception);
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ Handle<JSObject> local_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, local_object,
+ Execution::InstantiateObject(date_format_template));
// Set date time formatter as internal field of the resulting JS object.
icu::SimpleDateFormat* date_format = DateFormat::InitializeDateTimeFormat(
@@ -13823,23 +14049,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateDateTimeFormat) {
local_object->SetInternalField(0, reinterpret_cast<Smi*>(date_format));
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
local_object,
- isolate->factory()->NewStringFromAscii(CStrVector("dateFormat")),
- isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ isolate->factory()->NewStringFromStaticAscii("dateFormat"),
+ isolate->factory()->NewStringFromStaticAscii("valid"),
NONE));
// Make object handle weak so we can delete the date format once GC kicks in.
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
- NULL,
+ GlobalHandles::MakeWeak(wrapper.location(),
+ reinterpret_cast<void*>(wrapper.location()),
DateFormat::DeleteDateFormat);
return *local_object;
}
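
The MakeWeak call now passes the handle location itself as the callback
parameter, so the finalizer can free the ICU object and dispose of the global
handle in one place. A hedged sketch of the finalizer's likely shape under the
v3.26-era WeakCallbackData API (the real version lives in i18n.cc; the field
access details here are assumptions):

  static void DeleteDateFormat(
      const v8::WeakCallbackData<v8::Value, void>& data) {
    // Internal field 0 holds the icu::SimpleDateFormat* stored above.
    v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(data.GetValue());
    delete reinterpret_cast<icu::SimpleDateFormat*>(
        obj->GetAlignedPointerFromInternalField(0));
    // The parameter is the handle location passed to MakeWeak; destroy it
    // so the global handle does not leak.
    GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
  }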
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateFormat) {
+RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -13847,13 +14073,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateFormat) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
- bool has_pending_exception = false;
- Handle<Object> value =
- Execution::ToNumber(isolate, date, &has_pending_exception);
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ Handle<Object> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value, Execution::ToNumber(isolate, date));
icu::SimpleDateFormat* date_format =
DateFormat::UnpackDateFormat(isolate, date_format_holder);
@@ -13862,14 +14084,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateFormat) {
icu::UnicodeString result;
date_format->format(value->Number(), result);
- return *isolate->factory()->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length()));
+ Handle<String> result_str;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result_str,
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length())));
+ return *result_str;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateParse) {
+RUNTIME_FUNCTION(Runtime_InternalDateParse) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -13887,19 +14113,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateParse) {
UDate date = date_format->parse(u_date, status);
if (U_FAILURE(status)) return isolate->heap()->undefined_value();
- bool has_pending_exception = false;
- Handle<JSDate> result = Handle<JSDate>::cast(
- Execution::NewDate(
- isolate, static_cast<double>(date), &has_pending_exception));
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Execution::NewDate(isolate, static_cast<double>(date)));
+ ASSERT(result->IsJSDate());
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateNumberFormat) {
+RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -13912,13 +14135,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateNumberFormat) {
I18N::GetTemplate(isolate);
// Create an empty object wrapper.
- bool has_pending_exception = false;
- Handle<JSObject> local_object = Execution::InstantiateObject(
- number_format_template, &has_pending_exception);
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ Handle<JSObject> local_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, local_object,
+ Execution::InstantiateObject(number_format_template));
// Set number formatter as internal field of the resulting JS object.
icu::DecimalFormat* number_format = NumberFormat::InitializeNumberFormat(
@@ -13928,22 +14148,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateNumberFormat) {
local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
local_object,
- isolate->factory()->NewStringFromAscii(CStrVector("numberFormat")),
- isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ isolate->factory()->NewStringFromStaticAscii("numberFormat"),
+ isolate->factory()->NewStringFromStaticAscii("valid"),
NONE));
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
- NULL,
+ GlobalHandles::MakeWeak(wrapper.location(),
+ reinterpret_cast<void*>(wrapper.location()),
NumberFormat::DeleteNumberFormat);
return *local_object;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberFormat) {
+RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -13951,13 +14171,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberFormat) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
- bool has_pending_exception = false;
- Handle<Object> value = Execution::ToNumber(
- isolate, number, &has_pending_exception);
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ Handle<Object> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value, Execution::ToNumber(isolate, number));
icu::DecimalFormat* number_format =
NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
@@ -13966,14 +14182,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberFormat) {
icu::UnicodeString result;
number_format->format(value->Number(), result);
- return *isolate->factory()->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length()));
+ Handle<String> result_str;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result_str,
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length())));
+ return *result_str;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberParse) {
+RUNTIME_FUNCTION(Runtime_InternalNumberParse) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -14012,7 +14232,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberParse) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCollator) {
+RUNTIME_FUNCTION(Runtime_CreateCollator) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -14024,13 +14244,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCollator) {
Handle<ObjectTemplateInfo> collator_template = I18N::GetTemplate(isolate);
// Create an empty object wrapper.
- bool has_pending_exception = false;
- Handle<JSObject> local_object = Execution::InstantiateObject(
- collator_template, &has_pending_exception);
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ Handle<JSObject> local_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, local_object, Execution::InstantiateObject(collator_template));
// Set collator as internal field of the resulting JS object.
icu::Collator* collator = Collator::InitializeCollator(
@@ -14040,22 +14256,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCollator) {
local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator));
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
local_object,
- isolate->factory()->NewStringFromAscii(CStrVector("collator")),
- isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ isolate->factory()->NewStringFromStaticAscii("collator"),
+ isolate->factory()->NewStringFromStaticAscii("valid"),
NONE));
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
- NULL,
+ GlobalHandles::MakeWeak(wrapper.location(),
+ reinterpret_cast<void*>(wrapper.location()),
Collator::DeleteCollator);
return *local_object;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalCompare) {
+RUNTIME_FUNCTION(Runtime_InternalCompare) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -14083,7 +14299,42 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalCompare) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateBreakIterator) {
+RUNTIME_FUNCTION(Runtime_StringNormalize) {
+ HandleScope scope(isolate);
+ static const UNormalizationMode normalizationForms[] =
+ { UNORM_NFC, UNORM_NFD, UNORM_NFKC, UNORM_NFKD };
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, stringValue, 0);
+ CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]);
+ RUNTIME_ASSERT(form_id >= 0 &&
+ static_cast<size_t>(form_id) < ARRAY_SIZE(normalizationForms));
+
+ v8::String::Value string_value(v8::Utils::ToLocal(stringValue));
+ const UChar* u_value = reinterpret_cast<const UChar*>(*string_value);
+
+ // TODO(mnita): check Normalizer2 (not available in ICU 46)
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString result;
+ icu::Normalizer::normalize(u_value, normalizationForms[form_id], 0,
+ result, status);
+ if (U_FAILURE(status)) {
+ return isolate->heap()->undefined_value();
+ }
+
+ Handle<String> result_str;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result_str,
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length())));
+ return *result_str;
+}
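+
+// Runtime_StringNormalize maps a small integer from the JS caller onto the
+// four Unicode normalization forms via the normalizationForms table, so
+// index 0..3 selects NFC, NFD, NFKC, NFKD respectively. For reference, a
+// self-contained ICU sketch of the same operation (assumes the
+// icu::Normalizer API used above; later ICU releases prefer Normalizer2):
+//
+//   #include <unicode/normlzr.h>
+//   #include <unicode/unistr.h>
+//
+//   // Normalize a string to NFC, mirroring the runtime function above.
+//   bool NormalizeNFC(const icu::UnicodeString& input,
+//                     icu::UnicodeString* out) {
+//     UErrorCode status = U_ZERO_ERROR;
+//     icu::Normalizer::normalize(input, UNORM_NFC, 0 /* options */,
+//                                *out, status);
+//     return U_SUCCESS(status);
+//   }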
+
+
+RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -14096,13 +14347,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateBreakIterator) {
I18N::GetTemplate2(isolate);
// Create an empty object wrapper.
- bool has_pending_exception = false;
- Handle<JSObject> local_object = Execution::InstantiateObject(
- break_iterator_template, &has_pending_exception);
- if (has_pending_exception) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ Handle<JSObject> local_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, local_object,
+ Execution::InstantiateObject(break_iterator_template));
// Set break iterator as internal field of the resulting JS object.
icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator(
@@ -14114,24 +14362,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateBreakIterator) {
// Make sure that the pointer to adopted text is NULL.
local_object->SetInternalField(1, reinterpret_cast<Smi*>(NULL));
- RETURN_IF_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
local_object,
- isolate->factory()->NewStringFromAscii(CStrVector("breakIterator")),
- isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ isolate->factory()->NewStringFromStaticAscii("breakIterator"),
+ isolate->factory()->NewStringFromStaticAscii("valid"),
NONE));
// Make object handle weak so we can delete the break iterator once GC kicks
// in.
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
- NULL,
+ GlobalHandles::MakeWeak(wrapper.location(),
+ reinterpret_cast<void*>(wrapper.location()),
BreakIterator::DeleteBreakIterator);
return *local_object;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorAdoptText) {
+RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -14158,7 +14406,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorAdoptText) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorFirst) {
+RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -14173,7 +14421,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorFirst) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorNext) {
+RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -14188,7 +14436,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorNext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorCurrent) {
+RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -14203,7 +14451,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorCurrent) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorBreakType) {
+RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -14220,17 +14468,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorBreakType) {
int32_t status = rule_based_iterator->getRuleStatus();
// Keep return values in sync with JavaScript BreakType enum.
if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
- return *isolate->factory()->NewStringFromAscii(CStrVector("none"));
+ return *isolate->factory()->NewStringFromStaticAscii("none");
} else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- return *isolate->factory()->NewStringFromAscii(CStrVector("number"));
+ return *isolate->factory()->NewStringFromStaticAscii("number");
} else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
- return *isolate->factory()->NewStringFromAscii(CStrVector("letter"));
+ return *isolate->factory()->NewStringFromStaticAscii("letter");
} else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
- return *isolate->factory()->NewStringFromAscii(CStrVector("kana"));
+ return *isolate->factory()->NewStringFromStaticAscii("kana");
} else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
- return *isolate->factory()->NewStringFromAscii(CStrVector("ideo"));
+ return *isolate->factory()->NewStringFromStaticAscii("ideo");
} else {
- return *isolate->factory()->NewStringFromAscii(CStrVector("unknown"));
+ return *isolate->factory()->NewStringFromStaticAscii("unknown");
}
}
#endif // V8_I18N_SUPPORT
@@ -14249,8 +14497,6 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
Handle<Script> script;
Factory* factory = script_name->GetIsolate()->factory();
Heap* heap = script_name->GetHeap();
- heap->EnsureHeapIsIterable();
- DisallowHeapAllocation no_allocation_during_heap_iteration;
HeapIterator iterator(heap);
HeapObject* obj = NULL;
while (script.is_null() && ((obj = iterator.next()) != NULL)) {
@@ -14268,14 +14514,14 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
if (script.is_null()) return factory->undefined_value();
// Return the script found.
- return GetScriptWrapper(script);
+ return Script::GetWrapper(script);
}
// Get the script object from script data. NOTE: regarding performance,
// see the NOTE for GetScriptFromScriptData.
// args[0]: script data for the script to find the source for
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
+RUNTIME_FUNCTION(Runtime_GetScript) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -14292,11 +14538,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
// Collect the raw data for a stack trace. Returns an array of
// 4-element segments, each containing a receiver, function, code and
// native code offset.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
+RUNTIME_FUNCTION(Runtime_CollectStackTrace) {
HandleScope scope(isolate);
- ASSERT_EQ(args.length(), 3);
+ ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
- Handle<Object> caller = args.at<Object>(1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, caller, 1);
CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]);
// Optionally capture a more detailed stack trace for the message.
@@ -14308,12 +14554,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
// Retrieve the stack trace. This is the raw stack trace that has yet to
// be formatted. Since we only need this once, clear it afterwards.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAndClearOverflowedStackTrace) {
+RUNTIME_FUNCTION(Runtime_GetAndClearOverflowedStackTrace) {
HandleScope scope(isolate);
- ASSERT_EQ(args.length(), 1);
+ ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
Handle<String> key = isolate->factory()->hidden_stack_trace_string();
- Handle<Object> result(error_object->GetHiddenProperty(*key), isolate);
+ Handle<Object> result(error_object->GetHiddenProperty(key), isolate);
if (result->IsTheHole()) return isolate->heap()->undefined_value();
RUNTIME_ASSERT(result->IsJSArray() || result->IsUndefined());
JSObject::DeleteHiddenProperty(error_object, key);
@@ -14322,22 +14568,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAndClearOverflowedStackTrace) {
// Returns V8 version as a string.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
- SealHandleScope shs(isolate);
- ASSERT_EQ(args.length(), 0);
+RUNTIME_FUNCTION(Runtime_GetV8Version) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
const char* version_string = v8::V8::GetVersion();
- return isolate->heap()->AllocateStringFromOneByte(CStrVector(version_string),
- NOT_TENURED);
+ return *isolate->factory()->NewStringFromAsciiChecked(version_string);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
+RUNTIME_FUNCTION(Runtime_Abort) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 2);
- OS::PrintError("abort: %s\n",
- reinterpret_cast<char*>(args[0]) + args.smi_at(1));
+ ASSERT(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ const char* message = GetBailoutReason(
+ static_cast<BailoutReason>(message_id));
+ OS::PrintError("abort: %s\n", message);
isolate->PrintStack(stderr);
OS::Abort();
UNREACHABLE();
@@ -14345,11 +14592,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AbortJS) {
+RUNTIME_FUNCTION(Runtime_AbortJS) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
- OS::PrintError("abort: %s\n", *message->ToCString());
+ OS::PrintError("abort: %s\n", message->ToCString().get());
isolate->PrintStack(stderr);
OS::Abort();
UNREACHABLE();
@@ -14357,16 +14604,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AbortJS) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FlattenString) {
+RUNTIME_FUNCTION(Runtime_FlattenString) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
- FlattenString(str);
- return isolate->heap()->undefined_value();
+ return *String::Flatten(str);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyContextDisposed) {
+RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
isolate->heap()->NotifyContextDisposed();
@@ -14374,49 +14620,78 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyContextDisposed) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MigrateInstance) {
+RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
+ RUNTIME_ASSERT((index->value() & 1) == 1);
+ FieldIndex field_index =
+ FieldIndex::ForLoadByFieldIndex(object->map(), index->value());
+ if (field_index.is_inobject()) {
+ RUNTIME_ASSERT(field_index.property_index() <
+ object->map()->inobject_properties());
+ } else {
+ RUNTIME_ASSERT(field_index.outobject_array_index() <
+ object->properties()->length());
+ }
+ Handle<Object> raw_value(object->RawFastPropertyAt(field_index), isolate);
+ RUNTIME_ASSERT(raw_value->IsNumber() || raw_value->IsUninitialized());
+ return *Object::NewStorageFor(isolate, raw_value, Representation::Double());
+}
+
+
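The RUNTIME_ASSERT above inspects only the low bit of the Smi-encoded index. As a hedged illustration of that encoding (only the "bit 0 marks a mutable-double load" part is visible in this hunk; the shift and the in-object/backing-store split are assumptions):

// Hypothetical decode of the load-by-field-index Smi checked above. Only
// the low bit (asserted to be 1 for mutable-double loads) is confirmed by
// this hunk; the rest of the layout is assumed for illustration.
struct DemoFieldIndex {
  bool is_double;  // bit 0
  int property;    // remaining bits (assumed layout)
};

static DemoFieldIndex DecodeLoadByFieldIndex(int raw) {
  DemoFieldIndex decoded;
  decoded.is_double = (raw & 1) != 0;
  decoded.property = raw >> 1;  // V8's FieldIndex additionally distinguishes
                                // in-object fields from the properties array
  return decoded;
}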
+RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
if (!object->IsJSObject()) return Smi::FromInt(0);
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
if (!js_object->map()->is_deprecated()) return Smi::FromInt(0);
- JSObject::MigrateInstance(js_object);
+ // This call must not cause lazy deopts, because it's called from deferred
+ // code where we can't handle lazy deopts for lack of a suitable bailout
+ // ID. So we just try migration and signal failure if necessary,
+ // which will also trigger a deopt.
+ if (!JSObject::TryMigrateInstance(js_object)) return Smi::FromInt(0);
return *object;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
+RUNTIME_FUNCTION(RuntimeHidden_GetFromCache) {
SealHandleScope shs(isolate);
// This is only called from codegen, so checks might be more lax.
CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
- Object* key = args[1];
+ CONVERT_ARG_CHECKED(Object, key, 1);
- int finger_index = cache->finger_index();
- Object* o = cache->get(finger_index);
- if (o == key) {
- // The fastest case: hit the same place again.
- return cache->get(finger_index + 1);
- }
+ {
+ DisallowHeapAllocation no_alloc;
- for (int i = finger_index - 2;
- i >= JSFunctionResultCache::kEntriesIndex;
- i -= 2) {
- o = cache->get(i);
+ int finger_index = cache->finger_index();
+ Object* o = cache->get(finger_index);
if (o == key) {
- cache->set_finger_index(i);
- return cache->get(i + 1);
+ // The fastest case: hit the same place again.
+ return cache->get(finger_index + 1);
+ }
+
+ for (int i = finger_index - 2;
+ i >= JSFunctionResultCache::kEntriesIndex;
+ i -= 2) {
+ o = cache->get(i);
+ if (o == key) {
+ cache->set_finger_index(i);
+ return cache->get(i + 1);
+ }
}
- }
- int size = cache->size();
- ASSERT(size <= cache->length());
+ int size = cache->size();
+ ASSERT(size <= cache->length());
- for (int i = size - 2; i > finger_index; i -= 2) {
- o = cache->get(i);
- if (o == key) {
- cache->set_finger_index(i);
- return cache->get(i + 1);
+ for (int i = size - 2; i > finger_index; i -= 2) {
+ o = cache->get(i);
+ if (o == key) {
+ cache->set_finger_index(i);
+ return cache->get(i + 1);
+ }
}
}
@@ -14434,14 +14709,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
isolate);
// This handle is neither shared nor used later, so it's safe.
Handle<Object> argv[] = { key_handle };
- bool pending_exception;
- value = Execution::Call(isolate,
- factory,
- receiver,
- ARRAY_SIZE(argv),
- argv,
- &pending_exception);
- if (pending_exception) return Failure::Exception();
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value,
+ Execution::Call(isolate, factory, receiver, ARRAY_SIZE(argv), argv));
}
#ifdef VERIFY_HEAP
@@ -14451,8 +14721,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
#endif
// Function invocation may have cleared the cache. Reread all the data.
- finger_index = cache_handle->finger_index();
- size = cache_handle->size();
+ int finger_index = cache_handle->finger_index();
+ int size = cache_handle->size();
// If we have spare room, put new data into it; otherwise evict the entry
// just past the finger, which is likely to be the least recently used.
@@ -14485,15 +14755,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
}
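The finger-based lookup above is a small LRU-ish scheme: a "finger" index remembers the last hit so repeated lookups of the same key cost a single compare, and eviction targets the entry just past the finger. An illustrative standalone analogue of that strategy (all names and the int key/value types are invented; V8 stores the pairs in a FixedArray with extra bookkeeping slots):

#include <vector>

// Illustrative analogue of JSFunctionResultCache's lookup: (key, value)
// pairs stored flat, with a finger remembering the last hit.
class DemoFingerCache {
 public:
  static const int kEmpty = -1;

  explicit DemoFingerCache(int capacity) : slots_(2 * capacity, kEmpty) {}

  // Returns the cached value for |key|, or kEmpty on a miss.
  int Lookup(int key) {
    if (size_ == 0) return kEmpty;
    // The fastest case: hit the same place again.
    if (slots_[finger_] == key) return slots_[finger_ + 1];
    for (int i = finger_ - 2; i >= 0; i -= 2) {     // entries before finger
      if (slots_[i] == key) { finger_ = i; return slots_[i + 1]; }
    }
    for (int i = size_ - 2; i > finger_; i -= 2) {  // entries after finger
      if (slots_[i] == key) { finger_ = i; return slots_[i + 1]; }
    }
    return kEmpty;
  }

  // Fills spare room if any; otherwise evicts the entry just past the
  // finger, which is likely to be the least recently used.
  void Insert(int key, int value) {
    int slot;
    if (size_ < static_cast<int>(slots_.size())) {
      slot = size_;
      size_ += 2;
    } else {
      slot = finger_ + 2;
      if (slot >= static_cast<int>(slots_.size())) slot = 0;
    }
    slots_[slot] = key;
    slots_[slot + 1] = value;
    finger_ = slot;
  }

 private:
  std::vector<int> slots_;  // key, value, key, value, ...
  int size_ = 0;            // slots in use
  int finger_ = 0;
};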
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
+RUNTIME_FUNCTION(Runtime_MessageGetStartPosition) {
SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return Smi::FromInt(message->start_position());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
+RUNTIME_FUNCTION(Runtime_MessageGetScript) {
SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return message->script();
}
@@ -14502,14 +14774,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
#ifdef DEBUG
// ListNatives is ONLY used by fuzz-natives.js in debug mode.
// Exclude the code in release mode.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
+RUNTIME_FUNCTION(Runtime_ListNatives) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
#define COUNT_ENTRY(Name, argc, ressize) + 1
int entry_count = 0
RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
+ RUNTIME_HIDDEN_FUNCTION_LIST(COUNT_ENTRY)
INLINE_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(COUNT_ENTRY);
+ INLINE_OPTIMIZED_FUNCTION_LIST(COUNT_ENTRY);
#undef COUNT_ENTRY
Factory* factory = isolate->factory();
Handle<FixedArray> elements = factory->NewFixedArray(entry_count);
@@ -14521,11 +14794,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
Handle<String> name; \
/* Inline runtime functions have an underscore in front of the name. */ \
if (inline_runtime_functions) { \
- name = factory->NewStringFromAscii( \
- Vector<const char>("_" #Name, StrLength("_" #Name))); \
+ name = factory->NewStringFromStaticAscii("_" #Name); \
} else { \
- name = factory->NewStringFromAscii( \
- Vector<const char>(#Name, StrLength(#Name))); \
+ name = factory->NewStringFromStaticAscii(#Name); \
} \
Handle<FixedArray> pair_elements = factory->NewFixedArray(2); \
pair_elements->set(0, *name); \
@@ -14535,9 +14806,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
}
inline_runtime_functions = false;
RUNTIME_FUNCTION_LIST(ADD_ENTRY)
+ INLINE_OPTIMIZED_FUNCTION_LIST(ADD_ENTRY)
+ // Calling hidden runtime functions should just throw.
+ RUNTIME_HIDDEN_FUNCTION_LIST(ADD_ENTRY)
inline_runtime_functions = true;
INLINE_FUNCTION_LIST(ADD_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
#undef ADD_ENTRY
ASSERT_EQ(index, entry_count);
Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
@@ -14546,29 +14819,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
#endif
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, format, 0);
- CONVERT_ARG_CHECKED(JSArray, elms, 1);
- DisallowHeapAllocation no_gc;
- String::FlatContent format_content = format->GetFlatContent();
- RUNTIME_ASSERT(format_content.IsAscii());
- Vector<const uint8_t> chars = format_content.ToOneByteVector();
- isolate->logger()->LogRuntime(Vector<const char>::cast(chars), elms);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
+RUNTIME_FUNCTION(Runtime_IS_VAR) {
UNREACHABLE(); // implemented as macro in the parser
return NULL;
}
#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
- RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ RUNTIME_FUNCTION(Runtime_Has##Name) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
return isolate->heap()->ToBoolean(obj->Has##Name()); \
}
@@ -14578,24 +14837,37 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(NonStrictArgumentsElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalByteElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedByteElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalShortElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedShortElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalIntElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedIntElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalFloatElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
// Properties test sitting with elements tests - not fooling anyone.
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
+#define TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, size) \
+ RUNTIME_FUNCTION(Runtime_HasExternal##Type##Elements) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj->HasExternal##Type##Elements()); \
+ }
+
+TYPED_ARRAYS(TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
+
+#undef TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+
+
+#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, s) \
+ RUNTIME_FUNCTION(Runtime_HasFixed##Type##Elements) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj->HasFixed##Type##Elements()); \
+ }
+
+TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
+
+#undef FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+
+
+RUNTIME_FUNCTION(Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj1, 0);
@@ -14604,69 +14876,64 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessCheckNeeded) {
+RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(HeapObject, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsAccessCheckNeeded());
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsJSGlobalProxy());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
+RUNTIME_FUNCTION(Runtime_IsObserved) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSReceiver()) return isolate->heap()->false_value();
- JSReceiver* obj = JSReceiver::cast(args[0]);
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
- ASSERT(proto->IsJSGlobalObject());
- obj = JSReceiver::cast(proto);
- }
+ CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
+ ASSERT(!obj->IsJSGlobalProxy() || !obj->map()->is_observed());
return isolate->heap()->ToBoolean(obj->map()->is_observed());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
+RUNTIME_FUNCTION(Runtime_SetIsObserved) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return isolate->heap()->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- obj = handle(JSReceiver::cast(proto));
- }
- if (obj->IsJSProxy())
- return isolate->heap()->undefined_value();
+ RUNTIME_ASSERT(!obj->IsJSGlobalProxy());
+ if (obj->IsJSProxy()) return isolate->heap()->undefined_value();
+ RUNTIME_ASSERT(!obj->map()->is_observed());
- ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
- Handle<JSObject>::cast(obj)->HasFastElements()));
ASSERT(obj->IsJSObject());
JSObject::SetObserved(Handle<JSObject>::cast(obj));
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetMicrotaskPending) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_BOOLEAN_ARG_CHECKED(new_state, 0);
- bool old_state = isolate->microtask_pending();
- isolate->set_microtask_pending(new_state);
- return isolate->heap()->ToBoolean(old_state);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
+ isolate->EnqueueMicrotask(microtask);
+ return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
+RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ isolate->RunMicrotasks();
+ return isolate->heap()->undefined_value();
+}
+
+
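EnqueueMicrotask and RunMicrotasks expose the isolate's microtask queue to JS, replacing the old boolean "microtask pending" flag. The observable contract is FIFO order with run-to-completion draining: tasks enqueued while draining run in the same drain. A minimal standalone analogue of that contract, not V8's implementation:

#include <deque>
#include <functional>
#include <utility>

// Illustrative analogue of EnqueueMicrotask/RunMicrotasks: FIFO order,
// and RunMicrotasks drains to completion, including microtasks enqueued
// by microtasks during the drain.
class DemoMicrotaskQueue {
 public:
  void Enqueue(std::function<void()> task) {
    queue_.push_back(std::move(task));
  }

  void RunMicrotasks() {
    while (!queue_.empty()) {
      std::function<void()> task = std::move(queue_.front());
      queue_.pop_front();
      task();  // may call Enqueue(); those tasks run before this returns
    }
  }

 private:
  std::deque<std::function<void()> > queue_;
};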
+RUNTIME_FUNCTION(Runtime_GetObservationState) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->observation_state();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) {
+RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
// TODO(adamk): Currently this runtime function is only called three times per
@@ -14676,60 +14943,87 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) {
isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
Handle<JSWeakMap> weakmap =
Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
- return WeakCollectionInitialize(isolate, weakmap);
+ return *WeakCollectionInitialize(isolate, weakmap);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- Object* object = args[0];
- if (object->IsJSGlobalProxy()) {
- object = object->GetPrototype(isolate);
- if (object->IsNull()) return isolate->heap()->undefined_value();
- }
- return object;
+static bool ContextsHaveSameOrigin(Handle<Context> context1,
+ Handle<Context> context2) {
+ return context1->security_token() == context2->security_token();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
+RUNTIME_FUNCTION(Runtime_ObserverObjectAndRecordHaveSameOrigin) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
- ASSERT(object->IsAccessCheckNeeded());
- Handle<Object> key = args.at<Object>(2);
- SaveContext save(isolate);
- isolate->set_context(observer->context());
- if (!isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- return isolate->heap()->false_value();
- }
- bool access_allowed = false;
- uint32_t index = 0;
- if (key->ToArrayIndex(&index) ||
- (key->IsString() && String::cast(*key)->AsArrayIndex(&index))) {
- access_allowed =
- isolate->MayIndexedAccess(*object, index, v8::ACCESS_GET) &&
- isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS);
- } else {
- access_allowed = isolate->MayNamedAccess(*object, *key, v8::ACCESS_GET) &&
- isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS);
- }
- return isolate->heap()->ToBoolean(access_allowed);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, record, 2);
+
+ Handle<Context> observer_context(observer->context()->native_context(),
+ isolate);
+ Handle<Context> object_context(object->GetCreationContext());
+ Handle<Context> record_context(record->GetCreationContext());
+
+ return isolate->heap()->ToBoolean(
+ ContextsHaveSameOrigin(object_context, observer_context) &&
+ ContextsHaveSameOrigin(object_context, record_context));
+}
+
+
+RUNTIME_FUNCTION(Runtime_ObjectWasCreatedInCurrentOrigin) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+
+ Handle<Context> creation_context(object->GetCreationContext(), isolate);
+ return isolate->heap()->ToBoolean(
+ ContextsHaveSameOrigin(creation_context, isolate->native_context()));
}
-static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
+RUNTIME_FUNCTION(Runtime_GetObjectContextObjectObserve) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+
+ Handle<Context> context(object->GetCreationContext(), isolate);
+ return context->native_object_observe();
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetObjectContextObjectGetNotifier) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+
+ Handle<Context> context(object->GetCreationContext(), isolate);
+ return context->native_object_get_notifier();
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetObjectContextNotifierPerformChange) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object_info, 0);
+
+ Handle<Context> context(object_info->GetCreationContext(), isolate);
+ return context->native_object_notifier_perform_change();
+}
+
+
+static Object* ArrayConstructorCommon(Isolate* isolate,
Handle<JSFunction> constructor,
Handle<AllocationSite> site,
Arguments* caller_args) {
+ Factory* factory = isolate->factory();
+
bool holey = false;
bool can_use_type_feedback = true;
if (caller_args->length() == 1) {
- Object* argument_one = (*caller_args)[0];
+ Handle<Object> argument_one = caller_args->at<Object>(0);
if (argument_one->IsSmi()) {
- int value = Smi::cast(argument_one)->value();
+ int value = Handle<Smi>::cast(argument_one)->value();
if (value < 0 || value >= JSObject::kInitialMaxFastElementArray) {
// The array is a dictionary in this case.
can_use_type_feedback = false;
@@ -14742,8 +15036,7 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
}
}
- JSArray* array;
- MaybeObject* maybe_array;
+ Handle<JSArray> array;
if (!site.is_null() && can_use_type_feedback) {
ElementsKind to_kind = site->GetElementsKind();
if (holey && !IsFastHoleyElementsKind(to_kind)) {
@@ -14752,27 +15045,39 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
site->SetElementsKind(to_kind);
}
- maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
- *constructor, site);
- if (!maybe_array->To(&array)) return maybe_array;
+ // We should allocate with an initial map that reflects the allocation site
+ // advice. Therefore we use AllocateJSObjectFromMap instead of passing
+ // the constructor.
+ Handle<Map> initial_map(constructor->initial_map(), isolate);
+ if (to_kind != initial_map->elements_kind()) {
+ initial_map = Map::AsElementsKind(initial_map, to_kind);
+ }
+
+ // If we don't care to track arrays of to_kind ElementsKind, then
+ // don't emit a memento for them.
+ Handle<AllocationSite> allocation_site;
+ if (AllocationSite::GetMode(to_kind) == TRACK_ALLOCATION_SITE) {
+ allocation_site = site;
+ }
+
+ array = Handle<JSArray>::cast(factory->NewJSObjectFromMap(
+ initial_map, NOT_TENURED, true, allocation_site));
} else {
- maybe_array = isolate->heap()->AllocateJSObject(*constructor);
- if (!maybe_array->To(&array)) return maybe_array;
+ array = Handle<JSArray>::cast(factory->NewJSObject(constructor));
+
// We might need to transition to holey
ElementsKind kind = constructor->initial_map()->elements_kind();
if (holey && !IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
- maybe_array = array->TransitionElementsKind(kind);
- if (maybe_array->IsFailure()) return maybe_array;
+ JSObject::TransitionElementsKind(array, kind);
}
}
- maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
- DONT_INITIALIZE_ARRAY_ELEMENTS);
- if (maybe_array->IsFailure()) return maybe_array;
+ factory->NewJSArrayStorage(array, 0, 0, DONT_INITIALIZE_ARRAY_ELEMENTS);
+
ElementsKind old_kind = array->GetElementsKind();
- maybe_array = ArrayConstructInitializeElements(array, caller_args);
- if (maybe_array->IsFailure()) return maybe_array;
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ArrayConstructInitializeElements(array, caller_args));
if (!site.is_null() &&
(old_kind != array->GetElementsKind() ||
!can_use_type_feedback)) {
@@ -14781,11 +15086,11 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
// We must mark the allocationsite as un-inlinable.
site->SetDoNotInlineCall();
}
- return array;
+ return *array;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
+RUNTIME_FUNCTION(RuntimeHidden_ArrayConstructor) {
HandleScope scope(isolate);
// If we get 2 arguments then they are the stub parameters (constructor, type
// info). If we get 4, then the first one is a pointer to the arguments
@@ -14810,10 +15115,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
Handle<AllocationSite> site;
if (!type_info.is_null() &&
- *type_info != isolate->heap()->undefined_value() &&
- Cell::cast(*type_info)->value()->IsAllocationSite()) {
- site = Handle<AllocationSite>(
- AllocationSite::cast(Cell::cast(*type_info)->value()), isolate);
+ *type_info != isolate->heap()->undefined_value()) {
+ site = Handle<AllocationSite>::cast(type_info);
ASSERT(!site->SitePointsToLiteral());
}
@@ -14824,7 +15127,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalArrayConstructor) {
+RUNTIME_FUNCTION(RuntimeHidden_InternalArrayConstructor) {
HandleScope scope(isolate);
Arguments empty_args(0, NULL);
bool no_caller_args = args.length() == 1;
@@ -14847,7 +15150,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalArrayConstructor) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MaxSmi) {
+RUNTIME_FUNCTION(Runtime_MaxSmi) {
+ ASSERT(args.length() == 0);
return Smi::FromInt(Smi::kMaxValue);
}
@@ -14860,46 +15164,56 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaxSmi) {
FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+#define FH(name, number_of_args, result_size) \
+ { Runtime::kHidden##name, Runtime::RUNTIME_HIDDEN, NULL, \
+ FUNCTION_ADDR(RuntimeHidden_##name), number_of_args, result_size },
+
+
#define I(name, number_of_args, result_size) \
{ Runtime::kInline##name, Runtime::INLINE, \
"_" #name, NULL, number_of_args, result_size },
+
+#define IO(name, number_of_args, result_size) \
+ { Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, \
+ "_" #name, FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+
+
static const Runtime::Function kIntrinsicFunctions[] = {
RUNTIME_FUNCTION_LIST(F)
+ INLINE_OPTIMIZED_FUNCTION_LIST(F)
+ RUNTIME_HIDDEN_FUNCTION_LIST(FH)
INLINE_FUNCTION_LIST(I)
- INLINE_RUNTIME_FUNCTION_LIST(I)
+ INLINE_OPTIMIZED_FUNCTION_LIST(IO)
};
+#undef IO
+#undef I
+#undef FH
+#undef F
+
-MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
- Object* dictionary) {
- ASSERT(dictionary != NULL);
- ASSERT(NameDictionary::cast(dictionary)->NumberOfElements() == 0);
+void Runtime::InitializeIntrinsicFunctionNames(Isolate* isolate,
+ Handle<NameDictionary> dict) {
+ ASSERT(dict->NumberOfElements() == 0);
+ HandleScope scope(isolate);
for (int i = 0; i < kNumFunctions; ++i) {
- Object* name_string;
- { MaybeObject* maybe_name_string =
- heap->InternalizeUtf8String(kIntrinsicFunctions[i].name);
- if (!maybe_name_string->ToObject(&name_string)) return maybe_name_string;
- }
- NameDictionary* name_dictionary = NameDictionary::cast(dictionary);
- { MaybeObject* maybe_dictionary = name_dictionary->Add(
- String::cast(name_string),
- Smi::FromInt(i),
- PropertyDetails(NONE, NORMAL, Representation::None()));
- if (!maybe_dictionary->ToObject(&dictionary)) {
- // Non-recoverable failure. Calling code must restart heap
- // initialization.
- return maybe_dictionary;
- }
- }
+ const char* name = kIntrinsicFunctions[i].name;
+ if (name == NULL) continue;
+ Handle<NameDictionary> new_dict = NameDictionary::Add(
+ dict,
+ isolate->factory()->InternalizeUtf8String(name),
+ Handle<Smi>(Smi::FromInt(i), isolate),
+ PropertyDetails(NONE, NORMAL, Representation::None()));
+ // The dictionary does not need to grow.
+ CHECK(new_dict.is_identical_to(dict));
}
- return dictionary;
}
const Runtime::Function* Runtime::FunctionForName(Handle<String> name) {
Heap* heap = name->GetHeap();
- int entry = heap->intrinsic_function_names()->FindEntry(*name);
+ int entry = heap->intrinsic_function_names()->FindEntry(name);
if (entry != kNotFound) {
Object* smi_index = heap->intrinsic_function_names()->ValueAt(entry);
int function_index = Smi::cast(smi_index)->value();
@@ -14913,26 +15227,4 @@ const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
return &(kIntrinsicFunctions[static_cast<int>(id)]);
}
-
-void Runtime::PerformGC(Object* result, Isolate* isolate) {
- Failure* failure = Failure::cast(result);
- if (failure->IsRetryAfterGC()) {
- if (isolate->heap()->new_space()->AddFreshPage()) {
- return;
- }
-
- // Try to do a garbage collection; ignore it if it fails. The C
- // entry stub will throw an out-of-memory exception in that case.
- isolate->heap()->CollectGarbage(failure->allocation_space(),
- "Runtime::PerformGC");
- } else {
- // Handle last resort GC and make sure to allow future allocations
- // to grow the heap without causing GCs (if possible).
- isolate->counters()->gc_last_resort_from_js()->Increment();
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
- "Runtime::PerformGC");
- }
-}
-
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/runtime.h b/chromium/v8/src/runtime.h
index 6a0358399d1..d6ed83000c7 100644
--- a/chromium/v8/src/runtime.h
+++ b/chromium/v8/src/runtime.h
@@ -1,35 +1,12 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_RUNTIME_H_
#define V8_RUNTIME_H_
-#include "allocation.h"
-#include "zone.h"
+#include "src/allocation.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -49,23 +26,22 @@ namespace internal {
F(GetProperty, 2, 1) \
F(KeyedGetProperty, 2, 1) \
F(DeleteProperty, 3, 1) \
- F(HasLocalProperty, 2, 1) \
+ F(HasOwnProperty, 2, 1) \
F(HasProperty, 2, 1) \
F(HasElement, 2, 1) \
F(IsPropertyEnumerable, 2, 1) \
F(GetPropertyNames, 1, 1) \
F(GetPropertyNamesFast, 1, 1) \
- F(GetLocalPropertyNames, 2, 1) \
- F(GetLocalElementNames, 1, 1) \
+ F(GetOwnPropertyNames, 2, 1) \
+ F(GetOwnElementNames, 1, 1) \
F(GetInterceptorInfo, 1, 1) \
F(GetNamedInterceptorPropertyNames, 1, 1) \
F(GetIndexedInterceptorElementNames, 1, 1) \
F(GetArgumentsProperty, 1, 1) \
F(ToFastProperties, 1, 1) \
F(FinishArrayPrototypeSetup, 1, 1) \
- F(SpecialArrayFunctions, 1, 1) \
- F(IsCallable, 1, 1) \
- F(IsClassicModeFunction, 1, 1) \
+ F(SpecialArrayFunctions, 0, 1) \
+ F(IsSloppyModeFunction, 1, 1) \
F(GetDefaultReceiver, 1, 1) \
\
F(GetPrototype, 1, 1) \
@@ -84,14 +60,6 @@ namespace internal {
F(Apply, 5, 1) \
F(GetFunctionDelegate, 1, 1) \
F(GetConstructorDelegate, 1, 1) \
- F(NewArgumentsFast, 3, 1) \
- F(NewStrictArgumentsFast, 3, 1) \
- F(LazyCompile, 1, 1) \
- F(LazyRecompile, 1, 1) \
- F(ConcurrentRecompile, 1, 1) \
- F(TryInstallRecompiledCode, 1, 1) \
- F(NotifyDeoptimized, 1, 1) \
- F(NotifyStubFailure, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
@@ -101,19 +69,18 @@ namespace internal {
F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
F(UnblockConcurrentRecompilation, 0, 1) \
- F(CompileForOnStackReplacement, 2, 1) \
- F(SetAllocationTimeout, 2, 1) \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
+ F(CompileForOnStackReplacement, 1, 1) \
+ F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(SetNativeFlag, 1, 1) \
F(SetInlineBuiltinFlag, 1, 1) \
F(StoreArrayLiteralElement, 5, 1) \
- F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
+ F(DebugPromiseHandlePrologue, 1, 1) \
+ F(DebugPromiseHandleEpilogue, 0, 1) \
F(FlattenString, 1, 1) \
- F(MigrateInstance, 1, 1) \
+ F(LoadMutableDouble, 2, 1) \
+ F(TryMigrateInstance, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
- F(MaxSmi, 0, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -133,15 +100,10 @@ namespace internal {
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
\
- F(NumberToString, 1, 1) \
- F(NumberToStringSkipCache, 1, 1) \
F(NumberToInteger, 1, 1) \
- F(NumberToPositiveInteger, 1, 1) \
F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToJSUint32, 1, 1) \
F(NumberToJSInt32, 1, 1) \
- F(NumberToSmi, 1, 1) \
- F(AllocateHeapNumber, 0, 1) \
\
/* Arithmetic operations */ \
F(NumberAdd, 2, 1) \
@@ -150,10 +112,8 @@ namespace internal {
F(NumberDiv, 2, 1) \
F(NumberMod, 2, 1) \
F(NumberUnaryMinus, 1, 1) \
- F(NumberAlloc, 0, 1) \
F(NumberImul, 2, 1) \
\
- F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
F(SparseJoinWithSeparator, 3, 1) \
@@ -173,30 +133,21 @@ namespace internal {
\
F(NumberCompare, 3, 1) \
F(SmiLexicographicCompare, 2, 1) \
- F(StringCompare, 2, 1) \
\
/* Math */ \
- F(Math_acos, 1, 1) \
- F(Math_asin, 1, 1) \
- F(Math_atan, 1, 1) \
- F(Math_atan2, 2, 1) \
- F(Math_cos, 1, 1) \
- F(Math_exp, 1, 1) \
- F(Math_floor, 1, 1) \
- F(Math_log, 1, 1) \
- F(Math_pow, 2, 1) \
- F(Math_pow_cfunction, 2, 1) \
+ F(MathAcos, 1, 1) \
+ F(MathAsin, 1, 1) \
+ F(MathAtan, 1, 1) \
+ F(MathFloorRT, 1, 1) \
+ F(MathAtan2, 2, 1) \
+ F(MathExpRT, 1, 1) \
F(RoundNumber, 1, 1) \
- F(Math_sin, 1, 1) \
- F(Math_sqrt, 1, 1) \
- F(Math_tan, 1, 1) \
+ F(MathFround, 1, 1) \
\
/* Regular expressions */ \
F(RegExpCompile, 3, 1) \
- F(RegExpExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpInitializeObject, 5, 1) \
- F(RegExpConstructResult, 3, 1) \
\
/* JSON */ \
F(ParseJson, 1, 1) \
@@ -204,11 +155,9 @@ namespace internal {
F(QuoteJSONString, 1, 1) \
\
/* Strings */ \
- F(StringCharCodeAt, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
- F(SubString, 3, 1) \
F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringMatch, 3, 1) \
@@ -231,7 +180,6 @@ namespace internal {
F(FunctionSetInstanceClassName, 2, 1) \
F(FunctionSetLength, 2, 1) \
F(FunctionSetPrototype, 2, 1) \
- F(FunctionSetReadOnlyPrototype, 1, 1) \
F(FunctionGetName, 1, 1) \
F(FunctionSetName, 2, 1) \
F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
@@ -251,11 +199,9 @@ namespace internal {
F(GetAndClearOverflowedStackTrace, 1, 1) \
F(GetV8Version, 0, 1) \
\
- F(ClassOf, 1, 1) \
F(SetCode, 2, 1) \
- F(SetExpectedNumberOfProperties, 2, 1) \
\
- F(CreateApiFunction, 1, 1) \
+ F(CreateApiFunction, 2, 1) \
F(IsTemplate, 1, 1) \
F(GetTemplateField, 2, 1) \
F(DisableAccessChecks, 1, 1) \
@@ -269,47 +215,31 @@ namespace internal {
F(DateToUTC, 1, 1) \
F(DateMakeDay, 2, 1) \
F(DateSetValue, 3, 1) \
- \
- /* Numbers */ \
+ F(DateCacheVersion, 0, 1) \
\
/* Globals */ \
F(CompileString, 2, 1) \
- F(GlobalPrint, 1, 1) \
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
F(IsAttachedGlobal, 1, 1) \
- F(ResolvePossiblyDirectEval, 5, 2) \
\
F(SetProperty, -1 /* 4 or 5 */, 1) \
F(DefineOrRedefineDataProperty, 4, 1) \
F(DefineOrRedefineAccessorProperty, 5, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
F(GetDataProperty, 2, 1) \
+ F(SetHiddenProperty, 3, 1) \
\
/* Arrays */ \
F(RemoveArrayHoles, 2, 1) \
F(GetArrayKeys, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
- F(ArrayConstructor, -1, 1) \
- F(InternalArrayConstructor, -1, 1) \
\
/* Getters and Setters */ \
F(LookupAccessor, 3, 1) \
\
- /* Literals */ \
- F(MaterializeRegExpLiteral, 4, 1)\
- F(CreateObjectLiteral, 4, 1) \
- F(CreateArrayLiteral, 4, 1) \
- F(CreateArrayLiteralStubBailout, 3, 1) \
- \
- /* Harmony generators */ \
- F(CreateJSGeneratorObject, 0, 1) \
- F(SuspendJSGeneratorObject, 1, 1) \
- F(ResumeJSGeneratorObject, 3, 1) \
- F(ThrowGeneratorStateError, 1, 1) \
- \
/* ES5 */ \
F(ObjectFreeze, 1, 1) \
\
@@ -319,7 +249,10 @@ namespace internal {
/* Harmony symbols */ \
F(CreateSymbol, 1, 1) \
F(CreatePrivateSymbol, 1, 1) \
- F(SymbolName, 1, 1) \
+ F(CreateGlobalPrivateSymbol, 1, 1) \
+ F(NewSymbolWrapper, 1, 1) \
+ F(SymbolDescription, 1, 1) \
+ F(SymbolRegistry, 0, 1) \
F(SymbolIsPrivate, 1, 1) \
\
/* Harmony proxies */ \
@@ -337,16 +270,24 @@ namespace internal {
F(SetAdd, 2, 1) \
F(SetHas, 2, 1) \
F(SetDelete, 2, 1) \
+ F(SetClear, 1, 1) \
F(SetGetSize, 1, 1) \
\
+ F(SetIteratorInitialize, 3, 1) \
+ F(SetIteratorNext, 1, 1) \
+ \
/* Harmony maps */ \
F(MapInitialize, 1, 1) \
F(MapGet, 2, 1) \
F(MapHas, 2, 1) \
F(MapDelete, 2, 1) \
+ F(MapClear, 1, 1) \
F(MapSet, 3, 1) \
F(MapGetSize, 1, 1) \
\
+ F(MapIteratorInitialize, 3, 1) \
+ F(MapIteratorNext, 1, 1) \
+ \
/* Harmony weak maps and sets */ \
F(WeakCollectionInitialize, 1, 1) \
F(WeakCollectionGet, 2, 1) \
@@ -355,34 +296,31 @@ namespace internal {
F(WeakCollectionSet, 3, 1) \
\
/* Harmony events */ \
- F(SetMicrotaskPending, 1, 1) \
+ F(EnqueueMicrotask, 1, 1) \
+ F(RunMicrotasks, 0, 1) \
\
/* Harmony observe */ \
F(IsObserved, 1, 1) \
F(SetIsObserved, 1, 1) \
F(GetObservationState, 0, 1) \
F(ObservationWeakMapCreate, 0, 1) \
- F(UnwrapGlobalProxy, 1, 1) \
- F(IsAccessAllowedForObserver, 3, 1) \
+ F(ObserverObjectAndRecordHaveSameOrigin, 3, 1) \
+ F(ObjectWasCreatedInCurrentOrigin, 1, 1) \
+ F(GetObjectContextObjectObserve, 1, 1) \
+ F(GetObjectContextObjectGetNotifier, 1, 1) \
+ F(GetObjectContextNotifierPerformChange, 1, 1) \
\
/* Harmony typed arrays */ \
F(ArrayBufferInitialize, 2, 1)\
- F(ArrayBufferGetByteLength, 1, 1)\
F(ArrayBufferSliceImpl, 3, 1) \
F(ArrayBufferIsView, 1, 1) \
+ F(ArrayBufferNeuter, 1, 1) \
\
- F(TypedArrayInitialize, 5, 1) \
F(TypedArrayInitializeFromArrayLike, 4, 1) \
F(TypedArrayGetBuffer, 1, 1) \
- F(TypedArrayGetByteLength, 1, 1) \
- F(TypedArrayGetByteOffset, 1, 1) \
- F(TypedArrayGetLength, 1, 1) \
F(TypedArraySetFastCases, 3, 1) \
\
- F(DataViewInitialize, 4, 1) \
F(DataViewGetBuffer, 1, 1) \
- F(DataViewGetByteLength, 1, 1) \
- F(DataViewGetByteOffset, 1, 1) \
F(DataViewGetInt8, 3, 1) \
F(DataViewGetUint8, 3, 1) \
F(DataViewGetInt16, 3, 1) \
@@ -402,54 +340,22 @@ namespace internal {
F(DataViewSetFloat64, 4, 1) \
\
/* Statements */ \
- F(NewClosure, 3, 1) \
- F(NewClosureFromStubFailure, 1, 1) \
- F(NewObject, 1, 1) \
F(NewObjectFromBound, 1, 1) \
- F(FinalizeInstanceSize, 1, 1) \
- F(Throw, 1, 1) \
- F(ReThrow, 1, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowNotDateError, 0, 1) \
- F(ThrowMessage, 1, 1) \
- F(StackGuard, 0, 1) \
- F(Interrupt, 0, 1) \
- F(PromoteScheduledException, 0, 1) \
- \
- /* Contexts */ \
- F(NewGlobalContext, 2, 1) \
- F(NewFunctionContext, 1, 1) \
- F(PushWithContext, 2, 1) \
- F(PushCatchContext, 3, 1) \
- F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 2, 1) \
- F(DeleteContextSlot, 2, 1) \
- F(LoadContextSlot, 2, 2) \
- F(LoadContextSlotNoReferenceError, 2, 2) \
- F(StoreContextSlot, 4, 1) \
\
/* Declarations and initialization */ \
- F(DeclareGlobals, 3, 1) \
- F(DeclareModules, 1, 1) \
- F(DeclareContextSlot, 4, 1) \
F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
- F(InitializeConstGlobal, 2, 1) \
- F(InitializeConstContextSlot, 3, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
\
/* Debugging */ \
F(DebugPrint, 1, 1) \
+ F(GlobalPrint, 1, 1) \
F(DebugTrace, 0, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
- F(Abort, 2, 1) \
+ F(Abort, 1, 1) \
F(AbortJS, 1, 1) \
- /* Logging */ \
- F(Log, 2, 1) \
/* ES5 */ \
- F(LocalKeys, 1, 1) \
- /* Cache suport */ \
- F(GetFromCache, 2, 1) \
+ F(OwnKeys, 1, 1) \
\
/* Message objects */ \
F(MessageGetStartPosition, 1, 1) \
@@ -465,25 +371,33 @@ namespace internal {
F(HasFastDoubleElements, 1, 1) \
F(HasFastHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
- F(HasNonStrictArgumentsElements, 1, 1) \
- F(HasExternalPixelElements, 1, 1) \
+ F(HasSloppyArgumentsElements, 1, 1) \
+ F(HasExternalUint8ClampedElements, 1, 1) \
F(HasExternalArrayElements, 1, 1) \
- F(HasExternalByteElements, 1, 1) \
- F(HasExternalUnsignedByteElements, 1, 1) \
- F(HasExternalShortElements, 1, 1) \
- F(HasExternalUnsignedShortElements, 1, 1) \
- F(HasExternalIntElements, 1, 1) \
- F(HasExternalUnsignedIntElements, 1, 1) \
- F(HasExternalFloatElements, 1, 1) \
- F(HasExternalDoubleElements, 1, 1) \
+ F(HasExternalInt8Elements, 1, 1) \
+ F(HasExternalUint8Elements, 1, 1) \
+ F(HasExternalInt16Elements, 1, 1) \
+ F(HasExternalUint16Elements, 1, 1) \
+ F(HasExternalInt32Elements, 1, 1) \
+ F(HasExternalUint32Elements, 1, 1) \
+ F(HasExternalFloat32Elements, 1, 1) \
+ F(HasExternalFloat64Elements, 1, 1) \
+ F(HasFixedUint8ClampedElements, 1, 1) \
+ F(HasFixedInt8Elements, 1, 1) \
+ F(HasFixedUint8Elements, 1, 1) \
+ F(HasFixedInt16Elements, 1, 1) \
+ F(HasFixedUint16Elements, 1, 1) \
+ F(HasFixedInt32Elements, 1, 1) \
+ F(HasFixedUint32Elements, 1, 1) \
+ F(HasFixedFloat32Elements, 1, 1) \
+ F(HasFixedFloat64Elements, 1, 1) \
F(HasFastProperties, 1, 1) \
F(TransitionElementsKind, 2, 1) \
F(HaveSameMap, 2, 1) \
- F(IsAccessCheckNeeded, 1, 1)
+ F(IsJSGlobalProxy, 1, 1)
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
+#define RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
/* Debugger support*/ \
F(DebugBreak, 0, 1) \
F(SetDebugEventListener, 2, 1) \
@@ -501,6 +415,7 @@ namespace internal {
F(GetScopeCount, 2, 1) \
F(GetStepInPositions, 2, 1) \
F(GetScopeDetails, 4, 1) \
+ F(GetAllScopesDetails, 4, 1) \
F(GetFunctionScopeCount, 1, 1) \
F(GetFunctionScopeDetails, 2, 1) \
F(SetScopeVariableValue, 6, 1) \
@@ -545,10 +460,6 @@ namespace internal {
F(CollectGarbage, 1, 1) \
F(GetHeapUsage, 0, 1) \
-#else
-#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
-#endif
-
#ifdef V8_I18N_SUPPORT
#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F) \
@@ -558,6 +469,10 @@ namespace internal {
F(AvailableLocalesOf, 1, 1) \
F(GetDefaultICULocale, 0, 1) \
F(GetLanguageTagVariants, 1, 1) \
+ F(IsInitializedIntlObject, 1, 1) \
+ F(IsInitializedIntlObjectOfType, 2, 1) \
+ F(MarkAsInitializedIntlObjectOfType, 3, 1) \
+ F(GetImplFromInitializedIntlObject, 1, 1) \
\
/* Date format and parse. */ \
F(CreateDateTimeFormat, 3, 1) \
@@ -573,6 +488,9 @@ namespace internal {
F(CreateCollator, 3, 1) \
F(InternalCompare, 3, 1) \
\
+ /* String.prototype.normalize. */ \
+ F(StringNormalize, 2, 1) \
+ \
/* Break iterator. */ \
F(CreateBreakIterator, 3, 1) \
F(BreakIteratorAdoptText, 2, 1) \
@@ -598,14 +516,102 @@ namespace internal {
// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
// either directly by id (via the code generator), or indirectly
// via a native call by name (from within JS code).
+// Entries have the form F(name, number of arguments, number of return values).
#define RUNTIME_FUNCTION_LIST(F) \
RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
RUNTIME_FUNCTION_LIST_DEBUG(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
+ RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
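The F(name, nargs, ressize) entries are an X-macro list: the same list macro expands into both the FunctionId enum and the kIntrinsicFunctions descriptor table in runtime.cc, so the two cannot drift apart. A self-contained sketch of the pattern (demo names invented):

#include <cstddef>

// One list macro, expanded twice with different definitions of F.
#define DEMO_FUNCTION_LIST(F) \
  F(Add, 2, 1)                \
  F(Negate, 1, 1)

enum DemoFunctionId {
#define F(name, nargs, ressize) kDemo##name,
  DEMO_FUNCTION_LIST(F)
#undef F
  kDemoNumFunctions
};

struct DemoFunction {
  const char* name;
  int nargs;    // number of arguments (-1 would mean "varies")
  int ressize;  // number of return values
};

static const DemoFunction kDemoFunctions[] = {
#define F(name, nargs, ressize) { "Demo" #name, nargs, ressize },
  DEMO_FUNCTION_LIST(F)
#undef F
};

static_assert(sizeof(kDemoFunctions) / sizeof(kDemoFunctions[0]) ==
                  static_cast<size_t>(kDemoNumFunctions),
              "enum and table generated from the same list stay in sync");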
+// RUNTIME_HIDDEN_FUNCTION_LIST defines all runtime functions accessed
+// by id from the code generator, but not via a native call by name.
+// Entries have the form F(name, number of arguments, number of return values).
+#define RUNTIME_HIDDEN_FUNCTION_LIST(F) \
+ /* String and Regexp */ \
+ F(NumberToString, 1, 1) \
+ F(RegExpConstructResult, 3, 1) \
+ F(RegExpExec, 4, 1) \
+ F(StringAdd, 2, 1) \
+ F(SubString, 3, 1) \
+ F(StringCompare, 2, 1) \
+ F(StringCharCodeAt, 2, 1) \
+ F(GetFromCache, 2, 1) \
+ \
+ /* Compilation */ \
+ F(CompileUnoptimized, 1, 1) \
+ F(CompileOptimized, 2, 1) \
+ F(TryInstallOptimizedCode, 1, 1) \
+ F(NotifyDeoptimized, 1, 1) \
+ F(NotifyStubFailure, 0, 1) \
+ \
+ /* Utilities */ \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(AllocateHeapNumber, 0, 1) \
+ F(NumberToSmi, 1, 1) \
+ F(NumberToStringSkipCache, 1, 1) \
+ \
+ F(NewSloppyArguments, 3, 1) \
+ F(NewStrictArguments, 3, 1) \
+ \
+ /* Harmony generators */ \
+ F(CreateJSGeneratorObject, 0, 1) \
+ F(SuspendJSGeneratorObject, 1, 1) \
+ F(ResumeJSGeneratorObject, 3, 1) \
+ F(ThrowGeneratorStateError, 1, 1) \
+ \
+ /* Arrays */ \
+ F(ArrayConstructor, -1, 1) \
+ F(InternalArrayConstructor, -1, 1) \
+ \
+ /* Literals */ \
+ F(MaterializeRegExpLiteral, 4, 1)\
+ F(CreateObjectLiteral, 4, 1) \
+ F(CreateArrayLiteral, 4, 1) \
+ F(CreateArrayLiteralStubBailout, 3, 1) \
+ \
+ /* Statements */ \
+ F(NewClosure, 3, 1) \
+ F(NewClosureFromStubFailure, 1, 1) \
+ F(NewObject, 1, 1) \
+ F(NewObjectWithAllocationSite, 2, 1) \
+ F(FinalizeInstanceSize, 1, 1) \
+ F(Throw, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowNotDateError, 0, 1) \
+ F(StackGuard, 0, 1) \
+ F(Interrupt, 0, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ \
+ /* Contexts */ \
+ F(NewGlobalContext, 2, 1) \
+ F(NewFunctionContext, 1, 1) \
+ F(PushWithContext, 2, 1) \
+ F(PushCatchContext, 3, 1) \
+ F(PushBlockContext, 2, 1) \
+ F(PushModuleContext, 2, 1) \
+ F(DeleteContextSlot, 2, 1) \
+ F(LoadContextSlot, 2, 2) \
+ F(LoadContextSlotNoReferenceError, 2, 2) \
+ F(StoreContextSlot, 4, 1) \
+ \
+ /* Declarations and initialization */ \
+ F(DeclareGlobals, 3, 1) \
+ F(DeclareModules, 1, 1) \
+ F(DeclareContextSlot, 4, 1) \
+ F(InitializeConstGlobal, 2, 1) \
+ F(InitializeConstContextSlot, 3, 1) \
+ \
+ /* Eval */ \
+ F(ResolvePossiblyDirectEval, 5, 2) \
+ \
+ /* Maths */ \
+ F(MathPowSlow, 2, 1) \
+ F(MathPow, 2, 1)
+
// ----------------------------------------------------------------------------
// INLINE_FUNCTION_LIST defines all inlined functions accessed
// with a native call of the form %_name from within JS code.
@@ -633,27 +639,15 @@ namespace internal {
F(IsSpecObject, 1, 1) \
F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
F(MathPow, 2, 1) \
- F(MathSqrt, 1, 1) \
- F(MathLog, 1, 1) \
F(IsMinusZero, 1, 1) \
- F(IsRegExpEquivalent, 2, 1) \
F(HasCachedArrayIndex, 1, 1) \
F(GetCachedArrayIndex, 1, 1) \
F(FastAsciiArrayJoin, 2, 1) \
F(GeneratorNext, 2, 1) \
F(GeneratorThrow, 2, 1) \
- F(DebugBreakInOptimizedCode, 0, 1)
-
-
-// ----------------------------------------------------------------------------
-// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code that also have
-// a corresponding runtime function, that is called for slow cases.
-// Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_RUNTIME_FUNCTION_LIST(F) \
+ F(DebugBreakInOptimizedCode, 0, 1) \
F(ClassOf, 1, 1) \
F(StringCharCodeAt, 2, 1) \
- F(Log, 3, 1) \
F(StringAdd, 2, 1) \
F(SubString, 3, 1) \
F(StringCompare, 2, 1) \
@@ -663,6 +657,34 @@ namespace internal {
F(NumberToString, 1, 1)
+// ----------------------------------------------------------------------------
+// INLINE_OPTIMIZED_FUNCTION_LIST defines all inlined functions accessed
+// with a native call of the form %_name from within JS code that also have
+// a corresponding runtime function that is called from non-optimized code.
+// For the benefit of (fuzz) tests, the runtime version can also be called
+// directly as %name (i.e. without the leading underscore).
+// Entries have the form F(name, number of arguments, number of return values).
+#define INLINE_OPTIMIZED_FUNCTION_LIST(F) \
+ /* Typed Arrays */ \
+ F(TypedArrayInitialize, 5, 1) \
+ F(DataViewInitialize, 4, 1) \
+ F(MaxSmi, 0, 1) \
+ F(TypedArrayMaxSizeInHeap, 0, 1) \
+ F(ArrayBufferViewGetByteLength, 1, 1) \
+ F(ArrayBufferViewGetByteOffset, 1, 1) \
+ F(TypedArrayGetLength, 1, 1) \
+ /* ArrayBuffer */ \
+ F(ArrayBufferGetByteLength, 1, 1) \
+ /* Maths */ \
+ F(ConstructDouble, 2, 1) \
+ F(DoubleHi, 1, 1) \
+ F(DoubleLo, 1, 1) \
+ F(MathSqrtRT, 1, 1) \
+ F(MathLogRT, 1, 1) \
+ /* Debugger */ \
+ F(DebugCallbackSupportsStepping, 1, 1)
+
+
//---------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
@@ -713,10 +735,16 @@ class Runtime : public AllStatic {
enum FunctionId {
#define F(name, nargs, ressize) k##name,
RUNTIME_FUNCTION_LIST(F)
+ INLINE_OPTIMIZED_FUNCTION_LIST(F)
+#undef F
+#define F(name, nargs, ressize) kHidden##name,
+ RUNTIME_HIDDEN_FUNCTION_LIST(F)
#undef F
#define F(name, nargs, ressize) kInline##name,
INLINE_FUNCTION_LIST(F)
- INLINE_RUNTIME_FUNCTION_LIST(F)
+#undef F
+#define F(name, nargs, ressize) kInlineOptimized##name,
+ INLINE_OPTIMIZED_FUNCTION_LIST(F)
#undef F
kNumFunctions,
kFirstInlineFunction = kInlineIsSmi
@@ -724,7 +752,9 @@ class Runtime : public AllStatic {
enum IntrinsicType {
RUNTIME,
- INLINE
+ RUNTIME_HIDDEN,
+ INLINE,
+ INLINE_OPTIMIZED
};
// Intrinsic function descriptor.
@@ -748,11 +778,8 @@ class Runtime : public AllStatic {
// Add internalized strings for all the intrinsic function names to a
// StringDictionary.
- // Returns failure if an allocation fails. In this case, it must be
- // retried with a new, empty StringDictionary, not with the same one.
- // Alternatively, heap initialization can be completely restarted.
- MUST_USE_RESULT static MaybeObject* InitializeIntrinsicFunctionNames(
- Heap* heap, Object* dictionary);
+ static void InitializeIntrinsicFunctionNames(Isolate* isolate,
+ Handle<NameDictionary> dict);
// Get the intrinsic function with the given name, which must be internalized.
static const Function* FunctionForName(Handle<String> name);
@@ -773,47 +800,39 @@ class Runtime : public AllStatic {
// Support getting the characters in a string using [] notation as
// in Firefox/SpiderMonkey, Safari and Opera.
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
-
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAtOrFail(
+ MUST_USE_RESULT static MaybeHandle<Object> GetElementOrCharAt(
Isolate* isolate,
Handle<Object> object,
uint32_t index);
- static Handle<Object> SetObjectProperty(
+ MUST_USE_RESULT static MaybeHandle<Object> SetObjectProperty(
Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
- static Handle<Object> ForceSetObjectProperty(
- Isolate* isolate,
+ MUST_USE_RESULT static MaybeHandle<Object> ForceSetObjectProperty(
Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
- PropertyAttributes attr);
+ PropertyAttributes attr,
+ JSReceiver::StoreFromKeyed store_from_keyed
+ = JSReceiver::MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT static MaybeObject* DeleteObjectProperty(
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty(
Isolate* isolate,
Handle<JSReceiver> object,
Handle<Object> key,
JSReceiver::DeleteMode mode);
- MUST_USE_RESULT static MaybeObject* HasObjectProperty(
+ MUST_USE_RESULT static MaybeHandle<Object> HasObjectProperty(
Isolate* isolate,
Handle<JSReceiver> object,
Handle<Object> key);
- MUST_USE_RESULT static MaybeObject* GetObjectProperty(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key);
-
- MUST_USE_RESULT static MaybeObject* GetObjectPropertyOrFail(
+ MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
Isolate* isolate,
Handle<Object> object,
Handle<Object> key);
@@ -830,6 +849,8 @@ class Runtime : public AllStatic {
size_t allocated_length,
bool initialize = true);
+ static void NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer);
+
static void FreeArrayBuffer(
Isolate* isolate,
JSArrayBuffer* phantom_array_buffer);
@@ -844,17 +865,20 @@ class Runtime : public AllStatic {
ARRAY_ID_INT32 = 6,
ARRAY_ID_FLOAT32 = 7,
ARRAY_ID_FLOAT64 = 8,
- ARRAY_ID_UINT8C = 9
+ ARRAY_ID_UINT8_CLAMPED = 9,
+
+ ARRAY_ID_FIRST = ARRAY_ID_UINT8,
+ ARRAY_ID_LAST = ARRAY_ID_UINT8_CLAMPED
};
static void ArrayIdToTypeAndSize(int array_id,
- ExternalArrayType *type, size_t *element_size);
-
- // Helper functions used stubs.
- static void PerformGC(Object* result, Isolate* isolate);
+ ExternalArrayType *type,
+ ElementsKind* external_elements_kind,
+ ElementsKind* fixed_elements_kind,
+ size_t *element_size);
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
- static Handle<Object> CreateArrayLiteralBoilerplate(
+ MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> elements);
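The hunks above all apply the same MaybeObject* to MaybeHandle<Object> migration: a failed runtime call now yields an empty handle with a pending exception instead of a raw failure object. An illustrative call-site sketch (variable names are placeholders; ASSIGN_RETURN_ON_EXCEPTION is a sibling of the RETURN_ON_EXCEPTION_VALUE macro that appears later in this diff):

    // Before: callers tested the raw result for a failure sentinel.
    //   MaybeObject* raw = Runtime::GetObjectProperty(isolate, object, key);
    //   if (raw->IsFailure()) return raw;
    // After: callers unwrap the MaybeHandle, propagating pending exceptions.
    Handle<Object> result;
    ASSIGN_RETURN_ON_EXCEPTION(
        isolate, result,
        Runtime::GetObjectProperty(isolate, object, key),
        Object);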
@@ -864,12 +888,12 @@ class Runtime : public AllStatic {
//---------------------------------------------------------------------------
// Constants used by interface to runtime functions.
-class AllocateDoubleAlignFlag: public BitField<bool, 0, 1> {};
-class AllocateTargetSpace: public BitField<AllocationSpace, 1, 3> {};
+class AllocateDoubleAlignFlag: public BitField<bool, 0, 1> {};
+class AllocateTargetSpace: public BitField<AllocationSpace, 1, 3> {};
-class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
-class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
-class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
+class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
+class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
+class DeclareGlobalsStrictMode: public BitField<StrictMode, 2, 1> {};
} } // namespace v8::internal
diff --git a/chromium/v8/src/runtime.js b/chromium/v8/src/runtime.js
index 35bc07a10e8..1dee2e08f61 100644
--- a/chromium/v8/src/runtime.js
+++ b/chromium/v8/src/runtime.js
@@ -1,29 +1,6 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This files contains runtime support implemented in JavaScript.
@@ -48,7 +25,6 @@ var $Number = global.Number;
var $Function = global.Function;
var $Boolean = global.Boolean;
var $NaN = %GetRootNaN();
-var builtins = this;
// ECMA-262 Section 11.9.3.
function EQUALS(y) {
@@ -76,11 +52,8 @@ function EQUALS(y) {
y = %ToPrimitive(y, NO_HINT);
}
} else if (IS_SYMBOL(x)) {
- while (true) {
- if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
- if (!IS_SPEC_OBJECT(y)) return 1; // not equal
- y = %ToPrimitive(y, NO_HINT);
- }
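+    // A symbol only equals itself; it is never coerced, so any
+    // non-symbol comparand is unequal.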
+ if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
+ return 1; // not equal
} else if (IS_BOOLEAN(x)) {
if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
if (IS_NULL_OR_UNDEFINED(y)) return 1;
@@ -98,6 +71,7 @@ function EQUALS(y) {
return %_ObjectEquals(x, y) ? 0 : 1;
}
if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ if (IS_SYMBOL(y)) return 1; // not equal
if (IS_BOOLEAN(y)) y = %ToNumber(y);
x = %ToPrimitive(x, NO_HINT);
}
@@ -467,7 +441,7 @@ function APPLY_PREPARE(args) {
}
-function APPLY_OVERFLOW(length) {
+function STACK_OVERFLOW(length) {
throw %MakeRangeError('stack_overflow', []);
}
@@ -502,7 +476,7 @@ function ToPrimitive(x, hint) {
if (IS_STRING(x)) return x;
// Normal behavior.
if (!IS_SPEC_OBJECT(x)) return x;
- if (IS_SYMBOL_WRAPPER(x)) return %_ValueOf(x);
+ if (IS_SYMBOL_WRAPPER(x)) throw MakeTypeError('symbol_to_primitive', []);
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
}
@@ -549,6 +523,7 @@ function ToString(x) {
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
if (IS_UNDEFINED(x)) return 'undefined';
+ if (IS_SYMBOL(x)) throw %MakeTypeError('symbol_to_string', []);
return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
}
@@ -556,6 +531,7 @@ function NonStringToString(x) {
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
if (IS_UNDEFINED(x)) return 'undefined';
+ if (IS_SYMBOL(x)) throw %MakeTypeError('symbol_to_string', []);
return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
}
@@ -569,9 +545,9 @@ function ToName(x) {
// ECMA-262, section 9.9, page 36.
function ToObject(x) {
if (IS_STRING(x)) return new $String(x);
- if (IS_SYMBOL(x)) return new $Symbol(x);
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
+ if (IS_SYMBOL(x)) return %NewSymbolWrapper(x);
if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
throw %MakeTypeError('undefined_or_null_to_object', []);
}
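Taken together, the runtime.js changes above pin down symbol semantics for equality, string conversion and object coercion. A rough sketch of the resulting observable behavior (assuming a build with harmony symbols enabled; this sketch is not part of the diff):

    var s = Symbol();
    s == s;         // true: EQUALS compares symbols by identity only
    s == Symbol();  // false: a symbol never equals anything else
    s + '';         // TypeError 'symbol_to_string' via implicit ToString
    Object(s);      // wrapper object, now built by %NewSymbolWrapper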
diff --git a/chromium/v8/src/safepoint-table.cc b/chromium/v8/src/safepoint-table.cc
index beecb27582d..e041e17ca9f 100644
--- a/chromium/v8/src/safepoint-table.cc
+++ b/chromium/v8/src/safepoint-table.cc
@@ -1,38 +1,15 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "safepoint-table.h"
-
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "zone-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/safepoint-table.h"
+
+#include "src/deoptimizer.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/safepoint-table.h b/chromium/v8/src/safepoint-table.h
index cd094c55bf2..2fed5a76042 100644
--- a/chromium/v8/src/safepoint-table.h
+++ b/chromium/v8/src/safepoint-table.h
@@ -1,37 +1,14 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SAFEPOINT_TABLE_H_
#define V8_SAFEPOINT_TABLE_H_
-#include "allocation.h"
-#include "heap.h"
-#include "v8memory.h"
-#include "zone.h"
+#include "src/allocation.h"
+#include "src/heap.h"
+#include "src/v8memory.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/sampler.cc b/chromium/v8/src/sampler.cc
index 684ef486c7d..dcb4be72afc 100644
--- a/chromium/v8/src/sampler.cc
+++ b/chromium/v8/src/sampler.cc
@@ -1,31 +1,8 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "sampler.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/sampler.h"
#if V8_OS_POSIX && !V8_OS_CYGWIN
@@ -35,7 +12,10 @@
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>
+
+#if !V8_OS_QNX
#include <sys/syscall.h>
+#endif
#if V8_OS_MACOSX
#include <mach/mach.h>
@@ -45,31 +25,33 @@
&& !V8_OS_OPENBSD
#include <ucontext.h>
#endif
+
#include <unistd.h>
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
#elif V8_OS_WIN || V8_OS_CYGWIN
-#include "win32-headers.h"
+#include "src/base/win32-headers.h"
#endif
-#include "v8.h"
+#include "src/v8.h"
-#include "cpu-profiler-inl.h"
-#include "flags.h"
-#include "frames-inl.h"
-#include "log.h"
-#include "platform.h"
-#include "simulator.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
+#include "src/cpu-profiler-inl.h"
+#include "src/flags.h"
+#include "src/frames-inl.h"
+#include "src/log.h"
+#include "src/platform.h"
+#include "src/simulator.h"
+#include "src/v8threads.h"
+#include "src/vm-state-inl.h"
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
@@ -93,6 +75,18 @@ typedef struct ucontext {
// Other fields are not used by V8, don't define them here.
} ucontext_t;
+#elif defined(__aarch64__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
@@ -142,6 +136,23 @@ typedef struct ucontext {
// Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
+
+#elif defined(__x86_64__)
+// x64 version for Android.
+typedef struct {
+ uint64_t gregs[23];
+ void* fpregs;
+ uint64_t __reserved1[8];
+} mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif
#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
@@ -222,13 +233,27 @@ class SimulatorHelper {
}
inline void FillRegisters(RegisterState* state) {
+#if V8_TARGET_ARCH_ARM
state->pc = reinterpret_cast<Address>(simulator_->get_pc());
state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
-#if V8_TARGET_ARCH_ARM
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::r11));
+#elif V8_TARGET_ARCH_ARM64
+ if (simulator_->sp() == 0 || simulator_->fp() == 0) {
+      // It is possible that the simulator is interrupted while it is
+      // updating the sp or fp register. The ARM64 simulator does this in
+      // two steps: first setting it to zero and then setting it to the
+      // new value. Bail out if sp/fp doesn't contain the new value yet.
+ return;
+ }
+ state->pc = reinterpret_cast<Address>(simulator_->pc());
+ state->sp = reinterpret_cast<Address>(simulator_->sp());
+ state->fp = reinterpret_cast<Address>(simulator_->fp());
#elif V8_TARGET_ARCH_MIPS
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#endif
@@ -266,7 +291,11 @@ class SignalHandler : public AllStatic {
struct sigaction sa;
sa.sa_sigaction = &HandleProfilerSignal;
sigemptyset(&sa.sa_mask);
+#if V8_OS_QNX
+ sa.sa_flags = SA_SIGINFO;
+#else
sa.sa_flags = SA_RESTART | SA_SIGINFO;
+#endif
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
@@ -321,6 +350,11 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
SimulatorHelper helper;
if (!helper.Init(sampler, isolate)) return;
helper.FillRegisters(&state);
+  // It is possible that the simulator is interrupted while it is
+  // updating the sp or fp register. The ARM64 simulator does this in
+  // two steps: first setting it to zero and then setting it to the
+  // new value. Bail out if sp/fp doesn't contain the new value yet.
+ if (state.sp == 0 || state.fp == 0) return;
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -350,6 +384,11 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#elif V8_HOST_ARCH_ARM64
+ state.pc = reinterpret_cast<Address>(mcontext.pc);
+ state.sp = reinterpret_cast<Address>(mcontext.sp);
+ // FP is an alias for x29.
+ state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
@@ -415,7 +454,17 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-#endif // V8_OS_SOLARIS
+#elif V8_OS_QNX
+#if V8_HOST_ARCH_IA32
+ state.pc = reinterpret_cast<Address>(mcontext.cpu.eip);
+ state.sp = reinterpret_cast<Address>(mcontext.cpu.esp);
+ state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp);
+#elif V8_HOST_ARCH_ARM
+ state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]);
+ state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
+ state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
+#endif // V8_HOST_ARCH_*
+#endif // V8_OS_QNX
#endif // USE_SIMULATOR
sampler->SampleStack(state);
#endif // V8_OS_NACL
@@ -517,6 +566,7 @@ SamplerThread* SamplerThread::instance_ = NULL;
DISABLE_ASAN void TickSample::Init(Isolate* isolate,
const RegisterState& regs) {
ASSERT(isolate->IsInitialized());
+ timestamp = TimeTicks::HighResolutionNow();
pc = regs.pc;
state = isolate->current_vm_state();
@@ -604,7 +654,7 @@ void Sampler::Stop() {
void Sampler::IncreaseProfilingDepth() {
- NoBarrier_AtomicIncrement(&profiling_, 1);
+ base::NoBarrier_AtomicIncrement(&profiling_, 1);
#if defined(USE_SIGNALS)
SignalHandler::IncreaseSamplerCount();
#endif
@@ -615,7 +665,7 @@ void Sampler::DecreaseProfilingDepth() {
#if defined(USE_SIGNALS)
SignalHandler::DecreaseSamplerCount();
#endif
- NoBarrier_AtomicIncrement(&profiling_, -1);
+ base::NoBarrier_AtomicIncrement(&profiling_, -1);
}
diff --git a/chromium/v8/src/sampler.h b/chromium/v8/src/sampler.h
index b17a2ed8d50..fe94a02e935 100644
--- a/chromium/v8/src/sampler.h
+++ b/chromium/v8/src/sampler.h
@@ -1,36 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SAMPLER_H_
#define V8_SAMPLER_H_
-#include "atomicops.h"
-#include "frames.h"
-#include "v8globals.h"
+#include "src/base/atomicops.h"
+#include "src/frames.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -69,6 +46,7 @@ struct TickSample {
};
static const int kMaxFramesCount = 64;
Address stack[kMaxFramesCount]; // Call stack.
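+  // High-resolution time at which the sample was taken; set in Init().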
+ TimeTicks timestamp;
int frames_count : 8; // Number of captured frames.
bool has_external_callback : 1;
StackFrame::Type top_frame_type : 4;
@@ -96,20 +74,20 @@ class Sampler {
// Whether the sampling thread should use this Sampler for CPU profiling?
bool IsProfiling() const {
- return NoBarrier_Load(&profiling_) > 0 &&
- !NoBarrier_Load(&has_processing_thread_);
+ return base::NoBarrier_Load(&profiling_) > 0 &&
+ !base::NoBarrier_Load(&has_processing_thread_);
}
void IncreaseProfilingDepth();
void DecreaseProfilingDepth();
// Whether the sampler is running (that is, consumes resources).
- bool IsActive() const { return NoBarrier_Load(&active_); }
+ bool IsActive() const { return base::NoBarrier_Load(&active_); }
void DoSample();
// If true next sample must be initiated on the profiler event processor
// thread right after latest sample is processed.
void SetHasProcessingThread(bool value) {
- NoBarrier_Store(&has_processing_thread_, value);
+ base::NoBarrier_Store(&has_processing_thread_, value);
}
// Used in tests to make sure that stack sampling is performed.
@@ -130,13 +108,13 @@ class Sampler {
virtual void Tick(TickSample* sample) = 0;
private:
- void SetActive(bool value) { NoBarrier_Store(&active_, value); }
+ void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
Isolate* isolate_;
const int interval_;
- Atomic32 profiling_;
- Atomic32 has_processing_thread_;
- Atomic32 active_;
+ base::Atomic32 profiling_;
+ base::Atomic32 has_processing_thread_;
+ base::Atomic32 active_;
PlatformData* data_; // Platform specific data.
bool is_counting_samples_;
// Counts stack samples taken in JS VM state.
diff --git a/chromium/v8/src/scanner-character-streams.cc b/chromium/v8/src/scanner-character-streams.cc
index fb503459f7f..23af45fe09e 100644
--- a/chromium/v8/src/scanner-character-streams.cc
+++ b/chromium/v8/src/scanner-character-streams.cc
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "scanner-character-streams.h"
-
-#include "handles.h"
-#include "unicode-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/scanner-character-streams.h"
+
+#include "src/handles.h"
+#include "src/unicode-inl.h"
namespace v8 {
namespace internal {
@@ -126,8 +103,6 @@ GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
: string_(data),
length_(end_position) {
ASSERT(end_position >= start_position);
- buffer_cursor_ = buffer_;
- buffer_end_ = buffer_;
pos_ = start_position;
}
@@ -213,11 +188,11 @@ unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position,
static const byte kUtf8MultiByteMask = 0xC0;
-static const byte kUtf8MultiByteCharStart = 0xC0;
static const byte kUtf8MultiByteCharFollower = 0x80;
#ifdef DEBUG
+static const byte kUtf8MultiByteCharStart = 0xC0;
static bool IsUtf8MultiCharacterStart(byte first_byte) {
return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
}
diff --git a/chromium/v8/src/scanner-character-streams.h b/chromium/v8/src/scanner-character-streams.h
index 319ee8fc1c5..a25eb584a5d 100644
--- a/chromium/v8/src/scanner-character-streams.h
+++ b/chromium/v8/src/scanner-character-streams.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SCANNER_CHARACTER_STREAMS_H_
#define V8_SCANNER_CHARACTER_STREAMS_H_
-#include "scanner.h"
+#include "src/scanner.h"
namespace v8 {
namespace internal {
@@ -72,7 +49,6 @@ class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
virtual unsigned FillBuffer(unsigned position, unsigned length);
Handle<String> string_;
- unsigned start_position_;
unsigned length_;
};
diff --git a/chromium/v8/src/scanner.cc b/chromium/v8/src/scanner.cc
index 26f840b23a5..0265a8fa781 100644
--- a/chromium/v8/src/scanner.cc
+++ b/chromium/v8/src/scanner.cc
@@ -1,40 +1,19 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Features shared by parsing and pre-parsing scanners.
#include <cmath>
-#include "scanner.h"
+#include "src/scanner.h"
-#include "../include/v8stdint.h"
-#include "char-predicates-inl.h"
-#include "conversions-inl.h"
-#include "list-inl.h"
+#include "include/v8stdint.h"
+#include "src/char-predicates-inl.h"
+#include "src/conversions-inl.h"
+#include "src/list-inl.h"
+#include "src/v8.h"
+#include "src/parser.h"
namespace v8 {
namespace internal {
@@ -246,7 +225,8 @@ Token::Value Scanner::Next() {
}
-static inline bool IsByteOrderMark(uc32 c) {
+// TODO(yangguo): check whether this is actually necessary.
+static inline bool IsLittleEndianByteOrderMark(uc32 c) {
// The Unicode value U+FFFE is guaranteed never to be assigned as a
// Unicode character; this implies that in a Unicode context the
// 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
@@ -254,7 +234,7 @@ static inline bool IsByteOrderMark(uc32 c) {
// not be a U+FFFE character expressed in big-endian byte
// order). Nevertheless, we check for it to be compatible with
// Spidermonkey.
- return c == 0xFEFF || c == 0xFFFE;
+ return c == 0xFFFE;
}
@@ -262,14 +242,14 @@ bool Scanner::SkipWhiteSpace() {
int start_position = source_pos();
while (true) {
- // We treat byte-order marks (BOMs) as whitespace for better
- // compatibility with Spidermonkey and other JavaScript engines.
- while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
- // IsWhiteSpace() includes line terminators!
+ while (true) {
+      // Advance as long as the character is a WhiteSpace or LineTerminator.
+      // Remember if the latter is the case.
if (unicode_cache_->IsLineTerminator(c0_)) {
- // Ignore line terminators, but remember them. This is necessary
- // for automatic semicolon insertion.
has_line_terminator_before_next_ = true;
+ } else if (!unicode_cache_->IsWhiteSpace(c0_) &&
+ !IsLittleEndianByteOrderMark(c0_)) {
+ break;
}
Advance();
}
@@ -906,7 +886,7 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
KEYWORD("yield", Token::YIELD)
-static Token::Value KeywordOrIdentifierToken(const char* input,
+static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length,
bool harmony_scoping,
bool harmony_modules) {
@@ -981,8 +961,8 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
literal.Complete();
- if (next_.literal_chars->is_ascii()) {
- Vector<const char> chars = next_.literal_chars->ascii_literal();
+ if (next_.literal_chars->is_one_byte()) {
+ Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
return KeywordOrIdentifierToken(chars.start(),
chars.length(),
harmony_scoping_,
@@ -1113,21 +1093,66 @@ bool Scanner::ScanRegExpFlags() {
}
-int DuplicateFinder::AddAsciiSymbol(Vector<const char> key, int value) {
- return AddSymbol(Vector<const byte>::cast(key), true, value);
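+// Allocates a heap string for the upcoming token's literal, choosing the
+// one-byte or two-byte factory path to match the buffer's encoding.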
+Handle<String> Scanner::AllocateNextLiteralString(Isolate* isolate,
+ PretenureFlag tenured) {
+ if (is_next_literal_one_byte()) {
+ return isolate->factory()->NewStringFromOneByte(
+ next_literal_one_byte_string(), tenured).ToHandleChecked();
+ } else {
+ return isolate->factory()->NewStringFromTwoByte(
+ next_literal_two_byte_string(), tenured).ToHandleChecked();
+ }
+}
+
+
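+// Like AllocateNextLiteralString, but for the current token, and returns
+// an internalized (uniqued) string.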
+Handle<String> Scanner::AllocateInternalizedString(Isolate* isolate) {
+ if (is_literal_one_byte()) {
+ return isolate->factory()->InternalizeOneByteString(
+ literal_one_byte_string());
+ } else {
+ return isolate->factory()->InternalizeTwoByteString(
+ literal_two_byte_string());
+ }
+}
+
+
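+// Converts the current one-byte number literal to its double value,
+// accepting hex, octal, implicit-octal and binary prefixes.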
+double Scanner::DoubleValue() {
+ ASSERT(is_literal_one_byte());
+ return StringToDouble(
+ unicode_cache_,
+ literal_one_byte_string(),
+ ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
+}
+
+
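+// The two Find* helpers funnel the current literal into |finder| and
+// return the value previously recorded for it (duplicate detection).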
+int Scanner::FindNumber(DuplicateFinder* finder, int value) {
+ return finder->AddNumber(literal_one_byte_string(), value);
+}
+
+
+int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
+ if (is_literal_one_byte()) {
+ return finder->AddOneByteSymbol(literal_one_byte_string(), value);
+ }
+ return finder->AddTwoByteSymbol(literal_two_byte_string(), value);
+}
+
+
+int DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) {
+ return AddSymbol(key, true, value);
}
-int DuplicateFinder::AddUtf16Symbol(Vector<const uint16_t> key, int value) {
- return AddSymbol(Vector<const byte>::cast(key), false, value);
+int DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key, int value) {
+ return AddSymbol(Vector<const uint8_t>::cast(key), false, value);
}
-int DuplicateFinder::AddSymbol(Vector<const byte> key,
- bool is_ascii,
+int DuplicateFinder::AddSymbol(Vector<const uint8_t> key,
+ bool is_one_byte,
int value) {
- uint32_t hash = Hash(key, is_ascii);
- byte* encoding = BackupKey(key, is_ascii);
+ uint32_t hash = Hash(key, is_one_byte);
+ byte* encoding = BackupKey(key, is_one_byte);
HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
entry->value =
@@ -1136,15 +1161,16 @@ int DuplicateFinder::AddSymbol(Vector<const byte> key,
}
-int DuplicateFinder::AddNumber(Vector<const char> key, int value) {
+int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
ASSERT(key.length() > 0);
// Quick check for already being in canonical form.
if (IsNumberCanonical(key)) {
- return AddAsciiSymbol(key, value);
+ return AddOneByteSymbol(key, value);
}
int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
- double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
+ double double_value = StringToDouble(
+ unicode_constants_, key, flags, 0.0);
int length;
const char* string;
if (!std::isfinite(double_value)) {
@@ -1160,7 +1186,7 @@ int DuplicateFinder::AddNumber(Vector<const char> key, int value) {
}
-bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
+bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
// Test for a safe approximation of number literals that are already
// in canonical form: max 15 digits, no leading zeroes, except an
// integer part that is a single zero, and no trailing zeros below
@@ -1179,7 +1205,7 @@ bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
pos++;
bool invalid_last_digit = true;
while (pos < length) {
- byte digit = number[pos] - '0';
+ uint8_t digit = number[pos] - '0';
if (digit > '9' - '0') return false;
invalid_last_digit = (digit == 0);
pos++;
@@ -1188,11 +1214,11 @@ bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
}
-uint32_t DuplicateFinder::Hash(Vector<const byte> key, bool is_ascii) {
+uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
// Primitive hash function, almost identical to the one used
// for strings (except that it's seeded by the length and the one-byte flag).
int length = key.length();
- uint32_t hash = (length << 1) | (is_ascii ? 1 : 0) ;
+  uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0);
for (int i = 0; i < length; i++) {
uint32_t c = key[i];
hash = (hash + c) * 1025;
@@ -1210,39 +1236,42 @@ bool DuplicateFinder::Match(void* first, void* second) {
// was one byte.
byte* s1 = reinterpret_cast<byte*>(first);
byte* s2 = reinterpret_cast<byte*>(second);
- uint32_t length_ascii_field = 0;
+ uint32_t length_one_byte_field = 0;
byte c1;
do {
c1 = *s1;
if (c1 != *s2) return false;
- length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
+ length_one_byte_field = (length_one_byte_field << 7) | (c1 & 0x7f);
s1++;
s2++;
} while ((c1 & 0x80) != 0);
- int length = static_cast<int>(length_ascii_field >> 1);
+ int length = static_cast<int>(length_one_byte_field >> 1);
return memcmp(s1, s2, length) == 0;
}
-byte* DuplicateFinder::BackupKey(Vector<const byte> bytes,
- bool is_ascii) {
- uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
+byte* DuplicateFinder::BackupKey(Vector<const uint8_t> bytes,
+ bool is_one_byte) {
+ uint32_t one_byte_length = (bytes.length() << 1) | (is_one_byte ? 1 : 0);
backing_store_.StartSequence();
- // Emit ascii_length as base-128 encoded number, with the 7th bit set
+  // Emit one_byte_length as a base-128 encoded number, with the high bit set
// on the byte of every heptet except the last, least significant, one.
- if (ascii_length >= (1 << 7)) {
- if (ascii_length >= (1 << 14)) {
- if (ascii_length >= (1 << 21)) {
- if (ascii_length >= (1 << 28)) {
- backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
+ if (one_byte_length >= (1 << 7)) {
+ if (one_byte_length >= (1 << 14)) {
+ if (one_byte_length >= (1 << 21)) {
+ if (one_byte_length >= (1 << 28)) {
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 28) | 0x80));
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 21) | 0x80u));
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 14) | 0x80u));
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+ backing_store_.Add(static_cast<uint8_t>((one_byte_length >> 7) | 0x80u));
}
- backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
+ backing_store_.Add(static_cast<uint8_t>(one_byte_length & 0x7f));
backing_store_.AddBlock(bytes);
return backing_store_.EndSequence().start();
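The base-128 length prefix written above packs the one-byte flag into the value's low bit and emits big-endian heptets, with the high bit set on all but the last. A standalone sketch of the same encoding (hypothetical helper, not V8 code):

    #include <cstdint>
    #include <vector>

    // Encode (length << 1 | is_one_byte) as a big-endian base-128 varint.
    std::vector<uint8_t> EncodeLengthPrefix(uint32_t length, bool is_one_byte) {
      uint32_t v = (length << 1) | (is_one_byte ? 1 : 0);
      std::vector<uint8_t> out;
      for (int shift = 28; shift >= 7; shift -= 7) {
        // Emit a heptet only once the value needs it; mark it as non-final.
        if (v >= (1u << shift)) {
          out.push_back(static_cast<uint8_t>((v >> shift) | 0x80));
        }
      }
      out.push_back(static_cast<uint8_t>(v & 0x7f));  // final heptet, high bit clear
      return out;
    }
    // Example: length 200, one-byte => v = 401 => bytes 0x83, 0x11.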
diff --git a/chromium/v8/src/scanner.h b/chromium/v8/src/scanner.h
index 3cefc833ac3..2979082e3f1 100644
--- a/chromium/v8/src/scanner.h
+++ b/chromium/v8/src/scanner.h
@@ -1,49 +1,29 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Features shared by parsing and pre-parsing scanners.
#ifndef V8_SCANNER_H_
#define V8_SCANNER_H_
-#include "allocation.h"
-#include "char-predicates.h"
-#include "checks.h"
-#include "globals.h"
-#include "hashmap.h"
-#include "list.h"
-#include "token.h"
-#include "unicode-inl.h"
-#include "utils.h"
+#include "src/allocation.h"
+#include "src/char-predicates.h"
+#include "src/checks.h"
+#include "src/globals.h"
+#include "src/hashmap.h"
+#include "src/list.h"
+#include "src/token.h"
+#include "src/unicode-inl.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+class ParserRecorder;
+
+
// Returns the value (0 .. 15) of a hexadecimal character c.
// If c is not a legal hexadecimal character, returns a value < 0.
inline int HexValue(uc32 c) {
@@ -117,8 +97,8 @@ class Utf16CharacterStream {
virtual bool ReadBlock() = 0;
virtual unsigned SlowSeekForward(unsigned code_unit_count) = 0;
- const uc16* buffer_cursor_;
- const uc16* buffer_end_;
+ const uint16_t* buffer_cursor_;
+ const uint16_t* buffer_end_;
unsigned pos_;
};
@@ -139,12 +119,17 @@ class UnicodeCache {
bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
+ bool IsWhiteSpaceOrLineTerminator(unibrow::uchar c) {
+ return kIsWhiteSpaceOrLineTerminator.get(c);
+ }
private:
unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+ unibrow::Predicate<WhiteSpace, 128> kIsWhiteSpace;
+ unibrow::Predicate<WhiteSpaceOrLineTerminator, 128>
+ kIsWhiteSpaceOrLineTerminator;
StaticResource<Utf8Decoder> utf8_decoder_;
DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
@@ -161,32 +146,32 @@ class DuplicateFinder {
backing_store_(16),
map_(&Match) { }
- int AddAsciiSymbol(Vector<const char> key, int value);
- int AddUtf16Symbol(Vector<const uint16_t> key, int value);
+ int AddOneByteSymbol(Vector<const uint8_t> key, int value);
+ int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
// Add a number literal by converting it (if necessary)
// to the string that ToString(ToNumber(literal)) would generate,
// and then adding that string with AddOneByteSymbol.
// This string is the actual value used as key in an object literal,
// and the one that must be different from the other keys.
- int AddNumber(Vector<const char> key, int value);
+ int AddNumber(Vector<const uint8_t> key, int value);
private:
- int AddSymbol(Vector<const byte> key, bool is_ascii, int value);
+ int AddSymbol(Vector<const uint8_t> key, bool is_one_byte, int value);
// Backs up the key and its length in the backing store.
// The backup is stored with a base-128 encoding of the
- // length (plus a bit saying whether the string is ASCII),
+ // length (plus a bit saying whether the string is one byte),
// followed by the bytes of the key.
- byte* BackupKey(Vector<const byte> key, bool is_ascii);
+ uint8_t* BackupKey(Vector<const uint8_t> key, bool is_one_byte);
// Compare two encoded keys (both pointing into the backing store)
// for having the same base-128 encoded lengths and one-byte flag,
// and then having the same 'length' bytes following.
static bool Match(void* first, void* second);
// Creates a hash from a sequence of bytes.
- static uint32_t Hash(Vector<const byte> key, bool is_ascii);
+ static uint32_t Hash(Vector<const uint8_t> key, bool is_one_byte);
// Checks whether a string containing a JS number is its canonical
// form.
- static bool IsNumberCanonical(Vector<const char> key);
+ static bool IsNumberCanonical(Vector<const uint8_t> key);
// Size of buffer. Sufficient for using it to call DoubleToCString in
// from conversions.h.
@@ -206,7 +191,7 @@ class DuplicateFinder {
class LiteralBuffer {
public:
- LiteralBuffer() : is_ascii_(true), position_(0), backing_store_() { }
+ LiteralBuffer() : is_one_byte_(true), position_(0), backing_store_() { }
~LiteralBuffer() {
if (backing_store_.length() > 0) {
@@ -216,48 +201,48 @@ class LiteralBuffer {
INLINE(void AddChar(uint32_t code_unit)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
- if (is_ascii_) {
+ if (is_one_byte_) {
if (code_unit <= unibrow::Latin1::kMaxChar) {
backing_store_[position_] = static_cast<byte>(code_unit);
position_ += kOneByteSize;
return;
}
- ConvertToUtf16();
+ ConvertToTwoByte();
}
ASSERT(code_unit < 0x10000u);
- *reinterpret_cast<uc16*>(&backing_store_[position_]) = code_unit;
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
position_ += kUC16Size;
}
- bool is_ascii() { return is_ascii_; }
+ bool is_one_byte() { return is_one_byte_; }
bool is_contextual_keyword(Vector<const char> keyword) {
- return is_ascii() && keyword.length() == position_ &&
+ return is_one_byte() && keyword.length() == position_ &&
(memcmp(keyword.start(), backing_store_.start(), position_) == 0);
}
- Vector<const uc16> utf16_literal() {
- ASSERT(!is_ascii_);
+ Vector<const uint16_t> two_byte_literal() {
+ ASSERT(!is_one_byte_);
ASSERT((position_ & 0x1) == 0);
- return Vector<const uc16>(
- reinterpret_cast<const uc16*>(backing_store_.start()),
+ return Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(backing_store_.start()),
position_ >> 1);
}
- Vector<const char> ascii_literal() {
- ASSERT(is_ascii_);
- return Vector<const char>(
- reinterpret_cast<const char*>(backing_store_.start()),
+ Vector<const uint8_t> one_byte_literal() {
+ ASSERT(is_one_byte_);
+ return Vector<const uint8_t>(
+ reinterpret_cast<const uint8_t*>(backing_store_.start()),
position_);
}
int length() {
- return is_ascii_ ? position_ : (position_ >> 1);
+ return is_one_byte_ ? position_ : (position_ >> 1);
}
void Reset() {
position_ = 0;
- is_ascii_ = true;
+ is_one_byte_ = true;
}
private:
@@ -273,13 +258,13 @@ class LiteralBuffer {
void ExpandBuffer() {
Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
- OS::MemCopy(new_store.start(), backing_store_.start(), position_);
+ MemCopy(new_store.start(), backing_store_.start(), position_);
backing_store_.Dispose();
backing_store_ = new_store;
}
- void ConvertToUtf16() {
- ASSERT(is_ascii_);
+ void ConvertToTwoByte() {
+ ASSERT(is_one_byte_);
Vector<byte> new_store;
int new_content_size = position_ * kUC16Size;
if (new_content_size >= backing_store_.length()) {
@@ -290,7 +275,7 @@ class LiteralBuffer {
new_store = backing_store_;
}
uint8_t* src = backing_store_.start();
- uc16* dst = reinterpret_cast<uc16*>(new_store.start());
+ uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
for (int i = position_ - 1; i >= 0; i--) {
dst[i] = src[i];
}
@@ -299,10 +284,10 @@ class LiteralBuffer {
backing_store_ = new_store;
}
position_ = new_content_size;
- is_ascii_ = false;
+ is_one_byte_ = false;
}
- bool is_ascii_;
+ bool is_one_byte_;
int position_;
Vector<byte> backing_store_;
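LiteralBuffer starts in one-byte (Latin-1) mode and widens in place the first time a code unit above 0xFF arrives; every already-stored byte is copied up into a uint16_t slot. A self-contained toy version of the same policy (not the V8 class):

    #include <cstdint>
    #include <vector>

    struct ToyLiteralBuffer {
      bool one_byte = true;
      std::vector<uint8_t> narrow;   // used while all chars fit in Latin-1
      std::vector<uint16_t> wide;    // used after the first wide char
      void AddChar(uint16_t code_unit) {
        if (one_byte) {
          if (code_unit <= 0xFF) {
            narrow.push_back(static_cast<uint8_t>(code_unit));
            return;
          }
          wide.assign(narrow.begin(), narrow.end());  // widen existing chars
          narrow.clear();
          one_byte = false;
        }
        wide.push_back(code_unit);
      }
    };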
@@ -365,32 +350,13 @@ class Scanner {
// Returns the location information for the current token
// (the token last returned by Next()).
Location location() const { return current_.location; }
- // Returns the literal string, if any, for the current token (the
- // token last returned by Next()). The string is 0-terminated.
- // Literal strings are collected for identifiers, strings, and
- // numbers.
- // These functions only give the correct result if the literal
- // was scanned between calls to StartLiteral() and TerminateLiteral().
- Vector<const char> literal_ascii_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->ascii_literal();
- }
- Vector<const uc16> literal_utf16_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->utf16_literal();
- }
- bool is_literal_ascii() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_ascii();
- }
- bool is_literal_contextual_keyword(Vector<const char> keyword) {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_contextual_keyword(keyword);
- }
- int literal_length() const {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->length();
- }
+
+ // Similar functions for the upcoming token.
+
+ // One token look-ahead (past the token returned by Next()).
+ Token::Value peek() const { return next_.token; }
+
+ Location peek_location() const { return next_.location; }
bool literal_contains_escapes() const {
Location location = current_.location;
@@ -401,43 +367,45 @@ class Scanner {
}
return current_.literal_chars->length() != source_length;
}
-
- // Similar functions for the upcoming token.
-
- // One token look-ahead (past the token returned by Next()).
- Token::Value peek() const { return next_.token; }
-
- Location peek_location() const { return next_.location; }
-
- // Returns the literal string for the next token (the token that
- // would be returned if Next() were called).
- Vector<const char> next_literal_ascii_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->ascii_literal();
- }
- Vector<const uc16> next_literal_utf16_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->utf16_literal();
- }
- bool is_next_literal_ascii() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->is_ascii();
+ bool is_literal_contextual_keyword(Vector<const char> keyword) {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_contextual_keyword(keyword);
}
bool is_next_contextual_keyword(Vector<const char> keyword) {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->is_contextual_keyword(keyword);
}
- int next_literal_length() const {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->length();
- }
- UnicodeCache* unicode_cache() { return unicode_cache_; }
+ Handle<String> AllocateNextLiteralString(Isolate* isolate,
+ PretenureFlag tenured);
+ Handle<String> AllocateInternalizedString(Isolate* isolate);
- static const int kCharacterLookaheadBufferSize = 1;
+ double DoubleValue();
+ bool UnescapedLiteralMatches(const char* data, int length) {
+ if (is_literal_one_byte() &&
+ literal_length() == length &&
+ !literal_contains_escapes()) {
+ const char* token =
+ reinterpret_cast<const char*>(literal_one_byte_string().start());
+ return !strncmp(token, data, length);
+ }
+ return false;
+ }
+ void IsGetOrSet(bool* is_get, bool* is_set) {
+ if (is_literal_one_byte() &&
+ literal_length() == 3 &&
+ !literal_contains_escapes()) {
+ const char* token =
+ reinterpret_cast<const char*>(literal_one_byte_string().start());
+ *is_get = strncmp(token, "get", 3) == 0;
+ *is_set = !*is_get && strncmp(token, "set", 3) == 0;
+ }
+ }
- // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
- uc32 ScanOctalEscape(uc32 c, int length);
+ int FindNumber(DuplicateFinder* finder, int value);
+ int FindSymbol(DuplicateFinder* finder, int value);
+
+ UnicodeCache* unicode_cache() { return unicode_cache_; }
// Returns the location of the last seen octal literal.
Location octal_position() const { return octal_pos_; }
@@ -490,6 +458,11 @@ class Scanner {
LiteralBuffer* literal_chars;
};
+ static const int kCharacterLookaheadBufferSize = 1;
+
+ // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+ uc32 ScanOctalEscape(uc32 c, int length);
+
// Call this after setting source_ to the input.
void Init() {
// Set c0_ (one character ahead)
@@ -550,6 +523,47 @@ class Scanner {
}
}
+ // Returns the literal string, if any, for the current token (the
+ // token last returned by Next()). The string is 0-terminated.
+ // Literal strings are collected for identifiers, strings, and
+ // numbers.
+ // These functions only give the correct result if the literal
+ // was scanned between calls to StartLiteral() and TerminateLiteral().
+ Vector<const uint8_t> literal_one_byte_string() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->one_byte_literal();
+ }
+ Vector<const uint16_t> literal_two_byte_string() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->two_byte_literal();
+ }
+ bool is_literal_one_byte() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_one_byte();
+ }
+ int literal_length() const {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->length();
+ }
+ // Returns the literal string for the next token (the token that
+ // would be returned if Next() were called).
+ Vector<const uint8_t> next_literal_one_byte_string() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->one_byte_literal();
+ }
+ Vector<const uint16_t> next_literal_two_byte_string() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->two_byte_literal();
+ }
+ bool is_next_literal_one_byte() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->is_one_byte();
+ }
+ int next_literal_length() const {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->length();
+ }
+
uc32 ScanHexNumber(int expected_length);
// Scans a single JavaScript token.
diff --git a/chromium/v8/src/scopeinfo.cc b/chromium/v8/src/scopeinfo.cc
index 03e69bf3842..d84c5bf0057 100644
--- a/chromium/v8/src/scopeinfo.cc
+++ b/chromium/v8/src/scopeinfo.cc
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdlib.h>
-#include "v8.h"
+#include "src/v8.h"
-#include "scopeinfo.h"
-#include "scopes.h"
+#include "src/scopeinfo.h"
+#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -78,7 +55,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
// Encode the flags.
int flags = ScopeTypeField::encode(scope->scope_type()) |
CallsEvalField::encode(scope->calls_eval()) |
- LanguageModeField::encode(scope->language_mode()) |
+ StrictModeField::encode(scope->strict_mode()) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode);
scope_info->SetFlags(flags);
@@ -164,8 +141,8 @@ bool ScopeInfo::CallsEval() {
}
-LanguageMode ScopeInfo::language_mode() {
- return length() > 0 ? LanguageModeField::decode(Flags()) : CLASSIC_MODE;
+StrictMode ScopeInfo::strict_mode() {
+ return length() > 0 ? StrictModeField::decode(Flags()) : SLOPPY;
}
@@ -278,6 +255,17 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
}
+bool ScopeInfo::LocalIsSynthetic(int var) {
+ ASSERT(0 <= var && var < LocalCount());
+ // There's currently no flag stored on the ScopeInfo to indicate that a
+ // variable is a compiler-introduced temporary. However, to avoid conflict
+ // with user declarations, the current temporaries like .generator_object and
+ // .result start with a dot, so we can use that as a flag. It's a hack!
+ Handle<String> name(LocalName(var));
+ return name->length() > 0 && name->Get(0) == '.';
+}
+
+
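LocalIsSynthetic gives callers that materialize locals (the debugger, scope mirrors) a way to skip dot-prefixed compiler temporaries by name alone. A sketch of the intended use; the loop body is illustrative:

    // Expose only user-visible locals; ".generator_object", ".result"
    // and friends are filtered out by the dot-prefix test.
    for (int i = 0; i < scope_info->LocalCount(); i++) {
      if (scope_info->LocalIsSynthetic(i)) continue;
      Handle<String> name(scope_info->LocalName(i));
      // ... record 'name' ...
    }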
int ScopeInfo::StackSlotIndex(String* name) {
ASSERT(name->IsInternalizedString());
if (length() > 0) {
@@ -293,35 +281,40 @@ int ScopeInfo::StackSlotIndex(String* name) {
}
-int ScopeInfo::ContextSlotIndex(String* name,
+int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
+ Handle<String> name,
VariableMode* mode,
InitializationFlag* init_flag) {
ASSERT(name->IsInternalizedString());
ASSERT(mode != NULL);
ASSERT(init_flag != NULL);
- if (length() > 0) {
- ContextSlotCache* context_slot_cache = GetIsolate()->context_slot_cache();
- int result = context_slot_cache->Lookup(this, name, mode, init_flag);
+ if (scope_info->length() > 0) {
+ ContextSlotCache* context_slot_cache =
+ scope_info->GetIsolate()->context_slot_cache();
+ int result =
+ context_slot_cache->Lookup(*scope_info, *name, mode, init_flag);
if (result != ContextSlotCache::kNotFound) {
- ASSERT(result < ContextLength());
+ ASSERT(result < scope_info->ContextLength());
return result;
}
- int start = ContextLocalNameEntriesIndex();
- int end = ContextLocalNameEntriesIndex() + ContextLocalCount();
+ int start = scope_info->ContextLocalNameEntriesIndex();
+ int end = scope_info->ContextLocalNameEntriesIndex() +
+ scope_info->ContextLocalCount();
for (int i = start; i < end; ++i) {
- if (name == get(i)) {
+ if (*name == scope_info->get(i)) {
int var = i - start;
- *mode = ContextLocalMode(var);
- *init_flag = ContextLocalInitFlag(var);
+ *mode = scope_info->ContextLocalMode(var);
+ *init_flag = scope_info->ContextLocalInitFlag(var);
result = Context::MIN_CONTEXT_SLOTS + var;
- context_slot_cache->Update(this, name, *mode, *init_flag, result);
- ASSERT(result < ContextLength());
+ context_slot_cache->Update(scope_info, name, *mode, *init_flag, result);
+ ASSERT(result < scope_info->ContextLength());
return result;
}
}
// Cache as not found. Mode and init flag don't matter.
- context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
+ context_slot_cache->Update(
+ scope_info, name, INTERNAL, kNeedsInitialization, -1);
}
return -1;
}
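ContextSlotIndex is now a static method over a Handle<ScopeInfo> rather than an instance method on a raw this: the lookup can allocate (the cache update internalizes the name), and a raw receiver would be unsafe if a GC moved the object mid-call. The call-site change, as it appears in scopes.cc later in this patch:

    // Before: raw receiver, invalidated if a GC moves the object.
    int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag);
    // After: handlified receiver and name stay valid across allocation.
    int index =
        ScopeInfo::ContextSlotIndex(scope_info_, name, &mode, &init_flag);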
@@ -368,18 +361,21 @@ bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
int local_count = scope_info->ContextLocalCount();
if (local_count == 0) return true;
// Fill all context locals to the context extension.
+ int first_context_var = scope_info->StackLocalCount();
int start = scope_info->ContextLocalNameEntriesIndex();
- int end = start + local_count;
- for (int i = start; i < end; ++i) {
- int context_index = Context::MIN_CONTEXT_SLOTS + i - start;
- Handle<Object> result = Runtime::SetObjectProperty(
+ for (int i = 0; i < local_count; ++i) {
+ if (scope_info->LocalIsSynthetic(first_context_var + i)) continue;
+ int context_index = Context::MIN_CONTEXT_SLOTS + i;
+ RETURN_ON_EXCEPTION_VALUE(
isolate,
- scope_object,
- Handle<String>(String::cast(scope_info->get(i))),
- Handle<Object>(context->get(context_index), isolate),
- ::NONE,
- kNonStrictMode);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, false);
+ Runtime::SetObjectProperty(
+ isolate,
+ scope_object,
+ Handle<String>(String::cast(scope_info->get(i + start))),
+ Handle<Object>(context->get(context_index), isolate),
+ ::NONE,
+ SLOPPY),
+ false);
}
return true;
}
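The rewritten loop juggles three index spaces at once: position i among the context locals, the slot in the name table, and the slot in the context, plus the combined local index fed to LocalIsSynthetic. A worked example, assuming two stack locals and three context locals (numbers purely illustrative):

    // StackLocalCount() == 2, so first_context_var == 2 and, for i = 0..2:
    //   name:          scope_info->get(i + start)
    //   context slot:  Context::MIN_CONTEXT_SLOTS + i
    //   synthetic bit: LocalIsSynthetic(2 + i)
    // Stack locals precede context locals in the combined local numbering,
    // hence the first_context_var offset.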
@@ -435,19 +431,20 @@ int ContextSlotCache::Lookup(Object* data,
}
-void ContextSlotCache::Update(Object* data,
- String* name,
+void ContextSlotCache::Update(Handle<Object> data,
+ Handle<String> name,
VariableMode mode,
InitializationFlag init_flag,
int slot_index) {
- String* internalized_name;
+ DisallowHeapAllocation no_gc;
+ Handle<String> internalized_name;
ASSERT(slot_index > kNotFound);
- if (name->GetIsolate()->heap()->InternalizeStringIfExists(
- name, &internalized_name)) {
- int index = Hash(data, internalized_name);
+ if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name).
+ ToHandle(&internalized_name)) {
+ int index = Hash(*data, *internalized_name);
Key& key = keys_[index];
- key.data = data;
- key.name = internalized_name;
+ key.data = *data;
+ key.name = *internalized_name;
    // Note: Value stores the index as a uint, so we bias by -kNotFound to
    // keep even the "cache as not found" sentinel (-1) non-negative.
values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
#ifdef DEBUG
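The internalization check now returns a MaybeHandle rather than filling an out-parameter: ToHandle() tests for success and extracts the value in one step, so the cache is only written when an internalized copy of the name already exists. The idiom in isolation (a sketch; the MaybeHandle<String> return type is inferred from the call shape):

    Handle<String> internalized;
    if (StringTable::InternalizeStringIfExists(isolate, name)
            .ToHandle(&internalized)) {
      // An internalized copy exists; safe to key the cache on it.
    } else {
      // Never internalized: skip the cache update entirely.
    }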
@@ -464,18 +461,19 @@ void ContextSlotCache::Clear() {
#ifdef DEBUG
-void ContextSlotCache::ValidateEntry(Object* data,
- String* name,
+void ContextSlotCache::ValidateEntry(Handle<Object> data,
+ Handle<String> name,
VariableMode mode,
InitializationFlag init_flag,
int slot_index) {
- String* internalized_name;
- if (name->GetIsolate()->heap()->InternalizeStringIfExists(
- name, &internalized_name)) {
- int index = Hash(data, name);
+ DisallowHeapAllocation no_gc;
+ Handle<String> internalized_name;
+ if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name).
+ ToHandle(&internalized_name)) {
+ int index = Hash(*data, *name);
Key& key = keys_[index];
- ASSERT(key.data == data);
- ASSERT(key.name->Equals(name));
+ ASSERT(key.data == *data);
+ ASSERT(key.name->Equals(*name));
Value result(values_[index]);
ASSERT(result.mode() == mode);
ASSERT(result.initialization_flag() == init_flag);
@@ -541,7 +539,7 @@ Handle<ModuleInfo> ModuleInfo::Create(
int i = 0;
for (Interface::Iterator it = interface->iterator();
!it.done(); it.Advance(), ++i) {
- Variable* var = scope->LocalLookup(it.name());
+ Variable* var = scope->LookupLocal(it.name());
info->set_name(i, *it.name());
info->set_mode(i, var->mode());
ASSERT((var->mode() == MODULE) == (it.interface()->IsModule()));
diff --git a/chromium/v8/src/scopeinfo.h b/chromium/v8/src/scopeinfo.h
index a884b3b9ed8..7b8ed44d5bd 100644
--- a/chromium/v8/src/scopeinfo.h
+++ b/chromium/v8/src/scopeinfo.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SCOPEINFO_H_
#define V8_SCOPEINFO_H_
-#include "allocation.h"
-#include "variables.h"
-#include "zone-inl.h"
+#include "src/allocation.h"
+#include "src/variables.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -49,8 +26,8 @@ class ContextSlotCache {
InitializationFlag* init_flag);
// Update an element in the cache.
- void Update(Object* data,
- String* name,
+ void Update(Handle<Object> data,
+ Handle<String> name,
VariableMode mode,
InitializationFlag init_flag,
int slot_index);
@@ -72,8 +49,8 @@ class ContextSlotCache {
inline static int Hash(Object* data, String* name);
#ifdef DEBUG
- void ValidateEntry(Object* data,
- String* name,
+ void ValidateEntry(Handle<Object> data,
+ Handle<String> name,
VariableMode mode,
InitializationFlag init_flag,
int slot_index);
diff --git a/chromium/v8/src/scopes.cc b/chromium/v8/src/scopes.cc
index fefc696d1a6..497f7940adf 100644
--- a/chromium/v8/src/scopes.cc
+++ b/chromium/v8/src/scopes.cc
@@ -1,39 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "scopes.h"
-
-#include "accessors.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "messages.h"
-#include "scopeinfo.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/scopes.h"
+
+#include "src/accessors.h"
+#include "src/bootstrapper.h"
+#include "src/compiler.h"
+#include "src/messages.h"
+#include "src/scopeinfo.h"
namespace v8 {
namespace internal {
@@ -190,9 +167,8 @@ void Scope::SetDefaults(ScopeType scope_type,
scope_contains_with_ = false;
scope_calls_eval_ = false;
// Inherit the strict mode from the parent scope.
- language_mode_ = (outer_scope != NULL)
- ? outer_scope->language_mode_ : CLASSIC_MODE;
- outer_scope_calls_non_strict_eval_ = false;
+ strict_mode_ = outer_scope != NULL ? outer_scope->strict_mode_ : SLOPPY;
+ outer_scope_calls_sloppy_eval_ = false;
inner_scope_calls_eval_ = false;
force_eager_compilation_ = false;
force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
@@ -207,7 +183,7 @@ void Scope::SetDefaults(ScopeType scope_type,
end_position_ = RelocInfo::kNoPosition;
if (!scope_info.is_null()) {
scope_calls_eval_ = scope_info->CallsEval();
- language_mode_ = scope_info->language_mode();
+ strict_mode_ = scope_info->strict_mode();
}
}
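Both initialization paths agree on how strictness is established: without a ScopeInfo the scope inherits strict_mode_ from its parent (SLOPPY at the root), and with one it restores the recorded mode. This mirrors the JavaScript rule that strictness flows inward, sketched here in comments:

    // function outer() {
    //   "use strict";          // outer scope: STRICT
    //   function inner() {}    // inner scope: STRICT, via
    // }                        //   strict_mode_ = outer_scope_->strict_mode_
    // var f = function () {};  // top-level sloppy code: SLOPPY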
@@ -290,8 +266,7 @@ bool Scope::Analyze(CompilationInfo* info) {
// Allocate the variables.
{
- AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate(),
- info->zone());
+ AstNodeFactory<AstNullVisitor> ast_node_factory(info->zone());
if (!top->AllocateVariables(info, &ast_node_factory)) return false;
}
@@ -308,7 +283,7 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
- info->SetScope(scope);
+ info->PrepareForCompilation(scope);
return true;
}
@@ -391,7 +366,7 @@ Scope* Scope::FinalizeBlockScope() {
}
-Variable* Scope::LocalLookup(Handle<String> name) {
+Variable* Scope::LookupLocal(Handle<String> name) {
Variable* result = variables_.Lookup(name);
if (result != NULL || scope_info_.is_null()) {
return result;
@@ -404,7 +379,7 @@ Variable* Scope::LocalLookup(Handle<String> name) {
VariableMode mode;
Variable::Location location = Variable::CONTEXT;
InitializationFlag init_flag;
- int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag);
+ int index = ScopeInfo::ContextSlotIndex(scope_info_, name, &mode, &init_flag);
if (index < 0) {
// Check parameters.
index = scope_info_->ParameterIndex(*name);
@@ -450,7 +425,7 @@ Variable* Scope::Lookup(Handle<String> name) {
for (Scope* scope = this;
scope != NULL;
scope = scope->outer_scope()) {
- Variable* var = scope->LocalLookup(name);
+ Variable* var = scope->LookupLocal(name);
if (var != NULL) return var;
}
return NULL;
@@ -471,7 +446,7 @@ Variable* Scope::DeclareLocal(Handle<String> name,
InitializationFlag init_flag,
Interface* interface) {
ASSERT(!already_resolved());
- // This function handles VAR and CONST modes. DYNAMIC variables are
+ // This function handles VAR, LET, and CONST modes. DYNAMIC variables are
   // introduced during variable allocation, INTERNAL variables are allocated
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
ASSERT(IsDeclaredVariableMode(mode));
@@ -644,13 +619,13 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
bool Scope::AllocateVariables(CompilationInfo* info,
AstNodeFactory<AstNullVisitor>* factory) {
// 1) Propagate scope information.
- bool outer_scope_calls_non_strict_eval = false;
+ bool outer_scope_calls_sloppy_eval = false;
if (outer_scope_ != NULL) {
- outer_scope_calls_non_strict_eval =
- outer_scope_->outer_scope_calls_non_strict_eval() |
- outer_scope_->calls_non_strict_eval();
+ outer_scope_calls_sloppy_eval =
+ outer_scope_->outer_scope_calls_sloppy_eval() |
+ outer_scope_->calls_sloppy_eval();
}
- PropagateScopeInfo(outer_scope_calls_non_strict_eval);
+ PropagateScopeInfo(outer_scope_calls_sloppy_eval);
// 2) Allocate module instances.
if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
@@ -800,7 +775,7 @@ static void Indent(int n, const char* str) {
static void PrintName(Handle<String> name) {
SmartArrayPointer<char> s = name->ToCString(DISALLOW_NULLS);
- PrintF("%s", *s);
+ PrintF("%s", s.get());
}
@@ -882,21 +857,14 @@ void Scope::Print(int n) {
if (HasTrivialOuterContext()) {
Indent(n1, "// scope has trivial outer context\n");
}
- switch (language_mode()) {
- case CLASSIC_MODE:
- break;
- case STRICT_MODE:
- Indent(n1, "// strict mode scope\n");
- break;
- case EXTENDED_MODE:
- Indent(n1, "// extended mode scope\n");
- break;
+ if (strict_mode() == STRICT) {
+ Indent(n1, "// strict mode scope\n");
}
if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
- if (outer_scope_calls_non_strict_eval_) {
- Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
+ if (outer_scope_calls_sloppy_eval_) {
+ Indent(n1, "// outer scope calls 'eval' in sloppy context\n");
}
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
if (num_stack_slots_ > 0) { Indent(n1, "// ");
@@ -982,7 +950,7 @@ Variable* Scope::LookupRecursive(Handle<String> name,
}
// Try to find the variable in this scope.
- Variable* var = LocalLookup(name);
+ Variable* var = LookupLocal(name);
// We found a variable and we are done. (Even if there is an 'eval' in
// this scope which introduces the same variable again, the resulting
@@ -1018,9 +986,9 @@ Variable* Scope::LookupRecursive(Handle<String> name,
// object).
*binding_kind = DYNAMIC_LOOKUP;
return NULL;
- } else if (calls_non_strict_eval()) {
+ } else if (calls_sloppy_eval()) {
// A variable binding may have been found in an outer scope, but the current
- // scope makes a non-strict 'eval' call, so the found variable may not be
+ // scope makes a sloppy 'eval' call, so the found variable may not be
// the correct one (the 'eval' may introduce a binding with the same name).
// In that case, change the lookup result to reflect this situation.
if (*binding_kind == BOUND) {
@@ -1072,8 +1040,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
break;
case UNBOUND_EVAL_SHADOWED:
- // No binding has been found. But some scope makes a
- // non-strict 'eval' call.
+ // No binding has been found. But some scope makes a sloppy 'eval' call.
var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
break;
@@ -1085,7 +1052,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
ASSERT(var != NULL);
- if (FLAG_harmony_scoping && is_extended_mode() &&
+ if (FLAG_harmony_scoping && strict_mode() == STRICT &&
var->is_const_mode() && proxy->IsLValue()) {
// Assignment to const. Throw a syntax error.
MessageLocation location(
@@ -1124,7 +1091,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(1);
- USE(JSObject::SetElement(array, 0, var->name(), NONE, kStrictMode));
+ JSObject::SetElement(array, 0, var->name(), NONE, STRICT).Assert();
Handle<Object> result =
factory->NewSyntaxError("module_type_error", array);
isolate->Throw(*result, &location);
@@ -1158,16 +1125,16 @@ bool Scope::ResolveVariablesRecursively(
}
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_non_strict_eval ) {
- if (outer_scope_calls_non_strict_eval) {
- outer_scope_calls_non_strict_eval_ = true;
+bool Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval) {
+ if (outer_scope_calls_sloppy_eval) {
+ outer_scope_calls_sloppy_eval_ = true;
}
- bool calls_non_strict_eval =
- this->calls_non_strict_eval() || outer_scope_calls_non_strict_eval_;
+ bool calls_sloppy_eval =
+ this->calls_sloppy_eval() || outer_scope_calls_sloppy_eval_;
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* inner_scope = inner_scopes_[i];
- if (inner_scope->PropagateScopeInfo(calls_non_strict_eval)) {
+ if (inner_scope->PropagateScopeInfo(calls_sloppy_eval)) {
inner_scope_calls_eval_ = true;
}
if (inner_scope->force_eager_compilation_) {
@@ -1244,10 +1211,10 @@ void Scope::AllocateHeapSlot(Variable* var) {
void Scope::AllocateParameterLocals() {
ASSERT(is_function_scope());
- Variable* arguments = LocalLookup(isolate_->factory()->arguments_string());
+ Variable* arguments = LookupLocal(isolate_->factory()->arguments_string());
ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
- bool uses_nonstrict_arguments = false;
+ bool uses_sloppy_arguments = false;
if (MustAllocate(arguments) && !HasArgumentsParameter()) {
// 'arguments' is used. Unless there is also a parameter called
@@ -1266,7 +1233,7 @@ void Scope::AllocateParameterLocals() {
// In strict mode 'arguments' does not alias formal parameters.
// Therefore in strict mode we allocate parameters as if 'arguments'
// were not used.
- uses_nonstrict_arguments = is_classic_mode();
+ uses_sloppy_arguments = strict_mode() == SLOPPY;
}
// The same parameter may occur multiple times in the parameters_ list.
@@ -1276,7 +1243,7 @@ void Scope::AllocateParameterLocals() {
for (int i = params_.length() - 1; i >= 0; --i) {
Variable* var = params_[i];
ASSERT(var->scope() == this);
- if (uses_nonstrict_arguments) {
+ if (uses_sloppy_arguments || has_forced_context_allocation()) {
// Force context allocation of the parameter.
var->ForceContextAllocation();
}
diff --git a/chromium/v8/src/scopes.h b/chromium/v8/src/scopes.h
index 06aaa902cf8..4486921d6a2 100644
--- a/chromium/v8/src/scopes.h
+++ b/chromium/v8/src/scopes.h
@@ -1,35 +1,12 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SCOPES_H_
#define V8_SCOPES_H_
-#include "ast.h"
-#include "zone.h"
+#include "src/ast.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -123,7 +100,7 @@ class Scope: public ZoneObject {
// Declarations
// Lookup a variable in this scope. Returns the variable or NULL if not found.
- Variable* LocalLookup(Handle<String> name);
+ Variable* LookupLocal(Handle<String> name);
// This lookup corresponds to a lookup in the "intermediate" scope sitting
// between this scope and the outer scope. (ECMA-262, 3rd., requires that
@@ -234,9 +211,7 @@ class Scope: public ZoneObject {
void RecordEvalCall() { if (!is_global_scope()) scope_calls_eval_ = true; }
// Set the strict mode flag (unless disabled by a global flag).
- void SetLanguageMode(LanguageMode language_mode) {
- language_mode_ = language_mode;
- }
+ void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
// Position in the source where this scope begins and ends.
//
@@ -293,23 +268,17 @@ class Scope: public ZoneObject {
return is_eval_scope() || is_function_scope() ||
is_module_scope() || is_global_scope();
}
- bool is_classic_mode() const {
- return language_mode() == CLASSIC_MODE;
- }
- bool is_extended_mode() const {
- return language_mode() == EXTENDED_MODE;
- }
- bool is_strict_or_extended_eval_scope() const {
- return is_eval_scope() && !is_classic_mode();
+ bool is_strict_eval_scope() const {
+ return is_eval_scope() && strict_mode_ == STRICT;
}
// Information about which scopes calls eval.
bool calls_eval() const { return scope_calls_eval_; }
- bool calls_non_strict_eval() {
- return scope_calls_eval_ && is_classic_mode();
+ bool calls_sloppy_eval() {
+ return scope_calls_eval_ && strict_mode_ == SLOPPY;
}
- bool outer_scope_calls_non_strict_eval() const {
- return outer_scope_calls_non_strict_eval_;
+ bool outer_scope_calls_sloppy_eval() const {
+ return outer_scope_calls_sloppy_eval_;
}
// Is this scope inside a with statement.
@@ -324,7 +293,7 @@ class Scope: public ZoneObject {
ScopeType scope_type() const { return scope_type_; }
// The language mode of this scope.
- LanguageMode language_mode() const { return language_mode_; }
+ StrictMode strict_mode() const { return strict_mode_; }
// The variable corresponding the 'this' value.
Variable* receiver() { return receiver_; }
@@ -493,14 +462,14 @@ class Scope: public ZoneObject {
// This scope or a nested catch scope or with scope contain an 'eval' call. At
// the 'eval' call site this scope is the declaration scope.
bool scope_calls_eval_;
- // The language mode of this scope.
- LanguageMode language_mode_;
+ // The strict mode of this scope.
+ StrictMode strict_mode_;
// Source positions.
int start_position_;
int end_position_;
// Computed via PropagateScopeInfo.
- bool outer_scope_calls_non_strict_eval_;
+ bool outer_scope_calls_sloppy_eval_;
bool inner_scope_calls_eval_;
bool force_eager_compilation_;
bool force_context_allocation_;
@@ -538,13 +507,13 @@ class Scope: public ZoneObject {
// The variable reference could be statically resolved to a variable binding
// which is returned. There is no 'with' statement between the reference and
// the binding and no scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a non-strict 'eval' call.
+ // binding scope (exclusive) makes a sloppy 'eval' call.
BOUND,
// The variable reference could be statically resolved to a variable binding
// which is returned. There is no 'with' statement between the reference and
// the binding, but some scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a non-strict 'eval' call, that might
+ // binding scope (exclusive) makes a sloppy 'eval' call that might
// possibly introduce variable bindings shadowing the found one. Thus the
// found variable binding is just a guess.
BOUND_EVAL_SHADOWED,
@@ -553,13 +522,13 @@ class Scope: public ZoneObject {
// and thus should be considered referencing a global variable. NULL is
// returned. The variable reference is not inside any 'with' statement and
// no scope between the reference scope (inclusive) and global scope
- // (exclusive) makes a non-strict 'eval' call.
+ // (exclusive) makes a sloppy 'eval' call.
UNBOUND,
// The variable reference could not be statically resolved to any binding
// NULL is returned. The variable reference is not inside any 'with'
// statement, but some scope between the reference scope (inclusive) and
+ // global scope (exclusive) makes a sloppy 'eval' call that might
+ // global scope (exclusive) makes a sloppy 'eval' call, that might
// possibly introduce a variable binding. Thus the reference should be
// considered referencing a global variable unless it is shadowed by an
// 'eval' introduced binding.
@@ -591,7 +560,7 @@ class Scope: public ZoneObject {
AstNodeFactory<AstNullVisitor>* factory);
// Scope analysis.
- bool PropagateScopeInfo(bool outer_scope_calls_non_strict_eval);
+ bool PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
bool HasTrivialContext() const;
// Predicates.
diff --git a/chromium/v8/src/serialize.cc b/chromium/v8/src/serialize.cc
index a0a66f9e9f1..4e5699c5104 100644
--- a/chromium/v8/src/serialize.cc
+++ b/chromium/v8/src/serialize.cc
@@ -1,46 +1,23 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "api.h"
-#include "bootstrapper.h"
-#include "deoptimizer.h"
-#include "execution.h"
-#include "global-handles.h"
-#include "ic-inl.h"
-#include "natives.h"
-#include "platform.h"
-#include "runtime.h"
-#include "serialize.h"
-#include "snapshot.h"
-#include "stub-cache.h"
-#include "v8threads.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/api.h"
+#include "src/bootstrapper.h"
+#include "src/deoptimizer.h"
+#include "src/execution.h"
+#include "src/global-handles.h"
+#include "src/ic-inl.h"
+#include "src/natives.h"
+#include "src/platform.h"
+#include "src/runtime.h"
+#include "src/serialize.h"
+#include "src/snapshot.h"
+#include "src/stub-cache.h"
+#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -173,8 +150,25 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
"Runtime::" #name },
RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
+ INLINE_OPTIMIZED_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY
+#define RUNTIME_HIDDEN_ENTRY(name, nargs, ressize) \
+ { RUNTIME_FUNCTION, \
+ Runtime::kHidden##name, \
+ "Runtime::Hidden" #name },
+
+ RUNTIME_HIDDEN_FUNCTION_LIST(RUNTIME_HIDDEN_ENTRY)
+#undef RUNTIME_HIDDEN_ENTRY
+
+#define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize) \
+ { RUNTIME_FUNCTION, \
+ Runtime::kInlineOptimized##name, \
+ "Runtime::" #name },
+
+ INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_OPTIMIZED_ENTRY)
+#undef INLINE_OPTIMIZED_ENTRY
+
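Each entry in the X-macro list expands to one table row. For example, if RUNTIME_HIDDEN_FUNCTION_LIST contained an entry named CompileLazy (the name is illustrative), RUNTIME_HIDDEN_ENTRY above would emit:

    { RUNTIME_FUNCTION,
      Runtime::kHiddenCompileLazy,
      "Runtime::HiddenCompileLazy" },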
// IC utilities
#define IC_ENTRY(name) \
{ IC_UTILITY, \
@@ -192,26 +186,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
isolate);
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Debug addresses
- Add(Debug_Address(Debug::k_after_break_target_address).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_after_break_target_address << kDebugIdShift,
- "Debug::after_break_target_address()");
- Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_debug_break_slot_address << kDebugIdShift,
- "Debug::debug_break_slot_address()");
- Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_debug_break_return_address << kDebugIdShift,
- "Debug::debug_break_return_address()");
- Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate),
- DEBUG_ADDRESS,
- Debug::k_restarter_frame_function_pointer << kDebugIdShift,
- "Debug::restarter_frame_function_pointer_address()");
-#endif
-
// Stat counters
struct StatsRefTableEntry {
StatsCounter* (Counters::*counter)();
@@ -255,14 +229,17 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
}
// Accessors
-#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
- Add((Address)&Accessors::name, \
+#define ACCESSOR_INFO_DECLARATION(name) \
+ Add(FUNCTION_ADDR(&Accessors::name##Getter), \
ACCESSOR, \
- Accessors::k##name, \
- "Accessors::" #name);
-
- ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
-#undef ACCESSOR_DESCRIPTOR_DECLARATION
+ Accessors::k##name##Getter, \
+ "Accessors::" #name "Getter"); \
+ Add(FUNCTION_ADDR(&Accessors::name##Setter), \
+ ACCESSOR, \
+ Accessors::k##name##Setter, \
+ "Accessors::" #name "Setter");
+ ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
+#undef ACCESSOR_INFO_DECLARATION
StubCache* stub_cache = isolate->stub_cache();
@@ -293,10 +270,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
"StubCache::secondary_->map");
// Runtime entries
- Add(ExternalReference::perform_gc_function(isolate).address(),
- RUNTIME_ENTRY,
- 1,
- "Runtime::PerformGC");
Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
RUNTIME_ENTRY,
4,
@@ -310,11 +283,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_ENTRY,
6,
"StoreBuffer::StoreBufferOverflow");
- Add(ExternalReference::
- incremental_evacuation_record_write_function(isolate).address(),
- RUNTIME_ENTRY,
- 7,
- "IncrementalMarking::RecordWrite");
// Miscellaneous
Add(ExternalReference::roots_array_start(isolate).address(),
@@ -356,10 +324,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
11,
"Heap::NewSpaceMask()");
- Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(),
- UNCLASSIFIED,
- 12,
- "Heap::always_allocate_scope_depth()");
Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
UNCLASSIFIED,
14,
@@ -368,7 +332,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
15,
"Heap::NewSpaceAllocationTopAddress()");
-#ifdef ENABLE_DEBUGGER_SUPPORT
Add(ExternalReference::debug_break(isolate).address(),
UNCLASSIFIED,
16,
@@ -377,31 +340,10 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
17,
"Debug::step_in_fp_addr()");
-#endif
- Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
- UNCLASSIFIED,
- 18,
- "add_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
- UNCLASSIFIED,
- 19,
- "sub_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
- UNCLASSIFIED,
- 20,
- "mul_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
- UNCLASSIFIED,
- 21,
- "div_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
+ Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
UNCLASSIFIED,
22,
"mod_two_doubles");
- Add(ExternalReference::compare_doubles(isolate).address(),
- UNCLASSIFIED,
- 23,
- "compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
UNCLASSIFIED,
@@ -429,10 +371,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
29,
"KeyedLookupCache::field_offsets()");
- Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
- UNCLASSIFIED,
- 30,
- "TranscendentalCache::caches()");
Add(ExternalReference::handle_scope_next_address(isolate).address(),
UNCLASSIFIED,
31,
@@ -521,11 +459,12 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
52,
"cpu_features");
- Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
+ Add(ExternalReference(Runtime::kHiddenAllocateInNewSpace, isolate).address(),
UNCLASSIFIED,
53,
"Runtime::AllocateInNewSpace");
- Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
+ Add(ExternalReference(
+ Runtime::kHiddenAllocateInTargetSpace, isolate).address(),
UNCLASSIFIED,
54,
"Runtime::AllocateInTargetSpace");
@@ -549,24 +488,51 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
58,
"Heap::OldDataSpaceAllocationLimitAddress");
- Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
- address(),
- UNCLASSIFIED,
- 59,
- "Heap::NewSpaceAllocationLimitAddress");
Add(ExternalReference::allocation_sites_list_address(isolate).address(),
UNCLASSIFIED,
- 60,
+ 59,
"Heap::allocation_sites_list_address()");
Add(ExternalReference::address_of_uint32_bias().address(),
UNCLASSIFIED,
- 61,
+ 60,
"uint32_bias");
Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
UNCLASSIFIED,
- 62,
+ 61,
"Code::MarkCodeAsExecuted");
+ Add(ExternalReference::is_profiling_address(isolate).address(),
+ UNCLASSIFIED,
+ 62,
+ "CpuProfiler::is_profiling");
+
+ Add(ExternalReference::scheduled_exception_address(isolate).address(),
+ UNCLASSIFIED,
+ 63,
+ "Isolate::scheduled_exception");
+
+ Add(ExternalReference::invoke_function_callback(isolate).address(),
+ UNCLASSIFIED,
+ 64,
+ "InvokeFunctionCallback");
+
+ Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
+ UNCLASSIFIED,
+ 65,
+ "InvokeAccessorGetterCallback");
+
+ // Debug addresses
+ Add(ExternalReference::debug_after_break_target_address(isolate).address(),
+ UNCLASSIFIED,
+ 66,
+ "Debug::after_break_target_address()");
+
+ Add(ExternalReference::debug_restarter_frame_function_pointer_address(
+ isolate).address(),
+ UNCLASSIFIED,
+ 67,
+ "Debug::restarter_frame_function_pointer_address()");
+
// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
HandleScope scope(isolate);
@@ -582,7 +548,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate)
- : encodings_(Match),
+ : encodings_(HashMap::PointersMatch),
isolate_(isolate) {
ExternalReferenceTable* external_references =
ExternalReferenceTable::instance(isolate_);
@@ -595,7 +561,7 @@ ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate)
uint32_t ExternalReferenceEncoder::Encode(Address key) const {
int index = IndexOf(key);
ASSERT(key == NULL || index >= 0);
- return index >=0 ?
+ return index >= 0 ?
ExternalReferenceTable::instance(isolate_)->code(index) : 0;
}
@@ -646,10 +612,6 @@ ExternalReferenceDecoder::~ExternalReferenceDecoder() {
}
-bool Serializer::serialization_enabled_ = false;
-bool Serializer::too_late_to_enable_now_ = false;
-
-
class CodeAddressMap: public CodeEventLogger {
public:
explicit CodeAddressMap(Isolate* isolate)
@@ -665,6 +627,9 @@ class CodeAddressMap: public CodeEventLogger {
address_to_name_map_.Move(from, to);
}
+ virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
+ }
+
virtual void CodeDeleteEvent(Address from) {
address_to_name_map_.Remove(from);
}
@@ -676,7 +641,7 @@ class CodeAddressMap: public CodeEventLogger {
private:
class NameMap {
public:
- NameMap() : impl_(&PointerEquals) {}
+ NameMap() : impl_(HashMap::PointersMatch) {}
~NameMap() {
for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
@@ -716,10 +681,6 @@ class CodeAddressMap: public CodeEventLogger {
}
private:
- static bool PointerEquals(void* lhs, void* rhs) {
- return lhs == rhs;
- }
-
static char* CopyName(const char* name, int name_size) {
char* result = NewArray<char>(name_size + 1);
for (int i = 0; i < name_size; ++i) {
@@ -762,28 +723,6 @@ class CodeAddressMap: public CodeEventLogger {
};
-CodeAddressMap* Serializer::code_address_map_ = NULL;
-
-
-void Serializer::Enable(Isolate* isolate) {
- if (!serialization_enabled_) {
- ASSERT(!too_late_to_enable_now_);
- }
- if (serialization_enabled_) return;
- serialization_enabled_ = true;
- isolate->InitializeLoggingAndCounters();
- code_address_map_ = new CodeAddressMap(isolate);
-}
-
-
-void Serializer::Disable() {
- if (!serialization_enabled_) return;
- serialization_enabled_ = false;
- delete code_address_map_;
- code_address_map_ = NULL;
-}
-
-
Deserializer::Deserializer(SnapshotByteSource* source)
: isolate_(NULL),
source_(source),
@@ -813,6 +752,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
+ isolate_->heap()->IterateSmiRoots(this);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
@@ -871,7 +811,8 @@ void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
Deserializer::~Deserializer() {
- ASSERT(source_->AtEOF());
+ // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
+ // ASSERT(source_->AtEOF());
if (external_reference_decoder_) {
delete external_reference_decoder_;
external_reference_decoder_ = NULL;
@@ -1011,6 +952,7 @@ void Deserializer::ReadChunk(Object** current,
reinterpret_cast<Address>(current); \
Assembler::deserialization_set_special_target_at( \
location_of_branch_data, \
+ Code::cast(HeapObject::FromAddress(current_object_address)), \
reinterpret_cast<Address>(new_object)); \
location_of_branch_data += Assembler::kSpecialTargetSize; \
current = reinterpret_cast<Object**>(location_of_branch_data); \
@@ -1172,15 +1114,15 @@ void Deserializer::ReadChunk(Object** current,
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if V8_TARGET_ARCH_MIPS
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
// Deserialize a new object from pointer found in code and write
- // a pointer to it to the current object. Required only for MIPS, and
- // omitted on the other architectures because it is fully unrolled and
- // would cause bloat.
+ // a pointer to it to the current object. Required only for MIPS or ARM
+ // with ool constant pool, and omitted on the other architectures because
+ // it is fully unrolled and would cause bloat.
ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
- // object. Required only for MIPS.
+ // object. Required only for MIPS or ARM with ool constant pool.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
@@ -1277,9 +1219,9 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
: isolate_(isolate),
sink_(sink),
- current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
- root_index_wave_front_(0) {
+ root_index_wave_front_(0),
+ code_address_map_(NULL) {
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
for (int i = 0; i <= LAST_SPACE; i++) {
@@ -1290,6 +1232,7 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
Serializer::~Serializer() {
delete external_reference_encoder_;
+ if (code_address_map_ != NULL) delete code_address_map_;
}
@@ -1303,7 +1246,7 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
// We don't support serializing installed extensions.
CHECK(!isolate->has_installed_extensions());
-
+ isolate->heap()->IterateSmiRoots(this);
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
@@ -1355,7 +1298,7 @@ void Serializer::VisitPointers(Object** start, Object** end) {
// deserialized objects.
void SerializerDeserializer::Iterate(Isolate* isolate,
ObjectVisitor* visitor) {
- if (Serializer::enabled()) return;
+ if (isolate->serializer_enabled()) return;
for (int i = 0; ; i++) {
if (isolate->serialize_partial_snapshot_cache_length() <= i) {
// Extend the array ready to get a value from the visitor when
@@ -1402,12 +1345,11 @@ int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
if (!root->IsSmi() && root == heap_object) {
-#if V8_TARGET_ARCH_MIPS
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
if (from == kFromCode) {
// In order to avoid code bloat in the deserializer we don't have
// support for the encoding that specifies a particular root should
- // be written into the lui/ori instructions on MIPS. Therefore we
- // should not generate such serialization data for MIPS.
+ // be written from within code.
return kInvalidRootIndex;
}
#endif
@@ -1596,12 +1538,14 @@ void Serializer::ObjectSerializer::Serialize() {
"ObjectSerialization");
sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
- ASSERT(code_address_map_);
- const char* code_name = code_address_map_->Lookup(object_->address());
- LOG(serializer_->isolate_,
- CodeNameEvent(object_->address(), sink_->Position(), code_name));
- LOG(serializer_->isolate_,
- SnapshotPositionEvent(object_->address(), sink_->Position()));
+ if (serializer_->code_address_map_) {
+ const char* code_name =
+ serializer_->code_address_map_->Lookup(object_->address());
+ LOG(serializer_->isolate_,
+ CodeNameEvent(object_->address(), sink_->Position(), code_name));
+ LOG(serializer_->isolate_,
+ SnapshotPositionEvent(object_->address(), sink_->Position()));
+ }
// Mark this object as already serialized.
int offset = serializer_->Allocate(space, size);
@@ -1660,6 +1604,9 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
+ // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
+ if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
+
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
@@ -1705,6 +1652,9 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
+ // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
+ if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
+
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -1722,6 +1672,9 @@ void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
+ // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
+ if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
+
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
Cell* object = Cell::cast(rinfo->target_cell());
serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
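The same early-out now heads all three relocation visitors (embedded pointers, code targets, cells): a target reachable through an out-of-line constant pool is serialized once, when the owning ConstantPoolArray is visited, instead of once per relocation entry. The shared guard is simply:

    // Skip relocations whose target lives in an out-of-line constant
    // pool; the ConstantPoolArray visit serializes it instead.
    if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;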
@@ -1755,7 +1708,7 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
static Code* CloneCodeObject(HeapObject* code) {
Address copy = new byte[code->Size()];
- OS::MemCopy(copy, code->address(), code->Size());
+ MemCopy(copy, code->address(), code->Size());
return Code::cast(HeapObject::FromAddress(copy));
}
@@ -1767,7 +1720,9 @@ static void WipeOutRelocations(Code* code) {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
- it.rinfo()->WipeOut();
+ if (!(FLAG_enable_ool_constant_pool && it.rinfo()->IsInConstantPool())) {
+ it.rinfo()->WipeOut();
+ }
}
}
@@ -1870,6 +1825,12 @@ void Serializer::Pad() {
}
+void Serializer::InitializeCodeAddressMap() {
+ isolate_->InitializeLoggingAndCounters();
+ code_address_map_ = new CodeAddressMap(isolate_);
+}
+
+
bool SnapshotByteSource::AtEOF() {
if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false;
for (int x = position_; x < length_; x++) {
diff --git a/chromium/v8/src/serialize.h b/chromium/v8/src/serialize.h
index 9229bad4061..9e3cc88662e 100644
--- a/chromium/v8/src/serialize.h
+++ b/chromium/v8/src/serialize.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SERIALIZE_H_
#define V8_SERIALIZE_H_
-#include "hashmap.h"
+#include "src/hashmap.h"
namespace v8 {
namespace internal {
@@ -40,7 +17,6 @@ enum TypeCode {
BUILTIN,
RUNTIME_FUNCTION,
IC_UTILITY,
- DEBUG_ADDRESS,
STATS_COUNTER,
TOP_ADDRESS,
C_BUILTIN,
@@ -57,10 +33,8 @@ const int kFirstTypeCode = UNCLASSIFIED;
const int kReferenceIdBits = 16;
const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
const int kReferenceTypeShift = kReferenceIdBits;
-const int kDebugRegisterBits = 4;
-const int kDebugIdShift = kDebugRegisterBits;
-const int kDeoptTableSerializeEntryCount = 12;
+const int kDeoptTableSerializeEntryCount = 64;
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
@@ -83,7 +57,7 @@ class ExternalReferenceTable {
private:
explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
- PopulateTable(isolate);
+ PopulateTable(isolate);
}
struct ExternalReferenceEntry {
@@ -124,8 +98,6 @@ class ExternalReferenceEncoder {
int IndexOf(Address key) const;
- static bool Match(void* key1, void* key2) { return key1 == key2; }
-
void Put(Address key, int index);
Isolate* isolate_;
@@ -311,7 +283,7 @@ int SnapshotByteSource::GetInt() {
void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
- OS::MemCopy(to, data_ + position_, number_of_bytes);
+ MemCopy(to, data_ + position_, number_of_bytes);
position_ += number_of_bytes;
}
@@ -414,7 +386,7 @@ class SerializationAddressMapper {
public:
SerializationAddressMapper()
: no_allocation_(),
- serialization_map_(new HashMap(&SerializationMatchFun)) { }
+ serialization_map_(new HashMap(HashMap::PointersMatch)) { }
~SerializationAddressMapper() {
delete serialization_map_;
@@ -438,10 +410,6 @@ class SerializationAddressMapper {
}
private:
- static bool SerializationMatchFun(void* key1, void* key2) {
- return key1 == key2;
- }
-
static uint32_t Hash(HeapObject* obj) {
return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
}
@@ -470,19 +438,13 @@ class Serializer : public SerializerDeserializer {
void VisitPointers(Object** start, Object** end);
// You can call this after serialization to find out how much space was used
// in each space.
- int CurrentAllocationAddress(int space) {
+ int CurrentAllocationAddress(int space) const {
ASSERT(space < kNumberOfSpaces);
return fullness_[space];
}
Isolate* isolate() const { return isolate_; }
- static void Enable(Isolate* isolate);
- static void Disable();
- // Call this when you have made use of the fact that there is no serialization
- // going on.
- static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
- static bool enabled() { return serialization_enabled_; }
SerializationAddressMapper* address_mapper() { return &address_mapper_; }
void PutRoot(int index,
HeapObject* object,
@@ -494,7 +456,6 @@ class Serializer : public SerializerDeserializer {
static const int kInvalidRootIndex = -1;
int RootIndex(HeapObject* heap_object, HowToCode from);
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
intptr_t root_index_wave_front() { return root_index_wave_front_; }
void set_root_index_wave_front(intptr_t value) {
ASSERT(value >= root_index_wave_front_);
@@ -579,11 +540,8 @@ class Serializer : public SerializerDeserializer {
// relative addresses for back references.
int fullness_[LAST_SPACE + 1];
SnapshotByteSink* sink_;
- int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
- static bool serialization_enabled_;
- // Did we already make use of the fact that serialization was not enabled?
- static bool too_late_to_enable_now_;
+
SerializationAddressMapper address_mapper_;
intptr_t root_index_wave_front_;
void Pad();
@@ -591,8 +549,12 @@ class Serializer : public SerializerDeserializer {
friend class ObjectSerializer;
friend class Deserializer;
+ // We may not need the code address map for logging in every instance
+ // of the serializer; initialize it on demand.
+ void InitializeCodeAddressMap();
+
private:
- static CodeAddressMap* code_address_map_;
+ CodeAddressMap* code_address_map_;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
@@ -605,18 +567,19 @@ class PartialSerializer : public Serializer {
: Serializer(isolate, sink),
startup_serializer_(startup_snapshot_serializer) {
set_root_index_wave_front(Heap::kStrongRootListLength);
+ InitializeCodeAddressMap();
}
// Serialize the objects reachable from a single object pointer.
- virtual void Serialize(Object** o);
+ void Serialize(Object** o);
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
WhereToPoint where_to_point,
int skip);
- protected:
- virtual int PartialSnapshotCacheIndex(HeapObject* o);
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+ private:
+ int PartialSnapshotCacheIndex(HeapObject* o);
+ bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
// Scripts should be referred only through shared function infos. We can't
// allow them to be part of the partial snapshot because they contain a
// unique ID, and deserializing several partial snapshots containing script
@@ -629,7 +592,7 @@ class PartialSerializer : public Serializer {
startup_serializer_->isolate()->heap()->fixed_cow_array_map();
}
- private:
+
Serializer* startup_serializer_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};
@@ -644,6 +607,7 @@ class StartupSerializer : public Serializer {
// which will repopulate the cache with objects needed by that partial
// snapshot.
isolate->set_serialize_partial_snapshot_cache_length(0);
+ InitializeCodeAddressMap();
}
// Serialize the current state of the heap. The order is:
// 1) Strong references.
@@ -660,11 +624,6 @@ class StartupSerializer : public Serializer {
SerializeWeakReferences();
Pad();
}
-
- private:
- virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
- return false;
- }
};
diff --git a/chromium/v8/src/simulator.h b/chromium/v8/src/simulator.h
index 485e9306458..c8739074143 100644
--- a/chromium/v8/src/simulator.h
+++ b/chromium/v8/src/simulator.h
@@ -1,41 +1,22 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SIMULATOR_H_
#define V8_SIMULATOR_H_
#if V8_TARGET_ARCH_IA32
-#include "ia32/simulator-ia32.h"
+#include "src/ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "x64/simulator-x64.h"
+#include "src/x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "arm/simulator-arm.h"
+#include "src/arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
-#include "mips/simulator-mips.h"
+#include "src/mips/simulator-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/simulator-x87.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/chromium/v8/src/small-pointer-list.h b/chromium/v8/src/small-pointer-list.h
index 295a06f26af..c4f8233beca 100644
--- a/chromium/v8/src/small-pointer-list.h
+++ b/chromium/v8/src/small-pointer-list.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SMALL_POINTER_LIST_H_
#define V8_SMALL_POINTER_LIST_H_
-#include "checks.h"
-#include "v8globals.h"
-#include "zone.h"
+#include "src/checks.h"
+#include "src/globals.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/smart-pointers.h b/chromium/v8/src/smart-pointers.h
index 7c35b2aff27..db2206a32f5 100644
--- a/chromium/v8/src/smart-pointers.h
+++ b/chromium/v8/src/smart-pointers.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SMART_POINTERS_H_
#define V8_SMART_POINTERS_H_
@@ -36,35 +13,31 @@ template<typename Deallocator, typename T>
class SmartPointerBase {
public:
// Default constructor. Constructs an empty scoped pointer.
- inline SmartPointerBase() : p_(NULL) {}
+ SmartPointerBase() : p_(NULL) {}
// Constructs a scoped pointer from a plain one.
- explicit inline SmartPointerBase(T* ptr) : p_(ptr) {}
+ explicit SmartPointerBase(T* ptr) : p_(ptr) {}
// Copy constructor removes the pointer from the original to avoid double
// freeing.
- inline SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
+ SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
: p_(rhs.p_) {
const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
}
- // When the destructor of the scoped pointer is executed the plain pointer
- // is deleted using DeleteArray. This implies that you must allocate with
- // NewArray.
- inline ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
+ T* operator->() const { return p_; }
- inline T* operator->() const { return p_; }
+ T& operator*() const { return *p_; }
- // You can get the underlying pointer out with the * operator.
- inline T* operator*() { return p_; }
+ T* get() const { return p_; }
// You can use [n] to index as if it was a plain pointer.
- inline T& operator[](size_t i) {
+ T& operator[](size_t i) {
return p_[i];
}
// You can use [n] to index as if it was a plain pointer.
- const inline T& operator[](size_t i) const {
+ const T& operator[](size_t i) const {
return p_[i];
}
@@ -76,13 +49,14 @@ class SmartPointerBase {
// If you want to take out the plain pointer and don't want it automatically
// deleted then call Detach(). Afterwards, the smart pointer is empty
// (NULL).
- inline T* Detach() {
+ T* Detach() {
T* temp = p_;
p_ = NULL;
return temp;
}
- inline void Reset(T* new_value) {
+ void Reset(T* new_value) {
+ ASSERT(p_ == NULL || p_ != new_value);
if (p_) Deallocator::Delete(p_);
p_ = new_value;
}
@@ -90,7 +64,7 @@ class SmartPointerBase {
// Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.
- inline SmartPointerBase<Deallocator, T>& operator=(
+ SmartPointerBase<Deallocator, T>& operator=(
const SmartPointerBase<Deallocator, T>& rhs) {
ASSERT(is_empty());
T* tmp = rhs.p_; // swap to handle self-assignment
@@ -99,7 +73,13 @@ class SmartPointerBase {
return *this;
}
- inline bool is_empty() { return p_ == NULL; }
+ bool is_empty() const { return p_ == NULL; }
+
+ protected:
+ // When the destructor of the scoped pointer is executed the plain pointer
+ // is deleted using DeleteArray. This implies that you must allocate with
+ // NewArray.
+ ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
private:
T* p_;
@@ -119,10 +99,10 @@ struct ArrayDeallocator {
template<typename T>
class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
public:
- inline SmartArrayPointer() { }
- explicit inline SmartArrayPointer(T* ptr)
+ SmartArrayPointer() { }
+ explicit SmartArrayPointer(T* ptr)
: SmartPointerBase<ArrayDeallocator<T>, T>(ptr) { }
- inline SmartArrayPointer(const SmartArrayPointer<T>& rhs)
+ SmartArrayPointer(const SmartArrayPointer<T>& rhs)
: SmartPointerBase<ArrayDeallocator<T>, T>(rhs) { }
};
@@ -138,10 +118,10 @@ struct ObjectDeallocator {
template<typename T>
class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
public:
- inline SmartPointer() { }
- explicit inline SmartPointer(T* ptr)
+ SmartPointer() { }
+ explicit SmartPointer(T* ptr)
: SmartPointerBase<ObjectDeallocator<T>, T>(ptr) { }
- inline SmartPointer(const SmartPointer<T>& rhs)
+ SmartPointer(const SmartPointer<T>& rhs)
: SmartPointerBase<ObjectDeallocator<T>, T>(rhs) { }
};
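The smart-pointers.h changes above keep the destructive-copy semantics but make the destructor protected and non-virtual. A hedged, self-contained sketch of the resulting behavior; ScopedBase and ScopedArray are simplified stand-ins for SmartPointerBase and SmartArrayPointer, with new[]/delete[] standing in for NewArray/DeleteArray:

#include <cassert>
#include <cstddef>

template <typename T>
struct ArrayDeleter { static void Delete(T* p) { delete[] p; } };

template <typename D, typename T>
class ScopedBase {  // stand-in for SmartPointerBase
 public:
  ScopedBase() : p_(NULL) {}
  explicit ScopedBase(T* p) : p_(p) {}
  // Destructive copy: ownership moves to the new object and the source is
  // emptied, so the array is freed exactly once.
  ScopedBase(const ScopedBase& rhs) : p_(rhs.p_) {
    const_cast<ScopedBase&>(rhs).p_ = NULL;
  }
  T* get() const { return p_; }
  T* Detach() { T* t = p_; p_ = NULL; return t; }
  bool is_empty() const { return p_ == NULL; }
 protected:
  // Protected and non-virtual, as in the patched SmartPointerBase: client
  // code cannot delete through a base-class pointer.
  ~ScopedBase() { if (p_) D::Delete(p_); }
 private:
  T* p_;
};

template <typename T>
class ScopedArray : public ScopedBase<ArrayDeleter<T>, T> {
 public:
  ScopedArray() {}
  explicit ScopedArray(T* p) : ScopedBase<ArrayDeleter<T>, T>(p) {}
};

void Demo() {
  ScopedArray<int> a(new int[4]);
  ScopedArray<int> b(a);   // ownership moves; a is now empty
  assert(a.is_empty() && !b.is_empty());
  delete[] b.Detach();     // caller takes the raw array back
}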
diff --git a/chromium/v8/src/snapshot-common.cc b/chromium/v8/src/snapshot-common.cc
index 4bdf63ceddb..bef096928a7 100644
--- a/chromium/v8/src/snapshot-common.cc
+++ b/chromium/v8/src/snapshot-common.cc
@@ -1,38 +1,15 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// The common functionality when building with or without snapshots.
-#include "v8.h"
+#include "src/v8.h"
-#include "api.h"
-#include "serialize.h"
-#include "snapshot.h"
-#include "platform.h"
+#include "src/api.h"
+#include "src/serialize.h"
+#include "src/snapshot.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
@@ -42,7 +19,7 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
const char* file_name) {
int file_name_length = StrLength(file_name) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
- OS::SNPrintF(name, "%s.size", file_name);
+ SNPrintF(name, "%s.size", file_name);
FILE* fp = OS::FOpen(name.start(), "r");
CHECK_NE(NULL, fp);
int new_size, pointer_size, data_size, code_size, map_size, cell_size,
diff --git a/chromium/v8/src/snapshot-empty.cc b/chromium/v8/src/snapshot-empty.cc
index 54236d82eca..65207bfc744 100644
--- a/chromium/v8/src/snapshot-empty.cc
+++ b/chromium/v8/src/snapshot-empty.cc
@@ -1,35 +1,12 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Used for building without snapshots.
-#include "v8.h"
+#include "src/v8.h"
-#include "snapshot.h"
+#include "src/snapshot.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/snapshot.h b/chromium/v8/src/snapshot.h
index 4041f2925e5..17191f0be13 100644
--- a/chromium/v8/src/snapshot.h
+++ b/chromium/v8/src/snapshot.h
@@ -1,31 +1,8 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "isolate.h"
+#include "src/isolate.h"
#ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_
diff --git a/chromium/v8/src/spaces-inl.h b/chromium/v8/src/spaces-inl.h
index 87de29c4a5d..e863b519a99 100644
--- a/chromium/v8/src/spaces-inl.h
+++ b/chromium/v8/src/spaces-inl.h
@@ -1,37 +1,14 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_
-#include "heap-profiler.h"
-#include "isolate.h"
-#include "spaces.h"
-#include "v8memory.h"
+#include "src/heap-profiler.h"
+#include "src/isolate.h"
+#include "src/spaces.h"
+#include "src/v8memory.h"
namespace v8 {
namespace internal {
@@ -165,7 +142,7 @@ Page* Page::Initialize(Heap* heap,
Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
- ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
+ ASSERT(page->area_size() <= kMaxRegularHeapObjectSize);
ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
@@ -274,7 +251,7 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
// Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
@@ -303,7 +280,7 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
return object;
}
- return Failure::RetryAfterGC(identity());
+ return AllocationResult::Retry(identity());
}
@@ -311,23 +288,8 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
// NewSpace
-MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top();
-#ifdef DEBUG
- // If we are stressing compaction we waste some memory in new space
- // in order to get more frequent GCs.
- if (FLAG_stress_compaction && !heap()->linear_allocation()) {
- if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
- int filler_size = size_in_bytes * 4;
- for (int i = 0; i < filler_size; i += kPointerSize) {
- *(reinterpret_cast<Object**>(old_top + i)) =
- heap()->one_pointer_filler_map();
- }
- old_top += filler_size;
- allocation_info_.set_top(allocation_info_.top() + filler_size);
- }
- }
-#endif
if (allocation_info_.limit() - old_top < size_in_bytes) {
return SlowAllocateRaw(size_in_bytes);
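The AllocateRaw hunks above migrate from the MaybeObject*/Failure protocol to AllocationResult. A hedged, self-contained model of the new protocol and of the bump-pointer fast path; Result, the space tags, and LinearArea are illustrative stand-ins, not the real V8 types:

#include <cstddef>

enum SpaceTag { INVALID_SPACE, NEW_SPACE_TAG, OLD_SPACE_TAG };

class Result {  // stand-in for AllocationResult
 public:
  Result(void* object) : object_(object), retry_space_(INVALID_SPACE) {}
  static Result Retry(SpaceTag space) {
    Result r(NULL);
    r.retry_space_ = space;  // tells the caller which space needs a GC
    return r;
  }
  bool IsRetry() const { return retry_space_ != INVALID_SPACE; }
  void* object() const { return object_; }

 private:
  void* object_;
  SpaceTag retry_space_;
};

// Bump-pointer fast path, as in NewSpace::AllocateRaw above: allocate by
// advancing top, and signal a retryable failure when the area is full.
struct LinearArea {
  char* top;
  char* limit;
  Result AllocateRaw(std::size_t size_in_bytes) {
    if (static_cast<std::size_t>(limit - top) < size_in_bytes)
      return Result::Retry(NEW_SPACE_TAG);  // caller runs a GC, then retries
    char* object = top;
    top += size_in_bytes;
    return Result(object);
  }
};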
diff --git a/chromium/v8/src/spaces.cc b/chromium/v8/src/spaces.cc
index ee19a02967d..69a01451bb9 100644
--- a/chromium/v8/src/spaces.cc
+++ b/chromium/v8/src/spaces.cc
@@ -1,36 +1,14 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "macro-assembler.h"
-#include "mark-compact.h"
-#include "msan.h"
-#include "platform.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/full-codegen.h"
+#include "src/macro-assembler.h"
+#include "src/mark-compact.h"
+#include "src/msan.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
@@ -133,9 +111,21 @@ CodeRange::CodeRange(Isolate* isolate)
}
-bool CodeRange::SetUp(const size_t requested) {
+bool CodeRange::SetUp(size_t requested) {
ASSERT(code_range_ == NULL);
+ if (requested == 0) {
+ // When a target requires the code range feature, we put all code objects
+ // in a kMaximalCodeRangeSize range of virtual address space, so that
+ // they can call each other with near calls.
+ if (kRequiresCodeRange) {
+ requested = kMaximalCodeRangeSize;
+ } else {
+ return true;
+ }
+ }
+
+ ASSERT(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
code_range_ = new VirtualMemory(requested);
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
@@ -146,7 +136,8 @@ bool CodeRange::SetUp(const size_t requested) {
// We are sure that we have mapped a block of requested addresses.
ASSERT(code_range_->size() == requested);
- LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+ LOG(isolate_,
+ NewEvent("CodeRange", code_range_->address(), requested));
Address base = reinterpret_cast<Address>(code_range_->address());
Address aligned_base =
RoundUp(reinterpret_cast<Address>(code_range_->address()),
@@ -167,12 +158,12 @@ int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
}
-void CodeRange::GetNextAllocationBlock(size_t requested) {
+bool CodeRange::GetNextAllocationBlock(size_t requested) {
for (current_allocation_block_index_++;
current_allocation_block_index_ < allocation_list_.length();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
+ return true; // Found a large enough allocation block.
}
}
@@ -199,12 +190,12 @@ void CodeRange::GetNextAllocationBlock(size_t requested) {
current_allocation_block_index_ < allocation_list_.length();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
+ return true; // Found a large enough allocation block.
}
}
-
+ current_allocation_block_index_ = 0;
// Code range is full or too fragmented.
- V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+ return false;
}
@@ -214,9 +205,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
ASSERT(commit_size <= requested_size);
ASSERT(current_allocation_block_index_ < allocation_list_.length());
if (requested_size > allocation_list_[current_allocation_block_index_].size) {
- // Find an allocation block large enough. This function call may
- // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
- GetNextAllocationBlock(requested_size);
+ // Find an allocation block large enough.
+ if (!GetNextAllocationBlock(requested_size)) return NULL;
}
// Commit the requested memory at the start of the current allocation block.
size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
@@ -239,7 +229,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
allocation_list_[current_allocation_block_index_].start += *allocated;
allocation_list_[current_allocation_block_index_].size -= *allocated;
if (*allocated == current.size) {
- GetNextAllocationBlock(0); // This block is used up, get the next one.
+ // This block is used up, get the next one.
+ if (!GetNextAllocationBlock(0)) return NULL;
}
return current.start;
}
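With the two hunks above, exhausting the code range is no longer fatal: GetNextAllocationBlock() reports failure and AllocateRawMemory() returns NULL. A short, hedged sketch of the pattern, with hypothetical names:

#include <cstddef>

// Returns false when no free block is large enough (code range full or too
// fragmented), instead of terminating the process.
static bool GetBlock(std::size_t requested, std::size_t* start) {
  (void)requested;
  (void)start;
  return false;  // model the exhausted case
}

static void* AllocateFromRange(std::size_t requested) {
  std::size_t start = 0;
  // Previously this path ended in V8::FatalProcessOutOfMemory; now the NULL
  // return propagates upward as a retryable allocation failure.
  if (!GetBlock(requested, &start)) return NULL;
  return reinterpret_cast<void*>(start);
}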
@@ -333,9 +324,12 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
size_executable_ -= size;
}
// Code which is part of the code-range does not have its own VirtualMemory.
- ASSERT(!isolate_->code_range()->contains(
- static_cast<Address>(reservation->address())));
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+ ASSERT(isolate_->code_range() == NULL ||
+ !isolate_->code_range()->contains(
+ static_cast<Address>(reservation->address())));
+ ASSERT(executable == NOT_EXECUTABLE ||
+ isolate_->code_range() == NULL ||
+ !isolate_->code_range()->valid());
reservation->Release();
}
@@ -353,11 +347,14 @@ void MemoryAllocator::FreeMemory(Address base,
ASSERT(size_executable_ >= size);
size_executable_ -= size;
}
- if (isolate_->code_range()->contains(static_cast<Address>(base))) {
+ if (isolate_->code_range() != NULL &&
+ isolate_->code_range()->contains(static_cast<Address>(base))) {
ASSERT(executable == EXECUTABLE);
isolate_->code_range()->FreeRawMemory(base, size);
} else {
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+ ASSERT(executable == NOT_EXECUTABLE ||
+ isolate_->code_range() == NULL ||
+ !isolate_->code_range()->valid());
bool result = VirtualMemory::ReleaseRegion(base, size);
USE(result);
ASSERT(result);
@@ -483,7 +480,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->parallel_sweeping_ = 0;
+ chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
@@ -533,7 +530,8 @@ bool MemoryChunk::CommitArea(size_t requested) {
}
} else {
CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+ ASSERT(code_range != NULL && code_range->valid() &&
+ IsFlagSet(IS_EXECUTABLE));
if (!code_range->CommitRawMemory(start, length)) return false;
}
@@ -549,7 +547,8 @@ bool MemoryChunk::CommitArea(size_t requested) {
if (!reservation_.Uncommit(start, length)) return false;
} else {
CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+ ASSERT(code_range != NULL && code_range->valid() &&
+ IsFlagSet(IS_EXECUTABLE));
if (!code_range->UncommitRawMemory(start, length)) return false;
}
}
@@ -560,33 +559,22 @@ bool MemoryChunk::CommitArea(size_t requested) {
void MemoryChunk::InsertAfter(MemoryChunk* other) {
- next_chunk_ = other->next_chunk_;
- prev_chunk_ = other;
-
- // This memory barrier is needed since concurrent sweeper threads may iterate
- // over the list of pages while a new page is inserted.
- // TODO(hpayer): find a cleaner way to guarantee that the page list can be
- // expanded concurrently
- MemoryBarrier();
+ MemoryChunk* other_next = other->next_chunk();
- // The following two write operations can take effect in arbitrary order
- // since pages are always iterated by the sweeper threads in LIFO order, i.e,
- // the inserted page becomes visible for the sweeper threads after
- // other->next_chunk_ = this;
- other->next_chunk_->prev_chunk_ = this;
- other->next_chunk_ = this;
+ set_next_chunk(other_next);
+ set_prev_chunk(other);
+ other_next->set_prev_chunk(this);
+ other->set_next_chunk(this);
}
void MemoryChunk::Unlink() {
- if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
- heap_->decrement_scan_on_scavenge_pages();
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- next_chunk_->prev_chunk_ = prev_chunk_;
- prev_chunk_->next_chunk_ = next_chunk_;
- prev_chunk_ = NULL;
- next_chunk_ = NULL;
+ MemoryChunk* next_element = next_chunk();
+ MemoryChunk* prev_element = prev_chunk();
+ next_element->set_prev_chunk(prev_element);
+ prev_element->set_next_chunk(next_element);
+ set_prev_chunk(NULL);
+ set_next_chunk(NULL);
}
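The rewritten InsertAfter()/Unlink() above replace an explicit MemoryBarrier() with the new acquire/release chunk accessors. A hedged model using std::atomic (C++11) in place of base::AtomicWord:

#include <atomic>

struct Chunk {
  std::atomic<Chunk*> next{nullptr};
  std::atomic<Chunk*> prev{nullptr};

  // Mirrors MemoryChunk::InsertAfter: the release stores publish fully
  // written links, so a sweeper thread walking the list with acquire loads
  // never observes a half-linked chunk.
  void InsertAfter(Chunk* other) {
    Chunk* other_next = other->next.load(std::memory_order_acquire);
    next.store(other_next, std::memory_order_release);
    prev.store(other, std::memory_order_release);
    other_next->prev.store(this, std::memory_order_release);
    other->next.store(this, std::memory_order_release);
  }

  // Mirrors MemoryChunk::Unlink.
  void Unlink() {
    Chunk* next_element = next.load(std::memory_order_acquire);
    Chunk* prev_element = prev.load(std::memory_order_acquire);
    next_element->prev.store(prev_element, std::memory_order_release);
    prev_element->next.store(next_element, std::memory_order_release);
    next.store(nullptr, std::memory_order_release);
    prev.store(nullptr, std::memory_order_release);
  }
};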
@@ -650,7 +638,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
OS::CommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
- if (isolate_->code_range()->exists()) {
+ if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
base = isolate_->code_range()->AllocateRawMemory(chunk_size,
commit_size,
&chunk_size);
@@ -718,7 +706,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
executable,
owner);
result->set_reserved_memory(&reservation);
- MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
+ MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
return result;
}
@@ -948,8 +936,8 @@ PagedSpace::PagedSpace(Heap* heap,
: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)),
- unswept_free_bytes_(0) {
+ unswept_free_bytes_(0),
+ end_of_unswept_pages_(NULL) {
if (id == CODE_SPACE) {
area_size_ = heap->isolate()->memory_allocator()->
CodePageAreaSize();
@@ -1000,11 +988,11 @@ size_t PagedSpace::CommittedPhysicalMemory() {
}
-MaybeObject* PagedSpace::FindObject(Address addr) {
+Object* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on precisely swept spaces.
ASSERT(!heap()->mark_compact_collector()->in_use());
- if (!Contains(addr)) return Failure::Exception();
+ if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
Page* p = Page::FromAddress(addr);
HeapObjectIterator it(p, NULL);
@@ -1015,7 +1003,7 @@ MaybeObject* PagedSpace::FindObject(Address addr) {
}
UNREACHABLE();
- return Failure::Exception();
+ return Smi::FromInt(0);
}
@@ -1058,7 +1046,7 @@ intptr_t PagedSpace::SizeOfFirstPage() {
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
- size = 72 * kPointerSize * KB;
+ size = 96 * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;
@@ -1072,21 +1060,20 @@ intptr_t PagedSpace::SizeOfFirstPage() {
case PROPERTY_CELL_SPACE:
size = 8 * kPointerSize * KB;
break;
- case CODE_SPACE:
- if (heap()->isolate()->code_range()->exists()) {
+ case CODE_SPACE: {
+ CodeRange* code_range = heap()->isolate()->code_range();
+ if (code_range != NULL && code_range->valid()) {
// When code range exists, code pages are allocated in a special way
// (from the reserved code range). That part of the code is not yet
// upgraded to handle small pages.
size = AreaSize();
} else {
-#if V8_TARGET_ARCH_MIPS
- // TODO(plind): Investigate larger code stubs size on MIPS.
- size = 480 * KB;
-#else
- size = 416 * KB;
-#endif
+ size = RoundUp(
+ 480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+ kPointerSize);
}
break;
+ }
default:
UNREACHABLE();
}
@@ -1127,18 +1114,10 @@ void PagedSpace::IncreaseCapacity(int size) {
}
-void PagedSpace::ReleasePage(Page* page, bool unlink) {
+void PagedSpace::ReleasePage(Page* page) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
- // Adjust list of unswept pages if the page is the head of the list.
- if (first_unswept_page_ == page) {
- first_unswept_page_ = page->next_page();
- if (first_unswept_page_ == anchor()) {
- first_unswept_page_ = Page::FromAddress(NULL);
- }
- }
-
if (page->WasSwept()) {
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
@@ -1147,14 +1126,19 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
DecreaseUnsweptFreeBytes(page);
}
+ if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
+ heap()->decrement_scan_on_scavenge_pages();
+ page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+ }
+
+ ASSERT(!free_list_.ContainsPageFreeListItems(page));
+
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
allocation_info_.set_top(NULL);
allocation_info_.set_limit(NULL);
}
- if (unlink) {
- page->Unlink();
- }
+ page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
} else {
@@ -1202,7 +1186,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
VerifyObject(object);
// The object itself should look OK.
- object->Verify();
+ object->ObjectVerify();
// All the interior pointers should be contained in the heap.
int size = object->Size();
@@ -1427,7 +1411,7 @@ bool NewSpace::AddFreshPage() {
}
-MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
+AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
@@ -1449,7 +1433,7 @@ MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
top_on_previous_step_ = to_space_.page_low();
return AllocateRaw(size_in_bytes);
} else {
- return Failure::RetryAfterGC();
+ return AllocationResult::Retry();
}
}
@@ -1485,7 +1469,7 @@ void NewSpace::Verify() {
CHECK(!object->IsCode());
// The object itself should look OK.
- object->Verify();
+ object->ObjectVerify();
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor;
@@ -2024,10 +2008,13 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > FreeSpace::kHeaderSize) {
- set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
// Can't use FreeSpace::cast because it fails during deserialization.
+ // We have to set the size first with a release store before we store
+ // the map because a concurrent store buffer scan on scavenge must not
+ // observe a map with an invalid size.
FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
- this_as_free_space->set_size(size_in_bytes);
+ this_as_free_space->nobarrier_set_size(size_in_bytes);
+ synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
} else if (size_in_bytes == kPointerSize) {
set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
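The reordering in set_size() above is a publication pattern: the size is written first, then the map is stored with release semantics, so a concurrent store-buffer scan that observes the free-space map is guaranteed to observe a valid size. A hedged stand-alone model:

#include <atomic>
#include <cstdint>

struct FreeChunk {
  std::atomic<const void*> map{nullptr};
  int32_t size{0};  // plain field, like nobarrier_set_size above

  void Publish(int32_t bytes, const void* free_space_map) {
    size = bytes;  // must happen before the map is published
    // Release store: any thread that acquire-loads `map` and sees
    // free_space_map also sees the size written above.
    map.store(free_space_map, std::memory_order_release);
  }
};

int32_t ReadSizeIfFree(const FreeChunk& c, const void* free_space_map) {
  if (c.map.load(std::memory_order_acquire) == free_space_map) return c.size;
  return -1;  // not (yet) a published free chunk
}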
@@ -2071,31 +2058,34 @@ void FreeListNode::set_next(FreeListNode* next) {
// stage.
if (map() == GetHeap()->raw_unchecked_free_space_map()) {
ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
- Memory::Address_at(address() + kNextOffset) =
- reinterpret_cast<Address>(next);
+ base::NoBarrier_Store(
+ reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
+ reinterpret_cast<base::AtomicWord>(next));
} else {
- Memory::Address_at(address() + kPointerSize) =
- reinterpret_cast<Address>(next);
+ base::NoBarrier_Store(
+ reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
+ reinterpret_cast<base::AtomicWord>(next));
}
}
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
- if (category->top_ != NULL) {
- ASSERT(category->end_ != NULL);
+ if (category->top() != NULL) {
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
LockGuard<Mutex> target_lock_guard(mutex());
LockGuard<Mutex> source_lock_guard(category->mutex());
+ ASSERT(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
} else {
- category->end()->set_next(top_);
+ category->end()->set_next(top());
}
- top_ = category->top();
+    set_top(category->top());
available_ += category->available();
category->Reset();
}
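The locking comment in Concatenate() above encodes an ordering invariant rather than a mechanism: destination lock first, then source lock, which is deadlock-free only because no two threads concatenate the same pair of lists in opposite directions. A hedged sketch with std::mutex:

#include <mutex>

struct CategoryStub {
  std::mutex mutex;
  long available = 0;

  // Deadlock-free only under the invariant above: callers never execute
  // a.Concatenate(&b) and b.Concatenate(&a) at the same time, so the
  // destination-then-source lock order cannot form a cycle.
  long Concatenate(CategoryStub* source) {
    std::lock_guard<std::mutex> target_lock(mutex);
    std::lock_guard<std::mutex> source_lock(source->mutex);
    long moved = source->available;
    available += moved;
    source->available = 0;
    return moved;
  }
};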
@@ -2104,15 +2094,16 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
void FreeListCategory::Reset() {
- top_ = NULL;
- end_ = NULL;
- available_ = 0;
+ set_top(NULL);
+ set_end(NULL);
+ set_available(0);
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
- FreeListNode** n = &top_;
+ FreeListNode* t = top();
+ FreeListNode** n = &t;
while (*n != NULL) {
if (Page::FromAddress((*n)->address()) == p) {
FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
@@ -2122,16 +2113,27 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
n = (*n)->next_address();
}
}
- if (top_ == NULL) {
- end_ = NULL;
+ set_top(t);
+ if (top() == NULL) {
+ set_end(NULL);
}
available_ -= sum;
return sum;
}
+bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
+ FreeListNode* node = top();
+ while (node != NULL) {
+ if (Page::FromAddress(node->address()) == p) return true;
+ node = node->next();
+ }
+ return false;
+}
+
+
FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
- FreeListNode* node = top_;
+ FreeListNode* node = top();
if (node == NULL) return NULL;
@@ -2170,8 +2172,8 @@ FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top_);
- top_ = node;
+ node->set_next(top());
+ set_top(node);
if (end_ == NULL) {
end_ = node;
}
@@ -2180,7 +2182,7 @@ void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top_;
+ FreeListNode* n = top();
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
@@ -2289,7 +2291,8 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
int huge_list_available = huge_list_.available();
- for (FreeListNode** cur = huge_list_.GetTopAddress();
+ FreeListNode* top_node = huge_list_.top();
+ for (FreeListNode** cur = &top_node;
*cur != NULL;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
@@ -2323,6 +2326,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
}
+ huge_list_.set_top(top_node);
if (huge_list_.top() == NULL) {
huge_list_.set_end(NULL);
}
@@ -2457,6 +2461,14 @@ intptr_t FreeList::EvictFreeListItems(Page* p) {
}
+bool FreeList::ContainsPageFreeListItems(Page* p) {
+  return huge_list_.ContainsPageFreeListItemsInList(p) ||
+      small_list_.ContainsPageFreeListItemsInList(p) ||
+      medium_list_.ContainsPageFreeListItemsInList(p) ||
+      large_list_.ContainsPageFreeListItemsInList(p);
+}
+
+
void FreeList::RepairLists(Heap* heap) {
small_list_.RepairFreeList(heap);
medium_list_.RepairFreeList(heap);
@@ -2468,11 +2480,11 @@ void FreeList::RepairLists(Heap* heap) {
#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
- sum += cur_as_free_space->Size();
+ sum += cur_as_free_space->nobarrier_size();
cur = cur->next();
}
return sum;
@@ -2484,7 +2496,7 @@ static const int kVeryLongFreeList = 500;
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
length++;
cur = cur->next();
@@ -2524,24 +2536,8 @@ void PagedSpace::PrepareForMarkCompact() {
// on the first allocation after the sweep.
EmptyAllocationInfo();
- // Stop lazy sweeping and clear marking bits for unswept pages.
- if (first_unswept_page_ != NULL) {
- Page* p = first_unswept_page_;
- do {
- // Do not use ShouldBeSweptLazily predicate here.
- // New evacuation candidates were selected but they still have
- // to be swept before collection starts.
- if (!p->WasSwept()) {
- Bitmap::Clear(p);
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
- reinterpret_cast<intptr_t>(p));
- }
- }
- p = p->next_page();
- } while (p != anchor());
- }
- first_unswept_page_ = Page::FromAddress(NULL);
+ // This counter will be increased for pages which will be swept by the
+ // sweeper threads.
unswept_free_bytes_ = 0;
// Clear the free list before a full GC---it will be rebuilt afterward.
@@ -2550,7 +2546,8 @@ void PagedSpace::PrepareForMarkCompact() {
intptr_t PagedSpace::SizeOfObjects() {
- ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
+ ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
+ (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
@@ -2564,39 +2561,6 @@ void PagedSpace::RepairFreeListsAfterBoot() {
}
-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
- if (IsLazySweepingComplete()) return true;
-
- intptr_t freed_bytes = 0;
- Page* p = first_unswept_page_;
- do {
- Page* next_page = p->next_page();
- if (ShouldBeSweptLazily(p)) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
- reinterpret_cast<intptr_t>(p));
- }
- DecreaseUnsweptFreeBytes(p);
- freed_bytes +=
- MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
- this, NULL, p);
- }
- p = next_page;
- } while (p != anchor() && freed_bytes < bytes_to_sweep);
-
- if (p == anchor()) {
- first_unswept_page_ = Page::FromAddress(NULL);
- } else {
- first_unswept_page_ = p;
- }
-
- heap()->FreeQueuedChunks();
-
- return IsLazySweepingComplete();
-}
-
-
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
if (allocation_info_.top() >= allocation_info_.limit()) return;
@@ -2613,35 +2577,29 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
}
-bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+ int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->AreSweeperThreadsActivated()) {
- if (collector->IsConcurrentSweepingInProgress()) {
- if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
- if (!collector->sequential_sweeping()) {
- collector->WaitUntilSweepingCompleted();
- return true;
- }
- }
- return false;
- }
- return true;
- } else {
- return AdvanceSweeper(size_in_bytes);
+
+ // If sweeper threads are still running, wait for them.
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
+
+ // After waiting for the sweeper threads, there may be new free-list
+ // entries.
+ return free_list_.Allocate(size_in_bytes);
}
+ return NULL;
}
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
- // If there are unswept pages advance lazy sweeper a bounded number of times
- // until we find a size_in_bytes contiguous piece of memory
- const int kMaxSweepingTries = 5;
- bool sweeping_complete = false;
-
- for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
- sweeping_complete = EnsureSweeperProgress(size_in_bytes);
+ // If sweeper threads are active, try to re-fill the free-lists.
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->RefillFreeList(this);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2651,9 +2609,12 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- return NULL;
+ if (!heap()->always_allocate()
+ && heap()->OldGenerationAllocationLimitReached()) {
+ // If sweeper threads are active, wait for them at that point and steal
+  // elements from their free-lists.
+ HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ if (object != NULL) return object;
}
// Try to expand the space and allocate in the new next page.
@@ -2662,18 +2623,10 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
return free_list_.Allocate(size_in_bytes);
}
- // Last ditch, sweep all the remaining pages to try to find space. This may
- // cause a pause.
- if (!IsLazySweepingComplete()) {
- EnsureSweeperProgress(kMaxInt);
-
- // Retry the free list allocation.
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) return object;
- }
-
- // Finally, fail.
- return NULL;
+ // If sweeper threads are active, wait for them at that point and steal
+  // elements from their free-lists. Allocation may still fail, which
+ // would indicate that there is not enough memory for the given allocation.
+ return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
}
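The rewritten slow path above proceeds in stages: refill the free lists from the concurrent sweeper, optionally block at the old-generation limit, expand the space, and only then wait for the sweepers as a last resort. A hedged outline with stub types; none of these are the real V8 interfaces:

#include <cstddef>

struct FreeListStub {
  void* Allocate(int size) { (void)size; return NULL; }  // stub: always fails
};

struct CollectorStub {
  bool sweeping_in_progress = true;
  void RefillFreeList(FreeListStub*) {}
  void WaitUntilSweepingCompleted() { sweeping_in_progress = false; }
};

struct SpaceStub {
  FreeListStub free_list;
  CollectorStub collector;
  bool at_old_generation_limit = false;

  bool Expand() { return false; }  // stub: no new page available

  void* SlowAllocateRaw(int size_in_bytes) {
    // 1) If concurrent sweeping is in progress, pull freshly swept memory
    //    into the free lists and retry.
    if (collector.sweeping_in_progress) {
      collector.RefillFreeList(&free_list);
      void* object = free_list.Allocate(size_in_bytes);
      if (object != NULL) return object;
    }
    // 2) At the old-generation limit, block on the sweeper threads and steal
    //    from their free lists before giving up.
    if (at_old_generation_limit) {
      collector.WaitUntilSweepingCompleted();
      void* object = free_list.Allocate(size_in_bytes);
      if (object != NULL) return object;
    }
    // 3) Otherwise grow the space by a page and allocate there.
    if (Expand()) return free_list.Allocate(size_in_bytes);
    // 4) Last resort: wait for the sweepers and retry once; a NULL result
    //    means there really is not enough memory for this allocation.
    collector.WaitUntilSweepingCompleted();
    return free_list.Allocate(size_in_bytes);
  }
};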
@@ -2916,22 +2869,22 @@ void LargeObjectSpace::TearDown() {
}
-MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
- Executability executable) {
+AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
+ Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->always_allocate() &&
heap()->OldGenerationAllocationLimitReached()) {
- return Failure::RetryAfterGC(identity());
+ return AllocationResult::Retry(identity());
}
if (Size() + object_size > max_capacity_) {
- return Failure::RetryAfterGC(identity());
+ return AllocationResult::Retry(identity());
}
LargePage* page = heap()->isolate()->memory_allocator()->
AllocateLargePage(object_size, this, executable);
- if (page == NULL) return Failure::RetryAfterGC(identity());
+ if (page == NULL) return AllocationResult::Retry(identity());
ASSERT(page->area_size() >= object_size);
size_ += static_cast<int>(page->size());
@@ -2984,12 +2937,12 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
// GC support
-MaybeObject* LargeObjectSpace::FindObject(Address a) {
+Object* LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
if (page != NULL) {
return page->GetObject();
}
- return Failure::Exception();
+ return Smi::FromInt(0); // Signaling not found.
}
@@ -3070,7 +3023,7 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
bool owned = (chunk->owner() == this);
- SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
+ SLOW_ASSERT(!owned || FindObject(address)->IsHeapObject());
return owned;
}
@@ -3103,7 +3056,7 @@ void LargeObjectSpace::Verify() {
object->IsFixedDoubleArray() || object->IsByteArray());
// The object itself should look OK.
- object->Verify();
+ object->ObjectVerify();
// Byte arrays and strings don't have interior pointers.
if (object->IsCode()) {
diff --git a/chromium/v8/src/spaces.h b/chromium/v8/src/spaces.h
index 7c650a2aca5..a8c981d3828 100644
--- a/chromium/v8/src/spaces.h
+++ b/chromium/v8/src/spaces.h
@@ -1,39 +1,17 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SPACES_H_
#define V8_SPACES_H_
-#include "allocation.h"
-#include "hashmap.h"
-#include "list.h"
-#include "log.h"
-#include "platform/mutex.h"
-#include "v8utils.h"
+#include "src/allocation.h"
+#include "src/base/atomicops.h"
+#include "src/hashmap.h"
+#include "src/list.h"
+#include "src/log.h"
+#include "src/platform/mutex.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -103,7 +81,7 @@ class Isolate;
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
#define ASSERT_OBJECT_SIZE(size) \
- ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
+ ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
@@ -313,11 +291,21 @@ class MemoryChunk {
bool is_valid() { return address() != NULL; }
- MemoryChunk* next_chunk() const { return next_chunk_; }
- MemoryChunk* prev_chunk() const { return prev_chunk_; }
+ MemoryChunk* next_chunk() const {
+ return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
+ }
+
+ MemoryChunk* prev_chunk() const {
+ return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
+ }
+
+ void set_next_chunk(MemoryChunk* next) {
+ base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
+ }
- void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
+ void set_prev_chunk(MemoryChunk* prev) {
+ base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
+ }
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
@@ -457,16 +445,34 @@ class MemoryChunk {
// Return all current flags.
intptr_t GetFlags() { return flags_; }
- intptr_t parallel_sweeping() const {
- return parallel_sweeping_;
+
+ // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
+ // sweeping must not be performed on that page.
+ // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
+ // page and will not touch the page memory anymore.
+ // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
+ // sweeper thread.
+ // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
+ enum ParallelSweepingState {
+ PARALLEL_SWEEPING_DONE,
+ PARALLEL_SWEEPING_FINALIZE,
+ PARALLEL_SWEEPING_IN_PROGRESS,
+ PARALLEL_SWEEPING_PENDING
+ };
+
+ ParallelSweepingState parallel_sweeping() {
+ return static_cast<ParallelSweepingState>(
+ base::Acquire_Load(&parallel_sweeping_));
}
- void set_parallel_sweeping(intptr_t state) {
- parallel_sweeping_ = state;
+ void set_parallel_sweeping(ParallelSweepingState state) {
+ base::Release_Store(&parallel_sweeping_, state);
}
bool TryParallelSweeping() {
- return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+ return base::Acquire_CompareAndSwap(
+ &parallel_sweeping_, PARALLEL_SWEEPING_PENDING,
+ PARALLEL_SWEEPING_IN_PROGRESS) == PARALLEL_SWEEPING_PENDING;
}
// Manage live byte count (count of bytes known to be live,
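TryParallelSweeping() above is a single compare-and-swap that lets exactly one sweeper thread claim a page. A hedged model with std::atomic; the enum mirrors ParallelSweepingState:

#include <atomic>

enum SweepState { SWEEPING_DONE, SWEEPING_FINALIZE,
                  SWEEPING_IN_PROGRESS, SWEEPING_PENDING };

struct PageStub {
  std::atomic<int> sweep_state{SWEEPING_PENDING};

  // Exactly one of N competing sweeper threads wins the
  // PENDING -> IN_PROGRESS transition; the rest skip the page.
  bool TryParallelSweeping() {
    int expected = SWEEPING_PENDING;
    return sweep_state.compare_exchange_strong(expected, SWEEPING_IN_PROGRESS,
                                               std::memory_order_acquire);
  }
};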
@@ -536,7 +542,7 @@ class MemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
- static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
+ static const intptr_t kSizeOffset = 0;
static const intptr_t kLiveBytesOffset =
kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
@@ -550,7 +556,8 @@ class MemoryChunk {
static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
kIntSize + kIntSize + kPointerSize +
- 5 * kPointerSize;
+ 5 * kPointerSize +
+ kPointerSize + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -622,7 +629,7 @@ class MemoryChunk {
inline Heap* heap() { return heap_; }
- static const int kFlagsOffset = kPointerSize * 3;
+ static const int kFlagsOffset = kPointerSize;
bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
@@ -671,8 +678,6 @@ class MemoryChunk {
static inline void UpdateHighWaterMark(Address mark);
protected:
- MemoryChunk* next_chunk_;
- MemoryChunk* prev_chunk_;
size_t size_;
intptr_t flags_;
@@ -702,7 +707,7 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
- intptr_t parallel_sweeping_;
+ base::AtomicWord parallel_sweeping_;
// PagedSpace free-list statistics.
intptr_t available_in_small_free_list_;
@@ -719,11 +724,17 @@ class MemoryChunk {
Executability executable,
Space* owner);
+ private:
+ // next_chunk_ holds a pointer of type MemoryChunk
+ base::AtomicWord next_chunk_;
+ // prev_chunk_ holds a pointer of type MemoryChunk
+ base::AtomicWord prev_chunk_;
+
friend class MemoryAllocator;
};
-STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
// -----------------------------------------------------------------------------
@@ -779,21 +790,11 @@ class Page : public MemoryChunk {
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- // Object area size in bytes.
- static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
-
- // Maximum object size that fits in a page. Objects larger than that size are
- // allocated in large object space and are never moved in memory. This also
- // applies to new space allocation, since objects are never migrated from new
- // space to large object space. Takes double alignment into account.
- static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
-
// Maximum object size that fits in a page. Objects larger than that size
// are allocated in large object space and are never moved in memory. This
// also applies to new space allocation, since objects are never migrated
// from new space to large object space. Takes double alignment into account.
- static const int kMaxNonCodeHeapObjectSize =
- kNonCodeObjectAreaSize - kPointerSize;
+ static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
@@ -840,7 +841,7 @@ class Page : public MemoryChunk {
};
-STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
class LargePage : public MemoryChunk {
@@ -862,7 +863,7 @@ class LargePage : public MemoryChunk {
friend class MemoryAllocator;
};
-STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
@@ -922,19 +923,19 @@ class CodeRange {
// Reserves a range of virtual memory, but does not commit any of it.
// Can only be called once, at heap initialization time.
// Returns false on failure.
- bool SetUp(const size_t requested_size);
+ bool SetUp(size_t requested_size);
// Frees the range of virtual memory, and frees the data structures used to
// manage it.
void TearDown();
- bool exists() { return this != NULL && code_range_ != NULL; }
+ bool valid() { return code_range_ != NULL; }
Address start() {
- if (this == NULL || code_range_ == NULL) return NULL;
+ ASSERT(valid());
return static_cast<Address>(code_range_->address());
}
bool contains(Address address) {
- if (this == NULL || code_range_ == NULL) return false;
+ if (!valid()) return false;
Address start = static_cast<Address>(code_range_->address());
return start <= address && address < start + code_range_->size();
}
@@ -984,8 +985,8 @@ class CodeRange {
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
- // If none can be found, terminates V8 with FatalProcessOutOfMemory.
- void GetNextAllocationBlock(size_t requested);
+ // If none can be found, returns false.
+ bool GetNextAllocationBlock(size_t requested);
// Compares the start addresses of two free blocks.
static int CompareFreeBlockAddress(const FreeBlock* left,
const FreeBlock* right);
@@ -1086,7 +1087,7 @@ class MemoryAllocator {
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
+ return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
}
// Returns an indication of whether a pointer is in a space that has
@@ -1496,9 +1497,8 @@ class FreeListNode: public HeapObject {
inline void Zap();
- static inline FreeListNode* cast(MaybeObject* maybe) {
- ASSERT(!maybe->IsFailure());
- return reinterpret_cast<FreeListNode*>(maybe);
+ static inline FreeListNode* cast(Object* object) {
+ return reinterpret_cast<FreeListNode*>(object);
}
private:
@@ -1513,7 +1513,7 @@ class FreeListNode: public HeapObject {
class FreeListCategory {
public:
FreeListCategory() :
- top_(NULL),
+ top_(0),
end_(NULL),
available_(0) {}
@@ -1527,12 +1527,17 @@ class FreeListCategory {
FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
intptr_t EvictFreeListItemsInList(Page* p);
+ bool ContainsPageFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
- FreeListNode** GetTopAddress() { return &top_; }
- FreeListNode* top() const { return top_; }
- void set_top(FreeListNode* top) { top_ = top; }
+ FreeListNode* top() const {
+ return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
+ }
+
+ void set_top(FreeListNode* top) {
+ base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
+ }
FreeListNode** GetEndAddress() { return &end_; }
FreeListNode* end() const { return end_; }
@@ -1544,13 +1549,18 @@ class FreeListCategory {
Mutex* mutex() { return &mutex_; }
+ bool IsEmpty() {
+ return top() == 0;
+ }
+
#ifdef DEBUG
intptr_t SumFreeList();
int FreeListLength();
#endif
private:
- FreeListNode* top_;
+ // top_ points to the top FreeListNode* in the free list category.
+ base::AtomicWord top_;
FreeListNode* end_;
Mutex mutex_;
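Making top_ a base::AtomicWord lets the main thread and concurrent sweeper threads read and publish the list head without torn accesses. A self-contained sketch of the same relaxed load/store pattern, using C++11 atomics in place of V8's base wrappers:

    #include <atomic>

    struct Node { Node* next; };
    std::atomic<Node*> top{nullptr};

    Node* Top() {                    // mirrors FreeListCategory::top()
      return top.load(std::memory_order_relaxed);
    }

    void SetTop(Node* n) {           // mirrors FreeListCategory::set_top()
      top.store(n, std::memory_order_relaxed);
    }

    bool IsEmpty() { return Top() == nullptr; }

Relaxed ordering is presumably sufficient here because higher-level synchronization (the category mutex and GC pauses) orders the list contents; the atomics only guard the head pointer itself.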
@@ -1582,7 +1592,7 @@ class FreeListCategory {
// These spaces are called large.
// At least 16384 words. This list is for objects of 2048 words or larger.
// Empty pages are added to this list. These spaces are called huge.
-class FreeList BASE_EMBEDDED {
+class FreeList {
public:
explicit FreeList(PagedSpace* owner);
@@ -1611,6 +1621,11 @@ class FreeList BASE_EMBEDDED {
// 'wasted_bytes'. The size should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+ bool IsEmpty() {
+ return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
+ large_list_.IsEmpty() && huge_list_.IsEmpty();
+ }
+
#ifdef DEBUG
void Zap();
intptr_t SumFreeLists();
@@ -1621,6 +1636,7 @@ class FreeList BASE_EMBEDDED {
void RepairLists(Heap* heap);
intptr_t EvictFreeListItems(Page* p);
+ bool ContainsPageFreeListItems(Page* p);
FreeListCategory* small_list() { return &small_list_; }
FreeListCategory* medium_list() { return &medium_list_; }
@@ -1630,7 +1646,7 @@ class FreeList BASE_EMBEDDED {
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
+ static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
@@ -1653,6 +1669,47 @@ class FreeList BASE_EMBEDDED {
};
+class AllocationResult {
+ public:
+ // Implicit constructor from Object*.
+ AllocationResult(Object* object) : object_(object), // NOLINT
+ retry_space_(INVALID_SPACE) { }
+
+ AllocationResult() : object_(NULL),
+ retry_space_(INVALID_SPACE) { }
+
+ static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
+ return AllocationResult(space);
+ }
+
+ inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }
+
+ template <typename T>
+ bool To(T** obj) {
+ if (IsRetry()) return false;
+ *obj = T::cast(object_);
+ return true;
+ }
+
+ Object* ToObjectChecked() {
+ CHECK(!IsRetry());
+ return object_;
+ }
+
+ AllocationSpace RetrySpace() {
+ ASSERT(IsRetry());
+ return retry_space_;
+ }
+
+ private:
+ explicit AllocationResult(AllocationSpace space) : object_(NULL),
+ retry_space_(space) { }
+
+ Object* object_;
+ AllocationSpace retry_space_;
+};
+
+
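AllocationResult replaces MaybeObject-style failure signaling: success carries the object, failure carries the space whose collection should make a retry possible. A usage sketch; the surrounding calls are illustrative, not specific V8 functions:

    // Sketch: typical consumption of an AllocationResult.
    AllocationResult result = space->AllocateRaw(size_in_bytes);
    HeapObject* object = NULL;
    if (result.To(&object)) {
      // Success: 'object' is valid.
    } else {
      AllocationSpace retry = result.RetrySpace();  // e.g. NEW_SPACE
      // Collect garbage in 'retry', then attempt the allocation again.
    }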
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
@@ -1682,10 +1739,10 @@ class PagedSpace : public Space {
bool Contains(HeapObject* o) { return Contains(o->address()); }
// Given an address occupied by a live object, return that object if it is
- // in this space, or Failure::Exception() if it is not. The implementation
- // iterates over objects in the page containing the address, the cost is
- // linear in the number of objects in the page. It may be slow.
- MUST_USE_RESULT MaybeObject* FindObject(Address addr);
+ // in this space, or a Smi if it is not. The implementation iterates over
+ // objects in the page containing the address, the cost is linear in the
+ // number of objects in the page. It may be slow.
+ Object* FindObject(Address addr);
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
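A brief sketch of the new contract, since callers now tell the two outcomes apart with IsHeapObject() instead of IsFailure():

    // Sketch: probing a paged space for the object covering 'addr'.
    Object* o = space->FindObject(addr);
    if (o->IsHeapObject()) {
      HeapObject* found = HeapObject::cast(o);
      // 'found' is the live object whose body contains addr.
    } else {
      // A Smi sentinel: addr is not in this space.
    }

LargeObjectSpace::SlowContains later in this diff is updated to exactly this test.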
@@ -1743,8 +1800,9 @@ class PagedSpace : public Space {
intptr_t Available() { return free_list_.available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
- // lazy sweeping are counted as being allocated! The bytes in the current
- // linear allocation area (between top and limit) are also counted here.
+ // concurrent sweeping are counted as being allocated! The bytes in the
+ // current linear allocation area (between top and limit) are also counted
+ // here.
virtual intptr_t Size() { return accounting_stats_.Size(); }
// As size, but the bytes in lazily swept pages are estimated and the bytes
@@ -1772,7 +1830,7 @@ class PagedSpace : public Space {
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
@@ -1813,7 +1871,7 @@ class PagedSpace : public Space {
void IncreaseCapacity(int size);
// Releases an unused page and shrinks the space.
- void ReleasePage(Page* page, bool unlink);
+ void ReleasePage(Page* page);
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
@@ -1845,24 +1903,18 @@ class PagedSpace : public Space {
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
- static bool ShouldBeSweptLazily(Page* p) {
+ static bool ShouldBeSweptBySweeperThreads(Page* p) {
return !p->IsEvacuationCandidate() &&
!p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
!p->WasSweptPrecisely();
}
- void SetPagesToSweep(Page* first) {
- ASSERT(unswept_free_bytes_ == 0);
- if (first == &anchor_) first = NULL;
- first_unswept_page_ = first;
- }
-
void IncrementUnsweptFreeBytes(intptr_t by) {
unswept_free_bytes_ += by;
}
void IncreaseUnsweptFreeBytes(Page* p) {
- ASSERT(ShouldBeSweptLazily(p));
+ ASSERT(ShouldBeSweptBySweeperThreads(p));
unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
}
@@ -1871,7 +1923,7 @@ class PagedSpace : public Space {
}
void DecreaseUnsweptFreeBytes(Page* p) {
- ASSERT(ShouldBeSweptLazily(p));
+ ASSERT(ShouldBeSweptBySweeperThreads(p));
unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
}
@@ -1879,15 +1931,18 @@ class PagedSpace : public Space {
unswept_free_bytes_ = 0;
}
- bool AdvanceSweeper(intptr_t bytes_to_sweep);
-
- // When parallel sweeper threads are active and the main thread finished
- // its sweeping phase, this function waits for them to complete, otherwise
- // AdvanceSweeper with size_in_bytes is called.
+  // This function tries to steal size_in_bytes memory from the sweeper
+  // threads' free lists. If it does not succeed in stealing enough memory,
+  // it will wait for the sweeper threads to finish sweeping.
+ // It returns true when sweeping is completed and false otherwise.
bool EnsureSweeperProgress(intptr_t size_in_bytes);
- bool IsLazySweepingComplete() {
- return !first_unswept_page_->is_valid();
+ void set_end_of_unswept_pages(Page* page) {
+ end_of_unswept_pages_ = page;
+ }
+
+ Page* end_of_unswept_pages() {
+ return end_of_unswept_pages_;
}
Page* FirstPage() { return anchor_.next_page(); }
@@ -1929,15 +1984,16 @@ class PagedSpace : public Space {
bool was_swept_conservatively_;
- // The first page to be swept when the lazy sweeper advances. Is set
- // to NULL when all pages have been swept.
- Page* first_unswept_page_;
-
// The number of free bytes which could be reclaimed by advancing the
- // lazy sweeper. This is only an estimation because lazy sweeping is
- // done conservatively.
+ // concurrent sweeper threads. This is only an estimation because concurrent
+ // sweeping is done conservatively.
intptr_t unswept_free_bytes_;
+ // The sweeper threads iterate over the list of pointer and data space pages
+ // and sweep these pages concurrently. They will stop sweeping after the
+ // end_of_unswept_pages_ page.
+ Page* end_of_unswept_pages_;
+
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS, or if the hard heap
// size limit has been hit.
@@ -1947,11 +2003,14 @@ class PagedSpace : public Space {
// address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearly(int size_in_bytes);
+ MUST_USE_RESULT HeapObject*
+ WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
+
// Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
- friend class SweeperThread;
+ friend class MarkCompactCollector;
};
@@ -2008,7 +2067,7 @@ class NewSpacePage : public MemoryChunk {
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
- static const int kAreaSize = Page::kNonCodeObjectAreaSize;
+ static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
inline NewSpacePage* next_page() const {
return static_cast<NewSpacePage*>(next_chunk());
@@ -2060,6 +2119,12 @@ class NewSpacePage : public MemoryChunk {
return NewSpacePage::FromAddress(address_limit - 1);
}
+ // Checks if address1 and address2 are on the same new space page.
+ static inline bool OnSamePage(Address address1, Address address2) {
+ return NewSpacePage::FromAddress(address1) ==
+ NewSpacePage::FromAddress(address2);
+ }
+
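Assuming NewSpacePage::FromAddress masks off the low page-offset bits (the usual implementation, though not shown in this diff), the new predicate amounts to comparing page base addresses:

    #include <cstdint>

    // Sketch of the equivalence; 'page_mask' stands in for
    // Page::kPageAlignmentMask.
    bool OnSamePageSketch(uintptr_t a, uintptr_t b, uintptr_t page_mask) {
      return (a & ~page_mask) == (b & ~page_mask);
    }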
private:
// Create a NewSpacePage object that is only used as anchor
// for the doubly-linked list of real pages.
@@ -2355,7 +2420,7 @@ class NewSpace : public Space {
inline_allocation_limit_step_(0) {}
// Sets up the new space using the given chunk.
- bool SetUp(int reserved_semispace_size_, int max_semispace_size);
+ bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
@@ -2437,6 +2502,10 @@ class NewSpace : public Space {
return to_space_.MaximumCapacity();
}
+ bool IsAtMaximumCapacity() {
+ return Capacity() == MaximumCapacity();
+ }
+
// Returns the initial capacity of a semispace.
int InitialCapacity() {
ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
@@ -2454,6 +2523,12 @@ class NewSpace : public Space {
allocation_info_.set_top(top);
}
+ // Return the address of the allocation pointer limit in the active semispace.
+ Address limit() {
+ ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
+ return allocation_info_.limit();
+ }
+
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_.space_start(); }
@@ -2488,7 +2563,7 @@ class NewSpace : public Space {
return allocation_info_.limit_address();
}
- MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
+ MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
@@ -2605,7 +2680,7 @@ class NewSpace : public Space {
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
- MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
friend class SemiSpaceIterator;
@@ -2668,7 +2743,7 @@ class MapSpace : public PagedSpace {
virtual void VerifyObject(HeapObject* obj);
private:
- static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
+ static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
// Do map space compaction if there is a page gap.
int CompactionThreshold() {
@@ -2760,8 +2835,8 @@ class LargeObjectSpace : public Space {
// Shared implementation of AllocateRaw, AllocateRawCode and
// AllocateRawFixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
- Executability executable);
+ MUST_USE_RESULT AllocationResult AllocateRaw(int object_size,
+ Executability executable);
// Available bytes for objects in this space.
inline intptr_t Available();
@@ -2789,10 +2864,9 @@ class LargeObjectSpace : public Space {
return page_count_;
}
- // Finds an object for a given address, returns Failure::Exception()
- // if it is not found. The function iterates through all objects in this
- // space, may be slow.
- MaybeObject* FindObject(Address a);
+ // Finds an object for a given address, returns a Smi if it is not found.
+ // The function iterates through all objects in this space, may be slow.
+ Object* FindObject(Address a);
// Finds a large object page containing the given address, returns NULL
// if such a page doesn't exist.
@@ -2820,7 +2894,7 @@ class LargeObjectSpace : public Space {
#endif
// Checks whether an address is in the object area in this space. It
// iterates all objects in the space. May be slow.
- bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
+ bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
private:
intptr_t max_capacity_;
diff --git a/chromium/v8/src/splay-tree-inl.h b/chromium/v8/src/splay-tree-inl.h
index 42024756e9b..6c7b4f404ca 100644
--- a/chromium/v8/src/splay-tree-inl.h
+++ b/chromium/v8/src/splay-tree-inl.h
@@ -1,34 +1,11 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SPLAY_TREE_INL_H_
#define V8_SPLAY_TREE_INL_H_
-#include "splay-tree.h"
+#include "src/splay-tree.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/splay-tree.h b/chromium/v8/src/splay-tree.h
index f393027a82c..5448fcd042a 100644
--- a/chromium/v8/src/splay-tree.h
+++ b/chromium/v8/src/splay-tree.h
@@ -1,34 +1,11 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SPLAY_TREE_H_
#define V8_SPLAY_TREE_H_
-#include "allocation.h"
+#include "src/allocation.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/store-buffer-inl.h b/chromium/v8/src/store-buffer-inl.h
index 7e5432c8413..fdfe37d3a29 100644
--- a/chromium/v8/src/store-buffer-inl.h
+++ b/chromium/v8/src/store-buffer-inl.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_STORE_BUFFER_INL_H_
#define V8_STORE_BUFFER_INL_H_
-#include "store-buffer.h"
+#include "src/store-buffer.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/store-buffer.cc b/chromium/v8/src/store-buffer.cc
index e89eb1bfed4..5ec3e547836 100644
--- a/chromium/v8/src/store-buffer.cc
+++ b/chromium/v8/src/store-buffer.cc
@@ -1,37 +1,16 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "store-buffer.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/store-buffer.h"
#include <algorithm>
-#include "v8.h"
-#include "store-buffer-inl.h"
-#include "v8-counters.h"
+#include "src/v8.h"
+
+#include "src/base/atomicops.h"
+#include "src/counters.h"
+#include "src/store-buffer-inl.h"
namespace v8 {
namespace internal {
@@ -355,27 +334,6 @@ void StoreBuffer::GCPrologue() {
#ifdef VERIFY_HEAP
-static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
- // Do nothing.
-}
-
-
-void StoreBuffer::VerifyPointers(PagedSpace* space,
- RegionCallback region_callback) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* page = it.next();
- FindPointersToNewSpaceOnPage(
- reinterpret_cast<PagedSpace*>(page->owner()),
- page,
- region_callback,
- &DummyScavengePointer,
- false);
- }
-}
-
-
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
LargeObjectIterator it(space);
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
@@ -388,7 +346,9 @@ void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
// When we are not in GC the Heap::InNewSpace() predicate
// checks that pointers which satisfy predicate point into
// the active semispace.
- heap_->InNewSpace(*slot);
+ Object* object = reinterpret_cast<Object*>(
+ base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+ heap_->InNewSpace(object);
slot_address += kPointerSize;
}
}
@@ -399,10 +359,6 @@ void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
- VerifyPointers(heap_->old_pointer_space(),
- &StoreBuffer::FindPointersToNewSpaceInRegion);
- VerifyPointers(heap_->map_space(),
- &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
VerifyPointers(heap_->lo_space());
#endif
}
@@ -427,14 +383,18 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
slot_address < end;
slot_address += kPointerSize) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- if (heap_->InNewSpace(*slot)) {
- HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
- ASSERT(object->IsHeapObject());
+ Object* object = reinterpret_cast<Object*>(
+ base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+ if (heap_->InNewSpace(object)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ ASSERT(heap_object->IsHeapObject());
// The new space object was not promoted if it still contains a map
// pointer. Clear the map field now lazily.
- if (clear_maps) ClearDeadObject(object);
- slot_callback(reinterpret_cast<HeapObject**>(slot), object);
- if (heap_->InNewSpace(*slot)) {
+ if (clear_maps) ClearDeadObject(heap_object);
+ slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+ object = reinterpret_cast<Object*>(
+ base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+ if (heap_->InNewSpace(object)) {
EnterDirectlyIntoStoreBuffer(slot_address);
}
}
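The re-read after the callback is the point of this change: the callback (or a concurrent scavenge) may rewrite the slot in place, so a cached value cannot be trusted. A condensed sketch of the load, callback, reload sequence, with C++11 atomics standing in for base::NoBarrier_Load:

    #include <atomic>

    // Sketch of one iteration of the slot-processing loop.
    void ProcessSlot(std::atomic<void*>* slot,
                     bool (*in_new_space)(void*),
                     void (*callback)(std::atomic<void*>*, void*)) {
      void* object = slot->load(std::memory_order_relaxed);
      if (in_new_space(object)) {
        callback(slot, object);  // may rewrite *slot
        object = slot->load(std::memory_order_relaxed);  // re-read the slot
        if (in_new_space(object)) {
          // Slot still points into new space: keep tracking it.
        }
      }
    }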
@@ -490,7 +450,7 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
Address map_aligned_end = MapEndAlign(end);
ASSERT(map_aligned_start == start);
- ASSERT(map_aligned_end == end);
+ ASSERT(map_aligned_start <= map_aligned_end && map_aligned_end <= end);
FindPointersToNewSpaceInMaps(map_aligned_start,
map_aligned_end,
@@ -499,83 +459,6 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
}
-// This function iterates over all the pointers in a paged space in the heap,
-// looking for pointers into new space. Within the pages there may be dead
-// objects that have not been overwritten by free spaces or fillers because of
-// lazy sweeping. These dead objects may not contain pointers to new space.
-// The garbage areas that have been swept properly (these will normally be the
-// large ones) will be marked with free space and filler map words. In
-// addition any area that has never been used at all for object allocation must
-// be marked with a free space or filler. Because the free space and filler
-// maps do not move we can always recognize these even after a compaction.
-// Normal objects like FixedArrays and JSObjects should not contain references
-// to these maps. The special garbage section (see comment in spaces.h) is
-// skipped since it can contain absolutely anything. Any objects that are
-// allocated during iteration may or may not be visited by the iteration, but
-// they will not be partially visited.
-void StoreBuffer::FindPointersToNewSpaceOnPage(
- PagedSpace* space,
- Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback,
- bool clear_maps) {
- Address visitable_start = page->area_start();
- Address end_of_page = page->area_end();
-
- Address visitable_end = visitable_start;
-
- Object* free_space_map = heap_->free_space_map();
- Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
-
- while (visitable_end < end_of_page) {
- Object* o = *reinterpret_cast<Object**>(visitable_end);
- // Skip fillers but not things that look like fillers in the special
- // garbage section which can contain anything.
- if (o == free_space_map ||
- o == two_pointer_filler_map ||
- (visitable_end == space->top() && visitable_end != space->limit())) {
- if (visitable_start != visitable_end) {
- // After calling this the special garbage section may have moved.
- (this->*region_callback)(visitable_start,
- visitable_end,
- slot_callback,
- clear_maps);
- if (visitable_end >= space->top() && visitable_end < space->limit()) {
- visitable_end = space->limit();
- visitable_start = visitable_end;
- continue;
- }
- }
- if (visitable_end == space->top() && visitable_end != space->limit()) {
- visitable_start = visitable_end = space->limit();
- } else {
- // At this point we are either at the start of a filler or we are at
- // the point where the space->top() used to be before the
- // visit_pointer_region call above. Either way we can skip the
- // object at the current spot: We don't promise to visit objects
- // allocated during heap traversal, and if space->top() moved then it
- // must be because an object was allocated at this point.
- visitable_start =
- visitable_end + HeapObject::FromAddress(visitable_end)->Size();
- visitable_end = visitable_start;
- }
- } else {
- ASSERT(o != free_space_map);
- ASSERT(o != two_pointer_filler_map);
- ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
- visitable_end += kPointerSize;
- }
- }
- ASSERT(visitable_end == end_of_page);
- if (visitable_start != visitable_end) {
- (this->*region_callback)(visitable_start,
- visitable_end,
- slot_callback,
- clear_maps);
- }
-}
-
-
void StoreBuffer::IteratePointersInStoreBuffer(
ObjectSlotCallback slot_callback,
bool clear_maps) {
@@ -588,14 +471,17 @@ void StoreBuffer::IteratePointersInStoreBuffer(
Address* saved_top = old_top_;
#endif
Object** slot = reinterpret_cast<Object**>(*current);
- Object* object = *slot;
+ Object* object = reinterpret_cast<Object*>(
+ base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
if (heap_->InFromSpace(object)) {
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
// The new space object was not promoted if it still contains a map
// pointer. Clear the map field now lazily.
if (clear_maps) ClearDeadObject(heap_object);
slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
- if (heap_->InNewSpace(*slot)) {
+ object = reinterpret_cast<Object*>(
+ base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+ if (heap_->InNewSpace(object)) {
EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
}
}
@@ -659,14 +545,15 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
} else {
Page* page = reinterpret_cast<Page*>(chunk);
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
- FindPointersToNewSpaceOnPage(
- owner,
- page,
- (owner == heap_->map_space() ?
- &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
- &StoreBuffer::FindPointersToNewSpaceInRegion),
- slot_callback,
- clear_maps);
+ Address start = page->area_start();
+ Address end = page->area_end();
+ if (owner == heap_->map_space()) {
+ FindPointersToNewSpaceInMapsRegion(
+ start, end, slot_callback, clear_maps);
+ } else {
+ FindPointersToNewSpaceInRegion(
+ start, end, slot_callback, clear_maps);
+ }
}
}
}
diff --git a/chromium/v8/src/store-buffer.h b/chromium/v8/src/store-buffer.h
index 01e7cbeb8d2..d6de2aa8e30 100644
--- a/chromium/v8/src/store-buffer.h
+++ b/chromium/v8/src/store-buffer.h
@@ -1,38 +1,14 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_
-#include "allocation.h"
-#include "checks.h"
-#include "globals.h"
-#include "platform.h"
-#include "v8globals.h"
+#include "src/allocation.h"
+#include "src/checks.h"
+#include "src/globals.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
@@ -203,18 +179,10 @@ class StoreBuffer {
ObjectSlotCallback slot_callback,
bool clear_maps);
- void FindPointersToNewSpaceOnPage(
- PagedSpace* space,
- Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback,
- bool clear_maps);
-
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
bool clear_maps);
#ifdef VERIFY_HEAP
- void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
void VerifyPointers(LargeObjectSpace* space);
#endif
diff --git a/chromium/v8/src/string-search.cc b/chromium/v8/src/string-search.cc
index 3ae68b5d401..1f0eb7e6abe 100644
--- a/chromium/v8/src/string-search.cc
+++ b/chromium/v8/src/string-search.cc
@@ -1,32 +1,9 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
-#include "string-search.h"
+#include "src/v8.h"
+#include "src/string-search.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/string-search.h b/chromium/v8/src/string-search.h
index bc685ffe584..09bc36ef82e 100644
--- a/chromium/v8/src/string-search.h
+++ b/chromium/v8/src/string-search.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_STRING_SEARCH_H_
#define V8_STRING_SEARCH_H_
diff --git a/chromium/v8/src/string-stream.cc b/chromium/v8/src/string-stream.cc
index e2d15f54056..930ce3dff53 100644
--- a/chromium/v8/src/string-stream.cc
+++ b/chromium/v8/src/string-stream.cc
@@ -1,34 +1,10 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "factory.h"
-#include "string-stream.h"
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/string-stream.h"
+
+#include "src/handles-inl.h"
namespace v8 {
namespace internal {
@@ -153,21 +129,21 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
case 'i': case 'd': case 'u': case 'x': case 'c': case 'X': {
int value = current.data_.u_int_;
EmbeddedVector<char, 24> formatted;
- int length = OS::SNPrintF(formatted, temp.start(), value);
+ int length = SNPrintF(formatted, temp.start(), value);
Add(Vector<const char>(formatted.start(), length));
break;
}
case 'f': case 'g': case 'G': case 'e': case 'E': {
double value = current.data_.u_double_;
EmbeddedVector<char, 28> formatted;
- OS::SNPrintF(formatted, temp.start(), value);
+ SNPrintF(formatted, temp.start(), value);
Add(formatted.start());
break;
}
case 'p': {
void* value = current.data_.u_pointer_;
EmbeddedVector<char, 20> formatted;
- OS::SNPrintF(formatted, temp.start(), value);
+ SNPrintF(formatted, temp.start(), value);
Add(formatted.start());
break;
}
@@ -261,7 +237,7 @@ void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
SmartArrayPointer<const char> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
- OS::MemCopy(str, buffer_, length_);
+ MemCopy(str, buffer_, length_);
str[length_] = '\0';
return SmartArrayPointer<const char>(str);
}
@@ -290,7 +266,7 @@ void StringStream::OutputToFile(FILE* out) {
Handle<String> StringStream::ToString(Isolate* isolate) {
return isolate->factory()->NewStringFromUtf8(
- Vector<const char>(buffer_, length_));
+ Vector<const char>(buffer_, length_)).ToHandleChecked();
}
@@ -372,7 +348,8 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
key->ShortPrint();
}
Add(": ");
- Object* value = js_object->RawFastPropertyAt(descs->GetFieldIndex(i));
+ FieldIndex index = FieldIndex::ForDescriptor(map, i);
+ Object* value = js_object->RawFastPropertyAt(index);
Add("%o\n", value);
}
}
@@ -570,7 +547,7 @@ char* HeapStringAllocator::grow(unsigned* bytes) {
if (new_space == NULL) {
return space_;
}
- OS::MemCopy(new_space, space_, *bytes);
+ MemCopy(new_space, space_, *bytes);
*bytes = new_bytes;
DeleteArray(space_);
space_ = new_space;
diff --git a/chromium/v8/src/string-stream.h b/chromium/v8/src/string-stream.h
index e3db2a8a868..d72d5c2d0e8 100644
--- a/chromium/v8/src/string-stream.h
+++ b/chromium/v8/src/string-stream.h
@@ -1,40 +1,18 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_STRING_STREAM_H_
#define V8_STRING_STREAM_H_
+#include "src/handles.h"
+
namespace v8 {
namespace internal {
-
class StringAllocator {
public:
- virtual ~StringAllocator() {}
+ virtual ~StringAllocator() { }
// Allocate a number of bytes.
virtual char* allocate(unsigned bytes) = 0;
// Allocate a larger number of bytes and copy the old buffer to the new one.
@@ -46,11 +24,12 @@ class StringAllocator {
// Normal allocator uses new[] and delete[].
-class HeapStringAllocator: public StringAllocator {
+class HeapStringAllocator V8_FINAL : public StringAllocator {
public:
~HeapStringAllocator() { DeleteArray(space_); }
- char* allocate(unsigned bytes);
- char* grow(unsigned* bytes);
+ virtual char* allocate(unsigned bytes) V8_OVERRIDE;
+ virtual char* grow(unsigned* bytes) V8_OVERRIDE;
+
private:
char* space_;
};
@@ -59,18 +38,19 @@ class HeapStringAllocator: public StringAllocator {
// Allocator for use when no new C++ heap allocation is allowed.
// It is given a preallocated buffer up front and does no allocation while
// building the message.
-class NoAllocationStringAllocator: public StringAllocator {
+class NoAllocationStringAllocator V8_FINAL : public StringAllocator {
public:
NoAllocationStringAllocator(char* memory, unsigned size);
- char* allocate(unsigned bytes) { return space_; }
- char* grow(unsigned* bytes);
+ virtual char* allocate(unsigned bytes) V8_OVERRIDE { return space_; }
+ virtual char* grow(unsigned* bytes) V8_OVERRIDE;
+
private:
unsigned size_;
char* space_;
};
-class FmtElm {
+class FmtElm V8_FINAL {
public:
FmtElm(int value) : type_(INT) { // NOLINT
data_.u_int_ = value;
@@ -110,7 +90,7 @@ class FmtElm {
};
-class StringStream {
+class StringStream V8_FINAL {
public:
explicit StringStream(StringAllocator* allocator):
allocator_(allocator),
@@ -120,9 +100,6 @@ class StringStream {
buffer_[0] = 0;
}
- ~StringStream() {
- }
-
bool Put(char c);
bool Put(String* str);
bool Put(String* str, int start, int end);
@@ -175,7 +152,6 @@ class StringStream {
static bool IsMentionedObjectCacheClear(Isolate* isolate);
#endif
-
static const int kInitialCapacity = 16;
private:
@@ -194,7 +170,7 @@ class StringStream {
// Utility class to print a list of items to a stream, divided by a separator.
-class SimpleListPrinter {
+class SimpleListPrinter V8_FINAL {
public:
explicit SimpleListPrinter(StringStream* stream, char separator = ',') {
separator_ = separator;
@@ -217,7 +193,6 @@ class SimpleListPrinter {
StringStream* stream_;
};
-
} } // namespace v8::internal
#endif // V8_STRING_STREAM_H_
diff --git a/chromium/v8/src/string.js b/chromium/v8/src/string.js
index 14b44ca41f3..07cdcc043f0 100644
--- a/chromium/v8/src/string.js
+++ b/chromium/v8/src/string.js
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This file relies on the fact that the following declaration has been made
// in runtime.js:
@@ -61,10 +38,8 @@ function StringValueOf() {
// ECMA-262, section 15.5.4.4
function StringCharAt(pos) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.charAt"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.charAt");
+
var result = %_StringCharAt(this, pos);
if (%_IsSmi(result)) {
result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -75,10 +50,8 @@ function StringCharAt(pos) {
// ECMA-262 section 15.5.4.5
function StringCharCodeAt(pos) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.charCodeAt"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.charCodeAt");
+
var result = %_StringCharCodeAt(this, pos);
if (!%_IsSmi(result)) {
result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -89,10 +62,8 @@ function StringCharCodeAt(pos) {
// ECMA-262, section 15.5.4.6
function StringConcat() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.concat"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.concat");
+
var len = %_ArgumentsLength();
var this_as_string = TO_STRING_INLINE(this);
if (len === 1) {
@@ -112,11 +83,9 @@ function StringConcat() {
// ECMA-262 section 15.5.4.7
-function StringIndexOf(pattern /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.indexOf"]);
- }
+function StringIndexOfJS(pattern /* position */) { // length == 1
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.indexOf");
+
var subject = TO_STRING_INLINE(this);
pattern = TO_STRING_INLINE(pattern);
var index = 0;
@@ -131,11 +100,9 @@ function StringIndexOf(pattern /* position */) { // length == 1
// ECMA-262 section 15.5.4.8
-function StringLastIndexOf(pat /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.lastIndexOf"]);
- }
+function StringLastIndexOfJS(pat /* position */) { // length == 1
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.lastIndexOf");
+
var sub = TO_STRING_INLINE(this);
var subLength = sub.length;
var pat = TO_STRING_INLINE(pat);
@@ -164,22 +131,18 @@ function StringLastIndexOf(pat /* position */) { // length == 1
//
// This function is implementation specific. For now, we do not
// do anything locale specific.
-function StringLocaleCompare(other) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.localeCompare"]);
- }
+function StringLocaleCompareJS(other) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.localeCompare");
+
return %StringLocaleCompare(TO_STRING_INLINE(this),
TO_STRING_INLINE(other));
}
// ECMA-262 section 15.5.4.10
-function StringMatch(regexp) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.match"]);
- }
+function StringMatchJS(regexp) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
+
var subject = TO_STRING_INLINE(this);
if (IS_REGEXP(regexp)) {
// Emulate RegExp.prototype.exec's side effect in step 5, even though
@@ -187,7 +150,6 @@ function StringMatch(regexp) {
var lastIndex = regexp.lastIndex;
TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
- %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
// lastMatchInfo is defined in regexp.js.
var result = %StringMatch(subject, regexp, lastMatchInfo);
if (result !== null) lastMatchInfoOverride = null;
@@ -200,6 +162,28 @@ function StringMatch(regexp) {
}
+var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
+
+
+// ECMA-262 v6, section 21.1.3.12
+//
+// For now we do nothing, as proper normalization requires big tables.
+// If Intl is enabled, then i18n.js will override it and provide the
+// proper functionality.
+function StringNormalizeJS(form) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
+
+ var form = form ? TO_STRING_INLINE(form) : 'NFC';
+ var normalizationForm = NORMALIZATION_FORMS.indexOf(form);
+ if (normalizationForm === -1) {
+ throw new $RangeError('The normalization form should be one of '
+ + NORMALIZATION_FORMS.join(', ') + '.');
+ }
+
+ return %_ValueOf(this);
+}
+
+
// This has the same size as the lastMatchInfo array, and can be used for
// functions that expect that structure to be returned. It is used when the
// needle is a string rather than a regexp. In this case we can't update
@@ -210,10 +194,8 @@ var reusableMatchInfo = [2, "", "", -1, -1];
// ECMA-262, section 15.5.4.11
function StringReplace(search, replace) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.replace"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.replace");
+
var subject = TO_STRING_INLINE(this);
// Decision tree for dispatch
@@ -238,7 +220,6 @@ function StringReplace(search, replace) {
// value is discarded.
var lastIndex = search.lastIndex;
TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
- %_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
if (!IS_SPEC_FUNCTION(replace)) {
replace = TO_STRING_INLINE(replace);
@@ -543,10 +524,8 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
// ECMA-262 section 15.5.4.12
function StringSearch(re) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.search"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
+
var regexp;
if (IS_STRING(re)) {
regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
@@ -565,10 +544,8 @@ function StringSearch(re) {
// ECMA-262 section 15.5.4.13
function StringSlice(start, end) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.slice"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.slice");
+
var s = TO_STRING_INLINE(this);
var s_len = s.length;
var start_i = TO_INTEGER(start);
@@ -608,32 +585,28 @@ function StringSlice(start, end) {
// ECMA-262 section 15.5.4.14
-function StringSplit(separator, limit) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.split"]);
- }
+function StringSplitJS(separator, limit) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.split");
+
var subject = TO_STRING_INLINE(this);
limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
- // ECMA-262 says that if separator is undefined, the result should
- // be an array of size 1 containing the entire string.
- if (IS_UNDEFINED(separator)) {
- return [subject];
- }
-
var length = subject.length;
if (!IS_REGEXP(separator)) {
- separator = TO_STRING_INLINE(separator);
+ var separator_string = TO_STRING_INLINE(separator);
if (limit === 0) return [];
- var separator_length = separator.length;
+ // ECMA-262 says that if separator is undefined, the result should
+ // be an array of size 1 containing the entire string.
+ if (IS_UNDEFINED(separator)) return [subject];
+
+ var separator_length = separator_string.length;
// If the separator string is empty then return the elements in the subject.
if (separator_length === 0) return %StringToArray(subject, limit);
- var result = %StringSplit(subject, separator, limit);
+ var result = %StringSplit(subject, separator_string, limit);
return result;
}
@@ -645,11 +618,7 @@ function StringSplit(separator, limit) {
}
-var ArrayPushBuiltin = $Array.prototype.push;
-
function StringSplitOnRegExp(subject, separator, limit, length) {
- %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
-
if (length === 0) {
if (DoRegExpExec(separator, subject, 0, 0) != null) {
return [];
@@ -660,21 +629,19 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
var currentIndex = 0;
var startIndex = 0;
var startMatch = 0;
- var result = [];
+ var result = new InternalArray();
outer_loop:
while (true) {
if (startIndex === length) {
- %_CallFunction(result, %_SubString(subject, currentIndex, length),
- ArrayPushBuiltin);
+ result[result.length] = %_SubString(subject, currentIndex, length);
break;
}
var matchInfo = DoRegExpExec(separator, subject, startIndex);
if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
- %_CallFunction(result, %_SubString(subject, currentIndex, length),
- ArrayPushBuiltin);
+ result[result.length] = %_SubString(subject, currentIndex, length);
break;
}
var endIndex = matchInfo[CAPTURE1];
@@ -685,8 +652,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
continue;
}
- %_CallFunction(result, %_SubString(subject, currentIndex, startMatch),
- ArrayPushBuiltin);
+ result[result.length] = %_SubString(subject, currentIndex, startMatch);
if (result.length === limit) break;
@@ -695,26 +661,25 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
var start = matchInfo[i++];
var end = matchInfo[i++];
if (end != -1) {
- %_CallFunction(result, %_SubString(subject, start, end),
- ArrayPushBuiltin);
+ result[result.length] = %_SubString(subject, start, end);
} else {
- %_CallFunction(result, UNDEFINED, ArrayPushBuiltin);
+ result[result.length] = UNDEFINED;
}
if (result.length === limit) break outer_loop;
}
startIndex = currentIndex = endIndex;
}
- return result;
+ var array_result = [];
+ %MoveArrayContents(result, array_result);
+ return array_result;
}
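Replacing the user-visible Array accumulator (and the cached ArrayPushBuiltin) with an InternalArray that is handed to %MoveArrayContents at the end keeps user monkey-patching out of the loop without any caching tricks. The hazard being defended against, as plain JavaScript:

    // Hostile user code must not be able to corrupt split results:
    Array.prototype.push = function() { return 0; };
    "a,b,c".split(/,/);  // still ["a", "b", "c"]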
// ECMA-262 section 15.5.4.15
function StringSubstring(start, end) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.subString"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.subString");
+
var s = TO_STRING_INLINE(this);
var s_len = s.length;
@@ -746,10 +711,8 @@ function StringSubstring(start, end) {
// This is not a part of ECMA-262.
function StringSubstr(start, n) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.substr"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.substr");
+
var s = TO_STRING_INLINE(this);
var len;
@@ -787,66 +750,52 @@ function StringSubstr(start, n) {
// ECMA-262, 15.5.4.16
-function StringToLowerCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toLowerCase"]);
- }
+function StringToLowerCaseJS() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
+
return %StringToLowerCase(TO_STRING_INLINE(this));
}
// ECMA-262, 15.5.4.17
function StringToLocaleLowerCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toLocaleLowerCase"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
+
return %StringToLowerCase(TO_STRING_INLINE(this));
}
// ECMA-262, 15.5.4.18
-function StringToUpperCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toUpperCase"]);
- }
+function StringToUpperCaseJS() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
+
return %StringToUpperCase(TO_STRING_INLINE(this));
}
// ECMA-262, 15.5.4.19
function StringToLocaleUpperCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toLocaleUpperCase"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
+
return %StringToUpperCase(TO_STRING_INLINE(this));
}
// ES5, 15.5.4.20
-function StringTrim() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.trim"]);
- }
+function StringTrimJS() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.trim");
+
return %StringTrim(TO_STRING_INLINE(this), true, true);
}
function StringTrimLeft() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.trimLeft"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimLeft");
+
return %StringTrim(TO_STRING_INLINE(this), true, false);
}
function StringTrimRight() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.trimRight"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimRight");
+
return %StringTrim(TO_STRING_INLINE(this), false, true);
}
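The two boolean arguments to %StringTrim select the side to trim, one per wrapper above. Expected results (illustration; trimLeft/trimRight were non-standard extensions at the time):

    "  x  ".trim();       // "x"   - %StringTrim(s, true, true)
    "  x  ".trimLeft();   // "x  " - %StringTrim(s, true, false)
    "  x  ".trimRight();  // "  x" - %StringTrim(s, false, true)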
@@ -980,21 +929,22 @@ function SetUpString() {
"charAt", StringCharAt,
"charCodeAt", StringCharCodeAt,
"concat", StringConcat,
- "indexOf", StringIndexOf,
- "lastIndexOf", StringLastIndexOf,
- "localeCompare", StringLocaleCompare,
- "match", StringMatch,
+ "indexOf", StringIndexOfJS,
+ "lastIndexOf", StringLastIndexOfJS,
+ "localeCompare", StringLocaleCompareJS,
+ "match", StringMatchJS,
+ "normalize", StringNormalizeJS,
"replace", StringReplace,
"search", StringSearch,
"slice", StringSlice,
- "split", StringSplit,
+ "split", StringSplitJS,
"substring", StringSubstring,
"substr", StringSubstr,
- "toLowerCase", StringToLowerCase,
+ "toLowerCase", StringToLowerCaseJS,
"toLocaleLowerCase", StringToLocaleLowerCase,
- "toUpperCase", StringToUpperCase,
+ "toUpperCase", StringToUpperCaseJS,
"toLocaleUpperCase", StringToLocaleUpperCase,
- "trim", StringTrim,
+ "trim", StringTrimJS,
"trimLeft", StringTrimLeft,
"trimRight", StringTrimRight,
"link", StringLink,
diff --git a/chromium/v8/src/strtod.cc b/chromium/v8/src/strtod.cc
index d332fd2bc4d..391aebc3347 100644
--- a/chromium/v8/src/strtod.cc
+++ b/chromium/v8/src/strtod.cc
@@ -1,39 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdarg.h>
#include <cmath>
-#include "globals.h"
-#include "utils.h"
-#include "strtod.h"
-#include "bignum.h"
-#include "cached-powers.h"
-#include "double.h"
+#include "src/globals.h"
+#include "src/utils.h"
+#include "src/strtod.h"
+#include "src/bignum.h"
+#include "src/cached-powers.h"
+#include "src/double.h"
namespace v8 {
namespace internal {
@@ -176,7 +153,8 @@ static void ReadDiyFp(Vector<const char> buffer,
static bool DoubleStrtod(Vector<const char> trimmed,
int exponent,
double* result) {
-#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER)
+#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 || defined(USE_SIMULATOR)) && \
+ !defined(_MSC_VER)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
// result is not accurate.
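The bar the fast path must clear is correctly rounded string-to-double conversion; an 80-bit x87 intermediate can round twice and land on the wrong neighbouring double, hence extending the guard to V8_TARGET_ARCH_X87. An illustration in JavaScript, since Number() funnels through this conversion: 2^53 + 1 is an exact round-to-even tie, so a value just above it must round up, yet rounding to 80 bits first would collapse it onto the tie and then down to 2^53.

    Number("9007199254740993") === 9007199254740992;           // true: tie, round to even
    Number("9007199254740993.00000001") === 9007199254740994;  // true: above the tie, round up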
diff --git a/chromium/v8/src/strtod.h b/chromium/v8/src/strtod.h
index 1a5a96c8ebe..f4ce731a17b 100644
--- a/chromium/v8/src/strtod.h
+++ b/chromium/v8/src/strtod.h
@@ -1,29 +1,6 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_STRTOD_H_
#define V8_STRTOD_H_
diff --git a/chromium/v8/src/stub-cache.cc b/chromium/v8/src/stub-cache.cc
index 689eeaef153..c15038ee9ec 100644
--- a/chromium/v8/src/stub-cache.cc
+++ b/chromium/v8/src/stub-cache.cc
@@ -1,41 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "arguments.h"
-#include "ast.h"
-#include "code-stubs.h"
-#include "cpu-profiler.h"
-#include "gdb-jit.h"
-#include "ic-inl.h"
-#include "stub-cache.h"
-#include "vm-state-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/ast.h"
+#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
+#include "src/gdb-jit.h"
+#include "src/ic-inl.h"
+#include "src/stub-cache.h"
+#include "src/type-info.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -115,9 +93,9 @@ Handle<Code> StubCache::FindIC(Handle<Name> name,
Handle<Code> StubCache::FindHandler(Handle<Name> name,
Handle<Map> stub_holder,
Code::Kind kind,
- InlineCacheHolderFlag cache_holder) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::HANDLER, kNoExtraICState, cache_holder, Code::NORMAL, kind);
+ InlineCacheHolderFlag cache_holder,
+ Code::StubType type) {
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -126,11 +104,11 @@ Handle<Code> StubCache::FindHandler(Handle<Name> name,
Handle<Code> StubCache::ComputeMonomorphicIC(
+ Code::Kind kind,
Handle<Name> name,
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<Code> handler,
ExtraICState extra_ic_state) {
- Code::Kind kind = handler->handler_kind();
InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
Handle<Map> stub_holder;
@@ -138,7 +116,7 @@ Handle<Code> StubCache::ComputeMonomorphicIC(
// There are multiple string maps that all use the same prototype. That
// prototype cannot hold multiple handlers, one for each of the string maps,
// for a single name. Hence, turn off caching of the IC.
- bool can_be_cached = !type->Is(Type::String());
+ bool can_be_cached = !type->Is(HeapType::String());
if (can_be_cached) {
stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate());
ic = FindIC(name, stub_holder, kind, extra_ic_state, flag);
@@ -146,10 +124,10 @@ Handle<Code> StubCache::ComputeMonomorphicIC(
}
if (kind == Code::LOAD_IC) {
- LoadStubCompiler ic_compiler(isolate(), flag);
+ LoadStubCompiler ic_compiler(isolate(), extra_ic_state, flag);
ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
} else if (kind == Code::KEYED_LOAD_IC) {
- KeyedLoadStubCompiler ic_compiler(isolate(), flag);
+ KeyedLoadStubCompiler ic_compiler(isolate(), extra_ic_state, flag);
ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
} else if (kind == Code::STORE_IC) {
StoreStubCompiler ic_compiler(isolate(), extra_ic_state);
@@ -168,7 +146,7 @@ Handle<Code> StubCache::ComputeMonomorphicIC(
Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
- Handle<Type> type) {
+ Handle<HeapType> type) {
InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
Handle<Map> stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate());
// If no dictionary mode objects are present in the prototype chain, the load
@@ -178,7 +156,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
// therefore the stub will be specific to the name.
Handle<Map> current_map = stub_holder;
Handle<Name> cache_name = current_map->is_dictionary_map()
- ? name : Handle<Name>::cast(isolate()->factory()->empty_string());
+ ? name : Handle<Name>::cast(isolate()->factory()->nonexistent_symbol());
Handle<Object> next(current_map->prototype(), isolate());
Handle<JSObject> last = Handle<JSObject>::null();
while (!next->IsNull()) {
@@ -191,10 +169,12 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
Handle<Code> handler = FindHandler(
- cache_name, stub_holder, Code::LOAD_IC, flag);
- if (!handler.is_null()) return handler;
+ cache_name, stub_holder, Code::LOAD_IC, flag, Code::FAST);
+ if (!handler.is_null()) {
+ return handler;
+ }
- LoadStubCompiler compiler(isolate_, flag);
+ LoadStubCompiler compiler(isolate_, kNoExtraICState, flag);
handler = compiler.CompileLoadNonexistent(type, last, cache_name);
Map::UpdateCodeCache(stub_holder, cache_name, handler);
return handler;
@@ -219,7 +199,7 @@ Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) {
Handle<Code> StubCache::ComputeKeyedStoreElement(
Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
KeyedAccessStoreMode store_mode) {
ExtraICState extra_state =
KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
@@ -246,167 +226,7 @@ Handle<Code> StubCache::ComputeKeyedStoreElement(
}
-#define CALL_LOGGER_TAG(kind, type) \
- (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
-
-Handle<Code> StubCache::ComputeCallConstant(int argc,
- Code::Kind kind,
- ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*object);
- Handle<HeapObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *object, cache_holder));
-
- // Compute check type based on receiver/holder.
- CheckType check = RECEIVER_MAP_CHECK;
- if (object->IsString()) {
- check = STRING_CHECK;
- } else if (object->IsSymbol()) {
- check = SYMBOL_CHECK;
- } else if (object->IsNumber()) {
- check = NUMBER_CHECK;
- } else if (object->IsBoolean()) {
- check = BOOLEAN_CHECK;
- }
-
- if (check != RECEIVER_MAP_CHECK &&
- !function->IsBuiltin() &&
- function->shared()->is_classic_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- return Handle<Code>::null();
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, cache_holder, Code::FAST, argc);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallConstant(object, holder, name, check, function);
- code->set_check_type(check);
- ASSERT(flags == code->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
-
- if (CallStubCompiler::CanBeCached(function)) {
- HeapObject::UpdateMapCodeCache(stub_holder, name, code);
- }
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallField(int argc,
- Code::Kind kind,
- ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- PropertyIndex index) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*object);
- Handle<HeapObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *object, cache_holder));
-
- // TODO(1233596): We cannot do receiver map check for non-JS objects
- // because they may be represented as immediates without a
- // map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsSymbol() ||
- object->IsBoolean() || object->IsString()) {
- object = holder;
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, cache_holder, Code::FAST, argc);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallField(Handle<JSObject>::cast(object),
- holder, index, name);
- ASSERT(flags == code->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- HeapObject::UpdateMapCodeCache(stub_holder, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallInterceptor(int argc,
- Code::Kind kind,
- ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*object);
- Handle<HeapObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *object, cache_holder));
-
- // TODO(1233596): We cannot do receiver map check for non-JS objects
- // because they may be represented as immediates without a
- // map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsSymbol() ||
- object->IsBoolean() || object->IsString()) {
- object = holder;
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, cache_holder, Code::FAST, argc);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallInterceptor(Handle<JSObject>::cast(object),
- holder, name);
- ASSERT(flags == code->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- HeapObject::UpdateMapCodeCache(stub_holder, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallGlobal(int argc,
- Code::Kind kind,
- ExtraICState extra_state,
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, OWN_MAP, Code::NORMAL, argc);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate(), argc, kind, extra_state);
- Handle<Code> code =
- compiler.CompileCallGlobal(receiver, holder, cell, function, name);
- ASSERT(flags == code->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- if (CallStubCompiler::CanBeCached(function)) {
- HeapObject::UpdateMapCodeCache(receiver, name, code);
- }
- return code;
-}
-
+#define CALL_LOGGER_TAG(kind, type) (Logger::KEYED_##type)
static void FillCache(Isolate* isolate, Handle<Code> code) {
Handle<UnseededNumberDictionary> dictionary =
@@ -417,15 +237,8 @@ static void FillCache(Isolate* isolate, Handle<Code> code) {
}
-Code* StubCache::FindCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind) {
- ExtraICState extra_state =
- CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
- CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT
- ? CONTEXTUAL : NOT_CONTEXTUAL);
- Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
+Code* StubCache::FindPreMonomorphicIC(Code::Kind kind, ExtraICState state) {
+ Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
UnseededNumberDictionary* dictionary =
isolate()->heap()->non_monomorphic_cache();
int entry = dictionary->FindEntry(isolate(), flags);
@@ -437,139 +250,69 @@ Code* StubCache::FindCallInitialize(int argc,
}
-Handle<Code> StubCache::ComputeCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind) {
- ExtraICState extra_state =
- CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
- CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT
- ? CONTEXTUAL : NOT_CONTEXTUAL);
- Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallInitialize(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallInitialize(int argc, RelocInfo::Mode mode) {
- return ComputeCallInitialize(argc, mode, Code::CALL_IC);
-}
-
-
-Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc) {
- return ComputeCallInitialize(argc, RelocInfo::CODE_TARGET,
- Code::KEYED_CALL_IC);
-}
-
-
-Handle<Code> StubCache::ComputeCallPreMonomorphic(
- int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallPreMonomorphic(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallNormal(int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, MONOMORPHIC, extra_state, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallNormal(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallArguments(int argc) {
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_CALL_IC, MEGAMORPHIC,
- kNoExtraICState, Code::NORMAL, argc);
+Handle<Code> StubCache::ComputeLoad(InlineCacheState ic_state,
+ ExtraICState extra_state) {
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallArguments(flags);
+ Handle<Code> code;
+ if (ic_state == UNINITIALIZED) {
+ code = compiler.CompileLoadInitialize(flags);
+ } else if (ic_state == PREMONOMORPHIC) {
+ code = compiler.CompileLoadPreMonomorphic(flags);
+ } else if (ic_state == MEGAMORPHIC) {
+ code = compiler.CompileLoadMegamorphic(flags);
+ } else {
+ UNREACHABLE();
+ }
FillCache(isolate_, code);
return code;
}
-Handle<Code> StubCache::ComputeCallMegamorphic(
- int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
- Code::NORMAL, argc);
+Handle<Code> StubCache::ComputeStore(InlineCacheState ic_state,
+ ExtraICState extra_state) {
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallMegamorphic(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallMiss(int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
- // and monomorphic stubs are not mixed up together in the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
- Code::NORMAL, argc, OWN_MAP);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+ Handle<Code> code;
+ if (ic_state == UNINITIALIZED) {
+ code = compiler.CompileStoreInitialize(flags);
+ } else if (ic_state == PREMONOMORPHIC) {
+ code = compiler.CompileStorePreMonomorphic(flags);
+ } else if (ic_state == GENERIC) {
+ code = compiler.CompileStoreGeneric(flags);
+ } else if (ic_state == MEGAMORPHIC) {
+ code = compiler.CompileStoreMegamorphic(flags);
+ } else {
+ UNREACHABLE();
+ }
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallMiss(flags);
FillCache(isolate_, code);
return code;
}
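ComputeLoad and ComputeStore now share one shape: compute the Code::Flags for the requested state, probe the non_monomorphic_cache, compile only on a miss, then FillCache. A minimal sketch of that pattern with hypothetical stand-ins (computeIC and compileFor are not V8 names):

    function computeIC(cache, flags, compileFor) {
      var cached = cache.get(flags);   // probe the dictionary keyed by flags
      if (cached !== undefined) return cached;
      var code = compileFor(flags);    // one branch per IC state:
      cache.set(flags, code);          // UNINITIALIZED, PREMONOMORPHIC,
      return code;                     // MEGAMORPHIC (+ GENERIC for stores)
    }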
Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
- CompareNilICStub& stub) {
+ CompareNilICStub* stub) {
Handle<String> name(isolate_->heap()->empty_string());
if (!receiver_map->is_shared()) {
Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC,
- stub.GetExtraICState());
+ stub->GetExtraICState());
if (!cached_ic.is_null()) return cached_ic;
}
- Handle<Code> ic = stub.GetCodeCopyFromTemplate(isolate_);
- ic->ReplaceNthObject(1, isolate_->heap()->meta_map(), *receiver_map);
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate_->factory()->meta_map(), receiver_map);
+ Handle<Code> ic = stub->GetCodeCopy(pattern);
if (!receiver_map->is_shared()) {
Map::UpdateCodeCache(receiver_map, name, ic);
@@ -590,7 +333,7 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
TypeHandleList types(receiver_maps->length());
for (int i = 0; i < receiver_maps->length(); i++) {
- types.Add(handle(Type::Class(receiver_maps->at(i)), isolate()));
+ types.Add(HeapType::Class(receiver_maps->at(i), isolate()));
}
CodeHandleList handlers(receiver_maps->length());
KeyedLoadStubCompiler compiler(isolate_);
@@ -606,24 +349,22 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
Handle<Code> StubCache::ComputePolymorphicIC(
+ Code::Kind kind,
TypeHandleList* types,
CodeHandleList* handlers,
int number_of_valid_types,
Handle<Name> name,
ExtraICState extra_ic_state) {
-
Handle<Code> handler = handlers->at(0);
- Code::Kind kind = handler->handler_kind();
Code::StubType type = number_of_valid_types == 1 ? handler->type()
: Code::NORMAL;
if (kind == Code::LOAD_IC) {
- LoadStubCompiler ic_compiler(isolate_);
+ LoadStubCompiler ic_compiler(isolate_, extra_ic_state);
return ic_compiler.CompilePolymorphicIC(
types, handlers, name, type, PROPERTY);
} else {
ASSERT(kind == Code::STORE_IC);
- StrictModeFlag strict_mode = StoreIC::GetStrictMode(extra_ic_state);
- StoreStubCompiler ic_compiler(isolate_, strict_mode);
+ StoreStubCompiler ic_compiler(isolate_, extra_ic_state);
return ic_compiler.CompilePolymorphicIC(
types, handlers, name, type, PROPERTY);
}
@@ -633,7 +374,7 @@ Handle<Code> StubCache::ComputePolymorphicIC(
Handle<Code> StubCache::ComputeStoreElementPolymorphic(
MapHandleList* receiver_maps,
KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
ASSERT(store_mode == STANDARD_STORE ||
store_mode == STORE_AND_GROW_NO_TRANSITION ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
@@ -654,46 +395,6 @@ Handle<Code> StubCache::ComputeStoreElementPolymorphic(
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
- Code::Kind kind) {
- // Extra IC state is irrelevant for debug break ICs. They jump to
- // the actual call ic to carry out the work.
- Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_BREAK,
- Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallDebugBreak(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
- Code::Kind kind) {
- // Extra IC state is irrelevant for debug break ICs. They jump to
- // the actual call ic to carry out the work.
- Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_PREPARE_STEP_IN,
- Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallDebugPrepareStepIn(flags);
- FillCache(isolate_, code);
- return code;
-}
-#endif
-
-
void StubCache::Clear() {
Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
for (int i = 0; i < kPrimaryTableSize; i++) {
@@ -754,27 +455,28 @@ void StubCache::CollectMatchingMaps(SmallMapList* types,
// StubCompiler implementation.
-RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
- JSObject* recv = JSObject::cast(args[0]);
- ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[1]);
+RUNTIME_FUNCTION(StoreCallbackProperty) {
+ JSObject* receiver = JSObject::cast(args[0]);
+ JSObject* holder = JSObject::cast(args[1]);
+ ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[2]);
Address setter_address = v8::ToCData<Address>(callback->setter());
v8::AccessorSetterCallback fun =
FUNCTION_CAST<v8::AccessorSetterCallback>(setter_address);
ASSERT(fun != NULL);
- ASSERT(callback->IsCompatibleReceiver(recv));
- Handle<Name> name = args.at<Name>(2);
- Handle<Object> value = args.at<Object>(3);
+ ASSERT(callback->IsCompatibleReceiver(receiver));
+ Handle<Name> name = args.at<Name>(3);
+ Handle<Object> value = args.at<Object>(4);
HandleScope scope(isolate);
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return *value;
Handle<String> str = Handle<String>::cast(name);
- LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
+ LOG(isolate, ApiNamedPropertyAccess("store", receiver, *name));
PropertyCallbackArguments
- custom_args(isolate, callback->data(), recv, recv);
+ custom_args(isolate, callback->data(), receiver, holder);
custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
}
@@ -786,7 +488,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
* Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't
* provide any value for the given name.
*/
-RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
+RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
ASSERT(args.length() == StubCache::kInterceptorArgsLength);
Handle<Name> name_handle =
args.at<Name>(StubCache::kInterceptorArgsNameIndex);
@@ -814,7 +516,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
HandleScope scope(isolate);
v8::Handle<v8::Value> r =
callback_args.Call(getter, v8::Utils::ToLocal(name));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
Handle<Object> result = v8::Utils::OpenHandle(*r);
result->VerifyApiCallResultType();
@@ -826,14 +528,14 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
}
-static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
+static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
// If the load is non-contextual, just return the undefined result.
- // Note that both keyed and non-keyed loads may end up here, so we
- // can't use either LoadIC or KeyedLoadIC constructors.
+ // Note that both keyed and non-keyed loads may end up here.
HandleScope scope(isolate);
- IC ic(IC::NO_EXTRA_FRAME, isolate);
- ASSERT(ic.IsLoadStub());
- if (!ic.SlowIsUndeclaredGlobal()) return isolate->heap()->undefined_value();
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ if (ic.contextual_mode() != CONTEXTUAL) {
+ return isolate->heap()->undefined_value();
+ }
// Throw a reference error.
Handle<Name> name_handle(name);
@@ -844,85 +546,32 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
}
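The rewritten helper throws only for CONTEXTUAL loads, i.e. a bare identifier resolved against the global object; a property load of a missing name simply yields undefined. The user-visible split, in plain JavaScript:

    var o = {};
    o.missing;  // undefined - non-contextual property load
    missing;    // ReferenceError: missing is not defined - contextual load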
-static Handle<Object> LoadWithInterceptor(Arguments* args,
- PropertyAttributes* attrs) {
- ASSERT(args->length() == StubCache::kInterceptorArgsLength);
- Handle<Name> name_handle =
- args->at<Name>(StubCache::kInterceptorArgsNameIndex);
- Handle<InterceptorInfo> interceptor_info =
- args->at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex);
- Handle<JSObject> receiver_handle =
- args->at<JSObject>(StubCache::kInterceptorArgsThisIndex);
- Handle<JSObject> holder_handle =
- args->at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
-
- Isolate* isolate = receiver_handle->GetIsolate();
-
- // TODO(rossberg): Support symbols in the API.
- if (name_handle->IsSymbol()) {
- return JSObject::GetPropertyPostInterceptor(
- holder_handle, receiver_handle, name_handle, attrs);
- }
- Handle<String> name = Handle<String>::cast(name_handle);
-
- Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetterCallback getter =
- FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
- ASSERT(getter != NULL);
-
- PropertyCallbackArguments callback_args(isolate,
- interceptor_info->data(),
- *receiver_handle,
- *holder_handle);
- {
- HandleScope scope(isolate);
- // Use the interceptor getter.
- v8::Handle<v8::Value> r =
- callback_args.Call(getter, v8::Utils::ToLocal(name));
- RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (!r.IsEmpty()) {
- *attrs = NONE;
- Handle<Object> result = v8::Utils::OpenHandle(*r);
- result->VerifyApiCallResultType();
- return scope.CloseAndEscape(result);
- }
- }
-
- Handle<Object> result = JSObject::GetPropertyPostInterceptor(
- holder_handle, receiver_handle, name_handle, attrs);
- return result;
-}
-
-
/**
* Loads a property with an interceptor performing post interceptor
* lookup if interceptor failed.
*/
-RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
- PropertyAttributes attr = NONE;
+RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
HandleScope scope(isolate);
- Handle<Object> result = LoadWithInterceptor(&args, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ ASSERT(args.length() == StubCache::kInterceptorArgsLength);
+ Handle<Name> name =
+ args.at<Name>(StubCache::kInterceptorArgsNameIndex);
+ Handle<JSObject> receiver =
+ args.at<JSObject>(StubCache::kInterceptorArgsThisIndex);
+ Handle<JSObject> holder =
+ args.at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
- // If the property is present, return it.
- if (attr != ABSENT) return *result;
- return ThrowReferenceError(isolate, Name::cast(args[0]));
-}
+ Handle<Object> result;
+ LookupIterator it(receiver, name, holder);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSObject::GetProperty(&it));
+ if (it.IsFound()) return *result;
-RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
- PropertyAttributes attr;
- HandleScope scope(isolate);
- Handle<Object> result = LoadWithInterceptor(&args, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- // This is call IC. In this case, we simply return the undefined result which
- // will lead to an exception when trying to invoke the result as a
- // function.
- return *result;
+ return ThrowReferenceError(isolate, Name::cast(args[0]));
}
-RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
+RUNTIME_FUNCTION(StoreInterceptorProperty) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
@@ -931,166 +580,100 @@ RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
Handle<Object> value = args.at<Object>(2);
ASSERT(receiver->HasNamedInterceptor());
PropertyAttributes attr = NONE;
- Handle<Object> result = JSObject::SetPropertyWithInterceptor(
- receiver, name, value, attr, ic.strict_mode());
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::SetPropertyWithInterceptor(
+ receiver, name, value, attr, ic.strict_mode()));
return *result;
}
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
- JSObject* receiver = JSObject::cast(args[0]);
+RUNTIME_FUNCTION(KeyedLoadPropertyWithInterceptor) {
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
ASSERT(args.smi_at(1) >= 0);
uint32_t index = args.smi_at(1);
- return receiver->GetElementWithInterceptor(receiver, index);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::GetElementWithInterceptor(receiver, receiver, index));
+ return *result;
}
-Handle<Code> StubCompiler::CompileCallInitialize(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateInitialize(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallInitialize");
- isolate()->counters()->call_initialize_stubs()->Increment();
+Handle<Code> StubCompiler::CompileLoadInitialize(Code::Flags flags) {
+ LoadIC::GenerateInitialize(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, *code));
+ CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- // The code of the PreMonomorphic stub is the same as the code
- // of the Initialized stub. They just differ on the code object flags.
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateInitialize(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
- isolate()->counters()->call_premonomorphic_stubs()->Increment();
+Handle<Code> StubCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
+ LoadIC::GeneratePreMonomorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, *code));
+ CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- // Call normal is always with a explict receiver.
- ASSERT(!CallIC::Contextual::decode(
- Code::ExtractExtraICStateFromFlags(flags)));
- CallIC::GenerateNormal(masm(), argc);
- } else {
- KeyedCallIC::GenerateNormal(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallNormal");
- isolate()->counters()->call_normal_stubs()->Increment();
+Handle<Code> StubCompiler::CompileLoadMegamorphic(Code::Flags flags) {
+ LoadIC::GenerateMegamorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, *code));
+ CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMegamorphic(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateMegamorphic(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMegamorphic");
- isolate()->counters()->call_megamorphic_stubs()->Increment();
+Handle<Code> StubCompiler::CompileStoreInitialize(Code::Flags flags) {
+ StoreIC::GenerateInitialize(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+ CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallArguments(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallArguments");
+Handle<Code> StubCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
+ StoreIC::GeneratePreMonomorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
- CALL_MEGAMORPHIC_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+ CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallMiss(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
+Handle<Code> StubCompiler::CompileStoreGeneric(Code::Flags flags) {
ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMiss(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateMiss(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMiss");
- isolate()->counters()->call_megamorphic_stubs()->Increment();
+ StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
+ StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode);
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MISS, *code));
+ CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
return code;
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Code> StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
- Debug::GenerateCallICDebugBreak(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugBreak");
+Handle<Code> StubCompiler::CompileStoreMegamorphic(Code::Flags flags) {
+ StoreIC::GenerateMegamorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
- CALL_DEBUG_BREAK_TAG),
- *code, code->arguments_count()));
+ CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
- // Use the same code for the the step in preparations as we do for the
- // miss case.
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- // For the debugger extra ic state is irrelevant.
- CallIC::GenerateMiss(masm(), argc, kNoExtraICState);
- } else {
- KeyedCallIC::GenerateMiss(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
- PROFILE(isolate(),
- CodeCreateEvent(
- CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
- *code,
- code->arguments_count()));
- return code;
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
#undef CALL_LOGGER_TAG
@@ -1113,7 +696,7 @@ Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
Handle<Name> name) {
return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
- ? GetCodeWithFlags(flags, *Handle<String>::cast(name)->ToCString())
+ ? GetCodeWithFlags(flags, Handle<String>::cast(name)->ToCString().get())
: GetCodeWithFlags(flags, NULL);
}
@@ -1121,97 +704,31 @@ Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
Handle<Name> name,
LookupResult* lookup) {
- holder->LocalLookupRealNamedProperty(*name, lookup);
+ holder->LookupOwnRealNamedProperty(name, lookup);
if (lookup->IsFound()) return;
if (holder->GetPrototype()->IsNull()) return;
- holder->GetPrototype()->Lookup(*name, lookup);
+ holder->GetPrototype()->Lookup(name, lookup);
}
#define __ ACCESS_MASM(masm())
-CallKind CallStubCompiler::call_kind() {
- return CallICBase::Contextual::decode(extra_state())
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
-}
-
-
-void CallStubCompiler::HandlerFrontendFooter(Label* miss) {
- __ bind(miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::GenerateJumpFunctionIgnoreReceiver(
- Handle<JSFunction> function) {
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind());
-}
-
-
-void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
- Handle<JSFunction> function) {
- PatchGlobalProxy(object);
- GenerateJumpFunctionIgnoreReceiver(function);
-}
-
-
-void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
- Register actual_closure,
- Handle<JSFunction> function) {
- PatchGlobalProxy(object);
- ParameterCount expected(function);
- __ InvokeFunction(actual_closure, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind());
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<Cell>::null(),
- function, Handle<String>::cast(name),
- Code::FAST);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, check, &miss);
- GenerateJumpFunction(object, function);
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
Register LoadStubCompiler::HandlerFrontendHeader(
- Handle<Type> type,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss) {
PrototypeCheckType check_type = CHECK_ALL_MAPS;
int function_index = -1;
- if (type->Is(Type::String())) {
+ if (type->Is(HeapType::String())) {
function_index = Context::STRING_FUNCTION_INDEX;
- } else if (type->Is(Type::Symbol())) {
+ } else if (type->Is(HeapType::Symbol())) {
function_index = Context::SYMBOL_FUNCTION_INDEX;
- } else if (type->Is(Type::Number())) {
+ } else if (type->Is(HeapType::Number())) {
function_index = Context::NUMBER_FUNCTION_INDEX;
- } else if (type->Is(Type::Boolean())) {
- // Booleans use the generic oddball map, so an additional check is needed to
- // ensure the receiver is really a boolean.
- GenerateBooleanCheck(object_reg, miss);
+ } else if (type->Is(HeapType::Boolean())) {
function_index = Context::BOOLEAN_FUNCTION_INDEX;
} else {
check_type = SKIP_RECEIVER;
@@ -1236,7 +753,7 @@ Register LoadStubCompiler::HandlerFrontendHeader(
// HandlerFrontend for store uses the name register. It has to be restored
// before a miss.
Register StoreStubCompiler::HandlerFrontendHeader(
- Handle<Type> type,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1248,13 +765,13 @@ Register StoreStubCompiler::HandlerFrontendHeader(
bool BaseLoadStoreStubCompiler::IncludesNumberType(TypeHandleList* types) {
for (int i = 0; i < types->length(); ++i) {
- if (types->at(i)->Is(Type::Number())) return true;
+ if (types->at(i)->Is(HeapType::Number())) return true;
}
return false;
}
-Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<Type> type,
+Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name) {
@@ -1268,7 +785,7 @@ Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<Type> type,
}
-void LoadStubCompiler::NonexistentHandlerFrontend(Handle<Type> type,
+void LoadStubCompiler::NonexistentHandlerFrontend(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name) {
Label miss;
@@ -1293,7 +810,7 @@ void LoadStubCompiler::NonexistentHandlerFrontend(Handle<Type> type,
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(last.is_null() ||
- last->property_dictionary()->FindEntry(*name) ==
+ last->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), &miss, holder, name,
scratch2(), scratch3());
@@ -1303,7 +820,7 @@ void LoadStubCompiler::NonexistentHandlerFrontend(Handle<Type> type,
// check that the global property cell is empty.
if (last_map->IsJSGlobalObjectMap()) {
Handle<JSGlobalObject> global = last.is_null()
- ? Handle<JSGlobalObject>::cast(type->AsConstant())
+ ? Handle<JSGlobalObject>::cast(type->AsConstant()->Value())
: Handle<JSGlobalObject>::cast(last);
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
@@ -1313,27 +830,21 @@ void LoadStubCompiler::NonexistentHandlerFrontend(Handle<Type> type,
Handle<Code> LoadStubCompiler::CompileLoadField(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
- PropertyIndex field,
+ FieldIndex field,
Representation representation) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(type, receiver(), holder, name, &miss);
-
+ Register reg = HandlerFrontend(type, receiver(), holder, name);
GenerateLoadField(reg, holder, field, representation);
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
Handle<Code> LoadStubCompiler::CompileLoadConstant(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> value) {
@@ -1346,7 +857,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(
Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
@@ -1360,22 +871,24 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
ASSERT(call_optimization.is_simple_api_call());
Handle<JSFunction> callback = call_optimization.constant_function();
CallbackHandlerFrontend(type, receiver(), holder, name, callback);
- GenerateLoadCallback(call_optimization);
-
+ Handle<Map> receiver_map = IC::TypeToMap(*type, isolate());
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver_map,
+ receiver(), scratch1(), false, 0, NULL);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
Handle<Code> LoadStubCompiler::CompileLoadInterceptor(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name) {
LookupResult lookup(isolate());
@@ -1398,7 +911,7 @@ void LoadStubCompiler::GenerateLoadPostInterceptor(
LookupResult* lookup) {
Handle<JSObject> holder(lookup->holder());
if (lookup->IsField()) {
- PropertyIndex field = lookup->GetFieldIndex();
+ FieldIndex field = lookup->GetFieldIndex();
if (interceptor_holder.is_identical_to(holder)) {
GenerateLoadField(
interceptor_reg, holder, field, lookup->representation());
@@ -1428,7 +941,7 @@ void LoadStubCompiler::GenerateLoadPostInterceptor(
Handle<Code> BaseLoadStoreStubCompiler::CompileMonomorphicIC(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<Code> handler,
Handle<Name> name) {
TypeHandleList types(1);
@@ -1441,12 +954,12 @@ Handle<Code> BaseLoadStoreStubCompiler::CompileMonomorphicIC(
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<JSFunction> getter) {
HandlerFrontend(type, receiver(), holder, name);
- GenerateLoadViaGetter(masm(), receiver(), getter);
+ GenerateLoadViaGetter(masm(), type, receiver(), getter);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -1534,15 +1047,55 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
}
+Handle<Code> StoreStubCompiler::CompileStoreArrayLength(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except external arrays, i.e. anything with elements
+ // of FixedArray type). The value must be a number; only smis are accepted
+ // here, as the most common case.
+ Label miss;
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value(), &miss);
+
+ // Generate tail call to StoreIC_ArrayLength.
+ GenerateStoreArrayLength();
+
+ // Handle miss case.
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
Handle<JSFunction> setter) {
+ Handle<HeapType> type = IC::CurrentTypeOf(object, isolate());
+ HandlerFrontend(type, receiver(), holder, name);
+ GenerateStoreViaSetter(masm(), type, receiver(), setter);
+
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
receiver(), holder, name);
- GenerateStoreViaSetter(masm(), setter);
-
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, handle(object->map()),
+ receiver(), scratch1(), true, 1, values);
+ // Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1551,15 +1104,17 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
Handle<Map> receiver_map) {
ElementsKind elements_kind = receiver_map->elements_kind();
if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
Handle<Code> stub = KeyedLoadFastElementStub(
+ isolate(),
receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode(isolate());
+ elements_kind).GetCode();
__ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
} else {
Handle<Code> stub = FLAG_compiled_keyed_dictionary_loads
- ? KeyedLoadDictionaryElementStub().GetCode(isolate())
- : KeyedLoadDictionaryElementPlatformStub().GetCode(isolate());
+ ? KeyedLoadDictionaryElementStub(isolate()).GetCode()
+ : KeyedLoadDictionaryElementPlatformStub(isolate()).GetCode();
__ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
}
@@ -1576,15 +1131,18 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
stub = KeyedStoreFastElementStub(
+ isolate(),
is_jsarray,
elements_kind,
- store_mode()).GetCode(isolate());
+ store_mode()).GetCode();
} else {
- stub = KeyedStoreElementStub(is_jsarray,
+ stub = KeyedStoreElementStub(isolate(),
+ is_jsarray,
elements_kind,
- store_mode()).GetCode(isolate());
+ store_mode()).GetCode();
}
__ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
@@ -1641,6 +1199,7 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
InlineCacheState state) {
Code::Flags flags = Code::ComputeFlags(kind, state, extra_state(), type);
Handle<Code> code = GetCodeWithFlags(flags, name);
+ IC::RegisterWeakMapDependency(code);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
return code;
@@ -1650,8 +1209,8 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name) {
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state(), type, kind, cache_holder_);
+ ASSERT_EQ(kNoExtraICState, extra_state());
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder_);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
@@ -1674,13 +1233,18 @@ void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps,
ElementsKind elements_kind = receiver_map->elements_kind();
if (IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind)) {
+ IsExternalArrayElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind)) {
cached_stub =
- KeyedLoadFastElementStub(is_js_array,
- elements_kind).GetCode(isolate());
+ KeyedLoadFastElementStub(isolate(),
+ is_js_array,
+ elements_kind).GetCode();
+ } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments();
} else {
ASSERT(elements_kind == DICTIONARY_ELEMENTS);
- cached_stub = KeyedLoadDictionaryElementStub().GetCode(isolate());
+ cached_stub =
+ KeyedLoadDictionaryElementStub(isolate()).GetCode();
}
}
@@ -1709,24 +1273,28 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
ElementsKind elements_kind = receiver_map->elements_kind();
if (!transitioned_map.is_null()) {
cached_stub = ElementsTransitionAndStoreStub(
+ isolate(),
elements_kind,
transitioned_map->elements_kind(),
is_js_array,
- store_mode()).GetCode(isolate());
+ store_mode()).GetCode();
} else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
} else {
if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
cached_stub = KeyedStoreFastElementStub(
+ isolate(),
is_js_array,
elements_kind,
- store_mode()).GetCode(isolate());
+ store_mode()).GetCode();
} else {
cached_stub = KeyedStoreElementStub(
+ isolate(),
is_js_array,
elements_kind,
- store_mode()).GetCode(isolate());
+ store_mode()).GetCode();
}
}
ASSERT(!cached_stub.is_null());
@@ -1748,95 +1316,6 @@ void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
}
-CallStubCompiler::CallStubCompiler(Isolate* isolate,
- int argc,
- Code::Kind kind,
- ExtraICState extra_state,
- InlineCacheHolderFlag cache_holder)
- : StubCompiler(isolate, extra_state),
- arguments_(argc),
- kind_(kind),
- cache_holder_(cache_holder) {
-}
-
-
-bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
- if (function->shared()->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function->shared()->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
-
- CallOptimization optimization(function);
- return optimization.is_simple_api_call();
-}
-
-
-bool CallStubCompiler::CanBeCached(Handle<JSFunction> function) {
- if (function->shared()->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function->shared()->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) if (id == k##name) return false;
- SITE_SPECIFIC_CALL_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
-
- return true;
-}
-
-
-Handle<Code> CallStubCompiler::CompileCustomCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> fname,
- Code::StubType type) {
- ASSERT(HasCustomCallGenerator(function));
-
- if (function->shared()->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function->shared()->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) \
- if (id == k##name) { \
- return CallStubCompiler::Compile##name##Call(object, \
- holder, \
- cell, \
- function, \
- fname, \
- type); \
- }
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
- CallOptimization optimization(function);
- ASSERT(optimization.is_simple_api_call());
- return CompileFastApiCall(optimization,
- object,
- holder,
- cell,
- function,
- fname);
-}
-
-
-Handle<Code> CallStubCompiler::GetCode(Code::StubType type,
- Handle<Name> name) {
- int argc = arguments_.immediate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind_, extra_state(), cache_holder_, type, argc);
- return GetCodeWithFlags(flags, name);
-}
-
-
-Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
- Handle<String> function_name;
- if (function->shared()->name()->IsString()) {
- function_name = Handle<String>(String::cast(function->shared()->name()));
- }
- return GetCode(Code::FAST, function_name);
-}
-
-
CallOptimization::CallOptimization(LookupResult* lookup) {
if (lookup->IsFound() &&
lookup->IsCacheable() &&
@@ -1854,20 +1333,63 @@ CallOptimization::CallOptimization(Handle<JSFunction> function) {
}
-int CallOptimization::GetPrototypeDepthOfExpectedType(
- Handle<JSObject> object,
- Handle<JSObject> holder) const {
+Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
+ Handle<Map> object_map,
+ HolderLookup* holder_lookup) const {
+ ASSERT(is_simple_api_call());
+ if (!object_map->IsJSObjectMap()) {
+ *holder_lookup = kHolderNotFound;
+ return Handle<JSObject>::null();
+ }
+ if (expected_receiver_type_.is_null() ||
+ expected_receiver_type_->IsTemplateFor(*object_map)) {
+ *holder_lookup = kHolderIsReceiver;
+ return Handle<JSObject>::null();
+ }
+ while (true) {
+ if (!object_map->prototype()->IsJSObject()) break;
+ Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
+ if (!prototype->map()->is_hidden_prototype()) break;
+ object_map = handle(prototype->map());
+ if (expected_receiver_type_->IsTemplateFor(*object_map)) {
+ *holder_lookup = kHolderFound;
+ return prototype;
+ }
+ }
+ *holder_lookup = kHolderNotFound;
+ return Handle<JSObject>::null();
+}
+
+
+bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
+ Handle<JSObject> holder) const {
ASSERT(is_simple_api_call());
- if (expected_receiver_type_.is_null()) return 0;
- int depth = 0;
- while (!object.is_identical_to(holder)) {
- if (expected_receiver_type_->IsTemplateFor(object->map())) return depth;
- object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
- if (!object->map()->is_hidden_prototype()) return kInvalidProtoDepth;
- ++depth;
+ if (!receiver->IsJSObject()) return false;
+ Handle<Map> map(JSObject::cast(*receiver)->map());
+ HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ LookupHolderOfExpectedType(map, &holder_lookup);
+ switch (holder_lookup) {
+ case kHolderNotFound:
+ return false;
+ case kHolderIsReceiver:
+ return true;
+ case kHolderFound:
+ if (api_holder.is_identical_to(holder)) return true;
+ // Check if holder is in prototype chain of api_holder.
+ {
+ JSObject* object = *api_holder;
+ while (true) {
+ Object* prototype = object->map()->prototype();
+ if (!prototype->IsJSObject()) return false;
+ if (prototype == *holder) return true;
+ object = JSObject::cast(prototype);
+ }
+ }
+ break;
}
- if (expected_receiver_type_->IsTemplateFor(holder->map())) return depth;
- return kInvalidProtoDepth;
+ UNREACHABLE();
+ return false;
}
diff --git a/chromium/v8/src/stub-cache.h b/chromium/v8/src/stub-cache.h
index ebf0bd3c917..9f2a87b4683 100644
--- a/chromium/v8/src/stub-cache.h
+++ b/chromium/v8/src/stub-cache.h
@@ -1,40 +1,17 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_
-#include "allocation.h"
-#include "arguments.h"
-#include "code-stubs.h"
-#include "ic-inl.h"
-#include "macro-assembler.h"
-#include "objects.h"
-#include "zone-inl.h"
+#include "src/allocation.h"
+#include "src/arguments.h"
+#include "src/code-stubs.h"
+#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -89,103 +66,50 @@ class StubCache {
Handle<Code> FindHandler(Handle<Name> name,
Handle<Map> map,
Code::Kind kind,
- InlineCacheHolderFlag cache_holder = OWN_MAP);
+ InlineCacheHolderFlag cache_holder,
+ Code::StubType type);
- Handle<Code> ComputeMonomorphicIC(Handle<Name> name,
- Handle<Type> type,
+ Handle<Code> ComputeMonomorphicIC(Code::Kind kind,
+ Handle<Name> name,
+ Handle<HeapType> type,
Handle<Code> handler,
ExtraICState extra_ic_state);
- Handle<Code> ComputeLoadNonexistent(Handle<Name> name, Handle<Type> type);
+ Handle<Code> ComputeLoadNonexistent(Handle<Name> name, Handle<HeapType> type);
Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
KeyedAccessStoreMode store_mode);
- Handle<Code> ComputeCallField(int argc,
- Code::Kind,
- ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- PropertyIndex index);
-
- Handle<Code> ComputeCallConstant(int argc,
- Code::Kind,
- ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function);
-
- Handle<Code> ComputeCallInterceptor(int argc,
- Code::Kind,
- ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder);
-
- Handle<Code> ComputeCallGlobal(int argc,
- Code::Kind,
- ExtraICState extra_state,
- Handle<Name> name,
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function);
-
// ---
- Handle<Code> ComputeCallInitialize(int argc, RelocInfo::Mode mode);
-
- Handle<Code> ComputeKeyedCallInitialize(int argc);
-
- Handle<Code> ComputeCallPreMonomorphic(int argc,
- Code::Kind kind,
- ExtraICState extra_state);
-
- Handle<Code> ComputeCallNormal(int argc,
- Code::Kind kind,
- ExtraICState state);
-
- Handle<Code> ComputeCallArguments(int argc);
-
- Handle<Code> ComputeCallMegamorphic(int argc,
- Code::Kind kind,
- ExtraICState state);
-
- Handle<Code> ComputeCallMiss(int argc,
- Code::Kind kind,
- ExtraICState state);
+ Handle<Code> ComputeLoad(InlineCacheState ic_state, ExtraICState extra_state);
+ Handle<Code> ComputeStore(InlineCacheState ic_state,
+ ExtraICState extra_state);
// ---
Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
- CompareNilICStub& stub);
+ CompareNilICStub* stub);
// ---
Handle<Code> ComputeLoadElementPolymorphic(MapHandleList* receiver_maps);
Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps,
KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
- Handle<Code> ComputePolymorphicIC(TypeHandleList* types,
+ Handle<Code> ComputePolymorphicIC(Code::Kind kind,
+ TypeHandleList* types,
CodeHandleList* handlers,
int number_of_valid_maps,
Handle<Name> name,
ExtraICState extra_ic_state);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
- Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind);
-
- Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
-#endif
+ Code* FindPreMonomorphicIC(Code::Kind kind, ExtraICState extra_ic_state);
// Update cache for entry hash(name, map).
Code* Set(Name* name, Map* map, Code* code);
@@ -262,10 +186,6 @@ class StubCache {
private:
explicit StubCache(Isolate* isolate);
- Handle<Code> ComputeCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind);
-
// The stub cache has a primary and secondary level. The two levels have
// different hashing algorithms in order to avoid simultaneous collisions
// in both caches. Unlike a probing strategy (quadratic or otherwise) the
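
As an illustration of that scheme, a two-level lookup with differently-mixed hashes might look like the sketch below; the table size and hash mixing are invented for the example and are not V8's actual probe functions:

    // Minimal sketch of a two-level (name, map) -> code cache. A pair that
    // collides in the primary table is probed in the secondary table under a
    // different hash, so one colliding pair cannot evict it from both levels.
    #include <cstdint>

    struct Entry { uint32_t name; uint32_t map; void* code; };
    static const int kTableSize = 2048;  // invented size, power of two
    static Entry primary[kTableSize];
    static Entry secondary[kTableSize];

    static int PrimaryIndex(uint32_t name, uint32_t map) {
      return (name + map) & (kTableSize - 1);  // additive mix
    }
    static int SecondaryIndex(uint32_t name, uint32_t map) {
      // Multiplicative mix so collisions differ from the primary level.
      return (((name ^ map) * 2654435761u) >> 21) & (kTableSize - 1);
    }

    static void* Probe(uint32_t name, uint32_t map) {
      const Entry& p = primary[PrimaryIndex(name, map)];
      if (p.name == name && p.map == map) return p.code;
      const Entry& s = secondary[SecondaryIndex(name, map)];
      if (s.name == name && s.map == map) return s.code;
      return 0;  // miss: fall through to the IC miss handler
    }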
@@ -345,16 +265,14 @@ class StubCache {
// Support functions for IC stubs for callbacks.
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty);
+DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty);
// Support functions for IC stubs for interceptors.
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
+DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly);
+DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor);
+DECLARE_RUNTIME_FUNCTION(StoreInterceptorProperty);
+DECLARE_RUNTIME_FUNCTION(KeyedLoadPropertyWithInterceptor);
enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
@@ -367,21 +285,16 @@ class StubCompiler BASE_EMBEDDED {
explicit StubCompiler(Isolate* isolate,
ExtraICState extra_ic_state = kNoExtraICState)
: isolate_(isolate), extra_ic_state_(extra_ic_state),
- masm_(isolate, NULL, 256), failure_(NULL) { }
-
- // Functions to compile either CallIC or KeyedCallIC. The specific kind
- // is extracted from the code flags.
- Handle<Code> CompileCallInitialize(Code::Flags flags);
- Handle<Code> CompileCallPreMonomorphic(Code::Flags flags);
- Handle<Code> CompileCallNormal(Code::Flags flags);
- Handle<Code> CompileCallMegamorphic(Code::Flags flags);
- Handle<Code> CompileCallArguments(Code::Flags flags);
- Handle<Code> CompileCallMiss(Code::Flags flags);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Handle<Code> CompileCallDebugBreak(Code::Flags flags);
- Handle<Code> CompileCallDebugPrepareStepIn(Code::Flags flags);
-#endif
+ masm_(isolate, NULL, 256) { }
+
+ Handle<Code> CompileLoadInitialize(Code::Flags flags);
+ Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileLoadMegamorphic(Code::Flags flags);
+
+ Handle<Code> CompileStoreInitialize(Code::Flags flags);
+ Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileStoreGeneric(Code::Flags flags);
+ Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
// Static functions for generating parts of stubs.
static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
@@ -423,12 +336,6 @@ class StubCompiler BASE_EMBEDDED {
Register scratch,
Label* miss_label);
- static void GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -458,34 +365,24 @@ class StubCompiler BASE_EMBEDDED {
// register is only clobbered if it is the same as the holder register. The
// function returns a register containing the holder - either object_reg or
// holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [esp + kPointerSize].
- Register CheckPrototypes(Handle<Type> type,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<Name> name,
- Label* miss,
- PrototypeCheckType check = CHECK_ALL_MAPS) {
- return CheckPrototypes(type, object_reg, holder, holder_reg, scratch1,
- scratch2, name, kInvalidProtoDepth, miss, check);
- }
-
- Register CheckPrototypes(Handle<Type> type,
+ Register CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check = CHECK_ALL_MAPS);
- void GenerateBooleanCheck(Register object, Label* miss);
+ static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch,
+ bool is_store,
+ int argc,
+ Register* values);
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
@@ -494,7 +391,6 @@ class StubCompiler BASE_EMBEDDED {
ExtraICState extra_state() { return extra_ic_state_; }
MacroAssembler* masm() { return &masm_; }
- void set_failure(Failure* failure) { failure_ = failure; }
static void LookupPostInterceptor(Handle<JSObject> holder,
Handle<Name> name,
@@ -510,7 +406,6 @@ class StubCompiler BASE_EMBEDDED {
Isolate* isolate_;
const ExtraICState extra_ic_state_;
MacroAssembler masm_;
- Failure* failure_;
};
@@ -530,7 +425,7 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
}
virtual ~BaseLoadStoreStubCompiler() { }
- Handle<Code> CompileMonomorphicIC(Handle<Type> type,
+ Handle<Code> CompileMonomorphicIC(Handle<HeapType> type,
Handle<Code> handler,
Handle<Name> name);
@@ -540,10 +435,6 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
Code::StubType type,
IcCheckType check);
- virtual void GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) { }
-
static Builtins::Name MissBuiltin(Code::Kind kind) {
switch (kind) {
case Code::LOAD_IC: return Builtins::kLoadIC_Miss;
@@ -556,7 +447,7 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
}
protected:
- virtual Register HandlerFrontendHeader(Handle<Type> type,
+ virtual Register HandlerFrontendHeader(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -564,7 +455,7 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss) = 0;
- Register HandlerFrontend(Handle<Type> type,
+ Register HandlerFrontend(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name);
@@ -597,11 +488,11 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
}
void JitEvent(Handle<Name> name, Handle<Code> code);
- virtual Register receiver() = 0;
- virtual Register name() = 0;
- virtual Register scratch1() = 0;
- virtual Register scratch2() = 0;
- virtual Register scratch3() = 0;
+ Register receiver() { return registers_[0]; }
+ Register name() { return registers_[1]; }
+ Register scratch1() { return registers_[2]; }
+ Register scratch2() { return registers_[3]; }
+ Register scratch3() { return registers_[4]; }
void InitializeRegisters();
@@ -623,54 +514,62 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
cache_holder) { }
virtual ~LoadStubCompiler() { }
- Handle<Code> CompileLoadField(Handle<Type> type,
+ Handle<Code> CompileLoadField(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
- PropertyIndex index,
+ FieldIndex index,
Representation representation);
- Handle<Code> CompileLoadCallback(Handle<Type> type,
+ Handle<Code> CompileLoadCallback(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback);
- Handle<Code> CompileLoadCallback(Handle<Type> type,
+ Handle<Code> CompileLoadCallback(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization);
- Handle<Code> CompileLoadConstant(Handle<Type> type,
+ Handle<Code> CompileLoadConstant(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> value);
- Handle<Code> CompileLoadInterceptor(Handle<Type> type,
+ Handle<Code> CompileLoadInterceptor(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name);
- Handle<Code> CompileLoadViaGetter(Handle<Type> type,
+ Handle<Code> CompileLoadViaGetter(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<JSFunction> getter);
static void GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter);
- Handle<Code> CompileLoadNonexistent(Handle<Type> type,
+ static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
+ GenerateLoadViaGetter(
+ masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>());
+ }
+
+ Handle<Code> CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name);
- Handle<Code> CompileLoadGlobal(Handle<Type> type,
+ Handle<Code> CompileLoadGlobal(Handle<HeapType> type,
Handle<GlobalObject> holder,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete);
- static Register* registers();
-
protected:
- virtual Register HandlerFrontendHeader(Handle<Type> type,
+ ContextualMode contextual_mode() {
+ return LoadIC::GetContextualMode(extra_state());
+ }
+
+ virtual Register HandlerFrontendHeader(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -678,23 +577,24 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss);
- Register CallbackHandlerFrontend(Handle<Type> type,
+ Register CallbackHandlerFrontend(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> callback);
- void NonexistentHandlerFrontend(Handle<Type> type,
+ void NonexistentHandlerFrontend(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name);
void GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex field,
+ FieldIndex field,
Representation representation);
void GenerateLoadConstant(Handle<Object> value);
void GenerateLoadCallback(Register reg,
Handle<ExecutableAccessorInfo> callback);
- void GenerateLoadCallback(const CallOptimization& call_optimization);
+ void GenerateLoadCallback(const CallOptimization& call_optimization,
+ Handle<Map> receiver_map);
void GenerateLoadInterceptor(Register holder_reg,
Handle<Object> object,
Handle<JSObject> holder,
@@ -705,12 +605,10 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Name> name,
LookupResult* lookup);
- virtual Register receiver() { return registers_[0]; }
- virtual Register name() { return registers_[1]; }
- virtual Register scratch1() { return registers_[2]; }
- virtual Register scratch2() { return registers_[3]; }
- virtual Register scratch3() { return registers_[4]; }
+ private:
+ static Register* registers();
Register scratch4() { return registers_[5]; }
+ friend class BaseLoadStoreStubCompiler;
};
@@ -729,13 +627,8 @@ class KeyedLoadStubCompiler: public LoadStubCompiler {
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
- protected:
- static Register* registers();
-
private:
- virtual void GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss);
+ static Register* registers();
friend class BaseLoadStoreStubCompiler;
};
@@ -758,6 +651,12 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
LookupResult* lookup,
Handle<Name> name);
+ Handle<Code> CompileStoreArrayLength(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name);
+
+ void GenerateStoreArrayLength();
+
void GenerateNegativeHolderLookup(MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -799,8 +698,15 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
const CallOptimization& call_optimization);
static void GenerateStoreViaSetter(MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter);
+ static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
+ GenerateStoreViaSetter(
+ masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>());
+ }
+
Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
@@ -819,7 +725,7 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
}
protected:
- virtual Register HandlerFrontendHeader(Handle<Type> type,
+ virtual Register HandlerFrontendHeader(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -830,17 +736,9 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
Label* label,
Handle<Name> name);
- virtual Register receiver() { return registers_[0]; }
- virtual Register name() { return registers_[1]; }
- Register value() { return registers_[2]; }
- virtual Register scratch1() { return registers_[3]; }
- virtual Register scratch2() { return registers_[4]; }
- virtual Register scratch3() { return registers_[5]; }
-
- protected:
- static Register* registers();
-
private:
+ static Register* registers();
+ static Register value();
friend class BaseLoadStoreStubCompiler;
};
@@ -861,152 +759,19 @@ class KeyedStoreStubCompiler: public StoreStubCompiler {
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
- protected:
+ private:
static Register* registers();
KeyedAccessStoreMode store_mode() {
return KeyedStoreIC::GetKeyedAccessStoreMode(extra_state());
}
- private:
- Register transition_map() {
- return registers()[3];
- }
+ Register transition_map() { return scratch1(); }
- virtual void GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss);
friend class BaseLoadStoreStubCompiler;
};
-// Subset of FUNCTIONS_WITH_ID_LIST with custom constant/global call
-// IC stubs.
-#define CUSTOM_CALL_IC_GENERATORS(V) \
- V(ArrayPush) \
- V(ArrayPop) \
- V(StringCharCodeAt) \
- V(StringCharAt) \
- V(StringFromCharCode) \
- V(MathFloor) \
- V(MathAbs) \
- V(ArrayCode)
-
-
-#define SITE_SPECIFIC_CALL_GENERATORS(V) \
- V(ArrayCode)
-
-
-class CallStubCompiler: public StubCompiler {
- public:
- CallStubCompiler(Isolate* isolate,
- int argc,
- Code::Kind kind,
- ExtraICState extra_state,
- InlineCacheHolderFlag cache_holder = OWN_MAP);
-
- Handle<Code> CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name);
-
- // Patch the global proxy over the global object if the global object is the
- // receiver.
- void PatchGlobalProxy(Handle<Object> object);
-
- // Returns the register containing the holder of |name|.
- Register HandlerFrontendHeader(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* miss);
- void HandlerFrontendFooter(Label* miss);
-
- void GenerateJumpFunctionIgnoreReceiver(Handle<JSFunction> function);
- void GenerateJumpFunction(Handle<Object> object,
- Handle<JSFunction> function);
- void GenerateJumpFunction(Handle<Object> object,
- Register function,
- Label* miss);
- // Use to call |actual_closure|, a closure with the same shared function info
- // as |function|.
- void GenerateJumpFunction(Handle<Object> object,
- Register actual_closure,
- Handle<JSFunction> function);
-
- Handle<Code> CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function);
-
- Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name);
-
- Handle<Code> CompileCallGlobal(Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name);
-
- static bool HasCustomCallGenerator(Handle<JSFunction> function);
- static bool CanBeCached(Handle<JSFunction> function);
-
- private:
- // Compiles a custom call constant/global IC. For constant calls cell is
- // NULL. Returns an empty handle if there is no custom call code for the
- // given function.
- Handle<Code> CompileCustomCall(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type);
-
-#define DECLARE_CALL_GENERATOR(name) \
- Handle<Code> Compile##name##Call(Handle<Object> object, \
- Handle<JSObject> holder, \
- Handle<Cell> cell, \
- Handle<JSFunction> function, \
- Handle<String> fname, \
- Code::StubType type);
- CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
-#undef DECLARE_CALL_GENERATOR
-
- Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name);
-
- CallKind call_kind();
-
- Handle<Code> GetCode(Code::StubType type, Handle<Name> name);
- Handle<Code> GetCode(Handle<JSFunction> function);
-
- const ParameterCount& arguments() { return arguments_; }
-
- void GenerateNameCheck(Handle<Name> name, Label* miss);
-
- // Generates code to load the function from the cell checking that
- // it still contains the same function.
- void GenerateLoadFunctionFromCell(Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss);
-
- void GenerateFunctionCheck(Register function, Register scratch, Label* miss);
-
- // Generates a jump to CallIC miss stub.
- void GenerateMissBranch();
-
- const ParameterCount arguments_;
- const Code::Kind kind_;
- const InlineCacheHolderFlag cache_holder_;
-};
-
-
// Holds information about possible function call optimizations.
class CallOptimization BASE_EMBEDDED {
public:
@@ -1037,16 +802,18 @@ class CallOptimization BASE_EMBEDDED {
return api_call_info_;
}
- // Returns the depth of the object having the expected type in the
- // prototype chain between the two arguments.
- int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
- Handle<JSObject> holder) const;
+ enum HolderLookup {
+ kHolderNotFound,
+ kHolderIsReceiver,
+ kHolderFound
+ };
+ Handle<JSObject> LookupHolderOfExpectedType(
+ Handle<Map> receiver_map,
+ HolderLookup* holder_lookup) const;
- bool IsCompatibleReceiver(Object* receiver) {
- ASSERT(is_simple_api_call());
- if (expected_receiver_type_.is_null()) return true;
- return expected_receiver_type_->IsTemplateFor(receiver);
- }
+ // Check if the api holder is between the receiver and the holder.
+ bool IsCompatibleReceiver(Handle<Object> receiver,
+ Handle<JSObject> holder) const;
private:
void Initialize(Handle<JSFunction> function);
diff --git a/chromium/v8/src/sweeper-thread.cc b/chromium/v8/src/sweeper-thread.cc
index 6f3baed11f8..ea2553dfaa4 100644
--- a/chromium/v8/src/sweeper-thread.cc
+++ b/chromium/v8/src/sweeper-thread.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "sweeper-thread.h"
-
-#include "v8.h"
-
-#include "isolate.h"
-#include "v8threads.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/sweeper-thread.h"
+
+#include "src/v8.h"
+
+#include "src/isolate.h"
+#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -44,13 +21,9 @@ SweeperThread::SweeperThread(Isolate* isolate)
collector_(heap_->mark_compact_collector()),
start_sweeping_semaphore_(0),
end_sweeping_semaphore_(0),
- stop_semaphore_(0),
- free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
- free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
- private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
- private_free_list_old_pointer_space_(
- heap_->paged_space(OLD_POINTER_SPACE)) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+ stop_semaphore_(0) {
+ ASSERT(!FLAG_job_based_sweeping);
+ base::NoBarrier_Store(&stop_thread_, static_cast<base::AtomicWord>(false));
}
@@ -63,34 +36,20 @@ void SweeperThread::Run() {
while (true) {
start_sweeping_semaphore_.Wait();
- if (Acquire_Load(&stop_thread_)) {
+ if (base::Acquire_Load(&stop_thread_)) {
stop_semaphore_.Signal();
return;
}
- collector_->SweepInParallel(heap_->old_data_space(),
- &private_free_list_old_data_space_,
- &free_list_old_data_space_);
- collector_->SweepInParallel(heap_->old_pointer_space(),
- &private_free_list_old_pointer_space_,
- &free_list_old_pointer_space_);
+ collector_->SweepInParallel(heap_->old_data_space());
+ collector_->SweepInParallel(heap_->old_pointer_space());
end_sweeping_semaphore_.Signal();
}
}
-intptr_t SweeperThread::StealMemory(PagedSpace* space) {
- if (space->identity() == OLD_POINTER_SPACE) {
- return space->free_list()->Concatenate(&free_list_old_pointer_space_);
- } else if (space->identity() == OLD_DATA_SPACE) {
- return space->free_list()->Concatenate(&free_list_old_data_space_);
- }
- return 0;
-}
-
-
void SweeperThread::Stop() {
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(true));
start_sweeping_semaphore_.Signal();
stop_semaphore_.Wait();
Join();
@@ -107,6 +66,15 @@ void SweeperThread::WaitForSweeperThread() {
}
+bool SweeperThread::SweepingCompleted() {
+ bool value = end_sweeping_semaphore_.WaitFor(TimeDelta::FromSeconds(0));
+ if (value) {
+ end_sweeping_semaphore_.Signal();
+ }
+ return value;
+}
+
+
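
The WaitFor-zero-then-Signal pattern in SweepingCompleted is a non-blocking poll that leaves the semaphore state untouched when it was already signalled. The same idiom in standalone form, sketched with C++20 semaphores instead of V8's wrapper:

    #include <semaphore>

    // Returns true once `done` has been signalled, without consuming the
    // signal: a successful try_acquire takes the count, so it is released
    // straight back for any other waiter.
    bool PollCompleted(std::binary_semaphore& done) {
      if (!done.try_acquire()) return false;
      done.release();
      return true;
    }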
int SweeperThread::NumberOfThreads(int max_available) {
if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0;
if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads;
diff --git a/chromium/v8/src/sweeper-thread.h b/chromium/v8/src/sweeper-thread.h
index 96255a0972a..02cace66bf2 100644
--- a/chromium/v8/src/sweeper-thread.h
+++ b/chromium/v8/src/sweeper-thread.h
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_SWEEPER_THREAD_H_
#define V8_SWEEPER_THREAD_H_
-#include "atomicops.h"
-#include "flags.h"
-#include "platform.h"
-#include "v8utils.h"
+#include "src/base/atomicops.h"
+#include "src/flags.h"
+#include "src/platform.h"
+#include "src/utils.h"
-#include "spaces.h"
+#include "src/spaces.h"
-#include "heap.h"
+#include "src/heap.h"
namespace v8 {
namespace internal {
@@ -49,7 +26,7 @@ class SweeperThread : public Thread {
void Stop();
void StartSweeping();
void WaitForSweeperThread();
- intptr_t StealMemory(PagedSpace* space);
+ bool SweepingCompleted();
static int NumberOfThreads(int max_available);
@@ -60,11 +37,7 @@ class SweeperThread : public Thread {
Semaphore start_sweeping_semaphore_;
Semaphore end_sweeping_semaphore_;
Semaphore stop_semaphore_;
- FreeList free_list_old_data_space_;
- FreeList free_list_old_pointer_space_;
- FreeList private_free_list_old_data_space_;
- FreeList private_free_list_old_pointer_space_;
- volatile AtomicWord stop_thread_;
+ volatile base::AtomicWord stop_thread_;
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/symbol.js b/chromium/v8/src/symbol.js
index 050e7d918a0..1c483020206 100644
--- a/chromium/v8/src/symbol.js
+++ b/chromium/v8/src/symbol.js
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
"use strict";
@@ -36,48 +13,107 @@ var $Symbol = global.Symbol;
// -------------------------------------------------------------------
function SymbolConstructor(x) {
- var value =
- IS_SYMBOL(x) ? x : %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
- } else {
- return value;
+ throw MakeTypeError('not_constructor', ["Symbol"]);
}
+ // NOTE: Passing in a Symbol value will throw on ToString().
+ return %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
}
-function SymbolGetName() {
- var symbol = IS_SYMBOL_WRAPPER(this) ? %_ValueOf(this) : this;
- if (!IS_SYMBOL(symbol)) {
+
+function SymbolToString() {
+ if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
throw MakeTypeError(
- 'incompatible_method_receiver', ["Symbol.prototype.name", this]);
+ 'incompatible_method_receiver', ["Symbol.prototype.toString", this]);
}
- return %SymbolName(symbol);
+ var description = %SymbolDescription(%_ValueOf(this));
+ return "Symbol(" + (IS_UNDEFINED(description) ? "" : description) + ")";
}
-function SymbolToString() {
- throw MakeTypeError('symbol_to_string');
-}
function SymbolValueOf() {
- // NOTE: Both Symbol objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- if (!IS_SYMBOL(this) && !IS_SYMBOL_WRAPPER(this)) {
+ if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
throw MakeTypeError(
- 'incompatible_method_receiver', ["Symbol.prototype.valueOf", this]);
+ 'incompatible_method_receiver', ["Symbol.prototype.valueOf", this]);
}
return %_ValueOf(this);
}
+
+function InternalSymbol(key) {
+ var internal_registry = %SymbolRegistry().for_intern;
+ if (IS_UNDEFINED(internal_registry[key])) {
+ internal_registry[key] = %CreateSymbol(key);
+ }
+ return internal_registry[key];
+}
+
+
+function SymbolFor(key) {
+ key = TO_STRING_INLINE(key);
+ var registry = %SymbolRegistry();
+ if (IS_UNDEFINED(registry.for[key])) {
+ var symbol = %CreateSymbol(key);
+ registry.for[key] = symbol;
+ registry.keyFor[symbol] = key;
+ }
+ return registry.for[key];
+}
+
+
+function SymbolKeyFor(symbol) {
+ if (!IS_SYMBOL(symbol)) throw MakeTypeError("not_a_symbol", [symbol]);
+ return %SymbolRegistry().keyFor[symbol];
+}
+
+
+// ES6 19.1.2.8
+function ObjectGetOwnPropertySymbols(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("called_on_non_object",
+ ["Object.getOwnPropertySymbols"]);
+ }
+
+ // TODO(arv): Proxies use a shared trap for String and Symbol keys.
+
+ return ObjectGetOwnPropertyKeys(obj, true);
+}
+
+
+//-------------------------------------------------------------------
+
+var symbolCreate = InternalSymbol("Symbol.create");
+var symbolHasInstance = InternalSymbol("Symbol.hasInstance");
+var symbolIsConcatSpreadable = InternalSymbol("Symbol.isConcatSpreadable");
+var symbolIsRegExp = InternalSymbol("Symbol.isRegExp");
+var symbolIterator = InternalSymbol("Symbol.iterator");
+var symbolToStringTag = InternalSymbol("Symbol.toStringTag");
+var symbolUnscopables = InternalSymbol("Symbol.unscopables");
+
+
//-------------------------------------------------------------------
function SetUpSymbol() {
%CheckIsBootstrapping();
%SetCode($Symbol, SymbolConstructor);
- %FunctionSetPrototype($Symbol, new $Symbol());
- %SetProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
+ %FunctionSetPrototype($Symbol, new $Object());
- InstallGetter($Symbol.prototype, "name", SymbolGetName);
+ InstallConstants($Symbol, $Array(
+ "create", symbolCreate,
+ "hasInstance", symbolHasInstance,
+ "isConcatSpreadable", symbolIsConcatSpreadable,
+ "isRegExp", symbolIsRegExp,
+ "iterator", symbolIterator,
+ "toStringTag", symbolToStringTag,
+ "unscopables", symbolUnscopables
+ ));
+ InstallFunctions($Symbol, DONT_ENUM, $Array(
+ "for", SymbolFor,
+ "keyFor", SymbolKeyFor
+ ));
+
+ %SetProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
InstallFunctions($Symbol.prototype, DONT_ENUM, $Array(
"toString", SymbolToString,
"valueOf", SymbolValueOf
@@ -85,3 +121,14 @@ function SetUpSymbol() {
}
SetUpSymbol();
+
+
+function ExtendObject() {
+ %CheckIsBootstrapping();
+
+ InstallFunctions($Object, DONT_ENUM, $Array(
+ "getOwnPropertySymbols", ObjectGetOwnPropertySymbols
+ ));
+}
+
+ExtendObject();
diff --git a/chromium/v8/src/third_party/valgrind/valgrind.h b/chromium/v8/src/third_party/valgrind/valgrind.h
index 7a3ee2f1fb2..fa3f53675ee 100644
--- a/chromium/v8/src/third_party/valgrind/valgrind.h
+++ b/chromium/v8/src/third_party/valgrind/valgrind.h
@@ -21,16 +21,16 @@
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
@@ -52,13 +52,13 @@
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
- ----------------------------------------------------------------
+ ----------------------------------------------------------------
*/
/* This file is for inclusion into client (your!) code.
- You can use these macros to manipulate and query Valgrind's
+ You can use these macros to manipulate and query Valgrind's
execution inside your own programs.
The resulting executables will still run without Valgrind, just a
@@ -194,8 +194,8 @@
this is executed not under Valgrind. Args are passed in a memory
block, and so there's no intrinsic limit to the number that could
be passed, but it's currently five.
-
- The macro args are:
+
+ The macro args are:
_zzq_rlval result lvalue
_zzq_default default value (result returned when running on real CPU)
_zzq_request request code
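
For orientation, the best-known consumer of this mechanism is valgrind.h's own RUNNING_ON_VALGRIND macro, which expands to a client request whose default value is 0, so the query is harmless on a real CPU:

    #include "valgrind.h"

    // 0 on the real CPU (the request's default value), non-zero under Valgrind.
    bool UnderValgrind() {
      return RUNNING_ON_VALGRIND != 0;
    }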
@@ -222,7 +222,7 @@
|| (defined(PLAT_x86_win32) && defined(__GNUC__))
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
@@ -277,7 +277,7 @@ typedef
#if defined(PLAT_x86_win32) && !defined(__GNUC__)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
@@ -343,7 +343,7 @@ valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
typedef
- struct {
+ struct {
uint64_t nraddr; /* where's the code? */
}
OrigFn;
@@ -398,7 +398,7 @@ typedef
#if defined(PLAT_ppc32_linux)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
@@ -459,7 +459,7 @@ typedef
#if defined(PLAT_ppc64_linux)
typedef
- struct {
+ struct {
uint64_t nraddr; /* where's the code? */
uint64_t r2; /* what tocptr do we need? */
}
@@ -526,7 +526,7 @@ typedef
#if defined(PLAT_arm_linux)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
@@ -1709,7 +1709,7 @@ typedef
"r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
-/* These CALL_FN_ macros assume that on ppc32-linux,
+/* These CALL_FN_ macros assume that on ppc32-linux,
sizeof(unsigned long) == 4. */
#define CALL_FN_W_v(lval, orig) \
@@ -3581,7 +3581,7 @@ typedef
#define VG_IS_TOOL_USERREQ(a, b, v) \
(VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
-/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
@@ -3710,7 +3710,7 @@ VALGRIND_PRINTF(const char *format, ...)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_VALIST_BY_REF,
(unsigned long)format,
- (unsigned long)&vargs,
+ (unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
@@ -3748,7 +3748,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
(unsigned long)format,
- (unsigned long)&vargs,
+ (unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
@@ -3759,7 +3759,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
/* These requests allow control to move from the simulated CPU to the
real CPU, calling an arbitrary function.
-
+
Note that the current ThreadId is inserted as the first argument.
So this call:
@@ -3845,7 +3845,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
- It marks the block as being addressable and undefined (if 'is_zeroed' is
not set), or addressable and defined (if 'is_zeroed' is set). This
controls how accesses to the block by the program are handled.
-
+
'addr' is the start of the usable block (ie. after any
redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
can apply redzones -- these are blocks of padding at the start and end of
@@ -3853,7 +3853,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
zeroed (or filled with another predictable value), as is the case for
calloc().
-
+
VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
heap block -- that will be used by the client program -- is allocated.
It's best to put it at the outermost level of the allocator if possible;
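
Sketched against that description, a redzone-aware allocator would announce its blocks roughly as follows; my_raw_alloc and kRedzone are hypothetical stand-ins for the client's own allocator internals:

    #include <cstddef>
    #include "valgrind.h"

    static const size_t kRedzone = 16;  // hypothetical redzone size
    char* my_raw_alloc(size_t n);       // hypothetical backing allocator

    void* MyAlloc(size_t n) {
      char* raw = my_raw_alloc(n + 2 * kRedzone);
      char* usable = raw + kRedzone;
      // addr, sizeB, rzB, is_zeroed -- placed at the outermost allocator level.
      VALGRIND_MALLOCLIKE_BLOCK(usable, n, kRedzone, /*is_zeroed=*/0);
      return usable;
    }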
diff --git a/chromium/v8/src/third_party/vtune/jitprofiling.cc b/chromium/v8/src/third_party/vtune/jitprofiling.cc
index b3952b32169..40282903fca 100644
--- a/chromium/v8/src/third_party/vtune/jitprofiling.cc
+++ b/chromium/v8/src/third_party/vtune/jitprofiling.cc
@@ -103,12 +103,12 @@ static iJIT_IsProfilingActiveFlags executionMode = iJIT_NOTHING_RUNNING;
/* end collector dll part. */
-/* loadiJIT_Funcs() : this function is called just in the beginning and is responsible
+/* loadiJIT_Funcs() : this function is called just in the beginning and is responsible
** for loading the functions from BistroJavaCollector.dll
** result:
** on success: the functions load, iJIT_DLL_is_missing=0, return value = 1.
** on failure: the functions are NULL, iJIT_DLL_is_missing=1, return value = 0.
-*/
+*/
static int loadiJIT_Funcs(void);
/* global representing whether the BistroJavaCollector can't be loaded */
@@ -129,7 +129,7 @@ static pthread_key_t threadLocalStorageHandle = (pthread_key_t)0;
#define INIT_TOP_Stack 10000
-typedef struct
+typedef struct
{
unsigned int TopStack;
unsigned int CurrentStack;
@@ -139,9 +139,9 @@ typedef struct
/*
** The function for reporting virtual-machine related events to VTune.
-** Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill in the stack_id
+** Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill in the stack_id
** field in the iJIT_Method_NIDS structure, as VTune fills it.
-**
+**
** The return value in iJVM_EVENT_TYPE_ENTER_NIDS && iJVM_EVENT_TYPE_LEAVE_NIDS events
** will be 0 in case of failure.
** in iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event it will be -1 if EventSpecificData == 0
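
To make the calling convention concrete: a METHOD_LOAD_FINISHED notification passes an iJIT_Method_Load as EventSpecificData, roughly as below (code_ptr and code_size are assumptions for the emitted code, and only the fields discussed in this header are filled in):

    #include <cstring>
    #include "jitprofiling.h"

    void ReportJittedMethod(void* code_ptr, unsigned int code_size) {
      iJIT_Method_Load ml;
      std::memset(&ml, 0, sizeof(ml));
      ml.method_id = iJIT_GetNewMethodID();  // one unique ID per method
      ml.method_name = const_cast<char*>("my_jitted_function");
      ml.method_load_address = code_ptr;
      ml.method_size = code_size;
      // Return value: 0 on failure; -1 if EventSpecificData were NULL.
      iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, &ml);
    }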
@@ -153,7 +153,7 @@ ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventS
int ReturnValue;
/*******************************************************************************
- ** This section is for debugging outside of VTune.
+ ** This section is for debugging outside of VTune.
** It creates the environment variables that indicate call graph mode.
** If running outside of VTune remove the remark.
**
@@ -170,22 +170,22 @@ ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventS
*******************************************************************************/
/* initialization part - the functions have not been loaded yet. This part
- ** will load the functions, and check if we are in Call Graph mode.
+ ** will load the functions, and check if we are in Call Graph mode.
** (for special treatment).
*/
- if (!FUNC_NotifyEvent)
+ if (!FUNC_NotifyEvent)
{
- if (iJIT_DLL_is_missing)
+ if (iJIT_DLL_is_missing)
return 0;
// load the Function from the DLL
- if (!loadiJIT_Funcs())
+ if (!loadiJIT_Funcs())
return 0;
/* Call Graph initialization. */
}
- /* If the event is method entry/exit, check that in the current mode
+ /* If the event is method entry/exit, check that in the current mode
** VTune is allowed to receive it
*/
if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS || event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) &&
@@ -194,7 +194,7 @@ ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventS
return 0;
}
/* This section is performed when method enter event occurs.
- ** It updates the virtual stack, or creates it if this is the first
+ ** It updates the virtual stack, or creates it if this is the first
** method entry in the thread. The stack pointer is decreased.
*/
if (event_type == iJVM_EVENT_TYPE_ENTER_NIDS)
@@ -263,7 +263,7 @@ ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventS
return 0;
}
- ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
+ ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
return ReturnValue;
}
@@ -296,7 +296,7 @@ ITT_EXTERN_C iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive()
/* this function loads the collector dll (BistroJavaCollector) and the relevant functions.
** on success: all functions load, iJIT_DLL_is_missing = 0, return value = 1.
** on failure: all functions are NULL, iJIT_DLL_is_missing = 1, return value = 0.
-*/
+*/
static int loadiJIT_Funcs()
{
static int bDllWasLoaded = 0;
@@ -314,7 +314,7 @@ static int loadiJIT_Funcs()
iJIT_DLL_is_missing = 1;
FUNC_NotifyEvent = NULL;
- if (m_libHandle)
+ if (m_libHandle)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
FreeLibrary(m_libHandle);
@@ -390,7 +390,7 @@ static int loadiJIT_Funcs()
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
FUNC_NotifyEvent = reinterpret_cast<TPNotify>(reinterpret_cast<intptr_t>(dlsym(m_libHandle, "NotifyEvent")));
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
- if (!FUNC_NotifyEvent)
+ if (!FUNC_NotifyEvent)
{
FUNC_Initialize = NULL;
return 0;
@@ -401,7 +401,7 @@ static int loadiJIT_Funcs()
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
FUNC_Initialize = reinterpret_cast<TPInitialize>(reinterpret_cast<intptr_t>(dlsym(m_libHandle, "Initialize")));
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
- if (!FUNC_Initialize)
+ if (!FUNC_Initialize)
{
FUNC_NotifyEvent = NULL;
return 0;
@@ -433,7 +433,7 @@ static int loadiJIT_Funcs()
}
/*
-** This function should be called by the user whenever a thread ends, to free the thread
+** This function should be called by the user whenever a thread ends, to free the thread
** "virtual stack" storage
*/
ITT_EXTERN_C void JITAPI FinalizeThread()
@@ -464,7 +464,7 @@ ITT_EXTERN_C void JITAPI FinalizeThread()
*/
ITT_EXTERN_C void JITAPI FinalizeProcess()
{
- if (m_libHandle)
+ if (m_libHandle)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
FreeLibrary(m_libHandle);
@@ -484,7 +484,7 @@ ITT_EXTERN_C void JITAPI FinalizeProcess()
/*
** This function should be called by the user for any method once.
-** The function will return a unique method ID, the user should maintain the ID for each
+** The function will return a unique method ID, the user should maintain the ID for each
** method
*/
ITT_EXTERN_C unsigned int JITAPI iJIT_GetNewMethodID()
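
The hunks in jitprofiling.cc are whitespace-only, but the comments they touch describe a real protocol: iJIT_NotifyEvent lazily loads the collector entry points on first use and filters method enter/leave events by the current mode. A condensed sketch of that guard logic, using only names visible in this file; the exact mode predicate is cut off by the hunk above, so the iJIT_BE_NOTIFY_ON_METHOD_ENTRY test is an assumption, not a quote:

/* Condensed sketch of the dispatch guard shown above; not a verbatim copy. */
ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type,
                                         void *EventSpecificData) {
  if (!FUNC_NotifyEvent) {            /* entry points not loaded yet */
    if (iJIT_DLL_is_missing)
      return 0;                       /* a previous load already failed */
    if (!loadiJIT_Funcs())            /* sets iJIT_DLL_is_missing on failure */
      return 0;
  }
  /* Drop enter/leave events unless VTune asked for them (assumed flag). */
  if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS ||
       event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) &&
      !(executionMode & iJIT_BE_NOTIFY_ON_METHOD_ENTRY))
    return 0;
  return (int)FUNC_NotifyEvent(event_type, EventSpecificData);
}
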
diff --git a/chromium/v8/src/third_party/vtune/jitprofiling.h b/chromium/v8/src/third_party/vtune/jitprofiling.h
index abd6d8ca786..193f243851c 100644
--- a/chromium/v8/src/third_party/vtune/jitprofiling.h
+++ b/chromium/v8/src/third_party/vtune/jitprofiling.h
@@ -67,54 +67,54 @@ typedef enum iJIT_jvm_event
{
/* shutdown */
-
- /*
+
+ /*
* Program exiting EventSpecificData NA
*/
- iJVM_EVENT_TYPE_SHUTDOWN = 2,
+ iJVM_EVENT_TYPE_SHUTDOWN = 2,
/* JIT profiling */
-
- /*
+
+ /*
* issued after method code is jitted into memory but before code is executed
* EventSpecificData is an iJIT_Method_Load
*/
- iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
- /* issued before unload. Method code will no longer be executed, but code
- * and info are still in memory. The VTune profiler may capture method
+ /* issued before unload. Method code will no longer be executed, but code
+ * and info are still in memory. The VTune profiler may capture method
* code only at this point EventSpecificData is iJIT_Method_Id
*/
- iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+ iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
/* Method Profiling */
- /* method name, Id and stack is supplied
- * issued when a method is about to be entered EventSpecificData is
+ /* method name, Id and stack is supplied
+ * issued when a method is about to be entered EventSpecificData is
* iJIT_Method_NIDS
*/
- iJVM_EVENT_TYPE_ENTER_NIDS = 19,
+ iJVM_EVENT_TYPE_ENTER_NIDS = 19,
- /* method name, Id and stack is supplied
- * issued when a method is about to be left EventSpecificData is
+ /* method name, Id and stack is supplied
+ * issued when a method is about to be left EventSpecificData is
* iJIT_Method_NIDS
*/
- iJVM_EVENT_TYPE_LEAVE_NIDS
+ iJVM_EVENT_TYPE_LEAVE_NIDS
} iJIT_JVM_EVENT;
typedef enum _iJIT_ModeFlags
{
/* No need to Notify VTune, since VTune is not running */
- iJIT_NO_NOTIFICATIONS = 0x0000,
+ iJIT_NO_NOTIFICATIONS = 0x0000,
- /* when turned on the jit must call
+ /* when turned on the jit must call
* iJIT_NotifyEvent
* (
* iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
* )
* for all the methods already jitted
*/
- iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
+ iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
/* when turned on the jit must call
* iJIT_NotifyEvent
@@ -122,19 +122,19 @@ typedef enum _iJIT_ModeFlags
* iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
* ) for all the methods that are unloaded
*/
- iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
+ iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
/* when turned on the jit must instrument all
* the currently jitted code with calls on
* method entries
*/
- iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
+ iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
/* when turned on the jit must instrument all
* the currently jitted code with calls
* on method exit
*/
- iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
+ iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
} iJIT_ModeFlags;
@@ -143,13 +143,13 @@ typedef enum _iJIT_ModeFlags
typedef enum _iJIT_IsProfilingActiveFlags
{
/* No profiler is running. Currently not used */
- iJIT_NOTHING_RUNNING = 0x0000,
+ iJIT_NOTHING_RUNNING = 0x0000,
/* Sampling is running. This is the default value
* returned by iJIT_IsProfilingActive()
*/
- iJIT_SAMPLING_ON = 0x0001,
-
+ iJIT_SAMPLING_ON = 0x0001,
+
/* Call Graph is running */
iJIT_CALLGRAPH_ON = 0x0002
@@ -174,7 +174,7 @@ typedef struct _iJIT_Method_Id
/* Id of the method (same as the one passed in
* the iJIT_Method_Load struct
*/
- unsigned int method_id;
+ unsigned int method_id;
} *piJIT_Method_Id, iJIT_Method_Id;
@@ -188,13 +188,13 @@ typedef struct _iJIT_Method_Id
typedef struct _iJIT_Method_NIDS
{
/* unique method ID */
- unsigned int method_id;
+ unsigned int method_id;
/* NOTE: no need to fill this field, it's filled by VTune */
- unsigned int stack_id;
+ unsigned int stack_id;
/* method name (just the method, without the class) */
- char* method_name;
+ char* method_name;
} *piJIT_Method_NIDS, iJIT_Method_NIDS;
/* structures for the events:
@@ -204,54 +204,54 @@ typedef struct _iJIT_Method_NIDS
typedef struct _LineNumberInfo
{
/* x86 Offset from the beginning of the method */
- unsigned int Offset;
-
+ unsigned int Offset;
+
/* source line number from the beginning of the source file */
- unsigned int LineNumber;
+ unsigned int LineNumber;
} *pLineNumberInfo, LineNumberInfo;
typedef struct _iJIT_Method_Load
{
/* unique method ID - can be any unique value, (except 0 - 999) */
- unsigned int method_id;
+ unsigned int method_id;
/* method name (can be with or without the class and signature, in any case
* the class name will be added to it)
*/
- char* method_name;
+ char* method_name;
/* virtual address of that method - This determines the method range for the
* iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
*/
- void* method_load_address;
+ void* method_load_address;
/* Size in memory - Must be exact */
- unsigned int method_size;
+ unsigned int method_size;
/* Line Table size in number of entries - Zero if none */
- unsigned int line_number_size;
-
+ unsigned int line_number_size;
+
/* Pointer to the beginning of the line numbers info array */
- pLineNumberInfo line_number_table;
+ pLineNumberInfo line_number_table;
/* unique class ID */
- unsigned int class_id;
-
+ unsigned int class_id;
+
/* class file name */
- char* class_file_name;
+ char* class_file_name;
/* source file name */
- char* source_file_name;
+ char* source_file_name;
/* bits supplied by the user for saving in the JIT file */
- void* user_data;
+ void* user_data;
/* the size of the user data buffer */
- unsigned int user_data_size;
+ unsigned int user_data_size;
/* NOTE: no need to fill this field, it's filled by VTune */
- iJDEnvironmentType env;
+ iJDEnvironmentType env;
} *piJIT_Method_Load, iJIT_Method_Load;
@@ -280,7 +280,7 @@ typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags);
int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
/* The new mode call back routine */
-void JITAPI iJIT_RegisterCallbackEx(void *userdata,
+void JITAPI iJIT_RegisterCallbackEx(void *userdata,
iJIT_ModeChangedEx NewModeCallBackFuncEx);
iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
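
The struct comments above double as a filling guide: VTune supplies fields like env (and stack_id in iJIT_Method_NIDS), while the agent supplies everything else. A hedged example of populating iJIT_Method_Load with a one-entry line table, using only declarations from this header; the function, name string, and line value are illustrative:

#include <string.h>
#include "jitprofiling.h"

/* Illustrative agent-side report; addr/size come from the caller's JIT. */
static void ReportMethod(void* addr, unsigned int size) {
  LineNumberInfo line;
  line.Offset = 0;                     /* x86 offset from the method start */
  line.LineNumber = 42;                /* illustrative source line */

  iJIT_Method_Load jmethod;
  memset(&jmethod, 0, sizeof(jmethod));
  jmethod.method_id = iJIT_GetNewMethodID();  /* unique, "except 0 - 999" */
  jmethod.method_name = (char*)"my_jitted_function";
  jmethod.method_load_address = addr;
  jmethod.method_size = size;          /* "Must be exact" */
  jmethod.line_number_size = 1;        /* zero if there is no table */
  jmethod.line_number_table = &line;
  /* env is filled by VTune; user_data may stay NULL. */

  iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&jmethod);
}
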
diff --git a/chromium/v8/src/third_party/vtune/v8-vtune.h b/chromium/v8/src/third_party/vtune/v8-vtune.h
index 29ea3eacd88..c60b303b3ab 100644
--- a/chromium/v8/src/third_party/vtune/v8-vtune.h
+++ b/chromium/v8/src/third_party/vtune/v8-vtune.h
@@ -1,38 +1,38 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
-
+
GPL LICENSE SUMMARY
-
+
Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
-
+
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
-
+
Contact Information:
http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
-
+
BSD LICENSE
-
+
Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
-
+
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
@@ -42,7 +42,7 @@
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
-
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
diff --git a/chromium/v8/src/third_party/vtune/vtune-jit.cc b/chromium/v8/src/third_party/vtune/vtune-jit.cc
index 93de7efbb93..023dd1864be 100644
--- a/chromium/v8/src/third_party/vtune/vtune-jit.cc
+++ b/chromium/v8/src/third_party/vtune/vtune-jit.cc
@@ -193,11 +193,12 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
jmethod.method_name = temp_method_name;
Handle<Script> script = event->script;
-
+
if (*script != NULL) {
// Get the source file name and set it to jmethod.source_file_name
- if ((*script->GetScriptName())->IsString()) {
- Handle<String> script_name = script->GetScriptName()->ToString();
+ if ((*script->GetUnboundScript()->GetScriptName())->IsString()) {
+ Handle<String> script_name =
+ script->GetUnboundScript()->GetScriptName()->ToString();
temp_file_name = new char[script_name->Utf8Length() + 1];
script_name->WriteUtf8(temp_file_name);
jmethod.source_file_name = temp_file_name;
@@ -224,11 +225,11 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
jmethod.line_number_table[index].Offset =
static_cast<unsigned int>(Iter->pc_);
jmethod.line_number_table[index++].LineNumber =
- script->GetLineNumber(Iter->pos_)+1;
+ script->GetUnboundScript()->GetLineNumber(Iter->pos_)+1;
}
GetEntries()->erase(event->code_start);
}
- }
+ }
iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
reinterpret_cast<void*>(&jmethod));
@@ -261,11 +262,11 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
GetEntries()->insert(std::pair <void*, void*>(event->code_start, event->user_data));
break;
- }
+ }
default:
break;
}
- }
+ }
return;
}
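
Beyond the whitespace cleanups, the substantive change in vtune-jit.cc is an API migration: script names and line numbers are now reached through v8::UnboundScript rather than v8::Script directly. A hedged sketch of the post-migration access pattern inside a JitCodeEvent handler, mirroring the hunks above (error handling elided; the handler name is illustrative):

// Sketch of the post-migration pattern used above; assumes this snapshot's
// v8.h, where Script::GetUnboundScript() exposes name and line lookups.
static void HandleCodeAdded(const v8::JitCodeEvent* event) {
  v8::Handle<v8::Script> script = event->script;
  if (*script != NULL) {
    v8::Handle<v8::UnboundScript> unbound = script->GetUnboundScript();
    if ((*unbound->GetScriptName())->IsString()) {
      v8::Handle<v8::String> name = unbound->GetScriptName()->ToString();
      // Copy out via name->Utf8Length() and name->WriteUtf8(buffer), as above.
    }
    // Line numbers are resolved through the unbound script as well:
    //   line_number_table[i].LineNumber = unbound->GetLineNumber(pos) + 1;
  }
}
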
diff --git a/chromium/v8/src/third_party/vtune/vtune-jit.h b/chromium/v8/src/third_party/vtune/vtune-jit.h
index 42b8c3da1fb..15011bf05ea 100644
--- a/chromium/v8/src/third_party/vtune/vtune-jit.h
+++ b/chromium/v8/src/third_party/vtune/vtune-jit.h
@@ -1,38 +1,38 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
-
+
GPL LICENSE SUMMARY
-
+
Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
-
+
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
-
+
Contact Information:
http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
-
+
BSD LICENSE
-
+
Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
-
+
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
@@ -42,7 +42,7 @@
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
-
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
diff --git a/chromium/v8/src/token.cc b/chromium/v8/src/token.cc
index 7ba7ed34205..5dc67bb7212 100644
--- a/chromium/v8/src/token.cc
+++ b/chromium/v8/src/token.cc
@@ -1,32 +1,9 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "token.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8stdint.h"
+#include "src/token.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/token.h b/chromium/v8/src/token.h
index 39bcc24074a..12ae424f285 100644
--- a/chromium/v8/src/token.h
+++ b/chromium/v8/src/token.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_TOKEN_H_
#define V8_TOKEN_H_
-#include "checks.h"
+#include "src/checks.h"
namespace v8 {
namespace internal {
@@ -73,7 +50,7 @@ namespace internal {
T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
T(INIT_LET, "=init_let", 2) /* AST-use only. */ \
T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
- T(INIT_CONST_HARMONY, "=init_const_harmony", 2) /* AST-use only. */ \
+ T(INIT_CONST_LEGACY, "=init_const_legacy", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
T(ASSIGN_BIT_OR, "|=", 2) \
T(ASSIGN_BIT_XOR, "^=", 2) \
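
The T(name, string, precedence) rows above are X-macro entries: token.h expands the same list several times to build parallel tables, which is why INIT_CONST_HARMONY is renamed in place rather than removed — every expansion stays in sync automatically. A generic sketch of the expansion pattern; TOKEN_LIST and the Token wrapper follow V8's convention but are reconstructed from memory, so treat the exact shape as an assumption:

// Hedged sketch of the X-macro pattern behind the T(...) rows above.
#define TOKEN_LIST(T)                                   \
  T(INIT_CONST_LEGACY, "=init_const_legacy", 2)         \
  T(ASSIGN, "=", 2)                                     \
  T(ASSIGN_BIT_OR, "|=", 2)

class Token {
 public:
#define T(name, string, precedence) name,
  enum Value { TOKEN_LIST(T) NUM_TOKENS };  // one enumerator per row
#undef T

  // A second expansion of the same list yields the printable names.
  static const char* String(Value v) {
#define T(name, string, precedence) string,
    static const char* const strings[] = { TOKEN_LIST(T) };
#undef T
    return strings[v];
  }
};
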
diff --git a/chromium/v8/src/transitions-inl.h b/chromium/v8/src/transitions-inl.h
index 5c7c28b6e5d..2387803c500 100644
--- a/chromium/v8/src/transitions-inl.h
+++ b/chromium/v8/src/transitions-inl.h
@@ -1,35 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_TRANSITIONS_INL_H_
#define V8_TRANSITIONS_INL_H_
-#include "objects-inl.h"
-#include "transitions.h"
+#include "src/transitions.h"
namespace v8 {
namespace internal {
@@ -89,12 +65,6 @@ FixedArray* TransitionArray::GetPrototypeTransitions() {
}
-HeapObject* TransitionArray::UncheckedPrototypeTransitions() {
- ASSERT(HasPrototypeTransitions());
- return reinterpret_cast<HeapObject*>(get(kPrototypeTransitionsIndex));
-}
-
-
void TransitionArray::SetPrototypeTransitions(FixedArray* transitions,
WriteBarrierMode mode) {
ASSERT(IsFullTransitionArray());
@@ -115,9 +85,7 @@ Object** TransitionArray::GetPrototypeTransitionsSlot() {
Object** TransitionArray::GetKeySlot(int transition_number) {
ASSERT(!IsSimpleTransition());
ASSERT(transition_number < number_of_transitions());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToKeyIndex(transition_number)));
+ return RawFieldOfElementAt(ToKeyIndex(transition_number));
}
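
The GetKeySlot hunk above folds a two-step raw-field computation into a single helper. RawFieldOfElementAt is not defined anywhere in this diff; assuming it is the obvious FixedArray wrapper, the two forms are equivalent:

// Assumed shape of the helper used above (its definition is outside this
// diff): it packages exactly the computation the old code spelled out.
Object** FixedArray::RawFieldOfElementAt(int index) {
  return HeapObject::RawField(this, OffsetOfElementAt(index));
}
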
diff --git a/chromium/v8/src/transitions.cc b/chromium/v8/src/transitions.cc
index 9d3f038947d..6ac1ab054c8 100644
--- a/chromium/v8/src/transitions.cc
+++ b/chromium/v8/src/transitions.cc
@@ -1,57 +1,32 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "objects.h"
-#include "transitions-inl.h"
-#include "utils.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/objects.h"
+#include "src/transitions-inl.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
-static MaybeObject* AllocateRaw(Isolate* isolate, int length) {
- // Use FixedArray to not use TransitionArray::cast on incomplete object.
- FixedArray* array;
- MaybeObject* maybe_array = isolate->heap()->AllocateFixedArray(length);
- if (!maybe_array->To(&array)) return maybe_array;
- return array;
+Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate,
+ int number_of_transitions) {
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(ToKeyIndex(number_of_transitions));
+ array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
+ return Handle<TransitionArray>::cast(array);
}
-MaybeObject* TransitionArray::Allocate(Isolate* isolate,
- int number_of_transitions) {
- FixedArray* array;
- MaybeObject* maybe_array =
- AllocateRaw(isolate, ToKeyIndex(number_of_transitions));
- if (!maybe_array->To(&array)) return maybe_array;
- array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
- return array;
+Handle<TransitionArray> TransitionArray::AllocateSimple(Isolate* isolate,
+ Handle<Map> target) {
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(kSimpleTransitionSize);
+ array->set(kSimpleTransitionTarget, *target);
+ return Handle<TransitionArray>::cast(array);
}
@@ -69,86 +44,111 @@ static bool InsertionPointFound(Name* key1, Name* key2) {
}
-MaybeObject* TransitionArray::NewWith(SimpleTransitionFlag flag,
- Name* key,
- Map* target,
- Object* back_pointer) {
- TransitionArray* result;
- MaybeObject* maybe_result;
+Handle<TransitionArray> TransitionArray::NewWith(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Map> target,
+ SimpleTransitionFlag flag) {
+ Handle<TransitionArray> result;
+ Isolate* isolate = name->GetIsolate();
if (flag == SIMPLE_TRANSITION) {
- maybe_result = AllocateRaw(target->GetIsolate(), kSimpleTransitionSize);
- if (!maybe_result->To(&result)) return maybe_result;
- result->set(kSimpleTransitionTarget, target);
+ result = AllocateSimple(isolate, target);
} else {
- maybe_result = Allocate(target->GetIsolate(), 1);
- if (!maybe_result->To(&result)) return maybe_result;
- result->NoIncrementalWriteBarrierSet(0, key, target);
+ result = Allocate(isolate, 1);
+ result->NoIncrementalWriteBarrierSet(0, *name, *target);
}
- result->set_back_pointer_storage(back_pointer);
+ result->set_back_pointer_storage(map->GetBackPointer());
return result;
}
-MaybeObject* TransitionArray::ExtendToFullTransitionArray() {
- ASSERT(!IsFullTransitionArray());
- int nof = number_of_transitions();
- TransitionArray* result;
- MaybeObject* maybe_result = Allocate(GetIsolate(), nof);
- if (!maybe_result->To(&result)) return maybe_result;
+Handle<TransitionArray> TransitionArray::ExtendToFullTransitionArray(
+ Handle<Map> containing_map) {
+ ASSERT(!containing_map->transitions()->IsFullTransitionArray());
+ int nof = containing_map->transitions()->number_of_transitions();
- if (nof == 1) {
- result->NoIncrementalWriteBarrierCopyFrom(this, kSimpleTransitionIndex, 0);
+ // A transition array may shrink during GC.
+ Handle<TransitionArray> result = Allocate(containing_map->GetIsolate(), nof);
+ DisallowHeapAllocation no_gc;
+ int new_nof = containing_map->transitions()->number_of_transitions();
+ if (new_nof != nof) {
+ ASSERT(new_nof == 0);
+ result->Shrink(ToKeyIndex(0));
+ } else if (nof == 1) {
+ result->NoIncrementalWriteBarrierCopyFrom(
+ containing_map->transitions(), kSimpleTransitionIndex, 0);
}
- result->set_back_pointer_storage(back_pointer_storage());
+ result->set_back_pointer_storage(
+ containing_map->transitions()->back_pointer_storage());
return result;
}
-MaybeObject* TransitionArray::CopyInsert(Name* name, Map* target) {
- TransitionArray* result;
+Handle<TransitionArray> TransitionArray::CopyInsert(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Map> target,
+ SimpleTransitionFlag flag) {
+ if (!map->HasTransitionArray()) {
+ return TransitionArray::NewWith(map, name, target, flag);
+ }
- int number_of_transitions = this->number_of_transitions();
+ int number_of_transitions = map->transitions()->number_of_transitions();
int new_size = number_of_transitions;
- int insertion_index = this->Search(name);
+ int insertion_index = map->transitions()->Search(*name);
if (insertion_index == kNotFound) ++new_size;
- MaybeObject* maybe_array;
- maybe_array = TransitionArray::Allocate(GetIsolate(), new_size);
- if (!maybe_array->To(&result)) return maybe_array;
+ Handle<TransitionArray> result = Allocate(map->GetIsolate(), new_size);
+
+ // The map's transition array may have grown smaller during the allocation above as
+ // it was weakly traversed, though it is guaranteed not to disappear. Trim the
+ // result copy if needed, and recompute variables.
+ ASSERT(map->HasTransitionArray());
+ DisallowHeapAllocation no_gc;
+ TransitionArray* array = map->transitions();
+ if (array->number_of_transitions() != number_of_transitions) {
+ ASSERT(array->number_of_transitions() < number_of_transitions);
+
+ number_of_transitions = array->number_of_transitions();
+ new_size = number_of_transitions;
+
+ insertion_index = array->Search(*name);
+ if (insertion_index == kNotFound) ++new_size;
+
+ result->Shrink(ToKeyIndex(new_size));
+ }
- if (HasPrototypeTransitions()) {
- result->SetPrototypeTransitions(GetPrototypeTransitions());
+ if (array->HasPrototypeTransitions()) {
+ result->SetPrototypeTransitions(array->GetPrototypeTransitions());
}
if (insertion_index != kNotFound) {
for (int i = 0; i < number_of_transitions; ++i) {
if (i != insertion_index) {
- result->NoIncrementalWriteBarrierCopyFrom(this, i, i);
+ result->NoIncrementalWriteBarrierCopyFrom(array, i, i);
}
}
- result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
- result->set_back_pointer_storage(back_pointer_storage());
+ result->NoIncrementalWriteBarrierSet(insertion_index, *name, *target);
+ result->set_back_pointer_storage(array->back_pointer_storage());
return result;
}
insertion_index = 0;
for (; insertion_index < number_of_transitions; ++insertion_index) {
- if (InsertionPointFound(GetKey(insertion_index), name)) break;
+ if (InsertionPointFound(array->GetKey(insertion_index), *name)) break;
result->NoIncrementalWriteBarrierCopyFrom(
- this, insertion_index, insertion_index);
+ array, insertion_index, insertion_index);
}
- result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
+ result->NoIncrementalWriteBarrierSet(insertion_index, *name, *target);
for (; insertion_index < number_of_transitions; ++insertion_index) {
result->NoIncrementalWriteBarrierCopyFrom(
- this, insertion_index, insertion_index + 1);
+ array, insertion_index, insertion_index + 1);
}
- result->set_back_pointer_storage(back_pointer_storage());
+ result->set_back_pointer_storage(array->back_pointer_storage());
return result;
}
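
The recurring shape in this file's handlification is allocate first, then re-read any size the allocation may have invalidated: transition arrays are weakly traversed, so a GC triggered by Allocate can shrink the source array. The re-read happens under a DisallowHeapAllocation scope so it cannot go stale again. Condensed from the CopyInsert hunk above, not a verbatim copy:

// Condensed from CopyInsert above: sizes observed before an allocation
// may be stale afterwards, because transition arrays are weakly traversed.
Handle<TransitionArray> result = Allocate(map->GetIsolate(), new_size);

DisallowHeapAllocation no_gc;              // freeze the heap while copying
TransitionArray* array = map->transitions();
if (array->number_of_transitions() != number_of_transitions) {
  // GC shrank the source while Allocate ran: recompute and trim the copy.
  number_of_transitions = array->number_of_transitions();
  new_size = number_of_transitions;
  insertion_index = array->Search(*name);
  if (insertion_index == kNotFound) ++new_size;
  result->Shrink(ToKeyIndex(new_size));
}
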
diff --git a/chromium/v8/src/transitions.h b/chromium/v8/src/transitions.h
index b2e98396784..ec99c8b6053 100644
--- a/chromium/v8/src/transitions.h
+++ b/chromium/v8/src/transitions.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_TRANSITIONS_H_
#define V8_TRANSITIONS_H_
-#include "elements-kind.h"
-#include "heap.h"
-#include "isolate.h"
-#include "objects.h"
-#include "v8checks.h"
+#include "src/elements-kind.h"
+#include "src/heap.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/v8checks.h"
namespace v8 {
namespace internal {
@@ -85,7 +62,6 @@ class TransitionArray: public FixedArray {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline Object** GetPrototypeTransitionsSlot();
inline bool HasPrototypeTransitions();
- inline HeapObject* UncheckedPrototypeTransitions();
// Returns the number of transitions in the array.
int number_of_transitions() {
@@ -96,30 +72,25 @@ class TransitionArray: public FixedArray {
inline int number_of_entries() { return number_of_transitions(); }
- // Allocate a new transition array with a single entry.
- static MUST_USE_RESULT MaybeObject* NewWith(
- SimpleTransitionFlag flag,
- Name* key,
- Map* target,
- Object* back_pointer);
+ // Creates a FullTransitionArray from a SimpleTransitionArray in
+ // containing_map.
+ static Handle<TransitionArray> ExtendToFullTransitionArray(
+ Handle<Map> containing_map);
- MUST_USE_RESULT MaybeObject* ExtendToFullTransitionArray();
-
- // Copy the transition array, inserting a new transition.
+ // Create a transition array, copying from the owning map if it already has
+ // one, otherwise creating a new one according to flag.
// TODO(verwaest): This should not cause an existing transition to be
// overwritten.
- MUST_USE_RESULT MaybeObject* CopyInsert(Name* name, Map* target);
-
- // Copy a single transition from the origin array.
- inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
- int origin_transition,
- int target_transition);
+ static Handle<TransitionArray> CopyInsert(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Map> target,
+ SimpleTransitionFlag flag);
// Search a transition for a given property name.
inline int Search(Name* name);
// Allocates a TransitionArray.
- MUST_USE_RESULT static MaybeObject* Allocate(
+ static Handle<TransitionArray> Allocate(
Isolate* isolate, int number_of_transitions);
bool IsSimpleTransition() {
@@ -199,10 +170,24 @@ class TransitionArray: public FixedArray {
kTransitionTarget;
}
+ static Handle<TransitionArray> AllocateSimple(
+ Isolate* isolate, Handle<Map> target);
+
+ // Allocate a new transition array with a single entry.
+ static Handle<TransitionArray> NewWith(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Map> target,
+ SimpleTransitionFlag flag);
+
inline void NoIncrementalWriteBarrierSet(int transition_number,
Name* key,
Map* target);
+ // Copy a single transition from the origin array.
+ inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
+ int origin_transition,
+ int target_transition);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
};
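
Seen from a caller, the header change trades raw MaybeObject plumbing for handle results allocated through the factory. A hedged before/after sketch (the caller context is illustrative):

// Before: raw pointers and explicit allocation-failure propagation.
//   TransitionArray* result;
//   MaybeObject* maybe = transitions->CopyInsert(*name, *target);
//   if (!maybe->To(&result)) return maybe;
//
// After: handles; CopyInsert also absorbs the NewWith/simple-transition case.
Handle<TransitionArray> result =
    TransitionArray::CopyInsert(map, name, target, flag);
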
diff --git a/chromium/v8/src/trig-table.h b/chromium/v8/src/trig-table.h
index 081c0389ae2..7332152a9df 100644
--- a/chromium/v8/src/trig-table.h
+++ b/chromium/v8/src/trig-table.h
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_TRIG_TABLE_H_
#define V8_TRIG_TABLE_H_
diff --git a/chromium/v8/src/type-info.cc b/chromium/v8/src/type-info.cc
index eed54ce2bcd..45ac1a33267 100644
--- a/chromium/v8/src/type-info.cc
+++ b/chromium/v8/src/type-info.cc
@@ -1,70 +1,37 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "code-stubs.h"
-#include "compiler.h"
-#include "ic.h"
-#include "macro-assembler.h"
-#include "stub-cache.h"
-#include "type-info.h"
-
-#include "ic-inl.h"
-#include "objects-inl.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-namespace v8 {
-namespace internal {
+#include "src/v8.h"
+#include "src/ast.h"
+#include "src/code-stubs.h"
+#include "src/compiler.h"
+#include "src/ic.h"
+#include "src/macro-assembler.h"
+#include "src/stub-cache.h"
+#include "src/type-info.h"
-TypeInfo TypeInfo::FromValue(Handle<Object> value) {
- if (value->IsSmi()) {
- return TypeInfo::Smi();
- } else if (value->IsHeapNumber()) {
- return TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
- ? TypeInfo::Integer32()
- : TypeInfo::Double();
- } else if (value->IsString()) {
- return TypeInfo::String();
- }
- return TypeInfo::Unknown();
-}
+#include "src/ic-inl.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
+ Handle<FixedArray> feedback_vector,
Handle<Context> native_context,
- Isolate* isolate,
Zone* zone)
: native_context_(native_context),
- isolate_(isolate),
zone_(zone) {
BuildDictionary(code);
ASSERT(dictionary_->IsDictionary());
+ // We make a copy of the feedback vector because a GC could clear
+ // the type feedback info contained therein.
+ // TODO(mvstanton): revisit the decision to copy when we weakly
+ // traverse the feedback vector at GC time.
+ feedback_vector_ = isolate()->factory()->CopyFixedArray(feedback_vector);
}
@@ -79,23 +46,23 @@ Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
Object* value = dictionary_->ValueAt(entry);
if (value->IsCell()) {
Cell* cell = Cell::cast(value);
- return Handle<Object>(cell->value(), isolate_);
+ return Handle<Object>(cell->value(), isolate());
} else {
- return Handle<Object>(value, isolate_);
+ return Handle<Object>(value, isolate());
}
}
- return Handle<Object>::cast(isolate_->factory()->undefined_value());
+ return Handle<Object>::cast(isolate()->factory()->undefined_value());
}
-Handle<Cell> TypeFeedbackOracle::GetInfoCell(
- TypeFeedbackId ast_id) {
- int entry = dictionary_->FindEntry(IdToKey(ast_id));
- if (entry != UnseededNumberDictionary::kNotFound) {
- Cell* cell = Cell::cast(dictionary_->ValueAt(entry));
- return Handle<Cell>(cell, isolate_);
+Handle<Object> TypeFeedbackOracle::GetInfo(int slot) {
+ ASSERT(slot >= 0 && slot < feedback_vector_->length());
+ Object* obj = feedback_vector_->get(slot);
+ if (!obj->IsJSFunction() ||
+ !CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
+ return Handle<Object>(obj, isolate());
}
- return Handle<Cell>::null();
+ return Handle<Object>::cast(isolate()->factory()->undefined_value());
}
@@ -109,16 +76,6 @@ bool TypeFeedbackOracle::LoadIsUninitialized(TypeFeedbackId id) {
}
-bool TypeFeedbackOracle::LoadIsPreMonomorphic(TypeFeedbackId id) {
- Handle<Object> maybe_code = GetInfo(id);
- if (maybe_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(maybe_code);
- return code->is_inline_cache_stub() && code->ic_state() == PREMONOMORPHIC;
- }
- return false;
-}
-
-
bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
Handle<Object> maybe_code = GetInfo(ast_id);
if (!maybe_code->IsCode()) return false;
@@ -127,16 +84,6 @@ bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::StoreIsPreMonomorphic(TypeFeedbackId ast_id) {
- Handle<Object> maybe_code = GetInfo(ast_id);
- if (maybe_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(maybe_code);
- return code->ic_state() == PREMONOMORPHIC;
- }
- return false;
-}
-
-
bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
Handle<Object> maybe_code = GetInfo(ast_id);
if (maybe_code->IsCode()) {
@@ -148,31 +95,25 @@ bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::CallIsMonomorphic(TypeFeedbackId id) {
- Handle<Object> value = GetInfo(id);
- return value->IsAllocationSite() || value->IsJSFunction() || value->IsSmi() ||
- (value->IsCode() && Handle<Code>::cast(value)->ic_state() == MONOMORPHIC);
+bool TypeFeedbackOracle::CallIsMonomorphic(int slot) {
+ Handle<Object> value = GetInfo(slot);
+ return value->IsAllocationSite() || value->IsJSFunction();
}
-bool TypeFeedbackOracle::KeyedArrayCallIsHoley(TypeFeedbackId id) {
- Handle<Object> value = GetInfo(id);
- Handle<Code> code = Handle<Code>::cast(value);
- return KeyedArrayCallStub::IsHoley(code);
+bool TypeFeedbackOracle::CallNewIsMonomorphic(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ return FLAG_pretenuring_call_new
+ ? info->IsJSFunction()
+ : info->IsAllocationSite() || info->IsJSFunction();
}
-bool TypeFeedbackOracle::CallNewIsMonomorphic(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- return info->IsAllocationSite() || info->IsJSFunction();
-}
-
-
-byte TypeFeedbackOracle::ForInType(TypeFeedbackId id) {
- Handle<Object> value = GetInfo(id);
- return value->IsSmi() &&
- Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker
- ? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
+byte TypeFeedbackOracle::ForInType(int feedback_vector_slot) {
+ Handle<Object> value = GetInfo(feedback_vector_slot);
+ return value.is_identical_to(
+ TypeFeedbackInfo::UninitializedSentinel(isolate()))
+ ? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
}
@@ -189,63 +130,48 @@ KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode(
}
-void TypeFeedbackOracle::CallReceiverTypes(TypeFeedbackId id,
- Handle<String> name,
- int arity,
- CallKind call_kind,
- SmallMapList* types) {
- // Note: Currently we do not take string extra ic data into account
- // here.
- ContextualMode contextual_mode = call_kind == CALL_AS_FUNCTION
- ? CONTEXTUAL
- : NOT_CONTEXTUAL;
- ExtraICState extra_ic_state =
- CallIC::Contextual::encode(contextual_mode);
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::CALL_IC, extra_ic_state, OWN_MAP, Code::NORMAL, arity);
- CollectReceiverTypes(id, name, flags, types);
-}
-
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (info->IsAllocationSite()) {
+ return Handle<JSFunction>(isolate()->native_context()->array_function());
+ }
-CheckType TypeFeedbackOracle::GetCallCheckType(TypeFeedbackId id) {
- Handle<Object> value = GetInfo(id);
- if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
- CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
- ASSERT(check != RECEIVER_MAP_CHECK);
- return check;
+ return Handle<JSFunction>::cast(info);
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- if (info->IsAllocationSite()) {
- return Handle<JSFunction>(isolate_->global_context()->array_function());
- } else {
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (FLAG_pretenuring_call_new || info->IsJSFunction()) {
return Handle<JSFunction>::cast(info);
}
+
+ ASSERT(info->IsAllocationSite());
+ return Handle<JSFunction>(isolate()->native_context()->array_function());
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
+Handle<AllocationSite> TypeFeedbackOracle::GetCallAllocationSite(int slot) {
+ Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
- return Handle<JSFunction>(isolate_->global_context()->array_function());
- } else {
- return Handle<JSFunction>::cast(info);
+ return Handle<AllocationSite>::cast(info);
}
+ return Handle<AllocationSite>::null();
}
-Handle<Cell> TypeFeedbackOracle::GetCallNewAllocationInfoCell(
- TypeFeedbackId id) {
- return GetInfoCell(id);
+Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (FLAG_pretenuring_call_new || info->IsAllocationSite()) {
+ return Handle<AllocationSite>::cast(info);
+ }
+ return Handle<AllocationSite>::null();
}
bool TypeFeedbackOracle::LoadIsBuiltin(
TypeFeedbackId id, Builtins::Name builtin) {
- return *GetInfo(id) == isolate_->builtins()->builtin(builtin);
+ return *GetInfo(id) == isolate()->builtins()->builtin(builtin);
}
@@ -260,13 +186,13 @@ bool TypeFeedbackOracle::LoadIsStub(TypeFeedbackId id, ICStub* stub) {
void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
- Handle<Type>* left_type,
- Handle<Type>* right_type,
- Handle<Type>* combined_type) {
+ Type** left_type,
+ Type** right_type,
+ Type** combined_type) {
Handle<Object> info = GetInfo(id);
if (!info->IsCode()) {
// For some comparisons we don't have ICs, e.g. LiteralCompareTypeof.
- *left_type = *right_type = *combined_type = handle(Type::None(), isolate_);
+ *left_type = *right_type = *combined_type = Type::None(zone());
return;
}
Handle<Code> code = Handle<Code>::cast(info);
@@ -274,8 +200,8 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
Handle<Map> map;
Map* raw_map = code->FindFirstMap();
if (raw_map != NULL) {
- map = Map::CurrentMapForDeprecated(handle(raw_map));
- if (!map.is_null() && CanRetainOtherContext(*map, *native_context_)) {
+ if (Map::CurrentMapForDeprecated(handle(raw_map)).ToHandle(&map) &&
+ CanRetainOtherContext(*map, *native_context_)) {
map = Handle<Map>::null();
}
}
@@ -283,20 +209,21 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
if (code->is_compare_ic_stub()) {
int stub_minor_key = code->stub_info();
CompareIC::StubInfoToType(
- stub_minor_key, left_type, right_type, combined_type, map, isolate());
+ stub_minor_key, left_type, right_type, combined_type, map, zone());
} else if (code->is_compare_nil_ic_stub()) {
- CompareNilICStub stub(code->extended_extra_ic_state());
- *combined_type = stub.GetType(isolate_, map);
- *left_type = *right_type = stub.GetInputType(isolate_, map);
+ CompareNilICStub stub(isolate(), code->extra_ic_state());
+ *combined_type = stub.GetType(zone(), map);
+ *left_type = *right_type = stub.GetInputType(zone(), map);
}
}
void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
+ Type** left,
+ Type** right,
+ Type** result,
Maybe<int>* fixed_right_arg,
+ Handle<AllocationSite>* allocation_site,
Token::Value op) {
Handle<Object> object = GetInfo(id);
if (!object->IsCode()) {
@@ -304,41 +231,37 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
// operations covered by the BinaryOpIC we should always have them.
ASSERT(op < BinaryOpIC::State::FIRST_TOKEN ||
op > BinaryOpIC::State::LAST_TOKEN);
- *left = *right = *result = handle(Type::None(), isolate_);
+ *left = *right = *result = Type::None(zone());
*fixed_right_arg = Maybe<int>();
+ *allocation_site = Handle<AllocationSite>::null();
return;
}
Handle<Code> code = Handle<Code>::cast(object);
ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(code->extended_extra_ic_state());
+ BinaryOpIC::State state(isolate(), code->extra_ic_state());
ASSERT_EQ(op, state.op());
- *left = state.GetLeftType(isolate());
- *right = state.GetRightType(isolate());
- *result = state.GetResultType(isolate());
+ *left = state.GetLeftType(zone());
+ *right = state.GetRightType(zone());
+ *result = state.GetResultType(zone());
*fixed_right_arg = state.fixed_right_arg();
-}
-
-Handle<Type> TypeFeedbackOracle::ClauseType(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- Handle<Type> result(Type::None(), isolate_);
- if (info->IsCode() && Handle<Code>::cast(info)->is_compare_ic_stub()) {
- Handle<Code> code = Handle<Code>::cast(info);
- CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
- result = CompareIC::StateToType(isolate_, state);
+ AllocationSite* first_allocation_site = code->FindFirstAllocationSite();
+ if (first_allocation_site != NULL) {
+ *allocation_site = handle(first_allocation_site);
+ } else {
+ *allocation_site = Handle<AllocationSite>::null();
}
- return result;
}
-Handle<Type> TypeFeedbackOracle::CountType(TypeFeedbackId id) {
+Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
Handle<Object> object = GetInfo(id);
- if (!object->IsCode()) return handle(Type::None(), isolate_);
+ if (!object->IsCode()) return Type::None(zone());
Handle<Code> code = Handle<Code>::cast(object);
ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(code->extended_extra_ic_state());
- return state.GetLeftType(isolate());
+ BinaryOpIC::State state(isolate(), code->extra_ic_state());
+ return state.GetLeftType(zone());
}
@@ -346,12 +269,10 @@ void TypeFeedbackOracle::PropertyReceiverTypes(
TypeFeedbackId id, Handle<String> name,
SmallMapList* receiver_types, bool* is_prototype) {
receiver_types->Clear();
- FunctionPrototypeStub proto_stub(Code::LOAD_IC);
+ FunctionPrototypeStub proto_stub(isolate(), Code::LOAD_IC);
*is_prototype = LoadIsStub(id, &proto_stub);
if (!*is_prototype) {
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
CollectReceiverTypes(id, name, flags, receiver_types);
}
}
@@ -372,9 +293,7 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
void TypeFeedbackOracle::AssignmentReceiverTypes(
TypeFeedbackId id, Handle<String> name, SmallMapList* receiver_types) {
receiver_types->Clear();
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
CollectReceiverTypes(id, name, flags, receiver_types);
}
@@ -408,7 +327,7 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
if (FLAG_collect_megamorphic_maps_from_stub_cache &&
code->ic_state() == MEGAMORPHIC) {
types->Reserve(4, zone());
- isolate_->stub_cache()->CollectMatchingMaps(
+ isolate()->stub_cache()->CollectMatchingMaps(
types, name, flags, native_context_, zone());
} else {
CollectReceiverTypes(ast_id, types);
@@ -487,11 +406,10 @@ byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) {
void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
DisallowHeapAllocation no_allocation;
ZoneList<RelocInfo> infos(16, zone());
- HandleScope scope(isolate_);
+ HandleScope scope(isolate());
GetRelocInfos(code, &infos);
CreateDictionary(code, &infos);
ProcessRelocInfos(&infos);
- ProcessTypeFeedbackCells(code);
// Allocate handle in the parent scope.
dictionary_ = scope.CloseAndEscape(dictionary_);
}
@@ -509,24 +427,20 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
ZoneList<RelocInfo>* infos) {
AllowHeapAllocation allocation_allowed;
- int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
- ? TypeFeedbackInfo::cast(code->type_feedback_info())->
- type_feedback_cells()->CellCount()
- : 0;
- int length = infos->length() + cell_count;
- byte* old_start = code->instruction_start();
- dictionary_ = isolate()->factory()->NewUnseededNumberDictionary(length);
- byte* new_start = code->instruction_start();
- RelocateRelocInfos(infos, old_start, new_start);
+ Code* old_code = *code;
+ dictionary_ = UnseededNumberDictionary::New(isolate(), infos->length());
+ RelocateRelocInfos(infos, old_code, *code);
}
void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- byte* old_start,
- byte* new_start) {
+ Code* old_code,
+ Code* new_code) {
for (int i = 0; i < infos->length(); i++) {
RelocInfo* info = &(*infos)[i];
- info->set_pc(new_start + (info->pc() - old_start));
+ info->set_host(new_code);
+ info->set_pc(new_code->instruction_start() +
+ (info->pc() - old_code->instruction_start()));
}
}
@@ -539,15 +453,8 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
TypeFeedbackId(static_cast<unsigned>((*infos)[i].data()));
Code* target = Code::GetCodeFromTargetAddress(target_address);
switch (target->kind()) {
- case Code::CALL_IC:
- if (target->ic_state() == MONOMORPHIC &&
- target->check_type() != RECEIVER_MAP_CHECK) {
- SetInfo(ast_id, Smi::FromInt(target->check_type()));
- break;
- }
case Code::LOAD_IC:
case Code::STORE_IC:
- case Code::KEYED_CALL_IC:
case Code::KEYED_LOAD_IC:
case Code::KEYED_STORE_IC:
case Code::BINARY_OP_IC:
@@ -564,47 +471,14 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
}
-void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
- Object* raw_info = code->type_feedback_info();
- if (!raw_info->IsTypeFeedbackInfo()) return;
- Handle<TypeFeedbackCells> cache(
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
- for (int i = 0; i < cache->CellCount(); i++) {
- TypeFeedbackId ast_id = cache->AstId(i);
- Cell* cell = cache->GetCell(i);
- Object* value = cell->value();
- if (value->IsSmi() ||
- value->IsAllocationSite() ||
- (value->IsJSFunction() &&
- !CanRetainOtherContext(JSFunction::cast(value),
- *native_context_))) {
- SetInfo(ast_id, cell);
- }
- }
-}
-
-
void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) ==
UnseededNumberDictionary::kNotFound);
- MaybeObject* maybe_result = dictionary_->AtNumberPut(IdToKey(ast_id), target);
- USE(maybe_result);
-#ifdef DEBUG
- Object* result = NULL;
// Dictionary has been allocated with sufficient size for all elements.
- ASSERT(maybe_result->ToObject(&result));
- ASSERT(*dictionary_ == result);
-#endif
-}
-
-
-Representation Representation::FromType(TypeInfo info) {
- if (info.IsUninitialized()) return Representation::None();
- if (info.IsSmi()) return Representation::Smi();
- if (info.IsInteger32()) return Representation::Integer32();
- if (info.IsDouble()) return Representation::Double();
- if (info.IsNumber()) return Representation::Double();
- return Representation::Tagged();
+ DisallowHeapAllocation no_need_to_resize_dictionary;
+ HandleScope scope(isolate());
+ USE(UnseededNumberDictionary::AtNumberPut(
+ dictionary_, IdToKey(ast_id), handle(target, isolate())));
}
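
Editor's note: SetInfo can run under DisallowHeapAllocation only because CreateDictionary sized the dictionary for every element up front, so AtNumberPut never needs to grow it. The same presize-then-insert discipline, sketched with a std::unordered_map standing in for UnseededNumberDictionary (illustrative, not V8 code):

#include <cassert>
#include <cstdint>
#include <unordered_map>

int main() {
  std::unordered_map<uint32_t, int> dict;
  dict.reserve(16);  // sized once for all entries, like CreateDictionary
  for (uint32_t id = 0; id < 16; ++id) {
    assert(dict.find(id) == dict.end());     // mirrors the kNotFound ASSERT
    dict.emplace(id, static_cast<int>(id));  // no rehash within capacity
  }
  assert(dict.size() == 16);
}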
diff --git a/chromium/v8/src/type-info.h b/chromium/v8/src/type-info.h
index 0ff99e994d9..706921adb08 100644
--- a/chromium/v8/src/type-info.h
+++ b/chromium/v8/src/type-info.h
@@ -1,224 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_TYPE_INFO_H_
#define V8_TYPE_INFO_H_
-#include "allocation.h"
-#include "globals.h"
-#include "types.h"
-#include "zone-inl.h"
+#include "src/allocation.h"
+#include "src/globals.h"
+#include "src/types.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
-const int kMaxKeyedPolymorphism = 4;
-
-// Unknown
-// | \____________
-// | |
-// Primitive Non-primitive
-// | \_______ |
-// | | |
-// Number String |
-// / \ | |
-// Double Integer32 | /
-// | | / /
-// | Smi / /
-// | | / __/
-// Uninitialized.
-
-class TypeInfo {
- public:
- TypeInfo() : type_(kUninitialized) { }
-
- static TypeInfo Unknown() { return TypeInfo(kUnknown); }
- // We know it's a primitive type.
- static TypeInfo Primitive() { return TypeInfo(kPrimitive); }
- // We know it's a number of some sort.
- static TypeInfo Number() { return TypeInfo(kNumber); }
- // We know it's a signed 32 bit integer.
- static TypeInfo Integer32() { return TypeInfo(kInteger32); }
- // We know it's a Smi.
- static TypeInfo Smi() { return TypeInfo(kSmi); }
- // We know it's a heap number.
- static TypeInfo Double() { return TypeInfo(kDouble); }
- // We know it's a string.
- static TypeInfo String() { return TypeInfo(kString); }
- // We know it's an internalized string.
- static TypeInfo InternalizedString() { return TypeInfo(kInternalizedString); }
- // We know it's a non-primitive (object) type.
- static TypeInfo NonPrimitive() { return TypeInfo(kNonPrimitive); }
- // We haven't started collecting info yet.
- static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
-
- int ToInt() {
- return type_;
- }
-
- static TypeInfo FromInt(int bit_representation) {
- Type t = static_cast<Type>(bit_representation);
- ASSERT(t == kUnknown ||
- t == kPrimitive ||
- t == kNumber ||
- t == kInteger32 ||
- t == kSmi ||
- t == kDouble ||
- t == kString ||
- t == kNonPrimitive);
- return TypeInfo(t);
- }
-
- // Return the weakest (least precise) common type.
- static TypeInfo Combine(TypeInfo a, TypeInfo b) {
- return TypeInfo(static_cast<Type>(a.type_ & b.type_));
- }
-
-
- // Integer32 is an integer that can be represented as a signed
- // 32-bit integer. It has to be
- // in the range [-2^31, 2^31 - 1]. We also have to check for negative 0
- // as it is not an Integer32.
- static inline bool IsInt32Double(double value) {
- const DoubleRepresentation minus_zero(-0.0);
- DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) return false;
- if (value >= kMinInt && value <= kMaxInt &&
- value == static_cast<int32_t>(value)) {
- return true;
- }
- return false;
- }
-
- static TypeInfo FromValue(Handle<Object> value);
-
- bool Equals(const TypeInfo& other) {
- return type_ == other.type_;
- }
-
- inline bool IsUnknown() {
- ASSERT(type_ != kUninitialized);
- return type_ == kUnknown;
- }
-
- inline bool IsPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kPrimitive) == kPrimitive);
- }
-
- inline bool IsNumber() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNumber) == kNumber);
- }
-
- inline bool IsSmi() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kSmi) == kSmi);
- }
-
- inline bool IsInternalizedString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInternalizedString) == kInternalizedString);
- }
-
- inline bool IsNonInternalizedString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInternalizedString) == kString);
- }
-
- inline bool IsInteger32() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInteger32) == kInteger32);
- }
-
- inline bool IsDouble() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kDouble) == kDouble);
- }
-
- inline bool IsString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kString) == kString);
- }
-
- inline bool IsNonPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNonPrimitive) == kNonPrimitive);
- }
-
- inline bool IsUninitialized() {
- return type_ == kUninitialized;
- }
-
- const char* ToString() {
- switch (type_) {
- case kUnknown: return "Unknown";
- case kPrimitive: return "Primitive";
- case kNumber: return "Number";
- case kInteger32: return "Integer32";
- case kSmi: return "Smi";
- case kInternalizedString: return "InternalizedString";
- case kDouble: return "Double";
- case kString: return "String";
- case kNonPrimitive: return "Object";
- case kUninitialized: return "Uninitialized";
- }
- UNREACHABLE();
- return "Unreachable code";
- }
-
- private:
- enum Type {
- kUnknown = 0, // 0000000
- kPrimitive = 0x10, // 0010000
- kNumber = 0x11, // 0010001
- kInteger32 = 0x13, // 0010011
- kSmi = 0x17, // 0010111
- kDouble = 0x19, // 0011001
- kString = 0x30, // 0110000
- kInternalizedString = 0x32, // 0110010
- kNonPrimitive = 0x40, // 1000000
- kUninitialized = 0x7f // 1111111
- };
-
- explicit inline TypeInfo(Type t) : type_(t) { }
-
- Type type_;
-};
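
Editor's note: the deleted TypeInfo lattice was encoded so that the "weakest common type" in Combine is literally a bitwise AND of the enum values: every type's bits are a superset of the bits of every type above it. A few checks of that encoding, with values copied from the enum above:

#include <cassert>

// Bit patterns copied from the removed TypeInfo enum; a type's bits
// include the bits of its supertypes, so Combine is a bitwise AND.
enum Type {
  kUnknown = 0, kPrimitive = 0x10, kNumber = 0x11, kInteger32 = 0x13,
  kSmi = 0x17, kDouble = 0x19, kString = 0x30, kNonPrimitive = 0x40
};

int main() {
  assert((kSmi & kDouble) == kNumber);        // 0x17 & 0x19 == 0x11
  assert((kInteger32 & kSmi) == kInteger32);  // Smi is below Integer32
  assert((kSmi & kString) == kPrimitive);     // 0x17 & 0x30 == 0x10
  assert((kNumber & kNonPrimitive) == kUnknown);
}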
-
-
-enum StringStubFeedback {
- DEFAULT_STRING_STUB = 0,
- STRING_INDEX_OUT_OF_BOUNDS = 1
-};
-
-
// Forward declarations.
-class CompilationInfo;
class ICStub;
class SmallMapList;
@@ -226,32 +21,26 @@ class SmallMapList;
class TypeFeedbackOracle: public ZoneObject {
public:
TypeFeedbackOracle(Handle<Code> code,
+ Handle<FixedArray> feedback_vector,
Handle<Context> native_context,
- Isolate* isolate,
Zone* zone);
bool LoadIsUninitialized(TypeFeedbackId id);
- bool LoadIsPreMonomorphic(TypeFeedbackId id);
bool StoreIsUninitialized(TypeFeedbackId id);
- bool StoreIsPreMonomorphic(TypeFeedbackId id);
bool StoreIsKeyedPolymorphic(TypeFeedbackId id);
+ bool CallIsMonomorphic(int slot);
bool CallIsMonomorphic(TypeFeedbackId aid);
bool KeyedArrayCallIsHoley(TypeFeedbackId id);
- bool CallNewIsMonomorphic(TypeFeedbackId id);
+ bool CallNewIsMonomorphic(int slot);
// TODO(1571) We can't use ForInStatement::ForInType as the return value due
// to various cycles in our headers.
// TODO(rossberg): once all oracle access is removed from ast.cc, it should
// be possible.
- byte ForInType(TypeFeedbackId id);
+ byte ForInType(int feedback_vector_slot);
KeyedAccessStoreMode GetStoreMode(TypeFeedbackId id);
- void CallReceiverTypes(TypeFeedbackId id,
- Handle<String> name,
- int arity,
- CallKind call_kind,
- SmallMapList* types);
void PropertyReceiverTypes(TypeFeedbackId id,
Handle<String> name,
SmallMapList* receiver_types,
@@ -275,10 +64,10 @@ class TypeFeedbackOracle: public ZoneObject {
static bool CanRetainOtherContext(JSFunction* function,
Context* native_context);
- CheckType GetCallCheckType(TypeFeedbackId id);
- Handle<JSFunction> GetCallTarget(TypeFeedbackId id);
- Handle<JSFunction> GetCallNewTarget(TypeFeedbackId id);
- Handle<Cell> GetCallNewAllocationInfoCell(TypeFeedbackId id);
+ Handle<JSFunction> GetCallTarget(int slot);
+ Handle<AllocationSite> GetCallAllocationSite(int slot);
+ Handle<JSFunction> GetCallNewTarget(int slot);
+ Handle<AllocationSite> GetCallNewAllocationSite(int slot);
bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id);
bool LoadIsStub(TypeFeedbackId id, ICStub* stub);
@@ -290,23 +79,22 @@ class TypeFeedbackOracle: public ZoneObject {
// Get type information for arithmetic operations and compares.
void BinaryType(TypeFeedbackId id,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
+ Type** left,
+ Type** right,
+ Type** result,
Maybe<int>* fixed_right_arg,
+ Handle<AllocationSite>* allocation_site,
Token::Value operation);
void CompareType(TypeFeedbackId id,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* combined);
-
- Handle<Type> CountType(TypeFeedbackId id);
+ Type** left,
+ Type** right,
+ Type** combined);
- Handle<Type> ClauseType(TypeFeedbackId id);
+ Type* CountType(TypeFeedbackId id);
Zone* zone() const { return zone_; }
- Isolate* isolate() const { return isolate_; }
+ Isolate* isolate() const { return zone_->isolate(); }
private:
void CollectReceiverTypes(TypeFeedbackId id,
@@ -320,23 +108,23 @@ class TypeFeedbackOracle: public ZoneObject {
void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
void CreateDictionary(Handle<Code> code, ZoneList<RelocInfo>* infos);
void RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- byte* old_start,
- byte* new_start);
+ Code* old_code,
+ Code* new_code);
void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
- void ProcessTypeFeedbackCells(Handle<Code> code);
// Returns an element from the backing store. Returns undefined if
// there is no information.
Handle<Object> GetInfo(TypeFeedbackId id);
- // Return the cell that contains type feedback.
- Handle<Cell> GetInfoCell(TypeFeedbackId id);
+ // Returns an element from the type feedback vector. Returns undefined
+ // if there is no information.
+ Handle<Object> GetInfo(int slot);
private:
Handle<Context> native_context_;
- Isolate* isolate_;
Zone* zone_;
Handle<UnseededNumberDictionary> dictionary_;
+ Handle<FixedArray> feedback_vector_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
diff --git a/chromium/v8/src/typedarray.js b/chromium/v8/src/typedarray.js
index 0a06ebbdd42..d2f5ae86923 100644
--- a/chromium/v8/src/typedarray.js
+++ b/chromium/v8/src/typedarray.js
@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
"use strict";
@@ -48,130 +25,167 @@ FUNCTION(9, Uint8ClampedArray, 1)
endmacro
macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
- function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
- var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- var offset;
- if (IS_UNDEFINED(byteOffset)) {
- offset = 0;
- } else {
- offset = ToPositiveInteger(byteOffset, "invalid_typed_array_length");
+function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ if (!IS_UNDEFINED(byteOffset)) {
+ byteOffset =
+ ToPositiveInteger(byteOffset, "invalid_typed_array_length");
+ }
+ if (!IS_UNDEFINED(length)) {
+ length = ToPositiveInteger(length, "invalid_typed_array_length");
+ }
- if (offset % ELEMENT_SIZE !== 0) {
- throw MakeRangeError("invalid_typed_array_alignment",
- "start offset", "NAME", ELEMENT_SIZE);
- }
- if (offset > bufferByteLength) {
- throw MakeRangeError("invalid_typed_array_offset");
- }
- }
+ var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
+ var offset;
+ if (IS_UNDEFINED(byteOffset)) {
+ offset = 0;
+ } else {
+ offset = byteOffset;
- var newByteLength;
- var newLength;
- if (IS_UNDEFINED(length)) {
- if (bufferByteLength % ELEMENT_SIZE !== 0) {
- throw MakeRangeError("invalid_typed_array_alignment",
- "byte length", "NAME", ELEMENT_SIZE);
- }
- newByteLength = bufferByteLength - offset;
- newLength = newByteLength / ELEMENT_SIZE;
- } else {
- var newLength = ToPositiveInteger(length, "invalid_typed_array_length");
- newByteLength = newLength * ELEMENT_SIZE;
+ if (offset % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ ["start offset", "NAME", ELEMENT_SIZE]);
}
- if ((offset + newByteLength > bufferByteLength)
- || (newLength > %MaxSmi())) {
- throw MakeRangeError("invalid_typed_array_length");
+ if (offset > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_offset");
}
- %TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
}
- function NAMEConstructByLength(obj, length) {
- var l = IS_UNDEFINED(length) ?
- 0 : ToPositiveInteger(length, "invalid_typed_array_length");
- if (l > %MaxSmi()) {
- throw MakeRangeError("invalid_typed_array_length");
+ var newByteLength;
+ var newLength;
+ if (IS_UNDEFINED(length)) {
+ if (bufferByteLength % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ ["byte length", "NAME", ELEMENT_SIZE]);
}
- var byteLength = l * ELEMENT_SIZE;
+ newByteLength = bufferByteLength - offset;
+ newLength = newByteLength / ELEMENT_SIZE;
+ } else {
+ var newLength = length;
+ newByteLength = newLength * ELEMENT_SIZE;
+ }
+ if ((offset + newByteLength > bufferByteLength)
+ || (newLength > %_MaxSmi())) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
+}
+
+function NAMEConstructByLength(obj, length) {
+ var l = IS_UNDEFINED(length) ?
+ 0 : ToPositiveInteger(length, "invalid_typed_array_length");
+ if (l > %_MaxSmi()) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ var byteLength = l * ELEMENT_SIZE;
+ if (byteLength > %_TypedArrayMaxSizeInHeap()) {
var buffer = new $ArrayBuffer(byteLength);
- %TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
+ } else {
+ %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength);
}
+}
- function NAMEConstructByArrayLike(obj, arrayLike) {
- var length = arrayLike.length;
- var l = ToPositiveInteger(length, "invalid_typed_array_length");
- if (l > %MaxSmi()) {
- throw MakeRangeError("invalid_typed_array_length");
- }
- if(!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) {
- for (var i = 0; i < l; i++) {
- // It is crucial that we let any exceptions from arrayLike[i]
- // propagate outside the function.
- obj[i] = arrayLike[i];
- }
+function NAMEConstructByArrayLike(obj, arrayLike) {
+ var length = arrayLike.length;
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
+
+ if (l > %_MaxSmi()) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ if (!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) {
+ for (var i = 0; i < l; i++) {
+ // It is crucial that we let any exceptions from arrayLike[i]
+ // propagate outside the function.
+ obj[i] = arrayLike[i];
}
}
+}
- function NAMEConstructor(arg1, arg2, arg3) {
-
- if (%_IsConstructCall()) {
- if (IS_ARRAYBUFFER(arg1)) {
- NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
- } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
- IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
- NAMEConstructByLength(this, arg1);
- } else {
- NAMEConstructByArrayLike(this, arg1);
- }
+function NAMEConstructor(arg1, arg2, arg3) {
+ if (%_IsConstructCall()) {
+ if (IS_ARRAYBUFFER(arg1)) {
+ NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
+ } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
+ IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
+ NAMEConstructByLength(this, arg1);
} else {
- throw MakeTypeError("constructor_not_function", ["NAME"])
+ NAMEConstructByArrayLike(this, arg1);
}
+ } else {
+ throw MakeTypeError("constructor_not_function", ["NAME"])
}
-endmacro
-
-TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
+}
-function TypedArrayGetBuffer() {
+function NAME_GetBuffer() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.buffer", this]);
+ }
return %TypedArrayGetBuffer(this);
}
-function TypedArrayGetByteLength() {
- return %TypedArrayGetByteLength(this);
+function NAME_GetByteLength() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.byteLength", this]);
+ }
+ return %_ArrayBufferViewGetByteLength(this);
}
-function TypedArrayGetByteOffset() {
- return %TypedArrayGetByteOffset(this);
+function NAME_GetByteOffset() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.byteOffset", this]);
+ }
+ return %_ArrayBufferViewGetByteOffset(this);
}
-function TypedArrayGetLength() {
- return %TypedArrayGetLength(this);
+function NAME_GetLength() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.length", this]);
+ }
+ return %_TypedArrayGetLength(this);
}
-function CreateSubArray(elementSize, constructor) {
- return function(begin, end) {
- var srcLength = %TypedArrayGetLength(this);
- var beginInt = TO_INTEGER(begin);
- if (beginInt < 0) {
- beginInt = MathMax(0, srcLength + beginInt);
- } else {
- beginInt = MathMin(srcLength, beginInt);
- }
+var $NAME = global.NAME;
- var endInt = IS_UNDEFINED(end) ? srcLength : TO_INTEGER(end);
- if (endInt < 0) {
- endInt = MathMax(0, srcLength + endInt);
- } else {
- endInt = MathMin(endInt, srcLength);
- }
- if (endInt < beginInt) {
- endInt = beginInt;
- }
- var newLength = endInt - beginInt;
- var beginByteOffset =
- %TypedArrayGetByteOffset(this) + beginInt * elementSize;
- return new constructor(%TypedArrayGetBuffer(this),
- beginByteOffset, newLength);
+function NAMESubArray(begin, end) {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.subarray", this]);
+ }
+ var beginInt = TO_INTEGER(begin);
+ if (!IS_UNDEFINED(end)) {
+ end = TO_INTEGER(end);
+ }
+
+ var srcLength = %_TypedArrayGetLength(this);
+ if (beginInt < 0) {
+ beginInt = MathMax(0, srcLength + beginInt);
+ } else {
+ beginInt = MathMin(srcLength, beginInt);
+ }
+
+ var endInt = IS_UNDEFINED(end) ? srcLength : end;
+ if (endInt < 0) {
+ endInt = MathMax(0, srcLength + endInt);
+ } else {
+ endInt = MathMin(endInt, srcLength);
+ }
+ if (endInt < beginInt) {
+ endInt = beginInt;
}
+ var newLength = endInt - beginInt;
+ var beginByteOffset =
+ %_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
+ return new $NAME(%TypedArrayGetBuffer(this),
+ beginByteOffset, newLength);
}
+endmacro
+
+TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
+
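Editor's note: the construct-by-buffer path validates alignment before deriving the view length: the offset must be a multiple of ELEMENT_SIZE and inside the buffer, and when length is omitted the remaining bytes must divide evenly. A worked sketch of that arithmetic for a 4-byte element type such as Int32Array (LengthFromBuffer is an illustrative name; inputs are assumed already coerced to non-negative integers):

#include <cassert>
#include <cstdint>

// Derive the view length when the caller omits it, mirroring the checks
// in NAMEConstructByArrayBuffer.
int64_t LengthFromBuffer(int64_t buffer_bytes, int64_t offset,
                         int element_size) {
  assert(offset % element_size == 0);                   // alignment check
  assert(offset <= buffer_bytes);                       // offset inside buffer
  assert((buffer_bytes - offset) % element_size == 0);  // byte length check
  return (buffer_bytes - offset) / element_size;
}

int main() {
  // A 16-byte buffer viewed as Int32Array at byteOffset 4 has 3 elements.
  assert(LengthFromBuffer(16, 4, 4) == 3);
}
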
function TypedArraySetFromArrayLike(target, source, sourceLength, offset) {
if (offset > 0) {
@@ -243,6 +257,10 @@ function TypedArraySet(obj, offset) {
if (intOffset < 0) {
throw MakeTypeError("typed_array_set_negative_offset");
}
+
+ if (intOffset > %_MaxSmi()) {
+ throw MakeRangeError("typed_array_set_source_too_large");
+ }
switch (%TypedArraySetFastCases(this, obj, intOffset)) {
// These numbers should be synchronized with runtime.cc.
case 0: // TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE
@@ -275,34 +293,34 @@ function TypedArraySet(obj, offset) {
// -------------------------------------------------------------------
-function SetupTypedArray(constructor, fun, elementSize) {
+function SetupTypedArrays() {
+macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
%CheckIsBootstrapping();
- %SetCode(constructor, fun);
- %FunctionSetPrototype(constructor, new $Object());
+ %SetCode(global.NAME, NAMEConstructor);
+ %FunctionSetPrototype(global.NAME, new $Object());
- %SetProperty(constructor, "BYTES_PER_ELEMENT", elementSize,
+ %SetProperty(global.NAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
READ_ONLY | DONT_ENUM | DONT_DELETE);
- %SetProperty(constructor.prototype,
- "constructor", constructor, DONT_ENUM);
- %SetProperty(constructor.prototype,
- "BYTES_PER_ELEMENT", elementSize,
+ %SetProperty(global.NAME.prototype,
+ "constructor", global.NAME, DONT_ENUM);
+ %SetProperty(global.NAME.prototype,
+ "BYTES_PER_ELEMENT", ELEMENT_SIZE,
READ_ONLY | DONT_ENUM | DONT_DELETE);
- InstallGetter(constructor.prototype, "buffer", TypedArrayGetBuffer);
- InstallGetter(constructor.prototype, "byteOffset", TypedArrayGetByteOffset);
- InstallGetter(constructor.prototype, "byteLength", TypedArrayGetByteLength);
- InstallGetter(constructor.prototype, "length", TypedArrayGetLength);
+ InstallGetter(global.NAME.prototype, "buffer", NAME_GetBuffer);
+ InstallGetter(global.NAME.prototype, "byteOffset", NAME_GetByteOffset);
+ InstallGetter(global.NAME.prototype, "byteLength", NAME_GetByteLength);
+ InstallGetter(global.NAME.prototype, "length", NAME_GetLength);
- InstallFunctions(constructor.prototype, DONT_ENUM, $Array(
- "subarray", CreateSubArray(elementSize, constructor),
+ InstallFunctions(global.NAME.prototype, DONT_ENUM, $Array(
+ "subarray", NAMESubArray,
"set", TypedArraySet
));
-}
-
-macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
- SetupTypedArray (global.NAME, NAMEConstructor, ELEMENT_SIZE);
endmacro
TYPED_ARRAYS(SETUP_TYPED_ARRAY)
+}
+
+SetupTypedArrays();
// --------------------------- DataView -----------------------------
@@ -313,24 +331,33 @@ function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
if (!IS_ARRAYBUFFER(buffer)) {
throw MakeTypeError('data_view_not_array_buffer', []);
}
- var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- var offset = IS_UNDEFINED(byteOffset) ?
- 0 : ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
+ if (!IS_UNDEFINED(byteOffset)) {
+ byteOffset = ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
+ }
+ if (!IS_UNDEFINED(byteLength)) {
+ byteLength = TO_INTEGER(byteLength);
+ }
+
+ var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
+
+ var offset = IS_UNDEFINED(byteOffset) ? 0 : byteOffset;
if (offset > bufferByteLength) {
throw MakeRangeError('invalid_data_view_offset');
}
- var length = IS_UNDEFINED(byteLength) ?
- bufferByteLength - offset : TO_INTEGER(byteLength);
+
+ var length = IS_UNDEFINED(byteLength)
+ ? bufferByteLength - offset
+ : byteLength;
if (length < 0 || offset + length > bufferByteLength) {
throw new MakeRangeError('invalid_data_view_length');
}
- %DataViewInitialize(this, buffer, offset, length);
+ %_DataViewInitialize(this, buffer, offset, length);
} else {
throw MakeTypeError('constructor_not_function', ["DataView"]);
}
}
-function DataViewGetBuffer() {
+function DataViewGetBufferJS() {
if (!IS_DATAVIEW(this)) {
throw MakeTypeError('incompatible_method_receiver',
['DataView.buffer', this]);
@@ -343,7 +370,7 @@ function DataViewGetByteOffset() {
throw MakeTypeError('incompatible_method_receiver',
['DataView.byteOffset', this]);
}
- return %DataViewGetByteOffset(this);
+ return %_ArrayBufferViewGetByteOffset(this);
}
function DataViewGetByteLength() {
@@ -351,7 +378,7 @@ function DataViewGetByteLength() {
throw MakeTypeError('incompatible_method_receiver',
['DataView.byteLength', this]);
}
- return %DataViewGetByteLength(this);
+ return %_ArrayBufferViewGetByteLength(this);
}
macro DATA_VIEW_TYPES(FUNCTION)
@@ -371,7 +398,7 @@ function ToPositiveDataViewOffset(offset) {
macro DATA_VIEW_GETTER_SETTER(TYPENAME)
-function DataViewGetTYPENAME(offset, little_endian) {
+function DataViewGetTYPENAMEJS(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
throw MakeTypeError('incompatible_method_receiver',
['DataView.getTYPENAME', this]);
@@ -384,7 +411,7 @@ function DataViewGetTYPENAME(offset, little_endian) {
!!little_endian);
}
-function DataViewSetTYPENAME(offset, value, little_endian) {
+function DataViewSetTYPENAMEJS(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
throw MakeTypeError('incompatible_method_receiver',
['DataView.setTYPENAME', this]);
@@ -411,34 +438,34 @@ function SetupDataView() {
// Set up constructor property on the DataView prototype.
%SetProperty($DataView.prototype, "constructor", $DataView, DONT_ENUM);
- InstallGetter($DataView.prototype, "buffer", DataViewGetBuffer);
+ InstallGetter($DataView.prototype, "buffer", DataViewGetBufferJS);
InstallGetter($DataView.prototype, "byteOffset", DataViewGetByteOffset);
InstallGetter($DataView.prototype, "byteLength", DataViewGetByteLength);
InstallFunctions($DataView.prototype, DONT_ENUM, $Array(
- "getInt8", DataViewGetInt8,
- "setInt8", DataViewSetInt8,
+ "getInt8", DataViewGetInt8JS,
+ "setInt8", DataViewSetInt8JS,
- "getUint8", DataViewGetUint8,
- "setUint8", DataViewSetUint8,
+ "getUint8", DataViewGetUint8JS,
+ "setUint8", DataViewSetUint8JS,
- "getInt16", DataViewGetInt16,
- "setInt16", DataViewSetInt16,
+ "getInt16", DataViewGetInt16JS,
+ "setInt16", DataViewSetInt16JS,
- "getUint16", DataViewGetUint16,
- "setUint16", DataViewSetUint16,
+ "getUint16", DataViewGetUint16JS,
+ "setUint16", DataViewSetUint16JS,
- "getInt32", DataViewGetInt32,
- "setInt32", DataViewSetInt32,
+ "getInt32", DataViewGetInt32JS,
+ "setInt32", DataViewSetInt32JS,
- "getUint32", DataViewGetUint32,
- "setUint32", DataViewSetUint32,
+ "getUint32", DataViewGetUint32JS,
+ "setUint32", DataViewSetUint32JS,
- "getFloat32", DataViewGetFloat32,
- "setFloat32", DataViewSetFloat32,
+ "getFloat32", DataViewGetFloat32JS,
+ "setFloat32", DataViewSetFloat32JS,
- "getFloat64", DataViewGetFloat64,
- "setFloat64", DataViewSetFloat64
+ "getFloat64", DataViewGetFloat64JS,
+ "setFloat64", DataViewSetFloat64JS
));
}
diff --git a/chromium/v8/src/types-inl.h b/chromium/v8/src/types-inl.h
new file mode 100644
index 00000000000..0bdd4638f88
--- /dev/null
+++ b/chromium/v8/src/types-inl.h
@@ -0,0 +1,336 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TYPES_INL_H_
+#define V8_TYPES_INL_H_
+
+#include "src/types.h"
+
+#include "src/factory.h"
+#include "src/handles-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// TypeImpl
+
+template<class Config>
+TypeImpl<Config>* TypeImpl<Config>::cast(typename Config::Base* object) {
+ TypeImpl* t = static_cast<TypeImpl*>(object);
+ ASSERT(t->IsBitset() || t->IsClass() || t->IsConstant() ||
+ t->IsUnion() || t->IsArray() || t->IsFunction() || t->IsContext());
+ return t;
+}
+
+
+// Most precise _current_ type of a value (usually its class).
+template<class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NowOf(
+ i::Object* value, Region* region) {
+ if (value->IsSmi() ||
+ i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) {
+ return Of(value, region);
+ }
+ return Class(i::handle(i::HeapObject::cast(value)->map()), region);
+}
+
+
+template<class Config>
+bool TypeImpl<Config>::NowContains(i::Object* value) {
+ DisallowHeapAllocation no_allocation;
+ if (this->IsAny()) return true;
+ if (value->IsHeapObject()) {
+ i::Map* map = i::HeapObject::cast(value)->map();
+ for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) {
+ if (*it.Current() == map) return true;
+ }
+ }
+ return this->Contains(value);
+}
+
+
+// -----------------------------------------------------------------------------
+// ZoneTypeConfig
+
+// static
+template<class T>
+T* ZoneTypeConfig::handle(T* type) {
+ return type;
+}
+
+
+// static
+template<class T>
+T* ZoneTypeConfig::cast(Type* type) {
+ return static_cast<T*>(type);
+}
+
+
+// static
+bool ZoneTypeConfig::is_bitset(Type* type) {
+ return reinterpret_cast<intptr_t>(type) & 1;
+}
+
+
+// static
+bool ZoneTypeConfig::is_struct(Type* type, int tag) {
+ return !is_bitset(type) && struct_tag(as_struct(type)) == tag;
+}
+
+
+// static
+bool ZoneTypeConfig::is_class(Type* type) {
+ return false;
+}
+
+
+// static
+int ZoneTypeConfig::as_bitset(Type* type) {
+ ASSERT(is_bitset(type));
+ return static_cast<int>(reinterpret_cast<intptr_t>(type) >> 1);
+}
+
+
+// static
+ZoneTypeConfig::Struct* ZoneTypeConfig::as_struct(Type* type) {
+ ASSERT(!is_bitset(type));
+ return reinterpret_cast<Struct*>(type);
+}
+
+
+// static
+i::Handle<i::Map> ZoneTypeConfig::as_class(Type* type) {
+ UNREACHABLE();
+ return i::Handle<i::Map>();
+}
+
+
+// static
+ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(int bitset) {
+ return reinterpret_cast<Type*>((bitset << 1) | 1);
+}
+
+
+// static
+ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(int bitset, Zone* zone) {
+ return from_bitset(bitset);
+}
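
Editor's note: ZoneTypeConfig stores a bitset type directly in the pointer: the bitset is shifted left by one and the low bit is set as a tag, so is_bitset is a single bit test and no zone memory is touched. A round-trip sketch of that tagging (standalone toy, not V8 types):

#include <cassert>
#include <cstdint>

// Tagged-pointer encoding: bitsets live in the pointer itself.
void* FromBitset(int bitset) {
  return reinterpret_cast<void*>((static_cast<intptr_t>(bitset) << 1) | 1);
}
bool IsBitset(void* p) { return (reinterpret_cast<intptr_t>(p) & 1) != 0; }
int AsBitset(void* p) {
  return static_cast<int>(reinterpret_cast<intptr_t>(p) >> 1);
}

int main() {
  void* type = FromBitset(0x11);
  assert(IsBitset(type));
  assert(AsBitset(type) == 0x11);  // round trip preserves the bitset
}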
+
+
+// static
+ZoneTypeConfig::Type* ZoneTypeConfig::from_struct(Struct* structure) {
+ return reinterpret_cast<Type*>(structure);
+}
+
+
+// static
+ZoneTypeConfig::Type* ZoneTypeConfig::from_class(
+ i::Handle<i::Map> map, Zone* zone) {
+ return from_bitset(0);
+}
+
+
+// static
+ZoneTypeConfig::Struct* ZoneTypeConfig::struct_create(
+ int tag, int length, Zone* zone) {
+ Struct* structure = reinterpret_cast<Struct*>(
+ zone->New(sizeof(void*) * (length + 2))); // NOLINT
+ structure[0] = reinterpret_cast<void*>(tag);
+ structure[1] = reinterpret_cast<void*>(length);
+ return structure;
+}
+
+
+// static
+void ZoneTypeConfig::struct_shrink(Struct* structure, int length) {
+ ASSERT(0 <= length && length <= struct_length(structure));
+ structure[1] = reinterpret_cast<void*>(length);
+}
+
+
+// static
+int ZoneTypeConfig::struct_tag(Struct* structure) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(structure[0]));
+}
+
+
+// static
+int ZoneTypeConfig::struct_length(Struct* structure) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(structure[1]));
+}
+
+
+// static
+Type* ZoneTypeConfig::struct_get(Struct* structure, int i) {
+ ASSERT(0 <= i && i <= struct_length(structure));
+ return static_cast<Type*>(structure[2 + i]);
+}
+
+
+// static
+void ZoneTypeConfig::struct_set(Struct* structure, int i, Type* x) {
+ ASSERT(0 <= i && i <= struct_length(structure));
+ structure[2 + i] = x;
+}
+
+
+// static
+template<class V>
+i::Handle<V> ZoneTypeConfig::struct_get_value(Struct* structure, int i) {
+ ASSERT(0 <= i && i <= struct_length(structure));
+ return i::Handle<V>(static_cast<V**>(structure[2 + i]));
+}
+
+
+// static
+template<class V>
+void ZoneTypeConfig::struct_set_value(
+ Struct* structure, int i, i::Handle<V> x) {
+ ASSERT(0 <= i && i <= struct_length(structure));
+ structure[2 + i] = x.location();
+}
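
Editor's note: a zone struct is just a void*[length + 2] block: slot 0 carries the tag, slot 1 the length, and the payload starts at slot 2, which is why struct_get reads structure[2 + i]. A minimal standalone model of that layout (malloc stands in for zone allocation):

#include <cassert>
#include <cstdint>
#include <cstdlib>

void** StructCreate(int tag, int length) {
  void** s = static_cast<void**>(std::malloc(sizeof(void*) * (length + 2)));
  s[0] = reinterpret_cast<void*>(static_cast<intptr_t>(tag));     // tag slot
  s[1] = reinterpret_cast<void*>(static_cast<intptr_t>(length));  // length slot
  return s;
}
int StructTag(void** s) {
  return static_cast<int>(reinterpret_cast<intptr_t>(s[0]));
}
int StructLength(void** s) {
  return static_cast<int>(reinterpret_cast<intptr_t>(s[1]));
}

int main() {
  void** s = StructCreate(7, 3);
  assert(StructTag(s) == 7 && StructLength(s) == 3);
  s[2 + 0] = s;  // payload slots start at index 2
  std::free(s);
}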
+
+
+// -----------------------------------------------------------------------------
+// HeapTypeConfig
+
+// static
+template<class T>
+i::Handle<T> HeapTypeConfig::handle(T* type) {
+ return i::handle(type, i::HeapObject::cast(type)->GetIsolate());
+}
+
+
+// static
+template<class T>
+i::Handle<T> HeapTypeConfig::cast(i::Handle<Type> type) {
+ return i::Handle<T>::cast(type);
+}
+
+
+// static
+bool HeapTypeConfig::is_bitset(Type* type) {
+ return type->IsSmi();
+}
+
+
+// static
+bool HeapTypeConfig::is_class(Type* type) {
+ return type->IsMap();
+}
+
+
+// static
+bool HeapTypeConfig::is_struct(Type* type, int tag) {
+ return type->IsFixedArray() && struct_tag(as_struct(type)) == tag;
+}
+
+
+// static
+int HeapTypeConfig::as_bitset(Type* type) {
+ return i::Smi::cast(type)->value();
+}
+
+
+// static
+i::Handle<i::Map> HeapTypeConfig::as_class(Type* type) {
+ return i::handle(i::Map::cast(type));
+}
+
+
+// static
+i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::as_struct(Type* type) {
+ return i::handle(Struct::cast(type));
+}
+
+
+// static
+HeapTypeConfig::Type* HeapTypeConfig::from_bitset(int bitset) {
+ return Type::cast(i::Smi::FromInt(bitset));
+}
+
+
+// static
+i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_bitset(
+ int bitset, Isolate* isolate) {
+ return i::handle(from_bitset(bitset), isolate);
+}
+
+
+// static
+i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_class(
+ i::Handle<i::Map> map, Isolate* isolate) {
+ return i::Handle<Type>::cast(i::Handle<Object>::cast(map));
+}
+
+
+// static
+i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_struct(
+ i::Handle<Struct> structure) {
+ return i::Handle<Type>::cast(i::Handle<Object>::cast(structure));
+}
+
+
+// static
+i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::struct_create(
+ int tag, int length, Isolate* isolate) {
+ i::Handle<Struct> structure = isolate->factory()->NewFixedArray(length + 1);
+ structure->set(0, i::Smi::FromInt(tag));
+ return structure;
+}
+
+
+// static
+void HeapTypeConfig::struct_shrink(i::Handle<Struct> structure, int length) {
+ structure->Shrink(length + 1);
+}
+
+
+// static
+int HeapTypeConfig::struct_tag(i::Handle<Struct> structure) {
+ return static_cast<i::Smi*>(structure->get(0))->value();
+}
+
+
+// static
+int HeapTypeConfig::struct_length(i::Handle<Struct> structure) {
+ return structure->length() - 1;
+}
+
+
+// static
+i::Handle<HeapTypeConfig::Type> HeapTypeConfig::struct_get(
+ i::Handle<Struct> structure, int i) {
+ Type* type = static_cast<Type*>(structure->get(i + 1));
+ return i::handle(type, structure->GetIsolate());
+}
+
+
+// static
+void HeapTypeConfig::struct_set(
+ i::Handle<Struct> structure, int i, i::Handle<Type> type) {
+ structure->set(i + 1, *type);
+}
+
+
+// static
+template<class V>
+i::Handle<V> HeapTypeConfig::struct_get_value(
+ i::Handle<Struct> structure, int i) {
+ V* x = static_cast<V*>(structure->get(i + 1));
+ return i::handle(x, structure->GetIsolate());
+}
+
+
+// static
+template<class V>
+void HeapTypeConfig::struct_set_value(
+ i::Handle<Struct> structure, int i, i::Handle<V> x) {
+ structure->set(i + 1, *x);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_TYPES_INL_H_
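
Editor's note: the whole file is parameterized the same way types.cc is: TypeImpl<Config> asks its Config for every representation decision, so one algorithm serves both the zone encoding above and the heap encoding (Smi for bitsets, Map for classes, FixedArray for structs). A minimal illustration of that policy-template shape (IntConfig and TypeOps are invented names for the sketch):

#include <cassert>

// One generic algorithm over interchangeable representations, selected
// by a Config policy supplying the encoding primitives.
struct IntConfig {
  typedef int Type;
  static int as_bitset(Type t) { return t; }
};

template<class Config>
struct TypeOps {
  // Bitset subtyping: a <= b iff a's bits are contained in b's.
  static bool Is(typename Config::Type a, typename Config::Type b) {
    return (Config::as_bitset(a) | Config::as_bitset(b)) ==
           Config::as_bitset(b);
  }
};

int main() {
  assert(TypeOps<IntConfig>::Is(0x01, 0x11));
  assert(!TypeOps<IntConfig>::Is(0x11, 0x01));
}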
diff --git a/chromium/v8/src/types.cc b/chromium/v8/src/types.cc
index 485ba885187..22694c06e84 100644
--- a/chromium/v8/src/types.cc
+++ b/chromium/v8/src/types.cc
@@ -1,155 +1,159 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "types.h"
-#include "string-stream.h"
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/types.h"
+
+#include "src/string-stream.h"
+#include "src/types-inl.h"
namespace v8 {
namespace internal {
-int Type::NumClasses() {
- if (is_class()) {
- return 1;
- } else if (is_union()) {
- Handle<Unioned> unioned = as_union();
- int result = 0;
- for (int i = 0; i < unioned->length(); ++i) {
- if (union_get(unioned, i)->is_class()) ++result;
+// -----------------------------------------------------------------------------
+// Glb and lub computation.
+
+// The largest bitset subsumed by this type.
+template<class Config>
+int TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
+ DisallowHeapAllocation no_allocation;
+ if (type->IsBitset()) {
+ return type->AsBitset();
+ } else if (type->IsUnion()) {
+ UnionHandle unioned = handle(type->AsUnion());
+ int bitset = kNone;
+ for (int i = 0; i < unioned->Length(); ++i) {
+ bitset |= unioned->Get(i)->BitsetGlb();
}
- return result;
+ return bitset;
+ } else if (type->IsClass()) {
+ // Little hack to avoid the need for a region for handlification here...
+ return REPRESENTATION(Config::is_class(type)
+ ? Lub(*Config::as_class(type))
+ : type->AsClass()->Bound(NULL)->AsBitset());
+ } else if (type->IsConstant()) {
+ return REPRESENTATION(type->AsConstant()->Bound()->AsBitset());
+ } else if (type->IsContext()) {
+ return REPRESENTATION(type->AsContext()->Bound()->AsBitset());
+ } else if (type->IsArray()) {
+ return REPRESENTATION(type->AsArray()->Bound()->AsBitset());
+ } else if (type->IsFunction()) {
+ return REPRESENTATION(type->AsFunction()->Bound()->AsBitset());
} else {
- return 0;
+ UNREACHABLE();
+ return kNone;
}
}
-int Type::NumConstants() {
- if (is_constant()) {
- return 1;
- } else if (is_union()) {
- Handle<Unioned> unioned = as_union();
- int result = 0;
- for (int i = 0; i < unioned->length(); ++i) {
- if (union_get(unioned, i)->is_constant()) ++result;
+// The smallest bitset subsuming this type.
+template<class Config>
+int TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
+ DisallowHeapAllocation no_allocation;
+ if (type->IsBitset()) {
+ return type->AsBitset();
+ } else if (type->IsUnion()) {
+ UnionHandle unioned = handle(type->AsUnion());
+ int bitset = kNone;
+ for (int i = 0; i < unioned->Length(); ++i) {
+ bitset |= unioned->Get(i)->BitsetLub();
}
- return result;
+ return bitset;
+ } else if (type->IsClass()) {
+ // Little hack to avoid the need for a region for handlification here...
+ return Config::is_class(type) ? Lub(*Config::as_class(type)) :
+ type->AsClass()->Bound(NULL)->AsBitset();
+ } else if (type->IsConstant()) {
+ return type->AsConstant()->Bound()->AsBitset();
+ } else if (type->IsContext()) {
+ return type->AsContext()->Bound()->AsBitset();
+ } else if (type->IsArray()) {
+ return type->AsArray()->Bound()->AsBitset();
+ } else if (type->IsFunction()) {
+ return type->AsFunction()->Bound()->AsBitset();
} else {
- return 0;
+ UNREACHABLE();
+ return kNone;
}
}
-template<class T>
-Handle<Type> Type::Iterator<T>::get_type() {
- ASSERT(!Done());
- return type_->is_union() ? union_get(type_->as_union(), index_) : type_;
-}
-
-template<>
-Handle<i::Map> Type::Iterator<i::Map>::Current() {
- return get_type()->as_class();
-}
-
-template<>
-Handle<i::Object> Type::Iterator<i::Object>::Current() {
- return get_type()->as_constant();
+// The smallest bitset subsuming this type, ignoring explicit bounds.
+template<class Config>
+int TypeImpl<Config>::BitsetType::InherentLub(TypeImpl* type) {
+ DisallowHeapAllocation no_allocation;
+ if (type->IsBitset()) {
+ return type->AsBitset();
+ } else if (type->IsUnion()) {
+ UnionHandle unioned = handle(type->AsUnion());
+ int bitset = kNone;
+ for (int i = 0; i < unioned->Length(); ++i) {
+ bitset |= unioned->Get(i)->InherentBitsetLub();
+ }
+ return bitset;
+ } else if (type->IsClass()) {
+ return Lub(*type->AsClass()->Map());
+ } else if (type->IsConstant()) {
+ return Lub(*type->AsConstant()->Value());
+ } else if (type->IsContext()) {
+ return kInternal & kTaggedPtr;
+ } else if (type->IsArray()) {
+ return kArray;
+ } else if (type->IsFunction()) {
+ return kFunction;
+ } else {
+ UNREACHABLE();
+ return kNone;
+ }
}
-template<>
-bool Type::Iterator<i::Map>::matches(Handle<Type> type) {
- return type->is_class();
-}
-
-template<>
-bool Type::Iterator<i::Object>::matches(Handle<Type> type) {
- return type->is_constant();
+template<class Config>
+int TypeImpl<Config>::BitsetType::Lub(i::Object* value) {
+ DisallowHeapAllocation no_allocation;
+ if (value->IsNumber()) {
+ return Lub(value->Number()) & (value->IsSmi() ? kTaggedInt : kTaggedPtr);
+ }
+ return Lub(i::HeapObject::cast(value)->map());
}
-template<class T>
-void Type::Iterator<T>::Advance() {
- ++index_;
- if (type_->is_union()) {
- Handle<Unioned> unioned = type_->as_union();
- for (; index_ < unioned->length(); ++index_) {
- if (matches(union_get(unioned, index_))) return;
- }
- } else if (index_ == 0 && matches(type_)) {
- return;
- }
- index_ = -1;
+template<class Config>
+int TypeImpl<Config>::BitsetType::Lub(double value) {
+ DisallowHeapAllocation no_allocation;
+ if (i::IsMinusZero(value)) return kMinusZero;
+ if (std::isnan(value)) return kNaN;
+ if (IsUint32Double(value)) return Lub(FastD2UI(value));
+ if (IsInt32Double(value)) return Lub(FastD2I(value));
+ return kOtherNumber;
}
-template class Type::Iterator<i::Map>;
-template class Type::Iterator<i::Object>;
-
-// Get the smallest bitset subsuming this type.
-int Type::LubBitset() {
- if (this->is_bitset()) {
- return this->as_bitset();
- } else if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
- int bitset = kNone;
- for (int i = 0; i < unioned->length(); ++i) {
- bitset |= union_get(unioned, i)->LubBitset();
- }
- return bitset;
- } else if (this->is_class()) {
- return LubBitset(*this->as_class());
- } else {
- return LubBitset(*this->as_constant());
+template<class Config>
+int TypeImpl<Config>::BitsetType::Lub(int32_t value) {
+ if (value >= 0x40000000) {
+ return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
}
+ if (value >= 0) return kUnsignedSmall;
+ if (value >= -0x40000000) return kOtherSignedSmall;
+ return i::SmiValuesAre31Bits() ? kOtherSigned32 : kOtherSignedSmall;
}
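
Editor's note: Lub over integers buckets a value by whether it fits the Smi range; with 31-bit Smis the signed range is [-2^30, 2^30 - 1], which is exactly what the 0x40000000 comparisons test. A standalone version assuming 31-bit Smis (the Bucket names mirror the bitset constants but are local to the sketch):

#include <cassert>
#include <cstdint>

enum Bucket { kUnsignedSmall, kOtherSignedSmall,
              kOtherUnsigned31, kOtherSigned32 };

// Classification for 31-bit Smis: the Smi range is [-2^30, 2^30 - 1].
Bucket Lub31(int32_t v) {
  if (v >= 0x40000000) return kOtherUnsigned31;    // positive, above Smi range
  if (v >= 0) return kUnsignedSmall;               // non-negative Smi
  if (v >= -0x40000000) return kOtherSignedSmall;  // negative Smi
  return kOtherSigned32;                           // below Smi range
}

int main() {
  assert(Lub31(5) == kUnsignedSmall);
  assert(Lub31(-5) == kOtherSignedSmall);
  assert(Lub31(0x40000000) == kOtherUnsigned31);
  assert(Lub31(INT32_MIN) == kOtherSigned32);
}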
-int Type::LubBitset(i::Object* value) {
- if (value->IsSmi()) return kSmi;
- i::Map* map = i::HeapObject::cast(value)->map();
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- int32_t i;
- uint32_t u;
- if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
- if (value->ToUint32(&u)) return kUnsigned32;
- return kDouble;
+template<class Config>
+int TypeImpl<Config>::BitsetType::Lub(uint32_t value) {
+ DisallowHeapAllocation no_allocation;
+ if (value >= 0x80000000u) return kOtherUnsigned32;
+ if (value >= 0x40000000u) {
+ return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
}
- if (map->instance_type() == ODDBALL_TYPE) {
- if (value->IsUndefined()) return kUndefined;
- if (value->IsNull()) return kNull;
- if (value->IsBoolean()) return kBoolean;
- if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone?
- UNREACHABLE();
- }
- return Type::LubBitset(map);
+ return kUnsignedSmall;
}
-int Type::LubBitset(i::Map* map) {
+template<class Config>
+int TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
+ DisallowHeapAllocation no_allocation;
switch (map->instance_type()) {
case STRING_TYPE:
case ASCII_STRING_TYPE:
@@ -165,8 +169,6 @@ int Type::LubBitset(i::Map* map) {
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
case INTERNALIZED_STRING_TYPE:
case ASCII_INTERNALIZED_STRING_TYPE:
- case CONS_INTERNALIZED_STRING_TYPE:
- case CONS_ASCII_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
@@ -176,10 +178,20 @@ int Type::LubBitset(i::Map* map) {
return kString;
case SYMBOL_TYPE:
return kSymbol;
- case ODDBALL_TYPE:
- return kOddball;
+ case ODDBALL_TYPE: {
+ Heap* heap = map->GetHeap();
+ if (map == heap->undefined_map()) return kUndefined;
+ if (map == heap->the_hole_map()) return kAny; // TODO(rossberg): kNone?
+ if (map == heap->null_map()) return kNull;
+ if (map == heap->boolean_map()) return kBoolean;
+ ASSERT(map == heap->uninitialized_map() ||
+ map == heap->no_interceptor_result_sentinel_map() ||
+ map == heap->termination_exception_map() ||
+ map == heap->arguments_marker_map());
+ return kInternal & kTaggedPtr;
+ }
case HEAP_NUMBER_TYPE:
- return kDouble;
+ return kNumber & kTaggedPtr;
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_OBJECT_TYPE:
@@ -194,6 +206,8 @@ int Type::LubBitset(i::Map* map) {
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
if (map->is_undetectable()) return kUndetectable;
@@ -220,9 +234,11 @@ int Type::LubBitset(i::Map* map) {
return kDetectable;
case DECLARED_ACCESSOR_INFO_TYPE:
case EXECUTABLE_ACCESSOR_INFO_TYPE:
+ case SHARED_FUNCTION_INFO_TYPE:
case ACCESSOR_PAIR_TYPE:
case FIXED_ARRAY_TYPE:
- return kInternal;
+ case FOREIGN_TYPE:
+ return kInternal & kTaggedPtr;
default:
UNREACHABLE();
return kNone;
@@ -230,65 +246,79 @@ int Type::LubBitset(i::Map* map) {
}
-// Get the largest bitset subsumed by this type.
-int Type::GlbBitset() {
- if (this->is_bitset()) {
- return this->as_bitset();
- } else if (this->is_union()) {
- // All but the first are non-bitsets and thus would yield kNone anyway.
- return union_get(this->as_union(), 0)->GlbBitset();
- } else {
- return kNone;
- }
-}
-
-
-// Most precise _current_ type of a value (usually its class).
-Type* Type::OfCurrently(Handle<i::Object> value) {
- if (value->IsSmi()) return Smi();
- i::Map* map = i::HeapObject::cast(*value)->map();
- if (map->instance_type() == HEAP_NUMBER_TYPE ||
- map->instance_type() == ODDBALL_TYPE) {
- return Type::Of(value);
- }
- return Class(i::handle(map));
-}
-
+// -----------------------------------------------------------------------------
+// Predicates.
// Check this <= that.
-bool Type::SlowIs(Type* that) {
- // Fast path for bitsets.
- if (this->is_none()) return true;
- if (that->is_bitset()) {
- return (this->LubBitset() | that->as_bitset()) == that->as_bitset();
- }
+template<class Config>
+bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
+ DisallowHeapAllocation no_allocation;
- if (that->is_class()) {
- return this->is_class() && *this->as_class() == *that->as_class();
- }
- if (that->is_constant()) {
- return this->is_constant() && *this->as_constant() == *that->as_constant();
+ // Fast path for bitsets.
+ if (this->IsNone()) return true;
+ if (that->IsBitset()) {
+ return (BitsetType::Lub(this) | that->AsBitset()) == that->AsBitset();
+ }
+ if (this->IsBitset() && SEMANTIC(this->AsBitset()) == BitsetType::kNone) {
+ // Bitsets only have non-bitset supertypes along the representation axis.
+ int that_bitset = that->BitsetGlb();
+ return (this->AsBitset() | that_bitset) == that_bitset;
+ }
+
+ if (that->IsClass()) {
+ return this->IsClass()
+ && *this->AsClass()->Map() == *that->AsClass()->Map()
+ && ((Config::is_class(that) && Config::is_class(this)) ||
+ BitsetType::New(this->BitsetLub())->Is(
+ BitsetType::New(that->BitsetLub())));
+ }
+ if (that->IsConstant()) {
+ return this->IsConstant()
+ && *this->AsConstant()->Value() == *that->AsConstant()->Value()
+ && this->AsConstant()->Bound()->Is(that->AsConstant()->Bound());
+ }
+ if (that->IsContext()) {
+ return this->IsContext()
+ && this->AsContext()->Outer()->Equals(that->AsContext()->Outer());
+ }
+ if (that->IsArray()) {
+ return this->IsArray()
+ && this->AsArray()->Element()->Equals(that->AsArray()->Element());
+ }
+ if (that->IsFunction()) {
+ // We currently do not allow for any variance here, in order to keep
+ // Union and Intersect operations simple.
+ if (!this->IsFunction()) return false;
+ FunctionType* this_fun = this->AsFunction();
+ FunctionType* that_fun = that->AsFunction();
+ if (this_fun->Arity() != that_fun->Arity() ||
+ !this_fun->Result()->Equals(that_fun->Result()) ||
+ !that_fun->Receiver()->Equals(this_fun->Receiver())) {
+ return false;
+ }
+ for (int i = 0; i < this_fun->Arity(); ++i) {
+ if (!that_fun->Parameter(i)->Equals(this_fun->Parameter(i))) return false;
+ }
+ return true;
}
// (T1 \/ ... \/ Tn) <= T <=> (T1 <= T) /\ ... /\ (Tn <= T)
- if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> this_i = union_get(unioned, i);
- if (!this_i->Is(that)) return false;
+ if (this->IsUnion()) {
+ UnionHandle unioned = handle(this->AsUnion());
+ for (int i = 0; i < unioned->Length(); ++i) {
+ if (!unioned->Get(i)->Is(that)) return false;
}
return true;
}
// T <= (T1 \/ ... \/ Tn) <=> (T <= T1) \/ ... \/ (T <= Tn)
// (iff T is not a union)
- ASSERT(!this->is_union());
- if (that->is_union()) {
- Handle<Unioned> unioned = that->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> that_i = union_get(unioned, i);
- if (this->Is(that_i)) return true;
- if (this->is_bitset()) break; // Fast fail, no other field is a bitset.
+ ASSERT(!this->IsUnion());
+ if (that->IsUnion()) {
+ UnionHandle unioned = handle(that->AsUnion());
+ for (int i = 0; i < unioned->Length(); ++i) {
+ if (this->Is(unioned->Get(i))) return true;
+ if (this->IsBitset()) break; // Fast fail, only first field is a bitset.
}
return false;
}
@@ -297,291 +327,666 @@ bool Type::SlowIs(Type* that) {
}
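
Editor's note: SlowIs leans on the two union distribution laws quoted in its comments. Modelling types as bitsets (union is bitwise OR, and T1 <= T2 iff (T1 | T2) == T2) makes the first law easy to sanity-check; the second law is stated only for non-union T, which is why the bitset case gets its own early fast path. The names below are local to the sketch:

#include <cassert>

// T1 <= T2 for bitset types: T1's bits are contained in T2's.
bool Is(int t1, int t2) { return (t1 | t2) == t2; }

int main() {
  int t1 = 0x01, t2 = 0x02, t = 0x07;
  // (T1 \/ T2) <= T  <=>  (T1 <= T) /\ (T2 <= T)
  assert(Is(t1 | t2, t) == (Is(t1, t) && Is(t2, t)));
  int u = 0x04;  // a member that must be covered on the right-hand side
  assert(Is(t1 | t2, t1 | u) == (Is(t1, t1 | u) && Is(t2, t1 | u)));
  // T <= (T1 \/ T2) holds whenever T <= T1 or T <= T2 (T not a union).
  assert(Is(t1, t1 | t2));
}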
-bool Type::IsCurrently(Type* that) {
- return this->Is(that) ||
- (this->is_constant() && that->is_class() &&
- this->as_constant()->IsHeapObject() &&
- i::HeapObject::cast(*this->as_constant())->map() == *that->as_class());
+template<class Config>
+bool TypeImpl<Config>::NowIs(TypeImpl* that) {
+ DisallowHeapAllocation no_allocation;
+
+ // TODO(rossberg): this is incorrect for
+ // Union(Constant(V), T)->NowIs(Class(M))
+ // but fuzzing does not cover that!
+ if (this->IsConstant()) {
+ i::Object* object = *this->AsConstant()->Value();
+ if (object->IsHeapObject()) {
+ i::Map* map = i::HeapObject::cast(object)->map();
+ for (Iterator<i::Map> it = that->Classes(); !it.Done(); it.Advance()) {
+ if (*it.Current() == map) return true;
+ }
+ }
+ }
+ return this->Is(that);
}
-// Check this overlaps that.
-bool Type::Maybe(Type* that) {
- // Fast path for bitsets.
- if (this->is_bitset()) {
- return (this->as_bitset() & that->LubBitset()) != 0;
- }
- if (that->is_bitset()) {
- return (this->LubBitset() & that->as_bitset()) != 0;
+// Check if this contains only (currently) stable classes.
+template<class Config>
+bool TypeImpl<Config>::NowStable() {
+ DisallowHeapAllocation no_allocation;
+ for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) {
+ if (!it.Current()->is_stable()) return false;
}
+ return true;
+}
+
+
+// Check this overlaps that.
+template<class Config>
+bool TypeImpl<Config>::Maybe(TypeImpl* that) {
+ DisallowHeapAllocation no_allocation;
// (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
- if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> this_i = union_get(unioned, i);
- if (this_i->Maybe(that)) return true;
+ if (this->IsUnion()) {
+ UnionHandle unioned = handle(this->AsUnion());
+ for (int i = 0; i < unioned->Length(); ++i) {
+ if (unioned->Get(i)->Maybe(that)) return true;
}
return false;
}
// T overlaps (T1 \/ ... \/ Tn) <=> (T overlaps T1) \/ ... \/ (T overlaps Tn)
- if (that->is_union()) {
- Handle<Unioned> unioned = that->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> that_i = union_get(unioned, i);
- if (this->Maybe(that_i)) return true;
+ if (that->IsUnion()) {
+ UnionHandle unioned = handle(that->AsUnion());
+ for (int i = 0; i < unioned->Length(); ++i) {
+ if (this->Maybe(unioned->Get(i))) return true;
}
return false;
}
- ASSERT(!that->is_union());
- if (this->is_class()) {
- return that->is_class() && *this->as_class() == *that->as_class();
+ ASSERT(!this->IsUnion() && !that->IsUnion());
+ if (this->IsBitset()) {
+ return BitsetType::IsInhabited(this->AsBitset() & that->BitsetLub());
+ }
+ if (that->IsBitset()) {
+ return BitsetType::IsInhabited(this->BitsetLub() & that->AsBitset());
}
- if (this->is_constant()) {
- return that->is_constant() && *this->as_constant() == *that->as_constant();
+ if (this->IsClass()) {
+ return that->IsClass()
+ && *this->AsClass()->Map() == *that->AsClass()->Map();
+ }
+ if (this->IsConstant()) {
+ return that->IsConstant()
+ && *this->AsConstant()->Value() == *that->AsConstant()->Value();
+ }
+ if (this->IsContext()) {
+ return this->Equals(that);
+ }
+ if (this->IsArray()) {
+ // There is no variance!
+ return this->Equals(that);
+ }
+ if (this->IsFunction()) {
+ // There is no variance!
+ return this->Equals(that);
}
return false;
}
-bool Type::InUnion(Handle<Unioned> unioned, int current_size) {
- ASSERT(!this->is_union());
+// Check if value is contained in (inhabits) type.
+template<class Config>
+bool TypeImpl<Config>::Contains(i::Object* value) {
+ DisallowHeapAllocation no_allocation;
+ for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) {
+ if (*it.Current() == value) return true;
+ }
+ return BitsetType::New(BitsetType::Lub(value))->Is(this);
+}
+
+
+template<class Config>
+bool TypeImpl<Config>::UnionType::Wellformed() {
+ ASSERT(this->Length() >= 2);
+ for (int i = 0; i < this->Length(); ++i) {
+ ASSERT(!this->Get(i)->IsUnion());
+ if (i > 0) ASSERT(!this->Get(i)->IsBitset());
+ for (int j = 0; j < this->Length(); ++j) {
+ if (i != j) ASSERT(!this->Get(i)->Is(this->Get(j)));
+ }
+ }
+ return true;
+}
+
+
+// -----------------------------------------------------------------------------
+// Union and intersection
+
+template<class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Narrow(
+ int bitset, Region* region) {
+ TypeHandle bound = BitsetType::New(bitset, region);
+ if (this->IsClass()) {
+ return ClassType::New(this->AsClass()->Map(), bound, region);
+ } else if (this->IsConstant()) {
+ return ConstantType::New(this->AsConstant()->Value(), bound, region);
+ } else if (this->IsContext()) {
+ return ContextType::New(this->AsContext()->Outer(), bound, region);
+ } else if (this->IsArray()) {
+ return ArrayType::New(this->AsArray()->Element(), bound, region);
+ } else if (this->IsFunction()) {
+ FunctionType* function = this->AsFunction();
+ int arity = function->Arity();
+ FunctionHandle type = FunctionType::New(
+ function->Result(), function->Receiver(), bound, arity, region);
+ for (int i = 0; i < arity; ++i) {
+ type->InitParameter(i, function->Parameter(i));
+ }
+ return type;
+ }
+ UNREACHABLE();
+ return TypeHandle();
+}
+
+
+template<class Config>
+int TypeImpl<Config>::BoundBy(TypeImpl* that) {
+ ASSERT(!this->IsUnion());
+ if (that->IsUnion()) {
+ UnionType* unioned = that->AsUnion();
+ int length = unioned->Length();
+ int bitset = BitsetType::kNone;
+ for (int i = 0; i < length; ++i) {
+ bitset |= BoundBy(unioned->Get(i)->unhandle());
+ }
+ return bitset;
+ } else if (that->IsClass() && this->IsClass() &&
+ *this->AsClass()->Map() == *that->AsClass()->Map()) {
+ return that->BitsetLub();
+ } else if (that->IsConstant() && this->IsConstant() &&
+ *this->AsConstant()->Value() == *that->AsConstant()->Value()) {
+ return that->AsConstant()->Bound()->AsBitset();
+ } else if (that->IsContext() && this->IsContext() && this->Is(that)) {
+ return that->AsContext()->Bound()->AsBitset();
+ } else if (that->IsArray() && this->IsArray() && this->Is(that)) {
+ return that->AsArray()->Bound()->AsBitset();
+ } else if (that->IsFunction() && this->IsFunction() && this->Is(that)) {
+ return that->AsFunction()->Bound()->AsBitset();
+ }
+ return that->BitsetGlb();
+}
+
+
+template<class Config>
+int TypeImpl<Config>::IndexInUnion(
+ int bound, UnionHandle unioned, int current_size) {
+ ASSERT(!this->IsUnion());
for (int i = 0; i < current_size; ++i) {
- Handle<Type> type = union_get(unioned, i);
- if (this->Is(type)) return true;
+ TypeHandle that = unioned->Get(i);
+ if (that->IsBitset()) {
+ if ((bound | that->AsBitset()) == that->AsBitset()) return i;
+ } else if (that->IsClass() && this->IsClass()) {
+ if (*this->AsClass()->Map() == *that->AsClass()->Map()) return i;
+ } else if (that->IsConstant() && this->IsConstant()) {
+ if (*this->AsConstant()->Value() == *that->AsConstant()->Value())
+ return i;
+ } else if (that->IsContext() && this->IsContext()) {
+ if (this->Is(that)) return i;
+ } else if (that->IsArray() && this->IsArray()) {
+ if (this->Is(that)) return i;
+ } else if (that->IsFunction() && this->IsFunction()) {
+ if (this->Is(that)) return i;
+ }
}
- return false;
+ return -1;
}
-// Get non-bitsets from this which are not subsumed by union, store at unioned,
-// starting at index. Returns updated index.
-int Type::ExtendUnion(Handle<Unioned> result, int current_size) {
- int old_size = current_size;
- if (this->is_class() || this->is_constant()) {
- if (!this->InUnion(result, old_size)) result->set(current_size++, this);
- } else if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> type = union_get(unioned, i);
- ASSERT(i == 0 || !(type->is_bitset() || type->Is(union_get(unioned, 0))));
- if (type->is_bitset()) continue;
- if (!type->InUnion(result, old_size)) result->set(current_size++, *type);
+// Get non-bitsets from type, with their bounds refined against 'other'.
+// Store them in result, starting at index 'size'. Returns the updated size.
+template<class Config>
+int TypeImpl<Config>::ExtendUnion(
+ UnionHandle result, int size, TypeHandle type,
+ TypeHandle other, bool is_intersect, Region* region) {
+ int old_size = size;
+ if (type->IsUnion()) {
+ UnionHandle unioned = handle(type->AsUnion());
+ for (int i = 0; i < unioned->Length(); ++i) {
+ TypeHandle type_i = unioned->Get(i);
+ ASSERT(i == 0 || !(type_i->IsBitset() || type_i->Is(unioned->Get(0))));
+ if (!type_i->IsBitset()) {
+ size = ExtendUnion(result, size, type_i, other, is_intersect, region);
+ }
+ }
+ } else if (!type->IsBitset()) {
+ ASSERT(type->IsClass() || type->IsConstant() ||
+ type->IsArray() || type->IsFunction() || type->IsContext());
+ int inherent_bound = type->InherentBitsetLub();
+ int old_bound = type->BitsetLub();
+ int other_bound = type->BoundBy(other->unhandle()) & inherent_bound;
+ int new_bound =
+ is_intersect ? (old_bound & other_bound) : (old_bound | other_bound);
+ if (new_bound != BitsetType::kNone) {
+ int i = type->IndexInUnion(new_bound, result, old_size);
+ if (i == -1) {
+ i = size++;
+ } else if (result->Get(i)->IsBitset()) {
+ return size; // Already fully subsumed.
+ } else {
+ int type_i_bound = result->Get(i)->BitsetLub();
+ new_bound |= type_i_bound;
+ if (new_bound == type_i_bound) return size;
+ }
+ if (new_bound != old_bound) type = type->Narrow(new_bound, region);
+ result->Set(i, type);
}
}
- return current_size;
+ return size;
}
-// Union is O(1) on simple bit unions, but O(n*m) on structured unions.
-// TODO(rossberg): Should we use object sets somehow? Is it worth it?
-Type* Type::Union(Handle<Type> type1, Handle<Type> type2) {
+// If bitset is subsumed by another entry in the result, remove it.
+// (Only bitsets with empty semantic axis can be subtypes of non-bitsets.)
+template<class Config>
+int TypeImpl<Config>::NormalizeUnion(UnionHandle result, int size, int bitset) {
+ if (bitset != BitsetType::kNone && SEMANTIC(bitset) == BitsetType::kNone) {
+ for (int i = 1; i < size; ++i) {
+ int glb = result->Get(i)->BitsetGlb();
+ if ((bitset | glb) == glb) {
+ for (int j = 1; j < size; ++j) {
+ result->Set(j - 1, result->Get(j));
+ }
+ --size;
+ break;
+ }
+ }
+ }
+ return size;
+}
+
+
+// Union is O(1) on simple bitsets, but O(n*m) on structured unions.
+template<class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
+ TypeHandle type1, TypeHandle type2, Region* region) {
// Fast case: bit sets.
- if (type1->is_bitset() && type2->is_bitset()) {
- return from_bitset(type1->as_bitset() | type2->as_bitset());
+ if (type1->IsBitset() && type2->IsBitset()) {
+ return BitsetType::New(type1->AsBitset() | type2->AsBitset(), region);
}
// Fast case: top or bottom types.
- if (type1->SameValue(Type::Any())) return *type1;
- if (type2->SameValue(Type::Any())) return *type2;
- if (type1->SameValue(Type::None())) return *type2;
- if (type2->SameValue(Type::None())) return *type1;
+ if (type1->IsAny() || type2->IsNone()) return type1;
+ if (type2->IsAny() || type1->IsNone()) return type2;
// Semi-fast case: Unioned objects are neither involved nor produced.
- if (!(type1->is_union() || type2->is_union())) {
- if (type1->Is(type2)) return *type2;
- if (type2->Is(type1)) return *type1;
+ if (!(type1->IsUnion() || type2->IsUnion())) {
+ if (type1->Is(type2)) return type2;
+ if (type2->Is(type1)) return type1;
}
// Slow case: may need to produce a Unioned object.
- Isolate* isolate = NULL;
- int size = type1->is_bitset() || type2->is_bitset() ? 1 : 0;
- if (!type1->is_bitset()) {
- isolate = i::HeapObject::cast(*type1)->GetIsolate();
- size += (type1->is_union() ? type1->as_union()->length() : 1);
- }
- if (!type2->is_bitset()) {
- isolate = i::HeapObject::cast(*type2)->GetIsolate();
- size += (type2->is_union() ? type2->as_union()->length() : 1);
- }
- ASSERT(isolate != NULL);
- ASSERT(size >= 2);
- Handle<Unioned> unioned = isolate->factory()->NewFixedArray(size);
- size = 0;
+ int size = 0;
+ if (!type1->IsBitset()) {
+ size += (type1->IsUnion() ? type1->AsUnion()->Length() : 1);
+ }
+ if (!type2->IsBitset()) {
+ size += (type2->IsUnion() ? type2->AsUnion()->Length() : 1);
+ }
+ int bitset = type1->BitsetGlb() | type2->BitsetGlb();
+ if (bitset != BitsetType::kNone) ++size;
+ ASSERT(size >= 1);
- int bitset = type1->GlbBitset() | type2->GlbBitset();
- if (bitset != kNone) unioned->set(size++, from_bitset(bitset));
- size = type1->ExtendUnion(unioned, size);
- size = type2->ExtendUnion(unioned, size);
+ UnionHandle unioned = UnionType::New(size, region);
+ size = 0;
+ if (bitset != BitsetType::kNone) {
+ unioned->Set(size++, BitsetType::New(bitset, region));
+ }
+ size = ExtendUnion(unioned, size, type1, type2, false, region);
+ size = ExtendUnion(unioned, size, type2, type1, false, region);
+ size = NormalizeUnion(unioned, size, bitset);
if (size == 1) {
- return *union_get(unioned, 0);
- } else if (size == unioned->length()) {
- return from_handle(unioned);
- }
-
- // There was an overlap. Copy to smaller union.
- Handle<Unioned> result = isolate->factory()->NewFixedArray(size);
- for (int i = 0; i < size; ++i) result->set(i, unioned->get(i));
- return from_handle(result);
-}
-
-
-// Get non-bitsets from this which are also in that, store at unioned,
-// starting at index. Returns updated index.
-int Type::ExtendIntersection(
- Handle<Unioned> result, Handle<Type> that, int current_size) {
- int old_size = current_size;
- if (this->is_class() || this->is_constant()) {
- if (this->Is(that) && !this->InUnion(result, old_size))
- result->set(current_size++, this);
- } else if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> type = union_get(unioned, i);
- ASSERT(i == 0 || !(type->is_bitset() || type->Is(union_get(unioned, 0))));
- if (type->is_bitset()) continue;
- if (type->Is(that) && !type->InUnion(result, old_size))
- result->set(current_size++, *type);
- }
+ return unioned->Get(0);
+ } else {
+ unioned->Shrink(size);
+ ASSERT(unioned->Wellformed());
+ return unioned;
}
- return current_size;
}
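+
+// Editor's sketch (not part of this patch): two bitsets fold into a single
+// bitset on the fast path, while adding a class type takes the slow path and
+// produces a structured union. 'zone' and 'map' are assumed for illustration.
+static Type* UnionExample(Zone* zone, i::Handle<i::Map> map) {
+ Type* bits = Type::Union(Type::Number(zone), Type::String(zone), zone);
+ return Type::Union(bits, Type::Class(map, zone), zone);
+}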
-// Intersection is O(1) on simple bit unions, but O(n*m) on structured unions.
-// TODO(rossberg): Should we use object sets somehow? Is it worth it?
-Type* Type::Intersect(Handle<Type> type1, Handle<Type> type2) {
+// Intersection is O(1) on simple bitsets, but O(n*m) on structured unions.
+template<class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
+ TypeHandle type1, TypeHandle type2, Region* region) {
// Fast case: bit sets.
- if (type1->is_bitset() && type2->is_bitset()) {
- return from_bitset(type1->as_bitset() & type2->as_bitset());
+ if (type1->IsBitset() && type2->IsBitset()) {
+ return BitsetType::New(type1->AsBitset() & type2->AsBitset(), region);
}
// Fast case: top or bottom types.
- if (type1->SameValue(Type::None())) return *type1;
- if (type2->SameValue(Type::None())) return *type2;
- if (type1->SameValue(Type::Any())) return *type2;
- if (type2->SameValue(Type::Any())) return *type1;
+ if (type1->IsNone() || type2->IsAny()) return type1;
+ if (type2->IsNone() || type1->IsAny()) return type2;
// Semi-fast case: Unioned objects are neither involved nor produced.
- if (!(type1->is_union() || type2->is_union())) {
- if (type1->Is(type2)) return *type1;
- if (type2->Is(type1)) return *type2;
+ if (!(type1->IsUnion() || type2->IsUnion())) {
+ if (type1->Is(type2)) return type1;
+ if (type2->Is(type1)) return type2;
}
// Slow case: may need to produce a Unioned object.
- Isolate* isolate = NULL;
int size = 0;
- if (!type1->is_bitset()) {
- isolate = i::HeapObject::cast(*type1)->GetIsolate();
- size = (type1->is_union() ? type1->as_union()->length() : 2);
- }
- if (!type2->is_bitset()) {
- isolate = i::HeapObject::cast(*type2)->GetIsolate();
- int size2 = (type2->is_union() ? type2->as_union()->length() : 2);
- size = (size == 0 ? size2 : Min(size, size2));
- }
- ASSERT(isolate != NULL);
- ASSERT(size >= 2);
- Handle<Unioned> unioned = isolate->factory()->NewFixedArray(size);
- size = 0;
+ if (!type1->IsBitset()) {
+ size += (type1->IsUnion() ? type1->AsUnion()->Length() : 1);
+ }
+ if (!type2->IsBitset()) {
+ size += (type2->IsUnion() ? type2->AsUnion()->Length() : 1);
+ }
+ int bitset = type1->BitsetGlb() & type2->BitsetGlb();
+ if (bitset != BitsetType::kNone) ++size;
+ ASSERT(size >= 1);
- int bitset = type1->GlbBitset() & type2->GlbBitset();
- if (bitset != kNone) unioned->set(size++, from_bitset(bitset));
- size = type1->ExtendIntersection(unioned, type2, size);
- size = type2->ExtendIntersection(unioned, type1, size);
+ UnionHandle unioned = UnionType::New(size, region);
+ size = 0;
+ if (bitset != BitsetType::kNone) {
+ unioned->Set(size++, BitsetType::New(bitset, region));
+ }
+ size = ExtendUnion(unioned, size, type1, type2, true, region);
+ size = ExtendUnion(unioned, size, type2, type1, true, region);
+ size = NormalizeUnion(unioned, size, bitset);
if (size == 0) {
- return None();
+ return None(region);
} else if (size == 1) {
- return *union_get(unioned, 0);
- } else if (size == unioned->length()) {
- return from_handle(unioned);
+ return unioned->Get(0);
+ } else {
+ unioned->Shrink(size);
+ ASSERT(unioned->Wellformed());
+ return unioned;
}
+}
- // There were dropped cases. Copy to smaller union.
- Handle<Unioned> result = isolate->factory()->NewFixedArray(size);
- for (int i = 0; i < size; ++i) result->set(i, unioned->get(i));
- return from_handle(result);
+
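+// Editor's sketch (not part of this patch): intersecting a semantic bitset
+// with a representation bitset narrows the representation, yielding e.g. the
+// 'smi' type described in types.h. 'zone' is assumed for illustration.
+static Type* SmiTypeExample(Zone* zone) {
+ return Type::Intersect(
+ Type::SignedSmall(zone), Type::TaggedInt(zone), zone);
+}
+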
+// -----------------------------------------------------------------------------
+// Iteration.
+
+template<class Config>
+int TypeImpl<Config>::NumClasses() {
+ DisallowHeapAllocation no_allocation;
+ if (this->IsClass()) {
+ return 1;
+ } else if (this->IsUnion()) {
+ UnionHandle unioned = handle(this->AsUnion());
+ int result = 0;
+ for (int i = 0; i < unioned->Length(); ++i) {
+ if (unioned->Get(i)->IsClass()) ++result;
+ }
+ return result;
+ } else {
+ return 0;
+ }
}
-Type* Type::Optional(Handle<Type> type) {
- return type->is_bitset()
- ? from_bitset(type->as_bitset() | kUndefined)
- : Union(type, Undefined()->handle_via_isolate_of(*type));
+template<class Config>
+int TypeImpl<Config>::NumConstants() {
+ DisallowHeapAllocation no_allocation;
+ if (this->IsConstant()) {
+ return 1;
+ } else if (this->IsUnion()) {
+ UnionHandle unioned = handle(this->AsUnion());
+ int result = 0;
+ for (int i = 0; i < unioned->Length(); ++i) {
+ if (unioned->Get(i)->IsConstant()) ++result;
+ }
+ return result;
+ } else {
+ return 0;
+ }
}
-Representation Representation::FromType(Handle<Type> type) {
- if (type->Is(Type::None())) return Representation::None();
- if (type->Is(Type::Smi())) return Representation::Smi();
- if (type->Is(Type::Signed32())) return Representation::Integer32();
- if (type->Is(Type::Number())) return Representation::Double();
- return Representation::Tagged();
+template<class Config> template<class T>
+typename TypeImpl<Config>::TypeHandle
+TypeImpl<Config>::Iterator<T>::get_type() {
+ ASSERT(!Done());
+ return type_->IsUnion() ? type_->AsUnion()->Get(index_) : type_;
}
-#ifdef OBJECT_PRINT
-void Type::TypePrint() {
- TypePrint(stdout);
- PrintF(stdout, "\n");
- Flush(stdout);
+// C++ cannot specialise nested templates, so we have to go through this
+// contortion with an auxiliary template to simulate it.
+template<class Config, class T>
+struct TypeImplIteratorAux {
+ static bool matches(typename TypeImpl<Config>::TypeHandle type);
+ static i::Handle<T> current(typename TypeImpl<Config>::TypeHandle type);
+};
+
+template<class Config>
+struct TypeImplIteratorAux<Config, i::Map> {
+ static bool matches(typename TypeImpl<Config>::TypeHandle type) {
+ return type->IsClass();
+ }
+ static i::Handle<i::Map> current(typename TypeImpl<Config>::TypeHandle type) {
+ return type->AsClass()->Map();
+ }
+};
+
+template<class Config>
+struct TypeImplIteratorAux<Config, i::Object> {
+ static bool matches(typename TypeImpl<Config>::TypeHandle type) {
+ return type->IsConstant();
+ }
+ static i::Handle<i::Object> current(
+ typename TypeImpl<Config>::TypeHandle type) {
+ return type->AsConstant()->Value();
+ }
+};
+
+template<class Config> template<class T>
+bool TypeImpl<Config>::Iterator<T>::matches(TypeHandle type) {
+ return TypeImplIteratorAux<Config, T>::matches(type);
+}
+
+template<class Config> template<class T>
+i::Handle<T> TypeImpl<Config>::Iterator<T>::Current() {
+ return TypeImplIteratorAux<Config, T>::current(get_type());
}
-const char* Type::bitset_name(int bitset) {
+template<class Config> template<class T>
+void TypeImpl<Config>::Iterator<T>::Advance() {
+ DisallowHeapAllocation no_allocation;
+ ++index_;
+ if (type_->IsUnion()) {
+ UnionHandle unioned = handle(type_->AsUnion());
+ for (; index_ < unioned->Length(); ++index_) {
+ if (matches(unioned->Get(index_))) return;
+ }
+ } else if (index_ == 0 && matches(type_)) {
+ return;
+ }
+ index_ = -1;
+}
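+
+
+// Editor's sketch (not part of this patch): counting a type's smi constants
+// through the public iterator interface declared in types.h.
+static int CountSmiConstantsExample(Type* type) {
+ int count = 0;
+ for (Type::Iterator<i::Object> it = type->Constants();
+ !it.Done(); it.Advance()) {
+ if (it.Current()->IsSmi()) ++count;
+ }
+ return count;
+}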
+
+
+// -----------------------------------------------------------------------------
+// Conversion between low-level representations.
+
+template<class Config>
+template<class OtherType>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
+ typename OtherType::TypeHandle type, Region* region) {
+ if (type->IsBitset()) {
+ return BitsetType::New(type->AsBitset(), region);
+ } else if (type->IsClass()) {
+ return ClassType::New(
+ type->AsClass()->Map(),
+ BitsetType::New(type->BitsetLub(), region), region);
+ } else if (type->IsConstant()) {
+ return ConstantType::New(
+ type->AsConstant()->Value(),
+ Convert<OtherType>(type->AsConstant()->Bound(), region), region);
+ } else if (type->IsContext()) {
+ TypeHandle outer = Convert<OtherType>(type->AsContext()->Outer(), region);
+ return ContextType::New(outer, region);
+ } else if (type->IsUnion()) {
+ int length = type->AsUnion()->Length();
+ UnionHandle unioned = UnionType::New(length, region);
+ for (int i = 0; i < length; ++i) {
+ unioned->Set(i, Convert<OtherType>(type->AsUnion()->Get(i), region));
+ }
+ return unioned;
+ } else if (type->IsArray()) {
+ return ArrayType::New(
+ Convert<OtherType>(type->AsArray()->Element(), region),
+ Convert<OtherType>(type->AsArray()->Bound(), region), region);
+ } else if (type->IsFunction()) {
+ FunctionHandle function = FunctionType::New(
+ Convert<OtherType>(type->AsFunction()->Result(), region),
+ Convert<OtherType>(type->AsFunction()->Receiver(), region),
+ Convert<OtherType>(type->AsFunction()->Bound(), region),
+ type->AsFunction()->Arity(), region);
+ for (int i = 0; i < function->Arity(); ++i) {
+ function->InitParameter(i,
+ Convert<OtherType>(type->AsFunction()->Parameter(i), region));
+ }
+ return function;
+ } else {
+ UNREACHABLE();
+ return None(region);
+ }
+}
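+
+
+// Editor's sketch (not part of this patch): persisting a zone-allocated type
+// by converting it to the heap representation; 'isolate' is assumed.
+static i::Handle<HeapType> PersistExample(Type* type, Isolate* isolate) {
+ return HeapType::Convert<Type>(type, isolate);
+}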
+
+
+// -----------------------------------------------------------------------------
+// Printing.
+
+template<class Config>
+const char* TypeImpl<Config>::BitsetType::Name(int bitset) {
switch (bitset) {
- #define PRINT_COMPOSED_TYPE(type, value) case k##type: return #type;
- BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE)
- #undef PRINT_COMPOSED_TYPE
+ case REPRESENTATION(kAny): return "Any";
+ #define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
+ case REPRESENTATION(k##type): return #type;
+ REPRESENTATION_BITSET_TYPE_LIST(RETURN_NAMED_REPRESENTATION_TYPE)
+ #undef RETURN_NAMED_REPRESENTATION_TYPE
+
+ #define RETURN_NAMED_SEMANTIC_TYPE(type, value) \
+ case SEMANTIC(k##type): return #type;
+ SEMANTIC_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
+ #undef RETURN_NAMED_SEMANTIC_TYPE
+
default:
return NULL;
}
}
-void Type::TypePrint(FILE* out) {
- if (is_bitset()) {
- int bitset = as_bitset();
- const char* name = bitset_name(bitset);
- if (name != NULL) {
- PrintF(out, "%s", name);
- } else {
- bool is_first = true;
- PrintF(out, "(");
- for (int mask = 1; mask != 0; mask = mask << 1) {
- if ((bitset & mask) != 0) {
- if (!is_first) PrintF(out, " | ");
- is_first = false;
- PrintF(out, "%s", bitset_name(mask));
- }
+template<class Config>
+void TypeImpl<Config>::BitsetType::PrintTo(StringStream* stream, int bitset) {
+ DisallowHeapAllocation no_allocation;
+ const char* name = Name(bitset);
+ if (name != NULL) {
+ stream->Add("%s", name);
+ } else {
+ static const int named_bitsets[] = {
+ #define BITSET_CONSTANT(type, value) REPRESENTATION(k##type),
+ REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ #undef BITSET_CONSTANT
+
+ #define BITSET_CONSTANT(type, value) SEMANTIC(k##type),
+ SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ #undef BITSET_CONSTANT
+ };
+
+ bool is_first = true;
+ stream->Add("(");
+ for (int i = ARRAY_SIZE(named_bitsets) - 1; bitset != 0 && i >= 0; --i) {
+ int subset = named_bitsets[i];
+ if ((bitset & subset) == subset) {
+ if (!is_first) stream->Add(" | ");
+ is_first = false;
+ stream->Add("%s", Name(subset));
+ bitset -= subset;
}
- PrintF(out, ")");
}
- } else if (is_constant()) {
- PrintF(out, "Constant(%p : ", static_cast<void*>(*as_constant()));
- from_bitset(LubBitset())->TypePrint(out);
- PrintF(")");
- } else if (is_class()) {
- PrintF(out, "Class(%p < ", static_cast<void*>(*as_class()));
- from_bitset(LubBitset())->TypePrint(out);
- PrintF(")");
- } else if (is_union()) {
- PrintF(out, "(");
- Handle<Unioned> unioned = as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> type_i = union_get(unioned, i);
- if (i > 0) PrintF(out, " | ");
- type_i->TypePrint(out);
+ ASSERT(bitset == 0);
+ stream->Add(")");
+ }
+}
+
+
+template<class Config>
+void TypeImpl<Config>::PrintTo(StringStream* stream, PrintDimension dim) {
+ DisallowHeapAllocation no_allocation;
+ if (dim != REPRESENTATION_DIM) {
+ if (this->IsBitset()) {
+ BitsetType::PrintTo(stream, SEMANTIC(this->AsBitset()));
+ } else if (this->IsClass()) {
+ stream->Add("Class(%p < ", static_cast<void*>(*this->AsClass()->Map()));
+ BitsetType::New(BitsetType::Lub(this))->PrintTo(stream, dim);
+ stream->Add(")");
+ return;
+ } else if (this->IsConstant()) {
+ stream->Add("Constant(%p : ",
+ static_cast<void*>(*this->AsConstant()->Value()));
+ BitsetType::New(BitsetType::Lub(this))->PrintTo(stream, dim);
+ stream->Add(")");
+ return;
+ } else if (this->IsContext()) {
+ stream->Add("Context(");
+ this->AsContext()->Outer()->PrintTo(stream, dim);
+ stream->Add(")");
+ } else if (this->IsUnion()) {
+ stream->Add("(");
+ UnionHandle unioned = handle(this->AsUnion());
+ for (int i = 0; i < unioned->Length(); ++i) {
+ TypeHandle type_i = unioned->Get(i);
+ if (i > 0) stream->Add(" | ");
+ type_i->PrintTo(stream, dim);
+ }
+ stream->Add(")");
+ return;
+ } else if (this->IsArray()) {
+ stream->Add("Array(");
+ AsArray()->Element()->PrintTo(stream, dim);
+ stream->Add(")");
+ } else if (this->IsFunction()) {
+ if (!this->AsFunction()->Receiver()->IsAny()) {
+ this->AsFunction()->Receiver()->PrintTo(stream, dim);
+ stream->Add(".");
+ }
+ stream->Add("(");
+ for (int i = 0; i < this->AsFunction()->Arity(); ++i) {
+ if (i > 0) stream->Add(", ");
+ this->AsFunction()->Parameter(i)->PrintTo(stream, dim);
+ }
+ stream->Add(")->");
+ this->AsFunction()->Result()->PrintTo(stream, dim);
+ } else {
+ UNREACHABLE();
}
- PrintF(out, ")");
+ }
+ if (dim == BOTH_DIMS) {
+ stream->Add("/");
+ }
+ if (dim != SEMANTIC_DIM) {
+ BitsetType::PrintTo(stream, REPRESENTATION(this->BitsetLub()));
}
}
-#endif
+template<class Config>
+void TypeImpl<Config>::TypePrint(FILE* out, PrintDimension dim) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ PrintTo(&stream, dim);
+ stream.OutputToFile(out);
+}
+
+
+template<class Config>
+void TypeImpl<Config>::TypePrint(PrintDimension dim) {
+ TypePrint(stdout, dim);
+ PrintF(stdout, "\n");
+ Flush(stdout);
+}
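+
+
+// Editor's sketch (not part of this patch): printing only the semantic
+// dimension of a type to stdout.
+static void PrintSemanticExample(Type* type) {
+ type->TypePrint(stdout, Type::SEMANTIC_DIM);
+}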
+
+
+// -----------------------------------------------------------------------------
+// Instantiations.
+
+template class TypeImpl<ZoneTypeConfig>;
+template class TypeImpl<ZoneTypeConfig>::Iterator<i::Map>;
+template class TypeImpl<ZoneTypeConfig>::Iterator<i::Object>;
+
+template class TypeImpl<HeapTypeConfig>;
+template class TypeImpl<HeapTypeConfig>::Iterator<i::Map>;
+template class TypeImpl<HeapTypeConfig>::Iterator<i::Object>;
+
+template TypeImpl<ZoneTypeConfig>::TypeHandle
+ TypeImpl<ZoneTypeConfig>::Convert<HeapType>(
+ TypeImpl<HeapTypeConfig>::TypeHandle, TypeImpl<ZoneTypeConfig>::Region*);
+template TypeImpl<HeapTypeConfig>::TypeHandle
+ TypeImpl<HeapTypeConfig>::Convert<Type>(
+ TypeImpl<ZoneTypeConfig>::TypeHandle, TypeImpl<HeapTypeConfig>::Region*);
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/types.h b/chromium/v8/src/types.h
index 1dc79dd6b33..aaa76e4f3dc 100644
--- a/chromium/v8/src/types.h
+++ b/chromium/v8/src/types.h
@@ -1,74 +1,91 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_TYPES_H_
#define V8_TYPES_H_
-#include "v8.h"
-
-#include "objects.h"
+#include "src/handles.h"
namespace v8 {
namespace internal {
-
+// SUMMARY
+//
// A simple type system for compiler-internal use. It is based entirely on
// union types, and all subtyping hence amounts to set inclusion. Besides the
// obvious primitive types and some predefined unions, the type language also
// can express class types (a.k.a. specific maps) and singleton types (i.e.,
// concrete constants).
//
-// The following equations and inequations hold:
+// Types consist of two dimensions: semantic (value range) and representation.
+// Both are related through subtyping.
+//
+// SEMANTIC DIMENSION
+//
+// The following equations and inequations hold for the semantic axis:
//
// None <= T
// T <= Any
//
-// Oddball = Boolean \/ Null \/ Undefined
// Number = Signed32 \/ Unsigned32 \/ Double
// Smi <= Signed32
// Name = String \/ Symbol
// UniqueName = InternalizedString \/ Symbol
// InternalizedString < String
//
-// Allocated = Receiver \/ Number \/ Name
-// Detectable = Allocated - Undetectable
-// Undetectable < Object
// Receiver = Object \/ Proxy
// Array < Object
// Function < Object
// RegExp < Object
+// Undetectable < Object
+// Detectable = Receiver \/ Number \/ Name - Undetectable
//
// Class(map) < T iff instance_type(map) < T
// Constant(x) < T iff instance_type(map(x)) < T
+// Array(T) < Array
+// Function(R, S, T0, T1, ...) < Function
+// Context(T) < Internal
//
-// Note that Constant(x) < Class(map(x)) does _not_ hold, since x's map can
+// Both structural Array and Function types are invariant in all parameters;
+// relaxing this would make Union and Intersect operations more involved.
+// There is no subtyping relation between Array, Function, or Context types
+// and respective Constant types, since these types cannot be reconstructed
+// for arbitrary heap values.
+// Note also that Constant(x) < Class(map(x)) does _not_ hold, since x's map can
// change! (Its instance type cannot, however.)
// TODO(rossberg): the latter is not currently true for proxies, because of fix,
// but will hold once we implement direct proxies.
+// However, we also define a 'temporal' variant of the subtyping relation that
+// considers the _current_ state only, i.e., Constant(x) <_now Class(map(x)).
+//
+// REPRESENTATIONAL DIMENSION
+//
+// For the representation axis, the following holds:
+//
+// None <= R
+// R <= Any
+//
+// UntaggedInt <= UntaggedInt1 \/ UntaggedInt8 \/ UntaggedInt16 \/ UntaggedInt32
+// UntaggedFloat <= UntaggedFloat32 \/ UntaggedFloat64
+// UntaggedNumber <= UntaggedInt \/ UntaggedFloat
+// Untagged <= UntaggedNumber \/ UntaggedPtr
+// Tagged <= TaggedInt \/ TaggedPtr
+//
+// Subtyping relates the two dimensions, for example:
+//
+// Number <= Tagged \/ UntaggedNumber
+// Object <= TaggedPtr \/ UntaggedPtr
+//
+// That holds because the semantic type constructors defined by the API create
+// types that allow for all possible representations, and dually, the ones for
+// representation types initially include all semantic ranges. Representations
+// can then e.g. be narrowed for a given semantic type using intersection:
+//
+// SignedSmall /\ TaggedInt (a 'smi')
+// Number /\ TaggedPtr (a heap number)
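+//
+// or, in code (editor's sketch, not part of this patch):
+//
+// Type* smi_type = Type::Intersect(
+// Type::SignedSmall(zone), Type::TaggedInt(zone), zone);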
+//
+// PREDICATES
//
// There are two main functions for testing types:
//
@@ -76,152 +93,384 @@ namespace internal {
// T1->Maybe(T2) -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
//
// Typically, the former is to be used to select representations (e.g., via
-// T->Is(Integer31())), and the to check whether a specific case needs handling
-// (e.g., via T->Maybe(Number())).
+// T->Is(SignedSmall())), and the latter to check whether a specific case needs
+// handling (e.g., via T->Maybe(Number())).
//
// There is no functionality to discover whether a type is a leaf in the
// lattice. That is intentional. It should always be possible to refine the
// lattice (e.g., splitting up number types further) without invalidating any
// existing assumptions or tests.
+// Consequently, do not normally use Equals for type tests, always use Is!
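+//
+// For example (editor's sketch, not part of this patch):
+//
+// if (type->Is(Type::Signed32())) { /* use an int32 representation */ }
+// if (type->Maybe(Type::NaN())) { /* emit a NaN check */ }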
+//
+// The NowIs operator implements state-sensitive subtyping, as described above.
+// Any compilation decision based on such temporary properties requires runtime
+// guarding!
+//
+// PROPERTIES
+//
+// Various formal properties hold for constructors, operators, and predicates
+// over types. For example, constructors are injective, subtyping is a complete
+// partial order, union and intersection satisfy the usual algebraic properties.
//
-// Consequently, do not use pointer equality for type tests, always use Is!
+// See test/cctest/test-types.cc for a comprehensive executable specification,
+// especially with respect to the properties of the more exotic 'temporal'
+// constructors and predicates (those prefixed 'Now').
+//
+// IMPLEMENTATION
//
// Internally, all 'primitive' types, and their unions, are represented as
-// bitsets via smis. Class is a heap pointer to the respective map. Only
-// Constant's, or unions containing Class'es or Constant's, require allocation.
+// bitsets. Class is a heap pointer to the respective map. Only Constant's, or
+// unions containing Class'es or Constant's, currently require allocation.
// Note that the bitset representation is closed under both Union and Intersect.
//
-// The type representation is heap-allocated, so cannot (currently) be used in
-// a concurrent compilation context.
-
-
-#define BITSET_TYPE_LIST(V) \
- V(None, 0) \
- V(Null, 1 << 0) \
- V(Undefined, 1 << 1) \
- V(Boolean, 1 << 2) \
- V(Smi, 1 << 3) \
- V(OtherSigned32, 1 << 4) \
- V(Unsigned32, 1 << 5) \
- V(Double, 1 << 6) \
- V(Symbol, 1 << 7) \
- V(InternalizedString, 1 << 8) \
- V(OtherString, 1 << 9) \
- V(Undetectable, 1 << 10) \
- V(Array, 1 << 11) \
- V(Function, 1 << 12) \
- V(RegExp, 1 << 13) \
- V(OtherObject, 1 << 14) \
- V(Proxy, 1 << 15) \
- V(Internal, 1 << 16) \
+// There are two type representations, using different allocation:
+//
+// - class Type (zone-allocated, for compiler and concurrent compilation)
+// - class HeapType (heap-allocated, for persistent types)
+//
+// Both provide the same API, and the Convert method can be used to interconvert
+// them. For zone types, no query method touches the heap, only constructors do.
+
+
+// -----------------------------------------------------------------------------
+// Values for bitset types
+
+#define MASK_BITSET_TYPE_LIST(V) \
+ V(Representation, static_cast<int>(0xffc00000)) \
+ V(Semantic, static_cast<int>(0x003fffff))
+
+#define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
+#define SEMANTIC(k) ((k) & BitsetType::kSemantic)
+
+#define REPRESENTATION_BITSET_TYPE_LIST(V) \
+ V(None, 0) \
+ V(UntaggedInt1, 1 << 22 | kSemantic) \
+ V(UntaggedInt8, 1 << 23 | kSemantic) \
+ V(UntaggedInt16, 1 << 24 | kSemantic) \
+ V(UntaggedInt32, 1 << 25 | kSemantic) \
+ V(UntaggedFloat32, 1 << 26 | kSemantic) \
+ V(UntaggedFloat64, 1 << 27 | kSemantic) \
+ V(UntaggedPtr, 1 << 28 | kSemantic) \
+ V(TaggedInt, 1 << 29 | kSemantic) \
+ V(TaggedPtr, -1 << 30 | kSemantic) /* MSB has to be sign-extended */ \
\
- V(Oddball, kBoolean | kNull | kUndefined) \
- V(Signed32, kSmi | kOtherSigned32) \
- V(Number, kSigned32 | kUnsigned32 | kDouble) \
- V(String, kInternalizedString | kOtherString) \
- V(UniqueName, kSymbol | kInternalizedString) \
- V(Name, kSymbol | kString) \
- V(NumberOrString, kNumber | kString) \
- V(Object, kUndetectable | kArray | kFunction | \
- kRegExp | kOtherObject) \
- V(Receiver, kObject | kProxy) \
- V(Allocated, kDouble | kName | kReceiver) \
- V(Any, kOddball | kNumber | kAllocated | kInternal) \
- V(NonNumber, kAny - kNumber) \
- V(Detectable, kAllocated - kUndetectable)
-
-
-class Type : public Object {
+ V(UntaggedInt, kUntaggedInt1 | kUntaggedInt8 | \
+ kUntaggedInt16 | kUntaggedInt32) \
+ V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
+ V(UntaggedNumber, kUntaggedInt | kUntaggedFloat) \
+ V(Untagged, kUntaggedNumber | kUntaggedPtr) \
+ V(Tagged, kTaggedInt | kTaggedPtr)
+
+#define SEMANTIC_BITSET_TYPE_LIST(V) \
+ V(Null, 1 << 0 | REPRESENTATION(kTaggedPtr)) \
+ V(Undefined, 1 << 1 | REPRESENTATION(kTaggedPtr)) \
+ V(Boolean, 1 << 2 | REPRESENTATION(kTaggedPtr)) \
+ V(UnsignedSmall, 1 << 3 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherSignedSmall, 1 << 4 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherUnsigned31, 1 << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherUnsigned32, 1 << 6 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherSigned32, 1 << 7 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(MinusZero, 1 << 8 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(NaN, 1 << 9 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherNumber, 1 << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Symbol, 1 << 11 | REPRESENTATION(kTaggedPtr)) \
+ V(InternalizedString, 1 << 12 | REPRESENTATION(kTaggedPtr)) \
+ V(OtherString, 1 << 13 | REPRESENTATION(kTaggedPtr)) \
+ V(Undetectable, 1 << 14 | REPRESENTATION(kTaggedPtr)) \
+ V(Array, 1 << 15 | REPRESENTATION(kTaggedPtr)) \
+ V(Buffer, 1 << 16 | REPRESENTATION(kTaggedPtr)) \
+ V(Function, 1 << 17 | REPRESENTATION(kTaggedPtr)) \
+ V(RegExp, 1 << 18 | REPRESENTATION(kTaggedPtr)) \
+ V(OtherObject, 1 << 19 | REPRESENTATION(kTaggedPtr)) \
+ V(Proxy, 1 << 20 | REPRESENTATION(kTaggedPtr)) \
+ V(Internal, 1 << 21 | REPRESENTATION(kTagged | kUntagged)) \
+ \
+ V(SignedSmall, kUnsignedSmall | kOtherSignedSmall) \
+ V(Signed32, kSignedSmall | kOtherUnsigned31 | kOtherSigned32) \
+ V(Unsigned32, kUnsignedSmall | kOtherUnsigned31 | kOtherUnsigned32) \
+ V(Integral32, kSigned32 | kUnsigned32) \
+ V(Number, kIntegral32 | kMinusZero | kNaN | kOtherNumber) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(NumberOrString, kNumber | kString) \
+ V(Primitive, kNumber | kName | kBoolean | kNull | kUndefined) \
+ V(DetectableObject, kArray | kFunction | kRegExp | kOtherObject) \
+ V(DetectableReceiver, kDetectableObject | kProxy) \
+ V(Detectable, kDetectableReceiver | kNumber | kName) \
+ V(Object, kDetectableObject | kUndetectable) \
+ V(Receiver, kObject | kProxy) \
+ V(NonNumber, kBoolean | kName | kNull | kReceiver | \
+ kUndefined | kInternal) \
+ V(Any, -1)
+
+#define BITSET_TYPE_LIST(V) \
+ MASK_BITSET_TYPE_LIST(V) \
+ REPRESENTATION_BITSET_TYPE_LIST(V) \
+ SEMANTIC_BITSET_TYPE_LIST(V)
+
+
+// -----------------------------------------------------------------------------
+// The abstract Type class, parameterized over the low-level representation.
+
+// struct Config {
+// typedef TypeImpl<Config> Type;
+// typedef Base;
+// typedef Struct;
+// typedef Region;
+// template<class> struct Handle { typedef type; } // No template typedefs...
+// template<class T> static Handle<T>::type handle(T* t); // !is_bitset(t)
+// template<class T> static Handle<T>::type cast(Handle<Type>::type);
+// static bool is_bitset(Type*);
+// static bool is_class(Type*);
+// static bool is_struct(Type*, int tag);
+// static int as_bitset(Type*);
+// static i::Handle<i::Map> as_class(Type*);
+// static Handle<Struct>::type as_struct(Type*);
+// static Type* from_bitset(int bitset);
+// static Handle<Type>::type from_bitset(int bitset, Region*);
+// static Handle<Type>::type from_class(i::Handle<Map>, Region*);
+// static Handle<Type>::type from_struct(Handle<Struct>::type, int tag);
+// static Handle<Struct>::type struct_create(int tag, int length, Region*);
+// static void struct_shrink(Handle<Struct>::type, int length);
+// static int struct_tag(Handle<Struct>::type);
+// static int struct_length(Handle<Struct>::type);
+// static Handle<Type>::type struct_get(Handle<Struct>::type, int);
+// static void struct_set(Handle<Struct>::type, int, Handle<Type>::type);
+// template<class V>
+// static i::Handle<V> struct_get_value(Handle<Struct>::type, int);
+// template<class V>
+// static void struct_set_value(Handle<Struct>::type, int, i::Handle<V>);
+// }
+template<class Config>
+class TypeImpl : public Config::Base {
public:
- #define DEFINE_TYPE_CONSTRUCTOR(type, value) \
- static Type* type() { return from_bitset(k##type); }
+ // Auxiliary types.
+
+ class BitsetType; // Internal
+ class StructuralType; // Internal
+ class UnionType; // Internal
+
+ class ClassType;
+ class ConstantType;
+ class ContextType;
+ class ArrayType;
+ class FunctionType;
+
+ typedef typename Config::template Handle<TypeImpl>::type TypeHandle;
+ typedef typename Config::template Handle<ClassType>::type ClassHandle;
+ typedef typename Config::template Handle<ConstantType>::type ConstantHandle;
+ typedef typename Config::template Handle<ContextType>::type ContextHandle;
+ typedef typename Config::template Handle<ArrayType>::type ArrayHandle;
+ typedef typename Config::template Handle<FunctionType>::type FunctionHandle;
+ typedef typename Config::template Handle<UnionType>::type UnionHandle;
+ typedef typename Config::Region Region;
+
+ // Constructors.
+
+ #define DEFINE_TYPE_CONSTRUCTOR(type, value) \
+ static TypeImpl* type() { return BitsetType::New(BitsetType::k##type); } \
+ static TypeHandle type(Region* region) { \
+ return BitsetType::New(BitsetType::k##type, region); \
+ }
BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
#undef DEFINE_TYPE_CONSTRUCTOR
- static Type* Class(Handle<i::Map> map) { return from_handle(map); }
- static Type* Constant(Handle<i::HeapObject> value) {
- return Constant(value, value->GetIsolate());
+ static TypeHandle Class(i::Handle<i::Map> map, Region* region) {
+ return ClassType::New(map, region);
+ }
+ static TypeHandle Constant(i::Handle<i::Object> value, Region* region) {
+ return ConstantType::New(value, region);
+ }
+ static TypeHandle Context(TypeHandle outer, Region* region) {
+ return ContextType::New(outer, region);
+ }
+ static TypeHandle Array(TypeHandle element, Region* region) {
+ return ArrayType::New(element, region);
+ }
+ static FunctionHandle Function(
+ TypeHandle result, TypeHandle receiver, int arity, Region* region) {
+ return FunctionType::New(result, receiver, arity, region);
+ }
+ static TypeHandle Function(TypeHandle result, Region* region) {
+ return Function(result, Any(region), 0, region);
}
- static Type* Constant(Handle<i::Object> value, Isolate* isolate) {
- return from_handle(isolate->factory()->NewBox(value));
+ static TypeHandle Function(
+ TypeHandle result, TypeHandle param0, Region* region) {
+ FunctionHandle function = Function(result, Any(region), 1, region);
+ function->InitParameter(0, param0);
+ return function;
+ }
+ static TypeHandle Function(
+ TypeHandle result, TypeHandle param0, TypeHandle param1, Region* region) {
+ FunctionHandle function = Function(result, Any(region), 2, region);
+ function->InitParameter(0, param0);
+ function->InitParameter(1, param1);
+ return function;
+ }
+ static TypeHandle Function(
+ TypeHandle result, TypeHandle param0, TypeHandle param1,
+ TypeHandle param2, Region* region) {
+ FunctionHandle function = Function(result, Any(region), 3, region);
+ function->InitParameter(0, param0);
+ function->InitParameter(1, param1);
+ function->InitParameter(2, param2);
+ return function;
}
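+
+ // Editor's sketch (not part of this patch): the one-parameter overload
+ // above builds e.g. a Number -> Number function type with receiver Any:
+ //
+ // TypeHandle f = Function(Number(zone), Number(zone), zone);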
- static Type* Union(Handle<Type> type1, Handle<Type> type2);
- static Type* Intersect(Handle<Type> type1, Handle<Type> type2);
- static Type* Optional(Handle<Type> type); // type \/ Undefined
+ static TypeHandle Union(TypeHandle type1, TypeHandle type2, Region* reg);
+ static TypeHandle Intersect(TypeHandle type1, TypeHandle type2, Region* reg);
- static Type* Of(Handle<i::Object> value) {
- return from_bitset(LubBitset(*value));
+ static TypeHandle Of(double value, Region* region) {
+ return Config::from_bitset(BitsetType::Lub(value), region);
+ }
+ static TypeHandle Of(i::Object* value, Region* region) {
+ return Config::from_bitset(BitsetType::Lub(value), region);
}
+ static TypeHandle Of(i::Handle<i::Object> value, Region* region) {
+ return Of(*value, region);
+ }
+
+ // Predicates.
- bool Is(Type* that) { return this == that || SlowIs(that); }
- bool Is(Handle<Type> that) { return this->Is(*that); }
- bool Maybe(Type* that);
- bool Maybe(Handle<Type> that) { return this->Maybe(*that); }
+ bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
- // State-dependent versions of Of and Is that consider subtyping between
+ bool Is(TypeImpl* that) { return this == that || this->SlowIs(that); }
+ template<class TypeHandle>
+ bool Is(TypeHandle that) { return this->Is(*that); }
+
+ bool Maybe(TypeImpl* that);
+ template<class TypeHandle>
+ bool Maybe(TypeHandle that) { return this->Maybe(*that); }
+
+ bool Equals(TypeImpl* that) { return this->Is(that) && that->Is(this); }
+ template<class TypeHandle>
+ bool Equals(TypeHandle that) { return this->Equals(*that); }
+
+ // Equivalent to Constant(value)->Is(this), but avoiding allocation.
+ bool Contains(i::Object* val);
+ bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
+
+ // State-dependent versions of the above that consider subtyping between
// a constant and its map class.
- static Type* OfCurrently(Handle<i::Object> value);
- bool IsCurrently(Type* that);
- bool IsCurrently(Handle<Type> that) { return this->IsCurrently(*that); }
+ inline static TypeHandle NowOf(i::Object* value, Region* region);
+ static TypeHandle NowOf(i::Handle<i::Object> value, Region* region) {
+ return NowOf(*value, region);
+ }
+ bool NowIs(TypeImpl* that);
+ template<class TypeHandle>
+ bool NowIs(TypeHandle that) { return this->NowIs(*that); }
+ inline bool NowContains(i::Object* val);
+ bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
- bool IsClass() { return is_class(); }
- bool IsConstant() { return is_constant(); }
- Handle<i::Map> AsClass() { return as_class(); }
- Handle<i::Object> AsConstant() { return as_constant(); }
+ bool NowStable();
- int NumClasses();
- int NumConstants();
+ // Inspection.
- template<class T>
- class Iterator {
- public:
- bool Done() const { return index_ < 0; }
- Handle<T> Current();
- void Advance();
-
- private:
- friend class Type;
-
- Iterator() : index_(-1) {}
- explicit Iterator(Handle<Type> type) : type_(type), index_(-1) {
- Advance();
- }
+ bool IsClass() {
+ return Config::is_class(this)
+ || Config::is_struct(this, StructuralType::kClassTag);
+ }
+ bool IsConstant() {
+ return Config::is_struct(this, StructuralType::kConstantTag);
+ }
+ bool IsContext() {
+ return Config::is_struct(this, StructuralType::kContextTag);
+ }
+ bool IsArray() {
+ return Config::is_struct(this, StructuralType::kArrayTag);
+ }
+ bool IsFunction() {
+ return Config::is_struct(this, StructuralType::kFunctionTag);
+ }
- inline bool matches(Handle<Type> type);
- inline Handle<Type> get_type();
+ ClassType* AsClass() { return ClassType::cast(this); }
+ ConstantType* AsConstant() { return ConstantType::cast(this); }
+ ContextType* AsContext() { return ContextType::cast(this); }
+ ArrayType* AsArray() { return ArrayType::cast(this); }
+ FunctionType* AsFunction() { return FunctionType::cast(this); }
- Handle<Type> type_;
- int index_;
- };
+ int NumClasses();
+ int NumConstants();
+ template<class T> class Iterator;
Iterator<i::Map> Classes() {
- if (this->is_bitset()) return Iterator<i::Map>();
- return Iterator<i::Map>(this->handle());
+ if (this->IsBitset()) return Iterator<i::Map>();
+ return Iterator<i::Map>(Config::handle(this));
}
Iterator<i::Object> Constants() {
- if (this->is_bitset()) return Iterator<i::Object>();
- return Iterator<i::Object>(this->handle());
+ if (this->IsBitset()) return Iterator<i::Object>();
+ return Iterator<i::Object>(Config::handle(this));
}
- static Type* cast(i::Object* object) {
- Type* t = static_cast<Type*>(object);
- ASSERT(t->is_bitset() || t->is_class() ||
- t->is_constant() || t->is_union());
- return t;
+ // Casting and conversion.
+
+ static inline TypeImpl* cast(typename Config::Base* object);
+
+ template<class OtherTypeImpl>
+ static TypeHandle Convert(
+ typename OtherTypeImpl::TypeHandle type, Region* region);
+
+ // Printing.
+
+ enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
+
+ void PrintTo(StringStream* stream, PrintDimension = BOTH_DIMS);
+ void TypePrint(PrintDimension = BOTH_DIMS);
+ void TypePrint(FILE* out, PrintDimension = BOTH_DIMS);
+
+ protected:
+ // Friends.
+
+ template<class> friend class Iterator;
+ template<class> friend class TypeImpl;
+
+ // Handle conversion.
+
+ template<class T>
+ static typename Config::template Handle<T>::type handle(T* type) {
+ return Config::handle(type);
}
+ TypeImpl* unhandle() { return this; }
-#ifdef OBJECT_PRINT
- void TypePrint();
- void TypePrint(FILE* out);
-#endif
+ // Internal inspection.
+
+ bool IsNone() { return this == None(); }
+ bool IsAny() { return this == Any(); }
+ bool IsBitset() { return Config::is_bitset(this); }
+ bool IsUnion() { return Config::is_struct(this, StructuralType::kUnionTag); }
+
+ int AsBitset() {
+ ASSERT(this->IsBitset());
+ return static_cast<BitsetType*>(this)->Bitset();
+ }
+ UnionType* AsUnion() { return UnionType::cast(this); }
+
+ // Auxiliary functions.
+
+ int BitsetGlb() { return BitsetType::Glb(this); }
+ int BitsetLub() { return BitsetType::Lub(this); }
+ int InherentBitsetLub() { return BitsetType::InherentLub(this); }
+
+ bool SlowIs(TypeImpl* that);
+
+ TypeHandle Narrow(int bitset, Region* region);
+ int BoundBy(TypeImpl* that);
+ int IndexInUnion(int bound, UnionHandle unioned, int current_size);
+ static int ExtendUnion(
+ UnionHandle unioned, int current_size, TypeHandle t,
+ TypeHandle other, bool is_intersect, Region* region);
+ static int NormalizeUnion(UnionHandle unioned, int current_size, int bitset);
+};
- private:
- // A union is a fixed array containing types. Invariants:
- // - its length is at least 2
- // - at most one field is a bitset, and it must go into index 0
- // - no field is a union
- typedef FixedArray Unioned;
+
+// -----------------------------------------------------------------------------
+// Bitset types (internal).
+
+template<class Config>
+class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
+ protected:
+ friend class TypeImpl<Config>;
enum {
#define DECLARE_TYPE(type, value) k##type = (value),
@@ -230,108 +479,455 @@ class Type : public Object {
kUnusedEOL = 0
};
- bool is_none() { return this == None(); }
- bool is_bitset() { return this->IsSmi(); }
- bool is_class() { return this->IsMap(); }
- bool is_constant() { return this->IsBox(); }
- bool is_union() { return this->IsFixedArray(); }
+ int Bitset() { return Config::as_bitset(this); }
- bool SlowIs(Type* that);
+ static TypeImpl* New(int bitset) {
+ return static_cast<BitsetType*>(Config::from_bitset(bitset));
+ }
+ static TypeHandle New(int bitset, Region* region) {
+ return Config::from_bitset(bitset, region);
+ }
- int as_bitset() { return Smi::cast(this)->value(); }
- Handle<i::Map> as_class() { return Handle<i::Map>::cast(handle()); }
- Handle<i::Object> as_constant() {
- Handle<i::Box> box = Handle<i::Box>::cast(handle());
- return i::handle(box->value(), box->GetIsolate());
+ static bool IsInhabited(int bitset) {
+ return (bitset & kRepresentation) && (bitset & kSemantic);
}
- Handle<Unioned> as_union() { return Handle<Unioned>::cast(handle()); }
- Handle<Type> handle() { return handle_via_isolate_of(this); }
- Handle<Type> handle_via_isolate_of(Type* type) {
- ASSERT(type->IsHeapObject());
- return i::handle(this, i::HeapObject::cast(type)->GetIsolate());
+ static int Glb(TypeImpl* type); // greatest lower bound that's a bitset
+ static int Lub(TypeImpl* type); // least upper bound that's a bitset
+ static int Lub(i::Object* value);
+ static int Lub(double value);
+ static int Lub(int32_t value);
+ static int Lub(uint32_t value);
+ static int Lub(i::Map* map);
+ static int InherentLub(TypeImpl* type);
+
+ static const char* Name(int bitset);
+ static void PrintTo(StringStream* stream, int bitset);
+ using TypeImpl::PrintTo;
+};
+
+
+// -----------------------------------------------------------------------------
+// Superclass for non-bitset types (internal).
+// Contains a tag and a variable number of type or value fields.
+
+template<class Config>
+class TypeImpl<Config>::StructuralType : public TypeImpl<Config> {
+ protected:
+ template<class> friend class TypeImpl;
+ friend struct ZoneTypeConfig; // For tags.
+ friend struct HeapTypeConfig;
+
+ enum Tag {
+ kClassTag,
+ kConstantTag,
+ kContextTag,
+ kArrayTag,
+ kFunctionTag,
+ kUnionTag
+ };
+
+ int Length() {
+ return Config::struct_length(Config::as_struct(this));
+ }
+ TypeHandle Get(int i) {
+ ASSERT(0 <= i && i < this->Length());
+ return Config::struct_get(Config::as_struct(this), i);
+ }
+ void Set(int i, TypeHandle type) {
+ ASSERT(0 <= i && i < this->Length());
+ Config::struct_set(Config::as_struct(this), i, type);
+ }
+ void Shrink(int length) {
+ ASSERT(2 <= length && length <= this->Length());
+ Config::struct_shrink(Config::as_struct(this), length);
+ }
+ template<class V> i::Handle<V> GetValue(int i) {
+ ASSERT(0 <= i && i < this->Length());
+ return Config::template struct_get_value<V>(Config::as_struct(this), i);
+ }
+ template<class V> void SetValue(int i, i::Handle<V> x) {
+ ASSERT(0 <= i && i < this->Length());
+ Config::struct_set_value(Config::as_struct(this), i, x);
}
- static Type* from_bitset(int bitset) {
- return static_cast<Type*>(i::Object::cast(i::Smi::FromInt(bitset)));
+ static TypeHandle New(Tag tag, int length, Region* region) {
+ ASSERT(1 <= length);
+ return Config::from_struct(Config::struct_create(tag, length, region));
}
- static Type* from_handle(Handle<i::HeapObject> handle) {
- return static_cast<Type*>(i::Object::cast(*handle));
+};
+
+
+// -----------------------------------------------------------------------------
+// Union types (internal).
+// A union is a structured type with the following invariants:
+// - its length is at least 2
+// - at most one field is a bitset, and it must go into index 0
+// - no field is a union
+// - no field is a subtype of any other field
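+// For example (editor's sketch): (Number | Class(m)) satisfies these
+// invariants, whereas (Number | Signed32) would not, since Signed32 is a
+// bitset outside index 0 and also a subtype of Number.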
+template<class Config>
+class TypeImpl<Config>::UnionType : public StructuralType {
+ public:
+ static UnionHandle New(int length, Region* region) {
+ return Config::template cast<UnionType>(
+ StructuralType::New(StructuralType::kUnionTag, length, region));
}
- static Handle<Type> union_get(Handle<Unioned> unioned, int i) {
- Type* type = static_cast<Type*>(unioned->get(i));
- ASSERT(!type->is_union());
- return type->handle_via_isolate_of(from_handle(unioned));
+ static UnionType* cast(TypeImpl* type) {
+ ASSERT(type->IsUnion());
+ return static_cast<UnionType*>(type);
}
- int LubBitset(); // least upper bound that's a bitset
- int GlbBitset(); // greatest lower bound that's a bitset
+ bool Wellformed();
+};
- static int LubBitset(i::Object* value);
- static int LubBitset(i::Map* map);
- bool InUnion(Handle<Unioned> unioned, int current_size);
- int ExtendUnion(Handle<Unioned> unioned, int current_size);
- int ExtendIntersection(
- Handle<Unioned> unioned, Handle<Type> type, int current_size);
+// -----------------------------------------------------------------------------
+// Class types.
- static const char* bitset_name(int bitset);
+template<class Config>
+class TypeImpl<Config>::ClassType : public StructuralType {
+ public:
+ TypeHandle Bound(Region* region) {
+ return Config::is_class(this)
+ ? BitsetType::New(BitsetType::Lub(*Config::as_class(this)), region)
+ : this->Get(0);
+ }
+ i::Handle<i::Map> Map() {
+ return Config::is_class(this)
+ ? Config::as_class(this)
+ : this->template GetValue<i::Map>(1);
+ }
+
+ static ClassHandle New(
+ i::Handle<i::Map> map, TypeHandle bound, Region* region) {
+ ClassHandle type = Config::template cast<ClassType>(
+ StructuralType::New(StructuralType::kClassTag, 2, region));
+ type->Set(0, bound);
+ type->SetValue(1, map);
+ return type;
+ }
+
+ static ClassHandle New(i::Handle<i::Map> map, Region* region) {
+ ClassHandle type =
+ Config::template cast<ClassType>(Config::from_class(map, region));
+ if (type->IsClass()) {
+ return type;
+ } else {
+ TypeHandle bound = BitsetType::New(BitsetType::Lub(*map), region);
+ return New(map, bound, region);
+ }
+ }
+
+ static ClassType* cast(TypeImpl* type) {
+ ASSERT(type->IsClass());
+ return static_cast<ClassType*>(type);
+ }
};
-// A simple struct to represent a pair of lower/upper type bounds.
-struct Bounds {
- Handle<Type> lower;
- Handle<Type> upper;
+// -----------------------------------------------------------------------------
+// Constant types.
- Bounds() {}
- Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {
- ASSERT(lower->Is(upper));
+template<class Config>
+class TypeImpl<Config>::ConstantType : public StructuralType {
+ public:
+ TypeHandle Bound() { return this->Get(0); }
+ i::Handle<i::Object> Value() { return this->template GetValue<i::Object>(1); }
+
+ static ConstantHandle New(
+ i::Handle<i::Object> value, TypeHandle bound, Region* region) {
+ ConstantHandle type = Config::template cast<ConstantType>(
+ StructuralType::New(StructuralType::kConstantTag, 2, region));
+ type->Set(0, bound);
+ type->SetValue(1, value);
+ return type;
}
- Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {
- ASSERT(lower->Is(upper));
+
+ static ConstantHandle New(i::Handle<i::Object> value, Region* region) {
+ TypeHandle bound = BitsetType::New(BitsetType::Lub(*value), region);
+ return New(value, bound, region);
}
- explicit Bounds(Handle<Type> t) : lower(t), upper(t) {
- ASSERT(lower->Is(upper));
+
+ static ConstantType* cast(TypeImpl* type) {
+ ASSERT(type->IsConstant());
+ return static_cast<ConstantType*>(type);
+ }
+};
+
+
+// -----------------------------------------------------------------------------
+// Context types.
+
+template<class Config>
+class TypeImpl<Config>::ContextType : public StructuralType {
+ public:
+ TypeHandle Bound() { return this->Get(0); }
+ TypeHandle Outer() { return this->Get(1); }
+
+ static ContextHandle New(TypeHandle outer, TypeHandle bound, Region* region) {
+ ContextHandle type = Config::template cast<ContextType>(
+ StructuralType::New(StructuralType::kContextTag, 2, region));
+ type->Set(0, bound);
+ type->Set(1, outer);
+ return type;
+ }
+
+ static ContextHandle New(TypeHandle outer, Region* region) {
+ TypeHandle bound = BitsetType::New(
+ BitsetType::kInternal & BitsetType::kTaggedPtr, region);
+ return New(outer, bound, region);
+ }
+
+ static ContextType* cast(TypeImpl* type) {
+ ASSERT(type->IsContext());
+ return static_cast<ContextType*>(type);
+ }
+};
+
+
+// -----------------------------------------------------------------------------
+// Array types.
+
+template<class Config>
+class TypeImpl<Config>::ArrayType : public StructuralType {
+ public:
+ TypeHandle Bound() { return this->Get(0); }
+ TypeHandle Element() { return this->Get(1); }
+
+ static ArrayHandle New(TypeHandle element, TypeHandle bound, Region* region) {
+ ASSERT(SEMANTIC(bound->AsBitset()) == SEMANTIC(BitsetType::kArray));
+ ArrayHandle type = Config::template cast<ArrayType>(
+ StructuralType::New(StructuralType::kArrayTag, 2, region));
+ type->Set(0, bound);
+ type->Set(1, element);
+ return type;
}
- Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {
+
+ static ArrayHandle New(TypeHandle element, Region* region) {
+ TypeHandle bound = BitsetType::New(BitsetType::kArray, region);
+ return New(element, bound, region);
+ }
+
+ static ArrayType* cast(TypeImpl* type) {
+ ASSERT(type->IsArray());
+ return static_cast<ArrayType*>(type);
+ }
+};
+
+
+// -----------------------------------------------------------------------------
+// Function types.
+
+template<class Config>
+class TypeImpl<Config>::FunctionType : public StructuralType {
+ public:
+ int Arity() { return this->Length() - 3; }
+ TypeHandle Bound() { return this->Get(0); }
+ TypeHandle Result() { return this->Get(1); }
+ TypeHandle Receiver() { return this->Get(2); }
+ TypeHandle Parameter(int i) { return this->Get(3 + i); }
+
+ void InitParameter(int i, TypeHandle type) { this->Set(3 + i, type); }
+
+ static FunctionHandle New(
+ TypeHandle result, TypeHandle receiver, TypeHandle bound,
+ int arity, Region* region) {
+ ASSERT(SEMANTIC(bound->AsBitset()) == SEMANTIC(BitsetType::kFunction));
+ FunctionHandle type = Config::template cast<FunctionType>(
+ StructuralType::New(StructuralType::kFunctionTag, 3 + arity, region));
+ type->Set(0, bound);
+ type->Set(1, result);
+ type->Set(2, receiver);
+ return type;
+ }
+
+ static FunctionHandle New(
+ TypeHandle result, TypeHandle receiver, int arity, Region* region) {
+ TypeHandle bound = BitsetType::New(BitsetType::kFunction, region);
+ return New(result, receiver, bound, arity, region);
+ }
+
+ static FunctionType* cast(TypeImpl* type) {
+ ASSERT(type->IsFunction());
+ return static_cast<FunctionType*>(type);
+ }
+};
+
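The four structural classes above share one tuple layout: slot 0 always holds the bitset bound, and the payload (value, outer context, element type, result/receiver/parameters) follows. A standalone sketch of the FunctionType slot arithmetic, with std::string standing in for type handles — illustrative only, not V8 code:

    #include <cassert>
    #include <string>
    #include <vector>

    // Slot layout mirrors the class above:
    // [bound, result, receiver, param0, param1, ...]
    struct FunctionTypeSketch {
      std::vector<std::string> slots;
      int Arity() const { return static_cast<int>(slots.size()) - 3; }
      const std::string& Result() const { return slots[1]; }
      const std::string& Receiver() const { return slots[2]; }
      const std::string& Parameter(int i) const { return slots[3 + i]; }
    };

    int main() {
      FunctionTypeSketch f{{"Function", "Number", "Object", "Number", "String"}};
      assert(f.Arity() == 2);             // 5 slots - 3 fixed = 2 parameters
      assert(f.Result() == "Number");
      assert(f.Parameter(1) == "String");
      return 0;
    }
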
+
+// -----------------------------------------------------------------------------
+// Type iterators.
+
+template<class Config> template<class T>
+class TypeImpl<Config>::Iterator {
+ public:
+ bool Done() const { return index_ < 0; }
+ i::Handle<T> Current();
+ void Advance();
+
+ private:
+ template<class> friend class TypeImpl;
+
+ Iterator() : index_(-1) {}
+ explicit Iterator(TypeHandle type) : type_(type), index_(-1) {
+ Advance();
+ }
+
+ inline bool matches(TypeHandle type);
+ inline TypeHandle get_type();
+
+ TypeHandle type_;
+ int index_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Zone-allocated types; they are either (odd) integers to represent bitsets, or
+// (even) pointers to structures for everything else.
+
+struct ZoneTypeConfig {
+ typedef TypeImpl<ZoneTypeConfig> Type;
+ class Base {};
+ typedef void* Struct;
+ typedef i::Zone Region;
+ template<class T> struct Handle { typedef T* type; };
+
+ template<class T> static inline T* handle(T* type);
+ template<class T> static inline T* cast(Type* type);
+
+ static inline bool is_bitset(Type* type);
+ static inline bool is_class(Type* type);
+ static inline bool is_struct(Type* type, int tag);
+
+ static inline int as_bitset(Type* type);
+ static inline i::Handle<i::Map> as_class(Type* type);
+ static inline Struct* as_struct(Type* type);
+
+ static inline Type* from_bitset(int bitset);
+ static inline Type* from_bitset(int bitset, Zone* zone);
+ static inline Type* from_class(i::Handle<i::Map> map, Zone* zone);
+ static inline Type* from_struct(Struct* structured);
+
+ static inline Struct* struct_create(int tag, int length, Zone* zone);
+ static inline void struct_shrink(Struct* structure, int length);
+ static inline int struct_tag(Struct* structure);
+ static inline int struct_length(Struct* structure);
+ static inline Type* struct_get(Struct* structure, int i);
+ static inline void struct_set(Struct* structure, int i, Type* type);
+ template<class V>
+ static inline i::Handle<V> struct_get_value(Struct* structure, int i);
+ template<class V> static inline void struct_set_value(
+ Struct* structure, int i, i::Handle<V> x);
+};
+
+typedef TypeImpl<ZoneTypeConfig> Type;
+
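The odd/even split described in the comment above is the classic low-bit pointer-tagging trick. A standalone sketch of the idea, assuming non-negative bitsets and at-least-2-byte-aligned allocations (the real config's shifting and masking may differ):

    #include <cassert>
    #include <cstdint>

    struct Structured { int tag; };  // stand-in for a structural type

    // Bitsets become odd integers: shift left, set the low bit.
    static void* from_bitset(int bitset) {
      assert(bitset >= 0);
      return reinterpret_cast<void*>((static_cast<uintptr_t>(bitset) << 1) | 1u);
    }
    // Pointers stay even because allocations are at least 2-byte aligned.
    static void* from_struct(Structured* s) {
      assert((reinterpret_cast<uintptr_t>(s) & 1u) == 0);
      return s;
    }
    static bool is_bitset(void* t) {
      return (reinterpret_cast<uintptr_t>(t) & 1u) != 0;
    }
    static int as_bitset(void* t) {
      assert(is_bitset(t));
      return static_cast<int>(reinterpret_cast<uintptr_t>(t) >> 1);
    }

    int main() {
      void* t = from_bitset(42);
      assert(is_bitset(t) && as_bitset(t) == 42);
      Structured s = { 7 };
      assert(!is_bitset(from_struct(&s)));
      return 0;
    }
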
+
+// -----------------------------------------------------------------------------
+// Heap-allocated types; either smis for bitsets, maps for classes, boxes for
+// constants, or fixed arrays for unions.
+
+struct HeapTypeConfig {
+ typedef TypeImpl<HeapTypeConfig> Type;
+ typedef i::Object Base;
+ typedef i::FixedArray Struct;
+ typedef i::Isolate Region;
+ template<class T> struct Handle { typedef i::Handle<T> type; };
+
+ template<class T> static inline i::Handle<T> handle(T* type);
+ template<class T> static inline i::Handle<T> cast(i::Handle<Type> type);
+
+ static inline bool is_bitset(Type* type);
+ static inline bool is_class(Type* type);
+ static inline bool is_struct(Type* type, int tag);
+
+ static inline int as_bitset(Type* type);
+ static inline i::Handle<i::Map> as_class(Type* type);
+ static inline i::Handle<Struct> as_struct(Type* type);
+
+ static inline Type* from_bitset(int bitset);
+ static inline i::Handle<Type> from_bitset(int bitset, Isolate* isolate);
+ static inline i::Handle<Type> from_class(
+ i::Handle<i::Map> map, Isolate* isolate);
+ static inline i::Handle<Type> from_struct(i::Handle<Struct> structure);
+
+ static inline i::Handle<Struct> struct_create(
+ int tag, int length, Isolate* isolate);
+ static inline void struct_shrink(i::Handle<Struct> structure, int length);
+ static inline int struct_tag(i::Handle<Struct> structure);
+ static inline int struct_length(i::Handle<Struct> structure);
+ static inline i::Handle<Type> struct_get(i::Handle<Struct> structure, int i);
+ static inline void struct_set(
+ i::Handle<Struct> structure, int i, i::Handle<Type> type);
+ template<class V>
+ static inline i::Handle<V> struct_get_value(
+ i::Handle<Struct> structure, int i);
+ template<class V>
+ static inline void struct_set_value(
+ i::Handle<Struct> structure, int i, i::Handle<V> x);
+};
+
+typedef TypeImpl<HeapTypeConfig> HeapType;
+
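Note that ZoneTypeConfig and HeapTypeConfig both feed the same TypeImpl template; only the handle representation and allocation region differ. A standalone sketch of this policy-template pattern, with shared_ptr standing in for i::Handle — names are illustrative throughout:

    #include <cassert>
    #include <memory>

    // Like ZoneTypeConfig: handles are raw pointers.
    struct RawConfig {
      template <class T> struct Handle { typedef T* type; };
      template <class T> static typename Handle<T>::type handle(T* v) {
        return v;
      }
    };
    // Like HeapTypeConfig: handles are wrapped (shared_ptr as a stand-in).
    struct BoxedConfig {
      template <class T> struct Handle { typedef std::shared_ptr<T> type; };
      template <class T> static typename Handle<T>::type handle(T* v) {
        return std::shared_ptr<T>(v);
      }
    };

    // One generic implementation, instantiated over both storage policies.
    template <class Config>
    struct Cell {
      typename Config::template Handle<int>::type value;
      explicit Cell(int* p) : value(Config::template handle<int>(p)) {}
    };

    int main() {
      static int x = 5;
      Cell<RawConfig> a(&x);            // holds a plain int*
      Cell<BoxedConfig> b(new int(6));  // holds a shared_ptr<int>
      assert(*a.value + *b.value == 11);
      return 0;
    }
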
+
+// -----------------------------------------------------------------------------
+// Type bounds. A simple struct to represent a pair of lower/upper types.
+
+template<class Config>
+struct BoundsImpl {
+ typedef TypeImpl<Config> Type;
+ typedef typename Type::TypeHandle TypeHandle;
+ typedef typename Type::Region Region;
+
+ TypeHandle lower;
+ TypeHandle upper;
+
+ BoundsImpl() {}
+ explicit BoundsImpl(TypeHandle t) : lower(t), upper(t) {}
+ BoundsImpl(TypeHandle l, TypeHandle u) : lower(l), upper(u) {
ASSERT(lower->Is(upper));
}
// Unrestricted bounds.
- static Bounds Unbounded(Isolate* isl) {
- return Bounds(Type::None(), Type::Any(), isl);
+ static BoundsImpl Unbounded(Region* region) {
+ return BoundsImpl(Type::None(region), Type::Any(region));
}
// Meet: both b1 and b2 are known to hold.
- static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) {
- Handle<Type> lower(Type::Union(b1.lower, b2.lower), isl);
- Handle<Type> upper(Type::Intersect(b1.upper, b2.upper), isl);
+ static BoundsImpl Both(BoundsImpl b1, BoundsImpl b2, Region* region) {
+ TypeHandle lower = Type::Union(b1.lower, b2.lower, region);
+ TypeHandle upper = Type::Intersect(b1.upper, b2.upper, region);
// Lower bounds are considered approximate, correct as necessary.
- lower = handle(Type::Intersect(lower, upper), isl);
- return Bounds(lower, upper);
+ lower = Type::Intersect(lower, upper, region);
+ return BoundsImpl(lower, upper);
}
// Join: either b1 or b2 is known to hold.
- static Bounds Either(Bounds b1, Bounds b2, Isolate* isl) {
- return Bounds(
- handle(Type::Intersect(b1.lower, b2.lower), isl),
- handle(Type::Union(b1.upper, b2.upper), isl));
+ static BoundsImpl Either(BoundsImpl b1, BoundsImpl b2, Region* region) {
+ TypeHandle lower = Type::Intersect(b1.lower, b2.lower, region);
+ TypeHandle upper = Type::Union(b1.upper, b2.upper, region);
+ return BoundsImpl(lower, upper);
}
- static Bounds NarrowLower(Bounds b, Handle<Type> t, Isolate* isl) {
+ static BoundsImpl NarrowLower(BoundsImpl b, TypeHandle t, Region* region) {
// Lower bounds are considered approximate, correct as necessary.
- t = handle(Type::Intersect(t, b.upper), isl);
- return Bounds(handle(Type::Union(b.lower, t), isl), b.upper);
+ t = Type::Intersect(t, b.upper, region);
+ TypeHandle lower = Type::Union(b.lower, t, region);
+ return BoundsImpl(lower, b.upper);
}
- static Bounds NarrowUpper(Bounds b, Handle<Type> t, Isolate* isl) {
- return Bounds(
- handle(Type::Intersect(b.lower, t), isl),
- handle(Type::Intersect(b.upper, t), isl));
+ static BoundsImpl NarrowUpper(BoundsImpl b, TypeHandle t, Region* region) {
+ TypeHandle lower = Type::Intersect(b.lower, t, region);
+ TypeHandle upper = Type::Intersect(b.upper, t, region);
+ return BoundsImpl(lower, upper);
+ }
+
+ bool Narrows(BoundsImpl that) {
+ return that.lower->Is(this->lower) && this->upper->Is(that.upper);
}
};
+typedef BoundsImpl<ZoneTypeConfig> Bounds;
+
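Both/Either above are the meet and join of a lattice of (lower, upper) pairs, and NarrowLower/NarrowUpper clip one bound at a time. A standalone sketch with bitmasks standing in for types, where subset plays the role of subtyping — illustrative, not V8 code:

    #include <cassert>
    #include <cstdint>

    struct BoundsSketch {
      uint32_t lower, upper;  // invariant: lower is a subset of upper
    };

    // Meet: both are known to hold, so lower bounds accumulate (union)
    // and upper bounds tighten (intersection).
    static BoundsSketch Both(BoundsSketch a, BoundsSketch b) {
      uint32_t lower = a.lower | b.lower;  // Type::Union
      uint32_t upper = a.upper & b.upper;  // Type::Intersect
      lower &= upper;  // lower bounds are approximate; correct as necessary
      return { lower, upper };
    }
    // Join: only one is known to hold, so the operations dualize.
    static BoundsSketch Either(BoundsSketch a, BoundsSketch b) {
      return { a.lower & b.lower, a.upper | b.upper };
    }

    int main() {
      BoundsSketch x = { 0x1, 0x3 }, y = { 0x2, 0x6 };
      BoundsSketch m = Both(x, y);
      BoundsSketch j = Either(x, y);
      assert(m.lower == 0x2 && m.upper == 0x2);
      assert(j.lower == 0x0 && j.upper == 0x7);
      return 0;
    }
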
} } // namespace v8::internal
#endif // V8_TYPES_H_
diff --git a/chromium/v8/src/typing.cc b/chromium/v8/src/typing.cc
index 9458d6dc2fe..7762624b073 100644
--- a/chromium/v8/src/typing.cc
+++ b/chromium/v8/src/typing.cc
@@ -1,34 +1,13 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "typing.h"
-
-#include "parser.h" // for CompileTimeValue; TODO(rossberg): should move
-#include "scopes.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/typing.h"
+
+#include "src/frames.h"
+#include "src/frames-inl.h"
+#include "src/parser.h" // for CompileTimeValue; TODO(rossberg): should move
+#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -37,12 +16,12 @@ namespace internal {
AstTyper::AstTyper(CompilationInfo* info)
: info_(info),
oracle_(
- Handle<Code>(info->closure()->shared()->code()),
- Handle<Context>(info->closure()->context()->native_context()),
- info->isolate(),
+ handle(info->closure()->shared()->code()),
+ handle(info->closure()->shared()->feedback_vector()),
+ handle(info->closure()->context()->native_context()),
info->zone()),
store_(info->zone()) {
- InitializeAstVisitor(info->isolate());
+ InitializeAstVisitor(info->zone());
}
@@ -68,6 +47,75 @@ void AstTyper::Run(CompilationInfo* info) {
#undef RECURSE
+
+#ifdef OBJECT_PRINT
+ static void PrintObserved(Variable* var, Object* value, Type* type) {
+ PrintF(" observed %s ", var->IsParameter() ? "param" : "local");
+ var->name()->Print();
+ PrintF(" : ");
+ value->ShortPrint();
+ PrintF(" -> ");
+ type->TypePrint();
+ }
+#endif // OBJECT_PRINT
+
+
+Effect AstTyper::ObservedOnStack(Object* value) {
+ Type* lower = Type::NowOf(value, zone());
+ return Effect(Bounds(lower, Type::Any(zone())));
+}
+
+
+void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
+ if (stmt->OsrEntryId() != info_->osr_ast_id()) return;
+
+ DisallowHeapAllocation no_gc;
+ JavaScriptFrameIterator it(isolate());
+ JavaScriptFrame* frame = it.frame();
+ Scope* scope = info_->scope();
+
+ // Assert that the frame on the stack belongs to the function we want to OSR.
+ ASSERT_EQ(*info_->closure(), frame->function());
+
+ int params = scope->num_parameters();
+ int locals = scope->StackLocalCount();
+
+ // Use sequential composition to achieve desired narrowing.
+ // The receiver is a parameter with index -1.
+ store_.Seq(parameter_index(-1), ObservedOnStack(frame->receiver()));
+ for (int i = 0; i < params; i++) {
+ store_.Seq(parameter_index(i), ObservedOnStack(frame->GetParameter(i)));
+ }
+
+ for (int i = 0; i < locals; i++) {
+ store_.Seq(stack_local_index(i), ObservedOnStack(frame->GetExpression(i)));
+ }
+
+#ifdef OBJECT_PRINT
+ if (FLAG_trace_osr && FLAG_print_scopes) {
+ PrintObserved(scope->receiver(),
+ frame->receiver(),
+ store_.LookupBounds(parameter_index(-1)).lower);
+
+ for (int i = 0; i < params; i++) {
+ PrintObserved(scope->parameter(i),
+ frame->GetParameter(i),
+ store_.LookupBounds(parameter_index(i)).lower);
+ }
+
+ ZoneList<Variable*> local_vars(locals, zone());
+ ZoneList<Variable*> context_vars(scope->ContextLocalCount(), zone());
+ scope->CollectStackAndContextLocals(&local_vars, &context_vars);
+ for (int i = 0; i < locals; i++) {
+ PrintObserved(local_vars.at(i),
+ frame->GetExpression(i),
+ store_.LookupBounds(stack_local_index(i)).lower);
+ }
+ }
+#endif // OBJECT_PRINT
+}
+
+
#define RECURSE(call) \
do { \
ASSERT(!HasStackOverflow()); \
@@ -151,24 +199,25 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
RECURSE(Visit(stmt->tag()));
ZoneList<CaseClause*>* clauses = stmt->cases();
- SwitchStatement::SwitchType switch_type = stmt->switch_type();
Effects local_effects(zone());
bool complex_effects = false; // True for label effects or fall-through.
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
+
Effects clause_effects = EnterEffects();
if (!clause->is_default()) {
Expression* label = clause->label();
- SwitchStatement::SwitchType label_switch_type =
- label->IsSmiLiteral() ? SwitchStatement::SMI_SWITCH :
- label->IsStringLiteral() ? SwitchStatement::STRING_SWITCH :
- SwitchStatement::GENERIC_SWITCH;
- if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
- switch_type = label_switch_type;
- else if (switch_type != label_switch_type)
- switch_type = SwitchStatement::GENERIC_SWITCH;
+ // Collect type feedback.
+ Type* tag_type;
+ Type* label_type;
+ Type* combined_type;
+ oracle()->CompareType(clause->CompareId(),
+ &tag_type, &label_type, &combined_type);
+ NarrowLowerType(stmt->tag(), tag_type);
+ NarrowLowerType(label, label_type);
+ clause->set_compare_type(combined_type);
RECURSE(Visit(label));
if (!clause_effects.IsEmpty()) complex_effects = true;
@@ -189,20 +238,6 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
} else {
store_.Seq(local_effects);
}
-
- if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
- switch_type = SwitchStatement::GENERIC_SWITCH;
- stmt->set_switch_type(switch_type);
-
- // Collect type feedback.
- // TODO(rossberg): can we eliminate this special case and extra loop?
- if (switch_type == SwitchStatement::SMI_SWITCH) {
- for (int i = 0; i < clauses->length(); ++i) {
- CaseClause* clause = clauses->at(i);
- if (!clause->is_default())
- clause->set_compare_type(oracle()->ClauseType(clause->CompareId()));
- }
- }
}
@@ -221,6 +256,7 @@ void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
// computing the set of variables assigned in only some of the origins of the
// control transfer (such as the loop body here).
store_.Forget(); // Control may transfer here via looping or 'continue'.
+ ObserveTypesAtOsrEntry(stmt);
RECURSE(Visit(stmt->body()));
RECURSE(Visit(stmt->cond()));
store_.Forget(); // Control may transfer here via 'break'.
@@ -235,6 +271,7 @@ void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
store_.Forget(); // Control may transfer here via looping or 'continue'.
RECURSE(Visit(stmt->cond()));
+ ObserveTypesAtOsrEntry(stmt);
RECURSE(Visit(stmt->body()));
store_.Forget(); // Control may transfer here via termination or 'break'.
}
@@ -251,6 +288,7 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
RECURSE(Visit(stmt->cond()));
}
+ ObserveTypesAtOsrEntry(stmt);
RECURSE(Visit(stmt->body()));
if (stmt->next() != NULL) {
store_.Forget(); // Control may transfer here via 'continue'.
@@ -263,10 +301,11 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
void AstTyper::VisitForInStatement(ForInStatement* stmt) {
// Collect type feedback.
stmt->set_for_in_type(static_cast<ForInStatement::ForInType>(
- oracle()->ForInType(stmt->ForInFeedbackId())));
+ oracle()->ForInType(stmt->ForInFeedbackSlot())));
RECURSE(Visit(stmt->enumerable()));
store_.Forget(); // Control may transfer here via looping or 'continue'.
+ ObserveTypesAtOsrEntry(stmt);
RECURSE(Visit(stmt->body()));
store_.Forget(); // Control may transfer here via 'break'.
}
@@ -308,6 +347,7 @@ void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
+ expr->InitializeSharedInfo(Handle<Code>(info_->closure()->shared()->code()));
}
@@ -331,7 +371,7 @@ void AstTyper::VisitConditional(Conditional* expr) {
NarrowType(expr, Bounds::Either(
expr->then_expression()->bounds(),
- expr->else_expression()->bounds(), isolate_));
+ expr->else_expression()->bounds(), zone()));
}
@@ -344,13 +384,13 @@ void AstTyper::VisitVariableProxy(VariableProxy* expr) {
void AstTyper::VisitLiteral(Literal* expr) {
- Type* type = Type::Constant(expr->value(), isolate_);
- NarrowType(expr, Bounds(type, isolate_));
+ Type* type = Type::Constant(expr->value(), zone());
+ NarrowType(expr, Bounds(type));
}
void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
- NarrowType(expr, Bounds(Type::RegExp(), isolate_));
+ NarrowType(expr, Bounds(Type::RegExp(zone())));
}
@@ -371,7 +411,7 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
RECURSE(Visit(prop->value()));
}
- NarrowType(expr, Bounds(Type::Object(), isolate_));
+ NarrowType(expr, Bounds(Type::Object(zone())));
}
@@ -382,7 +422,7 @@ void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
RECURSE(Visit(value));
}
- NarrowType(expr, Bounds(Type::Array(), isolate_));
+ NarrowType(expr, Bounds(Type::Array(zone())));
}
@@ -393,7 +433,6 @@ void AstTyper::VisitAssignment(Assignment* expr) {
TypeFeedbackId id = expr->AssignmentFeedbackId();
expr->set_is_uninitialized(oracle()->StoreIsUninitialized(id));
if (!expr->IsUninitialized()) {
- expr->set_is_pre_monomorphic(oracle()->StoreIsPreMonomorphic(id));
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
ASSERT(lit_key != NULL && lit_key->value()->IsString());
@@ -405,7 +444,6 @@ void AstTyper::VisitAssignment(Assignment* expr) {
id, expr->GetReceiverTypes(), &store_mode);
expr->set_store_mode(store_mode);
}
- ASSERT(!expr->IsPreMonomorphic() || !expr->IsMonomorphic());
}
}
@@ -434,7 +472,7 @@ void AstTyper::VisitThrow(Throw* expr) {
RECURSE(Visit(expr->exception()));
// TODO(rossberg): is it worth having a non-termination effect?
- NarrowType(expr, Bounds(Type::None(), isolate_));
+ NarrowType(expr, Bounds(Type::None(zone())));
}
@@ -443,7 +481,6 @@ void AstTyper::VisitProperty(Property* expr) {
TypeFeedbackId id = expr->PropertyFeedbackId();
expr->set_is_uninitialized(oracle()->LoadIsUninitialized(id));
if (!expr->IsUninitialized()) {
- expr->set_is_pre_monomorphic(oracle()->LoadIsPreMonomorphic(id));
if (expr->key()->IsPropertyName()) {
Literal* lit_key = expr->key()->AsLiteral();
ASSERT(lit_key != NULL && lit_key->value()->IsString());
@@ -458,7 +495,6 @@ void AstTyper::VisitProperty(Property* expr) {
id, expr->GetReceiverTypes(), &is_string);
expr->set_is_string_access(is_string);
}
- ASSERT(!expr->IsPreMonomorphic() || !expr->IsMonomorphic());
}
RECURSE(Visit(expr->obj()));
@@ -470,15 +506,16 @@ void AstTyper::VisitProperty(Property* expr) {
void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
- Expression* callee = expr->expression();
- Property* prop = callee->AsProperty();
- if (prop != NULL) {
- expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
- } else {
- expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
+ RECURSE(Visit(expr->expression()));
+ if (!expr->expression()->IsProperty() &&
+ expr->IsUsingCallFeedbackSlot(isolate()) &&
+ oracle()->CallIsMonomorphic(expr->CallFeedbackSlot())) {
+ expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackSlot()));
+ Handle<AllocationSite> site =
+ oracle()->GetCallAllocationSite(expr->CallFeedbackSlot());
+ expr->set_allocation_site(site);
}
- RECURSE(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
@@ -505,7 +542,7 @@ void AstTyper::VisitCallNew(CallNew* expr) {
RECURSE(Visit(arg));
}
- // We don't know anything about the result type.
+ NarrowType(expr, Bounds(Type::None(zone()), Type::Receiver(zone())));
}
@@ -532,13 +569,13 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::NOT:
case Token::DELETE:
- NarrowType(expr, Bounds(Type::Boolean(), isolate_));
+ NarrowType(expr, Bounds(Type::Boolean(zone())));
break;
case Token::VOID:
- NarrowType(expr, Bounds(Type::Undefined(), isolate_));
+ NarrowType(expr, Bounds(Type::Undefined(zone())));
break;
case Token::TYPEOF:
- NarrowType(expr, Bounds(Type::InternalizedString(), isolate_));
+ NarrowType(expr, Bounds(Type::InternalizedString(zone())));
break;
default:
UNREACHABLE();
@@ -556,7 +593,7 @@ void AstTyper::VisitCountOperation(CountOperation* expr) {
RECURSE(Visit(expr->expression()));
- NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+ NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
@@ -567,13 +604,18 @@ void AstTyper::VisitCountOperation(CountOperation* expr) {
void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
// Collect type feedback.
- Handle<Type> type, left_type, right_type;
+ Type* type;
+ Type* left_type;
+ Type* right_type;
Maybe<int> fixed_right_arg;
+ Handle<AllocationSite> allocation_site;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
- &left_type, &right_type, &type, &fixed_right_arg, expr->op());
+ &left_type, &right_type, &type, &fixed_right_arg,
+ &allocation_site, expr->op());
NarrowLowerType(expr, type);
NarrowLowerType(expr->left(), left_type);
NarrowLowerType(expr->right(), right_type);
+ expr->set_allocation_site(allocation_site);
expr->set_fixed_right_arg(fixed_right_arg);
if (expr->op() == Token::OR || expr->op() == Token::AND) {
expr->left()->RecordToBooleanTypeFeedback(oracle());
@@ -597,21 +639,17 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
store_.Seq(left_effects);
NarrowType(expr, Bounds::Either(
- expr->left()->bounds(), expr->right()->bounds(), isolate_));
+ expr->left()->bounds(), expr->right()->bounds(), zone()));
break;
}
case Token::BIT_OR:
case Token::BIT_AND: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- Handle<Type> upper(
- Type::Union(
- expr->left()->bounds().upper, expr->right()->bounds().upper),
- isolate_);
- if (!upper->Is(Type::Signed32()))
- upper = handle(Type::Signed32(), isolate_);
- Handle<Type> lower(Type::Intersect(
- handle(Type::Smi(), isolate_), upper), isolate_);
+ Type* upper = Type::Union(
+ expr->left()->bounds().upper, expr->right()->bounds().upper, zone());
+ if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+ Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
NarrowType(expr, Bounds(lower, upper));
break;
}
@@ -620,7 +658,8 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::SAR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
+ NarrowType(expr,
+ Bounds(Type::SignedSmall(zone()), Type::Signed32(zone())));
break;
case Token::SHR:
RECURSE(Visit(expr->left()));
@@ -628,7 +667,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
// TODO(rossberg): The upper bound would be Unsigned32, but since there
// is no 'positive Smi' type for the lower bound, we use the smallest
// union of Smi and Unsigned32 as upper bound instead.
- NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+ NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
@@ -636,18 +675,18 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Bounds l = expr->left()->bounds();
Bounds r = expr->right()->bounds();
Type* lower =
- l.lower->Is(Type::None()) || r.lower->Is(Type::None()) ?
- Type::None() :
+ !l.lower->IsInhabited() || !r.lower->IsInhabited() ?
+ Type::None(zone()) :
l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
- Type::String() :
+ Type::String(zone()) :
l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
- Type::Smi() : Type::None();
+ Type::SignedSmall(zone()) : Type::None(zone());
Type* upper =
l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
- Type::String() :
+ Type::String(zone()) :
l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
- Type::Number() : Type::NumberOrString();
- NarrowType(expr, Bounds(lower, upper, isolate_));
+ Type::Number(zone()) : Type::NumberOrString(zone());
+ NarrowType(expr, Bounds(lower, upper));
break;
}
case Token::SUB:
@@ -656,7 +695,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::MOD:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+ NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
break;
default:
UNREACHABLE();
@@ -666,7 +705,9 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
void AstTyper::VisitCompareOperation(CompareOperation* expr) {
// Collect type feedback.
- Handle<Type> left_type, right_type, combined_type;
+ Type* left_type;
+ Type* right_type;
+ Type* combined_type;
oracle()->CompareType(expr->CompareOperationFeedbackId(),
&left_type, &right_type, &combined_type);
NarrowLowerType(expr->left(), left_type);
@@ -676,7 +717,7 @@ void AstTyper::VisitCompareOperation(CompareOperation* expr) {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Boolean(), isolate_));
+ NarrowType(expr, Bounds(Type::Boolean(zone())));
}
diff --git a/chromium/v8/src/typing.h b/chromium/v8/src/typing.h
index c942b006327..1d76f8a64c5 100644
--- a/chromium/v8/src/typing.h
+++ b/chromium/v8/src/typing.h
@@ -1,43 +1,20 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_TYPING_H_
#define V8_TYPING_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "allocation.h"
-#include "ast.h"
-#include "compiler.h"
-#include "type-info.h"
-#include "types.h"
-#include "effects.h"
-#include "zone.h"
-#include "scopes.h"
+#include "src/allocation.h"
+#include "src/ast.h"
+#include "src/compiler.h"
+#include "src/type-info.h"
+#include "src/types.h"
+#include "src/effects.h"
+#include "src/zone.h"
+#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -58,6 +35,9 @@ class AstTyper: public AstVisitor {
private:
explicit AstTyper(CompilationInfo* info);
+ Effect ObservedOnStack(Object* value);
+ void ObserveTypesAtOsrEntry(IterationStatement* stmt);
+
static const int kNoVar = INT_MIN;
typedef v8::internal::Effects<int, kNoVar> Effects;
typedef v8::internal::NestedEffects<int, kNoVar> Store;
@@ -67,13 +47,12 @@ class AstTyper: public AstVisitor {
Store store_;
TypeFeedbackOracle* oracle() { return &oracle_; }
- Zone* zone() const { return info_->zone(); }
void NarrowType(Expression* e, Bounds b) {
- e->set_bounds(Bounds::Both(e->bounds(), b, isolate_));
+ e->set_bounds(Bounds::Both(e->bounds(), b, zone()));
}
- void NarrowLowerType(Expression* e, Handle<Type> t) {
- e->set_bounds(Bounds::NarrowLower(e->bounds(), t, isolate_));
+ void NarrowLowerType(Expression* e, Type* t) {
+ e->set_bounds(Bounds::NarrowLower(e->bounds(), t, zone()));
}
Effects EnterEffects() {
@@ -82,9 +61,15 @@ class AstTyper: public AstVisitor {
}
void ExitEffects() { store_ = store_.Pop(); }
+ int parameter_index(int index) { return -index - 2; }
+ int stack_local_index(int index) { return index; }
+
int variable_index(Variable* var) {
- return var->IsStackLocal() ? var->index() :
- var->IsParameter() ? -var->index() : kNoVar;
+    // Stack locals have indices in the range [0 .. l]; parameters
+    // (with the receiver at index -1) have indices in [-1 .. p].
+    // We map these onto the disjoint ranges [-p-2 .. -1] and [0 .. l].
+ return var->IsStackLocal() ? stack_local_index(var->index()) :
+ var->IsParameter() ? parameter_index(var->index()) : kNoVar;
}
void VisitDeclarations(ZoneList<Declaration*>* declarations);
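The packing above is easy to verify in isolation; a standalone sketch of the same arithmetic:

    #include <cassert>

    static int parameter_index(int index) { return -index - 2; }
    static int stack_local_index(int index) { return index; }

    int main() {
      assert(parameter_index(-1) == -1);  // receiver
      assert(parameter_index(0) == -2);   // first parameter
      assert(parameter_index(3) == -5);
      assert(stack_local_index(4) == 4);  // locals keep their own indices
      // Parameters always land on negatives, locals on non-negatives,
      // so the two ranges never collide in the flat store.
      return 0;
    }
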
diff --git a/chromium/v8/src/unbound-queue-inl.h b/chromium/v8/src/unbound-queue-inl.h
index 796ba401d58..67822816800 100644
--- a/chromium/v8/src/unbound-queue-inl.h
+++ b/chromium/v8/src/unbound-queue-inl.h
@@ -1,36 +1,11 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_UNBOUND_QUEUE_INL_H_
#define V8_UNBOUND_QUEUE_INL_H_
-#include "unbound-queue.h"
-
-#include "atomicops.h"
+#include "src/unbound-queue.h"
namespace v8 {
namespace internal {
@@ -49,7 +24,7 @@ struct UnboundQueue<Record>::Node: public Malloced {
template<typename Record>
UnboundQueue<Record>::UnboundQueue() {
first_ = new Node(Record());
- divider_ = last_ = reinterpret_cast<AtomicWord>(first_);
+ divider_ = last_ = reinterpret_cast<base::AtomicWord>(first_);
}
@@ -69,10 +44,10 @@ void UnboundQueue<Record>::DeleteFirst() {
template<typename Record>
bool UnboundQueue<Record>::Dequeue(Record* rec) {
- if (divider_ == Acquire_Load(&last_)) return false;
+ if (divider_ == base::Acquire_Load(&last_)) return false;
Node* next = reinterpret_cast<Node*>(divider_)->next;
*rec = next->value;
- Release_Store(&divider_, reinterpret_cast<AtomicWord>(next));
+ base::Release_Store(&divider_, reinterpret_cast<base::AtomicWord>(next));
return true;
}
@@ -81,9 +56,9 @@ template<typename Record>
void UnboundQueue<Record>::Enqueue(const Record& rec) {
Node*& next = reinterpret_cast<Node*>(last_)->next;
next = new Node(rec);
- Release_Store(&last_, reinterpret_cast<AtomicWord>(next));
+ base::Release_Store(&last_, reinterpret_cast<base::AtomicWord>(next));
- while (first_ != reinterpret_cast<Node*>(Acquire_Load(&divider_))) {
+ while (first_ != reinterpret_cast<Node*>(base::Acquire_Load(&divider_))) {
DeleteFirst();
}
}
@@ -91,13 +66,13 @@ void UnboundQueue<Record>::Enqueue(const Record& rec) {
template<typename Record>
bool UnboundQueue<Record>::IsEmpty() const {
- return NoBarrier_Load(&divider_) == NoBarrier_Load(&last_);
+ return base::NoBarrier_Load(&divider_) == base::NoBarrier_Load(&last_);
}
template<typename Record>
Record* UnboundQueue<Record>::Peek() const {
- if (divider_ == Acquire_Load(&last_)) return NULL;
+ if (divider_ == base::Acquire_Load(&last_)) return NULL;
Node* next = reinterpret_cast<Node*>(divider_)->next;
return &next->value;
}
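For context on the Acquire_Load/Release_Store pairs in these hunks: UnboundQueue is a single-producer/single-consumer linked list in which last_ publishes new nodes to the consumer and divider_ hands consumed nodes back to the producer for reclamation. A standalone sketch of the same discipline using std::atomic in place of V8's base atomics — not the upstream class:

    #include <atomic>

    template <typename Record>
    class SpscQueueSketch {
      struct Node {
        explicit Node(const Record& v) : value(v), next(nullptr) {}
        Record value;
        Node* next;
      };
      Node* first_;                 // producer-owned: oldest allocated node
      std::atomic<Node*> divider_;  // nodes after divider_ are unconsumed
      std::atomic<Node*> last_;     // newest node, written by the producer

     public:
      SpscQueueSketch() : first_(new Node(Record())) {
        divider_.store(first_, std::memory_order_relaxed);
        last_.store(first_, std::memory_order_relaxed);
      }

      void Enqueue(const Record& rec) {  // producer thread only
        Node* node = new Node(rec);
        last_.load(std::memory_order_relaxed)->next = node;
        last_.store(node, std::memory_order_release);  // publish the node
        // Reclaim nodes the consumer has already moved past.
        while (first_ != divider_.load(std::memory_order_acquire)) {
          Node* tmp = first_;
          first_ = first_->next;
          delete tmp;
        }
      }

      bool Dequeue(Record* rec) {  // consumer thread only
        Node* div = divider_.load(std::memory_order_relaxed);
        if (div == last_.load(std::memory_order_acquire)) return false;
        *rec = div->next->value;  // safe: acquire saw the producer's writes
        divider_.store(div->next, std::memory_order_release);
        return true;
      }
    };

    int main() {
      SpscQueueSketch<int> q;
      q.Enqueue(1);
      q.Enqueue(2);
      int v = 0;
      return (q.Dequeue(&v) && v == 1) ? 0 : 1;
    }
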
diff --git a/chromium/v8/src/unbound-queue.h b/chromium/v8/src/unbound-queue.h
index 429e3c673e5..3e129289739 100644
--- a/chromium/v8/src/unbound-queue.h
+++ b/chromium/v8/src/unbound-queue.h
@@ -1,34 +1,12 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_UNBOUND_QUEUE_
#define V8_UNBOUND_QUEUE_
-#include "allocation.h"
+#include "src/allocation.h"
+#include "src/base/atomicops.h"
namespace v8 {
namespace internal {
@@ -57,8 +35,8 @@ class UnboundQueue BASE_EMBEDDED {
struct Node;
Node* first_;
- AtomicWord divider_; // Node*
- AtomicWord last_; // Node*
+ base::AtomicWord divider_; // Node*
+ base::AtomicWord last_; // Node*
DISALLOW_COPY_AND_ASSIGN(UnboundQueue);
};
diff --git a/chromium/v8/src/unicode-inl.h b/chromium/v8/src/unicode-inl.h
index f861f9f2d47..6ef7f98a7d9 100644
--- a/chromium/v8/src/unicode-inl.h
+++ b/chromium/v8/src/unicode-inl.h
@@ -1,36 +1,13 @@
// Copyright 2007-2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_UNICODE_INL_H_
#define V8_UNICODE_INL_H_
-#include "unicode.h"
-#include "checks.h"
-#include "platform.h"
+#include "src/unicode.h"
+#include "src/checks.h"
+#include "src/utils.h"
namespace unibrow {
@@ -107,8 +84,14 @@ unsigned Utf8::EncodeOneByte(char* str, uint8_t c) {
return 2;
}
-
-unsigned Utf8::Encode(char* str, uchar c, int previous) {
+// Encodes the UTF-16 code units c and previous into the given str buffer,
+// combining surrogate code units into single code points. If replace_invalid
+// is set to true, orphan surrogate code units will be replaced with kBadChar.
+unsigned Utf8::Encode(char* str,
+ uchar c,
+ int previous,
+ bool replace_invalid) {
static const int kMask = ~(1 << 6);
if (c <= kMaxOneByteChar) {
str[0] = c;
@@ -118,12 +101,16 @@ unsigned Utf8::Encode(char* str, uchar c, int previous) {
str[1] = 0x80 | (c & kMask);
return 2;
} else if (c <= kMaxThreeByteChar) {
- if (Utf16::IsTrailSurrogate(c) &&
- Utf16::IsLeadSurrogate(previous)) {
+ if (Utf16::IsSurrogatePair(previous, c)) {
const int kUnmatchedSize = kSizeOfUnmatchedSurrogate;
return Encode(str - kUnmatchedSize,
Utf16::CombineSurrogatePair(previous, c),
- Utf16::kNoPreviousCharacter) - kUnmatchedSize;
+ Utf16::kNoPreviousCharacter,
+ replace_invalid) - kUnmatchedSize;
+ } else if (replace_invalid &&
+ (Utf16::IsLeadSurrogate(c) ||
+ Utf16::IsTrailSurrogate(c))) {
+ c = kBadChar;
}
str[0] = 0xE0 | (c >> 12);
str[1] = 0x80 | ((c >> 6) & kMask);
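The masks and the step-back trick above follow standard UTF-16 arithmetic: a lead surrogate (0xD800-0xDBFF) and a trail surrogate (0xDC00-0xDFFF) jointly encode one code point above the BMP. A standalone sketch of the detection and combination math:

    #include <cassert>
    #include <cstdint>

    static bool IsLeadSurrogate(int code)  { return (code & 0xfc00) == 0xd800; }
    static bool IsTrailSurrogate(int code) { return (code & 0xfc00) == 0xdc00; }
    static bool IsSurrogatePair(int lead, int trail) {
      return IsLeadSurrogate(lead) && IsTrailSurrogate(trail);
    }
    // Each surrogate contributes 10 bits; the pair covers U+10000..U+10FFFF.
    static uint32_t CombineSurrogatePair(int lead, int trail) {
      return 0x10000 + (((lead & 0x3ff) << 10) | (trail & 0x3ff));
    }

    int main() {
      // U+1F600 is encoded in UTF-16 as the pair 0xD83D 0xDE00.
      assert(IsSurrogatePair(0xD83D, 0xDE00));
      assert(CombineSurrogatePair(0xD83D, 0xDE00) == 0x1F600);
      return 0;
    }
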
@@ -202,8 +189,8 @@ unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
// memcpy everything in buffer.
unsigned buffer_length =
last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
- unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
- v8::internal::OS::MemCopy(data, buffer_, memcpy_length*sizeof(uint16_t));
+ unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
+ v8::internal::MemCopy(data, buffer_, memcpy_length * sizeof(uint16_t));
if (length <= buffer_length) return length;
ASSERT(unbuffered_start_ != NULL);
// Copy the rest the slow way.
diff --git a/chromium/v8/src/unicode.cc b/chromium/v8/src/unicode.cc
index bd32467786f..2d75654d435 100644
--- a/chromium/v8/src/unicode.cc
+++ b/chromium/v8/src/unicode.cc
@@ -1,33 +1,10 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// This file was generated at 2012-03-06 09:55:58.934483
+// This file was generated at 2014-02-07 15:31:16.733174
-#include "unicode-inl.h"
+#include "src/unicode-inl.h"
#include <stdlib.h>
#include <stdio.h>
@@ -710,28 +687,6 @@ bool Letter::Is(uchar c) {
}
-// Space: point.category == 'Zs'
-
-static const uint16_t kSpaceTable0Size = 4;
-static const int32_t kSpaceTable0[4] = {
- 32, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kSpaceTable1Size = 5;
-static const int32_t kSpaceTable1[5] = {
- 1073741824, 10, 47, 95, 4096 }; // NOLINT
-bool Space::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kSpaceTable0,
- kSpaceTable0Size,
- c);
- case 1: return LookupPredicate(kSpaceTable1,
- kSpaceTable1Size,
- c);
- default: return false;
- }
-}
-
-
// Number: point.category == 'Nd'
static const uint16_t kNumberTable0Size = 56;
@@ -767,14 +722,14 @@ bool Number::Is(uchar c) {
}
-// WhiteSpace: 'Ws' in point.properties
+// WhiteSpace: point.category == 'Zs'
-static const uint16_t kWhiteSpaceTable0Size = 7;
-static const int32_t kWhiteSpaceTable0[7] = {
- 1073741833, 13, 32, 133, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kWhiteSpaceTable1Size = 7;
-static const int32_t kWhiteSpaceTable1[7] = {
- 1073741824, 10, 1073741864, 41, 47, 95, 4096 }; // NOLINT
+static const uint16_t kWhiteSpaceTable0Size = 4;
+static const int32_t kWhiteSpaceTable0[4] = {
+ 32, 160, 5760, 6158 }; // NOLINT
+static const uint16_t kWhiteSpaceTable1Size = 5;
+static const int32_t kWhiteSpaceTable1[5] = {
+ 1073741824, 10, 47, 95, 4096 }; // NOLINT
bool WhiteSpace::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
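The tables above appear to be range-compressed: on my reading, an entry with bit 30 set opens a range whose inclusive end is the next entry, other entries are singletons, and each entry stores only the low 13 bits of a code point (the chunk index from c >> 13 supplies the rest). A standalone linear decoder sketch of that reading — V8's actual LookupPredicate binary-searches instead:

    #include <cassert>
    #include <cstdint>

    static const int32_t kStartBit = 1 << 30;

    static bool LookupLinear(const int32_t* table, int size, int32_t key) {
      for (int i = 0; i < size; i++) {
        if (table[i] & kStartBit) {
          int32_t lo = table[i] & ~kStartBit;
          int32_t hi = table[i + 1];
          if (lo <= key && key <= hi) return true;
          i++;  // skip the range-end entry
        } else if (table[i] == key) {
          return true;
        }
      }
      return false;
    }

    int main() {
      // The new kWhiteSpaceTable1 above: chunk 1 covers U+2000..U+3FFF.
      static const int32_t table[] = { 1073741824, 10, 47, 95, 4096 };
      assert(LookupLinear(table, 5, 0x2003 & 0x1fff));   // U+2003 EM SPACE
      assert(LookupLinear(table, 5, 0x3000 & 0x1fff));   // U+3000 IDEOGRAPHIC SPACE
      assert(!LookupLinear(table, 5, 0x2020 & 0x1fff));  // U+2020 DAGGER
      return 0;
    }
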
@@ -1833,8 +1788,6 @@ int UnicodeData::GetByteCount() {
+ kLetterTable5Size * sizeof(int32_t) // NOLINT
+ kLetterTable6Size * sizeof(int32_t) // NOLINT
+ kLetterTable7Size * sizeof(int32_t) // NOLINT
- + kSpaceTable0Size * sizeof(int32_t) // NOLINT
- + kSpaceTable1Size * sizeof(int32_t) // NOLINT
+ kNumberTable0Size * sizeof(int32_t) // NOLINT
+ kNumberTable5Size * sizeof(int32_t) // NOLINT
+ kNumberTable7Size * sizeof(int32_t) // NOLINT
diff --git a/chromium/v8/src/unicode.h b/chromium/v8/src/unicode.h
index 6ba61d0e17b..e2d6b96b972 100644
--- a/chromium/v8/src/unicode.h
+++ b/chromium/v8/src/unicode.h
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_UNICODE_H_
#define V8_UNICODE_H_
#include <sys/types.h>
-#include "globals.h"
+#include "src/globals.h"
/**
* \file
* Definitions and convenience functions for working with unicode.
@@ -102,6 +79,9 @@ class UnicodeData {
class Utf16 {
public:
+ static inline bool IsSurrogatePair(int lead, int trail) {
+ return IsLeadSurrogate(lead) && IsTrailSurrogate(trail);
+ }
static inline bool IsLeadSurrogate(int code) {
if (code == kNoPreviousCharacter) return false;
return (code & 0xfc00) == 0xd800;
@@ -146,11 +126,16 @@ class Utf8 {
public:
static inline uchar Length(uchar chr, int previous);
static inline unsigned EncodeOneByte(char* out, uint8_t c);
- static inline unsigned Encode(
- char* out, uchar c, int previous);
+ static inline unsigned Encode(char* out,
+ uchar c,
+ int previous,
+ bool replace_invalid = false);
static uchar CalculateValue(const byte* str,
unsigned length,
unsigned* cursor);
+
+ // The unicode replacement character, used to signal invalid unicode
+ // sequences (e.g. an orphan surrogate) when converting to a UTF-8 encoding.
static const uchar kBadChar = 0xFFFD;
static const unsigned kMaxEncodedSize = 4;
static const unsigned kMaxOneByteChar = 0x7f;
@@ -162,6 +147,9 @@ class Utf8 {
// that match are coded as a 4 byte UTF-8 sequence.
static const unsigned kBytesSavedByCombiningSurrogates = 2;
static const unsigned kSizeOfUnmatchedSurrogate = 3;
+ // The maximum size a single UTF-16 code unit may take up when encoded as
+ // UTF-8.
+ static const unsigned kMax16BitCodeUnitSize = 3;
static inline uchar ValueOf(const byte* str,
unsigned length,
unsigned* cursor);
@@ -215,9 +203,6 @@ struct Lowercase {
struct Letter {
static bool Is(uchar c);
};
-struct Space {
- static bool Is(uchar c);
-};
struct Number {
static bool Is(uchar c);
};
diff --git a/chromium/v8/src/unique.h b/chromium/v8/src/unique.h
index a93b0469935..4668128e19c 100644
--- a/chromium/v8/src/unique.h
+++ b/chromium/v8/src/unique.h
@@ -1,37 +1,14 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_UNIQUE_H_
#define V8_HYDROGEN_UNIQUE_H_
-#include "handles.h"
-#include "objects.h"
-#include "utils.h"
-#include "zone.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/utils.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -142,8 +119,12 @@ class Unique V8_FINAL {
friend class Unique; // For comparing raw_address values.
private:
+ Unique<T>() : raw_address_(NULL) { }
+
Address raw_address_;
Handle<T> handle_;
+
+ friend class SideEffectsTracker;
};
@@ -153,6 +134,19 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Constructor. A new set will be empty.
UniqueSet() : size_(0), capacity_(0), array_(NULL) { }
+ // Capacity constructor. A new set will be empty.
+ UniqueSet(int capacity, Zone* zone)
+ : size_(0), capacity_(capacity),
+ array_(zone->NewArray<Unique<T> >(capacity)) {
+ ASSERT(capacity <= kMaxCapacity);
+ }
+
+ // Singleton constructor.
+ UniqueSet(Unique<T> uniq, Zone* zone)
+ : size_(1), capacity_(1), array_(zone->NewArray<Unique<T> >(1)) {
+ array_[0] = uniq;
+ }
+
// Add a new element to this unique set. Mutates this set. O(|this|).
void Add(Unique<T> uniq, Zone* zone) {
ASSERT(uniq.IsInitialized());
@@ -185,7 +179,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
}
// Compare this set against another set. O(|this|).
- bool Equals(UniqueSet<T>* that) const {
+ bool Equals(const UniqueSet<T>* that) const {
if (that->size_ != this->size_) return false;
for (int i = 0; i < this->size_; i++) {
if (this->array_[i] != that->array_[i]) return false;
@@ -196,15 +190,18 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Check whether this set contains the given element. O(|this|)
// TODO(titzer): use binary search for large sets to make this O(log|this|)
template <typename U>
- bool Contains(Unique<U> elem) const {
- for (int i = 0; i < size_; i++) {
- if (this->array_[i] == elem) return true;
+ bool Contains(const Unique<U> elem) const {
+ for (int i = 0; i < this->size_; ++i) {
+ Unique<T> cand = this->array_[i];
+ if (cand.raw_address_ >= elem.raw_address_) {
+ return cand.raw_address_ == elem.raw_address_;
+ }
}
return false;
}
// Check if this set is a subset of the given set. O(|this| + |that|).
- bool IsSubset(UniqueSet<T>* that) const {
+ bool IsSubset(const UniqueSet<T>* that) const {
if (that->size_ < this->size_) return false;
int j = 0;
for (int i = 0; i < this->size_; i++) {
@@ -220,11 +217,11 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Returns a new set representing the intersection of this set and the other.
// O(|this| + |that|).
- UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) const {
+ UniqueSet<T>* Intersect(const UniqueSet<T>* that, Zone* zone) const {
if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
- UniqueSet<T>* out = new(zone) UniqueSet<T>();
- out->Grow(Min(this->size_, that->size_), zone);
+ UniqueSet<T>* out = new(zone) UniqueSet<T>(
+ Min(this->size_, that->size_), zone);
int i = 0, j = 0, k = 0;
while (i < this->size_ && j < that->size_) {
@@ -247,12 +244,12 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Returns a new set representing the union of this set and the other.
// O(|this| + |that|).
- UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) const {
+ UniqueSet<T>* Union(const UniqueSet<T>* that, Zone* zone) const {
if (that->size_ == 0) return this->Copy(zone);
if (this->size_ == 0) return that->Copy(zone);
- UniqueSet<T>* out = new(zone) UniqueSet<T>();
- out->Grow(this->size_ + that->size_, zone);
+ UniqueSet<T>* out = new(zone) UniqueSet<T>(
+ this->size_ + that->size_, zone);
int i = 0, j = 0, k = 0;
while (i < this->size_ && j < that->size_) {
@@ -278,12 +275,30 @@ class UniqueSet V8_FINAL : public ZoneObject {
return out;
}
- // Makes an exact copy of this set. O(|this| + |that|).
+ // Returns a new set representing all elements from this set which are not in
+ // that set. O(|this| * |that|).
+ UniqueSet<T>* Subtract(const UniqueSet<T>* that, Zone* zone) const {
+ if (that->size_ == 0) return this->Copy(zone);
+
+ UniqueSet<T>* out = new(zone) UniqueSet<T>(this->size_, zone);
+
+ int i = 0, j = 0;
+ while (i < this->size_) {
+ Unique<T> cand = this->array_[i];
+ if (!that->Contains(cand)) {
+ out->array_[j++] = cand;
+ }
+ i++;
+ }
+
+ out->size_ = j;
+ return out;
+ }
+
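Intersect, Union, IsSubset and the new Subtract all exploit the fact that a UniqueSet keeps its elements sorted by raw address, which is also what lets Contains stop at the first element >= the key. A standalone sketch of the two-pointer pattern, with plain ints standing in for Unique<T>:

    #include <cassert>
    #include <vector>

    static std::vector<int> Intersect(const std::vector<int>& a,
                                      const std::vector<int>& b) {
      std::vector<int> out;
      std::size_t i = 0, j = 0;
      while (i < a.size() && j < b.size()) {
        if (a[i] == b[j]) { out.push_back(a[i]); i++; j++; }
        else if (a[i] < b[j]) { i++; }
        else { j++; }
      }
      return out;  // O(|a| + |b|) because both inputs are sorted
    }

    static bool Contains(const std::vector<int>& a, int key) {
      for (int v : a) {
        if (v >= key) return v == key;  // sorted: safe to stop early
      }
      return false;
    }

    int main() {
      std::vector<int> a = { 1, 3, 5, 7 }, b = { 3, 4, 5, 8 };
      assert(Intersect(a, b) == (std::vector<int>{ 3, 5 }));
      assert(Contains(a, 5) && !Contains(a, 4));
      return 0;
    }
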
+ // Makes an exact copy of this set. O(|this|).
UniqueSet<T>* Copy(Zone* zone) const {
- UniqueSet<T>* copy = new(zone) UniqueSet<T>();
+ UniqueSet<T>* copy = new(zone) UniqueSet<T>(this->size_, zone);
copy->size_ = this->size_;
- copy->capacity_ = this->size_;
- copy->array_ = zone->NewArray<Unique<T> >(this->size_);
memcpy(copy->array_, this->array_, this->size_ * sizeof(Unique<T>));
return copy;
}
diff --git a/chromium/v8/src/uri.h b/chromium/v8/src/uri.h
index ee1baeb5129..a35ee9919d5 100644
--- a/chromium/v8/src/uri.h
+++ b/chromium/v8/src/uri.h
@@ -1,38 +1,15 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_URI_H_
#define V8_URI_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "string-search.h"
-#include "v8utils.h"
-#include "v8conversions.h"
+#include "src/conversions.h"
+#include "src/string-search.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -61,13 +38,14 @@ Vector<const uc16> GetCharVector(Handle<String> string) {
class URIUnescape : public AllStatic {
public:
template<typename Char>
- static Handle<String> Unescape(Isolate* isolate, Handle<String> source);
+ MUST_USE_RESULT static MaybeHandle<String> Unescape(Isolate* isolate,
+ Handle<String> source);
private:
static const signed char kHexValue['g'];
template<typename Char>
- static Handle<String> UnescapeSlow(
+ MUST_USE_RESULT static MaybeHandle<String> UnescapeSlow(
Isolate* isolate, Handle<String> string, int start_index);
static INLINE(int TwoDigitHex(uint16_t character1, uint16_t character2));
@@ -91,7 +69,8 @@ const signed char URIUnescape::kHexValue[] = {
template<typename Char>
-Handle<String> URIUnescape::Unescape(Isolate* isolate, Handle<String> source) {
+MaybeHandle<String> URIUnescape::Unescape(Isolate* isolate,
+ Handle<String> source) {
int index;
{ DisallowHeapAllocation no_allocation;
StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%"));
@@ -103,7 +82,7 @@ Handle<String> URIUnescape::Unescape(Isolate* isolate, Handle<String> source) {
template <typename Char>
-Handle<String> URIUnescape::UnescapeSlow(
+MaybeHandle<String> URIUnescape::UnescapeSlow(
Isolate* isolate, Handle<String> string, int start_index) {
bool one_byte = true;
int length = string->length();
@@ -127,9 +106,10 @@ Handle<String> URIUnescape::UnescapeSlow(
int dest_position = 0;
Handle<String> second_part;
+ ASSERT(unescaped_length <= String::kMaxLength);
if (one_byte) {
- Handle<SeqOneByteString> dest =
- isolate->factory()->NewRawOneByteString(unescaped_length);
+ Handle<SeqOneByteString> dest = isolate->factory()->NewRawOneByteString(
+ unescaped_length).ToHandleChecked();
DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
@@ -140,8 +120,8 @@ Handle<String> URIUnescape::UnescapeSlow(
}
second_part = dest;
} else {
- Handle<SeqTwoByteString> dest =
- isolate->factory()->NewRawTwoByteString(unescaped_length);
+ Handle<SeqTwoByteString> dest = isolate->factory()->NewRawTwoByteString(
+ unescaped_length).ToHandleChecked();
DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
@@ -200,7 +180,8 @@ int URIUnescape::UnescapeChar(Vector<const Char> vector,
class URIEscape : public AllStatic {
public:
template<typename Char>
- static Handle<String> Escape(Isolate* isolate, Handle<String> string);
+ MUST_USE_RESULT static MaybeHandle<String> Escape(Isolate* isolate,
+ Handle<String> string);
private:
static const char kHexChars[17];
@@ -244,7 +225,7 @@ const char URIEscape::kNotEscaped[] = {
template<typename Char>
-Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
+MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
ASSERT(string->IsFlat());
int escaped_length = 0;
int length = string->length();
@@ -263,18 +244,18 @@ Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
// We don't allow strings that are longer than a maximal length.
ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
- if (escaped_length > String::kMaxLength) {
- isolate->context()->mark_out_of_memory();
- return Handle<String>::null();
- }
+ if (escaped_length > String::kMaxLength) break; // Provoke exception.
}
}
// No length change implies no change. Return original string if no change.
if (escaped_length == length) return string;
- Handle<SeqOneByteString> dest =
- isolate->factory()->NewRawOneByteString(escaped_length);
+ Handle<SeqOneByteString> dest;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, dest,
+ isolate->factory()->NewRawOneByteString(escaped_length),
+ String);
int dest_position = 0;
{ DisallowHeapAllocation no_allocation;
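
The uri.h hunks above all follow one pattern: allocation paths that used to return a null Handle on failure now return a MaybeHandle that the caller must either unwrap or propagate, and the .ToHandleChecked() calls in UnescapeSlow assert success because the length was validated beforehand. A schematic sketch of that contract, assuming simplified stand-in types rather than V8's actual classes:

// Simplified stand-in for MaybeHandle<T>: a fallible result that must be
// unwrapped before use. ASSIGN_RETURN_ON_EXCEPTION expands to essentially
// the check-and-early-return seen in Propagate() below.
template <typename T>
struct MaybeValue {
  bool ok;
  T value;
  bool To(T* out) const { if (ok) *out = value; return ok; }
};

MaybeValue<int> NewRawString(int length) {
  const int kMaxLength = 1 << 28;              // Hypothetical cap, not V8's.
  if (length > kMaxLength) return {false, 0};  // Fail without aborting.
  return {true, length};
}

MaybeValue<int> Propagate(int length) {
  int dest;
  // What ASSIGN_RETURN_ON_EXCEPTION boils down to: unwrap or bail out.
  if (!NewRawString(length).To(&dest)) return {false, 0};
  return {true, dest};
}
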
diff --git a/chromium/v8/src/uri.js b/chromium/v8/src/uri.js
index 4e3f084af27..4b7d1f7e00d 100644
--- a/chromium/v8/src/uri.js
+++ b/chromium/v8/src/uri.js
@@ -1,29 +1,8 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
// This file relies on the fact that the following declaration has been made
// in runtime.js:
@@ -34,424 +13,380 @@
// This file contains support for URI manipulations written in
// JavaScript.
-// Lazily initialized.
-var hexCharArray = 0;
-var hexCharCodeArray = 0;
+(function() {
-function URIAddEncodedOctetToBuffer(octet, result, index) {
- result[index++] = 37; // Char code of '%'.
- result[index++] = hexCharCodeArray[octet >> 4];
- result[index++] = hexCharCodeArray[octet & 0x0F];
- return index;
-}
+ // -------------------------------------------------------------------
+ // Define internal helper functions.
+ function HexValueOf(code) {
+ // 0-9
+ if (code >= 48 && code <= 57) return code - 48;
+ // A-F
+ if (code >= 65 && code <= 70) return code - 55;
+ // a-f
+ if (code >= 97 && code <= 102) return code - 87;
-function URIEncodeOctets(octets, result, index) {
- if (hexCharCodeArray === 0) {
- hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- 65, 66, 67, 68, 69, 70];
+ return -1;
}
- index = URIAddEncodedOctetToBuffer(octets[0], result, index);
- if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index);
- if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index);
- if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index);
- return index;
-}
-
-
-function URIEncodeSingle(cc, result, index) {
- var x = (cc >> 12) & 0xF;
- var y = (cc >> 6) & 63;
- var z = cc & 63;
- var octets = new $Array(3);
- if (cc <= 0x007F) {
- octets[0] = cc;
- } else if (cc <= 0x07FF) {
- octets[0] = y + 192;
- octets[1] = z + 128;
- } else {
- octets[0] = x + 224;
- octets[1] = y + 128;
- octets[2] = z + 128;
+
+ // Does the char code correspond to an alpha-numeric char.
+ function isAlphaNumeric(cc) {
+ // a - z
+ if (97 <= cc && cc <= 122) return true;
+ // A - Z
+ if (65 <= cc && cc <= 90) return true;
+ // 0 - 9
+ if (48 <= cc && cc <= 57) return true;
+
+ return false;
}
- return URIEncodeOctets(octets, result, index);
-}
-
-
-function URIEncodePair(cc1 , cc2, result, index) {
- var u = ((cc1 >> 6) & 0xF) + 1;
- var w = (cc1 >> 2) & 0xF;
- var x = cc1 & 3;
- var y = (cc2 >> 6) & 0xF;
- var z = cc2 & 63;
- var octets = new $Array(4);
- octets[0] = (u >> 2) + 240;
- octets[1] = (((u & 3) << 4) | w) + 128;
- octets[2] = ((x << 4) | y) + 128;
- octets[3] = z + 128;
- return URIEncodeOctets(octets, result, index);
-}
-
-
-function URIHexCharsToCharCode(highChar, lowChar) {
- var highCode = HexValueOf(highChar);
- var lowCode = HexValueOf(lowChar);
- if (highCode == -1 || lowCode == -1) {
- throw new $URIError("URI malformed");
+
+  // Lazily initialized.
+ var hexCharCodeArray = 0;
+
+ function URIAddEncodedOctetToBuffer(octet, result, index) {
+ result[index++] = 37; // Char code of '%'.
+ result[index++] = hexCharCodeArray[octet >> 4];
+ result[index++] = hexCharCodeArray[octet & 0x0F];
+ return index;
}
- return (highCode << 4) | lowCode;
-}
-
-
-function URIDecodeOctets(octets, result, index) {
- var value;
- var o0 = octets[0];
- if (o0 < 0x80) {
- value = o0;
- } else if (o0 < 0xc2) {
- throw new $URIError("URI malformed");
- } else {
- var o1 = octets[1];
- if (o0 < 0xe0) {
- var a = o0 & 0x1f;
- if ((o1 < 0x80) || (o1 > 0xbf)) {
- throw new $URIError("URI malformed");
- }
- var b = o1 & 0x3f;
- value = (a << 6) + b;
- if (value < 0x80 || value > 0x7ff) {
- throw new $URIError("URI malformed");
- }
+
+ function URIEncodeOctets(octets, result, index) {
+ if (hexCharCodeArray === 0) {
+ hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 65, 66, 67, 68, 69, 70];
+ }
+ index = URIAddEncodedOctetToBuffer(octets[0], result, index);
+ if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index);
+ if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index);
+ if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index);
+ return index;
+ }
+
+ function URIEncodeSingle(cc, result, index) {
+ var x = (cc >> 12) & 0xF;
+ var y = (cc >> 6) & 63;
+ var z = cc & 63;
+ var octets = new $Array(3);
+ if (cc <= 0x007F) {
+ octets[0] = cc;
+ } else if (cc <= 0x07FF) {
+ octets[0] = y + 192;
+ octets[1] = z + 128;
} else {
- var o2 = octets[2];
- if (o0 < 0xf0) {
- var a = o0 & 0x0f;
+ octets[0] = x + 224;
+ octets[1] = y + 128;
+ octets[2] = z + 128;
+ }
+ return URIEncodeOctets(octets, result, index);
+ }
+
+ function URIEncodePair(cc1 , cc2, result, index) {
+ var u = ((cc1 >> 6) & 0xF) + 1;
+ var w = (cc1 >> 2) & 0xF;
+ var x = cc1 & 3;
+ var y = (cc2 >> 6) & 0xF;
+ var z = cc2 & 63;
+ var octets = new $Array(4);
+ octets[0] = (u >> 2) + 240;
+ octets[1] = (((u & 3) << 4) | w) + 128;
+ octets[2] = ((x << 4) | y) + 128;
+ octets[3] = z + 128;
+ return URIEncodeOctets(octets, result, index);
+ }
+
+ function URIHexCharsToCharCode(highChar, lowChar) {
+ var highCode = HexValueOf(highChar);
+ var lowCode = HexValueOf(lowChar);
+ if (highCode == -1 || lowCode == -1) {
+ throw new $URIError("URI malformed");
+ }
+ return (highCode << 4) | lowCode;
+ }
+
+ // Callers must ensure that |result| is a sufficiently long sequential
+ // two-byte string!
+ function URIDecodeOctets(octets, result, index) {
+ var value;
+ var o0 = octets[0];
+ if (o0 < 0x80) {
+ value = o0;
+ } else if (o0 < 0xc2) {
+ throw new $URIError("URI malformed");
+ } else {
+ var o1 = octets[1];
+ if (o0 < 0xe0) {
+ var a = o0 & 0x1f;
if ((o1 < 0x80) || (o1 > 0xbf)) {
throw new $URIError("URI malformed");
}
var b = o1 & 0x3f;
- if ((o2 < 0x80) || (o2 > 0xbf)) {
- throw new $URIError("URI malformed");
- }
- var c = o2 & 0x3f;
- value = (a << 12) + (b << 6) + c;
- if ((value < 0x800) || (value > 0xffff)) {
+ value = (a << 6) + b;
+ if (value < 0x80 || value > 0x7ff) {
throw new $URIError("URI malformed");
}
} else {
- var o3 = octets[3];
- if (o0 < 0xf8) {
- var a = (o0 & 0x07);
+ var o2 = octets[2];
+ if (o0 < 0xf0) {
+ var a = o0 & 0x0f;
if ((o1 < 0x80) || (o1 > 0xbf)) {
throw new $URIError("URI malformed");
}
- var b = (o1 & 0x3f);
+ var b = o1 & 0x3f;
if ((o2 < 0x80) || (o2 > 0xbf)) {
throw new $URIError("URI malformed");
}
- var c = (o2 & 0x3f);
- if ((o3 < 0x80) || (o3 > 0xbf)) {
+ var c = o2 & 0x3f;
+ value = (a << 12) + (b << 6) + c;
+ if ((value < 0x800) || (value > 0xffff)) {
throw new $URIError("URI malformed");
}
- var d = (o3 & 0x3f);
- value = (a << 18) + (b << 12) + (c << 6) + d;
- if ((value < 0x10000) || (value > 0x10ffff)) {
+ } else {
+ var o3 = octets[3];
+ if (o0 < 0xf8) {
+ var a = (o0 & 0x07);
+ if ((o1 < 0x80) || (o1 > 0xbf)) {
+ throw new $URIError("URI malformed");
+ }
+ var b = (o1 & 0x3f);
+ if ((o2 < 0x80) || (o2 > 0xbf)) {
+ throw new $URIError("URI malformed");
+ }
+ var c = (o2 & 0x3f);
+ if ((o3 < 0x80) || (o3 > 0xbf)) {
+ throw new $URIError("URI malformed");
+ }
+ var d = (o3 & 0x3f);
+ value = (a << 18) + (b << 12) + (c << 6) + d;
+ if ((value < 0x10000) || (value > 0x10ffff)) {
+ throw new $URIError("URI malformed");
+ }
+ } else {
throw new $URIError("URI malformed");
}
- } else {
- throw new $URIError("URI malformed");
}
}
}
- }
- if (0xD800 <= value && value <= 0xDFFF) {
- throw new $URIError("URI malformed");
- }
- if (value < 0x10000) {
- %_TwoByteSeqStringSetChar(result, index++, value);
- return index;
- } else {
- %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0);
- %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00);
+ if (0xD800 <= value && value <= 0xDFFF) {
+ throw new $URIError("URI malformed");
+ }
+ if (value < 0x10000) {
+ %_TwoByteSeqStringSetChar(result, index++, value);
+ } else {
+ %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0);
+ %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00);
+ }
return index;
}
-}
-
-
-// ECMA-262, section 15.1.3
-function Encode(uri, unescape) {
- var uriLength = uri.length;
- var array = new InternalArray(uriLength);
- var index = 0;
- for (var k = 0; k < uriLength; k++) {
- var cc1 = uri.charCodeAt(k);
- if (unescape(cc1)) {
- array[index++] = cc1;
- } else {
- if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
- if (cc1 < 0xD800 || cc1 > 0xDBFF) {
- index = URIEncodeSingle(cc1, array, index);
+
+ // ECMA-262, section 15.1.3
+ function Encode(uri, unescape) {
+ var uriLength = uri.length;
+ var array = new InternalArray(uriLength);
+ var index = 0;
+ for (var k = 0; k < uriLength; k++) {
+ var cc1 = uri.charCodeAt(k);
+ if (unescape(cc1)) {
+ array[index++] = cc1;
} else {
- k++;
- if (k == uriLength) throw new $URIError("URI malformed");
- var cc2 = uri.charCodeAt(k);
- if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
- index = URIEncodePair(cc1, cc2, array, index);
+ if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
+ if (cc1 < 0xD800 || cc1 > 0xDBFF) {
+ index = URIEncodeSingle(cc1, array, index);
+ } else {
+ k++;
+ if (k == uriLength) throw new $URIError("URI malformed");
+ var cc2 = uri.charCodeAt(k);
+ if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
+ index = URIEncodePair(cc1, cc2, array, index);
+ }
}
}
- }
- var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
- for (var i = 0; i < array.length; i++) {
- %_OneByteSeqStringSetChar(result, i, array[i]);
+ var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
+ for (var i = 0; i < array.length; i++) {
+ %_OneByteSeqStringSetChar(result, i, array[i]);
+ }
+ return result;
}
- return result;
-}
-
-
-// ECMA-262, section 15.1.3
-function Decode(uri, reserved) {
- var uriLength = uri.length;
- var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
- var index = 0;
- var k = 0;
-
- // Optimistically assume ascii string.
- for ( ; k < uriLength; k++) {
- var code = uri.charCodeAt(k);
- if (code == 37) { // '%'
- if (k + 2 >= uriLength) throw new $URIError("URI malformed");
- var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
- if (cc >> 7) break; // Assumption wrong, two byte string.
- if (reserved(cc)) {
- %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'.
- %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1));
- %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2));
+
+ // ECMA-262, section 15.1.3
+ function Decode(uri, reserved) {
+ var uriLength = uri.length;
+ var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
+ var index = 0;
+ var k = 0;
+
+ // Optimistically assume ascii string.
+ for ( ; k < uriLength; k++) {
+ var code = uri.charCodeAt(k);
+ if (code == 37) { // '%'
+ if (k + 2 >= uriLength) throw new $URIError("URI malformed");
+ var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
+ if (cc >> 7) break; // Assumption wrong, two byte string.
+ if (reserved(cc)) {
+ %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'.
+ %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1));
+ %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2));
+ } else {
+ %_OneByteSeqStringSetChar(one_byte, index++, cc);
+ }
+ k += 2;
} else {
- %_OneByteSeqStringSetChar(one_byte, index++, cc);
+ if (code > 0x7f) break; // Assumption wrong, two byte string.
+ %_OneByteSeqStringSetChar(one_byte, index++, code);
}
- k += 2;
- } else {
- if (code > 0x7f) break; // Assumption wrong, two byte string.
- %_OneByteSeqStringSetChar(one_byte, index++, code);
}
- }
- one_byte = %TruncateString(one_byte, index);
- if (k == uriLength) return one_byte;
-
- // Write into two byte string.
- var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING);
- index = 0;
-
- for ( ; k < uriLength; k++) {
- var code = uri.charCodeAt(k);
- if (code == 37) { // '%'
- if (k + 2 >= uriLength) throw new $URIError("URI malformed");
- var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
- if (cc >> 7) {
- var n = 0;
- while (((cc << ++n) & 0x80) != 0) { }
- if (n == 1 || n > 4) throw new $URIError("URI malformed");
- var octets = new $Array(n);
- octets[0] = cc;
- if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
- for (var i = 1; i < n; i++) {
- if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
- octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
- uri.charCodeAt(++k));
+ one_byte = %TruncateString(one_byte, index);
+ if (k == uriLength) return one_byte;
+
+ // Write into two byte string.
+ var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING);
+ index = 0;
+
+ for ( ; k < uriLength; k++) {
+ var code = uri.charCodeAt(k);
+ if (code == 37) { // '%'
+ if (k + 2 >= uriLength) throw new $URIError("URI malformed");
+ var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
+ if (cc >> 7) {
+ var n = 0;
+ while (((cc << ++n) & 0x80) != 0) { }
+ if (n == 1 || n > 4) throw new $URIError("URI malformed");
+ var octets = new $Array(n);
+ octets[0] = cc;
+ if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
+ for (var i = 1; i < n; i++) {
+ if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
+ octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
+ uri.charCodeAt(++k));
+ }
+ index = URIDecodeOctets(octets, two_byte, index);
+ } else if (reserved(cc)) {
+ %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'.
+ %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1));
+ %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k));
+ } else {
+ %_TwoByteSeqStringSetChar(two_byte, index++, cc);
}
- index = URIDecodeOctets(octets, two_byte, index);
- } else if (reserved(cc)) {
- %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'.
- %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1));
- %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k));
} else {
- %_TwoByteSeqStringSetChar(two_byte, index++, cc);
+ %_TwoByteSeqStringSetChar(two_byte, index++, code);
}
- } else {
- %_TwoByteSeqStringSetChar(two_byte, index++, code);
}
- }
-
- two_byte = %TruncateString(two_byte, index);
- return one_byte + two_byte;
-}
-
-
-// ECMA-262 - 15.1.3.1.
-function URIDecode(uri) {
- var reservedPredicate = function(cc) {
- // #$
- if (35 <= cc && cc <= 36) return true;
- // &
- if (cc == 38) return true;
- // +,
- if (43 <= cc && cc <= 44) return true;
- // /
- if (cc == 47) return true;
- // :;
- if (58 <= cc && cc <= 59) return true;
- // =
- if (cc == 61) return true;
- // ?@
- if (63 <= cc && cc <= 64) return true;
-
- return false;
- };
- var string = ToString(uri);
- return Decode(string, reservedPredicate);
-}
-
-
-// ECMA-262 - 15.1.3.2.
-function URIDecodeComponent(component) {
- var reservedPredicate = function(cc) { return false; };
- var string = ToString(component);
- return Decode(string, reservedPredicate);
-}
-
-
-// Does the char code correspond to an alpha-numeric char.
-function isAlphaNumeric(cc) {
- // a - z
- if (97 <= cc && cc <= 122) return true;
- // A - Z
- if (65 <= cc && cc <= 90) return true;
- // 0 - 9
- if (48 <= cc && cc <= 57) return true;
-
- return false;
-}
-
-
-// ECMA-262 - 15.1.3.3.
-function URIEncode(uri) {
- var unescapePredicate = function(cc) {
- if (isAlphaNumeric(cc)) return true;
- // !
- if (cc == 33) return true;
- // #$
- if (35 <= cc && cc <= 36) return true;
- // &'()*+,-./
- if (38 <= cc && cc <= 47) return true;
- // :;
- if (58 <= cc && cc <= 59) return true;
- // =
- if (cc == 61) return true;
- // ?@
- if (63 <= cc && cc <= 64) return true;
- // _
- if (cc == 95) return true;
- // ~
- if (cc == 126) return true;
- return false;
- };
-
- var string = ToString(uri);
- return Encode(string, unescapePredicate);
-}
-
-
-// ECMA-262 - 15.1.3.4
-function URIEncodeComponent(component) {
- var unescapePredicate = function(cc) {
- if (isAlphaNumeric(cc)) return true;
- // !
- if (cc == 33) return true;
- // '()*
- if (39 <= cc && cc <= 42) return true;
- // -.
- if (45 <= cc && cc <= 46) return true;
- // _
- if (cc == 95) return true;
- // ~
- if (cc == 126) return true;
-
- return false;
- };
-
- var string = ToString(component);
- return Encode(string, unescapePredicate);
-}
+ two_byte = %TruncateString(two_byte, index);
+ return one_byte + two_byte;
+ }
+ // -------------------------------------------------------------------
+ // Define exported functions.
-function HexValueOf(code) {
- // 0-9
- if (code >= 48 && code <= 57) return code - 48;
- // A-F
- if (code >= 65 && code <= 70) return code - 55;
- // a-f
- if (code >= 97 && code <= 102) return code - 87;
+ // ECMA-262 - B.2.1.
+ function URIEscapeJS(str) {
+ var s = ToString(str);
+ return %URIEscape(s);
+ }
- return -1;
-}
+ // ECMA-262 - B.2.2.
+ function URIUnescapeJS(str) {
+ var s = ToString(str);
+ return %URIUnescape(s);
+ }
+ // ECMA-262 - 15.1.3.1.
+ function URIDecode(uri) {
+ var reservedPredicate = function(cc) {
+ // #$
+ if (35 <= cc && cc <= 36) return true;
+ // &
+ if (cc == 38) return true;
+ // +,
+ if (43 <= cc && cc <= 44) return true;
+ // /
+ if (cc == 47) return true;
+ // :;
+ if (58 <= cc && cc <= 59) return true;
+ // =
+ if (cc == 61) return true;
+ // ?@
+ if (63 <= cc && cc <= 64) return true;
-// Convert a character code to 4-digit hex string representation
-// 64 -> 0040, 62234 -> F31A.
-function CharCodeToHex4Str(cc) {
- var r = "";
- if (hexCharArray === 0) {
- hexCharArray = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
- "A", "B", "C", "D", "E", "F"];
- }
- for (var i = 0; i < 4; ++i) {
- var c = hexCharArray[cc & 0x0F];
- r = c + r;
- cc = cc >>> 4;
- }
- return r;
-}
-
-
-// Returns true if all digits in string s are valid hex numbers
-function IsValidHex(s) {
- for (var i = 0; i < s.length; ++i) {
- var cc = s.charCodeAt(i);
- if ((48 <= cc && cc <= 57) ||
- (65 <= cc && cc <= 70) ||
- (97 <= cc && cc <= 102)) {
- // '0'..'9', 'A'..'F' and 'a' .. 'f'.
- } else {
return false;
- }
+ };
+ var string = ToString(uri);
+ return Decode(string, reservedPredicate);
}
- return true;
-}
+ // ECMA-262 - 15.1.3.2.
+ function URIDecodeComponent(component) {
+ var reservedPredicate = function(cc) { return false; };
+ var string = ToString(component);
+ return Decode(string, reservedPredicate);
+ }
-// ECMA-262 - B.2.1.
-function URIEscape(str) {
- var s = ToString(str);
- return %URIEscape(s);
-}
+ // ECMA-262 - 15.1.3.3.
+ function URIEncode(uri) {
+ var unescapePredicate = function(cc) {
+ if (isAlphaNumeric(cc)) return true;
+ // !
+ if (cc == 33) return true;
+ // #$
+ if (35 <= cc && cc <= 36) return true;
+ // &'()*+,-./
+ if (38 <= cc && cc <= 47) return true;
+ // :;
+ if (58 <= cc && cc <= 59) return true;
+ // =
+ if (cc == 61) return true;
+ // ?@
+ if (63 <= cc && cc <= 64) return true;
+ // _
+ if (cc == 95) return true;
+ // ~
+ if (cc == 126) return true;
+ return false;
+ };
+ var string = ToString(uri);
+ return Encode(string, unescapePredicate);
+ }
-// ECMA-262 - B.2.2.
-function URIUnescape(str) {
- var s = ToString(str);
- return %URIUnescape(s);
-}
+ // ECMA-262 - 15.1.3.4
+ function URIEncodeComponent(component) {
+ var unescapePredicate = function(cc) {
+ if (isAlphaNumeric(cc)) return true;
+ // !
+ if (cc == 33) return true;
+ // '()*
+ if (39 <= cc && cc <= 42) return true;
+ // -.
+ if (45 <= cc && cc <= 46) return true;
+ // _
+ if (cc == 95) return true;
+ // ~
+ if (cc == 126) return true;
+ return false;
+ };
+ var string = ToString(component);
+ return Encode(string, unescapePredicate);
+ }
-// -------------------------------------------------------------------
+ // -------------------------------------------------------------------
+ // Install exported functions.
-function SetUpUri() {
%CheckIsBootstrapping();
// Set up non-enumerable URI functions on the global object and set
// their names.
InstallFunctions(global, DONT_ENUM, $Array(
- "escape", URIEscape,
- "unescape", URIUnescape,
- "decodeURI", URIDecode,
- "decodeURIComponent", URIDecodeComponent,
- "encodeURI", URIEncode,
- "encodeURIComponent", URIEncodeComponent
+ "escape", URIEscapeJS,
+ "unescape", URIUnescapeJS,
+ "decodeURI", URIDecode,
+ "decodeURIComponent", URIDecodeComponent,
+ "encodeURI", URIEncode,
+ "encodeURIComponent", URIEncodeComponent
));
-}
-SetUpUri();
+})();
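
The heart of the moved URIEncodePair above is UTF-16-surrogate-pair to UTF-8 arithmetic. The same computation transcribed to standalone C++ purely for illustration (the shipped code stays in JavaScript):

#include <cassert>
#include <cstdint>

// Combine a UTF-16 surrogate pair (cc1, cc2) into four UTF-8 octets.
void EncodePair(uint16_t cc1, uint16_t cc2, uint8_t octets[4]) {
  uint32_t u = ((cc1 >> 6) & 0xF) + 1;
  uint32_t w = (cc1 >> 2) & 0xF;
  uint32_t x = cc1 & 3;
  uint32_t y = (cc2 >> 6) & 0xF;
  uint32_t z = cc2 & 63;
  octets[0] = (u >> 2) + 240;              // 0xF0 plus code-point bits 20-18.
  octets[1] = (((u & 3) << 4) | w) + 128;  // 0x80 plus bits 17-12.
  octets[2] = ((x << 4) | y) + 128;        // 0x80 plus bits 11-6.
  octets[3] = z + 128;                     // 0x80 plus bits 5-0.
}

int main() {
  uint8_t out[4];
  EncodePair(0xD83D, 0xDE00, out);  // Surrogate pair for U+1F600.
  assert(out[0] == 0xF0 && out[1] == 0x9F && out[2] == 0x98 && out[3] == 0x80);
  return 0;
}
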
diff --git a/chromium/v8/src/utils-inl.h b/chromium/v8/src/utils-inl.h
index 76a3c104ef5..d0c0e3cb2a9 100644
--- a/chromium/v8/src/utils-inl.h
+++ b/chromium/v8/src/utils-inl.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_UTILS_INL_H_
#define V8_UTILS_INL_H_
-#include "list-inl.h"
+#include "src/list-inl.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/utils.cc b/chromium/v8/src/utils.cc
index 8462615200a..52b0d485eb9 100644
--- a/chromium/v8/src/utils.cc
+++ b/chromium/v8/src/utils.cc
@@ -1,35 +1,15 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <stdarg.h>
-#include "../include/v8stdint.h"
-#include "checks.h"
-#include "platform.h"
-#include "utils.h"
+#include <sys/stat.h>
+
+#include "src/v8.h"
+
+#include "src/checks.h"
+#include "src/platform.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -49,7 +29,7 @@ void SimpleStringBuilder::AddString(const char* s) {
void SimpleStringBuilder::AddSubstring(const char* s, int n) {
ASSERT(!is_finalized() && position_ + n <= buffer_.length());
ASSERT(static_cast<size_t>(n) <= strlen(s));
- OS::MemCopy(&buffer_[position_], s, n * kCharSize);
+ MemCopy(&buffer_[position_], s, n * kCharSize);
position_ += n;
}
@@ -97,18 +77,321 @@ char* SimpleStringBuilder::Finalize() {
}
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor) {
- switch (divisor) {
- case 3: return DivMagicNumberFor3;
- case 5: return DivMagicNumberFor5;
- case 7: return DivMagicNumberFor7;
- case 9: return DivMagicNumberFor9;
- case 11: return DivMagicNumberFor11;
- case 25: return DivMagicNumberFor25;
- case 125: return DivMagicNumberFor125;
- case 625: return DivMagicNumberFor625;
- default: return InvalidDivMagicNumber;
+void PrintF(const char* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+}
+
+
+void PrintF(FILE* out, const char* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VFPrint(out, format, arguments);
+ va_end(arguments);
+}
+
+
+void PrintPID(const char* format, ...) {
+ OS::Print("[%d] ", OS::GetCurrentProcessId());
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+}
+
+
+int SNPrintF(Vector<char> str, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ int result = VSNPrintF(str, format, args);
+ va_end(args);
+ return result;
+}
+
+
+int VSNPrintF(Vector<char> str, const char* format, va_list args) {
+ return OS::VSNPrintF(str.start(), str.length(), format, args);
+}
+
+
+void StrNCpy(Vector<char> dest, const char* src, size_t n) {
+ OS::StrNCpy(dest.start(), dest.length(), src, n);
+}
+
+
+void Flush(FILE* out) {
+ fflush(out);
+}
+
+
+char* ReadLine(const char* prompt) {
+ char* result = NULL;
+ char line_buf[256];
+ int offset = 0;
+ bool keep_going = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keep_going) {
+ if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
+ // fgets got an error. Just give up.
+ if (result != NULL) {
+ DeleteArray(result);
+ }
+ return NULL;
+ }
+ int len = StrLength(line_buf);
+ if (len > 1 &&
+ line_buf[len - 2] == '\\' &&
+ line_buf[len - 1] == '\n') {
+ // When we read a line that ends with a "\" we remove the escape and
+ // append the remainder.
+ line_buf[len - 2] = '\n';
+ line_buf[len - 1] = 0;
+ len -= 1;
+ } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
+ // Since we read a new line we are done reading the line. This
+ // will exit the loop after copying this buffer into the result.
+ keep_going = false;
+ }
+ if (result == NULL) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result = NewArray<char>(len + 1);
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = NewArray<char>(new_len);
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ MemCopy(new_result, result, offset * kCharSize);
+ DeleteArray(result);
+ result = new_result;
+ }
+ // Copy the newly read line into the result.
+ MemCopy(result + offset, line_buf, len * kCharSize);
+ offset += len;
}
+ ASSERT(result != NULL);
+ result[offset] = '\0';
+ return result;
}
+
+char* ReadCharsFromFile(FILE* file,
+ int* size,
+ int extra_space,
+ bool verbose,
+ const char* filename) {
+ if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
+ if (verbose) {
+ OS::PrintError("Cannot read from file %s.\n", filename);
+ }
+ return NULL;
+ }
+
+ // Get the size of the file and rewind it.
+ *size = ftell(file);
+ rewind(file);
+
+ char* result = NewArray<char>(*size + extra_space);
+ for (int i = 0; i < *size && feof(file) == 0;) {
+ int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
+ if (read != (*size - i) && ferror(file) != 0) {
+ fclose(file);
+ DeleteArray(result);
+ return NULL;
+ }
+ i += read;
+ }
+ return result;
+}
+
+
+char* ReadCharsFromFile(const char* filename,
+ int* size,
+ int extra_space,
+ bool verbose) {
+ FILE* file = OS::FOpen(filename, "rb");
+ char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename);
+ if (file != NULL) fclose(file);
+ return result;
+}
+
+
+byte* ReadBytes(const char* filename, int* size, bool verbose) {
+ char* chars = ReadCharsFromFile(filename, size, 0, verbose);
+ return reinterpret_cast<byte*>(chars);
+}
+
+
+static Vector<const char> SetVectorContents(char* chars,
+ int size,
+ bool* exists) {
+ if (!chars) {
+ *exists = false;
+ return Vector<const char>::empty();
+ }
+ chars[size] = '\0';
+ *exists = true;
+ return Vector<const char>(chars, size);
+}
+
+
+Vector<const char> ReadFile(const char* filename,
+ bool* exists,
+ bool verbose) {
+ int size;
+ char* result = ReadCharsFromFile(filename, &size, 1, verbose);
+ return SetVectorContents(result, size, exists);
+}
+
+
+Vector<const char> ReadFile(FILE* file,
+ bool* exists,
+ bool verbose) {
+ int size;
+ char* result = ReadCharsFromFile(file, &size, 1, verbose, "");
+ return SetVectorContents(result, size, exists);
+}
+
+
+int WriteCharsToFile(const char* str, int size, FILE* f) {
+ int total = 0;
+ while (total < size) {
+ int write = static_cast<int>(fwrite(str, 1, size - total, f));
+ if (write == 0) {
+ return total;
+ }
+ total += write;
+ str += write;
+ }
+ return total;
+}
+
+
+int AppendChars(const char* filename,
+ const char* str,
+ int size,
+ bool verbose) {
+ FILE* f = OS::FOpen(filename, "ab");
+ if (f == NULL) {
+ if (verbose) {
+ OS::PrintError("Cannot open file %s for writing.\n", filename);
+ }
+ return 0;
+ }
+ int written = WriteCharsToFile(str, size, f);
+ fclose(f);
+ return written;
+}
+
+
+int WriteChars(const char* filename,
+ const char* str,
+ int size,
+ bool verbose) {
+ FILE* f = OS::FOpen(filename, "wb");
+ if (f == NULL) {
+ if (verbose) {
+ OS::PrintError("Cannot open file %s for writing.\n", filename);
+ }
+ return 0;
+ }
+ int written = WriteCharsToFile(str, size, f);
+ fclose(f);
+ return written;
+}
+
+
+int WriteBytes(const char* filename,
+ const byte* bytes,
+ int size,
+ bool verbose) {
+ const char* str = reinterpret_cast<const char*>(bytes);
+ return WriteChars(filename, str, size, verbose);
+}
+
+
+
+void StringBuilder::AddFormatted(const char* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+ AddFormattedList(format, arguments);
+ va_end(arguments);
+}
+
+
+void StringBuilder::AddFormattedList(const char* format, va_list list) {
+ ASSERT(!is_finalized() && position_ <= buffer_.length());
+ int n = VSNPrintF(buffer_ + position_, format, list);
+ if (n < 0 || n >= (buffer_.length() - position_)) {
+ position_ = buffer_.length();
+ } else {
+ position_ += n;
+ }
+}
+
+
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+static void MemMoveWrapper(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+}
+
+
+// Initialize to library version so we can call this at any time during startup.
+static MemMoveFunction memmove_function = &MemMoveWrapper;
+
+// Defined in codegen-ia32.cc.
+MemMoveFunction CreateMemMoveFunction();
+
+// Copy memory area to disjoint memory area.
+void MemMove(void* dest, const void* src, size_t size) {
+ if (size == 0) return;
+ // Note: here we rely on dependent reads being ordered. This is true
+ // on all architectures we currently support.
+ (*memmove_function)(dest, src, size);
+}
+
+#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
+void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
+ size_t chars) {
+ uint16_t* limit = dest + chars;
+ while (dest < limit) {
+ *dest++ = static_cast<uint16_t>(*src++);
+ }
+}
+
+
+MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
+MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
+ &MemCopyUint16Uint8Wrapper;
+// Defined in codegen-arm.cc.
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
+MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
+ MemCopyUint16Uint8Function stub);
+
+#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
+MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
+// Defined in codegen-mips.cc.
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
+#endif
+
+
+void init_memcopy_functions() {
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+ MemMoveFunction generated_memmove = CreateMemMoveFunction();
+ if (generated_memmove != NULL) {
+ memmove_function = generated_memmove;
+ }
+#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
+ memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
+ memcopy_uint16_uint8_function =
+ CreateMemCopyUint16Uint8Function(&MemCopyUint16Uint8Wrapper);
+#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
+ memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
+#endif
+}
+
+
} } // namespace v8::internal
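
The memcopy code added to utils.cc above follows a runtime-dispatch pattern: a function pointer is statically bound to a portable library wrapper so it is callable at any point during startup, then rebound to a generated routine once CPU features are known. A minimal sketch of that pattern, where CreateFastMemMove() is a hypothetical stand-in for V8's CreateMemMoveFunction():

#include <cstddef>
#include <cstring>

typedef void (*MemMoveFn)(void* dest, const void* src, size_t size);

// Portable fallback; valid before any initialization has run.
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);
}

static MemMoveFn memmove_fn = &MemMoveWrapper;  // Safe default.

// Stub standing in for a codegen'd routine; returns NULL if unavailable.
static MemMoveFn CreateFastMemMove() { return NULL; }

void InitMemMoveFunction() {
  MemMoveFn generated = CreateFastMemMove();
  if (generated != NULL) memmove_fn = generated;  // Swap in the fast path.
}

void MemMove(void* dest, const void* src, size_t size) {
  if (size == 0) return;
  (*memmove_fn)(dest, src, size);
}
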
diff --git a/chromium/v8/src/utils.h b/chromium/v8/src/utils.h
index 3a0936eaa63..5422985bc53 100644
--- a/chromium/v8/src/utils.h
+++ b/chromium/v8/src/utils.h
@@ -1,41 +1,21 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_UTILS_H_
#define V8_UTILS_H_
+#include <limits.h>
#include <stdlib.h>
#include <string.h>
-#include <algorithm>
-#include <climits>
-#include "allocation.h"
-#include "checks.h"
-#include "globals.h"
+#include "src/allocation.h"
+#include "src/base/macros.h"
+#include "src/checks.h"
+#include "src/globals.h"
+#include "src/list.h"
+#include "src/platform.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
@@ -43,10 +23,8 @@ namespace internal {
// ----------------------------------------------------------------------------
// General helper functions
-#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
-
-// Returns true iff x is a power of 2 (or zero). Cannot be used with the
-// maximally negative value of the type T (the -1 overflows).
+// Returns true iff x is a power of 2. Cannot be used with the maximally
+// negative value of the type T (the -1 overflows).
template <typename T>
inline bool IsPowerOf2(T x) {
return IS_POWER_OF_TWO(x);
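
The IS_POWER_OF_TWO macro behind this function relies on the classic bit trick that clearing the lowest set bit of a power of two leaves zero. Note that 0 also passes the test, which is why callers such as WhichPowerOf2 must guarantee a nonzero argument; a standalone demonstration:

#include <cassert>

template <typename T>
inline bool IsPow2(T x) { return (x & (x - 1)) == 0; }

int main() {
  assert(IsPow2(64u));   // 0b1000000 & 0b0111111 == 0.
  assert(!IsPow2(48u));  // 0b0110000 & 0b0101111 != 0.
  assert(IsPow2(0u));    // Degenerate case the revised comment no longer mentions.
  return 0;
}
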
@@ -56,7 +34,6 @@ inline bool IsPowerOf2(T x) {
// X must be a power of 2. Returns the number of trailing zeros.
inline int WhichPowerOf2(uint32_t x) {
ASSERT(IsPowerOf2(x));
- ASSERT(x != 0);
int bits = 0;
#ifdef DEBUG
int original_x = x;
@@ -105,32 +82,6 @@ inline int MostSignificantBit(uint32_t x) {
}
-// Magic numbers for integer division.
-// These are kind of 2's complement reciprocal of the divisors.
-// Details and proofs can be found in:
-// - Hacker's Delight, Henry S. Warren, Jr.
-// - The PowerPC Compiler Writer’s Guide
-// and probably many others.
-// See details in the implementation of the algorithm in
-// lithium-codegen-arm.cc : LCodeGen::TryEmitSignedIntegerDivisionByConstant().
-struct DivMagicNumbers {
- unsigned M;
- unsigned s;
-};
-
-const DivMagicNumbers InvalidDivMagicNumber= {0, 0};
-const DivMagicNumbers DivMagicNumberFor3 = {0x55555556, 0};
-const DivMagicNumbers DivMagicNumberFor5 = {0x66666667, 1};
-const DivMagicNumbers DivMagicNumberFor7 = {0x92492493, 2};
-const DivMagicNumbers DivMagicNumberFor9 = {0x38e38e39, 1};
-const DivMagicNumbers DivMagicNumberFor11 = {0x2e8ba2e9, 1};
-const DivMagicNumbers DivMagicNumberFor25 = {0x51eb851f, 3};
-const DivMagicNumbers DivMagicNumberFor125 = {0x10624dd3, 3};
-const DivMagicNumbers DivMagicNumberFor625 = {0x68db8bad, 8};
-
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor);
-
-
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
@@ -172,6 +123,17 @@ inline T RoundUp(T x, intptr_t m) {
}
+// Increment a pointer until it has the specified alignment.
+// This works like RoundUp, but it works correctly on pointer types where
+// sizeof(*pointer) might not be 1.
+template<class T>
+T AlignUp(T pointer, size_t alignment) {
+ ASSERT(sizeof(pointer) == sizeof(uintptr_t));
+ uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+ return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
+}
+
+
template <typename T>
int Compare(const T& a, const T& b) {
if (a == b)
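
A short usage sketch for the new AlignUp helper, assuming pointers fit in uintptr_t. The mask form below is a re-derivation equivalent to delegating to RoundUp for power-of-two alignments, as the header's version does:

#include <cassert>
#include <cstdint>

template <class T>
T AlignUpSketch(T pointer, size_t alignment) {
  uintptr_t raw = reinterpret_cast<uintptr_t>(pointer);
  // Round up to the next multiple; alignment must be a power of two.
  uintptr_t rounded = (raw + alignment - 1) & ~(uintptr_t(alignment) - 1);
  return reinterpret_cast<T>(rounded);
}

int main() {
  char buffer[64];
  char* p = AlignUpSketch(buffer + 1, 16);  // Round up to a 16-byte boundary.
  assert(reinterpret_cast<uintptr_t>(p) % 16 == 0);
  return 0;
}
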
@@ -265,13 +227,31 @@ T NegAbs(T a) {
}
-inline int StrLength(const char* string) {
- size_t length = strlen(string);
- ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
- return static_cast<int>(length);
+// TODO(svenpanne) Clean up the whole power-of-2 mess.
+inline int32_t WhichPowerOf2Abs(int32_t x) {
+ return (x == kMinInt) ? 31 : WhichPowerOf2(Abs(x));
}
+// Obtains the unsigned type corresponding to T
+// available in C++11 as std::make_unsigned
+template<typename T>
+struct make_unsigned {
+ typedef T type;
+};
+
+
+// Template specializations necessary to have make_unsigned work
+template<> struct make_unsigned<int32_t> {
+ typedef uint32_t type;
+};
+
+
+template<> struct make_unsigned<int64_t> {
+ typedef uint64_t type;
+};
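
A sketch of what this hand-rolled make_unsigned buys: reinterpreting a signed value as its unsigned counterpart so shifts and wraparound are well defined. C++11 code would use std::make_unsigned instead, as the comment notes:

#include <cstdint>

template <typename T> struct MakeUnsigned { typedef T type; };
template <> struct MakeUnsigned<int32_t> { typedef uint32_t type; };

int32_t LogicalShiftRight(int32_t x, int shift) {
  typedef MakeUnsigned<int32_t>::type U;
  // Unsigned shift zero-fills; signed right shift is implementation-defined.
  return static_cast<int32_t>(static_cast<U>(x) >> shift);
}
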
+
+
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
@@ -286,6 +266,7 @@ class BitFieldBase {
static const U kMask = ((kOne << shift) << size) - (kOne << shift);
static const U kShift = shift;
static const U kSize = size;
+ static const U kNext = kShift + kSize;
// Value for the field with all bits set.
static const T kMax = static_cast<T>((1U << size) - 1);
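
The new kNext constant lets adjacent bitfields be packed without hand-computed shifts. A sketch of the idiom with a simplified stand-in for the BitField template declared above:

#include <cstdint>

template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kShift = shift;
  static const uint32_t kSize = size;
  static const uint32_t kNext = kShift + kSize;  // First bit after this field.
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits >> shift) & ((1u << size) - 1));
  }
};

// Each field starts where the previous one ends; no magic offsets.
typedef BitFieldSketch<unsigned, 0, 3> KindField;
typedef BitFieldSketch<unsigned, KindField::kNext, 5> IndexField;
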
@@ -361,6 +342,85 @@ inline uint32_t ComputePointerHash(void* ptr) {
// ----------------------------------------------------------------------------
+// Generated memcpy/memmove
+
+// Initializes the codegen support that depends on CPU features. This is
+// called after CPU initialization.
+void init_memcopy_functions();
+
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
+// Limit below which the extra overhead of the MemCopy function is likely
+// to outweigh the benefits of faster copying.
+const int kMinComplexMemCopy = 64;
+
+// Copy memory area. No restrictions.
+void MemMove(void* dest, const void* src, size_t size);
+typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
+
+// Keep the distinction of "move" vs. "copy" for the benefit of other
+// architectures.
+V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
+ MemMove(dest, src, size);
+}
+#elif defined(V8_HOST_ARCH_ARM)
+typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
+ size_t size);
+extern MemCopyUint8Function memcopy_uint8_function;
+V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
+ size_t chars) {
+ memcpy(dest, src, chars);
+}
+// For values < 16, the assembler function is slower than the inlined C code.
+const int kMinComplexMemCopy = 16;
+V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
+ (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src), size);
+}
+V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+}
+
+typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src,
+ size_t size);
+extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
+void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
+ size_t chars);
+// For values < 12, the assembler function is slower than the inlined C code.
+const int kMinComplexConvertMemCopy = 12;
+V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
+ size_t size) {
+ (*memcopy_uint16_uint8_function)(dest, src, size);
+}
+#elif defined(V8_HOST_ARCH_MIPS)
+typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
+ size_t size);
+extern MemCopyUint8Function memcopy_uint8_function;
+V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
+ size_t chars) {
+ memcpy(dest, src, chars);
+}
+// For values < 16, the assembler function is slower than the inlined C code.
+const int kMinComplexMemCopy = 16;
+V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
+ (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src), size);
+}
+V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+}
+#else
+// Copy memory area to disjoint memory area.
+V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
+ memcpy(dest, src, size);
+}
+V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+}
+const int kMinComplexMemCopy = 16 * kPointerSize;
+#endif // V8_TARGET_ARCH_IA32
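
The header keeps MemCopy and MemMove as distinct names even where both fall back to library calls because only MemMove is safe on overlapping spans. A short self-contained demonstration of the difference in contract:

#include <cstdio>
#include <cstring>

int main() {
  char buf[] = "abcdef";
  // Overlapping shift-right by one: memmove handles the overlap correctly.
  memmove(buf + 1, buf, 5);
  printf("%s\n", buf);  // Prints "aabcde".
  // memcpy(buf + 1, buf, 5) would be undefined behavior here.
  return 0;
}
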
+
+
+// ----------------------------------------------------------------------------
// Miscellaneous
// A static resource holds a static instance that can be reserved in
@@ -404,110 +464,6 @@ class Access {
};
-template <typename T>
-class Vector {
- public:
- Vector() : start_(NULL), length_(0) {}
- Vector(T* data, int length) : start_(data), length_(length) {
- ASSERT(length == 0 || (length > 0 && data != NULL));
- }
-
- static Vector<T> New(int length) {
- return Vector<T>(NewArray<T>(length), length);
- }
-
- // Returns a vector using the same backing storage as this one,
- // spanning from and including 'from', to but not including 'to'.
- Vector<T> SubVector(int from, int to) {
- SLOW_ASSERT(to <= length_);
- SLOW_ASSERT(from < to);
- ASSERT(0 <= from);
- return Vector<T>(start() + from, to - from);
- }
-
- // Returns the length of the vector.
- int length() const { return length_; }
-
- // Returns whether or not the vector is empty.
- bool is_empty() const { return length_ == 0; }
-
- // Returns the pointer to the start of the data in the vector.
- T* start() const { return start_; }
-
- // Access individual vector elements - checks bounds in debug mode.
- T& operator[](int index) const {
- ASSERT(0 <= index && index < length_);
- return start_[index];
- }
-
- const T& at(int index) const { return operator[](index); }
-
- T& first() { return start_[0]; }
-
- T& last() { return start_[length_ - 1]; }
-
- // Returns a clone of this vector with a new backing store.
- Vector<T> Clone() const {
- T* result = NewArray<T>(length_);
- for (int i = 0; i < length_; i++) result[i] = start_[i];
- return Vector<T>(result, length_);
- }
-
- void Sort(int (*cmp)(const T*, const T*)) {
- std::sort(start(), start() + length(), RawComparer(cmp));
- }
-
- void Sort() {
- std::sort(start(), start() + length());
- }
-
- void Truncate(int length) {
- ASSERT(length <= length_);
- length_ = length;
- }
-
- // Releases the array underlying this vector. Once disposed the
- // vector is empty.
- void Dispose() {
- DeleteArray(start_);
- start_ = NULL;
- length_ = 0;
- }
-
- inline Vector<T> operator+(int offset) {
- ASSERT(offset < length_);
- return Vector<T>(start_ + offset, length_ - offset);
- }
-
- // Factory method for creating empty vectors.
- static Vector<T> empty() { return Vector<T>(NULL, 0); }
-
- template<typename S>
- static Vector<T> cast(Vector<S> input) {
- return Vector<T>(reinterpret_cast<T*>(input.start()),
- input.length() * sizeof(S) / sizeof(T));
- }
-
- protected:
- void set_start(T* start) { start_ = start; }
-
- private:
- T* start_;
- int length_;
-
- class RawComparer {
- public:
- explicit RawComparer(int (*cmp)(const T*, const T*)) : cmp_(cmp) {}
- bool operator()(const T& a, const T& b) {
- return cmp_(&a, &b) < 0;
- }
-
- private:
- int (*cmp_)(const T*, const T*);
- };
-};
-
-
// A pointer that can only be set once and doesn't allow NULL values.
template<typename T>
class SetOncePointer {
@@ -545,16 +501,14 @@ class EmbeddedVector : public Vector<T> {
// When copying, make underlying Vector to reference our buffer.
EmbeddedVector(const EmbeddedVector& rhs)
: Vector<T>(rhs) {
- // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
- memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
set_start(buffer_);
}
EmbeddedVector& operator=(const EmbeddedVector& rhs) {
if (this == &rhs) return *this;
Vector<T>::operator=(rhs);
- // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
- memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
this->set_start(buffer_);
return *this;
}
@@ -564,44 +518,6 @@ class EmbeddedVector : public Vector<T> {
};
-template <typename T>
-class ScopedVector : public Vector<T> {
- public:
- explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
- ~ScopedVector() {
- DeleteArray(this->start());
- }
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
-};
-
-#define STATIC_ASCII_VECTOR(x) \
- v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \
- ARRAY_SIZE(x)-1)
-
-inline Vector<const char> CStrVector(const char* data) {
- return Vector<const char>(data, StrLength(data));
-}
-
-inline Vector<const uint8_t> OneByteVector(const char* data, int length) {
- return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length);
-}
-
-inline Vector<const uint8_t> OneByteVector(const char* data) {
- return OneByteVector(data, StrLength(data));
-}
-
-inline Vector<char> MutableCStrVector(char* data) {
- return Vector<char>(data, StrLength(data));
-}
-
-inline Vector<char> MutableCStrVector(char* data, int max) {
- int length = StrLength(data);
- return Vector<char>(data, (length < max) ? length : max);
-}
-
-
/*
* A class that collects values into a backing store.
* Specialized versions of the class can allow access to the backing store
@@ -929,7 +845,6 @@ struct BitCastHelper {
INLINE(static Dest cast(const Source& source)) {
Dest dest;
- // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(&dest, &source, sizeof(dest));
return dest;
}
@@ -1089,6 +1004,66 @@ class EnumSet {
T bits_;
};
+// Bit field extraction.
+inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
+ return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
+}
+
+inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
+ return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
+ return (x << (31 - msb)) >> (lsb + 31 - msb);
+}
+
+inline int signed_bitextract_64(int msb, int lsb, int x) {
+ // TODO(jbramley): This is broken for big bitfields.
+ return (x << (63 - msb)) >> (lsb + 63 - msb);
+}
+
+// Check number width.
+inline bool is_intn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < 64));
+ int64_t limit = static_cast<int64_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return !(x >> n);
+}
+
+template <class T>
+inline T truncate_to_intn(T x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return (x & ((static_cast<T>(1) << n) - 1));
+}
+
+#define INT_1_TO_63_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define DECLARE_IS_INT_N(N) \
+inline bool is_int##N(int64_t x) { return is_intn(x, N); }
+#define DECLARE_IS_UINT_N(N) \
+template <class T> \
+inline bool is_uint##N(T x) { return is_uintn(x, N); }
+#define DECLARE_TRUNCATE_TO_INT_N(N) \
+template <class T> \
+inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_INT_N
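
A worked example for the bit-extraction and width-check helpers above: pull bits 11..4 out of a word, and test whether a value fits in a signed n-bit field. The functions are standalone copies for illustration, matching the header's definitions:

#include <cassert>
#include <cstdint>

inline uint32_t Bits(int msb, int lsb, uint32_t x) {
  return (x >> lsb) & ((1u << (1 + msb - lsb)) - 1);
}

inline bool FitsIntN(int64_t x, unsigned n) {
  int64_t limit = static_cast<int64_t>(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

int main() {
  assert(Bits(11, 4, 0x00000ABCu) == 0xAB);  // Bits 11..4 of 0xABC.
  assert(FitsIntN(-128, 8));                 // int8_t range is [-128, 127].
  assert(!FitsIntN(128, 8));
  return 0;
}
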
class TypeFeedbackId {
public:
@@ -1118,6 +1093,7 @@ class BailoutId {
bool IsNone() const { return id_ == kNoneId; }
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
+ bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
private:
static const int kNoneId = -1;
@@ -1139,6 +1115,469 @@ class BailoutId {
int id_;
};
+
+template <class C>
+class ContainerPointerWrapper {
+ public:
+ typedef typename C::iterator iterator;
+ typedef typename C::reverse_iterator reverse_iterator;
+ explicit ContainerPointerWrapper(C* container) : container_(container) {}
+ iterator begin() { return container_->begin(); }
+ iterator end() { return container_->end(); }
+ reverse_iterator rbegin() { return container_->rbegin(); }
+ reverse_iterator rend() { return container_->rend(); }
+ private:
+ C* container_;
+};
+
+
+// ----------------------------------------------------------------------------
+// I/O support.
+
+#if __GNUC__ >= 4
+// On gcc we can ask the compiler to check the types of %d-style format
+// specifiers and their associated arguments. TODO(erikcorry) fix this
+// so it works on MacOSX.
+#if defined(__MACH__) && defined(__APPLE__)
+#define PRINTF_CHECKING
+#define FPRINTF_CHECKING
+#define PRINTF_METHOD_CHECKING
+#define FPRINTF_METHOD_CHECKING
+#else // MacOsX.
+#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
+#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
+#define PRINTF_METHOD_CHECKING __attribute__ ((format (printf, 2, 3)))
+#define FPRINTF_METHOD_CHECKING __attribute__ ((format (printf, 3, 4)))
+#endif
+#else
+#define PRINTF_CHECKING
+#define FPRINTF_CHECKING
+#define PRINTF_METHOD_CHECKING
+#define FPRINTF_METHOD_CHECKING
+#endif
+
+// Our version of printf().
+void PRINTF_CHECKING PrintF(const char* format, ...);
+void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
+
+// Prepends the current process ID to the output.
+void PRINTF_CHECKING PrintPID(const char* format, ...);
+
+// Safe formatting print. Ensures that str is always null-terminated.
+// Returns the number of chars written, or -1 if output was truncated.
+int FPRINTF_CHECKING SNPrintF(Vector<char> str, const char* format, ...);
+int VSNPrintF(Vector<char> str, const char* format, va_list args);
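Unlike snprintf, SNPrintF reports truncation by returning -1 rather than the would-be length, and always NUL-terminates. A hedged sketch of the calling pattern, assuming the Vector<char> declared in this header:

  void Describe(int count) {
    char buffer[64];
    Vector<char> scratch(buffer, sizeof(buffer));
    int written = SNPrintF(scratch, "count = %d", count);
    if (written < 0) {
      // Output was truncated, but buffer still holds a NUL-terminated prefix.
    }
  }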
+
+void StrNCpy(Vector<char> dest, const char* src, size_t n);
+
+// Our version of fflush.
+void Flush(FILE* out);
+
+inline void Flush() {
+ Flush(stdout);
+}
+
+
+// Read a line of characters after printing the prompt to stdout. The
+// resulting char* needs to be disposed of with DeleteArray by the caller.
+char* ReadLine(const char* prompt);
+
+
+// Read and return the raw bytes in a file. The size of the buffer is
+// returned in *size.
+// The returned buffer must be freed by the caller.
+byte* ReadBytes(const char* filename, int* size, bool verbose = true);
+
+
+// Append size chars from str to the file given by filename. The data is
+// appended to any existing contents. Returns the number of chars written.
+int AppendChars(const char* filename,
+ const char* str,
+ int size,
+ bool verbose = true);
+
+
+// Write size chars from str to the file given by filename.
+// The file is overwritten. Returns the number of chars written.
+int WriteChars(const char* filename,
+ const char* str,
+ int size,
+ bool verbose = true);
+
+
+// Write size bytes to the file given by filename.
+// The file is overwritten. Returns the number of bytes written.
+int WriteBytes(const char* filename,
+ const byte* bytes,
+ int size,
+ bool verbose = true);
+
+
+// Write the C code
+// const char* <varname> = "<str>";
+// const int <varname>_len = <len>;
+// to the file given by filename. Only the first size chars are written.
+int WriteAsCFile(const char* filename, const char* varname,
+ const char* str, int size, bool verbose = true);
+
+
+// ----------------------------------------------------------------------------
+// Data structures
+
+template <typename T>
+inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
+ int length) {
+ return Vector< Handle<Object> >(
+ reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
+}
+
+
+// ----------------------------------------------------------------------------
+// Memory
+
+// Copies words from |src| to |dst|. The data spans must not overlap.
+template <typename T>
+inline void CopyWords(T* dst, const T* src, size_t num_words) {
+ STATIC_ASSERT(sizeof(T) == kPointerSize);
+ // TODO(mvstanton): disabled because Mac builds fail spuriously on this
+ // assert; the comparison is performed signed. Investigate.
+ // ASSERT(Min(dst, const_cast<T*>(src)) + num_words <=
+ // Max(dst, const_cast<T*>(src)));
+ ASSERT(num_words > 0);
+
+ // Use block copying MemCopy if the segment we're copying is large
+ // enough to justify the extra call/setup overhead.
+ static const size_t kBlockCopyLimit = 16;
+
+ if (num_words < kBlockCopyLimit) {
+ do {
+ num_words--;
+ *dst++ = *src++;
+ } while (num_words > 0);
+ } else {
+ MemCopy(dst, src, num_words * kPointerSize);
+ }
+}
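Below kBlockCopyLimit the inline word loop avoids MemCopy's call and setup cost. A hedged usage sketch; intptr_t satisfies the sizeof(T) == kPointerSize STATIC_ASSERT on any host:

  void CopyEightWords(intptr_t* dst, const intptr_t* src) {
    CopyWords(dst, src, 8);  // 8 < kBlockCopyLimit, so the loop path runs.
  }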
+
+
+// Copies words from |src| to |dst|. No restrictions.
+template <typename T>
+inline void MoveWords(T* dst, const T* src, size_t num_words) {
+ STATIC_ASSERT(sizeof(T) == kPointerSize);
+ ASSERT(num_words > 0);
+
+ // Use block copying MemCopy if the segment we're copying is large
+ // enough to justify the extra call/setup overhead.
+ static const size_t kBlockCopyLimit = 16;
+
+ // T* arithmetic is already scaled by sizeof(T), so the non-overlap check
+ // compares src + num_words, not a byte count.
+ if (num_words < kBlockCopyLimit &&
+ ((dst < src) || (dst >= (src + num_words)))) {
+ do {
+ num_words--;
+ *dst++ = *src++;
+ } while (num_words > 0);
+ } else {
+ MemMove(dst, src, num_words * kPointerSize);
+ }
+}
+
+
+// Copies data from |src| to |dst|. The data spans must not overlap.
+template <typename T>
+inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
+ STATIC_ASSERT(sizeof(T) == 1);
+ ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <=
+ Max(dst, const_cast<T*>(src)));
+ if (num_bytes == 0) return;
+
+ // Use block copying MemCopy if the segment we're copying is large
+ // enough to justify the extra call/setup overhead.
+ static const int kBlockCopyLimit = kMinComplexMemCopy;
+
+ if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
+ do {
+ num_bytes--;
+ *dst++ = *src++;
+ } while (num_bytes > 0);
+ } else {
+ MemCopy(dst, src, num_bytes);
+ }
+}
+
+
+template <typename T, typename U>
+inline void MemsetPointer(T** dest, U* value, int counter) {
+#ifdef DEBUG
+ T* a = NULL;
+ U* b = NULL;
+ a = b; // Fake assignment to check assignability.
+ USE(a);
+#endif // DEBUG
+#if V8_HOST_ARCH_IA32
+#define STOS "stosl"
+#elif V8_HOST_ARCH_X64
+#define STOS "stosq"
+#endif
+#if defined(__native_client__)
+ // This STOS sequence does not validate for x86_64 Native Client.
+ // Here we #undef STOS to force use of the slower C version.
+ // TODO(bradchen): Profile V8 and implement a faster REP STOS
+ // here if the profile indicates it matters.
+#undef STOS
+#endif
+
+#if defined(MEMORY_SANITIZER)
+ // MemorySanitizer does not understand inline assembly.
+#undef STOS
+#endif
+
+#if defined(__GNUC__) && defined(STOS)
+ asm volatile(
+ "cld;"
+ "rep ; " STOS
+ : "+&c" (counter), "+&D" (dest)
+ : "a" (value)
+ : "memory", "cc");
+#else
+ for (int i = 0; i < counter; i++) {
+ dest[i] = value;
+ }
+#endif
+
+#undef STOS
+}
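MemsetPointer fills a run of pointer slots with a single value, using rep stos on x86/x64 hosts where the inline assembly is allowed. A hedged sketch; Object and the sentinel name stand in for whatever types the caller actually uses:

  // Point every slot at the same sentinel; the DEBUG block above checks
  // that the value type is assignable to the slot type.
  void ClearSlots(Object** slots, int count, Object* undefined_sentinel) {
    MemsetPointer(slots, undefined_sentinel, count);
  }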
+
+
+// Simple wrapper that allows an ExternalString to refer to a
+// Vector<const char>. Doesn't assume ownership of the data.
+class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
+ public:
+ explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
+
+ virtual const char* data() const { return data_.start(); }
+
+ virtual size_t length() const { return data_.length(); }
+
+ private:
+ Vector<const char> data_;
+};
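The adapter only views the bytes, so the Vector's storage must outlive any external string built on the resource. A hedged sketch:

  void WrapSource() {
    static const char kSource[] = "var x = 42;";
    Vector<const char> source(kSource, sizeof(kSource) - 1);
    AsciiStringAdapter resource(source);
    // resource.data()/resource.length() now view kSource without a copy;
    // kSource has static storage, so the view stays valid.
  }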
+
+
+// Simple support to read a file into a 0-terminated C-string.
+// The returned buffer must be freed by the caller.
+// On return, *exists tells whether the file existed.
+Vector<const char> ReadFile(const char* filename,
+ bool* exists,
+ bool verbose = true);
+Vector<const char> ReadFile(FILE* file,
+ bool* exists,
+ bool verbose = true);
+
+
+template <typename sourcechar, typename sinkchar>
+INLINE(static void CopyCharsUnsigned(sinkchar* dest,
+ const sourcechar* src,
+ int chars));
+#if defined(V8_HOST_ARCH_ARM)
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+#elif defined(V8_HOST_ARCH_MIPS)
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+#endif
+
+// Copy from ASCII/16-bit chars to ASCII/16-bit chars.
+template <typename sourcechar, typename sinkchar>
+INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
+
+template<typename sourcechar, typename sinkchar>
+void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+ ASSERT(sizeof(sourcechar) <= 2);
+ ASSERT(sizeof(sinkchar) <= 2);
+ if (sizeof(sinkchar) == 1) {
+ if (sizeof(sourcechar) == 1) {
+ CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src),
+ chars);
+ } else {
+ CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint16_t*>(src),
+ chars);
+ }
+ } else {
+ if (sizeof(sourcechar) == 1) {
+ CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src),
+ chars);
+ } else {
+ CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
+ reinterpret_cast<const uint16_t*>(src),
+ chars);
+ }
+ }
+}
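All four width combinations funnel into CopyCharsUnsigned; the widening case zero-extends each byte. A hedged sketch:

  void WidenLatin1() {
    const uint8_t narrow[4] = {'a', 'b', 'c', 'd'};
    uint16_t wide[4];
    CopyChars(wide, narrow, 4);  // wide[i] == narrow[i], zero-extended.
  }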
+
+template <typename sourcechar, typename sinkchar>
+void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
+ sinkchar* limit = dest + chars;
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+ if (sizeof(*dest) == sizeof(*src)) {
+ if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
+ MemCopy(dest, src, chars * sizeof(*dest));
+ return;
+ }
+ // Number of characters in a uintptr_t.
+ static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
+ ASSERT(dest + kStepSize > dest); // Check for overflow.
+ while (dest + kStepSize <= limit) {
+ *reinterpret_cast<uintptr_t*>(dest) =
+ *reinterpret_cast<const uintptr_t*>(src);
+ dest += kStepSize;
+ src += kStepSize;
+ }
+ }
+#endif
+ while (dest < limit) {
+ *dest++ = static_cast<sinkchar>(*src++);
+ }
+}
+
+
+#if defined(V8_HOST_ARCH_ARM)
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ case 2:
+ memcpy(dest, src, 2);
+ break;
+ case 3:
+ memcpy(dest, src, 3);
+ break;
+ case 4:
+ memcpy(dest, src, 4);
+ break;
+ case 5:
+ memcpy(dest, src, 5);
+ break;
+ case 6:
+ memcpy(dest, src, 6);
+ break;
+ case 7:
+ memcpy(dest, src, 7);
+ break;
+ case 8:
+ memcpy(dest, src, 8);
+ break;
+ case 9:
+ memcpy(dest, src, 9);
+ break;
+ case 10:
+ memcpy(dest, src, 10);
+ break;
+ case 11:
+ memcpy(dest, src, 11);
+ break;
+ case 12:
+ memcpy(dest, src, 12);
+ break;
+ case 13:
+ memcpy(dest, src, 13);
+ break;
+ case 14:
+ memcpy(dest, src, 14);
+ break;
+ case 15:
+ memcpy(dest, src, 15);
+ break;
+ default:
+ MemCopy(dest, src, chars);
+ break;
+ }
+}
+
+
+void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
+ if (chars >= kMinComplexConvertMemCopy) {
+ MemCopyUint16Uint8(dest, src, chars);
+ } else {
+ MemCopyUint16Uint8Wrapper(dest, src, chars);
+ }
+}
+
+
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ case 2:
+ memcpy(dest, src, 4);
+ break;
+ case 3:
+ memcpy(dest, src, 6);
+ break;
+ case 4:
+ memcpy(dest, src, 8);
+ break;
+ case 5:
+ memcpy(dest, src, 10);
+ break;
+ case 6:
+ memcpy(dest, src, 12);
+ break;
+ case 7:
+ memcpy(dest, src, 14);
+ break;
+ default:
+ MemCopy(dest, src, chars * sizeof(*dest));
+ break;
+ }
+}
+
+
+#elif defined(V8_HOST_ARCH_MIPS)
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+ if (chars < kMinComplexMemCopy) {
+ memcpy(dest, src, chars);
+ } else {
+ MemCopy(dest, src, chars);
+ }
+}
+
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+ if (chars < kMinComplexMemCopy) {
+ memcpy(dest, src, chars * sizeof(*dest));
+ } else {
+ MemCopy(dest, src, chars * sizeof(*dest));
+ }
+}
+#endif
+
+
+class StringBuilder : public SimpleStringBuilder {
+ public:
+ explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
+ StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }
+
+ // Add formatted contents to the builder just like printf().
+ void AddFormatted(const char* format, ...);
+
+ // Add formatted contents like printf based on a va_list.
+ void AddFormattedList(const char* format, va_list list);
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
+};
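StringBuilder layers printf-style appends on top of SimpleStringBuilder. A hedged usage sketch, assuming the base class's Finalize() hands back the accumulated NUL-terminated buffer:

  StringBuilder builder(128);
  builder.AddFormatted("pid=%d ", 42);
  builder.AddFormatted("heap=%d KB", 1024);
  char* text = builder.Finalize();  // "pid=42 heap=1024 KB"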
+
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
diff --git a/chromium/v8/src/utils/DEPS b/chromium/v8/src/utils/DEPS
new file mode 100644
index 00000000000..3da1ce1a56a
--- /dev/null
+++ b/chromium/v8/src/utils/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+ "-src",
+ "+src/base",
+ "+src/platform",
+]
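The include_rules read top to bottom: "-src" forbids includes from anywhere under src/, then "+src/base" and "+src/platform" carve those two subtrees back out. Under these rules, Chromium's checkdeps tool would treat the following as shown (illustrative lines, not part of the diff):

  #include "src/base/macros.h"   // allowed: matched by +src/base
  #include "src/isolate.h"       // rejected: matched only by -src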
diff --git a/chromium/v8/src/utils/random-number-generator.cc b/chromium/v8/src/utils/random-number-generator.cc
index fe273315a7a..3da6a5aa4ac 100644
--- a/chromium/v8/src/utils/random-number-generator.cc
+++ b/chromium/v8/src/utils/random-number-generator.cc
@@ -1,39 +1,17 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "utils/random-number-generator.h"
-
-#include <cstdio>
-#include <cstdlib>
-
-#include "flags.h"
-#include "platform/mutex.h"
-#include "platform/time.h"
-#include "utils.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/utils/random-number-generator.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <new>
+
+#include "src/base/macros.h"
+#include "src/platform/mutex.h"
+#include "src/platform/time.h"
namespace v8 {
namespace internal {
@@ -50,12 +28,6 @@ void RandomNumberGenerator::SetEntropySource(EntropySource source) {
RandomNumberGenerator::RandomNumberGenerator() {
- // Check --random-seed flag first.
- if (FLAG_random_seed != 0) {
- SetSeed(FLAG_random_seed);
- return;
- }
-
// Check if embedder supplied an entropy source.
{ LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
if (entropy_source != NULL) {
@@ -110,7 +82,7 @@ int RandomNumberGenerator::NextInt(int max) {
ASSERT_LE(0, max);
// Fast path if max is a power of 2.
- if (IsPowerOf2(max)) {
+ if (IS_POWER_OF_TWO(max)) {
return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
}
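The power-of-two fast path maps a 31-bit draw onto [0, max) with one multiply and one shift. A quick arithmetic check (standalone sketch, not V8 code):

  // With max a power of two and r uniform in [0, 2^31), (max * r) >> 31
  // is uniform in [0, max): the multiply scales, the shift renormalizes.
  int64_t r = INT64_C(1) << 30;  // example draw from Next(31)
  int max = 8;
  int result = static_cast<int>((max * r) >> 31);  // 8 * 2^30 >> 31 == 4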
@@ -140,7 +112,13 @@ void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
int RandomNumberGenerator::Next(int bits) {
ASSERT_LT(0, bits);
ASSERT_GE(32, bits);
- int64_t seed = (seed_ * kMultiplier + kAddend) & kMask;
+ // Do unsigned multiplication, which has the intended modulo semantics;
+ // signed multiplication could overflow, which is undefined behavior.
+ uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
+ // Assigning a uint64_t to an int64_t is implementation defined, but this
+ // should be OK. Use a static_cast to explicitly state that we know what we're
+ // doing. (Famous last words...)
+ int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
seed_ = seed;
return static_cast<int>(seed >> (48 - bits));
}
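Next() steps a 48-bit linear congruential generator and returns the top |bits| of the new state. A standalone sketch of the recurrence; the constant values are assumed from the classic 48-bit LCG family (also used by java.util.Random), since the diff only shows the names kMultiplier/kAddend/kMask:

  static const uint64_t kLcgMultiplier = UINT64_C(0x5deece66d);
  static const uint64_t kLcgAddend = UINT64_C(0xb);
  static const uint64_t kLcgMask = (UINT64_C(1) << 48) - 1;

  int NextBits(uint64_t* state, int bits) {
    *state = (*state * kLcgMultiplier + kLcgAddend) & kLcgMask;
    return static_cast<int>(*state >> (48 - bits));  // top |bits| bits
  }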
diff --git a/chromium/v8/src/utils/random-number-generator.h b/chromium/v8/src/utils/random-number-generator.h
index cc7d7395e6a..54075716c48 100644
--- a/chromium/v8/src/utils/random-number-generator.h
+++ b/chromium/v8/src/utils/random-number-generator.h
@@ -1,34 +1,11 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
#define V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
-#include "globals.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/v8-counters.cc b/chromium/v8/src/v8-counters.cc
deleted file mode 100644
index c899b289a53..00000000000
--- a/chromium/v8/src/v8-counters.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-Counters::Counters(Isolate* isolate) {
-#define HT(name, caption) \
- name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate);
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define HP(name, caption) \
- name##_ = Histogram(#caption, 0, 101, 100, isolate);
- HISTOGRAM_PERCENTAGE_LIST(HP)
-#undef HP
-
-#define HM(name, caption) \
- name##_ = Histogram(#caption, 1000, 500000, 50, isolate);
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-
-#define SC(name, caption) \
- name##_ = StatsCounter(isolate, "c:" #caption);
-
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
-#undef SC
-
-#define SC(name) \
- count_of_##name##_ = StatsCounter(isolate, "c:" "V8.CountOf_" #name); \
- size_of_##name##_ = StatsCounter(isolate, "c:" "V8.SizeOf_" #name);
- INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- count_of_CODE_TYPE_##name##_ = \
- StatsCounter(isolate, "c:" "V8.CountOf_CODE_TYPE-" #name); \
- size_of_CODE_TYPE_##name##_ = \
- StatsCounter(isolate, "c:" "V8.SizeOf_CODE_TYPE-" #name);
- CODE_KIND_LIST(SC)
-#undef SC
-
-#define SC(name) \
- count_of_FIXED_ARRAY_##name##_ = \
- StatsCounter(isolate, "c:" "V8.CountOf_FIXED_ARRAY-" #name); \
- size_of_FIXED_ARRAY_##name##_ = \
- StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name);
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- count_of_CODE_AGE_##name##_ = \
- StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \
- size_of_CODE_AGE_##name##_ = \
- StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name);
- CODE_AGE_LIST_COMPLETE(SC)
-#undef SC
-}
-
-
-void Counters::ResetHistograms() {
-#define HT(name, caption) name##_.Reset();
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define HP(name, caption) name##_.Reset();
- HISTOGRAM_PERCENTAGE_LIST(HP)
-#undef HP
-
-#define HM(name, caption) name##_.Reset();
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-}
-
-} } // namespace v8::internal
diff --git a/chromium/v8/src/v8-counters.h b/chromium/v8/src/v8-counters.h
deleted file mode 100644
index 9178046d6ed..00000000000
--- a/chromium/v8/src/v8-counters.h
+++ /dev/null
@@ -1,440 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8_COUNTERS_H_
-#define V8_V8_COUNTERS_H_
-
-#include "allocation.h"
-#include "counters.h"
-#include "objects.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-#define HISTOGRAM_TIMER_LIST(HT) \
- /* Garbage collection timers. */ \
- HT(gc_compactor, V8.GCCompactor) \
- HT(gc_scavenger, V8.GCScavenger) \
- HT(gc_context, V8.GCContext) /* GC context cleanup time */ \
- /* Parsing timers. */ \
- HT(parse, V8.Parse) \
- HT(parse_lazy, V8.ParseLazy) \
- HT(pre_parse, V8.PreParse) \
- /* Total compilation times. */ \
- HT(compile, V8.Compile) \
- HT(compile_eval, V8.CompileEval) \
- HT(compile_lazy, V8.CompileLazy)
-
-#define HISTOGRAM_PERCENTAGE_LIST(HP) \
- /* Heap fragmentation. */ \
- HP(external_fragmentation_total, \
- V8.MemoryExternalFragmentationTotal) \
- HP(external_fragmentation_old_pointer_space, \
- V8.MemoryExternalFragmentationOldPointerSpace) \
- HP(external_fragmentation_old_data_space, \
- V8.MemoryExternalFragmentationOldDataSpace) \
- HP(external_fragmentation_code_space, \
- V8.MemoryExternalFragmentationCodeSpace) \
- HP(external_fragmentation_map_space, \
- V8.MemoryExternalFragmentationMapSpace) \
- HP(external_fragmentation_cell_space, \
- V8.MemoryExternalFragmentationCellSpace) \
- HP(external_fragmentation_property_cell_space, \
- V8.MemoryExternalFragmentationPropertyCellSpace) \
- HP(external_fragmentation_lo_space, \
- V8.MemoryExternalFragmentationLoSpace) \
- /* Percentages of heap committed to each space. */ \
- HP(heap_fraction_new_space, \
- V8.MemoryHeapFractionNewSpace) \
- HP(heap_fraction_old_pointer_space, \
- V8.MemoryHeapFractionOldPointerSpace) \
- HP(heap_fraction_old_data_space, \
- V8.MemoryHeapFractionOldDataSpace) \
- HP(heap_fraction_code_space, \
- V8.MemoryHeapFractionCodeSpace) \
- HP(heap_fraction_map_space, \
- V8.MemoryHeapFractionMapSpace) \
- HP(heap_fraction_cell_space, \
- V8.MemoryHeapFractionCellSpace) \
- HP(heap_fraction_property_cell_space, \
- V8.MemoryHeapFractionPropertyCellSpace) \
- HP(heap_fraction_lo_space, \
- V8.MemoryHeapFractionLoSpace) \
- /* Percentage of crankshafted codegen. */ \
- HP(codegen_fraction_crankshaft, \
- V8.CodegenFractionCrankshaft) \
-
-
-#define HISTOGRAM_MEMORY_LIST(HM) \
- HM(heap_sample_total_committed, V8.MemoryHeapSampleTotalCommitted) \
- HM(heap_sample_total_used, V8.MemoryHeapSampleTotalUsed) \
- HM(heap_sample_map_space_committed, \
- V8.MemoryHeapSampleMapSpaceCommitted) \
- HM(heap_sample_cell_space_committed, \
- V8.MemoryHeapSampleCellSpaceCommitted) \
- HM(heap_sample_property_cell_space_committed, \
- V8.MemoryHeapSamplePropertyCellSpaceCommitted) \
- HM(heap_sample_code_space_committed, \
- V8.MemoryHeapSampleCodeSpaceCommitted) \
- HM(heap_sample_maximum_committed, \
- V8.MemoryHeapSampleMaximumCommitted) \
-
-
-// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
-// Intellisense to crash. It was broken into two macros (each of length 40
-// lines) rather than one macro (of length about 80 lines) to work around
-// this problem. Please avoid using recursive macros of this length when
-// possible.
-#define STATS_COUNTER_LIST_1(SC) \
- /* Global Handle Count*/ \
- SC(global_handles, V8.GlobalHandles) \
- /* OS Memory allocated */ \
- SC(memory_allocated, V8.OsMemoryAllocated) \
- SC(normalized_maps, V8.NormalizedMaps) \
- SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
- SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
- SC(alive_after_last_gc, V8.AliveAfterLastGC) \
- SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
- SC(objs_since_last_full, V8.ObjsSinceLastFull) \
- SC(string_table_capacity, V8.StringTableCapacity) \
- SC(number_of_symbols, V8.NumberOfSymbols) \
- SC(script_wrappers, V8.ScriptWrappers) \
- SC(call_initialize_stubs, V8.CallInitializeStubs) \
- SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
- SC(call_normal_stubs, V8.CallNormalStubs) \
- SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
- SC(arguments_adaptors, V8.ArgumentsAdaptors) \
- SC(compilation_cache_hits, V8.CompilationCacheHits) \
- SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- SC(string_ctor_calls, V8.StringConstructorCalls) \
- SC(string_ctor_conversions, V8.StringConstructorConversions) \
- SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \
- SC(string_ctor_string_value, V8.StringConstructorStringValue) \
- SC(string_ctor_gc_required, V8.StringConstructorGCRequired) \
- /* Amount of evaled source code. */ \
- SC(total_eval_size, V8.TotalEvalSize) \
- /* Amount of loaded source code. */ \
- SC(total_load_size, V8.TotalLoadSize) \
- /* Amount of parsed source code. */ \
- SC(total_parse_size, V8.TotalParseSize) \
- /* Amount of source code skipped over using preparsing. */ \
- SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
- /* Number of symbol lookups skipped using preparsing */ \
- SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \
- /* Amount of compiled source code. */ \
- SC(total_compile_size, V8.TotalCompileSize) \
- /* Amount of source code compiled with the full codegen. */ \
- SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) \
- /* Number of contexts created from scratch. */ \
- SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
- /* Number of contexts created by partial snapshot. */ \
- SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
- /* Number of code objects found from pc. */ \
- SC(pc_to_code, V8.PcToCode) \
- SC(pc_to_code_cached, V8.PcToCodeCached) \
- /* The store-buffer implementation of the write barrier. */ \
- SC(store_buffer_compactions, V8.StoreBufferCompactions) \
- SC(store_buffer_overflows, V8.StoreBufferOverflows)
-
-
-#define STATS_COUNTER_LIST_2(SC) \
- /* Number of code stubs. */ \
- SC(code_stubs, V8.CodeStubs) \
- /* Amount of stub code. */ \
- SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
- /* Amount of (JS) compiled code. */ \
- SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
- SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
- SC(gc_compactor_caused_by_promoted_data, \
- V8.GCCompactorCausedByPromotedData) \
- SC(gc_compactor_caused_by_oldspace_exhaustion, \
- V8.GCCompactorCausedByOldspaceExhaustion) \
- SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
- SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
- /* How is the generic keyed-load stub used? */ \
- SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
- SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
- SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
- SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
- SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs) \
- SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
- /* How is the generic keyed-call stub used? */ \
- SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
- SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
- SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
- SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
- SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
- SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
- SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
- SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
- SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
- SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \
- SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
- SC(store_normal_miss, V8.StoreNormalMiss) \
- SC(store_normal_hit, V8.StoreNormalHit) \
- SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \
- SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
- SC(cow_arrays_converted, V8.COWArraysConverted) \
- SC(call_miss, V8.CallMiss) \
- SC(keyed_call_miss, V8.KeyedCallMiss) \
- SC(load_miss, V8.LoadMiss) \
- SC(keyed_load_miss, V8.KeyedLoadMiss) \
- SC(call_const, V8.CallConst) \
- SC(call_const_fast_api, V8.CallConstFastApi) \
- SC(call_const_interceptor, V8.CallConstInterceptor) \
- SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
- SC(call_global_inline, V8.CallGlobalInline) \
- SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
- SC(constructed_objects, V8.ConstructedObjects) \
- SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
- SC(negative_lookups, V8.NegativeLookups) \
- SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
- SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
- SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \
- SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \
- SC(array_function_runtime, V8.ArrayFunctionRuntime) \
- SC(array_function_native, V8.ArrayFunctionNative) \
- SC(for_in, V8.ForIn) \
- SC(enum_cache_hits, V8.EnumCacheHits) \
- SC(enum_cache_misses, V8.EnumCacheMisses) \
- SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(fast_new_closure_total, V8.FastNewClosureTotal) \
- SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
- SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
- SC(string_add_runtime, V8.StringAddRuntime) \
- SC(string_add_native, V8.StringAddNative) \
- SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \
- SC(sub_string_runtime, V8.SubStringRuntime) \
- SC(sub_string_native, V8.SubStringNative) \
- SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \
- SC(string_compare_native, V8.StringCompareNative) \
- SC(string_compare_runtime, V8.StringCompareRuntime) \
- SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
- SC(regexp_entry_native, V8.RegExpEntryNative) \
- SC(number_to_string_native, V8.NumberToStringNative) \
- SC(number_to_string_runtime, V8.NumberToStringRuntime) \
- SC(math_acos, V8.MathAcos) \
- SC(math_asin, V8.MathAsin) \
- SC(math_atan, V8.MathAtan) \
- SC(math_atan2, V8.MathAtan2) \
- SC(math_cos, V8.MathCos) \
- SC(math_exp, V8.MathExp) \
- SC(math_floor, V8.MathFloor) \
- SC(math_log, V8.MathLog) \
- SC(math_pow, V8.MathPow) \
- SC(math_round, V8.MathRound) \
- SC(math_sin, V8.MathSin) \
- SC(math_sqrt, V8.MathSqrt) \
- SC(math_tan, V8.MathTan) \
- SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \
- SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
- SC(stack_interrupts, V8.StackInterrupts) \
- SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
- SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \
- SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \
- SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
- SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \
- SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
- /* Number of write barriers in generated code. */ \
- SC(write_barriers_dynamic, V8.WriteBarriersDynamic) \
- SC(write_barriers_static, V8.WriteBarriersStatic) \
- SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
- SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
- SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
- SC(old_pointer_space_bytes_available, \
- V8.MemoryOldPointerSpaceBytesAvailable) \
- SC(old_pointer_space_bytes_committed, \
- V8.MemoryOldPointerSpaceBytesCommitted) \
- SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \
- SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \
- SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \
- SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed) \
- SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable) \
- SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted) \
- SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed) \
- SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable) \
- SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted) \
- SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \
- SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
- SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
- SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
- SC(property_cell_space_bytes_available, \
- V8.MemoryPropertyCellSpaceBytesAvailable) \
- SC(property_cell_space_bytes_committed, \
- V8.MemoryPropertyCellSpaceBytesCommitted) \
- SC(property_cell_space_bytes_used, \
- V8.MemoryPropertyCellSpaceBytesUsed) \
- SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
- SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
- SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
-
-
-// This file contains all the v8 counters that are in use.
-class Counters {
- public:
-#define HT(name, caption) \
- HistogramTimer* name() { return &name##_; }
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define HP(name, caption) \
- Histogram* name() { return &name##_; }
- HISTOGRAM_PERCENTAGE_LIST(HP)
-#undef HP
-
-#define HM(name, caption) \
- Histogram* name() { return &name##_; }
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-
-#define SC(name, caption) \
- StatsCounter* name() { return &name##_; }
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_##name() { return &count_of_##name##_; } \
- StatsCounter* size_of_##name() { return &size_of_##name##_; }
- INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_CODE_TYPE_##name() \
- { return &count_of_CODE_TYPE_##name##_; } \
- StatsCounter* size_of_CODE_TYPE_##name() \
- { return &size_of_CODE_TYPE_##name##_; }
- CODE_KIND_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_FIXED_ARRAY_##name() \
- { return &count_of_FIXED_ARRAY_##name##_; } \
- StatsCounter* size_of_FIXED_ARRAY_##name() \
- { return &size_of_FIXED_ARRAY_##name##_; }
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_CODE_AGE_##name() \
- { return &count_of_CODE_AGE_##name##_; } \
- StatsCounter* size_of_CODE_AGE_##name() \
- { return &size_of_CODE_AGE_##name##_; }
- CODE_AGE_LIST_COMPLETE(SC)
-#undef SC
-
- enum Id {
-#define RATE_ID(name, caption) k_##name,
- HISTOGRAM_TIMER_LIST(RATE_ID)
-#undef RATE_ID
-#define PERCENTAGE_ID(name, caption) k_##name,
- HISTOGRAM_PERCENTAGE_LIST(PERCENTAGE_ID)
-#undef PERCENTAGE_ID
-#define MEMORY_ID(name, caption) k_##name,
- HISTOGRAM_MEMORY_LIST(MEMORY_ID)
-#undef MEMORY_ID
-#define COUNTER_ID(name, caption) k_##name,
- STATS_COUNTER_LIST_1(COUNTER_ID)
- STATS_COUNTER_LIST_2(COUNTER_ID)
-#undef COUNTER_ID
-#define COUNTER_ID(name) kCountOf##name, kSizeOf##name,
- INSTANCE_TYPE_LIST(COUNTER_ID)
-#undef COUNTER_ID
-#define COUNTER_ID(name) kCountOfCODE_TYPE_##name, \
- kSizeOfCODE_TYPE_##name,
- CODE_KIND_LIST(COUNTER_ID)
-#undef COUNTER_ID
-#define COUNTER_ID(name) kCountOfFIXED_ARRAY__##name, \
- kSizeOfFIXED_ARRAY__##name,
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
-#undef COUNTER_ID
-#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \
- kSizeOfCODE_AGE__##name,
- CODE_AGE_LIST_COMPLETE(COUNTER_ID)
-#undef COUNTER_ID
- stats_counter_count
- };
-
- void ResetHistograms();
-
- private:
-#define HT(name, caption) \
- HistogramTimer name##_;
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define HP(name, caption) \
- Histogram name##_;
- HISTOGRAM_PERCENTAGE_LIST(HP)
-#undef HP
-
-#define HM(name, caption) \
- Histogram name##_;
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-
-#define SC(name, caption) \
- StatsCounter name##_;
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter size_of_##name##_; \
- StatsCounter count_of_##name##_;
- INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter size_of_CODE_TYPE_##name##_; \
- StatsCounter count_of_CODE_TYPE_##name##_;
- CODE_KIND_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter size_of_FIXED_ARRAY_##name##_; \
- StatsCounter count_of_FIXED_ARRAY_##name##_;
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter size_of_CODE_AGE_##name##_; \
- StatsCounter count_of_CODE_AGE_##name##_;
- CODE_AGE_LIST_COMPLETE(SC)
-#undef SC
-
- friend class Isolate;
-
- explicit Counters(Isolate* isolate);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_V8_COUNTERS_H_
diff --git a/chromium/v8/src/v8.cc b/chromium/v8/src/v8.cc
index 004a3394614..8aba51ac9a6 100644
--- a/chromium/v8/src/v8.cc
+++ b/chromium/v8/src/v8.cc
@@ -1,108 +1,65 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "assembler.h"
-#include "isolate.h"
-#include "elements.h"
-#include "bootstrapper.h"
-#include "debug.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/base/once.h"
+#include "src/isolate.h"
+#include "src/elements.h"
+#include "src/bootstrapper.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/frames.h"
+#include "src/heap-profiler.h"
+#include "src/hydrogen.h"
#ifdef V8_USE_DEFAULT_PLATFORM
-#include "default-platform.h"
+#include "src/libplatform/default-platform.h"
#endif
-#include "deoptimizer.h"
-#include "frames.h"
-#include "heap-profiler.h"
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "objects.h"
-#include "once.h"
-#include "platform.h"
-#include "sampler.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-#include "store-buffer.h"
+#include "src/lithium-allocator.h"
+#include "src/objects.h"
+#include "src/platform.h"
+#include "src/sampler.h"
+#include "src/runtime-profiler.h"
+#include "src/serialize.h"
+#include "src/store-buffer.h"
namespace v8 {
namespace internal {
V8_DECLARE_ONCE(init_once);
-List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
v8::Platform* V8::platform_ = NULL;
bool V8::Initialize(Deserializer* des) {
InitializeOncePerProcess();
-
- // The current thread may not yet had entered an isolate to run.
- // Note the Isolate::Current() may be non-null because for various
- // initialization purposes an initializing thread may be assigned an isolate
- // but not actually enter it.
- if (i::Isolate::CurrentPerIsolateThreadData() == NULL) {
- i::Isolate::EnterDefaultIsolate();
- }
-
- ASSERT(i::Isolate::CurrentPerIsolateThreadData() != NULL);
- ASSERT(i::Isolate::CurrentPerIsolateThreadData()->thread_id().Equals(
- i::ThreadId::Current()));
- ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
- i::Isolate::Current());
-
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) return true;
if (isolate->IsDead()) return false;
if (isolate->IsInitialized()) return true;
+#ifdef V8_USE_DEFAULT_PLATFORM
+ DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_);
+ platform->SetThreadPoolSize(isolate->max_available_threads());
+ // We currently only start the threads early if we know that we'll use them.
+ if (FLAG_job_based_sweeping) platform->EnsureInitialized();
+#endif
+
return isolate->Init(des);
}
void V8::TearDown() {
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->IsDefaultIsolate());
- if (!isolate->IsInitialized()) return;
-
- // The isolate has to be torn down before clearing the LOperand
- // caches so that the optimizing compiler thread (if running)
- // doesn't see an inconsistent view of the lithium instructions.
- isolate->TearDown();
- delete isolate;
-
+ Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
ExternalReference::TearDownMathExpData();
RegisteredExtension::UnregisterAll();
Isolate::GlobalTearDown();
- delete call_completed_callbacks_;
- call_completed_callbacks_ = NULL;
-
Sampler::TearDown();
#ifdef V8_USE_DEFAULT_PLATFORM
@@ -119,61 +76,32 @@ void V8::SetReturnAddressLocationResolver(
}
-void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
- if (call_completed_callbacks_ == NULL) { // Lazy init.
- call_completed_callbacks_ = new List<CallCompletedCallback>();
- }
- for (int i = 0; i < call_completed_callbacks_->length(); i++) {
- if (callback == call_completed_callbacks_->at(i)) return;
- }
- call_completed_callbacks_->Add(callback);
-}
-
-
-void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
- if (call_completed_callbacks_ == NULL) return;
- for (int i = 0; i < call_completed_callbacks_->length(); i++) {
- if (callback == call_completed_callbacks_->at(i)) {
- call_completed_callbacks_->Remove(i);
- }
- }
-}
-
+void V8::InitializeOncePerProcessImpl() {
+ FlagList::EnforceFlagImplications();
-void V8::FireCallCompletedCallback(Isolate* isolate) {
- bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
- bool microtask_pending = isolate->microtask_pending();
- if (!has_call_completed_callbacks && !microtask_pending) return;
-
- HandleScopeImplementer* handle_scope_implementer =
- isolate->handle_scope_implementer();
- if (!handle_scope_implementer->CallDepthIsZero()) return;
- // Fire callbacks. Increase call depth to prevent recursive callbacks.
- handle_scope_implementer->IncrementCallDepth();
- if (microtask_pending) Execution::RunMicrotasks(isolate);
- if (has_call_completed_callbacks) {
- for (int i = 0; i < call_completed_callbacks_->length(); i++) {
- call_completed_callbacks_->at(i)();
- }
+ if (FLAG_predictable && FLAG_random_seed == 0) {
+ // Avoid random seeds in predictable mode.
+ FLAG_random_seed = 12347;
}
- handle_scope_implementer->DecrementCallDepth();
-}
-
-void V8::InitializeOncePerProcessImpl() {
- FlagList::EnforceFlagImplications();
if (FLAG_stress_compaction) {
FLAG_force_marking_deque_overflows = true;
FLAG_gc_global = true;
- FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
+ FLAG_max_semi_space_size = 1;
}
#ifdef V8_USE_DEFAULT_PLATFORM
platform_ = new DefaultPlatform;
#endif
Sampler::SetUp();
- CPU::SetUp();
- OS::PostSetUp();
+ CpuFeatures::Probe(false);
+ init_memcopy_functions();
+ // The custom exp implementation needs 16KB of lookup data, so it is
+ // initialized lazily on first use rather than here.
+ init_fast_sqrt_function();
+#ifdef _WIN64
+ init_modulo_function();
+#endif
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
SetUpJSCallerSavedCodeData();
@@ -183,7 +111,7 @@ void V8::InitializeOncePerProcessImpl() {
void V8::InitializeOncePerProcess() {
- CallOnce(&init_once, &InitializeOncePerProcessImpl);
+ base::CallOnce(&init_once, &InitializeOncePerProcessImpl);
}
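base::CallOnce guarantees the init function runs exactly once per process, however many threads race into it. A hedged sketch of the idiom with local names:

  V8_DECLARE_ONCE(tables_once);

  static void InitTablesImpl() { /* expensive one-time setup */ }

  void EnsureTables() {
    // Callable from any thread, any number of times; InitTablesImpl has
    // finished before the first EnsureTables() returns.
    base::CallOnce(&tables_once, &InitTablesImpl);
  }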
diff --git a/chromium/v8/src/v8.h b/chromium/v8/src/v8.h
index 8069e8adda2..b14458a0b92 100644
--- a/chromium/v8/src/v8.h
+++ b/chromium/v8/src/v8.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
//
// Top include for all V8 .cc files.
@@ -49,24 +26,25 @@
#endif
// Basic includes
-#include "../include/v8.h"
-#include "../include/v8-platform.h"
-#include "v8globals.h"
-#include "v8checks.h"
-#include "allocation.h"
-#include "assert-scope.h"
-#include "v8utils.h"
-#include "flags.h"
+#include "include/v8.h"
+#include "include/v8-platform.h"
+#include "src/v8checks.h"
+#include "src/allocation.h"
+#include "src/assert-scope.h"
+#include "src/utils.h"
+#include "src/flags.h"
+#include "src/globals.h"
// Objects & heap
-#include "objects-inl.h"
-#include "spaces-inl.h"
-#include "heap-inl.h"
-#include "incremental-marking-inl.h"
-#include "mark-compact-inl.h"
-#include "log-inl.h"
-#include "handles-inl.h"
-#include "zone-inl.h"
+#include "src/objects-inl.h"
+#include "src/spaces-inl.h"
+#include "src/heap-inl.h"
+#include "src/incremental-marking-inl.h"
+#include "src/mark-compact-inl.h"
+#include "src/log-inl.h"
+#include "src/handles-inl.h"
+#include "src/types-inl.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -97,10 +75,6 @@ class V8 : public AllStatic {
// Support for entry hooking JITed code.
static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
- static void AddCallCompletedCallback(CallCompletedCallback callback);
- static void RemoveCallCompletedCallback(CallCompletedCallback callback);
- static void FireCallCompletedCallback(Isolate* isolate);
-
static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() {
return array_buffer_allocator_;
}
@@ -118,8 +92,6 @@ class V8 : public AllStatic {
static void InitializeOncePerProcessImpl();
static void InitializeOncePerProcess();
- // List of callbacks when a Call completes.
- static List<CallCompletedCallback>* call_completed_callbacks_;
// Allocator for external array buffers.
static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
// v8::Platform to use.
@@ -133,6 +105,4 @@ enum NilValue { kNullValue, kUndefinedValue };
} } // namespace v8::internal
-namespace i = v8::internal;
-
#endif // V8_V8_H_
diff --git a/chromium/v8/src/v8checks.h b/chromium/v8/src/v8checks.h
index 9857f73d174..3d63cae4c83 100644
--- a/chromium/v8/src/v8checks.h
+++ b/chromium/v8/src/v8checks.h
@@ -1,36 +1,11 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_V8CHECKS_H_
#define V8_V8CHECKS_H_
-#include "checks.h"
-
-void API_Fatal(const char* location, const char* format, ...);
+#include "src/checks.h"
namespace v8 {
class Value;
diff --git a/chromium/v8/src/v8conversions.cc b/chromium/v8/src/v8conversions.cc
deleted file mode 100644
index 900b62d10b3..00000000000
--- a/chromium/v8/src/v8conversions.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include <limits.h>
-
-#include "v8.h"
-
-#include "conversions-inl.h"
-#include "v8conversions.h"
-#include "dtoa.h"
-#include "factory.h"
-#include "strtod.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// C++-style iterator adaptor for StringCharacterStream
-// (unlike C++ iterators the end-marker has different type).
-class StringCharacterStreamIterator {
- public:
- class EndMarker {};
-
- explicit StringCharacterStreamIterator(StringCharacterStream* stream);
-
- uint16_t operator*() const;
- void operator++();
- bool operator==(EndMarker const&) const { return end_; }
- bool operator!=(EndMarker const& m) const { return !end_; }
-
- private:
- StringCharacterStream* const stream_;
- uint16_t current_;
- bool end_;
-};
-
-
-StringCharacterStreamIterator::StringCharacterStreamIterator(
- StringCharacterStream* stream) : stream_(stream) {
- ++(*this);
-}
-
-uint16_t StringCharacterStreamIterator::operator*() const {
- return current_;
-}
-
-
-void StringCharacterStreamIterator::operator++() {
- end_ = !stream_->HasMore();
- if (!end_) {
- current_ = stream_->GetNext();
- }
-}
-} // End anonymous namespace.
-
-
-double StringToDouble(UnicodeCache* unicode_cache,
- String* str, int flags, double empty_string_val) {
- StringShape shape(str);
- // TODO(dcarney): Use a Visitor here.
- if (shape.IsSequentialAscii()) {
- const uint8_t* begin = SeqOneByteString::cast(str)->GetChars();
- const uint8_t* end = begin + str->length();
- return InternalStringToDouble(unicode_cache, begin, end, flags,
- empty_string_val);
- } else if (shape.IsSequentialTwoByte()) {
- const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
- const uc16* end = begin + str->length();
- return InternalStringToDouble(unicode_cache, begin, end, flags,
- empty_string_val);
- } else {
- ConsStringIteratorOp op;
- StringCharacterStream stream(str, &op);
- return InternalStringToDouble(unicode_cache,
- StringCharacterStreamIterator(&stream),
- StringCharacterStreamIterator::EndMarker(),
- flags,
- empty_string_val);
- }
-}
-
-
-double StringToInt(UnicodeCache* unicode_cache,
- String* str,
- int radix) {
- StringShape shape(str);
- // TODO(dcarney): Use a Visitor here.
- if (shape.IsSequentialAscii()) {
- const uint8_t* begin = SeqOneByteString::cast(str)->GetChars();
- const uint8_t* end = begin + str->length();
- return InternalStringToInt(unicode_cache, begin, end, radix);
- } else if (shape.IsSequentialTwoByte()) {
- const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
- const uc16* end = begin + str->length();
- return InternalStringToInt(unicode_cache, begin, end, radix);
- } else {
- ConsStringIteratorOp op;
- StringCharacterStream stream(str, &op);
- return InternalStringToInt(unicode_cache,
- StringCharacterStreamIterator(&stream),
- StringCharacterStreamIterator::EndMarker(),
- radix);
- }
-}
-
-} } // namespace v8::internal
diff --git a/chromium/v8/src/v8conversions.h b/chromium/v8/src/v8conversions.h
deleted file mode 100644
index 68107de97a2..00000000000
--- a/chromium/v8/src/v8conversions.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8CONVERSIONS_H_
-#define V8_V8CONVERSIONS_H_
-
-#include "conversions.h"
-
-namespace v8 {
-namespace internal {
-
-// Convert from Number object to C integer.
-inline int32_t NumberToInt32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToInt32(number->Number());
-}
-
-
-inline uint32_t NumberToUint32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToUint32(number->Number());
-}
-
-
-// Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(UnicodeCache* unicode_cache,
- String* str,
- int flags,
- double empty_string_val = 0);
-
-// Converts a string into an integer.
-double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
-
-inline bool TryNumberToSize(Isolate* isolate,
- Object* number, size_t* result) {
- SealHandleScope shs(isolate);
- if (number->IsSmi()) {
- int value = Smi::cast(number)->value();
- ASSERT(
- static_cast<unsigned>(Smi::kMaxValue)
- <= std::numeric_limits<size_t>::max());
- if (value >= 0) {
- *result = static_cast<size_t>(value);
- return true;
- }
- return false;
- } else {
- ASSERT(number->IsHeapNumber());
- double value = HeapNumber::cast(number)->value();
- if (value >= 0 &&
- value <= std::numeric_limits<size_t>::max()) {
- *result = static_cast<size_t>(value);
- return true;
- } else {
- return false;
- }
- }
-}
-
-// Converts a number into size_t.
-inline size_t NumberToSize(Isolate* isolate,
- Object* number) {
- size_t result = 0;
- bool is_valid = TryNumberToSize(isolate, number, &result);
- CHECK(is_valid);
- return result;
-}
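
TryNumberToSize accepts a value only if it is non-negative and fits in size_t, and NumberToSize turns a failed check into a hard CHECK. A standalone sketch of the same range-checked conversion for a plain double (TryDoubleToSize is an illustrative name, not V8 API):

#include <cstddef>
#include <cstdio>
#include <limits>

// Accept only non-negative values that fit in size_t, mirroring the
// HeapNumber branch of TryNumberToSize above. NaN fails both comparisons
// and is rejected too. (For 64-bit size_t the upper bound rounds when
// converted to double, a boundary case the original shares.)
bool TryDoubleToSize(double value, std::size_t* result) {
  if (value >= 0 &&
      value <= static_cast<double>(std::numeric_limits<std::size_t>::max())) {
    *result = static_cast<std::size_t>(value);
    return true;
  }
  return false;
}

int main() {
  std::size_t n = 0;
  std::printf("%d\n", TryDoubleToSize(1024.0, &n) ? 1 : 0);  // 1, n == 1024
  std::printf("%d\n", TryDoubleToSize(-1.0, &n) ? 1 : 0);    // 0, rejected
  return 0;
}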
-
-} } // namespace v8::internal
-
-#endif // V8_V8CONVERSIONS_H_
diff --git a/chromium/v8/src/v8dll-main.cc b/chromium/v8/src/v8dll-main.cc
index 7f6c9f955d3..6250b3e341d 100644
--- a/chromium/v8/src/v8dll-main.cc
+++ b/chromium/v8/src/v8dll-main.cc
@@ -1,37 +1,14 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// The GYP based build ends up defining USING_V8_SHARED when compiling this
// file.
#undef USING_V8_SHARED
-#include "../include/v8.h"
+#include "include/v8.h"
#if V8_OS_WIN
-#include "win32-headers.h"
+#include "src/base/win32-headers.h"
extern "C" {
BOOL WINAPI DllMain(HANDLE hinstDLL,
diff --git a/chromium/v8/src/v8globals.h b/chromium/v8/src/v8globals.h
deleted file mode 100644
index 4910cb7358d..00000000000
--- a/chromium/v8/src/v8globals.h
+++ /dev/null
@@ -1,580 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8GLOBALS_H_
-#define V8_V8GLOBALS_H_
-
-#include "globals.h"
-#include "checks.h"
-
-namespace v8 {
-namespace internal {
-
-// This file contains constants and global declarations related to the
-// V8 system.
-
-// Mask for the sign bit in a smi.
-const intptr_t kSmiSignMask = kIntptrSignBit;
-
-const int kObjectAlignmentBits = kPointerSizeLog2;
-const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
-const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
-
-// Desired alignment for pointers.
-const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
-const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
-
-// Desired alignment for double values.
-const intptr_t kDoubleAlignment = 8;
-const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
-
-// Desired alignment for generated code is 32 bytes (to improve cache line
-// utilization).
-const int kCodeAlignmentBits = 5;
-const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
-const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
-
-// Tag information for Failure.
-const int kFailureTag = 3;
-const int kFailureTagSize = 2;
-const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
-
-
-// Zap-value: The value used for zapping dead objects.
-// Should be a recognizable hex value tagged as a failure.
-#ifdef V8_HOST_ARCH_64_BIT
-const Address kZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
-const Address kHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
-const Address kGlobalHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
-const Address kFromSpaceZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
-const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
-const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
-const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
-#else
-const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
-const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
-const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
-const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
-const uint32_t kSlotsZapValue = 0xbeefdeef;
-const uint32_t kDebugZapValue = 0xbadbaddb;
-const uint32_t kFreeListZapValue = 0xfeed1eaf;
-#endif
-
-const int kCodeZapValue = 0xbadc0de;
-
-// Number of bits to represent the page size for paged spaces. The value of 20
-// gives 1MB per page.
-const int kPageSizeBits = 20;
-
-// On Intel architecture, cache line size is 64 bytes.
-// On ARM it may be less (32 bytes), but as far as this constant is
-// used for aligning data, it doesn't hurt to align on a greater value.
-#define PROCESSOR_CACHE_LINE_SIZE 64
-
-// Constants relevant to double precision floating point numbers.
-// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
-const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
-
-
-// -----------------------------------------------------------------------------
-// Forward declarations for frequently used classes
-
-class AccessorInfo;
-class Allocation;
-class Arguments;
-class Assembler;
-class Code;
-class CodeGenerator;
-class CodeStub;
-class Context;
-class Debug;
-class Debugger;
-class DebugInfo;
-class Descriptor;
-class DescriptorArray;
-class TransitionArray;
-class ExternalReference;
-class FixedArray;
-class FunctionTemplateInfo;
-class MemoryChunk;
-class SeededNumberDictionary;
-class UnseededNumberDictionary;
-class NameDictionary;
-template <typename T> class Handle;
-class Heap;
-class HeapObject;
-class IC;
-class InterceptorInfo;
-class JSReceiver;
-class JSArray;
-class JSFunction;
-class JSObject;
-class LargeObjectSpace;
-class LookupResult;
-class MacroAssembler;
-class Map;
-class MapSpace;
-class MarkCompactCollector;
-class NewSpace;
-class Object;
-class MaybeObject;
-class OldSpace;
-class Foreign;
-class Scope;
-class ScopeInfo;
-class Script;
-class Smi;
-template <typename Config, class Allocator = FreeStoreAllocationPolicy>
- class SplayTree;
-class String;
-class Name;
-class Struct;
-class Variable;
-class RelocInfo;
-class Deserializer;
-class MessageLocation;
-class VirtualMemory;
-class Mutex;
-class RecursiveMutex;
-
-typedef bool (*WeakSlotCallback)(Object** pointer);
-
-typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
-
-// -----------------------------------------------------------------------------
-// Miscellaneous
-
-// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
-// consecutive.
-enum AllocationSpace {
- NEW_SPACE, // Semispaces collected with copying collector.
- OLD_POINTER_SPACE, // May contain pointers to new space.
- OLD_DATA_SPACE, // Must not have pointers to new space.
- CODE_SPACE, // No pointers to new space, marked executable.
- MAP_SPACE, // Only and all map objects.
- CELL_SPACE, // Only and all cell objects.
- PROPERTY_CELL_SPACE, // Only and all global property cell objects.
- LO_SPACE, // Promoted large objects.
-
- FIRST_SPACE = NEW_SPACE,
- LAST_SPACE = LO_SPACE,
- FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
- LAST_PAGED_SPACE = PROPERTY_CELL_SPACE
-};
-const int kSpaceTagSize = 3;
-const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
-
-
-// A flag that indicates whether objects should be pretenured when
-// allocated (allocated directly into the old generation) or not
-// (allocated in the young generation if the object size and type
-// allow).
-enum PretenureFlag { NOT_TENURED, TENURED };
-
-enum MinimumCapacity {
- USE_DEFAULT_MINIMUM_CAPACITY,
- USE_CUSTOM_MINIMUM_CAPACITY
-};
-
-enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
-
-enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-
-enum VisitMode {
- VISIT_ALL,
- VISIT_ALL_IN_SCAVENGE,
- VISIT_ALL_IN_SWEEP_NEWSPACE,
- VISIT_ONLY_STRONG
-};
-
-// Flag indicating whether code is built into the VM (one of the natives files).
-enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
-
-
-// A CodeDesc describes a buffer holding instructions and relocation
-// information. The instructions start at the beginning of the buffer
-// and grow forward, the relocation information starts at the end of
-// the buffer and grows backward.
-//
-// |<--------------- buffer_size ---------------->|
-// |<-- instr_size -->| |<-- reloc_size -->|
-// +==================+========+==================+
-// | instructions | free | reloc info |
-// +==================+========+==================+
-// ^
-// |
-// buffer
-
-struct CodeDesc {
- byte* buffer;
- int buffer_size;
- int instr_size;
- int reloc_size;
- Assembler* origin;
-};
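
A CodeDesc thus describes one allocation filled from both ends, instructions forward and relocation info backward. A minimal sketch of that scheme (TwoEndedBuffer is an illustrative name; a real Assembler tracks far more state):

#include <cassert>
#include <cstdint>
#include <cstdio>

// A buffer written from both ends, as in CodeDesc: instructions grow
// forward from buffer[0], reloc entries grow backward from buffer[size].
class TwoEndedBuffer {
 public:
  explicit TwoEndedBuffer(int size)
      : buffer_(new uint8_t[size]), buffer_size_(size),
        instr_size_(0), reloc_size_(0) {}
  ~TwoEndedBuffer() { delete[] buffer_; }

  void EmitInstrByte(uint8_t b) {
    assert(instr_size_ + reloc_size_ < buffer_size_);  // free space remains
    buffer_[instr_size_++] = b;
  }
  void EmitRelocByte(uint8_t b) {
    assert(instr_size_ + reloc_size_ < buffer_size_);
    reloc_size_++;
    buffer_[buffer_size_ - reloc_size_] = b;
  }

  int free_space() const { return buffer_size_ - instr_size_ - reloc_size_; }

 private:
  uint8_t* buffer_;
  int buffer_size_;
  int instr_size_;  // used from the front
  int reloc_size_;  // used from the back
};

int main() {
  TwoEndedBuffer buf(16);
  buf.EmitInstrByte(0x90);  // e.g. an x86 NOP
  buf.EmitRelocByte(0x01);
  std::printf("free: %d\n", buf.free_space());  // prints "free: 14"
  return 0;
}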
-
-
-// Callback function used for iterating objects in heap spaces,
-// for example, scanning heap objects.
-typedef int (*HeapObjectCallback)(HeapObject* obj);
-
-
-// Callback function used for checking constraints when copying/relocating
-// objects. Returns true if an object can be copied/relocated from its
-// old_addr to a new_addr.
-typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
-
-
-// Callback function on inline caches, used for iterating over inline caches
-// in compiled code.
-typedef void (*InlineCacheCallback)(Code* code, Address ic);
-
-
-// State for inline cache call sites. Aliased as IC::State.
-enum InlineCacheState {
- // Has never been executed.
- UNINITIALIZED,
-  // Has been executed but monomorphic state has been delayed.
- PREMONOMORPHIC,
- // Has been executed and only one receiver type has been seen.
- MONOMORPHIC,
- // Like MONOMORPHIC but check failed due to prototype.
- MONOMORPHIC_PROTOTYPE_FAILURE,
- // Multiple receiver types have been seen.
- POLYMORPHIC,
- // Many receiver types have been seen.
- MEGAMORPHIC,
-  // A generic handler is installed and no extra type feedback is recorded.
- GENERIC,
- // Special state for debug break or step in prepare stubs.
- DEBUG_STUB
-};
-
-
-enum CheckType {
- RECEIVER_MAP_CHECK,
- STRING_CHECK,
- SYMBOL_CHECK,
- NUMBER_CHECK,
- BOOLEAN_CHECK
-};
-
-
-enum CallFunctionFlags {
- NO_CALL_FUNCTION_FLAGS = 0,
-  // Receiver might implicitly be the global object. If it is, the
- // hole is passed to the call function stub.
- RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0,
- // The call target is cached in the instruction stream.
- RECORD_CALL_TARGET = 1 << 1
-};
-
-
-enum InlineCacheHolderFlag {
- OWN_MAP, // For fast properties objects.
- PROTOTYPE_MAP // For slow properties objects (except GlobalObjects).
-};
-
-
-// The Store Buffer (GC).
-typedef enum {
- kStoreBufferFullEvent,
- kStoreBufferStartScanningPagesEvent,
- kStoreBufferScanningPageEvent
-} StoreBufferEvent;
-
-
-typedef void (*StoreBufferCallback)(Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event);
-
-
-// Union used for fast testing of specific double values.
-union DoubleRepresentation {
- double value;
- int64_t bits;
- DoubleRepresentation(double x) { value = x; }
-};
-
-
-// Union used for customized checking of IEEE double values inside
-// the V8 runtime, rather than relying on the underlying platform
-// headers and libraries.
-union IeeeDoubleLittleEndianArchType {
- double d;
- struct {
- unsigned int man_low :32;
- unsigned int man_high :20;
- unsigned int exp :11;
- unsigned int sign :1;
- } bits;
-};
-
-
-union IeeeDoubleBigEndianArchType {
- double d;
- struct {
- unsigned int sign :1;
- unsigned int exp :11;
- unsigned int man_high :20;
- unsigned int man_low :32;
- } bits;
-};
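
Reading the inactive member of a union is, strictly speaking, undefined in C++, although the compilers V8 targets support it. The same field access can be done portably with memcpy and shifts; a sketch using the field widths from the unions above (1 sign bit, 11 exponent bits, 52 mantissa bits; PrintDoubleFields is an illustrative helper):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Decompose an IEEE 754 double into the same fields the
// IeeeDouble*ArchType unions expose.
void PrintDoubleFields(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // well-defined type pun
  unsigned sign = static_cast<unsigned>(bits >> 63);
  unsigned exp = static_cast<unsigned>((bits >> 52) & 0x7FF);
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  std::printf("sign=%u exp=%u mantissa=%llu\n",
              sign, exp, static_cast<unsigned long long>(mantissa));
}

int main() {
  PrintDoubleFields(1.0);   // sign=0 exp=1023 mantissa=0
  PrintDoubleFields(-2.5);  // sign=1 exp=1024 mantissa=2^50
  return 0;
}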
-
-
-// AccessorCallback
-struct AccessorDescriptor {
- MaybeObject* (*getter)(Isolate* isolate, Object* object, void* data);
- MaybeObject* (*setter)(
- Isolate* isolate, JSObject* object, Object* value, void* data);
- void* data;
-};
-
-
-// Logging and profiling. A StateTag represents a possible state of
-// the VM. The logger maintains a stack of these. Creating a VMState
-// object enters a state by pushing on the stack, and destroying a
-// VMState object leaves a state by popping the current state from the
-// stack.
-
-enum StateTag {
- JS,
- GC,
- COMPILER,
- OTHER,
- EXTERNAL,
- IDLE
-};
-
-
-// -----------------------------------------------------------------------------
-// Macros
-
-// Testers for tagged values.
-
-#define HAS_SMI_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
-
-#define HAS_FAILURE_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
-
-// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
-#define OBJECT_POINTER_ALIGN(value) \
- (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
-
-// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
-#define POINTER_SIZE_ALIGN(value) \
- (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
-
-// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
-#define CODE_POINTER_ALIGN(value) \
- (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
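
All three macros are instances of the same round-up idiom for power-of-two alignments: add the mask (alignment - 1), then clear the low bits. A self-contained check of the arithmetic, assuming the 8-byte object alignment typical of 64-bit targets and the 32-byte code alignment defined above (AlignUp is an illustrative name):

#include <cassert>
#include <cstdint>

// Round `value` up to the next multiple of `alignment` (a power of two):
// adding (alignment - 1) carries past the boundary, masking clears the
// remainder. This is exactly what the three *_ALIGN macros expand to.
constexpr intptr_t AlignUp(intptr_t value, intptr_t alignment) {
  return (value + (alignment - 1)) & ~(alignment - 1);
}

int main() {
  static_assert(AlignUp(13, 8) == 16, "rounds up to object alignment");
  static_assert(AlignUp(16, 8) == 16, "aligned values are unchanged");
  static_assert(AlignUp(1, 32) == 32, "code alignment is 32 bytes");
  assert(AlignUp(0, 8) == 0);
  return 0;
}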
-
-// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
-// inside a C++ class and new and delete will be overloaded so logging is
-// performed.
-// This file (v8globals.h) is included before log.h, so we use direct calls to
-// the Logger rather than the LOG macro.
-#ifdef DEBUG
-#define TRACK_MEMORY(name) \
- void* operator new(size_t size) { \
- void* result = ::operator new(size); \
- Logger::NewEventStatic(name, result, size); \
- return result; \
- } \
- void operator delete(void* object) { \
- Logger::DeleteEventStatic(name, object); \
- ::operator delete(object); \
- }
-#else
-#define TRACK_MEMORY(name)
-#endif
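
As the comment describes, TRACK_MEMORY("Fisk") goes inside a class body so that the class's operator new and operator delete log every allocation. A standalone sketch with the Logger calls swapped for printf (the real macro calls Logger::NewEventStatic and Logger::DeleteEventStatic, as above):

#include <cstddef>
#include <cstdio>
#include <new>

// Simplified stand-in for V8's TRACK_MEMORY: overload the class-scope
// operator new/delete so every allocation of the class is logged.
#define TRACK_MEMORY(name)                                    \
  void* operator new(std::size_t size) {                      \
    void* result = ::operator new(size);                      \
    std::printf("new %s at %p (%zu bytes)\n",                 \
                name, result, size);                          \
    return result;                                            \
  }                                                           \
  void operator delete(void* object) {                        \
    std::printf("delete %s at %p\n", name, object);           \
    ::operator delete(object);                                \
  }

class Fisk {  // "Fisk" is the example name used in the comment above.
 public:
  TRACK_MEMORY("Fisk")
  int value = 0;
};

int main() {
  Fisk* f = new Fisk();  // logged by Fisk::operator new
  delete f;              // logged by Fisk::operator delete
  return 0;
}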
-
-
-// Feature flags bit positions. They are mostly based on the CPUID spec.
-// On X86/X64, values below 32 are bits in EDX; values of 32 and above are
-// bits in ECX.
-enum CpuFeature { SSE4_1 = 32 + 19, // x86
- SSE3 = 32 + 0, // x86
- SSE2 = 26, // x86
- CMOV = 15, // x86
- VFP3 = 1, // ARM
- ARMv7 = 2, // ARM
- SUDIV = 3, // ARM
- UNALIGNED_ACCESSES = 4, // ARM
- MOVW_MOVT_IMMEDIATE_LOADS = 5, // ARM
- VFP32DREGS = 6, // ARM
- NEON = 7, // ARM
- SAHF = 0, // x86
- FPU = 1}; // MIPS
-
-
-// Used to specify if a macro instruction must perform a smi check on tagged
-// values.
-enum SmiCheckType {
- DONT_DO_SMI_CHECK,
- DO_SMI_CHECK
-};
-
-
-// Used to specify whether a receiver is implicitly or explicitly
-// provided to a call.
-enum CallKind {
- CALL_AS_METHOD,
- CALL_AS_FUNCTION
-};
-
-
-enum ScopeType {
- EVAL_SCOPE, // The top-level scope for an eval source.
- FUNCTION_SCOPE, // The top-level scope for a function.
- MODULE_SCOPE, // The scope introduced by a module literal
- GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval.
- CATCH_SCOPE, // The scope introduced by catch.
- BLOCK_SCOPE, // The scope introduced by a new block.
- WITH_SCOPE // The scope introduced by with.
-};
-
-
-const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
-const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
-const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
-
-const uint64_t kHoleNanInt64 =
- (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
-const uint64_t kLastNonNaNInt64 =
- (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
-
-
-// The order of this enum has to be kept in sync with the predicates below.
-enum VariableMode {
- // User declared variables:
- VAR, // declared via 'var', and 'function' declarations
-
- CONST, // declared via 'const' declarations
-
- LET, // declared via 'let' declarations (first lexical)
-
- CONST_HARMONY, // declared via 'const' declarations in harmony mode
-
- MODULE, // declared via 'module' declaration (last lexical)
-
- // Variables introduced by the compiler:
- INTERNAL, // like VAR, but not user-visible (may or may not
- // be in a context)
-
- TEMPORARY, // temporary variables (not user-visible), stack-allocated
- // unless the scope as a whole has forced context allocation
-
- DYNAMIC, // always require dynamic lookup (we don't know
- // the declaration)
-
- DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
- // variable is global unless it has been shadowed
- // by an eval-introduced variable
-
- DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
- // variable is local and where it is unless it
- // has been shadowed by an eval-introduced
- // variable
-};
-
-
-inline bool IsDynamicVariableMode(VariableMode mode) {
- return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
-}
-
-
-inline bool IsDeclaredVariableMode(VariableMode mode) {
- return mode >= VAR && mode <= MODULE;
-}
-
-
-inline bool IsLexicalVariableMode(VariableMode mode) {
- return mode >= LET && mode <= MODULE;
-}
-
-
-inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || (mode >= CONST_HARMONY && mode <= MODULE);
-}
-
-
-// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
-// and immutable bindings that can be in two states: initialized and
-// uninitialized. In ES5 only immutable bindings have these two states. When
-// accessing a binding, it needs to be checked for initialization. However in
-// the following cases the binding is initialized immediately after creation
-// so the initialization check can always be skipped:
-// 1. Var declared local variables.
-// var foo;
-// 2. A local variable introduced by a function declaration.
-// function foo() {}
-// 3. Parameters
-// function x(foo) {}
-// 4. Catch bound variables.
-// try {} catch (foo) {}
-// 5. Function variables of named function expressions.
-//      var x = function foo() {}
-// 6. Implicit binding of 'this'.
-// 7. Implicit binding of 'arguments' in functions.
-//
-// ES5 specified object environment records which are introduced by ES elements
-// such as Program and WithStatement that associate identifier bindings with the
-// properties of some object. In the specification only mutable bindings exist
-// (which may be non-writable) and have no distinct initialization step. However
-// V8 allows const declarations in global code with distinct creation and
-// initialization steps which are represented by non-writable properties in the
-// global object. As a result, these bindings also need to be checked for
-// initialization.
-//
-// The following enum specifies a flag that indicates if the binding needs a
-// distinct initialization step (kNeedsInitialization) or if the binding is
-// immediately initialized upon creation (kCreatedInitialized).
-enum InitializationFlag {
- kNeedsInitialization,
- kCreatedInitialized
-};
-
-
-enum ClearExceptionFlag {
- KEEP_EXCEPTION,
- CLEAR_EXCEPTION
-};
-
-
-enum MinusZeroMode {
- TREAT_MINUS_ZERO_AS_ZERO,
- FAIL_ON_MINUS_ZERO
-};
-
-} } // namespace v8::internal
-
-#endif // V8_V8GLOBALS_H_
diff --git a/chromium/v8/src/v8memory.h b/chromium/v8/src/v8memory.h
index c72ce7ab7b8..615ec4fe87f 100644
--- a/chromium/v8/src/v8memory.h
+++ b/chromium/v8/src/v8memory.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MEMORY_H_
#define V8_MEMORY_H_
diff --git a/chromium/v8/src/v8natives.js b/chromium/v8/src/v8natives.js
index 96b88c5285c..1d05338016b 100644
--- a/chromium/v8/src/v8natives.js
+++ b/chromium/v8/src/v8natives.js
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// This file relies on the fact that the following declarations have been made
// in runtime.js:
@@ -79,6 +56,21 @@ function InstallGetterSetter(object, name, getter, setter) {
}
+// Helper function for installing constant properties on objects.
+function InstallConstants(object, constants) {
+ if (constants.length >= 4) {
+ %OptimizeObjectForAddingMultipleProperties(object, constants.length >> 1);
+ }
+ var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
+ for (var i = 0; i < constants.length; i += 2) {
+ var name = constants[i];
+ var k = constants[i + 1];
+ %SetProperty(object, name, k, attributes);
+ }
+ %ToFastProperties(object);
+}
+
+
// Prevents changes to the prototype of a built-in function.
// The "prototype" property of the function object is made non-configurable,
// and the prototype object is made non-extensible. The latter prevents
@@ -232,10 +224,7 @@ function ObjectToString() {
// ECMA-262 - 15.2.4.3
function ObjectToLocaleString() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Object.prototype.toLocaleString"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
return this.toString();
}
@@ -255,16 +244,13 @@ function ObjectHasOwnProperty(V) {
var handler = %GetHandler(this);
return CallTrap1(handler, "hasOwn", DerivedHasOwnTrap, ToName(V));
}
- return %HasLocalProperty(TO_OBJECT_INLINE(this), ToName(V));
+ return %HasOwnProperty(TO_OBJECT_INLINE(this), ToName(V));
}
// ECMA-262 - 15.2.4.6
function ObjectIsPrototypeOf(V) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Object.prototype.isPrototypeOf"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Object.prototype.isPrototypeOf");
if (!IS_SPEC_OBJECT(V)) return false;
return %IsInPrototypeChain(this, V);
}
@@ -277,7 +263,7 @@ function ObjectPropertyIsEnumerable(V) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(V)) return false;
- var desc = GetOwnProperty(this, P);
+ var desc = GetOwnPropertyJS(this, P);
return IS_UNDEFINED(desc) ? false : desc.isEnumerable();
}
return %IsPropertyEnumerable(ToObject(this), P);
@@ -346,7 +332,7 @@ function ObjectKeys(obj) {
var names = CallTrap0(handler, "keys", DerivedKeysTrap);
return ToNameArray(names, "keys", false);
}
- return %LocalKeys(obj);
+ return %OwnKeys(obj);
}
@@ -388,8 +374,7 @@ function FromPropertyDescriptor(desc) {
}
// Must be an AccessorDescriptor then. We never return a generic descriptor.
return { get: desc.getGet(),
- set: desc.getSet() === ObjectSetProto ? ObjectPoisonProto
- : desc.getSet(),
+ set: desc.getSet(),
enumerable: desc.isEnumerable(),
configurable: desc.isConfigurable() };
}
@@ -642,7 +627,7 @@ function CallTrap2(handler, name, defaultTrap, x, y) {
// ES5 section 8.12.1.
-function GetOwnProperty(obj, v) {
+function GetOwnPropertyJS(obj, v) {
var p = ToName(v);
if (%IsJSProxy(obj)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
@@ -674,7 +659,7 @@ function GetOwnProperty(obj, v) {
// ES5 section 8.12.7.
function Delete(obj, p, should_throw) {
- var desc = GetOwnProperty(obj, p);
+ var desc = GetOwnPropertyJS(obj, p);
if (IS_UNDEFINED(desc)) return true;
if (desc.isConfigurable()) {
%DeleteProperty(obj, p, 0);
@@ -881,7 +866,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
if (new_length != ToNumber(desc.getValue())) {
throw new $RangeError('defineProperty() array length out of range');
}
- var length_desc = GetOwnProperty(obj, "length");
+ var length_desc = GetOwnPropertyJS(obj, "length");
if (new_length != length && !length_desc.isWritable()) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
@@ -903,7 +888,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
while (new_length < length--) {
var index = ToString(length);
if (emit_splice) {
- var deletedDesc = GetOwnProperty(obj, index);
+ var deletedDesc = GetOwnPropertyJS(obj, index);
if (deletedDesc && deletedDesc.hasValue())
removed[length - new_length] = deletedDesc.getValue();
}
@@ -950,7 +935,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
BeginPerformSplice(obj);
}
- var length_desc = GetOwnProperty(obj, "length");
+ var length_desc = GetOwnPropertyJS(obj, "length");
if ((index >= length && !length_desc.isWritable()) ||
!DefineObjectProperty(obj, p, desc, true)) {
if (emit_splice)
@@ -1000,6 +985,21 @@ function ObjectGetPrototypeOf(obj) {
return %GetPrototype(obj);
}
+// ES6 section 19.1.2.19.
+function ObjectSetPrototypeOf(obj, proto) {
+ CHECK_OBJECT_COERCIBLE(obj, "Object.setPrototypeOf");
+
+ if (proto !== null && !IS_SPEC_OBJECT(proto)) {
+ throw MakeTypeError("proto_object_or_null", [proto]);
+ }
+
+ if (IS_SPEC_OBJECT(obj)) {
+ %SetPrototype(obj, proto);
+ }
+
+ return obj;
+}
+
// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
@@ -1007,7 +1007,7 @@ function ObjectGetOwnPropertyDescriptor(obj, p) {
throw MakeTypeError("called_on_non_object",
["Object.getOwnPropertyDescriptor"]);
}
- var desc = GetOwnProperty(obj, p);
+ var desc = GetOwnPropertyJS(obj, p);
return FromPropertyDescriptor(desc);
}
@@ -1025,7 +1025,7 @@ function ToNameArray(obj, trap, includeSymbols) {
var s = ToName(obj[index]);
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(s) && !includeSymbols) continue;
- if (%HasLocalProperty(names, s)) {
+ if (%HasOwnProperty(names, s)) {
throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
}
array[index] = s;
@@ -1037,46 +1037,41 @@ function ToNameArray(obj, trap, includeSymbols) {
}
-// ES5 section 15.2.3.4.
-function ObjectGetOwnPropertyNames(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.getOwnPropertyNames"]);
- }
- // Special handling for proxies.
- if (%IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED);
- return ToNameArray(names, "getOwnPropertyNames", false);
- }
-
+function ObjectGetOwnPropertyKeys(obj, symbolsOnly) {
var nameArrays = new InternalArray();
+ var filter = symbolsOnly ?
+ PROPERTY_ATTRIBUTES_STRING | PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL :
+ PROPERTY_ATTRIBUTES_SYMBOLIC;
// Find all the indexed properties.
- // Get the local element names.
- var localElementNames = %GetLocalElementNames(obj);
- for (var i = 0; i < localElementNames.length; ++i) {
- localElementNames[i] = %_NumberToString(localElementNames[i]);
- }
- nameArrays.push(localElementNames);
-
- // Get names for indexed interceptor properties.
- var interceptorInfo = %GetInterceptorInfo(obj);
- if ((interceptorInfo & 1) != 0) {
- var indexedInterceptorNames = %GetIndexedInterceptorElementNames(obj);
- if (!IS_UNDEFINED(indexedInterceptorNames)) {
- nameArrays.push(indexedInterceptorNames);
+ // Only get own element names if we want to include string keys.
+ if (!symbolsOnly) {
+ var ownElementNames = %GetOwnElementNames(obj);
+ for (var i = 0; i < ownElementNames.length; ++i) {
+ ownElementNames[i] = %_NumberToString(ownElementNames[i]);
+ }
+ nameArrays.push(ownElementNames);
+
+ // Get names for indexed interceptor properties.
+ var interceptorInfo = %GetInterceptorInfo(obj);
+ if ((interceptorInfo & 1) != 0) {
+ var indexedInterceptorNames = %GetIndexedInterceptorElementNames(obj);
+ if (!IS_UNDEFINED(indexedInterceptorNames)) {
+ nameArrays.push(indexedInterceptorNames);
+ }
}
}
// Find all the named properties.
- // Get the local property names.
- nameArrays.push(%GetLocalPropertyNames(obj, false));
+ // Get own property names.
+ nameArrays.push(%GetOwnPropertyNames(obj, filter));
// Get names for named interceptor properties if any.
if ((interceptorInfo & 2) != 0) {
- var namedInterceptorNames = %GetNamedInterceptorPropertyNames(obj);
+ var namedInterceptorNames =
+ %GetNamedInterceptorPropertyNames(obj);
if (!IS_UNDEFINED(namedInterceptorNames)) {
nameArrays.push(namedInterceptorNames);
}
@@ -1089,18 +1084,18 @@ function ObjectGetOwnPropertyNames(obj) {
// Property names are expected to be unique strings,
// but interceptors can interfere with that assumption.
if (interceptorInfo != 0) {
- var propertySet = { __proto__: null };
+ var seenKeys = { __proto__: null };
var j = 0;
for (var i = 0; i < propertyNames.length; ++i) {
- if (IS_SYMBOL(propertyNames[i])) continue;
- var name = ToString(propertyNames[i]);
- // We need to check for the exact property value since for intrinsic
- // properties like toString if(propertySet["toString"]) will always
- // succeed.
- if (propertySet[name] === true) {
- continue;
+ var name = propertyNames[i];
+ if (symbolsOnly) {
+ if (!IS_SYMBOL(name) || IS_PRIVATE(name)) continue;
+ } else {
+ if (IS_SYMBOL(name)) continue;
+ name = ToString(name);
}
- propertySet[name] = true;
+ if (seenKeys[name]) continue;
+ seenKeys[name] = true;
propertyNames[j++] = name;
}
propertyNames.length = j;
@@ -1110,6 +1105,22 @@ function ObjectGetOwnPropertyNames(obj) {
}
+// ES5 section 15.2.3.4.
+function ObjectGetOwnPropertyNames(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("called_on_non_object", ["Object.getOwnPropertyNames"]);
+ }
+ // Special handling for proxies.
+ if (%IsJSProxy(obj)) {
+ var handler = %GetHandler(obj);
+ var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED);
+ return ToNameArray(names, "getOwnPropertyNames", false);
+ }
+
+ return ObjectGetOwnPropertyKeys(obj, false);
+}
+
+
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
if (!IS_SPEC_OBJECT(proto) && proto !== null) {
@@ -1145,8 +1156,8 @@ function ObjectDefineProperty(obj, p, attributes) {
{value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0};
for (var i = 0; i < names.length; i++) {
var N = names[i];
- if (!(%HasLocalProperty(standardNames, N))) {
- var attr = GetOwnProperty(attributes, N);
+ if (!(%HasOwnProperty(standardNames, N))) {
+ var attr = GetOwnPropertyJS(attributes, N);
DefineOwnProperty(descObj, N, attr, true);
}
}
@@ -1165,7 +1176,7 @@ function ObjectDefineProperty(obj, p, attributes) {
function GetOwnEnumerablePropertyNames(properties) {
var names = new InternalArray();
for (var key in properties) {
- if (%HasLocalProperty(properties, key)) {
+ if (%HasOwnProperty(properties, key)) {
names.push(key);
}
}
@@ -1231,7 +1242,7 @@ function ObjectSeal(obj) {
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
- var desc = GetOwnProperty(obj, name);
+ var desc = GetOwnPropertyJS(obj, name);
if (desc.isConfigurable()) {
desc.setConfigurable(false);
DefineOwnProperty(obj, name, desc, true);
@@ -1243,19 +1254,19 @@ function ObjectSeal(obj) {
// ES5 section 15.2.3.9.
-function ObjectFreeze(obj) {
+function ObjectFreezeJS(obj) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
}
var isProxy = %IsJSProxy(obj);
- if (isProxy || %HasNonStrictArgumentsElements(obj) || %IsObserved(obj)) {
+ if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj)) {
if (isProxy) {
ProxyFix(obj);
}
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
- var desc = GetOwnProperty(obj, name);
+ var desc = GetOwnPropertyJS(obj, name);
if (desc.isWritable() || desc.isConfigurable()) {
if (IsDataDescriptor(desc)) desc.setWritable(false);
desc.setConfigurable(false);
@@ -1299,7 +1310,7 @@ function ObjectIsSealed(obj) {
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
- var desc = GetOwnProperty(obj, name);
+ var desc = GetOwnPropertyJS(obj, name);
if (desc.isConfigurable()) return false;
}
return true;
@@ -1320,7 +1331,7 @@ function ObjectIsFrozen(obj) {
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
- var desc = GetOwnProperty(obj, name);
+ var desc = GetOwnPropertyJS(obj, name);
if (IsDataDescriptor(desc) && desc.isWritable()) return false;
if (desc.isConfigurable()) return false;
}
@@ -1350,21 +1361,19 @@ function ObjectIs(obj1, obj2) {
}
-// Harmony __proto__ getter.
+// ECMA-262, Edition 6, section B.2.2.1.1
function ObjectGetProto() {
- return %GetPrototype(this);
+ return %GetPrototype(ToObject(this));
}
-// Harmony __proto__ setter.
-function ObjectSetProto(obj) {
- return %SetPrototype(this, obj);
-}
+// ECMA-262, Edition 6, section B.2.2.1.2
+function ObjectSetProto(proto) {
+ CHECK_OBJECT_COERCIBLE(this, "Object.prototype.__proto__");
-
-// Harmony __proto__ poison pill.
-function ObjectPoisonProto(obj) {
- throw MakeTypeError("proto_poison_pill", []);
+ if ((IS_SPEC_OBJECT(proto) || IS_NULL(proto)) && IS_SPEC_OBJECT(this)) {
+ %SetPrototype(this, proto);
+ }
}
@@ -1387,9 +1396,6 @@ function SetUpObject() {
%SetNativeFlag($Object);
%SetCode($Object, ObjectConstructor);
- %FunctionSetName(ObjectPoisonProto, "__proto__");
- %FunctionRemovePrototype(ObjectPoisonProto);
- %SetExpectedNumberOfProperties($Object, 4);
%SetProperty($Object.prototype, "constructor", $Object, DONT_ENUM);
@@ -1415,16 +1421,20 @@ function SetUpObject() {
"create", ObjectCreate,
"defineProperty", ObjectDefineProperty,
"defineProperties", ObjectDefineProperties,
- "freeze", ObjectFreeze,
+ "freeze", ObjectFreezeJS,
"getPrototypeOf", ObjectGetPrototypeOf,
+ "setPrototypeOf", ObjectSetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
"getOwnPropertyNames", ObjectGetOwnPropertyNames,
+ // getOwnPropertySymbols is added in symbol.js.
"is", ObjectIs,
"isExtensible", ObjectIsExtensible,
"isFrozen", ObjectIsFrozen,
"isSealed", ObjectIsSealed,
"preventExtensions", ObjectPreventExtension,
"seal", ObjectSeal
+ // deliverChangeRecords, getNotifier, observe and unobserve are added
+ // in object-observe.js.
));
}
@@ -1543,7 +1553,7 @@ function NumberValueOf() {
// ECMA-262 section 15.7.4.5
-function NumberToFixed(fractionDigits) {
+function NumberToFixedJS(fractionDigits) {
var x = this;
if (!IS_NUMBER(this)) {
if (!IS_NUMBER_WRAPPER(this)) {
@@ -1568,7 +1578,7 @@ function NumberToFixed(fractionDigits) {
// ECMA-262 section 15.7.4.6
-function NumberToExponential(fractionDigits) {
+function NumberToExponentialJS(fractionDigits) {
var x = this;
if (!IS_NUMBER(this)) {
if (!IS_NUMBER_WRAPPER(this)) {
@@ -1594,7 +1604,7 @@ function NumberToExponential(fractionDigits) {
// ECMA-262 section 15.7.4.7
-function NumberToPrecision(precision) {
+function NumberToPrecisionJS(precision) {
var x = this;
if (!IS_NUMBER(this)) {
if (!IS_NUMBER_WRAPPER(this)) {
@@ -1624,12 +1634,29 @@ function NumberIsFinite(number) {
}
+// Harmony isInteger
+function NumberIsInteger(number) {
+ return NumberIsFinite(number) && TO_INTEGER(number) == number;
+}
+
+
// Harmony isNaN.
function NumberIsNaN(number) {
return IS_NUMBER(number) && NUMBER_IS_NAN(number);
}
+// Harmony isSafeInteger
+function NumberIsSafeInteger(number) {
+ if (NumberIsFinite(number)) {
+ var integral = TO_INTEGER(number);
+ if (integral == number)
+ return MathAbs(integral) <= $Number.MAX_SAFE_INTEGER;
+ }
+ return false;
+}
+
+
// ----------------------------------------------------------------------------
function SetUpNumber() {
@@ -1642,45 +1669,43 @@ function SetUpNumber() {
// Set up the constructor property on the Number prototype object.
%SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
- %OptimizeObjectForAddingMultipleProperties($Number, 5);
- // ECMA-262 section 15.7.3.1.
- %SetProperty($Number,
- "MAX_VALUE",
- 1.7976931348623157e+308,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.2.
- %SetProperty($Number, "MIN_VALUE", 5e-324,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.3.
- %SetProperty($Number, "NaN", NAN, DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.4.
- %SetProperty($Number,
- "NEGATIVE_INFINITY",
- -INFINITY,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.5.
- %SetProperty($Number,
- "POSITIVE_INFINITY",
- INFINITY,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %ToFastProperties($Number);
+ InstallConstants($Number, $Array(
+ // ECMA-262 section 15.7.3.1.
+ "MAX_VALUE", 1.7976931348623157e+308,
+ // ECMA-262 section 15.7.3.2.
+ "MIN_VALUE", 5e-324,
+ // ECMA-262 section 15.7.3.3.
+ "NaN", NAN,
+ // ECMA-262 section 15.7.3.4.
+ "NEGATIVE_INFINITY", -INFINITY,
+ // ECMA-262 section 15.7.3.5.
+ "POSITIVE_INFINITY", INFINITY,
+
+  // --- Harmony constants (no spec refs until settled).
+
+ "MAX_SAFE_INTEGER", %_MathPow(2, 53) - 1,
+ "MIN_SAFE_INTEGER", -%_MathPow(2, 53) + 1,
+ "EPSILON", %_MathPow(2, -52)
+ ));
// Set up non-enumerable functions on the Number prototype object.
InstallFunctions($Number.prototype, DONT_ENUM, $Array(
"toString", NumberToString,
"toLocaleString", NumberToLocaleString,
"valueOf", NumberValueOf,
- "toFixed", NumberToFixed,
- "toExponential", NumberToExponential,
- "toPrecision", NumberToPrecision
+ "toFixed", NumberToFixedJS,
+ "toExponential", NumberToExponentialJS,
+ "toPrecision", NumberToPrecisionJS
));
+
+ // Harmony Number constructor additions
InstallFunctions($Number, DONT_ENUM, $Array(
"isFinite", NumberIsFinite,
- "isNaN", NumberIsNaN
+ "isInteger", NumberIsInteger,
+ "isNaN", NumberIsNaN,
+ "isSafeInteger", NumberIsSafeInteger,
+ "parseInt", GlobalParseInt,
+ "parseFloat", GlobalParseFloat
));
}
@@ -1756,19 +1781,15 @@ function FunctionBind(this_arg) { // Length is 1.
return %Apply(bindings[0], bindings[1], argv, 0, bound_argc + argc);
};
- %FunctionRemovePrototype(boundFunction);
var new_length = 0;
- if (%_ClassOf(this) == "Function") {
- // Function or FunctionProxy.
- var old_length = this.length;
- // FunctionProxies might provide a non-UInt32 value. If so, ignore it.
- if ((typeof old_length === "number") &&
- ((old_length >>> 0) === old_length)) {
- var argc = %_ArgumentsLength();
- if (argc > 0) argc--; // Don't count the thisArg as parameter.
- new_length = old_length - argc;
- if (new_length < 0) new_length = 0;
- }
+ var old_length = this.length;
+ // FunctionProxies might provide a non-UInt32 value. If so, ignore it.
+ if ((typeof old_length === "number") &&
+ ((old_length >>> 0) === old_length)) {
+ var argc = %_ArgumentsLength();
+ if (argc > 0) argc--; // Don't count the thisArg as parameter.
+ new_length = old_length - argc;
+ if (new_length < 0) new_length = 0;
}
// This runtime function finds any remaining arguments on the stack,
// so we don't pass the arguments object.
@@ -1797,7 +1818,7 @@ function NewFunctionString(arguments, function_token) {
// If the formal parameters string includes ) - an illegal
// character - it may make the combined function expression
// compile. We avoid this problem by checking for this early on.
- if (%_CallFunction(p, ')', StringIndexOf) != -1) {
+ if (%_CallFunction(p, ')', StringIndexOfJS) != -1) {
throw MakeSyntaxError('paren_in_arg_string', []);
}
// If the formal parameters include an unbalanced block comment, the
@@ -1815,7 +1836,9 @@ function FunctionConstructor(arg1) { // length == 1
var global_receiver = %GlobalReceiver(global);
// Compile the string in the constructor and not a helper so that errors
// appear to come from here.
- var f = %_CallFunction(global_receiver, %CompileString(source, true));
+ var f = %CompileString(source, true);
+ if (!IS_FUNCTION(f)) return f;
+ f = %_CallFunction(global_receiver, f);
%FunctionMarkNameShouldPrintAsAnonymous(f);
return f;
}
@@ -1836,18 +1859,3 @@ function SetUpFunction() {
}
SetUpFunction();
-
-
-//----------------------------------------------------------------------------
-
-// TODO(rossberg): very simple abstraction for generic microtask queue.
-// Eventually, we should move to a real event queue that allows maintaining
-// relative ordering of different kinds of tasks.
-
-RunMicrotasks.runners = new InternalArray;
-
-function RunMicrotasks() {
- while (%SetMicrotaskPending(false)) {
- for (var i in RunMicrotasks.runners) RunMicrotasks.runners[i]();
- }
-}
diff --git a/chromium/v8/src/v8threads.cc b/chromium/v8/src/v8threads.cc
index 1de9d4fd761..12384454e59 100644
--- a/chromium/v8/src/v8threads.cc
+++ b/chromium/v8/src/v8threads.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "debug.h"
-#include "execution.h"
-#include "v8threads.h"
-#include "regexp-stack.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/api.h"
+#include "src/bootstrapper.h"
+#include "src/debug.h"
+#include "src/execution.h"
+#include "src/v8threads.h"
+#include "src/regexp-stack.h"
namespace v8 {
@@ -74,10 +51,6 @@ void Locker::Initialize(v8::Isolate* isolate) {
isolate_->stack_guard()->ClearThread(access);
isolate_->stack_guard()->InitThread(access);
}
- if (isolate_->IsDefaultIsolate()) {
- // This only enters if not yet entered.
- internal::Isolate::EnterDefaultIsolate();
- }
}
ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
}
@@ -98,9 +71,6 @@ bool Locker::IsActive() {
Locker::~Locker() {
ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
if (has_lock_) {
- if (isolate_->IsDefaultIsolate()) {
- isolate_->Exit();
- }
if (top_level_) {
isolate_->thread_manager()->FreeThreadResources();
} else {
@@ -115,9 +85,6 @@ void Unlocker::Initialize(v8::Isolate* isolate) {
ASSERT(isolate != NULL);
isolate_ = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
- if (isolate_->IsDefaultIsolate()) {
- isolate_->Exit();
- }
isolate_->thread_manager()->ArchiveThread();
isolate_->thread_manager()->Unlock();
}
@@ -127,9 +94,6 @@ Unlocker::~Unlocker() {
ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
isolate_->thread_manager()->Lock();
isolate_->thread_manager()->RestoreThread();
- if (isolate_->IsDefaultIsolate()) {
- isolate_->Enter();
- }
}
@@ -175,15 +139,13 @@ bool ThreadManager::RestoreThread() {
from = isolate_->handle_scope_implementer()->RestoreThread(from);
from = isolate_->RestoreThread(from);
from = Relocatable::RestoreState(isolate_, from);
-#ifdef ENABLE_DEBUGGER_SUPPORT
from = isolate_->debug()->RestoreDebug(from);
-#endif
from = isolate_->stack_guard()->RestoreStackGuard(from);
from = isolate_->regexp_stack()->RestoreStack(from);
from = isolate_->bootstrapper()->RestoreState(from);
per_thread->set_thread_state(NULL);
if (state->terminate_on_restore()) {
- isolate_->stack_guard()->TerminateExecution();
+ isolate_->stack_guard()->RequestTerminateExecution();
state->set_terminate_on_restore(false);
}
state->set_id(ThreadId::Invalid());
@@ -209,9 +171,7 @@ void ThreadManager::Unlock() {
static int ArchiveSpacePerThread() {
return HandleScopeImplementer::ArchiveSpacePerThread() +
Isolate::ArchiveSpacePerThread() +
-#ifdef ENABLE_DEBUGGER_SUPPORT
Debug::ArchiveSpacePerThread() +
-#endif
StackGuard::ArchiveSpacePerThread() +
RegExpStack::ArchiveSpacePerThread() +
Bootstrapper::ArchiveSpacePerThread() +
@@ -337,9 +297,7 @@ void ThreadManager::EagerlyArchiveThread() {
to = isolate_->handle_scope_implementer()->ArchiveThread(to);
to = isolate_->ArchiveThread(to);
to = Relocatable::ArchiveState(isolate_, to);
-#ifdef ENABLE_DEBUGGER_SUPPORT
to = isolate_->debug()->ArchiveDebug(to);
-#endif
to = isolate_->stack_guard()->ArchiveStackGuard(to);
to = isolate_->regexp_stack()->ArchiveStack(to);
to = isolate_->bootstrapper()->ArchiveState(to);
@@ -351,9 +309,7 @@ void ThreadManager::EagerlyArchiveThread() {
void ThreadManager::FreeThreadResources() {
isolate_->handle_scope_implementer()->FreeThreadResources();
isolate_->FreeThreadResources();
-#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->FreeThreadResources();
-#endif
isolate_->stack_guard()->FreeThreadResources();
isolate_->regexp_stack()->FreeThreadResources();
isolate_->bootstrapper()->FreeThreadResources();
diff --git a/chromium/v8/src/v8threads.h b/chromium/v8/src/v8threads.h
index a20700a5c9e..ca722adc66f 100644
--- a/chromium/v8/src/v8threads.h
+++ b/chromium/v8/src/v8threads.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_V8THREADS_H_
#define V8_V8THREADS_H_
diff --git a/chromium/v8/src/v8utils.cc b/chromium/v8/src/v8utils.cc
deleted file mode 100644
index 7390d854e63..00000000000
--- a/chromium/v8/src/v8utils.cc
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-
-#include "v8.h"
-
-#include "platform.h"
-
-#include "sys/stat.h"
-
-namespace v8 {
-namespace internal {
-
-
-void PrintF(const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
-}
-
-
-void PrintF(FILE* out, const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- OS::VFPrint(out, format, arguments);
- va_end(arguments);
-}
-
-
-void PrintPID(const char* format, ...) {
- OS::Print("[%d] ", OS::GetCurrentProcessId());
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
-}
-
-
-void Flush(FILE* out) {
- fflush(out);
-}
-
-
-char* ReadLine(const char* prompt) {
- char* result = NULL;
- char line_buf[256];
- int offset = 0;
- bool keep_going = true;
- fprintf(stdout, "%s", prompt);
- fflush(stdout);
- while (keep_going) {
- if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
- // fgets got an error. Just give up.
- if (result != NULL) {
- DeleteArray(result);
- }
- return NULL;
- }
- int len = StrLength(line_buf);
- if (len > 1 &&
- line_buf[len - 2] == '\\' &&
- line_buf[len - 1] == '\n') {
- // When we read a line that ends with a "\" we remove the escape and
- // append the remainder.
- line_buf[len - 2] = '\n';
- line_buf[len - 1] = 0;
- len -= 1;
- } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
- // Since we read a new line we are done reading the line. This
- // will exit the loop after copying this buffer into the result.
- keep_going = false;
- }
- if (result == NULL) {
- // Allocate the initial result and make room for the terminating '\0'
- result = NewArray<char>(len + 1);
- } else {
- // Allocate a new result with enough room for the new addition.
- int new_len = offset + len + 1;
- char* new_result = NewArray<char>(new_len);
- // Copy the existing input into the new array and set the new
- // array as the result.
- OS::MemCopy(new_result, result, offset * kCharSize);
- DeleteArray(result);
- result = new_result;
- }
- // Copy the newly read line into the result.
- OS::MemCopy(result + offset, line_buf, len * kCharSize);
- offset += len;
- }
- ASSERT(result != NULL);
- result[offset] = '\0';
- return result;
-}
-
-
-char* ReadCharsFromFile(FILE* file,
- int* size,
- int extra_space,
- bool verbose,
- const char* filename) {
- if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
- if (verbose) {
- OS::PrintError("Cannot read from file %s.\n", filename);
- }
- return NULL;
- }
-
- // Get the size of the file and rewind it.
- *size = ftell(file);
- rewind(file);
-
- char* result = NewArray<char>(*size + extra_space);
- for (int i = 0; i < *size && feof(file) == 0;) {
- int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
- if (read != (*size - i) && ferror(file) != 0) {
- fclose(file);
- DeleteArray(result);
- return NULL;
- }
- i += read;
- }
- return result;
-}
-
-
-char* ReadCharsFromFile(const char* filename,
- int* size,
- int extra_space,
- bool verbose) {
- FILE* file = OS::FOpen(filename, "rb");
- char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename);
- if (file != NULL) fclose(file);
- return result;
-}
-
-
-byte* ReadBytes(const char* filename, int* size, bool verbose) {
- char* chars = ReadCharsFromFile(filename, size, 0, verbose);
- return reinterpret_cast<byte*>(chars);
-}
-
-
-static Vector<const char> SetVectorContents(char* chars,
- int size,
- bool* exists) {
- if (!chars) {
- *exists = false;
- return Vector<const char>::empty();
- }
- chars[size] = '\0';
- *exists = true;
- return Vector<const char>(chars, size);
-}
-
-
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose) {
- int size;
- char* result = ReadCharsFromFile(filename, &size, 1, verbose);
- return SetVectorContents(result, size, exists);
-}
-
-
-Vector<const char> ReadFile(FILE* file,
- bool* exists,
- bool verbose) {
- int size;
- char* result = ReadCharsFromFile(file, &size, 1, verbose, "");
- return SetVectorContents(result, size, exists);
-}
-
-
-int WriteCharsToFile(const char* str, int size, FILE* f) {
- int total = 0;
- while (total < size) {
- int write = static_cast<int>(fwrite(str, 1, size - total, f));
- if (write == 0) {
- return total;
- }
- total += write;
- str += write;
- }
- return total;
-}
-
-
-int AppendChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
- FILE* f = OS::FOpen(filename, "ab");
- if (f == NULL) {
- if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
- }
- return 0;
- }
- int written = WriteCharsToFile(str, size, f);
- fclose(f);
- return written;
-}
-
-
-int WriteChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
- FILE* f = OS::FOpen(filename, "wb");
- if (f == NULL) {
- if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
- }
- return 0;
- }
- int written = WriteCharsToFile(str, size, f);
- fclose(f);
- return written;
-}
-
-
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
- bool verbose) {
- const char* str = reinterpret_cast<const char*>(bytes);
- return WriteChars(filename, str, size, verbose);
-}
-
-
-
-void StringBuilder::AddFormatted(const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- AddFormattedList(format, arguments);
- va_end(arguments);
-}
-
-
-void StringBuilder::AddFormattedList(const char* format, va_list list) {
- ASSERT(!is_finalized() && position_ <= buffer_.length());
- int n = OS::VSNPrintF(buffer_ + position_, format, list);
- if (n < 0 || n >= (buffer_.length() - position_)) {
- position_ = buffer_.length();
- } else {
- position_ += n;
- }
-}
-
-} } // namespace v8::internal
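
StringBuilder::AddFormattedList above relies on a classic vsnprintf idiom: a negative return value, or one at least as large as the remaining capacity, signals truncation, in which case the write position is clamped to the end of the buffer. A standalone sketch of the same check, assuming plain std::vsnprintf in place of OS::VSNPrintF (names here are hypothetical):

#include <cstdarg>
#include <cstdio>

// 'buf'/'pos'/'cap' stand in for buffer_, position_ and buffer_.length().
static void AppendFormatted(char* buf, int* pos, int cap,
                            const char* format, ...) {
  va_list args;
  va_start(args, format);
  int n = std::vsnprintf(buf + *pos, cap - *pos, format, args);
  va_end(args);
  if (n < 0 || n >= cap - *pos) {
    *pos = cap;  // Truncated (or encoding error): mark the buffer as full.
  } else {
    *pos += n;   // It fit: advance past the characters just written.
  }
}

int main() {
  char buf[16];
  int pos = 0;
  AppendFormatted(buf, &pos, static_cast<int>(sizeof(buf)), "%d.%d", 3, 27);
  std::printf("%s (pos=%d)\n", buf, pos);  // prints: 3.27 (pos=4)
}
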
diff --git a/chromium/v8/src/v8utils.h b/chromium/v8/src/v8utils.h
deleted file mode 100644
index 02e57ebe727..00000000000
--- a/chromium/v8/src/v8utils.h
+++ /dev/null
@@ -1,443 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8UTILS_H_
-#define V8_V8UTILS_H_
-
-#include "utils.h"
-#include "platform.h" // For va_list on Solaris.
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// I/O support.
-
-#if __GNUC__ >= 4
-// On gcc we can ask the compiler to check the types of %d-style format
-// specifiers and their associated arguments. TODO(erikcorry) fix this
-// so it works on MacOSX.
-#if defined(__MACH__) && defined(__APPLE__)
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#else // MacOSX.
-#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
-#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
-#endif
-#else
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#endif
-
-// Our version of printf().
-void PRINTF_CHECKING PrintF(const char* format, ...);
-void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
-
-// Prepends the current process ID to the output.
-void PRINTF_CHECKING PrintPID(const char* format, ...);
-
-// Our version of fflush.
-void Flush(FILE* out);
-
-inline void Flush() {
- Flush(stdout);
-}
-
-
-// Read a line of characters after printing the prompt to stdout. The resulting
-// char* needs to be disposed of with DeleteArray by the caller.
-char* ReadLine(const char* prompt);
-
-
-// Read and return the raw bytes in a file. The size of the buffer is returned
-// in *size.
-// The returned buffer must be freed by the caller.
-byte* ReadBytes(const char* filename, int* size, bool verbose = true);
-
-
-// Append size chars from str to the file given by filename.
-// The data is appended to the file. Returns the number of chars written.
-int AppendChars(const char* filename,
- const char* str,
- int size,
- bool verbose = true);
-
-
-// Write size chars from str to the file given by filename.
-// The file is overwritten. Returns the number of chars written.
-int WriteChars(const char* filename,
- const char* str,
- int size,
- bool verbose = true);
-
-
-// Write size bytes to the file given by filename.
-// The file is overwritten. Returns the number of bytes written.
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
- bool verbose = true);
-
-
-// Write the C code
-// const char* <varname> = "<str>";
-// const int <varname>_len = <len>;
-// to the file given by filename. Only the first size chars are written.
-int WriteAsCFile(const char* filename, const char* varname,
- const char* str, int size, bool verbose = true);
-
-
-// ----------------------------------------------------------------------------
-// Data structures
-
-template <typename T>
-inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
- int length) {
- return Vector< Handle<Object> >(
- reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
-}
-
-
-// ----------------------------------------------------------------------------
-// Memory
-
-// Copies words from |src| to |dst|. The data spans must not overlap.
-template <typename T>
-inline void CopyWords(T* dst, const T* src, size_t num_words) {
- STATIC_ASSERT(sizeof(T) == kPointerSize);
- ASSERT(Min(dst, const_cast<T*>(src)) + num_words <=
- Max(dst, const_cast<T*>(src)));
- ASSERT(num_words > 0);
-
- // Use block copying OS::MemCopy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const size_t kBlockCopyLimit = 16;
-
- if (num_words < kBlockCopyLimit) {
- do {
- num_words--;
- *dst++ = *src++;
- } while (num_words > 0);
- } else {
- OS::MemCopy(dst, src, num_words * kPointerSize);
- }
-}
-
-
-// Copies words from |src| to |dst|. No restrictions.
-template <typename T>
-inline void MoveWords(T* dst, const T* src, size_t num_words) {
- STATIC_ASSERT(sizeof(T) == kPointerSize);
- ASSERT(num_words > 0);
-
- // Use block copying OS::MemCopy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const size_t kBlockCopyLimit = 16;
-
- if (num_words < kBlockCopyLimit &&
- ((dst < src) || (dst >= (src + num_words * kPointerSize)))) {
- T* end = dst + num_words;
- do {
- num_words--;
- *dst++ = *src++;
- } while (num_words > 0);
- } else {
- OS::MemMove(dst, src, num_words * kPointerSize);
- }
-}
-
-
-// Copies data from |src| to |dst|. The data spans must not overlap.
-template <typename T>
-inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
- STATIC_ASSERT(sizeof(T) == 1);
- ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <=
- Max(dst, const_cast<T*>(src)));
- if (num_bytes == 0) return;
-
- // Use block copying OS::MemCopy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = OS::kMinComplexMemCopy;
-
- if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
- do {
- num_bytes--;
- *dst++ = *src++;
- } while (num_bytes > 0);
- } else {
- OS::MemCopy(dst, src, num_bytes);
- }
-}
-
-
-template <typename T, typename U>
-inline void MemsetPointer(T** dest, U* value, int counter) {
-#ifdef DEBUG
- T* a = NULL;
- U* b = NULL;
- a = b; // Fake assignment to check assignability.
- USE(a);
-#endif // DEBUG
-#if V8_HOST_ARCH_IA32
-#define STOS "stosl"
-#elif V8_HOST_ARCH_X64
-#define STOS "stosq"
-#endif
-#if defined(__native_client__)
- // This STOS sequence does not validate for x86_64 Native Client.
- // Here we #undef STOS to force use of the slower C version.
- // TODO(bradchen): Profile V8 and implement a faster REP STOS
- // here if the profile indicates it matters.
-#undef STOS
-#endif
-
-#if defined(__GNUC__) && defined(STOS)
- asm volatile(
- "cld;"
- "rep ; " STOS
- : "+&c" (counter), "+&D" (dest)
- : "a" (value)
- : "memory", "cc");
-#else
- for (int i = 0; i < counter; i++) {
- dest[i] = value;
- }
-#endif
-
-#undef STOS
-}
-
-
-// Simple wrapper that allows an ExternalString to refer to a
-// Vector<const char>. Doesn't assume ownership of the data.
-class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
- public:
- explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
-
- virtual const char* data() const { return data_.start(); }
-
- virtual size_t length() const { return data_.length(); }
-
- private:
- Vector<const char> data_;
-};
-
-
-// Simple support to read a file into a 0-terminated C-string.
-// The returned buffer must be freed by the caller.
-// On return, *exists tells whether the file existed.
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose = true);
-Vector<const char> ReadFile(FILE* file,
- bool* exists,
- bool verbose = true);
-
-
-template <typename sourcechar, typename sinkchar>
-INLINE(static void CopyCharsUnsigned(sinkchar* dest,
- const sourcechar* src,
- int chars));
-#if defined(V8_HOST_ARCH_ARM)
-INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
-INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
-INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
-#endif
-
-// Copy from ASCII/16bit chars to ASCII/16bit chars.
-template <typename sourcechar, typename sinkchar>
-INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
-
-template<typename sourcechar, typename sinkchar>
-void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
- ASSERT(sizeof(sourcechar) <= 2);
- ASSERT(sizeof(sinkchar) <= 2);
- if (sizeof(sinkchar) == 1) {
- if (sizeof(sourcechar) == 1) {
- CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint8_t*>(src),
- chars);
- } else {
- CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint16_t*>(src),
- chars);
- }
- } else {
- if (sizeof(sourcechar) == 1) {
- CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
- reinterpret_cast<const uint8_t*>(src),
- chars);
- } else {
- CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
- reinterpret_cast<const uint16_t*>(src),
- chars);
- }
- }
-}
-
-template <typename sourcechar, typename sinkchar>
-void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
- sinkchar* limit = dest + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*dest) == sizeof(*src)) {
- if (chars >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) {
- OS::MemCopy(dest, src, chars * sizeof(*dest));
- return;
- }
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
- ASSERT(dest + kStepSize > dest); // Check for overflow.
- while (dest + kStepSize <= limit) {
- *reinterpret_cast<uintptr_t*>(dest) =
- *reinterpret_cast<const uintptr_t*>(src);
- dest += kStepSize;
- src += kStepSize;
- }
- }
-#endif
- while (dest < limit) {
- *dest++ = static_cast<sinkchar>(*src++);
- }
-}
-
-
-#if defined(V8_HOST_ARCH_ARM)
-void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- case 2:
- memcpy(dest, src, 2);
- break;
- case 3:
- memcpy(dest, src, 3);
- break;
- case 4:
- memcpy(dest, src, 4);
- break;
- case 5:
- memcpy(dest, src, 5);
- break;
- case 6:
- memcpy(dest, src, 6);
- break;
- case 7:
- memcpy(dest, src, 7);
- break;
- case 8:
- memcpy(dest, src, 8);
- break;
- case 9:
- memcpy(dest, src, 9);
- break;
- case 10:
- memcpy(dest, src, 10);
- break;
- case 11:
- memcpy(dest, src, 11);
- break;
- case 12:
- memcpy(dest, src, 12);
- break;
- case 13:
- memcpy(dest, src, 13);
- break;
- case 14:
- memcpy(dest, src, 14);
- break;
- case 15:
- memcpy(dest, src, 15);
- break;
- default:
- OS::MemCopy(dest, src, chars);
- break;
- }
-}
-
-
-void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
- if (chars >= OS::kMinComplexConvertMemCopy) {
- OS::MemCopyUint16Uint8(dest, src, chars);
- } else {
- OS::MemCopyUint16Uint8Wrapper(dest, src, chars);
- }
-}
-
-
-void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- case 2:
- memcpy(dest, src, 4);
- break;
- case 3:
- memcpy(dest, src, 6);
- break;
- case 4:
- memcpy(dest, src, 8);
- break;
- case 5:
- memcpy(dest, src, 10);
- break;
- case 6:
- memcpy(dest, src, 12);
- break;
- case 7:
- memcpy(dest, src, 14);
- break;
- default:
- OS::MemCopy(dest, src, chars * sizeof(*dest));
- break;
- }
-}
-#endif
-
-
-class StringBuilder : public SimpleStringBuilder {
- public:
- explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
- StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }
-
- // Add formatted contents to the builder just like printf().
- void AddFormatted(const char* format, ...);
-
- // Add formatted contents like printf based on a va_list.
- void AddFormattedList(const char* format, va_list list);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_V8UTILS_H_
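
The PRINTF_CHECKING and FPRINTF_CHECKING macros deleted above ask GCC to type-check printf-style format strings against their arguments at compile time. A self-contained sketch of the underlying attribute, assuming a GCC/Clang toolchain (macro and function names are hypothetical):

#include <cstdarg>
#include <cstdio>

#if defined(__GNUC__)
// format(printf, 1, 2): argument 1 is the format string, varargs start at 2.
#define MY_PRINTF_CHECKING __attribute__((format(printf, 1, 2)))
#else
#define MY_PRINTF_CHECKING
#endif

void MY_PRINTF_CHECKING LogF(const char* format, ...) {
  va_list args;
  va_start(args, format);
  std::vfprintf(stderr, format, args);
  va_end(args);
}

int main() {
  LogF("pid=%d name=%s\n", 42, "v8");  // OK
  // LogF("pid=%d\n", "oops");         // With -Wformat this is a compile-time
                                       // diagnostic instead of runtime UB.
}
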
diff --git a/chromium/v8/src/variables.cc b/chromium/v8/src/variables.cc
index 488da42ce66..906b6ab7d58 100644
--- a/chromium/v8/src/variables.cc
+++ b/chromium/v8/src/variables.cc
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "ast.h"
-#include "scopes.h"
-#include "variables.h"
+#include "src/ast.h"
+#include "src/scopes.h"
+#include "src/variables.h"
namespace v8 {
namespace internal {
@@ -40,9 +17,9 @@ namespace internal {
const char* Variable::Mode2String(VariableMode mode) {
switch (mode) {
case VAR: return "VAR";
- case CONST: return "CONST";
+ case CONST_LEGACY: return "CONST_LEGACY";
case LET: return "LET";
- case CONST_HARMONY: return "CONST_HARMONY";
+ case CONST: return "CONST";
case MODULE: return "MODULE";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
@@ -58,7 +35,7 @@ const char* Variable::Mode2String(VariableMode mode) {
Variable::Variable(Scope* scope,
Handle<String> name,
VariableMode mode,
- bool is_valid_LHS,
+ bool is_valid_ref,
Kind kind,
InitializationFlag initialization_flag,
Interface* interface)
@@ -70,7 +47,7 @@ Variable::Variable(Scope* scope,
index_(-1),
initializer_position_(RelocInfo::kNoPosition),
local_if_not_shadowed_(NULL),
- is_valid_LHS_(is_valid_LHS),
+ is_valid_ref_(is_valid_ref),
force_context_allocation_(false),
is_used_(false),
initialization_flag_(initialization_flag),
diff --git a/chromium/v8/src/variables.h b/chromium/v8/src/variables.h
index 39451d5dfb1..de209d871d5 100644
--- a/chromium/v8/src/variables.h
+++ b/chromium/v8/src/variables.h
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_VARIABLES_H_
#define V8_VARIABLES_H_
-#include "zone.h"
-#include "interface.h"
+#include "src/zone.h"
+#include "src/interface.h"
namespace v8 {
namespace internal {
@@ -77,7 +54,7 @@ class Variable: public ZoneObject {
Variable(Scope* scope,
Handle<String> name,
VariableMode mode,
- bool is_valid_lhs,
+ bool is_valid_ref,
Kind kind,
InitializationFlag initialization_flag,
Interface* interface = Interface::NewValue());
@@ -85,7 +62,7 @@ class Variable: public ZoneObject {
// Printing support
static const char* Mode2String(VariableMode mode);
- bool IsValidLeftHandSide() { return is_valid_LHS_; }
+ bool IsValidReference() { return is_valid_ref_; }
// The source code for an eval() call may refer to a variable that is
// in an outer scope about which we don't know anything (it may not
@@ -168,12 +145,12 @@ class Variable: public ZoneObject {
// If this field is set, this variable references the stored locally bound
// variable, but it might be shadowed by variable bindings introduced by
- // non-strict 'eval' calls between the reference scope (inclusive) and the
+ // sloppy 'eval' calls between the reference scope (inclusive) and the
// binding scope (exclusive).
Variable* local_if_not_shadowed_;
- // Valid as a LHS? (const and this are not valid LHS, for example)
- bool is_valid_LHS_;
+ // Valid as a reference? (const and this are not valid, for example)
+ bool is_valid_ref_;
// Usage info.
bool force_context_allocation_; // set by variable resolver
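
Note how the two files above swap the meaning of CONST: the pre-ES6 legacy mode formerly called CONST becomes CONST_LEGACY, and the former CONST_HARMONY becomes plain CONST. A sketch of the resulting mapping (an illustrative subset only, not the real VariableMode definition):

#include <cstdio>

// Illustrative subset of VariableMode after the rename; not the real enum.
enum VariableMode { VAR, CONST_LEGACY, LET, CONST };

static const char* Mode2String(VariableMode mode) {
  switch (mode) {
    case VAR:          return "VAR";           // 'var' declarations
    case CONST_LEGACY: return "CONST_LEGACY";  // pre-ES6 'const' (was CONST)
    case LET:          return "LET";           // ES6 'let'
    case CONST:        return "CONST";         // ES6 'const' (was CONST_HARMONY)
  }
  return "UNKNOWN";
}

int main() { std::printf("%s\n", Mode2String(CONST)); }
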
diff --git a/chromium/v8/src/vector.h b/chromium/v8/src/vector.h
new file mode 100644
index 00000000000..505ef5ad561
--- /dev/null
+++ b/chromium/v8/src/vector.h
@@ -0,0 +1,171 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_VECTOR_H_
+#define V8_VECTOR_H_
+
+#include <string.h>
+#include <algorithm>
+
+#include "src/allocation.h"
+#include "src/checks.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+
+template <typename T>
+class Vector {
+ public:
+ Vector() : start_(NULL), length_(0) {}
+ Vector(T* data, int length) : start_(data), length_(length) {
+ ASSERT(length == 0 || (length > 0 && data != NULL));
+ }
+
+ static Vector<T> New(int length) {
+ return Vector<T>(NewArray<T>(length), length);
+ }
+
+ // Returns a vector using the same backing storage as this one,
+ // spanning from and including 'from', to but not including 'to'.
+ Vector<T> SubVector(int from, int to) {
+ SLOW_ASSERT(to <= length_);
+ SLOW_ASSERT(from < to);
+ ASSERT(0 <= from);
+ return Vector<T>(start() + from, to - from);
+ }
+
+ // Returns the length of the vector.
+ int length() const { return length_; }
+
+ // Returns whether or not the vector is empty.
+ bool is_empty() const { return length_ == 0; }
+
+ // Returns the pointer to the start of the data in the vector.
+ T* start() const { return start_; }
+
+ // Access individual vector elements - checks bounds in debug mode.
+ T& operator[](int index) const {
+ ASSERT(0 <= index && index < length_);
+ return start_[index];
+ }
+
+ const T& at(int index) const { return operator[](index); }
+
+ T& first() { return start_[0]; }
+
+ T& last() { return start_[length_ - 1]; }
+
+ // Returns a clone of this vector with a new backing store.
+ Vector<T> Clone() const {
+ T* result = NewArray<T>(length_);
+ for (int i = 0; i < length_; i++) result[i] = start_[i];
+ return Vector<T>(result, length_);
+ }
+
+ void Sort(int (*cmp)(const T*, const T*)) {
+ std::sort(start(), start() + length(), RawComparer(cmp));
+ }
+
+ void Sort() {
+ std::sort(start(), start() + length());
+ }
+
+ void Truncate(int length) {
+ ASSERT(length <= length_);
+ length_ = length;
+ }
+
+ // Releases the array underlying this vector. Once disposed the
+ // vector is empty.
+ void Dispose() {
+ DeleteArray(start_);
+ start_ = NULL;
+ length_ = 0;
+ }
+
+ inline Vector<T> operator+(int offset) {
+ ASSERT(offset < length_);
+ return Vector<T>(start_ + offset, length_ - offset);
+ }
+
+ // Factory method for creating empty vectors.
+ static Vector<T> empty() { return Vector<T>(NULL, 0); }
+
+ template<typename S>
+ static Vector<T> cast(Vector<S> input) {
+ return Vector<T>(reinterpret_cast<T*>(input.start()),
+ input.length() * sizeof(S) / sizeof(T));
+ }
+
+ protected:
+ void set_start(T* start) { start_ = start; }
+
+ private:
+ T* start_;
+ int length_;
+
+ class RawComparer {
+ public:
+ explicit RawComparer(int (*cmp)(const T*, const T*)) : cmp_(cmp) {}
+ bool operator()(const T& a, const T& b) {
+ return cmp_(&a, &b) < 0;
+ }
+
+ private:
+ int (*cmp_)(const T*, const T*);
+ };
+};
+
+
+template <typename T>
+class ScopedVector : public Vector<T> {
+ public:
+ explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
+ ~ScopedVector() {
+ DeleteArray(this->start());
+ }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
+};
+
+
+inline int StrLength(const char* string) {
+ size_t length = strlen(string);
+ ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
+ return static_cast<int>(length);
+}
+
+
+#define STATIC_ASCII_VECTOR(x) \
+ v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \
+ ARRAY_SIZE(x)-1)
+
+inline Vector<const char> CStrVector(const char* data) {
+ return Vector<const char>(data, StrLength(data));
+}
+
+inline Vector<const uint8_t> OneByteVector(const char* data, int length) {
+ return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length);
+}
+
+inline Vector<const uint8_t> OneByteVector(const char* data) {
+ return OneByteVector(data, StrLength(data));
+}
+
+inline Vector<char> MutableCStrVector(char* data) {
+ return Vector<char>(data, StrLength(data));
+}
+
+inline Vector<char> MutableCStrVector(char* data, int max) {
+ int length = StrLength(data);
+ return Vector<char>(data, (length < max) ? length : max);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_VECTOR_H_
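
For orientation, a usage sketch of the Vector API added above: Vector is a non-owning (pointer, length) view, SubVector reslices without copying, and Dispose frees the backing store. This standalone approximation substitutes new[]/delete[] and assert for V8's NewArray/DeleteArray and ASSERT:

#include <cassert>
#include <cstdio>

// Standalone approximation of v8::internal::Vector<T>.
template <typename T>
class Vector {
 public:
  Vector(T* data, int length) : start_(data), length_(length) {}
  static Vector<T> New(int length) { return Vector<T>(new T[length], length); }
  // Returns a view over [from, to) of the same backing storage; no copy.
  Vector<T> SubVector(int from, int to) {
    assert(0 <= from && from < to && to <= length_);
    return Vector<T>(start_ + from, to - from);
  }
  T& operator[](int i) const {
    assert(0 <= i && i < length_);
    return start_[i];
  }
  int length() const { return length_; }
  void Dispose() { delete[] start_; start_ = nullptr; length_ = 0; }
 private:
  T* start_;
  int length_;
};

int main() {
  Vector<int> v = Vector<int>::New(4);
  for (int i = 0; i < v.length(); i++) v[i] = i * i;
  Vector<int> mid = v.SubVector(1, 3);      // views v[1..2]
  std::printf("%d %d\n", mid[0], mid[1]);   // prints: 1 4
  v.Dispose();  // All SubVector views are invalid after this point.
}
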
diff --git a/chromium/v8/src/version.cc b/chromium/v8/src/version.cc
index 2d89ca8bd50..c11fb5a34e8 100644
--- a/chromium/v8/src/version.cc
+++ b/chromium/v8/src/version.cc
@@ -25,17 +25,17 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+#include "src/v8.h"
-#include "version.h"
+#include "src/version.h"
// These macros define the version number for the current version.
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 23
-#define BUILD_NUMBER 17
-#define PATCH_LEVEL 28
+#define MINOR_VERSION 27
+#define BUILD_NUMBER 34
+#define PATCH_LEVEL 11
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
@@ -84,13 +84,13 @@ void Version::GetString(Vector<char> str) {
const char* is_simulator = "";
#endif // USE_SIMULATOR
if (GetPatch() > 0) {
- OS::SNPrintF(str, "%d.%d.%d.%d%s%s",
- GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate,
- is_simulator);
+ SNPrintF(str, "%d.%d.%d.%d%s%s",
+ GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate,
+ is_simulator);
} else {
- OS::SNPrintF(str, "%d.%d.%d%s%s",
- GetMajor(), GetMinor(), GetBuild(), candidate,
- is_simulator);
+ SNPrintF(str, "%d.%d.%d%s%s",
+ GetMajor(), GetMinor(), GetBuild(), candidate,
+ is_simulator);
}
}
@@ -101,15 +101,15 @@ void Version::GetSONAME(Vector<char> str) {
// Generate generic SONAME if no specific SONAME is defined.
const char* candidate = IsCandidate() ? "-candidate" : "";
if (GetPatch() > 0) {
- OS::SNPrintF(str, "libv8-%d.%d.%d.%d%s.so",
- GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate);
+ SNPrintF(str, "libv8-%d.%d.%d.%d%s.so",
+ GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate);
} else {
- OS::SNPrintF(str, "libv8-%d.%d.%d%s.so",
- GetMajor(), GetMinor(), GetBuild(), candidate);
+ SNPrintF(str, "libv8-%d.%d.%d%s.so",
+ GetMajor(), GetMinor(), GetBuild(), candidate);
}
} else {
// Use specific SONAME.
- OS::SNPrintF(str, "%s", soname_);
+ SNPrintF(str, "%s", soname_);
}
}
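
With the macros bumped above, GetString produces "3.27.34.11" (patch > 0, not a candidate, no simulator). A minimal sketch of the same formatting logic, assuming plain std::snprintf in place of V8's SNPrintF:

#include <cstdio>

int main() {
  const int major = 3, minor = 27, build = 34, patch = 11;
  const bool candidate = false;
  char str[32];
  if (patch > 0) {
    std::snprintf(str, sizeof(str), "%d.%d.%d.%d%s", major, minor, build,
                  patch, candidate ? " (candidate)" : "");
  } else {
    std::snprintf(str, sizeof(str), "%d.%d.%d%s", major, minor, build,
                  candidate ? " (candidate)" : "");
  }
  std::puts(str);  // prints: 3.27.34.11
}
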
diff --git a/chromium/v8/src/version.h b/chromium/v8/src/version.h
index 4b3e7e2bde3..b0a60715215 100644
--- a/chromium/v8/src/version.h
+++ b/chromium/v8/src/version.h
@@ -1,29 +1,6 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_VERSION_H_
#define V8_VERSION_H_
diff --git a/chromium/v8/src/vm-state-inl.h b/chromium/v8/src/vm-state-inl.h
index 658773e6d6a..4e0d7b8619c 100644
--- a/chromium/v8/src/vm-state-inl.h
+++ b/chromium/v8/src/vm-state-inl.h
@@ -1,36 +1,13 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_VM_STATE_INL_H_
#define V8_VM_STATE_INL_H_
-#include "vm-state.h"
-#include "log.h"
-#include "simulator.h"
+#include "src/vm-state.h"
+#include "src/log.h"
+#include "src/simulator.h"
namespace v8 {
namespace internal {
@@ -85,8 +62,7 @@ ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
callback_(callback),
previous_scope_(isolate->external_callback_scope()) {
#ifdef USE_SIMULATOR
- int32_t sp = Simulator::current(isolate)->get_register(Simulator::sp);
- scope_address_ = reinterpret_cast<Address>(static_cast<intptr_t>(sp));
+ scope_address_ = Simulator::current(isolate)->get_sp();
#endif
isolate_->set_external_callback_scope(this);
}
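
ExternalCallbackScope above follows a common RAII idiom: each scope captures the previously innermost scope at construction, and the isolate only ever points at the innermost one, forming an intrusive stack that unwinds automatically. A self-contained sketch of that pattern (the Isolate struct here is a hypothetical stand-in):

#include <cstdio>

// Hypothetical stand-in for the isolate's scope bookkeeping.
struct Isolate {
  struct CallbackScope* current = nullptr;
};

// RAII link in an intrusive stack of active callback scopes.
struct CallbackScope {
  explicit CallbackScope(Isolate* isolate)
      : isolate_(isolate), previous_(isolate->current) {
    isolate_->current = this;  // Push: become the innermost scope.
  }
  ~CallbackScope() {
    isolate_->current = previous_;  // Pop: restore the enclosing scope.
  }
  Isolate* isolate_;
  CallbackScope* previous_;
};

int main() {
  Isolate isolate;
  {
    CallbackScope outer(&isolate);
    {
      CallbackScope inner(&isolate);
      std::printf("innermost: %p\n", (void*)isolate.current);  // == &inner
    }
    std::printf("restored:  %p\n", (void*)isolate.current);    // == &outer
  }
  // isolate.current is nullptr again here.
}
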
diff --git a/chromium/v8/src/vm-state.h b/chromium/v8/src/vm-state.h
index f592bb92ca5..a72180ca45f 100644
--- a/chromium/v8/src/vm-state.h
+++ b/chromium/v8/src/vm-state.h
@@ -1,35 +1,12 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_VM_STATE_H_
#define V8_VM_STATE_H_
-#include "allocation.h"
-#include "isolate.h"
+#include "src/allocation.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/weak_collection.js b/chromium/v8/src/weak_collection.js
new file mode 100644
index 00000000000..4c26d257437
--- /dev/null
+++ b/chromium/v8/src/weak_collection.js
@@ -0,0 +1,183 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+var $WeakMap = global.WeakMap;
+var $WeakSet = global.WeakSet;
+
+
+// -------------------------------------------------------------------
+// Harmony WeakMap
+
+function WeakMapConstructor() {
+ if (%_IsConstructCall()) {
+ %WeakCollectionInitialize(this);
+ } else {
+ throw MakeTypeError('constructor_not_function', ['WeakMap']);
+ }
+}
+
+
+function WeakMapGet(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.get', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionGet(this, key);
+}
+
+
+function WeakMapSet(key, value) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.set', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionSet(this, key, value);
+}
+
+
+function WeakMapHas(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.has', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionHas(this, key);
+}
+
+
+function WeakMapDelete(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.delete', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionDelete(this, key);
+}
+
+
+function WeakMapClear() {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakMap() {
+ %CheckIsBootstrapping();
+
+ %SetCode($WeakMap, WeakMapConstructor);
+ %FunctionSetPrototype($WeakMap, new $Object());
+ %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the WeakMap prototype object.
+ InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
+ "get", WeakMapGet,
+ "set", WeakMapSet,
+ "has", WeakMapHas,
+ "delete", WeakMapDelete,
+ "clear", WeakMapClear
+ ));
+}
+
+SetUpWeakMap();
+
+
+// -------------------------------------------------------------------
+// Harmony WeakSet
+
+function WeakSetConstructor() {
+ if (%_IsConstructCall()) {
+ %WeakCollectionInitialize(this);
+ } else {
+ throw MakeTypeError('constructor_not_function', ['WeakSet']);
+ }
+}
+
+
+function WeakSetAdd(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.add', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionSet(this, value, true);
+}
+
+
+function WeakSetHas(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.has', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionHas(this, value);
+}
+
+
+function WeakSetDelete(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.delete', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionDelete(this, value);
+}
+
+
+function WeakSetClear() {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakSet() {
+ %CheckIsBootstrapping();
+
+ %SetCode($WeakSet, WeakSetConstructor);
+ %FunctionSetPrototype($WeakSet, new $Object());
+ %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the WeakSet prototype object.
+ InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
+ "add", WeakSetAdd,
+ "has", WeakSetHas,
+ "delete", WeakSetDelete,
+ "clear", WeakSetClear
+ ));
+}
+
+SetUpWeakSet();
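
WeakMapClear and WeakSetClear above clear a collection by re-running %WeakCollectionInitialize, i.e. by swapping in a brand-new internal table rather than deleting entries one at a time. The same reset-by-reinitialize idiom in C++ (a loose sketch with hypothetical types, not the runtime's actual table):

#include <cstdio>
#include <string>
#include <unordered_map>

struct Collection {
  std::unordered_map<std::string, int> table;
  // Clear by swapping in a fresh table; the old buckets are freed at once,
  // analogous to re-initializing the collection's backing store above.
  void Clear() { std::unordered_map<std::string, int>().swap(table); }
};

int main() {
  Collection c;
  c.table["a"] = 1;
  c.Clear();
  std::printf("%zu\n", c.table.size());  // prints: 0
}
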
diff --git a/chromium/v8/src/win32-math.cc b/chromium/v8/src/win32-math.cc
index 8f6d0774312..e0670b041eb 100644
--- a/chromium/v8/src/win32-math.cc
+++ b/chromium/v8/src/win32-math.cc
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
// refer to The Open Group Base Specification for specification of the correct
@@ -31,13 +8,13 @@
// (http://www.opengroup.org/onlinepubs/000095399/)
#if defined(_MSC_VER) && (_MSC_VER < 1800)
-#include "win32-headers.h"
+#include "src/base/win32-headers.h"
#include <limits.h> // Required for INT_MAX etc.
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
#include <cmath>
-#include "win32-math.h"
+#include "src/win32-math.h"
-#include "checks.h"
+#include "src/checks.h"
namespace std {
diff --git a/chromium/v8/src/win32-math.h b/chromium/v8/src/win32-math.h
index fd9312b0f54..7b7cbc9256a 100644
--- a/chromium/v8/src/win32-math.h
+++ b/chromium/v8/src/win32-math.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
// refer to The Open Group Base Specification for specification of the correct
diff --git a/chromium/v8/src/x64/assembler-x64-inl.h b/chromium/v8/src/x64/assembler-x64-inl.h
index 073fcbe8e94..f1731af34b6 100644
--- a/chromium/v8/src/x64/assembler-x64-inl.h
+++ b/chromium/v8/src/x64/assembler-x64-inl.h
@@ -1,49 +1,29 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_
-#include "x64/assembler-x64.h"
+#include "src/x64/assembler-x64.h"
-#include "cpu.h"
-#include "debug.h"
-#include "v8memory.h"
+#include "src/cpu.h"
+#include "src/debug.h"
+#include "src/v8memory.h"
namespace v8 {
namespace internal {
+bool CpuFeatures::SupportsCrankshaft() { return true; }
+
// -----------------------------------------------------------------------------
// Implementation of Assembler
static const byte kCallOpcode = 0xE8;
-static const int kNoCodeAgeSequenceLength = 6;
+// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
+static const int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
void Assembler::emitl(uint32_t x) {
@@ -97,7 +77,6 @@ void Assembler::emit_code_target(Handle<Code> target,
void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
ASSERT(RelocInfo::IsRuntimeEntry(rmode));
- ASSERT(isolate()->code_range()->exists());
RecordRelocInfo(rmode);
emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start()));
}
@@ -205,14 +184,20 @@ void Assembler::emit_optional_rex_32(const Operand& op) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
return Memory::int32_at(pc) + pc + 4;
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
- CPU::FlushICache(pc, sizeof(int32_t));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc, sizeof(int32_t));
+ }
}
@@ -227,7 +212,6 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
Address Assembler::runtime_entry_at(Address pc) {
- ASSERT(isolate()->code_range()->exists());
return Memory::int32_at(pc) + isolate()->code_range()->start();
}
@@ -235,19 +219,20 @@ Address Assembler::runtime_entry_at(Address pc) {
// Implementation of RelocInfo
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+ bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
Memory::Address_at(pc_) += static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(Address));
+ if (flush_icache) CPU::FlushICache(pc_, sizeof(Address));
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(int32_t));
+ if (flush_icache) CPU::FlushICache(pc_, sizeof(int32_t));
} else if (rmode_ == CODE_AGE_SEQUENCE) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= static_cast<int32_t>(delta); // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
}
}
}
@@ -255,7 +240,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -267,6 +252,12 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
@@ -276,10 +267,13 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@@ -309,12 +303,16 @@ Address RelocInfo::target_reference() {
}
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -330,9 +328,12 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
void RelocInfo::set_target_runtime_entry(Address target,
- WriteBarrierMode mode) {
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsRuntimeEntry(rmode_));
- if (target_address() != target) set_target_address(target, mode);
+ if (target_address() != target) {
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+ }
}
@@ -349,12 +350,16 @@ Cell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
@@ -369,7 +374,7 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -383,12 +388,8 @@ bool RelocInfo::IsPatchedReturnSequence() {
// movq(rsp, rbp); pop(rbp); ret(n); int3 *6
// The 11th byte is int3 (0xCC) in the return sequence and
// REX.WB (0x48+register bit) for the call sequence.
-#ifdef ENABLE_DEBUGGER_SUPPORT
return pc_[Assembler::kMoveAddressIntoScratchRegisterInstructionLength] !=
0xCC;
-#else
- return false;
-#endif
}
@@ -408,14 +409,16 @@ Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
+ Assembler::target_address_at(pc_ + 1, host_));
}
-void RelocInfo::set_code_age_stub(Code* stub) {
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(*pc_ == kCallOpcode);
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
+ icache_flush_mode);
}
@@ -474,14 +477,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
@@ -503,14 +504,12 @@ void RelocInfo::Visit(Heap* heap) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
diff --git a/chromium/v8/src/x64/assembler-x64.cc b/chromium/v8/src/x64/assembler-x64.cc
index bc875d67e83..59b027f5beb 100644
--- a/chromium/v8/src/x64/assembler-x64.cc
+++ b/chromium/v8/src/x64/assembler-x64.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "macro-assembler.h"
-#include "serialize.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -38,60 +15,25 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
-uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
-uint64_t CpuFeatures::cross_compile_ = 0;
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
-void CpuFeatures::Probe() {
- ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
-#ifdef DEBUG
- initialized_ = true;
-#endif
- supported_ = kDefaultCpuFeatures;
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- uint64_t probed_features = 0;
+void CpuFeatures::ProbeImpl(bool cross_compile) {
CPU cpu;
- if (cpu.has_sse41()) {
- probed_features |= static_cast<uint64_t>(1) << SSE4_1;
- }
- if (cpu.has_sse3()) {
- probed_features |= static_cast<uint64_t>(1) << SSE3;
- }
-
- // SSE2 must be available on every x64 CPU.
- ASSERT(cpu.has_sse2());
- probed_features |= static_cast<uint64_t>(1) << SSE2;
+ CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
+ CHECK(cpu.has_cmov()); // CMOV support is mandatory.
- // CMOV must be available on every x64 CPU.
- ASSERT(cpu.has_cmov());
- probed_features |= static_cast<uint64_t>(1) << CMOV;
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+ if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
+ if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
// SAHF is not generally available in long mode.
- if (cpu.has_sahf()) {
- probed_features |= static_cast<uint64_t>(1) << SAHF;
- }
-
- uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
- supported_ = probed_features | platform_features;
- found_by_runtime_probing_only_
- = probed_features & ~kDefaultCpuFeatures & ~platform_features;
+ if (cpu.has_sahf() && FLAG_enable_sahf) supported_ |= 1u << SAHF;
}
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
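
Generated code consumes these probed bits through IsSupported plus a CpuFeatureScope guard; a minimal sketch, assuming masm is an in-scope MacroAssembler:

if (CpuFeatures::IsSupported(SSE4_1)) {
  CpuFeatureScope fscope(&masm, SSE4_1);  // asserts the feature in debug builds
  // ... emit SSE4.1 code ...
} else {
  // ... SSE2 fallback (SSE2 is CHECKed above, so always present) ...
}
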
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -110,7 +52,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movq(kScratchRegister, target, RelocInfo::NONE64);
+ patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
+ Assembler::RelocInfoNone());
patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
@@ -416,9 +359,9 @@ void Assembler::GrowBuffer() {
intptr_t pc_delta = desc.buffer - buffer_;
intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
(buffer_ + buffer_size_);
- OS::MemMove(desc.buffer, buffer_, desc.instr_size);
- OS::MemMove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
+ MemMove(desc.buffer, buffer_, desc.instr_size);
+ MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(),
+ desc.reloc_size);
// Switch buffers.
if (isolate() != NULL &&
@@ -466,24 +409,30 @@ void Assembler::emit_operand(int code, const Operand& adr) {
// Assembler Instruction implementations.
-void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
+void Assembler::arithmetic_op(byte opcode,
+ Register reg,
+ const Operand& op,
+ int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
+ emit_rex(reg, op, size);
emit(opcode);
emit_operand(reg, op);
}
-void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
+void Assembler::arithmetic_op(byte opcode,
+ Register reg,
+ Register rm_reg,
+ int size) {
EnsureSpace ensure_space(this);
ASSERT((opcode & 0xC6) == 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
- emit_rex_64(rm_reg, reg);
+ emit_rex(rm_reg, reg, size);
emit(opcode ^ 0x02);
emit_modrm(rm_reg, reg);
} else {
- emit_rex_64(reg, rm_reg);
+ emit_rex(reg, rm_reg, size);
emit(opcode);
emit_modrm(reg, rm_reg);
}
@@ -519,37 +468,45 @@ void Assembler::arithmetic_op_16(byte opcode,
}
-void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
+void Assembler::arithmetic_op_8(byte opcode, Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ if (!reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg);
+ }
+ emit(opcode);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::arithmetic_op_8(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
+ if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
- emit_optional_rex_32(rm_reg, reg);
- emit(opcode ^ 0x02); // E.g. 0x03 -> 0x01 for ADD.
+ if (!rm_reg.is_byte_register() || !reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(rm_reg, reg);
+ }
+ emit(opcode ^ 0x02);
emit_modrm(rm_reg, reg);
} else {
- emit_optional_rex_32(reg, rm_reg);
+ if (!reg.is_byte_register() || !rm_reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg, rm_reg);
+ }
emit(opcode);
emit_modrm(reg, rm_reg);
}
}
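
Background for the REX checks above: without a REX prefix only al, cl, dl and bl are addressable as byte registers; any REX prefix, even a bare 0x40, switches the encoding to sil, dil, spl and bpl instead. Two illustrative encodings (byte values worked out by hand, so treat as a sketch):

// cmpb(rax, rbx)  ->  3A C3      cmp al, bl   (no REX needed)
// cmpb(rsi, rdi)  ->  40 3A F7   cmp sil, dil (bare REX required)
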
-void Assembler::arithmetic_op_32(byte opcode,
- Register reg,
- const Operand& rm_reg) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_operand(reg, rm_reg);
-}
-
-
void Assembler::immediate_arithmetic_op(byte subcode,
Register dst,
- Immediate src) {
+ Immediate src,
+ int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
if (is_int8(src.value_)) {
emit(0x83);
emit_modrm(subcode, dst);
@@ -566,9 +523,10 @@ void Assembler::immediate_arithmetic_op(byte subcode,
void Assembler::immediate_arithmetic_op(byte subcode,
const Operand& dst,
- Immediate src) {
+ Immediate src,
+ int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
if (is_int8(src.value_)) {
emit(0x83);
emit_operand(subcode, dst);
@@ -620,43 +578,6 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
}
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitl(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
void Assembler::immediate_arithmetic_op_8(byte subcode,
const Operand& dst,
Immediate src) {
@@ -674,8 +595,8 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
Immediate src) {
EnsureSpace ensure_space(this);
if (!dst.is_byte_register()) {
- // Use 64-bit mode byte registers.
- emit_rex_64(dst);
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(dst);
}
ASSERT(is_int8(src.value_) || is_uint8(src.value_));
emit(0x80);
@@ -684,15 +605,19 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
}
-void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
+void Assembler::shift(Register dst,
+ Immediate shift_amount,
+ int subcode,
+ int size) {
EnsureSpace ensure_space(this);
- ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
+ ASSERT(size == kInt64Size ? is_uint6(shift_amount.value_)
+ : is_uint5(shift_amount.value_));
if (shift_amount.value_ == 1) {
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xD1);
emit_modrm(subcode, dst);
} else {
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xC1);
emit_modrm(subcode, dst);
emit(shift_amount.value_);
@@ -700,38 +625,14 @@ void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
}
-void Assembler::shift(Register dst, int subcode) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xD3);
- emit_modrm(subcode, dst);
-}
-
-
-void Assembler::shift_32(Register dst, int subcode) {
+void Assembler::shift(Register dst, int subcode, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xD3);
emit_modrm(subcode, dst);
}
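
The immediate form above keeps the classic peephole: a shift by one uses the short D1 /r encoding, anything else uses C1 /r ib. For example (rdx chosen arbitrarily):

// shll(rdx, Immediate(1))  ->  D1 E2      shl edx, 1
// shll(rdx, Immediate(3))  ->  C1 E2 03   shl edx, 3
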
-void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(shift_amount.value_)); // illegal shift count
- if (shift_amount.value_ == 1) {
- emit_optional_rex_32(dst);
- emit(0xD1);
- emit_modrm(subcode, dst);
- } else {
- emit_optional_rex_32(dst);
- emit(0xC1);
- emit_modrm(subcode, dst);
- emit(shift_amount.value_);
- }
-}
-
-
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
@@ -750,6 +651,15 @@ void Assembler::bts(const Operand& dst, Register src) {
}
+void Assembler::bsrl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -934,33 +844,17 @@ void Assembler::cqo() {
}
-void Assembler::decq(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decl(Register dst) {
+void Assembler::emit_dec(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x1, dst);
}
-void Assembler::decl(const Operand& dst) {
+void Assembler::emit_dec(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(1, dst);
}
@@ -999,84 +893,43 @@ void Assembler::hlt() {
}
-void Assembler::idivq(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::idivl(Register src) {
+void Assembler::emit_idiv(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x7, src);
}
-void Assembler::imul(Register src) {
+void Assembler::emit_imul(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x5, src);
}
-void Assembler::imul(Register dst, Register src) {
+void Assembler::emit_imul(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::imull(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_modrm(dst, src);
}
-void Assembler::imull(Register dst, const Operand& src) {
+void Assembler::emit_imul(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_operand(dst, src);
}
-void Assembler::imull(Register dst, Register src, Immediate imm) {
+void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
if (is_int8(imm.value_)) {
emit(0x6B);
emit_modrm(dst, src);
@@ -1089,38 +942,22 @@ void Assembler::imull(Register dst, Register src, Immediate imm) {
}
-void Assembler::incq(Register dst) {
+void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x0, dst);
}
-void Assembler::incq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(const Operand& dst) {
+void Assembler::emit_inc(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(0, dst);
}
-void Assembler::incl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0, dst);
-}
-
-
void Assembler::int3() {
EnsureSpace ensure_space(this);
emit(0xCC);
@@ -1287,17 +1124,9 @@ void Assembler::jmp(const Operand& src) {
}
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::leal(Register dst, const Operand& src) {
+void Assembler::emit_lea(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x8D);
emit_operand(dst, src);
}
@@ -1305,9 +1134,19 @@ void Assembler::leal(Register dst, const Operand& src) {
void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA1);
- emitp(value, mode);
+ if (kPointerSize == kInt64Size) {
+ emit(0x48); // REX.W
+ emit(0xA1);
+ emitp(value, mode);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ emit(0xA1);
+ emitp(value, mode);
+ // In 64-bit mode, the 32-bit address operand needs to be zero-extended
+ // to 8 bytes. See Section 2.2.1.4 in the Intel 64 and IA-32 Architectures
+ // Software Developer's Manual, Volume 2.
+ emitl(0);
+ }
}
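
In long mode the A1 moffs form always carries an 8-byte absolute address, which is why the 4-byte pointer path pads with emitl(0). A sketch of the emitted bytes, assuming the hypothetical address 0x12345678:

// kPointerSize == 8:  48 A1 <8-byte address>        REX.W mov rax, moffs64
// kPointerSize == 4:  A1 78 56 34 12 00 00 00 00    address zero-extended to 8 bytes
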
@@ -1448,18 +1287,11 @@ void Assembler::emit_mov(const Operand& dst, Immediate value, int size) {
}
-void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
- // This method must not be used with heap object references. The stored
- // address is not GC safe. Use the handle version instead.
- ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
- if (RelocInfo::IsNone(rmode)) {
- movq(dst, reinterpret_cast<int64_t>(value));
- } else {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value, rmode);
- }
+void Assembler::movp(Register dst, void* value, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, kPointerSize);
+ emit(0xB8 | dst.low_bits());
+ emitp(value, rmode);
}
@@ -1499,15 +1331,12 @@ void Assembler::movl(const Operand& dst, Label* src) {
}
-void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
- AllowDeferredHandleDereference using_raw_address;
- ASSERT(!RelocInfo::IsNone(mode));
+void Assembler::movsxbl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- ASSERT(value->IsHeapObject());
- ASSERT(!isolate()->heap()->InNewSpace(*value));
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value.location(), mode);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBE);
+ emit_operand(dst, src);
}
@@ -1520,6 +1349,15 @@ void Assembler::movsxbq(Register dst, const Operand& src) {
}
+void Assembler::movsxwl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBF);
+ emit_operand(dst, src);
+}
+
+
void Assembler::movsxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
@@ -1545,7 +1383,7 @@ void Assembler::movsxlq(Register dst, const Operand& src) {
}
-void Assembler::movzxbq(Register dst, const Operand& src) {
+void Assembler::emit_movzxb(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
@@ -1556,26 +1394,10 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
}
-void Assembler::movzxbl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, const Operand& src) {
+void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1583,8 +1405,10 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
-void Assembler::movzxwl(Register dst, Register src) {
+void Assembler::emit_movzxw(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1607,17 +1431,10 @@ void Assembler::repmovsw() {
}
-void Assembler::repmovsl() {
+void Assembler::emit_repmovs(int size) {
EnsureSpace ensure_space(this);
emit(0xF3);
- emit(0xA5);
-}
-
-
-void Assembler::repmovsq() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_rex_64();
+ emit_rex(size);
emit(0xA5);
}
@@ -1630,23 +1447,15 @@ void Assembler::mul(Register src) {
}
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::negl(Register dst) {
+void Assembler::emit_neg(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x3, dst);
}
-void Assembler::neg(const Operand& dst) {
+void Assembler::emit_neg(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xF7);
@@ -1660,30 +1469,22 @@ void Assembler::nop() {
}
-void Assembler::not_(Register dst) {
+void Assembler::emit_not(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x2, dst);
}
-void Assembler::not_(const Operand& dst) {
+void Assembler::emit_not(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_operand(2, dst);
}
-void Assembler::notl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
void Assembler::Nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
@@ -1761,14 +1562,14 @@ void Assembler::Nop(int n) {
}
-void Assembler::pop(Register dst) {
+void Assembler::popq(Register dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x58 | dst.low_bits());
}
-void Assembler::pop(const Operand& dst) {
+void Assembler::popq(const Operand& dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x8F);
@@ -1782,14 +1583,14 @@ void Assembler::popfq() {
}
-void Assembler::push(Register src) {
+void Assembler::pushq(Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0x50 | src.low_bits());
}
-void Assembler::push(const Operand& src) {
+void Assembler::pushq(const Operand& src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0xFF);
@@ -1797,7 +1598,7 @@ void Assembler::push(const Operand& src) {
}
-void Assembler::push(Immediate value) {
+void Assembler::pushq(Immediate value) {
EnsureSpace ensure_space(this);
if (is_int8(value.value_)) {
emit(0x6A);
@@ -1809,7 +1610,7 @@ void Assembler::push(Immediate value) {
}
-void Assembler::push_imm32(int32_t imm32) {
+void Assembler::pushq_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
emit(0x68);
emitl(imm32);
@@ -1869,50 +1670,42 @@ void Assembler::shrd(Register dst, Register src) {
}
-void Assembler::xchgq(Register dst, Register src) {
+void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
- emit_rex_64(other);
+ emit_rex(other, size);
emit(0x90 | other.low_bits());
} else if (dst.low_bits() == 4) {
- emit_rex_64(dst, src);
+ emit_rex(dst, src, size);
emit(0x87);
emit_modrm(dst, src);
} else {
- emit_rex_64(src, dst);
+ emit_rex(src, dst, size);
emit(0x87);
emit_modrm(src, dst);
}
}
-void Assembler::xchgl(Register dst, Register src) {
+void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
- Register other = src.is(rax) ? dst : src;
- emit_optional_rex_32(other);
- emit(0x90 | other.low_bits());
- } else if (dst.low_bits() == 4) {
- emit_optional_rex_32(dst, src);
- emit(0x87);
- emit_modrm(dst, src);
+ if (kPointerSize == kInt64Size) {
+ emit(0x48); // REX.W
+ emit(0xA3);
+ emitp(dst, mode);
} else {
- emit_optional_rex_32(src, dst);
- emit(0x87);
- emit_modrm(src, dst);
+ ASSERT(kPointerSize == kInt32Size);
+ emit(0xA3);
+ emitp(dst, mode);
+ // In 64-bit mode, the 32-bit address operand needs to be zero-extended
+ // to 8 bytes. See Section 2.2.1.4 in the Intel 64 and IA-32 Architectures
+ // Software Developer's Manual, Volume 2.
+ emitl(0);
}
}
-void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
- EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA3);
- emitp(dst, mode);
-}
-
-
void Assembler::store_rax(ExternalReference ref) {
store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
}
@@ -1976,21 +1769,21 @@ void Assembler::testb(const Operand& op, Register reg) {
}
-void Assembler::testl(Register dst, Register src) {
+void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
+ emit_rex(src, dst, size);
emit(0x85);
emit_modrm(src, dst);
} else {
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x85);
emit_modrm(dst, src);
}
}
-void Assembler::testl(Register reg, Immediate mask) {
+void Assembler::emit_test(Register reg, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(reg, mask);
@@ -1998,10 +1791,11 @@ void Assembler::testl(Register reg, Immediate mask) {
}
EnsureSpace ensure_space(this);
if (reg.is(rax)) {
+ emit_rex(rax, size);
emit(0xA9);
emit(mask);
} else {
- emit_optional_rex_32(rax, reg);
+ emit_rex(reg, size);
emit(0xF7);
emit_modrm(0x0, reg);
emit(mask);
@@ -2009,69 +1803,28 @@ void Assembler::testl(Register reg, Immediate mask) {
}
-void Assembler::testl(const Operand& op, Immediate mask) {
+void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(op, mask);
return;
}
EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
+ emit_rex(rax, op, size);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
emit(mask);
}
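
Since TEST only reads its operands and sets flags, a mask that fits in eight bits can always be narrowed to the byte form, which is what the early-outs above exploit. For example:

// testl(rax, Immediate(0xFF)) degrades to testb(rax, Immediate(0xFF)):
//   A8 FF   (test al, 0xFF)   instead of   A9 FF 00 00 00
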
-void Assembler::testl(const Operand& op, Register reg) {
+void Assembler::emit_test(const Operand& op, Register reg, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, op);
+ emit_rex(reg, op, size);
emit(0x85);
emit_operand(reg, op);
}
-void Assembler::testq(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
- emit(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testq(Register dst, Immediate mask) {
- if (is_uint8(mask.value_)) {
- testb(dst, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- if (dst.is(rax)) {
- emit_rex_64();
- emit(0xA9);
- emit(mask);
- } else {
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0, dst);
- emit(mask);
- }
-}
-
-
// FPU instructions.
@@ -2435,6 +2188,7 @@ void Assembler::fnclex() {
void Assembler::sahf() {
// TODO(X64): Test for presence. Not all 64-bit Intel CPUs have SAHF
// in 64-bit mode. Test CPUID.
+ ASSERT(IsEnabled(SAHF));
EnsureSpace ensure_space(this);
emit(0x9E);
}
@@ -2788,6 +2542,16 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
}
+void Assembler::psllq(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit(0x0F);
+ emit(0x73);
+ emit_sse_operand(rsi, reg); // rsi == 6
+ emit(imm8);
+}
+
+
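
66 0F 73 /6 ib is PSLLQ xmm, imm8; the reg field of the ModR/M byte carries the opcode extension /6, so the general-purpose rsi (register code 6) is passed purely for its encoding. A worked example (sketch):

// psllq(xmm1, 32)  ->  66 0F 73 F1 20   shift both 64-bit lanes left by 32
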
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3028,6 +2792,16 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3130,16 +2904,10 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(!RelocInfo::IsNone(rmode));
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- // Don't record external references unless the heap will be serialized.
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
} else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
// Don't record pseudo relocation info for code age sequence mode.
return;
@@ -3171,6 +2939,20 @@ void Assembler::RecordComment(const char* msg, bool force) {
}
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
+}
+
+
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE |
@@ -3184,6 +2966,12 @@ bool RelocInfo::IsCodedSpecially() {
return (1 << rmode_) & kApplyMask;
}
+
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/chromium/v8/src/x64/assembler-x64.h b/chromium/v8/src/x64/assembler-x64.h
index 1f1316fa829..4259e9b50ef 100644
--- a/chromium/v8/src/x64/assembler-x64.h
+++ b/chromium/v8/src/x64/assembler-x64.h
@@ -37,34 +37,13 @@
#ifndef V8_X64_ASSEMBLER_X64_H_
#define V8_X64_ASSEMBLER_X64_H_
-#include "serialize.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
// Utility functions
-// Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -347,8 +326,8 @@ inline Condition NegateCondition(Condition cc) {
}
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cc) {
switch (cc) {
case below:
return above;
@@ -368,7 +347,7 @@ inline Condition ReverseCondition(Condition cc) {
return greater_equal;
default:
return cc;
- };
+ }
}
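
Commuting differs from negating: NegateCondition answers the opposite question about the same operand order, while CommuteCondition answers the same question with the operands swapped. A small sketch:

Condition cc = below;                       // tests a < b (unsigned)
Condition swapped = CommuteCondition(cc);   // == above: tests b > a, same truth value
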
@@ -378,6 +357,10 @@ inline Condition ReverseCondition(Condition cc) {
class Immediate BASE_EMBEDDED {
public:
explicit Immediate(int32_t value) : value_(value) {}
+ explicit Immediate(Smi* value) {
+ ASSERT(SmiValuesAre31Bits()); // Only available for 31-bit Smis.
+ value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
+ }
private:
int32_t value_;
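
With 31-bit Smis the whole tagged value fits in a 32-bit immediate, so a Smi comparison no longer needs a scratch register. A hedged sketch, assuming the usual __ masm shorthand and the Immediate overloads generated for cmp:

__ cmpp(rcx, Immediate(Smi::FromInt(42)));  // compare the tagged value directly
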
@@ -395,7 +378,7 @@ enum ScaleFactor {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
- times_pointer_size = times_8
+ times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
};
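
Tying times_pointer_size to kPointerSize keeps indexed element access portable across 8-byte and 4-byte pointer builds. A sketch, with base and index as hypothetical registers:

__ movp(rax, Operand(base, index, times_pointer_size, 0));  // load pointer-sized element
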
@@ -454,84 +437,39 @@ class Operand BASE_EMBEDDED {
};
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a CpuFeatureScope before use.
-// Example:
-// if (assembler->IsSupported(SSE3)) {
-// CpuFeatureScope fscope(assembler, SSE3);
-// // Generate SSE3 floating point code.
-// } else {
-// // Generate standard SSE2 floating point code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- if (Check(f, cross_compile_)) return true;
- ASSERT(initialized_);
- if (f == SSE3 && !FLAG_enable_sse3) return false;
- if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == SAHF && !FLAG_enable_sahf) return false;
- return Check(f, supported_);
- }
-
- static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, found_by_runtime_probing_only_);
- }
-
- static bool IsSafeForSnapshot(CpuFeature f) {
- return Check(f, cross_compile_) ||
- (IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
- }
-
- static bool VerifyCrossCompiling() {
- return cross_compile_ == 0;
- }
-
- static bool VerifyCrossCompiling(CpuFeature f) {
- uint64_t mask = flag2set(f);
- return cross_compile_ == 0 ||
- (cross_compile_ & mask) == mask;
- }
-
- private:
- static bool Check(CpuFeature f, uint64_t set) {
- return (set & flag2set(f)) != 0;
- }
-
- static uint64_t flag2set(CpuFeature f) {
- return static_cast<uint64_t>(1) << f;
- }
-
- // Safe defaults include CMOV for X64. It is always available, if
- // anyone checks, but they shouldn't need to check.
- // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
- // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
- static const uint64_t kDefaultCpuFeatures = (1 << CMOV);
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static uint64_t supported_;
- static uint64_t found_by_runtime_probing_only_;
-
- static uint64_t cross_compile_;
-
- friend class ExternalReference;
- friend class PlatformFeatureScope;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-#define ASSEMBLER_INSTRUCTION_LIST(V) \
- V(mov)
+#define ASSEMBLER_INSTRUCTION_LIST(V) \
+ V(add) \
+ V(and) \
+ V(cmp) \
+ V(dec) \
+ V(idiv) \
+ V(imul) \
+ V(inc) \
+ V(lea) \
+ V(mov) \
+ V(movzxb) \
+ V(movzxw) \
+ V(neg) \
+ V(not) \
+ V(or) \
+ V(repmovs) \
+ V(sbb) \
+ V(sub) \
+ V(test) \
+ V(xchg) \
+ V(xor)
+
+
+// Shift instructions on operands/registers with kPointerSize, kInt32Size and
+// kInt64Size.
+#define SHIFT_INSTRUCTION_LIST(V) \
+ V(rol, 0x0) \
+ V(ror, 0x1) \
+ V(rcl, 0x2) \
+ V(rcr, 0x3) \
+ V(shl, 0x4) \
+ V(shr, 0x5) \
+ V(sar, 0x7) \
class Assembler : public AssemblerBase {
@@ -576,8 +514,25 @@ class Assembler : public AssemblerBase {
// the absolute address of the target.
// These functions convert between absolute Addresses of Code objects and
// the relative displacements stored in the code.
- static inline Address target_address_at(Address pc);
- static inline void set_target_address_at(Address pc, Address target);
+ static inline Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ static inline void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -586,8 +541,17 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
+ }
+
+ static inline RelocInfo::Mode RelocInfoNone() {
+ if (kPointerSize == kInt64Size) {
+ return RelocInfo::NONE64;
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ return RelocInfo::NONE32;
+ }
}
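
RelocInfoNone() picks the "no relocation" mode matching the pointer width, as in the PatchCodeWithCall site earlier in this diff:

patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
                     Assembler::RelocInfoNone());
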
inline Handle<Object> code_target_object_handle_at(Address pc);
@@ -658,11 +622,26 @@ class Assembler : public AssemblerBase {
// - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
// - Instructions on 32-bit (doubleword) operands/registers use 'l'.
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
- //
- // Some mnemonics, such as "and", are the same as C++ keywords.
- // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
+ // - Instructions on operands/registers with pointer size use 'p'.
+
+ STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
#define DECLARE_INSTRUCTION(instruction) \
+ template<class P1> \
+ void instruction##p(P1 p1) { \
+ emit_##instruction(p1, kPointerSize); \
+ } \
+ \
+ template<class P1> \
+ void instruction##l(P1 p1) { \
+ emit_##instruction(p1, kInt32Size); \
+ } \
+ \
+ template<class P1> \
+ void instruction##q(P1 p1) { \
+ emit_##instruction(p1, kInt64Size); \
+ } \
+ \
template<class P1, class P2> \
void instruction##p(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kPointerSize); \
@@ -676,6 +655,21 @@ class Assembler : public AssemblerBase {
template<class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##p(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kPointerSize); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##l(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt32Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##q(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
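
Each entry in ASSEMBLER_INSTRUCTION_LIST therefore fans out into an l/q/p mnemonic family over one size-parameterized emitter; for V(add):

// addl(rax, rbx)  ->  emit_add(rax, rbx, kInt32Size)    32-bit add
// addq(rax, rbx)  ->  emit_add(rax, rbx, kInt64Size)    64-bit add
// addp(rax, rbx)  ->  emit_add(rax, rbx, kPointerSize)  pointer-sized add
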
@@ -692,15 +686,15 @@ class Assembler : public AssemblerBase {
void pushfq();
void popfq();
- void push(Immediate value);
+ void pushq(Immediate value);
// Push a 32 bit integer, and guarantee that it is actually pushed as a
// 32 bit value; the normal push will optimize the 8 bit case.
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
+ void pushq_imm32(int32_t imm32);
+ void pushq(Register src);
+ void pushq(const Operand& src);
- void pop(Register dst);
- void pop(const Operand& dst);
+ void popq(Register dst);
+ void popq(const Operand& dst);
void enter(Immediate size);
void leave();
@@ -722,28 +716,26 @@ class Assembler : public AssemblerBase {
void movl(const Operand& dst, Label* src);
// Loads a pointer into a register with a relocation mode.
- void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
+ void movp(Register dst, void* ptr, RelocInfo::Mode rmode);
+
// Loads a 64-bit immediate into a register.
void movq(Register dst, int64_t value);
void movq(Register dst, uint64_t value);
- void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
+ void movsxbl(Register dst, const Operand& src);
void movsxbq(Register dst, const Operand& src);
+ void movsxwl(Register dst, const Operand& src);
void movsxwq(Register dst, const Operand& src);
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
- void movzxbq(Register dst, const Operand& src);
- void movzxbl(Register dst, const Operand& src);
- void movzxwq(Register dst, const Operand& src);
- void movzxwl(Register dst, const Operand& src);
- void movzxwl(Register dst, Register src);
// Repeated moves.
void repmovsb();
void repmovsw();
- void repmovsl();
- void repmovsq();
+ void repmovsp() { emit_repmovs(kPointerSize); }
+ void repmovsl() { emit_repmovs(kInt32Size); }
+ void repmovsq() { emit_repmovs(kInt64Size); }
// Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
@@ -755,59 +747,6 @@ class Assembler : public AssemblerBase {
void cmovl(Condition cc, Register dst, Register src);
void cmovl(Condition cc, Register dst, const Operand& src);
- // Exchange two registers
- void xchgq(Register dst, Register src);
- void xchgl(Register dst, Register src);
-
- // Arithmetics
- void addl(Register dst, Register src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(Register dst, const Operand& src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(const Operand& dst, Register src) {
- arithmetic_op_32(0x01, src, dst);
- }
-
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(Register dst, const Operand& src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(const Operand& dst, Register src) {
- arithmetic_op(0x01, src, dst);
- }
-
- void addq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void addq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void sbbl(Register dst, Register src) {
- arithmetic_op_32(0x1b, dst, src);
- }
-
- void sbbq(Register dst, Register src) {
- arithmetic_op(0x1b, dst, src);
- }
-
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -815,15 +754,15 @@ class Assembler : public AssemblerBase {
void cmpb_al(Immediate src);
void cmpb(Register dst, Register src) {
- arithmetic_op(0x3A, dst, src);
+ arithmetic_op_8(0x3A, dst, src);
}
void cmpb(Register dst, const Operand& src) {
- arithmetic_op(0x3A, dst, src);
+ arithmetic_op_8(0x3A, dst, src);
}
void cmpb(const Operand& dst, Register src) {
- arithmetic_op(0x38, src, dst);
+ arithmetic_op_8(0x38, src, dst);
}
void cmpb(const Operand& dst, Immediate src) {
@@ -850,86 +789,10 @@ class Assembler : public AssemblerBase {
arithmetic_op_16(0x39, src, dst);
}
- void cmpl(Register dst, Register src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, src, dst);
- }
-
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpq(Register dst, Register src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(Register dst, const Operand& src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(const Operand& dst, Register src) {
- arithmetic_op(0x39, src, dst);
- }
-
- void cmpq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void cmpq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void and_(Register dst, Register src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(Register dst, const Operand& src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(const Operand& dst, Register src) {
- arithmetic_op(0x21, src, dst);
- }
-
- void and_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void and_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void andl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x4, dst, src);
- }
-
- void andl(Register dst, Register src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andl(Register dst, const Operand& src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
- void decq(Register dst);
- void decq(const Operand& dst);
- void decl(Register dst);
- void decl(const Operand& dst);
void decb(Register dst);
void decb(const Operand& dst);
@@ -938,108 +801,35 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax.
void cdq();
- // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idivq(Register src);
- // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
- void idivl(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // rdx:rax = rax * src.
- void imul(Register dst, Register src); // dst = dst * src.
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
- // Signed 32-bit multiply instructions.
- void imull(Register dst, Register src); // dst = dst * src.
- void imull(Register dst, const Operand& src); // dst = dst * src.
- void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
-
- void incq(Register dst);
- void incq(const Operand& dst);
- void incl(Register dst);
- void incl(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
- void leal(Register dst, const Operand& src);
-
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
- void neg(Register dst);
- void neg(const Operand& dst);
- void negl(Register dst);
-
- void not_(Register dst);
- void not_(const Operand& dst);
- void notl(Register dst);
-
- void or_(Register dst, Register src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, Register src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(Register dst, const Operand& src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, const Operand& src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(const Operand& dst, Register src) {
- arithmetic_op(0x09, src, dst);
- }
-
- void orl(const Operand& dst, Register src) {
- arithmetic_op_32(0x09, src, dst);
- }
-
- void or_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
- void or_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
-
- void rcl(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x2);
- }
-
- void rol(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x0);
- }
-
- void roll(Register dst, Immediate imm8) {
- shift_32(dst, imm8, 0x0);
- }
-
- void rcr(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x3);
- }
-
- void ror(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x1);
- }
-
- void rorl(Register dst, Immediate imm8) {
- shift_32(dst, imm8, 0x1);
- }
-
- void rorl_cl(Register dst) {
- shift_32(dst, 0x1);
- }
+#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
+ void instruction##p(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kPointerSize); \
+ } \
+ \
+ void instruction##l(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt64Size); \
+ } \
+ \
+ void instruction##p_cl(Register dst) { \
+ shift(dst, subcode, kPointerSize); \
+ } \
+ \
+ void instruction##l_cl(Register dst) { \
+ shift(dst, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q_cl(Register dst) { \
+ shift(dst, subcode, kInt64Size); \
+ }
+ SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
+#undef DECLARE_SHIFT_INSTRUCTION
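
Likewise each SHIFT_INSTRUCTION_LIST entry yields immediate and cl-count forms in all three widths; from V(shr, 0x5):

// shrl(rdx, Immediate(2))  ->  shift(rdx, Immediate(2), 0x5, kInt32Size)
// shrq_cl(rcx)             ->  shift(rcx, 0x5, kInt64Size)
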
// Shifts dst:src left by cl bits, affecting only dst.
void shld(Register dst, Register src);
@@ -1047,103 +837,9 @@ class Assembler : public AssemblerBase {
// Shifts src:dst right by cl bits, affecting only dst.
void shrd(Register dst, Register src);
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sar(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sarl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sar_cl(Register dst) {
- shift(dst, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sarl_cl(Register dst) {
- shift_32(dst, 0x7);
- }
-
- void shl(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x4);
- }
-
- void shl_cl(Register dst) {
- shift(dst, 0x4);
- }
-
- void shll_cl(Register dst) {
- shift_32(dst, 0x4);
- }
-
- void shll(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x4);
- }
-
- void shr(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x5);
- }
-
- void shr_cl(Register dst) {
- shift(dst, 0x5);
- }
-
- void shrl_cl(Register dst) {
- shift_32(dst, 0x5);
- }
-
- void shrl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x5);
- }
-
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
- void subq(Register dst, Register src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(Register dst, const Operand& src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(const Operand& dst, Register src) {
- arithmetic_op(0x29, src, dst);
- }
-
- void subq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subl(Register dst, Register src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(Register dst, const Operand& src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(const Operand& dst, Register src) {
- arithmetic_op_32(0x29, src, dst);
- }
-
- void subl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
void subb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x5, dst, src);
}
@@ -1152,61 +848,11 @@ class Assembler : public AssemblerBase {
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
- void testl(Register dst, Register src);
- void testl(Register reg, Immediate mask);
- void testl(const Operand& op, Register reg);
- void testl(const Operand& op, Immediate mask);
- void testq(const Operand& op, Register reg);
- void testq(Register dst, Register src);
- void testq(Register dst, Immediate mask);
-
- void xor_(Register dst, Register src) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
- }
-
- void xorl(Register dst, Register src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, const Operand& src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xorl(const Operand& dst, Register src) {
- arithmetic_op_32(0x31, src, dst);
- }
-
- void xorl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xor_(Register dst, const Operand& src) {
- arithmetic_op(0x33, dst, src);
- }
-
- void xor_(const Operand& dst, Register src) {
- arithmetic_op(0x31, src, dst);
- }
-
- void xor_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- void xor_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
+ void bsrl(Register dst, Register src);
// Miscellaneous
void clc();
@@ -1252,9 +898,6 @@ class Assembler : public AssemblerBase {
// Call near absolute indirect, address in register
void call(Register adr);
- // Call near indirect
- void call(const Operand& operand);
-
// Jumps
// Jump short or near relative.
// Use a 32-bit signed displacement.
@@ -1266,9 +909,6 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
// Conditional jumps
void j(Condition cc,
Label* L,
@@ -1399,6 +1039,8 @@ class Assembler : public AssemblerBase {
void movapd(XMMRegister dst, XMMRegister src);
+ void psllq(XMMRegister reg, byte imm8);
+
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
@@ -1427,6 +1069,7 @@ class Assembler : public AssemblerBase {
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
+ void sqrtsd(XMMRegister dst, const Operand& src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
@@ -1464,6 +1107,12 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg, bool force = false);
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -1491,6 +1140,13 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ protected:
+ // Call near indirect
+ void call(const Operand& operand);
+
+ // Jump near absolute indirect (m64)
+ void jmp(const Operand& src);
+
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1597,6 +1253,14 @@ class Assembler : public AssemblerBase {
// numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op);
+ void emit_rex(int size) {
+ if (size == kInt64Size) {
+ emit_rex_64();
+ } else {
+ ASSERT(size == kInt32Size);
+ }
+ }
+
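
These emit_rex helpers fold the old emit_rex_64 / emit_optional_rex_32 pairs into one size switch: kInt64Size always forces REX.W, while kInt32Size emits a REX byte only when an extended register demands it. Worked examples (sketch):

// emit_rex(rax, kInt64Size)  ->  48   (REX.W)
// emit_rex(r8,  kInt32Size)  ->  41   (REX.B, extended register)
// emit_rex(rax, kInt32Size)  ->  (no byte emitted)
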
template<class P1>
void emit_rex(P1 p1, int size) {
if (size == kInt64Size) {
@@ -1655,14 +1319,16 @@ class Assembler : public AssemblerBase {
// AND, OR, XOR, or CMP. The encodings of these operations are all
// similar, differing just in the opcode or in the reg field of the
// ModR/M byte.
+ void arithmetic_op_8(byte opcode, Register reg, Register rm_reg);
+ void arithmetic_op_8(byte opcode, Register reg, const Operand& rm_reg);
void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
- void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
- void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
+ // Operate on operands/registers with pointer, 32-bit, or 64-bit size.
+ void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
+ void arithmetic_op(byte opcode,
+ Register reg,
+ const Operand& rm_reg,
+ int size);
// Operate on a byte in memory or register.
void immediate_arithmetic_op_8(byte subcode,
Register dst,
@@ -1677,20 +1343,20 @@ class Assembler : public AssemblerBase {
void immediate_arithmetic_op_16(byte subcode,
const Operand& dst,
Immediate src);
- // Operate on a 32-bit word in memory or register.
- void immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src);
+ // Operate on operands/registers with pointer, 32-bit, or 64-bit size.
+ void immediate_arithmetic_op(byte subcode,
+ Register dst,
+ Immediate src,
+ int size);
+ void immediate_arithmetic_op(byte subcode,
+ const Operand& dst,
+ Immediate src,
+ int size);
// Emit machine code for a shift operation.
- void shift(Register dst, Immediate shift_amount, int subcode);
- void shift_32(Register dst, Immediate shift_amount, int subcode);
+ void shift(Register dst, Immediate shift_amount, int subcode, int size);
// Shift dst by cl % 64 bits.
- void shift(Register dst, int subcode);
- void shift_32(Register dst, int subcode);
+ void shift(Register dst, int subcode, int size);
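A note on the subcode parameter that survives the merge of shift and shift_32: the x64 group-2 shift opcodes (0xC1 with an imm8, 0xD3 for shift-by-cl) select the actual operation through the reg field of the ModR/M byte, which is why one emitter can cover rol, ror, shl, shr, and sar. A hedged sketch of that encoding, restricted to the low eight registers so no REX.B handling is needed (constants per the Intel SDM, not code from this patch):

    #include <cstdint>
    #include <vector>

    // Group-2 subcodes (ModR/M reg field): 0=rol, 1=ror, 4=shl, 5=shr, 7=sar.
    void emit_shift_imm(std::vector<uint8_t>* buf, int reg_low3, int subcode,
                        uint8_t amount, bool is64) {
      if (is64) buf->push_back(0x48);                    // REX.W prefix
      buf->push_back(0xC1);                              // shift r/m, imm8
      buf->push_back(0xC0 | (subcode << 3) | reg_low3);  // mod=11, reg=subcode
      buf->push_back(amount);  // e.g. 48 C1 E0 04 encodes shl rax, 4
    }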
void emit_farith(int b1, int b2, int i);
@@ -1701,12 +1367,183 @@ class Assembler : public AssemblerBase {
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ // Arithmetic operations.
+ void emit_add(Register dst, Register src, int size) {
+ arithmetic_op(0x03, dst, src, size);
+ }
+
+ void emit_add(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x0, dst, src, size);
+ }
+
+ void emit_add(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x03, dst, src, size);
+ }
+
+ void emit_add(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x1, src, dst, size);
+ }
+
+ void emit_add(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x0, dst, src, size);
+ }
+
+ void emit_and(Register dst, Register src, int size) {
+ arithmetic_op(0x23, dst, src, size);
+ }
+
+ void emit_and(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x23, dst, src, size);
+ }
+
+ void emit_and(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x21, src, dst, size);
+ }
+
+ void emit_and(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x4, dst, src, size);
+ }
+
+ void emit_and(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x4, dst, src, size);
+ }
+
+ void emit_cmp(Register dst, Register src, int size) {
+ arithmetic_op(0x3B, dst, src, size);
+ }
+
+ void emit_cmp(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x3B, dst, src, size);
+ }
+
+ void emit_cmp(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x39, src, dst, size);
+ }
+
+ void emit_cmp(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x7, dst, src, size);
+ }
+
+ void emit_cmp(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x7, dst, src, size);
+ }
+
+ void emit_dec(Register dst, int size);
+ void emit_dec(const Operand& dst, int size);
+
+ // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
+ // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
+ // when size is 32.
+ void emit_idiv(Register src, int size);
+
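A usage note for emit_idiv: the hardware idiv instruction takes a double-width dividend in rdx:rax (edx:eax in the 32-bit form), so callers are expected to sign-extend first. A hypothetical call sequence in MacroAssembler style (illustrative only; cqo sign-extends rax into rdx:rax):

    //   __ movp(rax, dividend);
    //   __ cqo();              // rdx:rax = sign-extended rax
    //   (then emit_idiv(divisor, kInt64Size): rax = quotient, rdx = remainder)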
+ // Signed multiply instructions.
+ // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
+ void emit_imul(Register src, int size);
+ void emit_imul(Register dst, Register src, int size);
+ void emit_imul(Register dst, const Operand& src, int size);
+ void emit_imul(Register dst, Register src, Immediate imm, int size);
+
+ void emit_inc(Register dst, int size);
+ void emit_inc(const Operand& dst, int size);
+
+ void emit_lea(Register dst, const Operand& src, int size);
+
void emit_mov(Register dst, const Operand& src, int size);
void emit_mov(Register dst, Register src, int size);
void emit_mov(const Operand& dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
void emit_mov(const Operand& dst, Immediate value, int size);
+ void emit_movzxb(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, Register src, int size);
+
+ void emit_neg(Register dst, int size);
+ void emit_neg(const Operand& dst, int size);
+
+ void emit_not(Register dst, int size);
+ void emit_not(const Operand& dst, int size);
+
+ void emit_or(Register dst, Register src, int size) {
+ arithmetic_op(0x0B, dst, src, size);
+ }
+
+ void emit_or(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x0B, dst, src, size);
+ }
+
+ void emit_or(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x9, src, dst, size);
+ }
+
+ void emit_or(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x1, dst, src, size);
+ }
+
+ void emit_or(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x1, dst, src, size);
+ }
+
+ void emit_repmovs(int size);
+
+ void emit_sbb(Register dst, Register src, int size) {
+ arithmetic_op(0x1b, dst, src, size);
+ }
+
+ void emit_sub(Register dst, Register src, int size) {
+ arithmetic_op(0x2B, dst, src, size);
+ }
+
+ void emit_sub(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x5, dst, src, size);
+ }
+
+ void emit_sub(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x2B, dst, src, size);
+ }
+
+ void emit_sub(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x29, src, dst, size);
+ }
+
+ void emit_sub(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x5, dst, src, size);
+ }
+
+ void emit_test(Register dst, Register src, int size);
+ void emit_test(Register reg, Immediate mask, int size);
+ void emit_test(const Operand& op, Register reg, int size);
+ void emit_test(const Operand& op, Immediate mask, int size);
+
+ // Exchange two registers
+ void emit_xchg(Register dst, Register src, int size);
+
+ void emit_xor(Register dst, Register src, int size) {
+ if (size == kInt64Size && dst.code() == src.code()) {
+ // 32-bit operations zero the top 32 bits of 64-bit registers. Therefore
+ // there is no need to make this a 64-bit operation.
+ arithmetic_op(0x33, dst, src, kInt32Size);
+ } else {
+ arithmetic_op(0x33, dst, src, size);
+ }
+ }
+
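The special case above leans on an x64 architectural guarantee: any write to a 32-bit register zeroes the upper 32 bits of the corresponding 64-bit register, so the two-byte encoding 31 C0 (xorl eax, eax) clears rax just as completely as the three-byte REX.W form 48 31 C0. A self-contained sketch of the same peephole, restricted to the low eight registers (illustrative only):

    #include <cstdint>
    #include <vector>

    // Emit the shortest xor that clears a full 64-bit register.
    void emit_clear(std::vector<uint8_t>* buf, int reg_low3) {
      buf->push_back(0x31);                               // xor r/m32, r32
      buf->push_back(0xC0 | (reg_low3 << 3) | reg_low3);  // reg == rm == target
    }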
+ void emit_xor(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x33, dst, src, size);
+ }
+
+ void emit_xor(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x6, dst, src, size);
+ }
+
+ void emit_xor(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x6, dst, src, size);
+ }
+
+ void emit_xor(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x31, src, dst, size);
+ }
+
friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
diff --git a/chromium/v8/src/x64/builtins-x64.cc b/chromium/v8/src/x64/builtins-x64.cc
index f4864f899ef..fa359c5bc6e 100644
--- a/chromium/v8/src/x64/builtins-x64.cc
+++ b/chromium/v8/src/x64/builtins-x64.cc
@@ -1,37 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -60,7 +38,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
if (extra_args == NEEDS_CALLED_FUNCTION) {
num_extra_args = 1;
__ PopReturnAddressTo(kScratchRegister);
- __ push(rdi);
+ __ Push(rdi);
__ PushReturnAddressFrom(kScratchRegister);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
@@ -68,40 +46,42 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
- __ addq(rax, Immediate(num_extra_args + 1));
+ __ addp(rax, Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ __ Push(rdi);
// Function is also the parameter to the runtime call.
- __ push(rdi);
+ __ Push(rdi);
__ CallRuntime(function_id, 1);
- // Restore call kind information.
- __ pop(rcx);
// Restore receiver.
- __ pop(rdi);
+ __ Pop(rdi);
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
__ jmp(kScratchRegister);
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ leap(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
+}
+
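GenerateTailCallToReturnedCode assumes the calling convention that the runtime leaves a Code heap object in rax; its first instruction sits at the untagged object address plus Code::kHeaderSize, which is exactly what FieldOperand(rax, Code::kHeaderSize) computes. The same arithmetic in plain C++ (kHeapObjectTag is 1 in V8; the header size shown is a hypothetical placeholder):

    #include <cstdint>

    uintptr_t CodeEntry(uintptr_t tagged_code_ptr) {
      const uintptr_t kHeapObjectTag = 1;      // V8 tags heap pointers with 1
      const uintptr_t kCodeHeaderSize = 0x60;  // hypothetical Code::kHeaderSize
      return tagged_code_ptr - kHeapObjectTag + kCodeHeaderSize;
    }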
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -111,43 +91,41 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool create_memento) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
+ // -- rbx: allocation site or undefined
// -----------------------------------
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(rbx);
+ __ Push(rbx);
+ }
+
// Store a smi-tagged arguments count on the stack.
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
// Push the function to invoke on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
@@ -155,18 +133,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (FLAG_inline_new) {
Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
-#endif
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
// rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// The Smi check below catches both a NULL pointer and a Smi.
ASSERT(kSmiTag == 0);
__ JumpIfSmi(rax, &rt_call);
@@ -183,30 +159,42 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpInstanceType(rax, JS_FUNCTION_TYPE);
__ j(equal, &rt_call);
- if (count_constructions) {
+ if (!is_api_function) {
Label allocate;
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCount::kShift +
+ Map::ConstructionCount::kSize == 32);
+ // Check if slack tracking is enabled.
+ __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
+ __ shrl(rsi, Immediate(Map::ConstructionCount::kShift));
+ __ j(zero, &allocate); // JSFunction::kNoSlackTracking
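      // Note: the two STATIC_ASSERTs above are what make this single shift
      // sufficient. ConstructionCount occupies the topmost bits of the 32-bit
      // bit_field3, so shrl leaves only the counter in rsi; with a
      // hypothetical kShift of 26, for example, bit_field3 == 0x08000000
      // decodes to a construction count of 2.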
// Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ decb(FieldOperand(rcx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
+ __ subl(FieldOperand(rax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCount::kShift));
- __ push(rax);
- __ push(rdi);
+ __ cmpl(rsi, Immediate(JSFunction::kFinishSlackTracking));
+ __ j(not_equal, &allocate);
- __ push(rdi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ Push(rax);
+ __ Push(rdi);
- __ pop(rdi);
- __ pop(rax);
+ __ Push(rdi); // constructor
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ xorl(rsi, rsi); // JSFunction::kNoSlackTracking
__ bind(&allocate);
}
// Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
+ __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shlp(rdi, Immediate(kPointerSizeLog2));
+ if (create_memento) {
+ __ addp(rdi, Immediate(AllocationMemento::kSize));
+ }
// rdi: size of new object
__ Allocate(rdi,
rbx,
@@ -214,35 +202,60 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
no_reg,
&rt_call,
NO_ALLOCATION_FLAGS);
+ Factory* factory = masm->isolate()->factory();
// Allocated the JSObject; now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ // rdi: start of next object (including memento if create_memento)
+ __ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
// Set extra fields in the newly allocated object.
// rax: initial map
// rbx: JSObject
- // rdi: start of next object
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ // rdi: start of next object (including memento if create_memento)
+ // rsi: slack tracking counter (non-API function case)
+ __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ movzxbq(rsi,
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ cmpl(rsi, Immediate(JSFunction::kNoSlackTracking));
+ __ j(equal, &no_inobject_slack_tracking);
+
+ // Allocate object with a slack.
+ __ movzxbp(rsi,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(rsi,
+ __ leap(rsi,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
+ __ cmpp(rsi, rdi);
__ Assert(less_equal,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+ // Fill the remaining fields with the one-pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+ if (create_memento) {
+ __ leap(rsi, Operand(rdi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+
+ // Fill in memento fields if necessary.
+ // rsi: points to the allocated but uninitialized memento.
+ __ Move(Operand(rsi, AllocationMemento::kMapOffset),
+ factory->allocation_memento_map());
+ // Get the cell or undefined.
+ __ movp(rdx, Operand(rsp, kPointerSize*2));
+ __ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset), rdx);
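      // Note: an AllocationMemento is just two words, a map and a pointer
      // back to its AllocationSite, so AllocationMemento::kSize should equal
      // 2 * kPointerSize; the Move/movp pair above initializes exactly those
      // two fields.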
+ } else {
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
}
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -251,7 +264,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rax: initial map
// rbx: JSObject
// rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
+ __ orp(rbx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -259,13 +272,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rbx: JSObject
// rdi: start of next object
// Calculate the total number of properties described by the map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx,
+ __ movzxbp(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbp(rcx,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
// Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
+ __ movzxbp(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ __ subp(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
__ Assert(positive, kPropertyAllocationCountFailed);
@@ -290,9 +303,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
+ __ movp(Operand(rdi, HeapObject::kMapOffset), rcx); // set up the map
__ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+ __ movp(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject
@@ -301,13 +314,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
{ Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
+ __ movp(Operand(rcx, 0), rdx);
+ __ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(below, &loop);
}
@@ -315,8 +328,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// the JSObject
// rbx: JSObject
// rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+ __ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
// Continue with JSObject being successfully allocated
@@ -334,62 +347,93 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
- // Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ movp(rdi, Operand(rsp, kPointerSize*2));
+ __ Push(rdi);
+ offset = kPointerSize;
+ }
+
+ // Must restore rsi (context) and rdi (constructor) before calling runtime.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rdi, Operand(rsp, offset));
+ __ Push(rdi);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
+ __ movp(rbx, rax); // store result in rbx
+
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't increment the create
+ // count.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);
+
+ if (create_memento) {
+ __ movp(rcx, Operand(rsp, kPointerSize*2));
+ __ Cmp(rcx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // rcx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ SmiAddConstant(
+ FieldOperand(rcx, AllocationSite::kPretenureCreateCountOffset),
+ Smi::FromInt(1));
+ __ bind(&count_incremented);
+ }
+
// Retrieve the function from the stack.
- __ pop(rdi);
+ __ Pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
__ SmiToInteger32(rax, rax);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
+ __ Push(rbx);
+ __ Push(rbx);
// Set up pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
- __ decq(rcx);
+ __ decp(rcx);
__ j(greater_equal, &loop);
// Call the function.
if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
@@ -407,11 +451,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
// Restore the arguments count and leave the construct frame.
__ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
// Leave construct frame.
}
@@ -419,7 +463,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1);
@@ -427,13 +471,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
@@ -475,19 +514,19 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
+ __ Push(rdx);
+ __ Push(r8);
// Load the number of arguments and set up the pointer to the arguments.
- __ movq(rax, r9);
+ __ movp(rax, r9);
// Load the previous frame pointer to access C argument on stack
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ __ movp(kScratchRegister, Operand(rbp, 0));
+ __ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
- __ movq(rdi, rdx);
+ __ movp(rdi, rdx);
#else // _WIN64
// GCC parameters in:
// rdi : entry (ignored)
@@ -496,7 +535,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// rcx : argc
// r8 : argv
- __ movq(rdi, rsi);
+ __ movp(rdi, rsi);
// rdi : function
// Clear the context before we push it when entering the internal frame.
@@ -505,13 +544,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ Push(rdi);
+ __ Push(rdx);
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and set up the pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
+ __ movp(rax, rcx);
+ __ movp(rbx, r8);
#endif // _WIN64
// Current stack contents:
@@ -531,27 +570,24 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Set(rcx, 0); // Set loop variable to 0.
__ jmp(&entry);
__ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
+ __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addp(rcx, Immediate(1));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
// Expects rdi to hold function pointer.
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
__ CallStub(&stub);
} else {
ParameterCount actual(rax);
// Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
}
// Exit the internal frame. Notice that this also removes the empty
// context and the function left on the stack by the code
@@ -573,19 +609,37 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+static void CallCompileOptimized(MacroAssembler* masm,
+ bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ Push(rdi);
+ // Function is also the parameter to the runtime call.
+ __ Push(rdi);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+ // Restore receiver.
+ __ Pop(rdi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
}
@@ -598,15 +652,15 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// Re-execute the code that was patched back to the young age when
// the stub returns.
- __ subq(Operand(rsp, 0), Immediate(5));
+ __ subp(Operand(rsp, 0), Immediate(5));
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1);
+ __ PrepareCallCFunction(2);
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ Popad();
__ ret(0);
@@ -633,23 +687,23 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// pointers.
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1);
+ __ PrepareCallCFunction(2);
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
- 1);
+ 2);
}
__ Popad();
// Perform prologue operations usually performed by the young code stub.
__ PopReturnAddressTo(kScratchRegister);
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ pushq(rbp); // Caller's frame pointer.
+ __ movp(rbp, rsp);
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
__ PushReturnAddressFrom(kScratchRegister);
// Jump to point after the code-age stub.
@@ -672,12 +726,12 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ Popad();
// Tear down internal frame.
}
- __ pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ DropUnderReturnAddress(1); // Ignore state offset
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
@@ -701,7 +755,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
// Tear down internal frame.
}
@@ -710,13 +764,13 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+ __ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -753,12 +807,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
//
// 1. Make sure we have at least one argument.
{ Label done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &done);
__ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
__ PushReturnAddressFrom(rbx);
- __ incq(rax);
+ __ incp(rax);
__ bind(&done);
}
@@ -766,7 +820,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// if it is a function.
Label slow, non_function;
StackArgumentsAccessor args(rsp, rax);
- __ movq(rdi, args.GetReceiverOperand());
+ __ movp(rdi, args.GetReceiverOperand());
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
@@ -776,10 +830,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Set(rdx, 0); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &shift_arguments);
@@ -790,8 +844,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_zero, &shift_arguments);
- // Compute the receiver in non-strict mode.
- __ movq(rbx, args.GetArgumentOperand(1));
+ // Compute the receiver in sloppy mode.
+ __ movp(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
@@ -808,33 +862,28 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
- __ pop(rax);
+ __ Pop(rax);
__ SmiToInteger32(rax, rax);
}
// Restore the function to rdi.
- __ movq(rdi, args.GetReceiverOperand());
+ __ movp(rdi, args.GetReceiverOperand());
__ jmp(&patch_receiver, Label::kNear);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rbx,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
- __ movq(args.GetArgumentOperand(1), rbx);
+ __ movp(args.GetArgumentOperand(1), rbx);
__ jmp(&shift_arguments);
}
@@ -851,37 +900,37 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
- __ movq(args.GetArgumentOperand(1), rdi);
+ __ movp(args.GetArgumentOperand(1), rdi);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
__ bind(&shift_arguments);
{ Label loop;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
+ StackArgumentsAccessor args(rsp, rcx);
__ bind(&loop);
- __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ movp(args.GetArgumentOperand(0), rbx);
+ __ decp(rcx);
+ __ j(not_zero, &loop); // While non-zero.
+ __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
+ __ decp(rax); // One fewer argument (first argument is new receiver).
}
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ testq(rdx, rdx);
+ __ testp(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rdx, Immediate(1));
+ __ cmpp(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
__ PopReturnAddressTo(rdx);
- __ push(rdi); // re-add proxy object as additional argument
+ __ Push(rdi); // re-add proxy object as additional argument
__ PushReturnAddressFrom(rdx);
- __ incq(rax);
+ __ incp(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -896,20 +945,17 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 5b. Get the code to call from the function and check that the number of
// expected arguments matches what we're providing. If so, jump
// (tail-call) to the code in register rdx without checking arguments.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rbx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rax, rbx);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(rbx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ cmpp(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
@@ -931,8 +977,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(Operand(rbp, kArgumentsOffset));
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
// Check the stack for overflow. We are not trying to catch
@@ -940,21 +986,21 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// limit" is checked.
Label okay;
__ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
+ __ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already have overflowed
// here, which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
+ __ subp(rcx, kScratchRegister);
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
+ __ cmpp(rcx, rdx);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(rax);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
// End of stack check.
@@ -962,24 +1008,24 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
+ __ Push(rax); // limit
+ __ Push(Immediate(0)); // index
// Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
+ __ movp(rbx, Operand(rbp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &push_receiver);
// Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &push_receiver);
@@ -989,7 +1035,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(rbx, &call_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
@@ -1004,30 +1050,26 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Convert the receiver to an object.
__ bind(&call_to_object);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&push_receiver, Label::kNear);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rbx,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
__ bind(&push_receiver);
- __ push(rbx);
+ __ Push(rbx);
// Copy all arguments from the array to the stack.
Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
+ __ movp(rax, Operand(rbp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
+ __ movp(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
Handle<Code> ic =
@@ -1039,36 +1081,34 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// case, we know that we are not generating a test instruction next.
// Push the nth argument.
- __ push(rax);
+ __ Push(rax);
// Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
+ __ movp(rax, Operand(rbp, kIndexOffset));
__ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
+ __ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ cmpp(rax, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(rax);
__ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
+ __ Push(rdi); // add function proxy as last argument
+ __ incp(rax);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1092,7 +1132,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// The initial map for the builtin InternalArray functions should be a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// The Smi check below catches both a NULL pointer and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
@@ -1122,7 +1162,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// The initial map for the builtin Array functions should be a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// The Smi check below catches both a NULL pointer and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
@@ -1133,10 +1173,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// Tail call a stub.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1155,7 +1192,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
+ __ cmpp(rdi, rcx);
__ Assert(equal, kUnexpectedStringFunction);
}
@@ -1163,13 +1200,13 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// (including the receiver).
StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &no_arguments);
- __ movq(rbx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
// Look up the argument in the number-to-string cache.
Label not_cached, argument_is_string;
@@ -1205,15 +1242,15 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
__ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
}
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
// Set properties and elements.
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set the value.
- __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
+ __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
// Ensure the object is fully initialized.
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -1229,7 +1266,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ JumpIfSmi(rax, &convert_argument);
Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
__ j(NegateCondition(is_string), &convert_argument);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ IncrementCounter(counters->string_ctor_string_value(), 1);
__ jmp(&argument_is_string);
@@ -1238,12 +1275,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_conversions(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdi); // Preserve the function.
- __ push(rax);
+ __ Push(rdi); // Preserve the function.
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(rdi);
+ __ Pop(rdi);
}
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&argument_is_string);
// Load the empty string into rbx, remove the receiver from the
@@ -1251,7 +1288,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&no_arguments);
__ LoadRoot(rbx, Heap::kempty_stringRootIndex);
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, kPointerSize));
+ __ leap(rsp, Operand(rsp, kPointerSize));
__ PushReturnAddressFrom(rcx);
__ jmp(&argument_is_string);
@@ -1261,43 +1298,69 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_gc_required(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rbx);
+ __ Push(rbx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
__ ret(0);
}
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- rax : actual number of arguments
+ // -- rbx : expected number of arguments
+ // -- rdi: function (passed through to callee)
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(rdx, Heap::kRealStackLimitRootIndex);
+ __ movp(rcx, rsp);
+ // Make rcx the space we have left. The stack might already have overflowed
+ // here, which will cause rcx to become negative.
+ __ subp(rcx, rdx);
+ // Make rdx the space we need for the expected arguments when they are
+ // pushed onto the stack.
+ __ movp(rdx, rbx);
+ __ shlp(rdx, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(rcx, rdx);
+ __ j(less_equal, stack_overflow); // Signed comparison.
+}
+
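ArgumentsAdaptorStackCheck computes remaining = rsp - real_stack_limit and required = expected_args * kPointerSize, then takes the overflow path when remaining <= required; the comparison is signed, so an already-blown stack (negative remaining) also lands on the slow path. A stand-alone model, assuming 8-byte pointers (kPointerSizeLog2 == 3):

    #include <cstdint>

    bool AdaptorWouldOverflow(intptr_t rsp, intptr_t real_stack_limit,
                              intptr_t expected_args) {
      intptr_t remaining = rsp - real_stack_limit;  // negative if already blown
      intptr_t required = expected_args << 3;       // expected_args * kPointerSize
      return remaining <= required;                 // signed, like j(less_equal)
    }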
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
- __ movq(rbp, rsp);
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
// Push the function on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Preserve the number of arguments on the stack. Must preserve rax,
// rbx and rcx because these registers are used when copying the
// arguments and the receiver.
__ Integer32ToSmi(r8, rax);
- __ push(r8);
+ __ Push(r8);
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
// Leave the frame.
- __ movq(rsp, rbp);
- __ pop(rbp);
+ __ movp(rsp, rbp);
+ __ popq(rbp);
// Remove caller arguments from the stack.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
}
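    // Note: with the Smi count converted to a scaled index, the leap above is
    // effectively rsp += argc * kPointerSize + kPointerSize, i.e. it drops all
    // caller-pushed arguments plus the receiver slot before the return address
    // is pushed back.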
@@ -1306,18 +1369,21 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
- // -- rcx : call kind information
- // -- rdx : code entry to call
+ // -- rdi: function (passed through to callee)
// -----------------------------------
Label invoke, dont_adapt_arguments;
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->arguments_adaptors(), 1);
+ Label stack_overflow;
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
Label enough, too_few;
- __ cmpq(rax, rbx);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ cmpp(rax, rbx);
__ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
@@ -1326,15 +1392,15 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(Operand(rax, 0));
+ __ subp(rax, Immediate(kPointerSize));
+ __ cmpp(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1345,28 +1411,28 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(r8, rax);
+ __ incp(r8);
+ __ Push(Operand(rdi, 0));
+ __ subp(rdi, Immediate(kPointerSize));
+ __ cmpp(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
- __ incq(r8);
- __ push(kScratchRegister);
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(kScratchRegister);
+ __ cmpp(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// Call the entry point.
@@ -1385,47 +1451,47 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ jmp(rdx);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ int3();
+ }
}
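Behaviourally, the trampoline implements ordinary JavaScript argument adaptation; a minimal C++ model of the copy and fill loops above (Value is a stand-in for V8's tagged values, not the real type):

#include <cstdint>
#include <vector>

using Value = uintptr_t;  // stand-in for a tagged V8 value

// Pass the first min(actual, expected) arguments through, then pad with
// 'undefined' until 'expected' arguments are present; this is what the
// "enough" and "too few" push loops above do on the machine stack.
std::vector<Value> AdaptArguments(const std::vector<Value>& actual,
                                  size_t expected, Value undefined) {
  std::vector<Value> adapted;
  adapted.reserve(expected);
  for (size_t i = 0; i < expected; ++i) {
    adapted.push_back(i < actual.size() ? actual[i] : undefined);
  }
  return adapted;
}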
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
- __ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
- __ Integer32ToSmi(rdx, rdx);
-
- // Pass both function and pc offset as arguments.
- __ push(rax);
- __ push(rdx);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ // Pass function as argument.
+ __ Push(rax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
Label skip;
// If the code object is null, just return to the unoptimized code.
- __ cmpq(rax, Immediate(0));
+ __ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
// Load deoptimization data from the code object.
- __ movq(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
- __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+ __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
// Overwrite the return address on the stack.
- __ movq(Operand(rsp, 0), rax);
+ __ movq(StackOperandForReturnAddress(0), rax);
// And "return" to the OSR entry point of the function.
__ ret(0);
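The last few instructions are plain address arithmetic; as a sketch, with the tag and header constants passed in rather than quoted:

#include <cstdint>

// target = code_obj + header_size + osr_offset, with the heap-object tag
// stripped from the tagged code pointer first.
uintptr_t OsrEntryAddress(uintptr_t tagged_code_obj, int32_t osr_pc_offset,
                          int code_header_size, int heap_object_tag) {
  return tagged_code_obj - heap_object_tag + code_header_size +
         osr_pc_offset;
}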
@@ -1439,7 +1505,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/chromium/v8/src/x64/code-stubs-x64.cc b/chromium/v8/src/x64/code-stubs-x64.cc
index 0c9a0f20cdd..0d54f89d16b 100644
--- a/chromium/v8/src/x64/code-stubs-x64.cc
+++ b/chromium/v8/src/x64/code-stubs-x64.cc
@@ -1,57 +1,41 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
-#include "runtime.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/stub-cache.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rbx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdi };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
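Every InitializeInterfaceDescriptor override in this file fills the same few fields; a condensed sketch of the pattern with stand-in types (the real declarations live in code-stubs.h, and the register encoding here is illustrative):

using Register = int;         // stand-in for the x64 Register type
using Address = const void*;  // stand-in for a runtime entry address

// What each override supplies: which registers carry the stub's
// parameters, and where to bail out when the stub deoptimizes
// (NULL when there is no bailout, as for FastNewContextStub above).
struct DescriptorSketch {
  int register_param_count_;
  const Register* register_params_;
  Address deoptimization_handler_;
};

void InitializeLikeFastNewContext(DescriptorSketch* descriptor) {
  static const Register registers[] = {7 /* rdi */};
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = nullptr;  // no runtime bailout
}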
void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -61,50 +45,51 @@ void ToNumberStub::InitializeInterfaceDescriptor(
void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx, rcx };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->register_param_representations_ = representations;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx, rcx, rdx };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rbx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rbx, rdx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
@@ -115,7 +100,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
@@ -125,8 +109,27 @@ void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
}
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rcx, rbx, rax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -136,7 +139,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx };
descriptor->register_param_count_ = 1;
@@ -145,21 +147,25 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
-void KeyedArrayCallStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void StringLengthStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rcx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rax, rcx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
- descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rcx, rax };
descriptor->register_param_count_ = 3;
@@ -170,7 +176,6 @@ void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx };
descriptor->register_param_count_ = 2;
@@ -180,26 +185,13 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
}
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rdx, rax };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// rax -- number of arguments
// rdi -- function
- // rbx -- type info cell with elements kind
+ // rbx -- allocation site with elements kind
static Register registers_variable_args[] = { rdi, rbx, rax };
static Register registers_no_args[] = { rdi, rbx };
@@ -211,18 +203,22 @@ static void InitializeArrayConstructorDescriptor(
descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = rax;
descriptor->register_param_count_ = 3;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
descriptor->register_params_ = registers_variable_args;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
@@ -240,59 +236,56 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = rax;
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeArrayConstructorDescriptor(descriptor, 0);
}
void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeArrayConstructorDescriptor(descriptor, 1);
}
void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeArrayConstructorDescriptor(descriptor, -1);
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
}
void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -300,12 +293,11 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}
void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -313,12 +305,11 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}
void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rcx, rax };
descriptor->register_param_count_ = 3;
@@ -329,7 +320,6 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx, rcx, rdx };
descriptor->register_param_count_ = 4;
@@ -339,14 +329,118 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
-void NewStringAddStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void BinaryOpICStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rcx, rdx, rax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { rdi, // JSFunction
+ rsi, // context
+ rax, // actual number of arguments
+ rbx, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { rsi, // context
+ rcx, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { rsi, // context
+ rcx, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { rsi, // context
+ rdx, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { rax, // callee
+ rbx, // call_data
+ rcx, // holder
+ rdx, // api_function_address
+ rsi, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
}
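Each of the five blocks above keeps a register array and a Representation array in lockstep; a sketch of that invariant with stand-in types (a hypothetical helper, not something the patch adds):

#include <cstddef>

using Register = int;
enum class Representation { kTagged, kInteger32, kExternal };

struct CallDescriptorSketch {
  int register_param_count_;
  const Register* register_params_;
  const Representation* param_representations_;
};

// The i-th register must carry a value of the i-th representation;
// sizing both arrays from one template parameter keeps them parallel
// and equally long by construction.
template <size_t N>
void InitDescriptor(CallDescriptorSketch* d, const Register (&regs)[N],
                    const Representation (&reps)[N]) {
  d->register_param_count_ = static_cast<int>(N);
  d->register_params_ = regs;
  d->param_representations_ = reps;
}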
@@ -355,10 +449,9 @@ void NewStringAddStub::InitializeInterfaceDescriptor(
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
@@ -367,7 +460,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
rax.is(descriptor->register_params_[param_count - 1]));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->register_params_[i]);
+ __ Push(descriptor->register_params_[i]);
}
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
@@ -377,121 +470,16 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // Set up the fixed slots.
- __ Set(rbx, 0); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + (1 * kPointerSize)] : function
- // [rsp + (2 * kPointerSize)] : serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length),
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(1));
- // Get the serialized scope info from the stack.
- __ movq(rbx, args.GetArgumentOperand(0));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- __ cmpq(rcx, Immediate(0));
- __ Assert(equal, kExpected0AsASmiSentinel);
- }
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots.
- __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
- __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
- __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ PushCallerSaved(save_doubles_);
const int argument_count = 1;
__ PrepareCallCFunction(argument_count);
__ LoadAddress(arg_reg_1,
- ExternalReference::isolate_address(masm->isolate()));
+ ExternalReference::isolate_address(isolate()));
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
__ PopCallerSaved(save_doubles_);
__ ret(0);
@@ -523,7 +511,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for return address and saved regs if input is rsp.
- if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;
+ if (input_reg.is(rsp)) double_offset += 3 * kRegisterSize;
MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
MemOperand exponent_operand(MemOperand(input_reg,
@@ -543,14 +531,14 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// is the return register, then save the temp register we use in its stead
// for the result.
Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
- __ push(scratch1);
- __ push(save_reg);
+ __ pushq(scratch1);
+ __ pushq(save_reg);
bool stash_exponent_copy = !input_reg.is(rsp);
__ movl(scratch1, mantissa_operand);
__ movsd(xmm0, mantissa_operand);
__ movl(rcx, exponent_operand);
- if (stash_exponent_copy) __ push(rcx);
+ if (stash_exponent_copy) __ pushq(rcx);
__ andl(rcx, Immediate(HeapNumber::kExponentMask));
__ shrl(rcx, Immediate(HeapNumber::kExponentShift));
@@ -585,338 +573,32 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Restore registers
__ bind(&done);
if (stash_exponent_copy) {
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
}
if (!final_result_reg.is(result_reg)) {
ASSERT(final_result_reg.is(rcx));
__ movl(final_result_reg, result_reg);
}
- __ pop(save_reg);
- __ pop(scratch1);
+ __ popq(save_reg);
+ __ popq(scratch1);
__ ret(0);
}
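For inputs too large for cvttsd2si, the stub falls back to manual exponent and mantissa manipulation; a hedged C++ sketch of the truncation it implements (ECMA-style modulo-2^32 truncation, not the stub's literal code path):

#include <cstdint>
#include <cstring>

int32_t TruncateDoubleToI(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  // Unbiased exponent, relative to the 53-bit mantissa being an integer.
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1075;
  uint64_t mantissa =
      (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint64_t magnitude;
  if (exponent <= -53) {
    magnitude = 0;  // |value| < 1 truncates to 0
  } else if (exponent < 0) {
    magnitude = mantissa >> -exponent;  // drop the fractional bits
  } else if (exponent < 32) {
    magnitude = mantissa << exponent;
  } else {
    magnitude = 0;  // low 32 bits are all zero (also covers NaN/Inf)
  }
  uint32_t result = static_cast<uint32_t>(magnitude);
  if (bits >> 63) result = ~result + 1;  // negate modulo 2^32
  return static_cast<int32_t>(result);
}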
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // rsp[8] : argument (should be number).
- // rsp[0] : return address.
- // Output:
- // rax: tagged double result.
- // UNTAGGED case:
-  // Input:
- // rsp[0] : return address.
- // xmm1 : untagged double input argument
- // Output:
- // xmm1 : untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- Label input_not_smi, loaded;
-
- // Test that rax is a number.
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
- __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kDoubleSize));
- __ Cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&loaded, Label::kNear);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- }
-
- // ST[0] == double value, if TAGGED.
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ Move(rax, cache_array);
- int cache_array_index =
- type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
- __ movq(rax, Operand(rax, cache_array_index));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- // Two uint_32's and a pointer per element.
- CHECK_EQ(2 * kIntSize + 1 * kPointerSize,
- static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
- }
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Cache hit!
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->transcendental_cache_hit(), 1);
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- __ IncrementCounter(counters->transcendental_cache_miss(), 1);
- // Update cache with new value.
- if (tagged) {
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- }
- GenerateOperation(masm, type_);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), xmm1);
- __ fld_d(Operand(rsp, 0));
- GenerateOperation(masm, type_);
- __ fstp_d(Operand(rsp, 0));
- __ movsd(xmm1, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ Push(Smi::FromInt(2 * kDoubleSize));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(
- ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
- } else { // UNTAGGED.
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
- MacroAssembler* masm, TranscendentalCache::Type type) {
- // Registers:
- // rax: Newly allocated HeapNumber, which must be preserved.
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- if (type == TranscendentalCache::SIN ||
- type == TranscendentalCache::COS ||
- type == TranscendentalCache::TAN) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, Label::kNear);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ subq(rsp, Immediate(kPointerSize));
- __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
- __ movl(Operand(rsp, 0), Immediate(0x00000000));
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ movq(rdi, rax); // Save rax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
- __ bind(&in_range);
- switch (type) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- case TranscendentalCache::TAN:
- // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
- // FP register stack.
- __ fptan();
- __ fstp(0); // Pop FP register stack.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
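The deleted cache keyed its entries by a byte-folded hash of the double's bits, per the h0 comments above; the same computation as ordinary C++ (signed shifts to match the sarl instructions, cache_size a power of two as the stub ASSERTed):

#include <cstdint>
#include <cstring>

uint32_t TranscendentalHash(double input, uint32_t cache_size) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  int32_t h0 = static_cast<int32_t>(bits ^ (bits >> 32));
  // Arithmetic shifts, matching the sarl-based mixing in the old stub.
  int32_t h = h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24);
  return static_cast<uint32_t>(h) & (cache_size - 1);
}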
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
// Load operand in rdx into xmm0, or branch to not_numbers.
__ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
__ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -945,7 +627,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime, done, exponent_not_smi, int_exponent;
// Save 1 in double_result - we need this several times later on.
- __ movq(scratch, Immediate(1));
+ __ movp(scratch, Immediate(1));
__ Cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
@@ -954,8 +636,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(base, args.GetArgumentOperand(0));
- __ movq(exponent, args.GetArgumentOperand(1));
+ __ movp(base, args.GetArgumentOperand(0));
+ __ movp(exponent, args.GetArgumentOperand(1));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
@@ -997,8 +679,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&try_arithmetic_simplification);
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmpl(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
+ __ cmpl(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -1075,7 +757,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&fast_power);
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), double_exponent);
__ fld_d(Operand(rsp, 0)); // E
__ movsd(Operand(rsp, 0), double_base);
@@ -1102,12 +784,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(rsp, 0));
__ movsd(double_result, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&done);
__ bind(&fast_power_failed);
__ fninit();
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&call_runtime);
}
@@ -1115,7 +797,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&int_exponent);
const XMMRegister double_scratch2 = double_exponent;
// Back up exponent as we need to check if exponent is negative later.
- __ movq(scratch, exponent); // Back up exponent.
+ __ movp(scratch, exponent); // Back up exponent.
__ movsd(double_scratch, double_base); // Back up base.
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
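The loop elided between this hunk and the next performs square-and-multiply on the backed-up base; a sketch of that fast path (the stub additionally bails to the runtime for edge cases such as a negative exponent dividing to infinity):

// Square-and-multiply: O(log n) multiplications for an integer exponent.
double PowIntSketch(double base, int exponent) {
  double result = 1.0;
  double b = base;
  unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  while (e != 0) {
    if (e & 1u) result *= b;  // take in the current exponent bit
    b *= b;                   // repeated squaring
    e >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}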
@@ -1158,11 +840,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in rax.
@@ -1180,7 +862,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(2);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 2);
+ ExternalReference::power_double_double_function(isolate()), 2);
}
// Return value is in xmm0.
__ movsd(double_result, xmm0);
@@ -1201,7 +883,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->prototype_string());
+ __ Cmp(rax, isolate()->factory()->prototype_string());
__ j(not_equal, &miss);
receiver = rdx;
} else {
@@ -1221,99 +903,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
- if (kind() == Code::KEYED_STORE_IC) {
- __ Cmp(rcx, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ PopReturnAddressTo(scratch);
- __ push(receiver);
- __ push(value);
- __ PushReturnAddressFrom(scratch);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
@@ -1326,7 +915,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Smi instead of the context. We can't use SmiCompare here, because that
// only works for comparing two smis.
Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor);
@@ -1334,22 +923,22 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check index against formal parameters count limit passed in
// through register rax. Use unsigned comparison to get negative
// check for free.
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
__ SmiSub(rax, rax, rdx);
__ SmiToInteger32(rax, rax);
StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
+ __ movp(rax, args.GetArgumentOperand(0));
__ Ret();
// Arguments adaptor case: Check index against actual arguments
// limit found in the arguments adaptor frame. Use unsigned
// comparison to get negative check for free.
__ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
+ __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmpp(rdx, rcx);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
@@ -1357,20 +946,20 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ SmiToInteger32(rcx, rcx);
StackArgumentsAccessor adaptor_args(rbx, rcx,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, adaptor_args.GetArgumentOperand(0));
+ __ movp(rax, adaptor_args.GetArgumentOperand(0));
__ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.
__ bind(&slow);
__ PopReturnAddressTo(rbx);
- __ push(rdx);
+ __ Push(rdx);
__ PushReturnAddressFrom(rbx);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
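Linearized, the fast path above is a bounds check plus one address computation; a sketch with the frame constant passed in (offsets assumed from the SmiSub/accessor sequence, argument 0 being the farthest from the stack top):

#include <cstdint>

// slot(index) for 0 <= index < argc; the stub gets the same effect by
// computing rax = argc - index and indexing from the caller SP. The
// unsigned above_equal compare doubles as the negative-index check.
uintptr_t ArgumentSlot(uintptr_t rbp, int caller_sp_offset, int argc,
                       int index) {
  return rbp + caller_sp_offset +
         static_cast<uintptr_t>(argc - 1 - index) * 8;  // kPointerSize
}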
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// rsp[0] : return address
// rsp[8] : number of parameters (tagged)
@@ -1380,7 +969,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rbx: the mapped parameter count (untagged)
// rax: the allocated object (tagged).
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ SmiToInteger64(rbx, args.GetArgumentOperand(2));
@@ -1389,13 +978,13 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
Label adaptor_frame, try_allocate;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// No adaptor, parameter count = argument count.
- __ movq(rcx, rbx);
+ __ movp(rcx, rbx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
@@ -1403,16 +992,16 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ SmiToInteger64(rcx,
Operand(rdx,
ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
// Compute the mapped parameter count = min(rbx, rcx) in rbx.
- __ cmpq(rbx, rcx);
+ __ cmpp(rbx, rcx);
__ j(less_equal, &try_allocate, Label::kNear);
- __ movq(rbx, rcx);
+ __ movp(rbx, rcx);
__ bind(&try_allocate);
@@ -1421,17 +1010,17 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
- __ xor_(r8, r8);
- __ testq(rbx, rbx);
+ __ xorp(r8, r8);
+ __ testp(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
- __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
// 2. Backing store.
- __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
+ __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
@@ -1440,18 +1029,18 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
// Get the arguments boilerplate from the current native context into rdi.
Label has_mapped_parameters, copy;
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- __ testq(rbx, rbx);
+ __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
- const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+ const int kIndex = Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX;
+ __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
__ jmp(&copy, Label::kNear);
const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
__ bind(&has_mapped_parameters);
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+ __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
__ bind(&copy);
// rax = address of new object (tagged)
@@ -1460,14 +1049,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rdi = address of boilerplate object (tagged)
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rdx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rdx);
+ __ movp(rdx, FieldOperand(rdi, i));
+ __ movp(FieldOperand(rax, i), rdx);
}
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, args.GetArgumentOperand(0));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(rdx, args.GetArgumentOperand(0));
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize),
rdx);
@@ -1475,15 +1064,15 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Note: rcx is tagged from here on.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
// rax = address of new object (tagged)
// rbx = mapped parameter count (untagged)
@@ -1492,17 +1081,17 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(zero, &skip_parameter_map);
- __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
// rbx contains the untagged argument count. Add 2 and tag to write.
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ Integer64PlusConstantToSmi(r9, rbx, 2);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
- __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+ __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
@@ -1517,11 +1106,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, args.GetArgumentOperand(2));
- __ subq(r8, r9);
+ __ addp(r8, args.GetArgumentOperand(2));
+ __ subp(r8, r9);
__ Move(r11, factory->the_hole_value());
- __ movq(rdx, rdi);
- __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ movp(rdx, rdi);
+ __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
// r9 = loop variable (tagged)
// r8 = mapping index (tagged)
// r11 = the hole value
@@ -1532,11 +1121,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&parameters_loop);
__ SmiSubConstant(r9, r9, Smi::FromInt(1));
__ SmiToInteger64(kScratchRegister, r9);
- __ movq(FieldOperand(rdx, kScratchRegister,
+ __ movp(FieldOperand(rdx, kScratchRegister,
times_pointer_size,
kParameterMapHeaderSize),
r8);
- __ movq(FieldOperand(rdi, kScratchRegister,
+ __ movp(FieldOperand(rdi, kScratchRegister,
times_pointer_size,
FixedArray::kHeaderSize),
r11);
@@ -1552,28 +1141,28 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Copy arguments header and remaining slots (if there are any).
__ Move(FieldOperand(rdi, FixedArray::kMapOffset),
factory->fixed_array_map());
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
Label arguments_loop, arguments_test;
- __ movq(r8, rbx);
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(r8, rbx);
+ __ movp(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
- __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
- __ subq(rdx, kScratchRegister);
+ __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
+ __ subp(rdx, kScratchRegister);
__ jmp(&arguments_test, Label::kNear);
__ bind(&arguments_loop);
- __ subq(rdx, Immediate(kPointerSize));
- __ movq(r9, Operand(rdx, 0));
- __ movq(FieldOperand(rdi, r8,
+ __ subp(rdx, Immediate(kPointerSize));
+ __ movp(r9, Operand(rdx, 0));
+ __ movp(FieldOperand(rdi, r8,
times_pointer_size,
FixedArray::kHeaderSize),
r9);
- __ addq(r8, Immediate(1));
+ __ addp(r8, Immediate(1));
__ bind(&arguments_test);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(less, &arguments_loop, Label::kNear);
// Return and remove the on-stack parameters.
@@ -1583,12 +1172,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
- __ movq(args.GetArgumentOperand(2), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ movp(args.GetArgumentOperand(2), rcx); // Patch argument count.
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
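
// A minimal standalone sketch (hypothetical names) of the movq -> movp and
// lea -> leap renames throughout these hunks: the "p" forms operate on
// pointer-width values, so the same stub source also builds for the x32 ABI,
// where pointers are 4 bytes while general-purpose registers stay 8 bytes.
// Register-width data keeps the plain "q" forms.
#include <cstddef>
constexpr size_t kSketchPointerSize = sizeof(void*);  // 8 on x64, 4 under x32
constexpr size_t MovpWidth() {
  // movp behaves like movq when pointers are 64-bit, like movl when 32-bit.
  return kSketchPointerSize == 8 ? 8 : 4;
}
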
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
@@ -1596,22 +1185,22 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(args.GetArgumentOperand(2), rcx);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
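
// A small standalone sketch (frame layout assumed from the constants used
// above) of the leap that re-derives the parameters pointer from an
// arguments adaptor frame: the first argument sits argc slots plus the saved
// frame pointer and return address above the adaptor's frame pointer.
#include <cstdint>
inline uintptr_t FirstParameterAddress(uintptr_t adaptor_fp, int64_t argc) {
  const uintptr_t kCallerSPOffset = 2 * 8;  // saved rbp + return address
  return adaptor_fp + static_cast<uintptr_t>(argc) * 8 + kCallerSPOffset;
}
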
@@ -1623,87 +1212,87 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// Get the length from the frame.
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(2));
+ __ movp(rcx, args.GetArgumentOperand(2));
__ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(args.GetArgumentOperand(2), rcx);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &add_arguments_object, Label::kNear);
- __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current native context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rdi, offset));
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
+ __ movp(rdi, Operand(rdi, offset));
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rbx);
+ __ movp(rbx, FieldOperand(rdi, i));
+ __ movp(FieldOperand(rax, i), rbx);
}
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, args.GetArgumentOperand(2));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(rcx, args.GetArgumentOperand(2));
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
// If there are no actual arguments, we're done.
Label done;
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack.
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(rdx, args.GetArgumentOperand(1));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
// Untag the length for the loop below.
__ SmiToInteger64(rcx, rcx);
// Copy the fixed array slots.
Label loop;
__ bind(&loop);
- __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
+ __ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
+ __ addp(rdi, Immediate(kPointerSize));
+ __ subp(rdx, Immediate(kPointerSize));
+ __ decp(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -1712,7 +1301,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
}
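
// A sketch (x64 sizes assumed) of the allocation size computed around
// try_allocate above: the strict arguments object and its FixedArray backing
// store are carved out of one new-space allocation, and the elements array
// is omitted entirely when there are no arguments.
inline int StrictArgumentsAllocationSize(int argc,
                                         int fixed_array_header,  // map+length
                                         int strict_args_object) {
  int elements = argc > 0 ? fixed_array_header + argc * 8 : 0;
  return elements + strict_args_object;  // Heap::kStrictArgumentsObjectSize
}
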
@@ -1721,7 +1310,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time, or if regexp entry in generated code is turned off by a runtime
// switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1743,23 +1332,22 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Label runtime;
// Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ Load(kScratchRegister, address_of_regexp_stack_memory_size);
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
- __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
+ __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ JumpIfSmi(rax, &runtime);
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movp(rax, FieldOperand(rax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
Condition is_smi = masm->CheckSmi(rax);
__ Check(NegateCondition(is_smi),
@@ -1786,10 +1374,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Reset offset for possibly sliced string.
__ Set(r14, 0);
- __ movq(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
+ __ movp(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
__ JumpIfSmi(rdi, &runtime);
- __ movq(r15, rdi); // Make a copy of the original subject string.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(r15, rdi); // Make a copy of the original subject string.
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// rax: RegExp data (FixedArray)
// rdi: subject string
@@ -1841,7 +1429,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmpq(rbx, Immediate(kExternalStringTag));
+ __ cmpp(rbx, Immediate(kExternalStringTag));
__ j(greater_equal, &not_seq_nor_cons); // Go to (7).
// (4) Cons string. Check that it's flat.
@@ -1849,10 +1437,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
+ __ movp(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
__ bind(&check_underlying);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// (5a) Is subject sequential two byte? If yes, go to (9).
__ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
@@ -1861,14 +1449,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (5b) Is subject external? If yes, go to (8).
__ testb(rbx, Immediate(kStringRepresentationMask));
// The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ j(not_zero, &external_string); // Go to (8)
// (6) One byte sequential. Load regexp code for one byte.
__ bind(&seq_one_byte_string);
// rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
+ __ movp(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
__ Set(rcx, 1); // Type is one byte.
// (E) Carry on. String handling is done.
@@ -1888,7 +1476,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// We have to use r15 instead of rdi to load the length because rdi might
// have only been made to look like a sequential string when it actually
// is an external string.
- __ movq(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
+ __ movp(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
__ JumpIfNotSmi(rbx, &runtime);
__ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
__ j(above_equal, &runtime);
@@ -1899,7 +1487,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
@@ -1910,37 +1498,37 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
__ LoadAddress(kScratchRegister,
- ExternalReference::isolate_address(masm->isolate()));
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ ExternalReference::isolate_address(isolate()));
+ __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize),
kScratchRegister);
// Argument 8: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kRegisterSize),
Immediate(1));
// Argument 7: Start (high end) of backtracking stack memory area.
__ Move(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
+ __ movp(r9, Operand(kScratchRegister, 0));
__ Move(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
+ __ addp(r9, Operand(kScratchRegister, 0));
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
// Argument 6 is passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
+ __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kRegisterSize),
Immediate(0));
#else
__ Set(r9, 0);
#endif
// Argument 5: static offsets vector buffer.
- __ LoadAddress(r8,
- ExternalReference::address_of_static_offsets_vector(isolate));
+ __ LoadAddress(
+ r8, ExternalReference::address_of_static_offsets_vector(isolate()));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r8);
#endif
// rdi: subject string
@@ -1951,31 +1539,31 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r15: original subject string
// Argument 2: Previous index.
- __ movq(arg_reg_2, rbx);
+ __ movp(arg_reg_2, rbx);
// Argument 4: End of string data
// Argument 3: Start of string data
Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
- __ addq(rbx, r14);
+ __ addp(rbx, r14);
__ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
- __ addq(r14, arg_reg_3); // Using arg3 as scratch.
+ __ addp(r14, arg_reg_3); // Using arg3 as scratch.
// rbx: start index of the input
// r14: end index of the input
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
__ bind(&setup_rest);
@@ -1984,10 +1572,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// use rbp, which points exactly to one pointer size below the previous rsp.
// (Because creating a new stack frame pushes the previous rbp onto the stack
// and thereby moves up rsp by one kPointerSize.)
- __ movq(arg_reg_1, r15);
+ __ movp(arg_reg_1, r15);
// Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(r11);
__ LeaveApiExitFrame(true);
@@ -2012,8 +1600,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Load RegExp data.
__ bind(&success);
- __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
+ __ movp(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
__ SmiToInteger32(rax,
FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
@@ -2021,13 +1609,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ movq(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
+ __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
__ JumpIfSmi(r15, &runtime);
__ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
+ __ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
@@ -2042,19 +1630,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdx: number of capture registers
// Store the capture count.
__ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
kScratchRegister);
// Store last subject and last input.
- __ movq(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rax);
+ __ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movp(rcx, rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastSubjectOffset,
rax,
rdi,
kDontSaveFPRegs);
- __ movq(rax, rcx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movp(rax, rcx);
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastInputOffset,
rax,
@@ -2062,8 +1650,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
- __ LoadAddress(rcx,
- ExternalReference::address_of_static_offsets_vector(isolate));
+ __ LoadAddress(
+ rcx, ExternalReference::address_of_static_offsets_vector(isolate()));
// rbx: last_match_info backing store (FixedArray)
// rcx: offsets vector
@@ -2072,13 +1660,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
__ Integer32ToSmi(rdi, rdi);
// Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
+ __ movp(FieldOperand(rbx,
rdx,
times_pointer_size,
RegExpImpl::kFirstCaptureOffset),
@@ -2087,7 +1675,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ movq(rax, r15);
+ __ movp(rax, r15);
__ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
__ bind(&exception);
@@ -2096,14 +1684,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate);
+ Isolate::kPendingExceptionAddress, isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address, rbx);
- __ movq(rax, pending_exception_operand);
+ __ movp(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(equal, &runtime);
- __ movq(pending_exception_operand, rdx);
+ __ movp(pending_exception_operand, rdx);
__ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
Label termination_exception;
@@ -2115,7 +1703,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -2125,7 +1713,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (8) External string. Short external strings have been ruled out.
__ bind(&external_string);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
@@ -2133,10 +1721,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ testb(rbx, Immediate(kIsIndirectStringMask));
__ Assert(zero, kExternalStringExpectedButNotFound);
}
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
// (8a) Is the external string one byte? If yes, go to (6).
__ testb(rbx, Immediate(kStringEncodingMask));
@@ -2146,7 +1734,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rax: RegExp data (FixedArray)
// (9) Two byte sequential. Load regexp code for two byte. Go to (E).
__ bind(&seq_two_byte_string);
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
+ __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
__ Set(rcx, 0); // Type is two byte.
__ jmp(&check_code); // Go to (E).
@@ -2160,97 +1748,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (11) Sliced string. Replace subject with parent. Go to (5a).
// Load offset into r14 and replace subject string with parent.
__ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
+ __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
__ jmp(&check_underlying);
#endif // V8_INTERPRETED_REGEXP
}
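
// A short sketch of why the stack stores above switch from kPointerSize to
// kRegisterSize: slots passed to the native regexp entry point follow the C
// calling convention, so each is one machine register (8 bytes) wide even in
// an x32 build where kPointerSize shrinks to 4; indexing them by pointer
// size would misaddress every slot past the first.
constexpr int kSketchRegisterSize = 8;    // native stack slot width, ABI-fixed
constexpr int NativeArgSlotOffset(int slots_from_top) {
  return slots_from_top * kSketchRegisterSize;  // rsp-relative slot offset
}
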
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(r8, args.GetArgumentOperand(0));
- __ JumpIfNotSmi(r8, &slowcase);
- __ SmiToInteger32(rbx, r8);
- __ cmpl(rbx, Immediate(kMaxInlineLength));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in rbx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- rbx, // In: Number of elements.
- rax, // Out: Start of allocation (tagged).
- rcx, // Out: End of allocation.
- rdx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // rax: Start of allocated area, object-tagged.
- // rbx: Number of array elements as int32.
- // r8: Number of array elements as smi.
-
- // Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
-
- // Set empty properties FixedArray.
- __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
-
- // Set elements to point to FixedArray allocated right after the JSArray.
- __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
- // Set input, index and length fields from arguments.
- __ movq(r8, args.GetArgumentOperand(2));
- __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, args.GetArgumentOperand(1));
- __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, args.GetArgumentOperand(0));
- __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
-
- // Fill out the elements FixedArray.
- // rax: JSArray.
- // rcx: FixedArray.
- // rbx: Number of elements in array as int32.
-
- // Set map.
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
- // Set length.
- __ Integer32ToSmi(rdx, rbx);
- __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with undefined.
- // rax: JSArray.
- // rbx: Number of elements in array that remains to be filled, as int32.
- // rcx: Start of elements in FixedArray.
- // rdx: undefined.
- Label loop;
- __ testl(rbx, rbx);
- __ bind(&loop);
- __ j(less_equal, &done); // Jump if rbx is negative or zero.
- __ subl(rbx, Immediate(1));
- __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -2282,8 +1785,8 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
Register object,
Register scratch) {
__ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
+ __ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbp(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
@@ -2294,7 +1797,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
Condition cc = GetCondition();
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
Label miss;
CheckInputType(masm, rdx, left_, &miss);
@@ -2303,11 +1806,11 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Compare two smis.
Label non_smi, smi_done;
__ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
- __ movq(rax, rdx);
+ __ movp(rax, rdx);
__ ret(0);
__ bind(&non_smi);
@@ -2319,7 +1822,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Two identical objects are equal unless they are both NaN or undefined.
{
Label not_identical;
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
if (cc != equal) {
@@ -2359,7 +1862,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) {
- __ neg(rax);
+ __ negp(rax);
}
__ ret(0);
@@ -2386,7 +1889,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
__ ret(0);
__ bind(&not_smis);
@@ -2437,7 +1940,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Return a result of -1, 0, or 1, based on EFLAGS.
__ setcc(above, rax);
__ setcc(below, rcx);
- __ subq(rax, rcx);
+ __ subp(rax, rcx);
__ ret(0);
// If one of the numbers was NaN, then the result is always false.
@@ -2505,7 +2008,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// a heap object has the low bit clear.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ leap(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects, Label::kNear);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
@@ -2530,8 +2033,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Push arguments below the return address to prepare jump to builtin.
__ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
@@ -2554,176 +2057,155 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
- // rbx : cache cell for call target
+ // rbx : Feedback vector
+ // rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
+ Label initialize, done, miss, megamorphic, not_array_function,
+ done_no_smi_convert;
// Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ __ SmiToInteger32(rdx, rdx);
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmpq(rcx, rdi);
+ __ cmpp(rcx, rdi);
__ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ j(equal, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorphic
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in rcx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(not_equal, &miss);
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in rcx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(not_equal, &miss);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ jmp(&done);
// An uninitialized cache is patched with the function, or with an
// AllocationSite (which tracks the ElementsKind) if the function is the
// Array constructor.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &not_array_function);
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &not_array_function);
- // Arguments register must be smi-tagged to call out.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
- __ push(rdi);
- __ push(rbx);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ // Arguments register must be smi-tagged to call out.
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ Push(rdi);
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdx);
+ __ Push(rbx);
+
+ CreateAllocationSiteStub create_stub(isolate);
+ __ CallStub(&create_stub);
+
+ __ Pop(rbx);
+ __ Pop(rdx);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ jmp(&done_no_smi_convert);
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
+ __ bind(&not_array_function);
}
- __ jmp(&done);
- __ bind(&not_array_function);
- __ movq(FieldOperand(rbx, Cell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
+ __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ rdi);
+
+ // We won't need rdx or rbx anymore; just save rdi.
+ __ Push(rdi);
+ __ Push(rbx);
+ __ Push(rdx);
+ __ RecordWriteArray(rbx, rdi, rdx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(rdx);
+ __ Pop(rbx);
+ __ Pop(rdi);
__ bind(&done);
+ __ Integer32ToSmi(rdx, rdx);
+
+ __ bind(&done_no_smi_convert);
}
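
// A sketch (tag and header values taken from the x64 object layout) of the
// FieldOperand addressing used for the feedback vector above, with rbx
// holding the tagged FixedArray and rdx the untagged slot index:
#include <cstdint>
inline uintptr_t FeedbackSlotAddress(uintptr_t tagged_vector, int64_t slot) {
  const uintptr_t kHeapObjectTag = 1;   // tagged pointers are offset by one
  const uintptr_t kHeaderSize = 2 * 8;  // FixedArray map + length words
  return tagged_vector - kHeapObjectTag + kHeaderSize +
         static_cast<uintptr_t>(slot) * 8;
}
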
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : cache cell for call target
- // rdi : the function to call
- Isolate* isolate = masm->isolate();
- Label slow, non_function;
- StackArgumentsAccessor args(rsp, argc_);
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- __ movq(rax, args.GetReceiverOperand());
- // Call as function is indicated with the hole.
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &call, Label::kNear);
- // Patch the receiver on the stack with the global receiver object.
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rcx);
- __ bind(&call);
- }
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, cont);
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &non_function);
- // Go to the slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
+ // Do not transform the receiver for natives.
+ // SharedFunctionInfo is already loaded into rcx.
+ __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, cont);
+}
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(equal, &call_as_function);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
- }
+static void EmitSlowCase(Isolate* isolate,
+ MacroAssembler* masm,
+ StackArgumentsAccessor* args,
+ int argc,
+ Label* non_function) {
// Check for function proxy.
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
+ __ j(not_equal, non_function);
__ PopReturnAddressTo(rcx);
- __ push(rdi); // put proxy as additional argument under return address
+ __ Push(rdi); // put proxy as additional argument under return address
__ PushReturnAddressFrom(rcx);
- __ Set(rax, argc_ + 1);
+ __ Set(rax, argc + 1);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
- __ bind(&non_function);
- __ movq(args.GetReceiverOperand(), rdi);
- __ Set(rax, argc_);
+ __ bind(non_function);
+ __ movp(args->GetReceiverOperand(), rdi);
+ __ Set(rax, argc);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
isolate->builtins()->ArgumentsAdaptorTrampoline();
@@ -2731,9 +2213,88 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+static void EmitWrapCase(MacroAssembler* masm,
+ StackArgumentsAccessor* args,
+ Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ Push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Pop(rdi);
+ }
+ __ movp(args->GetReceiverOperand(), rax);
+ __ jmp(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // rdi : the function to call
+
+ // call_as_method can only be true if we are compiling a monomorphic method.
+ Isolate* isolate = masm->isolate();
+ Label slow, non_function, wrap, cont;
+ StackArgumentsAccessor args(rsp, argc);
+
+ if (needs_checks) {
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(rdi, &non_function);
+
+ // Go to the slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+ }
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Load the receiver from the stack.
+ __ movp(rax, args.GetReceiverOperand());
+
+ if (needs_checks) {
+ __ JumpIfSmi(rax, &wrap);
+
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ EmitSlowCase(isolate, masm, &args, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, &args, &cont);
+ }
+}
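
// A compact sketch of the receiver policy the cont/wrap paths above
// implement (names illustrative): sloppy-mode calls box primitive receivers
// through Builtins::TO_OBJECT, while strict-mode and native functions see
// the receiver unchanged.
template <typename Value, typename ToObject>
Value SketchedReceiver(bool strict_or_native, bool is_spec_object,
                       Value receiver, ToObject to_object) {
  if (strict_or_native || is_spec_object) return receiver;  // leave as-is
  return to_object(receiver);  // the EmitWrapCase path
}
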
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// rdi : constructor function
Label slow, non_function_call;
@@ -2745,14 +2306,34 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ SmiToInteger32(rdx, rdx);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into rbx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by rdx + 1.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into rbx, or undefined.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(equal, &feedback_register_initialized);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(rbx);
}
// Jump to the function-specific construct stub.
Register jmp_reg = rcx;
- __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(jmp_reg, FieldOperand(jmp_reg,
+ __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
__ jmp(jmp_reg);
// rdi: called object
@@ -2770,12 +2351,162 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ bind(&do_call);
// Set expected number of arguments to zero (not changing rax).
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(vector, FieldOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
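
// The helper above is two dependent loads; roughly (struct shapes
// hypothetical, field names per this revision):
struct SharedFunctionInfoSketch { void* feedback_vector; };
struct JSFunctionSketch { SharedFunctionInfoSketch* shared; };
inline void* LoadFeedbackVector(JSFunctionSketch* closure) {
  // function (from the frame) -> SharedFunctionInfo -> feedback vector
  return closure->shared->feedback_vector;
}
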
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // rdi - function
+ // rdx - slot id (as integer)
+ Label miss;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, rbx);
+ __ SmiToInteger32(rdx, rdx);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpq(rdi, rcx);
+ __ j(not_equal, &miss);
+
+ __ movq(rax, Immediate(arg_count()));
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Verify that rbx contains an AllocationSite (or undefined)
+ __ AssertUndefinedOrAllocationSite(rbx);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // rdi - function
+ // rbx - vector
+ // rdx - slot id
+ Isolate* isolate = masm->isolate();
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ StackArgumentsAccessor args(rsp, argc);
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, rbx);
+
+ // The checks. First, does rdi match the recorded monomorphic target?
+ __ SmiToInteger32(rdx, rdx);
+ __ cmpq(rdi, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+
+ // Load the receiver from the stack.
+ __ movp(rax, args.GetReceiverOperand());
+
+ __ JumpIfSmi(rax, &wrap);
+
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &wrap);
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(isolate, masm, &args, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, &args, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ j(equal, &slow_start);
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
+ __ j(equal, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ jmp(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Miss);
+
+ // The slow case.
+ __ bind(&slow_start);
+ // Check that function is not a smi.
+ __ JumpIfSmi(rdi, &non_function);
+ // Check that function is a JSFunction.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+ __ jmp(&have_js_function);
+
+ // Unreachable
+ __ int3();
+}
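
// A sketch of the feedback-slot state machine the stub above walks, ignoring
// the Array/AllocationSite special case handled in GenerateRecordCallTarget:
enum class CallFeedbackState {
  kUninitialized,  // UninitializedSentinel: the next call records its target
  kMonomorphic,    // a JSFunction: fast path when rdi matches it
  kMegamorphic     // MegamorphicSentinel: call generically from now on
};
inline CallFeedbackState NextState(CallFeedbackState s, bool target_matches) {
  if (s == CallFeedbackState::kMonomorphic && target_matches) return s;
  if (s == CallFeedbackState::kUninitialized)
    return CallFeedbackState::kMonomorphic;  // record this target
  return CallFeedbackState::kMegamorphic;    // any further miss pins the slot
}
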
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ movp(rcx, Operand(rsp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(rcx);
+ __ Push(rdi);
+ __ Push(rbx);
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdx);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to rdi and exit the internal frame.
+ __ movp(rdi, rax);
+ }
+}
+
+
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@@ -2789,6 +2520,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -2797,40 +2529,35 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate);
- CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode(isolate);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
+ CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+ save_doubles.GetCode();
}
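
// A sketch of the interface change visible above and in the isolate() calls
// throughout this file: stubs now receive the Isolate when constructed, so
// GetCode() loses its parameter and generator methods can call isolate()
// directly instead of masm->isolate(). Shape only:
class SketchStub {
 public:
  explicit SketchStub(void* isolate) : isolate_(isolate) {}
  void* isolate() const { return isolate_; }  // available to all generators
 private:
  void* isolate_;  // was threaded through GetCode(Isolate*) before
};
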
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ movq(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, Immediate(0xf));
- __ cmpq(scratch, Immediate(0xf));
- __ j(equal, oom_label);
-}
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // rax: number of arguments including receiver
+ // rbx: pointer to C function (C callee-saved)
+ // rbp: frame pointer of calling JS frame (restored after C call)
+ // rsp: stack pointer (restored after C call)
+ // rsi: current context (restored)
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ // Enter the exit frame that transitions from JavaScript to C++.
+#ifdef _WIN64
+ int arg_stack_space = (result_size_ < 2 ? 2 : 4);
+#else
+ int arg_stack_space = 0;
+#endif
+ __ EnterExitFrame(arg_stack_space, save_doubles_);
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r15: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
+ // r15: argv pointer (C callee-saved).
// Simple results returned in rax (both AMD64 and Win64 calling conventions).
// Complex results must be written to address passed as first argument.
@@ -2841,25 +2568,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ CheckStackAlignment();
}
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack is known to be aligned. This function takes one argument which is
- // passed in register.
- __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, rax);
- __ Move(kScratchRegister,
- ExternalReference::perform_gc_function(masm->isolate()));
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ incl(scope_depth_operand);
- }
-
// Call C function.
#ifdef _WIN64
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
@@ -2868,36 +2576,28 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
- __ movq(rcx, r14); // argc.
- __ movq(rdx, r15); // argv.
- __ Move(r8, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(rcx, r14); // argc.
+ __ movp(rdx, r15); // argv.
+ __ Move(r8, ExternalReference::isolate_address(isolate()));
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
- __ lea(rcx, StackSpaceOperand(2));
+ __ leap(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
- __ movq(rdx, r14); // argc.
- __ movq(r8, r15); // argv.
- __ Move(r9, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(rdx, r14); // argc.
+ __ movp(r8, r15); // argv.
+ __ Move(r9, ExternalReference::isolate_address(isolate()));
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
- __ Move(rdx, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(rdi, r14); // argc.
+ __ movp(rsi, r15); // argv.
+ __ Move(rdx, ExternalReference::isolate_address(isolate()));
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ decl(scope_depth_operand);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
#ifdef _WIN64
// If return value is on the stack, pop it to registers.
if (result_size_ > 1) {
@@ -2905,147 +2605,69 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Read result values stored on stack. Result is stored
// above the four argument mirror slots and the two
// Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
+ __ movq(rax, Operand(rsp, 6 * kRegisterSize));
+ __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
}
#endif
- __ lea(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ CompareRoot(rax, Heap::kExceptionRootIndex);
+ __ j(equal, &exception_returned);
+
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ Operand pending_exception_operand =
+ masm->ExternalOperand(pending_exception_address);
+ __ cmpp(r14, pending_exception_operand);
+ __ j(equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
// Exit the JavaScript to C++ exit frame.
__ LeaveExitFrame(save_doubles_);
__ ret(0);
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned failure is RETRY_AFTER_GC, continue at the retry label.
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, Label::kNear);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
+ // Handling of exception.
+ __ bind(&exception_returned);
// Retrieve the pending exception.
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
- __ movq(rax, pending_exception_operand);
-
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
+ __ movp(rax, pending_exception_operand);
// Clear the pending exception.
- pending_exception_operand =
- masm->ExternalOperand(pending_exception_address);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ movq(pending_exception_operand, rdx);
+ __ movp(pending_exception_operand, rdx);
// Special handling of termination exceptions, which are uncatchable
// by JavaScript code.
+ Label throw_termination_exception;
__ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
+ __ j(equal, &throw_termination_exception);
// Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Enter the exit frame that transitions from JavaScript to C++.
-#ifdef _WIN64
- int arg_stack_space = (result_size_ < 2 ? 2 : 4);
-#else
- int arg_stack_space = 0;
-#endif
- __ EnterExitFrame(arg_stack_space, save_doubles_);
-
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- // a failure result if the collect_garbage argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE64);
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ Set(rax, static_cast<int64_t>(false));
- __ Store(external_caught, rax);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
- __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
- __ bind(&already_have_failure);
- __ Store(pending_exception, rax);
- // Fall through to the next label.
+ __ Throw(rax);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(rax);
-
- __ bind(&throw_normal_exception);
- __ Throw(rax);
}
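A minimal sketch of the result protocol the rewritten stub implements, assuming the V8 convention that a dedicated exception sentinel (Heap::kExceptionRootIndex) marks a throw; names below are illustrative:

enum class Outcome { kNormal, kThrow, kTerminate };

// Classify the value the C call left in rax, mirroring the checks above.
Outcome ClassifyResult(const void* result, const void* exception_sentinel,
                       const void* pending_exception,
                       const void* termination_exception) {
  if (result != exception_sentinel) return Outcome::kNormal;  // fast path
  // The real stub also clears the pending-exception slot (stores the-hole).
  return pending_exception == termination_exception ? Outcome::kTerminate
                                                    : Outcome::kThrow;
}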
@@ -3058,31 +2680,31 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
- __ push(rbp);
- __ movq(rbp, rsp);
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
// Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
// Scratch register is neither callee-save nor an argument register on any
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
- __ movq(kScratchRegister, Smi::FromInt(marker), RelocInfo::NONE64);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
+ __ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
+ __ Push(kScratchRegister); // context slot
+ __ Push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/X32/Win64 calling conventions).
+ __ pushq(r12);
+ __ pushq(r13);
+ __ pushq(r14);
+ __ pushq(r15);
#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
- __ push(rbx);
+ __ pushq(rbx);
#ifdef _WIN64
// On Win64 XMM6-XMM15 are callee-save
- __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
@@ -3101,22 +2723,20 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ InitializeRootRegister();
}
- Isolate* isolate = masm->isolate();
-
// Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
+ ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
{
Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ push(c_entry_fp_operand);
+ __ Push(c_entry_fp_operand);
}
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
__ Load(rax, js_entry_sp);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ movq(rax, rbp);
+ __ movp(rax, rbp);
__ Store(js_entry_sp, rax);
Label cont;
__ jmp(&cont);
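A hedged sketch of the bookkeeping above (js_entry_sp is a per-isolate slot; names are illustrative):

#include <cstdint>

uintptr_t js_entry_sp = 0;  // zero while no JS frame is on the stack

// Returns true for the outermost entry, matching the OUTERMOST marker push.
bool EnterJS(uintptr_t frame_pointer) {
  if (js_entry_sp != 0) return false;  // nested entry
  js_entry_sp = frame_pointer;         // remember the outermost rbp
  return true;
}

void LeaveJS(bool was_outermost) {
  if (was_outermost) js_entry_sp = 0;  // the clear at &not_outermost_js_2
}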
@@ -3132,9 +2752,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
+ isolate());
__ Store(pending_exception, rax);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
+ __ LoadRoot(rax, Heap::kExceptionRootIndex);
__ jmp(&exit);
// Invoke: Link this frame into the handler chain. There's only one
@@ -3147,7 +2767,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ Store(pending_exception, rax);
// Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
+ __ Push(Immediate(0)); // receiver
// Invoke the function by calling through the JS entry trampoline builtin and
// pop the faked function when we return. We load the address from an
@@ -3156,13 +2776,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// at the time this code is generated.
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
+ isolate());
__ Load(rax, construct_entry);
} else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
__ Load(rax, entry);
}
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
__ call(kScratchRegister);
// Unlink this frame from the handler chain.
@@ -3170,16 +2790,16 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
- __ pop(rbx);
+ __ Pop(rbx);
__ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ Move(kScratchRegister, js_entry_sp);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ movp(Operand(kScratchRegister, 0), Immediate(0));
__ bind(&not_outermost_js_2);
// Restore the top frame descriptor from the stack.
{ Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ pop(c_entry_fp_operand);
+ __ Pop(c_entry_fp_operand);
}
// Restore callee-saved registers (X64 conventions).
@@ -3195,23 +2815,23 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
__ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
__ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
- __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif
- __ pop(rbx);
+ __ popq(rbx);
#ifdef _WIN64
// Callee save in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
+ __ popq(rsi);
+ __ popq(rdi);
#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+ __ popq(r15);
+ __ popq(r14);
+ __ popq(r13);
+ __ popq(r12);
+ __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
}
@@ -3232,17 +2852,19 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// indicate that the value is not an instance.
static const int kOffsetToMapCheckValue = 2;
- static const int kOffsetToResultValue = 18;
+ static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14;
// The last 4 bytes of the instruction sequence
- // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
+ // movp(rdi, FieldOperand(rax, HeapObject::kMapOffset))
// Move(kScratchRegister, Factory::the_hole_value())
// in front of the hole value address.
- static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
+ static const unsigned int kWordBeforeMapCheckValue =
+ kPointerSize == kInt64Size ? 0xBA49FF78 : 0xBA41FF78;
// The last 4 bytes of the instruction sequence
// __ j(not_equal, &cache_miss);
// __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
// before the offset of the hole value in the root array.
- static const unsigned int kWordBeforeResultValue = 0x458B4906;
+ static const unsigned int kWordBeforeResultValue =
+ kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106;
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
@@ -3251,7 +2873,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Label slow;
StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
+ __ movp(rax, args.GetArgumentOperand(0));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
@@ -3261,7 +2883,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(rdx, args.GetArgumentOperand(1));
// rdx is function, rax is map.
// If there is a call site cache don't look in the global cache, but do the
@@ -3297,31 +2919,31 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
} else {
// Get return address and delta to inlined map check.
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
__ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
}
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, kOffsetToMapCheckValue));
- __ movq(Operand(kScratchRegister, 0), rax);
+ __ movp(Operand(kScratchRegister, 0), rax);
}
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
Label loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ cmpq(rcx, rbx);
+ __ cmpp(rcx, rbx);
__ j(equal, &is_instance, Label::kNear);
- __ cmpq(rcx, kScratchRegister);
+ __ cmpp(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance, Label::kNear);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
__ jmp(&loop);
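A compilable sketch of the loop above (Object is a stand-in type; the stub walks via maps, but the effect is a plain prototype-chain walk that stops at the function prototype or at null):

struct Object { const Object* prototype; };

bool IsInstance(const Object* lhs, const Object* function_prototype,
                const Object* null_object) {
  for (const Object* p = lhs->prototype; ; p = p->prototype) {
    if (p == function_prototype) return true;   // &is_instance
    if (p == null_object) return false;         // &is_not_instance
  }
}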
__ bind(&is_instance);
@@ -3338,7 +2960,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -3361,7 +2983,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -3376,7 +2998,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (HasCallSiteInlineCheck()) {
// Remove extra value from the stack.
__ PopReturnAddressTo(rcx);
- __ pop(rax);
+ __ Pop(rax);
__ PushReturnAddressFrom(rcx);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
@@ -3403,7 +3025,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ testb(result_, Immediate(kIsNotStringMask));
@@ -3441,23 +3063,23 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
+ __ Push(object_);
+ __ Push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ movq(index_, rax);
+ __ movp(index_, rax);
}
- __ pop(object_);
+ __ Pop(object_);
// Reload the instance type.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
@@ -3470,12 +3092,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
- __ push(object_);
+ __ Push(object_);
__ Integer32ToSmi(index_, index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ Push(index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
if (!result_.is(rax)) {
- __ movq(result_, rax);
+ __ movp(result_, rax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3495,7 +3117,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movq(result_, FieldOperand(result_, index.reg, index.scale,
+ __ movp(result_, FieldOperand(result_, index.reg, index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ j(equal, &slow_case_);
@@ -3510,10 +3132,10 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
- __ push(code_);
+ __ Push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
if (!result_.is(rax)) {
- __ movq(result_, rax);
+ __ movp(result_, rax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3522,548 +3144,35 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0)); // First argument (left).
- __ movq(rdx, args.GetArgumentOperand(1)); // Second argument (right).
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfSmi(rax, &call_runtime);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &call_runtime);
-
- // First argument is a string, test second.
- __ JumpIfSmi(rdx, &call_runtime);
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &call_runtime);
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
-
- // Both arguments are strings.
- // rax: first string
- // rdx: second string
- // Check if either of the strings are empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length, Label::kNear);
- // Second string is empty, result is first string which is already in rax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length, Label::kNear);
- // First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // rax: first string
- // rbx: length of first string
- // rcx: length of second string
- // rdx: second string
- // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
- // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
-
- // If the arguments were known to be strings, their maps were not loaded
- // into r8 and r9 by the code above.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- }
- // Get the instance types of the two strings as they will be needed soon.
- __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
- __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-
- // Look at the length of the result of adding the two strings.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx);
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return an internalized string here.
- __ SmiCompare(rbx, Smi::FromInt(2));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(rdi, 2);
- __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
- // rbx - first byte: first character
- // rbx - second byte: *maybe* second character
- // Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ shll(rcx, Immediate(kBitsPerByte));
- __ orl(rbx, rcx);
- // Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
- __ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ASCII the result is an ASCII cons string.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii, allocated, ascii_data;
- __ movl(rcx, r8);
- __ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testl(rcx, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ASCII cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ Load(rbx, high_promotion_mode);
- __ testb(rbx, Immediate(1));
- __ j(zero, &skip_write_barrier);
-
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ RecordWriteField(rcx,
- ConsString::kFirstOffset,
- rax,
- rbx,
- kDontSaveFPRegs);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
- __ RecordWriteField(rcx,
- ConsString::kSecondOffset,
- rdx,
- rbx,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
-
- __ bind(&skip_write_barrier);
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
-
- __ bind(&after_writing);
-
- __ movq(rax, rcx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one-byte characters.
- // rcx: first instance type AND second instance type.
- // r8: first instance type.
- // r9: second instance type.
- __ testb(rcx, Immediate(kOneByteDataHintMask));
- __ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ andb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
- __ cmpb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // rax: first string
- // rbx: length of resulting flat string as smi
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label first_prepared, second_prepared;
- Label first_is_sequential, second_is_sequential;
- __ bind(&string_add_flat_result);
-
- __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
- // r14: length of first string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r8, Immediate(kStringRepresentationMask));
- __ j(zero, &first_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r8, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
- __ jmp(&first_prepared, Label::kNear);
- __ bind(&first_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ bind(&first_prepared);
-
- // Check whether both strings have the same encoding.
- __ xorl(r8, r9);
- __ testb(r8, Immediate(kStringEncodingMask));
- __ j(not_zero, &call_runtime);
-
- __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
- // r15: length of second string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r9, Immediate(kStringRepresentationMask));
- __ j(zero, &second_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r9, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
- __ jmp(&second_prepared, Label::kNear);
- __ bind(&second_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r9: instance type of second string
- // First string and second string have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ SmiToInteger32(rbx, rbx);
- __ testb(r9, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii_string_add_flat_result);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ASCII strings. As they are short they are both flat.
- __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are two-byte strings. As they are short they are both flat.
- __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
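For the record, a hedged sketch of the strategy the deleted stub implemented (the constants are illustrative stand-ins for ConsString::kMinLength and String::kMaxLength, not the exact V8 values):

enum class AddStrategy { kReturnOther, kTwoCharTable, kFlat, kCons, kRuntime };

AddStrategy ChooseStrategy(int left_len, int right_len) {
  const int kMinConsLength = 13;          // illustrative
  const int kMaxLength = (1 << 28) - 16;  // illustrative
  if (left_len == 0 || right_len == 0) return AddStrategy::kReturnOther;
  int total = left_len + right_len;
  if (total == 2) return AddStrategy::kTwoCharTable;  // string table probe
  if (total < kMinConsLength) return AddStrategy::kFlat;
  if (total > kMaxLength) return AddStrategy::kRuntime;
  return AddStrategy::kCons;
}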
-
-
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(rax);
- __ push(rdx);
-}
-
-
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
- Register temp) {
- __ PopReturnAddressTo(temp);
- __ pop(rdx);
- __ pop(rax);
- __ PushReturnAddressFrom(temp);
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
- __ movq(arg, scratch1);
- __ movq(Operand(rsp, stack_offset), arg);
- __ bind(&done);
-}
-
-
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- } else {
- __ movzxwl(kScratchRegister, Operand(src, 0));
- __ movw(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(2));
- __ addq(dest, Immediate(2));
- }
- __ decl(count);
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- // Copy characters using rep movs of quadwords. Copy any remaining
- // characters one byte at a time after running rep movs.
- // Count is positive int32, dest and src are character pointers.
- ASSERT(dest.is(rdi)); // rep movs destination
- ASSERT(src.is(rsi)); // rep movs source
- ASSERT(count.is(rcx)); // rep movs count
-
+ String::Encoding encoding) {
// Nothing to do for zero characters.
Label done;
__ testl(count, count);
__ j(zero, &done, Label::kNear);
// Make count the number of bytes to copy.
- if (!ascii) {
+ if (encoding == String::TWO_BYTE_ENCODING) {
STATIC_ASSERT(2 == sizeof(uc16));
__ addl(count, count);
}
- // Don't enter the rep movs if there are fewer than kPointerSize bytes to copy.
- Label last_bytes;
- __ testl(count, Immediate(~(kPointerSize - 1)));
- __ j(zero, &last_bytes, Label::kNear);
-
- // Copy from rsi to rdi using the rep movs instruction.
- __ movl(kScratchRegister, count);
- __ shr(count, Immediate(kPointerSizeLog2)); // Number of quadwords to copy.
- __ repmovsq();
-
- // Find number of bytes left.
- __ movl(count, kScratchRegister);
- __ and_(count, Immediate(kPointerSize - 1));
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ testl(count, count);
- __ j(zero, &done, Label::kNear);
-
// Copy remaining characters.
Label loop;
__ bind(&loop);
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
+ __ incp(src);
+ __ incp(dest);
__ decl(count);
__ j(not_zero, &loop);
__ bind(&done);
}
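Equivalent C++ for the simplified loop above, under the assumption that TWO_BYTE_ENCODING simply doubles the byte count (sizeof(uc16) == 2) before a byte-at-a-time copy:

#include <cstdint>

void CopyCharacters(uint8_t* dest, const uint8_t* src, int count,
                    bool two_byte) {
  int bytes = two_byte ? count * 2 : count;  // count doubled for two-byte
  for (int i = 0; i < bytes; ++i) dest[i] = src[i];
}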
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that neither character is a digit, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ leal(scratch, Operand(c1, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index, Label::kNear);
- __ leal(scratch, Operand(c2, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, Immediate(kBitsPerByte));
- __ orl(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ SmiToInteger32(mask,
- FieldOperand(string_table, StringTable::kCapacityOffset));
- __ decl(mask);
-
- Register map = scratch4;
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string (32-bit int)
- // string_table: string table
- // mask: capacity mask (32-bit int)
- // map: -
- // scratch: -
-
- // Perform a number of probes in the string table.
- static const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- __ movl(scratch, hash);
- if (i > 0) {
- __ addl(scratch, Immediate(StringTable::GetProbeOffset(i)));
- }
- __ andl(scratch, mask);
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ movq(candidate,
- FieldOperand(string_table,
- scratch,
- times_pointer_size,
- StringTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CmpObjectType(candidate, ODDBALL_TYPE, map);
- __ j(not_equal, &is_string, Label::kNear);
-
- __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
- __ j(equal, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ cmpq(kScratchRegister, candidate);
- __ Assert(equal, kOddballInStringTableIsNotUndefinedOrTheHole);
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // If length is not 2 the string is not a candidate.
- __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
- Smi::FromInt(2));
- __ j(not_equal, &next_probe[i]);
-
- // We use kScratchRegister as a temporary register on the assumption that
- // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
- Register temp = kScratchRegister;
-
- // Check that the candidate is a non-external ASCII string.
- __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe[i]);
-
- // Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
- __ andl(temp, Immediate(0x0000ffff));
- __ cmpl(chars, temp);
- __ j(equal, &found_in_string_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- if (!result.is(rax)) {
- __ movq(rax, result);
- }
-}
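A hedged sketch of the open-addressing probe the deleted helper performed (the probe-offset formula and table layout are illustrative; the real code also checks length, encoding, and oddball entries before accepting a candidate):

#include <cstdint>
#include <vector>

int FindTwoCharString(const std::vector<uint32_t>& table, uint32_t hash,
                      uint32_t chars) {
  const int kProbes = 4;  // matches the stub's inline probe count
  uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // capacity is 2^n
  for (int i = 0; i < kProbes; ++i) {
    uint32_t entry = (hash + i * (i + 1) / 2) & mask;  // quadratic probing
    if (table[entry] == chars) return static_cast<int>(entry);
  }
  return -1;  // &not_found: the caller allocates a fresh two-char string
}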
-
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
@@ -4144,7 +3253,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
ARGUMENTS_DONT_CONTAIN_RECEIVER);
// Make sure first argument is a string.
- __ movq(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
+ __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
STATIC_ASSERT(kSmiTag == 0);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -4154,19 +3263,19 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: string
// rbx: instance type
// Calculate length of sub string using the smi values.
- __ movq(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
- __ movq(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
+ __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
+ __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
+ __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
Label not_original_string;
// Shorter than original string's length: an actual substring.
__ j(below, &not_original_string, Label::kNear);
// Longer than original string's length or negative: unsafe arguments.
__ j(above, &runtime);
// Return original string.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
__ bind(&not_original_string);
@@ -4197,24 +3306,24 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
// Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
- __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
+ __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+ __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
// Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&seq_or_external_string);
// Sequential or external string. Just move string to the correct register.
- __ movq(rdi, rax);
+ __ movp(rdi, rax);
__ bind(&underlying_unpacked);
@@ -4226,7 +3335,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rcx: length
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyway.
- __ cmpq(rcx, Immediate(SlicedString::kMinLength));
+ __ cmpp(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
// Allocate new sliced string. At this point we do not reload the instance
@@ -4245,11 +3354,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
__ bind(&set_slice_header);
__ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
- __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
+ __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
+ __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
- __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
+ __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
+ __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
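A rough model of the sliced-string fast path: instead of copying characters, the result records a parent plus an offset. Field names below mirror the offsets written above but are a sketch, not the V8 object layout:

#include <cstdint>

struct SlicedString {
  int32_t length;       // SlicedString::kLengthOffset (stored as a smi)
  uint32_t hash_field;  // String::kEmptyHashField until first use
  const void* parent;   // the underlying flat string (rdi above)
  int32_t offset;       // start index within the parent (rdx above)
};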
@@ -4270,13 +3379,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Handle external string.
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ testb(rbx, Immediate(kShortExternalStringMask));
__ j(not_zero, &runtime);
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
@@ -4288,22 +3397,20 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: result string
// rcx: result string length
- __ movq(r14, rsi); // rsi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rax: result string
// rcx: result length
// rdi: first character of result
- // rsi: character of sub string start
- // r14: original value of rsi
+ // r14: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, r14); // Restore rsi.
+ StringHelper::GenerateCopyCharacters(
+ masm, rdi, r14, rcx, String::ONE_BYTE_ENCODING);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
@@ -4313,28 +3420,26 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: result string
// rcx: result string length
- __ movq(r14, rsi); // rsi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
// rax: result string
// rcx: result length
// rdi: first character of result
- // rsi: character of sub string start
- // r14: original value of rsi
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, r14); // Restore rsi.
+ // r14: character of sub string start
+ StringHelper::GenerateCopyCharacters(
+ masm, rdi, r14, rcx, String::TWO_BYTE_ENCODING);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// rax: string
@@ -4358,7 +3463,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare lengths.
Label check_zero_length;
- __ movq(length, FieldOperand(left, String::kLengthOffset));
+ __ movp(length, FieldOperand(left, String::kLengthOffset));
__ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
__ j(equal, &check_zero_length, Label::kNear);
__ Move(rax, Smi::FromInt(NOT_EQUAL));
@@ -4402,8 +3507,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
// Find minimum length and length difference.
- __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movq(scratch4, scratch1);
+ __ movp(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ movp(scratch4, scratch1);
__ SmiSub(scratch4,
scratch4,
FieldOperand(right, String::kLengthOffset));
@@ -4427,7 +3532,10 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare loop.
Label result_not_equal;
GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
+ &result_not_equal,
+ // In debug-code mode, SmiTest below might push
+ // the target label outside the near range.
+ Label::kFar);
// Completed loop without finding different characters.
// Compare lengths (precomputed).
@@ -4473,11 +3581,11 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// start. This means that the loop ends when the index reaches zero, which
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
- __ lea(left,
+ __ leap(left,
FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
+ __ leap(right,
FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
+ __ negq(length);
Register index = length; // index = -length;
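A compilable sketch of the negative-index trick: bias both pointers one past the data and count an index up toward zero, so the loop body needs no separate bounds compare:

int CompareFlat(const unsigned char* left, const unsigned char* right,
                int length) {
  left += length;  // point past the compared range
  right += length;
  for (int i = -length; i != 0; ++i) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  return 0;  // equal over the shared prefix; lengths decide the rest
}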
// Compare loop.
@@ -4500,15 +3608,15 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// rsp[16] : left string
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rdx, args.GetArgumentOperand(0)); // left
- __ movq(rax, args.GetArgumentOperand(1)); // right
+ __ movp(rdx, args.GetArgumentOperand(0)); // left
+ __ movp(rax, args.GetArgumentOperand(1)); // right
// Check for identity.
Label not_same;
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->string_compare_native(), 1);
__ ret(2 * kPointerSize);
@@ -4521,14 +3629,42 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ PopReturnAddressTo(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ addp(rsp, Immediate(2 * kPointerSize));
__ PushReturnAddressFrom(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdx : left
+ // -- rax : right
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ // Load rcx with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ Move(rcx, handle(isolate()->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ Assert(not_equal, kExpectedAllocationSite);
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
+ isolate()->factory()->allocation_site_map());
+ __ Assert(equal, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
}
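A small sketch of the tag test inside the debug check, assuming the standard V8 tagging scheme (kSmiTag == 0, kHeapObjectTag == 1), so any genuine allocation-site pointer has a nonzero low bit:

#include <cstdint>

bool LooksLikeHeapObject(uintptr_t raw_value) {
  return (raw_value & 1) != 0;  // smis have a clear low bit
}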
@@ -4539,15 +3675,15 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
} else {
Label done;
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ not_(rdx);
+ __ notp(rdx);
__ bind(&done);
- __ movq(rax, rdx);
+ __ movp(rax, rdx);
}
__ ret(0);
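A sketch of the overflow fixup above, using a GCC/Clang builtin for the overflow test: when left - right overflows, the computed sign is wrong, and ~diff flips the sign bit while staying non-zero, which is all callers inspect:

#include <cstdint>

int32_t SmiCompareResult(int32_t left, int32_t right) {
  int32_t diff;
  if (__builtin_sub_overflow(left, right, &diff)) return ~diff;  // notp(rdx)
  return diff;
}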
@@ -4573,7 +3709,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
// Load left and right operand.
Label done, left, left_smi, right_smi;
__ JumpIfSmi(rax, &right_smi, Label::kNear);
- __ CompareMap(rax, masm->isolate()->factory()->heap_number_map());
+ __ CompareMap(rax, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
@@ -4583,7 +3719,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&left);
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
- __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map());
+ __ CompareMap(rdx, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -4603,18 +3739,18 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ movl(rax, Immediate(0));
__ movl(rcx, Immediate(0));
__ setcc(above, rax); // Add one to zero if carry clear and not equal.
- __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+ __ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
__ ret(0);
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
CompareIC::GENERIC);
- __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rax, masm->isolate()->factory()->undefined_value());
+ __ Cmp(rax, isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
__ JumpIfSmi(rdx, &unordered);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
@@ -4624,7 +3760,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&maybe_undefined2);
if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
+ __ Cmp(rdx, isolate()->factory()->undefined_value());
__ j(equal, &unordered);
}
@@ -4649,18 +3785,18 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ j(cond, &miss, Label::kNear);
// Check that both operands are internalized strings.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
__ j(not_zero, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4693,17 +3829,17 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
__ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4738,19 +3874,19 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Check that both operands are strings. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ movq(tmp3, tmp1);
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movp(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
+ __ orp(tmp3, tmp2);
__ testb(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Fast check for identical strings.
Label not_same;
- __ cmpq(left, right);
+ __ cmpp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -4766,7 +3902,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
Label do_compare;
STATIC_ASSERT(kInternalizedTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotInternalizedMask));
__ j(not_zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
@@ -4792,13 +3928,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
__ PopReturnAddressTo(tmp1);
- __ push(left);
- __ push(right);
+ __ Push(left);
+ __ Push(right);
__ PushReturnAddressFrom(tmp1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4818,7 +3954,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4831,14 +3967,14 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
- __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ Cmp(rcx, known_map_);
__ j(not_equal, &miss, Label::kNear);
__ Cmp(rbx, known_map_);
__ j(not_equal, &miss, Label::kNear);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4850,20 +3986,20 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdx);
- __ push(rax);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
+ __ Push(rdx);
+ __ Push(rax);
__ Push(Smi::FromInt(op_));
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
- __ pop(rax);
- __ pop(rdx);
+ __ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
+ __ Pop(rax);
+ __ Pop(rdx);
}
// Do a tail call to the rewritten stub.
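
The miss handler returns the freshly compiled stub as a tagged Code object; the leap through FieldOperand(rax, Code::kHeaderSize) turns that into a raw entry point by skipping the header and dropping the heap-object tag. A sketch of the address math, with a hypothetical header size:

    #include <cstdint>

    constexpr int kHeapObjectTag = 1;     // tagged pointers are off by one
    constexpr int kCodeHeaderSize = 64;   // hypothetical; the real value differs

    // FieldOperand(obj, offset) computes obj + offset - kHeapObjectTag.
    uintptr_t EntryPoint(uintptr_t tagged_code_object) {
      return tagged_code_object + kCodeHeaderSize - kHeapObjectTag;
    }
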
@@ -4890,17 +4026,17 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Capacity is smi 2^n.

__ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
__ decl(index);
- __ and_(index,
+ __ andp(index,
Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+ __ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Finding undefined in this slot means the name is not in the dictionary.
ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties,
+ __ movp(entity_name, Operand(properties,
index,
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
@@ -4917,17 +4053,18 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ j(equal, &good, Label::kNear);
// Check if the entry name is not a unique name.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
miss);
__ bind(&good);
}
- NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
+ NEGATIVE_LOOKUP);
__ Push(Handle<Object>(name));
- __ push(Immediate(name->Hash()));
+ __ Push(Immediate(name->Hash()));
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
}
@@ -4961,26 +4098,27 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
if (i > 0) {
__ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(r1, r0);
+ __ andp(r1, r0);
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+ __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
+ __ cmpp(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ j(equal, done);
}
- NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP);
- __ push(name);
+ NameDictionaryLookupStub stub(masm->isolate(), elements, r0, r1,
+ POSITIVE_LOOKUP);
+ __ Push(name);
__ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
__ shrl(r0, Immediate(Name::kHashShift));
- __ push(r0);
+ __ Push(r0);
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(zero, miss);
__ jmp(done);
}
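
Both lookup generators emit the same quadratic probe: mask (hash + offset(i)) by capacity - 1, then scale by the 3-word entry size with a single lea (index + index*2). A compact C++ model, assuming GetProbeOffset(i) is the triangular-number offset used by V8's hash tables:

    #include <cstdint>

    constexpr uint32_t kEntrySize = 3;  // NameDictionary entry: key, value, details

    uint32_t ProbeOffset(uint32_t i) { return (i + i * i) >> 1; }  // assumption

    // Word index of the i-th probe's entry. Capacity must be a power of two,
    // so `& (capacity - 1)` implements the modulo.
    uint32_t EntryIndex(uint32_t hash, uint32_t i, uint32_t capacity) {
      uint32_t masked = (hash + ProbeOffset(i)) & (capacity - 1);
      return masked * kEntrySize;  // lea(r, Operand(r, r, times_2, 0)): r + 2r
    }
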
@@ -5007,7 +4145,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
__ decl(scratch);
- __ push(scratch);
+ __ Push(scratch);
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
@@ -5018,27 +4156,27 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
kPointerSize);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, args.GetArgumentOperand(1));
+ __ movp(scratch, args.GetArgumentOperand(1));
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(scratch, Operand(rsp, 0));
+ __ andp(scratch, Operand(rsp, 0));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+ __ leap(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
- __ movq(scratch, Operand(dictionary_,
+ __ movp(scratch, Operand(dictionary_,
index_,
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
- __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
+ __ Cmp(scratch, isolate()->factory()->undefined_value());
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, args.GetArgumentOperand(0));
+ __ cmpp(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
@@ -5047,7 +4185,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// key we are looking for.
// Check if the entry name is not a unique name.
- __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
&maybe_in_dictionary);
}
@@ -5058,18 +4196,18 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
if (mode_ == POSITIVE_LOOKUP) {
- __ movq(scratch, Immediate(0));
+ __ movp(scratch, Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
__ bind(&in_dictionary);
- __ movq(scratch, Immediate(1));
+ __ movp(scratch, Immediate(1));
__ Drop(1);
__ ret(2 * kPointerSize);
__ bind(&not_in_dictionary);
- __ movq(scratch, Immediate(0));
+ __ movp(scratch, Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
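
The stub reads its smi-tagged capacity with SmiToInteger32 before masking. On x64 a smi keeps its 32-bit payload in the upper half of the word, so tagging and untagging are plain shifts; a minimal model, assuming the 32-bit-shift smi layout this port uses:

    #include <cstdint>

    int64_t Integer32ToSmi(int32_t value) {
      return static_cast<int64_t>(value) << 32;  // payload moves to the high word
    }

    int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);    // arithmetic shift untags
    }
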
@@ -5077,15 +4215,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return true; // Always have SSE2 on x64.
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
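
The other recurring change in this file is mechanical: stubs now receive the Isolate in their constructor, so GetCode() takes no argument and generators call isolate() instead of threading masm->isolate() through. A stripped-down sketch of the pattern (hypothetical mock types, not the real class hierarchy):

    class Isolate {};

    class PlatformCodeStub {
     public:
      explicit PlatformCodeStub(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }
      void GetCode() { /* compiles against isolate_; no parameter needed */ }

     private:
      Isolate* isolate_;
    };
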
@@ -5134,7 +4267,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(),
regs_.scratch0(),
&dont_need_remembered_set);
@@ -5149,7 +4282,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -5162,13 +4295,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ ret(0);
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
Register address =
arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
@@ -5179,23 +4312,14 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
// TODO(gc) Can we just set address arg2 in the beginning?
__ Move(arg_reg_2, address);
__ LoadAddress(arg_reg_3,
- ExternalReference::isolate_address(masm->isolate()));
+ ExternalReference::isolate_address(isolate()));
int argument_count = 3;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5208,13 +4332,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_object;
- __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
- __ movq(regs_.scratch1(),
+ __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ andp(regs_.scratch0(), regs_.object());
+ __ movp(regs_.scratch1(),
Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset));
- __ subq(regs_.scratch1(), Immediate(1));
- __ movq(Operand(regs_.scratch0(),
+ __ subp(regs_.scratch1(), Immediate(1));
+ __ movp(Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset),
regs_.scratch1());
__ j(negative, &need_incremental);
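
The marker check above finds the object's MemoryChunk header by masking the address down to its page start, then decrements the per-page write-barrier counter in place. A sketch of the address math, assuming pages are aligned to a power-of-two size:

    #include <cstdint>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 20;  // assumed 1 MiB pages
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

    // andp(scratch0, object) with ~kPageAlignmentMask: any interior pointer
    // maps to the chunk header at the start of its page.
    uintptr_t ChunkStart(uintptr_t object_address) {
      return object_address & ~kPageAlignmentMask;
    }
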
@@ -5241,7 +4365,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
__ bind(&on_black);
// Get the value from the slot.
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
@@ -5264,13 +4388,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
- __ push(regs_.object());
+ __ Push(regs_.object());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
&need_incremental_pop_object,
Label::kNear);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
@@ -5284,7 +4408,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
__ bind(&need_incremental);
@@ -5310,9 +4434,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Get array literal index, array literal and its map.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rdx, args.GetArgumentOperand(1));
- __ movq(rbx, args.GetArgumentOperand(0));
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+ __ movp(rdx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(0));
+ __ movp(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ CheckFastElements(rdi, &double_elements);
@@ -5325,22 +4449,22 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ bind(&slow_elements);
__ PopReturnAddressTo(rdi);
- __ push(rbx);
- __ push(rcx);
- __ push(rax);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ push(rdx);
+ __ Push(rbx);
+ __ Push(rcx);
+ __ Push(rax);
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(rdx);
__ PushReturnAddressFrom(rdi);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
+ __ movp(Operand(rcx, 0), rax);
// Update the write barrier for the array store.
__ RecordWrite(rbx, rcx, rax,
kDontSaveFPRegs,
@@ -5352,15 +4476,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// FAST_*_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize), rax);
__ ret(0);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
- __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(r9, FieldOperand(rbx, JSObject::kElementsOffset));
__ SmiToInteger32(r11, rcx);
__ StoreNumberToDoubleElements(rax,
r9,
@@ -5372,42 +4496,24 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ movq(rbx, MemOperand(rbp, parameter_count_offset));
+ __ movp(rbx, MemOperand(rbp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ PopReturnAddressTo(rcx);
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
? kPointerSize
: 0;
- __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
+ __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
-void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ movq(rdi, rax);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ movq(rax, MemOperand(rbp, parameter_count_offset));
- // The parameter count above includes the receiver for the arguments passed to
- // the deoptimization handler. Subtract the receiver for the parameter count
- // for the call.
- __ subl(rax, Immediate(1));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ParameterCount argument_count(rax);
- __ InvokeFunction(
- rdi, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- ProfileEntryHookStub stub;
+ ProfileEntryHookStub stub(masm->isolate());
masm->CallStub(&stub);
}
}
@@ -5417,22 +4523,23 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// This stub can be called from essentially anywhere, so it needs to save
// all volatile and callee-save registers.
const size_t kNumSavedRegisters = 2;
- __ push(arg_reg_1);
- __ push(arg_reg_2);
+ __ pushq(arg_reg_1);
+ __ pushq(arg_reg_2);
// Calculate the original stack pointer and store it in the second arg.
- __ lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
+ __ leap(arg_reg_2,
+ Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
// Calculate the function address and store it in the first arg.
- __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
// Save the remainder of the volatile registers.
masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
// Call the entry hook function.
- __ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
- RelocInfo::NONE64);
+ __ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()),
+ Assembler::RelocInfoNone());
AllowExternalCallThatCantCauseGC scope(masm);
@@ -5442,8 +4549,8 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Restore volatile regs.
masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
- __ pop(arg_reg_2);
- __ pop(arg_reg_1);
+ __ popq(arg_reg_2);
+ __ popq(arg_reg_1);
__ Ret();
}
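
Note the pushq/popq here rather than Push/Pop: the saved arg registers hold raw machine words, not tagged values, so the stub uses the explicit 64-bit forms. The original-rsp computation likewise switches from kPointerSize to kRegisterSize plus kPCOnStackSize; a sketch of the stack layout, assuming 8-byte registers and a call-pushed return address:

    #include <cstdint>

    constexpr int kRegisterSize = 8;       // x64 GP register width
    constexpr int kPCOnStackSize = 8;      // return address pushed by `call`
    constexpr int kNumSavedRegisters = 2;  // arg_reg_1, arg_reg_2

    // rsp after the two pushq: saved regs sit below the return address,
    // which sits below the caller's stack pointer.
    uintptr_t OriginalRsp(uintptr_t rsp) {
      return rsp + kNumSavedRegisters * kRegisterSize + kPCOnStackSize;
    }
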
@@ -5453,9 +4560,7 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
- mode);
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -5465,7 +4570,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmpl(rdx, Immediate(kind));
__ j(not_equal, &next);
- T stub(kind);
+ T stub(masm->isolate(), kind);
__ TailCallStub(&stub);
__ bind(&next);
}
@@ -5480,7 +4585,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // rbx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // rbx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
// rax - number of arguments
// rdi - constructor?
@@ -5506,41 +4611,41 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ movp(rcx, args.GetArgumentOperand(0));
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ incl(rdx);
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+
if (FLAG_debug_code) {
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ Assert(equal, kExpectedAllocationSiteInCell);
+ __ Cmp(FieldOperand(rbx, 0), allocation_site_map);
+ __ Assert(equal, kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store rdx
// in the AllocationSite::transition_info field because the elements kind is
// restricted to a portion of the field; the upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ SmiAddConstant(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset),
+ __ SmiAddConstant(FieldOperand(rbx, AllocationSite::kTransitionInfoOffset),
Smi::FromInt(kFastElementsKindPackedToHoley));
__ bind(&normal_sequence);
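
SmiAddConstant can flip the packed kind to its holey twin without a full read-modify-write of the bitfield because the fast packed kinds are even and each holey variant is packed + 1, so adding the delta only sets the low bit and never carries into the upper bits of transition_info. A sketch under those assumptions, modeling the untagged payload (the stub adds the Smi-tagged constant):

    #include <cstdint>

    constexpr uint32_t kFastElementsKindPackedToHoley = 1;  // assumed kind order

    uint32_t ToHoleyTransitionInfo(uint32_t transition_info) {
      // Packed kinds are even, so +1 cannot carry past ElementsKindBits.
      return transition_info + kFastElementsKindPackedToHoley;
    }
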
@@ -5551,7 +4656,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmpl(rdx, Immediate(kind));
__ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
__ TailCallStub(&stub);
__ bind(&next);
}
@@ -5566,20 +4671,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
}
}
}
@@ -5600,12 +4700,12 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate);
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
}
}
@@ -5615,7 +4715,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
AllocationSiteOverrideMode mode) {
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
@@ -5641,21 +4741,17 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
+ // -- rbx : AllocationSite or undefined
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// A NULL pointer and a Smi will both fail the following smi check.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
@@ -5663,31 +4759,21 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in rbx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), cell_map);
- __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
- __ bind(&okay_here);
+ // We should either have undefined in rbx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(rbx);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ Cmp(rbx, undefined_sentinel);
+ // If the feedback vector is the undefined value call an array constructor
+ // that doesn't use AllocationSites.
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &no_info);
- __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
- __ Cmp(FieldOperand(rdx, 0),
- masm->isolate()->factory()->allocation_site_map());
- __ j(not_equal, &no_info);
// Only look at the lower 16 bits of the transition info.
- __ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
+ __ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
+ __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
@@ -5700,9 +4786,9 @@ void InternalArrayConstructorStub::GenerateCase(
Label not_zero_case, not_one_case;
Label normal_sequence;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
- InternalArrayNoArgumentConstructorStub stub0(kind);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0);
__ bind(&not_zero_case);
@@ -5713,21 +4799,21 @@ void InternalArrayConstructorStub::GenerateCase(
// We might need to create a holey array
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ movp(rcx, args.GetArgumentOperand(0));
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey);
}
__ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(kind);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
__ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
__ TailCallStub(&stubN);
}
@@ -5735,7 +4821,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
@@ -5746,7 +4831,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// A NULL pointer and a Smi will both fail the following smi check.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
@@ -5756,14 +4841,13 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
// Figure out the right elements kind
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into rcx. We only need the first byte,
// but the following masking takes care of that anyway.
- __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
- __ shr(rcx, Immediate(Map::kElementsKindShift));
+ __ DecodeField<Map::ElementsKindBits>(rcx);
if (FLAG_debug_code) {
Label done;
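
DecodeField<Map::ElementsKindBits> folds the old and+shr pair into one helper driven by the BitField's shift and mask. A self-contained sketch of the BitField mechanics (the field placement shown here is hypothetical):

    #include <cstdint>

    template <int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static uint32_t decode(uint32_t value) { return (value & kMask) >> kShift; }
    };

    // Hypothetical placement of the elements kind inside Map::bit_field2.
    using ElementsKindBits = BitField<3, 5>;

    uint32_t DecodeElementsKind(uint32_t bit_field2) {
      return ElementsKindBits::decode(bit_field2);  // replaces and_ + shr
    }
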
@@ -5785,6 +4869,185 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : callee
+ // -- rbx : call_data
+ // -- rcx : holder
+ // -- rdx : api_function_address
+ // -- rsi : context
+ // --
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -- ...
+ // -- rsp[argc * 8] : first argument
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ Register callee = rax;
+ Register call_data = rbx;
+ Register holder = rcx;
+ Register api_function_address = rdx;
+ Register return_address = rdi;
+ Register context = rsi;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ __ PopReturnAddressTo(return_address);
+
+ // context save
+ __ Push(context);
+ // load context from callee
+ __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ Push(callee);
+
+ // call data
+ __ Push(call_data);
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ Push(scratch);
+ // return value default
+ __ Push(scratch);
+ // isolate
+ __ Move(scratch,
+ ExternalReference::isolate_address(isolate()));
+ __ Push(scratch);
+ // holder
+ __ Push(holder);
+
+ __ movp(scratch, rsp);
+ // Push return address back on stack.
+ __ PushReturnAddressFrom(return_address);
+
+  // Allocate the FunctionCallbackInfo structure in the arguments' space,
+  // since it is not controlled by the GC.
+ const int kApiStackSpace = 4;
+
+ __ PrepareCallApiFunction(kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_.
+ __ movp(StackSpaceOperand(0), scratch);
+ __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ __ movp(StackSpaceOperand(1), scratch); // FunctionCallbackInfo::values_.
+ __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Set(StackSpaceOperand(3), 0);
+
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register arguments_arg = rcx;
+ Register callback_arg = rdx;
+#else
+ Register arguments_arg = rdi;
+ Register callback_arg = rsi;
+#endif
+
+ // It's okay if api_function_address == callback_arg
+ // but not arguments_arg
+ ASSERT(!api_function_address.is(arguments_arg));
+
+  // v8::FunctionCallback's argument.
+ __ leap(arguments_arg, StackSpaceOperand(0));
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ // Accessor for FunctionCallbackInfo and first js arg.
+ StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
+ FCA::kArgsLength - FCA::kContextSaveIndex);
+  // For stores the return value is the first js argument.
+ Operand return_value_operand = args_from_rbp.GetArgumentOperand(
+ is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(
+ api_function_address,
+ thunk_ref,
+ callback_arg,
+ argc + FCA::kArgsLength + 1,
+ return_value_operand,
+ &context_restore_operand);
+}
+
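
The seven STATIC_ASSERTs pin down the implicit-argument block the stub pushes: implicit_args_ points at its base (the holder), and values_ must point at the first JS argument, the highest-addressed slot. A sketch of that pointer arithmetic, assuming 8-byte stack slots:

    #include <cstdint>

    constexpr int kFCAArgsLength = 7;  // holder .. context, per the asserts
    constexpr int kSlotSize = 8;

    // StackSpaceOperand(1): values_ = implicit_args + (argc + 7 - 1) slots.
    uintptr_t ValuesPointer(uintptr_t implicit_args, int argc) {
      return implicit_args + (argc + kFCAArgsLength - 1) * kSlotSize;
    }
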
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- ...
+ // -- r8 : api_function_address
+ // -----------------------------------
+
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register getter_arg = r8;
+ Register accessor_info_arg = rdx;
+ Register name_arg = rcx;
+#else
+ Register getter_arg = rdx;
+ Register accessor_info_arg = rsi;
+ Register name_arg = rdi;
+#endif
+ Register api_function_address = r8;
+ Register scratch = rax;
+
+  // Space for the PropertyCallbackArguments values and a handle for the name.
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Allocate v8::AccessorInfo in non-GCed stack space.
+ const int kArgStackSpace = 1;
+
+ __ leap(name_arg, Operand(rsp, kPCOnStackSize));
+
+ __ PrepareCallApiFunction(kArgStackSpace);
+ __ leap(scratch, Operand(name_arg, 1 * kPointerSize));
+
+ // v8::PropertyAccessorInfo::args_.
+ __ movp(StackSpaceOperand(0), scratch);
+
+ // The context register (rsi) has been saved in PrepareCallApiFunction and
+ // could be used to pass arguments.
+ __ leap(accessor_info_arg, StackSpaceOperand(0));
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // It's okay if api_function_address == getter_arg
+ // but not accessor_info_arg or name_arg
+ ASSERT(!api_function_address.is(accessor_info_arg) &&
+ !api_function_address.is(name_arg));
+
+ // The name handler is counted as an argument.
+ StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
+ Operand return_value_operand = args.GetArgumentOperand(
+ PropertyCallbackArguments::kArgsLength - 1 -
+ PropertyCallbackArguments::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ getter_arg,
+ kStackSpace,
+ return_value_operand,
+ NULL);
+}
+
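
The getter stub reuses the PropertyCallbackArguments block already on the stack: the name handle sits directly above the return address, so its address is rsp + kPCOnStackSize, and the info's args_ pointer is one slot above that. A sketch of those two addresses, assuming 8-byte slots:

    #include <cstdint>

    constexpr int kPCOnStackSize = 8;
    constexpr int kSlot = 8;

    // leap(name_arg, Operand(rsp, kPCOnStackSize))
    uintptr_t NameHandleAddress(uintptr_t rsp) { return rsp + kPCOnStackSize; }

    // leap(scratch, Operand(name_arg, 1 * kPointerSize))
    uintptr_t ArgsPointer(uintptr_t name_addr) { return name_addr + kSlot; }
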
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/x64/code-stubs-x64.h b/chromium/v8/src/x64/code-stubs-x64.h
index 7a3f6a68691..7f9420c3bc2 100644
--- a/chromium/v8/src/x64/code-stubs-x64.h
+++ b/chromium/v8/src/x64/code-stubs-x64.h
@@ -1,35 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_CODE_STUBS_X64_H_
#define V8_X64_CODE_STUBS_X64_H_
-#include "ic-inl.h"
-#include "type-info.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
@@ -37,35 +13,10 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- explicit TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- static void GenerateOperation(MacroAssembler* masm,
- TranscendentalCache::Type type);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
@@ -82,38 +33,15 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
+ // not supported.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
- bool ascii);
+ String::Encoding encoding);
- // Generate code for copying characters using the rep movs instruction.
- // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be rdi.
- Register src, // Must be rsi.
- Register count, // Must be rcx.
- bool ascii);
-
-
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register rax.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
@@ -133,34 +61,9 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm, Register temp);
-
- const StringAddFlags flags_;
-};
-
-
class SubStringStub: public PlatformCodeStub {
public:
- SubStringStub() {}
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() { return SubString; }
@@ -172,7 +75,7 @@ class SubStringStub: public PlatformCodeStub {
class StringCompareStub: public PlatformCodeStub {
public:
- StringCompareStub() {}
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
// Compares two flat ASCII strings and returns result in rax.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -211,11 +114,16 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- NameDictionaryLookupStub(Register dictionary,
+ NameDictionaryLookupStub(Isolate* isolate,
+ Register dictionary,
Register result,
Register index,
LookupMode mode)
- : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+ : PlatformCodeStub(isolate),
+ dictionary_(dictionary),
+ result_(result),
+ index_(index),
+ mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -271,12 +179,14 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
class RecordWriteStub: public PlatformCodeStub {
public:
- RecordWriteStub(Register object,
+ RecordWriteStub(Isolate* isolate,
+ Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
- : object_(object),
+ : PlatformCodeStub(isolate),
+ object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
@@ -379,20 +289,20 @@ class RecordWriteStub: public PlatformCodeStub {
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Push(scratch0_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->push(rcx);
+ masm->Push(rcx);
}
- masm->push(scratch1_);
+ masm->Push(scratch1_);
if (!address_.is(address_orig_)) {
- masm->push(address_);
- masm->movq(address_, address_orig_);
+ masm->Push(address_);
+ masm->movp(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
- masm->push(object_);
- masm->movq(object_, object_orig_);
+ masm->Push(object_);
+ masm->movp(object_, object_orig_);
}
}
@@ -401,20 +311,20 @@ class RecordWriteStub: public PlatformCodeStub {
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with rcx.
if (!object_.is(object_orig_)) {
- masm->movq(object_orig_, object_);
- masm->pop(object_);
+ masm->movp(object_orig_, object_);
+ masm->Pop(object_);
}
if (!address_.is(address_orig_)) {
- masm->movq(address_orig_, address_);
- masm->pop(address_);
+ masm->movp(address_orig_, address_);
+ masm->Pop(address_);
}
- masm->pop(scratch1_);
+ masm->Pop(scratch1_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->pop(rcx);
+ masm->Pop(rcx);
}
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
@@ -475,7 +385,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/chromium/v8/src/x64/codegen-x64.cc b/chromium/v8/src/x64/codegen-x64.cc
index afe0e3b7f52..0f939d98dff 100644
--- a/chromium/v8/src/x64/codegen-x64.cc
+++ b/chromium/v8/src/x64/codegen-x64.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "codegen.h"
-#include "macro-assembler.h"
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -55,68 +32,24 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) {
- // Fallback to library function if function cannot be created.
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- }
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- // Move double input into registers.
- __ push(rbx);
- __ push(rdi);
- __ movq(rbx, xmm0);
- __ push(rbx);
- __ fld_d(Operand(rsp, 0));
- TranscendentalCacheStub::GenerateOperation(&masm, type);
- // The return value is expected to be in xmm0.
- __ fstp_d(Operand(rsp, 0));
- __ pop(rbx);
- __ movq(xmm0, rbx);
- __ pop(rdi);
- __ pop(rbx);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// xmm0: raw double input.
XMMRegister input = xmm0;
XMMRegister result = xmm1;
- __ push(rax);
- __ push(rbx);
+ __ pushq(rax);
+ __ pushq(rbx);
MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
- __ pop(rbx);
- __ pop(rax);
+ __ popq(rbx);
+ __ popq(rax);
__ movsd(xmm0, result);
__ Ret();
@@ -136,7 +69,7 @@ UnaryMathFunction CreateSqrtFunction() {
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
- if (buffer == NULL) return &sqrt;
+ if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// xmm0: raw double input.
@@ -175,10 +108,10 @@ ModuloFunction CreateModuloFunction() {
// Compute x mod y.
// Load y and x (use argument backing store as temporary storage).
- __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
- __ movsd(Operand(rsp, kPointerSize), xmm0);
- __ fld_d(Operand(rsp, kPointerSize * 2));
- __ fld_d(Operand(rsp, kPointerSize));
+ __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
+ __ movsd(Operand(rsp, kRegisterSize), xmm0);
+ __ fld_d(Operand(rsp, kRegisterSize * 2));
+ __ fld_d(Operand(rsp, kRegisterSize));
// Clear exception flags before operation.
{
@@ -214,14 +147,14 @@ ModuloFunction CreateModuloFunction() {
__ fstp(0); // Drop result in st(0).
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
__ movq(rcx, kNaNValue);
- __ movq(Operand(rsp, kPointerSize), rcx);
- __ movsd(xmm0, Operand(rsp, kPointerSize));
+ __ movq(Operand(rsp, kRegisterSize), rcx);
+ __ movsd(xmm0, Operand(rsp, kRegisterSize));
__ jmp(&return_result);
// If result is valid, return that.
__ bind(&valid_result);
- __ fstp_d(Operand(rsp, kPointerSize));
- __ movsd(xmm0, Operand(rsp, kPointerSize));
+ __ fstp_d(Operand(rsp, kRegisterSize));
+ __ movsd(xmm0, Operand(rsp, kRegisterSize));
// Clean up FPU stack and exceptions and return xmm0
__ bind(&return_result);
@@ -267,7 +200,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -296,34 +229,42 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
- // Check backing store for COW-ness. For COW arrays we have to
- // allocate a new backing store.
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &new_backing_store);
+ if (kPointerSize == kDoubleSize) {
+ // Check backing store for COW-ness. For COW arrays we have to
+ // allocate a new backing store.
+ __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &new_backing_store);
+ } else {
+    // For the x32 port we have to allocate a new backing store, as the SMI
+    // size is not equal to the double size.
+ ASSERT(kDoubleSize == 2 * kPointerSize);
+ __ jmp(&new_backing_store);
+ }
+
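
This split is the one place the Smi-to-double transition genuinely depends on slot width: the old backing store can only be reused when a tagged slot and a double occupy the same number of bytes. A sketch of that predicate:

    constexpr int kPointerSize = sizeof(void*);  // 4 on x32, 8 on x64
    constexpr int kDoubleSize = sizeof(double);  // 8 on both

    // The COW check and in-place reuse apply only when the sizes match;
    // otherwise (x32) a fresh FixedDoubleArray is always allocated.
    constexpr bool kCanConvertInPlace = (kPointerSize == kDoubleSize);
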
// Check if the backing store is in new-space. If not, we need to allocate
// a new one since the old one is in pointer-space.
// If in new space, we can reuse the old backing store because it is
// the same size.
__ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
- __ movq(r14, r8); // Destination array equals source array.
+ __ movp(r14, r8); // Destination array equals source array.
// r8 : source FixedArray
// r9 : elements array length
// r14: destination FixedDoubleArray
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
__ bind(&allocated);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -344,14 +285,14 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new backing store.
__ bind(&new_backing_store);
- __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
__ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
// Set receiver's backing store.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
- __ movq(r11, r14);
+ __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+ __ movp(r11, r14);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
@@ -361,12 +302,12 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_SMI_CHECK);
// Set backing store's length.
__ Integer32ToSmi(r11, r9);
- __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+ __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
__ jmp(&allocated);
__ bind(&only_change_map);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -378,7 +319,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Conversion loop.
__ bind(&loop);
- __ movq(rbx,
+ __ movp(rbx,
FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
// r9 : current element's index
// rbx: current element (smi-tagged)
@@ -397,7 +338,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
__ bind(&done);
@@ -421,23 +362,23 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
- __ push(rax);
+ __ Push(rax);
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
// r8 : source FixedDoubleArray
// r9 : number of elements
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
__ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
__ Integer32ToSmi(r14, r9);
- __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+ __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
// Prepare for conversion loop.
__ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
@@ -448,8 +389,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Call into runtime if GC is required.
__ bind(&gc_required);
- __ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Pop(rax);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(fail);
// Box doubles into heap numbers.
@@ -466,13 +407,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(rax, r15, &gc_required);
// rax: new heap number
- __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
- __ movq(FieldOperand(r11,
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
rax);
- __ movq(r15, r9);
+ __ movp(r15, r9);
__ RecordWriteArray(r11,
rax,
r15,
@@ -483,18 +424,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ movq(FieldOperand(r11,
+ __ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
rdi);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+ __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
@@ -502,12 +443,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Pop(rax);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&only_change_map);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
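
The boxing loop above tells real doubles from holes by comparing raw bit patterns: kHoleNanInt64 (loaded into rsi earlier in this generator) is a NaN payload reserved for array holes, and since NaN never compares equal to itself as a double, the test has to be an integer compare. A minimal standalone sketch of that test; the bit pattern below is illustrative, the real constant is defined in the V8 headers:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Illustrative stand-in for V8's kHoleNanInt64; the authoritative value is a
// specific quiet-NaN payload defined in the V8 headers.
const uint64_t kHoleNanInt64 = 0x7FF7FFFFFFF7FFFFull;

bool IsHole(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);   // reinterpret the bits, don't convert
  return bits == kHoleNanInt64;     // raw-bit compare; (d == d) would be false
}

int main() {
  double hole;
  memcpy(&hole, &kHoleNanInt64, sizeof hole);
  printf("%d %d\n", IsHole(hole), IsHole(1.5));  // prints: 1 0
}
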
@@ -524,7 +465,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
@@ -540,8 +481,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addq(index, result);
- __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+ __ addp(index, result);
+ __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded, Label::kNear);
// Handle cons strings.
@@ -553,10 +494,10 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, call_runtime);
- __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+ __ movp(string, FieldOperand(string, ConsString::kFirstOffset));
__ bind(&indirect_string_loaded);
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
@@ -577,13 +518,13 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Assert(zero, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ testb(result, Immediate(kShortExternalStringTag));
__ j(not_zero, call_runtime);
// Check encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
__ testb(result, Immediate(kStringEncodingMask));
- __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+ __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
__ j(not_equal, &ascii_external, Label::kNear);
// Two-byte string.
__ movzxwl(result, Operand(result, index, times_2, 0));
@@ -650,13 +591,13 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ movq(temp2, double_scratch);
__ subsd(double_scratch, result);
__ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
- __ lea(temp1, Operand(temp2, 0x1ff800));
- __ and_(temp2, Immediate(0x7ff));
- __ shr(temp1, Immediate(11));
+ __ leaq(temp1, Operand(temp2, 0x1ff800));
+ __ andq(temp2, Immediate(0x7ff));
+ __ shrq(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
- __ shl(temp1, Immediate(52));
- __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+ __ shlq(temp1, Immediate(52));
+ __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
__ movsd(input, double_scratch);
@@ -675,37 +616,36 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(rbp);
- patcher.masm()->movq(rbp, rsp);
- patcher.masm()->push(rsi);
- patcher.masm()->push(rdi);
- initialized = true;
- }
- return sequence;
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ patcher.masm()->pushq(rbp);
+ patcher.masm()->movp(rbp, rsp);
+ patcher.masm()->Push(rsi);
+ patcher.masm()->Push(rdi);
}
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return *candidate == kCallOpcode;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
+ if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
@@ -722,10 +662,9 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
- CopyBytes(sequence, young_sequence, young_length);
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
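
The CodeAgingHelper rewrite above keeps the same underlying idea: a function is "young" while its entry still holds the boilerplate prologue, and "old" once that prologue has been patched into a call (kCallOpcode). A standalone sketch of the byte-level check, with illustrative x64 encodings; the authoritative young sequence is whatever the CodePatcher emits for pushq rbp; movp rbp, rsp; Push rsi; Push rdi, not a hand-written table like this one:

#include <cstdint>
#include <cstring>
#include <cstdio>

const uint8_t kCallOpcode = 0xE8;  // x64 call rel32; aged code starts with it

// Assumed encoding of the young prologue, for illustration only.
const uint8_t kYoungSequence[] = {
    0x55,              // pushq rbp
    0x48, 0x89, 0xE5,  // movq rbp, rsp
    0x56,              // pushq rsi
    0x57               // pushq rdi
};

bool IsYoung(const uint8_t* seq) {
  return memcmp(seq, kYoungSequence, sizeof kYoungSequence) == 0;
}

bool IsOld(const uint8_t* seq) { return seq[0] == kCallOpcode; }

int main() {
  printf("%d %d\n", IsYoung(kYoungSequence), IsOld(kYoungSequence));  // 1 0
}
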
diff --git a/chromium/v8/src/x64/codegen-x64.h b/chromium/v8/src/x64/codegen-x64.h
index 811ac507d53..5faa9878c01 100644
--- a/chromium/v8/src/x64/codegen-x64.h
+++ b/chromium/v8/src/x64/codegen-x64.h
@@ -1,78 +1,19 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
-#include "ast.h"
-#include "ic-inl.h"
+#include "src/ast.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- explicit CodeGenerator(Isolate* isolate) {
- InitializeAstVisitor(isolate);
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
diff --git a/chromium/v8/src/x64/cpu-x64.cc b/chromium/v8/src/x64/cpu-x64.cc
index 4fa290a8b5f..ca2b89b2268 100644
--- a/chromium/v8/src/x64/cpu-x64.cc
+++ b/chromium/v8/src/x64/cpu-x64.cc
@@ -1,56 +1,23 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// CPU-specific code for x64 that is independent of the OS goes here.
#if defined(__GNUC__) && !defined(__MINGW64__)
-#include "third_party/valgrind/valgrind.h"
+#include "src/third_party/valgrind/valgrind.h"
#endif
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "cpu.h"
-#include "macro-assembler.h"
+#include "src/cpu.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return true; // Yay!
-}
-
-
void CPU::FlushICache(void* start, size_t size) {
// No need to flush the instruction cache on Intel. On Intel instruction
// cache flushing is only necessary when multiple cores running the same
diff --git a/chromium/v8/src/x64/debug-x64.cc b/chromium/v8/src/x64/debug-x64.cc
index 5ddf69a414e..4703e423547 100644
--- a/chromium/v8/src/x64/debug-x64.cc
+++ b/chromium/v8/src/x64/debug-x64.cc
@@ -1,44 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "assembler.h"
-#include "codegen.h"
-#include "debug.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
@@ -50,7 +25,7 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
}
@@ -73,14 +48,14 @@ bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
- return !Assembler::IsNop(rinfo()->pc());
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(),
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
}
@@ -90,8 +65,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
#define __ ACCESS_MASM(masm)
@@ -105,10 +78,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
- for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue));
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ Push(Smi::FromInt(LiveEdit::kFramePaddingValue));
}
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize));
+ __ Push(Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@@ -121,10 +94,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
Register reg = { r };
ASSERT(!reg.is(kScratchRegister));
if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
+ __ Push(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
- __ PushInt64AsTwoSmis(reg);
+ __ PushRegisterAsTwoSmis(reg);
}
}
@@ -134,7 +107,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(rax, 0); // No arguments (argc == 0).
__ Move(rbx, ExternalReference::debug_break(masm->isolate()));
- CEntryStub ceb(1);
+ CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
@@ -145,18 +118,18 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(reg, kDebugZapValue);
}
if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
+ __ Pop(reg);
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ PopInt64AsTwoSmis(reg);
+ __ PopRegisterAsTwoSmis(reg);
}
}
// Read current padding counter and skip corresponding number of words.
- __ pop(kScratchRegister);
+ __ Pop(kScratchRegister);
__ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+ __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
// Get rid of the internal frame.
}
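
The Pop/leap pair above closes the protocol opened at the top of this helper: N padding words are pushed, then N itself as a smi, so the exit path can pop the counter and discard exactly that many slots no matter how LiveEdit resized the area in between. A sketch of the same protocol on a plain vector standing in for the stack; the initial padding size is an assumed value:

#include <cstdio>
#include <cstdint>
#include <vector>

int main() {
  std::vector<intptr_t> stack;
  const intptr_t kPaddingValue = 0;
  const int kFramePaddingInitialSize = 4;  // assumed initial padding size

  // Prologue: padding words first, then the count on top.
  for (int i = 0; i < kFramePaddingInitialSize; i++)
    stack.push_back(kPaddingValue);
  stack.push_back(kFramePaddingInitialSize);

  // ... debug-break handling runs here and may rewrite the padding area ...

  // Epilogue: pop the counter, then skip that many words in one step,
  // mirroring Pop(kScratchRegister) + leap(rsp, ...) above.
  intptr_t count = stack.back();
  stack.pop_back();
  stack.resize(stack.size() - count);

  printf("%zu\n", stack.size());  // prints: 0
}
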
@@ -164,20 +137,30 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ addq(rsp, Immediate(kPointerSize));
+ __ addp(rsp, Immediate(kPCOnStackSize));
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
+ ExternalReference::debug_after_break_target_address(masm->isolate());
__ Move(kScratchRegister, after_break_target);
- __ jmp(Operand(kScratchRegister, 0));
+ __ Jump(Operand(kScratchRegister, 0));
}
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- rdx : type feedback slot (smi)
+ // -- rdi : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, rdx.bit() | rdi.bit(), 0, false);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : receiver
@@ -187,7 +170,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Register state for IC store call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : value
@@ -199,7 +182,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : key
@@ -209,7 +192,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
  // Register state for keyed IC store call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : value
@@ -221,7 +204,7 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- rax : value
@@ -230,16 +213,7 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for IC call call (from ic-x64.cc)
- // ----------- S t a t e -------------
- // -- rcx: function name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rcx.bit(), 0, false);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-x64.cc).
// ----------- S t a t e -------------
// -- rax: return value
@@ -248,7 +222,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-x64.cc).
// ----------- S t a t e -------------
// -- rdi : function
@@ -257,17 +231,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-x64.cc).
- // ----------- S t a t e -------------
- // -- rdi : function
- // -- rbx: cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Register state for CallConstructStub (from code-stubs-x64.cc).
  // rax is the actual number of arguments, not encoded as a smi; see the
  // comment above the IC call.
@@ -279,20 +243,23 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
// Register state for CallConstructStub (from code-stubs-x64.cc).
  // rax is the actual number of arguments, not encoded as a smi; see the
  // comment above the IC call.
// ----------- S t a t e -------------
// -- rax: number of arguments
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: feedback slot (smi)
// -----------------------------------
// The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ rax.bit(), false);
}
-void Debug::GenerateSlot(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nop's to make space for a call instruction.
Label check_codesize;
__ bind(&check_codesize);
@@ -303,49 +270,47 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
}
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
  // In the places where a debug break slot is inserted, no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->ret(0);
}
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
- masm->isolate());
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
__ Move(rax, restarter_frame_function_slot);
- __ movq(Operand(rax, 0), Immediate(0));
+ __ movp(Operand(rax, 0), Immediate(0));
// We do not know our frame height, but set rsp based on rbp.
- __ lea(rsp, Operand(rbp, -1 * kPointerSize));
+ __ leap(rsp, Operand(rbp, -1 * kPointerSize));
- __ pop(rdi); // Function.
- __ pop(rbp);
+ __ Pop(rdi); // Function.
+ __ popq(rbp);
// Load context from the function.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Get function code.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context.
__ jmp(rdx);
}
-const bool Debug::kFrameDropperSupported = true;
+const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-#endif // ENABLE_DEBUGGER_SUPPORT
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/chromium/v8/src/x64/deoptimizer-x64.cc b/chromium/v8/src/x64/deoptimizer-x64.cc
index ae180ec59b4..ae3a8242922 100644
--- a/chromium/v8/src/x64/deoptimizer-x64.cc
+++ b/chromium/v8/src/x64/deoptimizer-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
@@ -51,6 +28,26 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
  // For each LLazyBailout instruction insert an absolute call to the
// corresponding deoptimization entry, or a short call to an absolute
// jump if space is short. The absolute jumps are put in a table just
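
The --zap-code-space block above makes re-entry into deoptimized code fail fast by stamping an int3 at the first byte that will actually execute, skipping past the code-age sequence when one exists (plus a second int3 at the OSR entry). A minimal sketch of the patching step; the sequence length is an assumption here, and no attempt is made to model V8's CodePatcher:

#include <cstdint>
#include <cstdio>

const uint8_t kInt3 = 0xCC;              // x64 breakpoint instruction
const int kNoCodeAgeSequenceLength = 6;  // assumed prologue length

// code: start of the instruction stream; age_offset: offset of the code-age
// sequence, or -1 when the code object has none.
void ZapEntry(uint8_t* code, int age_offset) {
  uint8_t* p = code;
  if (age_offset >= 0) p += age_offset + kNoCodeAgeSequenceLength;
  *p = kInt3;  // executing this byte now raises a trap immediately
}

int main() {
  uint8_t buffer[16] = {0};
  ZapEntry(buffer, -1);
  printf("0x%02X\n", buffer[0]);  // prints: 0xCC
}
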
@@ -63,6 +60,12 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
#endif
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+ deopt_data->SetSharedFunctionInfo(Smi::FromInt(0));
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Position where Call will be patched in.
@@ -71,7 +74,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// LLazyBailout instructions with nops if necessary.
CodePatcher patcher(call_address, Assembler::kCallSequenceLength);
patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
- RelocInfo::NONE64);
+ Assembler::RelocInfoNone());
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -97,7 +100,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ input_->SetFrameSlot(i, Memory::uintptr_at(tos + i));
}
}
@@ -126,11 +129,6 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
-Code* Deoptimizer::NotifyStubFailureBuiltin() {
- return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
-}
-
-
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
@@ -141,7 +139,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::NumAllocatableRegisters();
- __ subq(rsp, Immediate(kDoubleRegsSize));
+ __ subp(rsp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
@@ -153,10 +151,10 @@ void Deoptimizer::EntryGenerator::Generate() {
// to restore all later.
for (int i = 0; i < kNumberOfRegisters; i++) {
Register r = Register::from_code(i);
- __ push(r);
+ __ pushq(r);
}
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
kDoubleRegsSize;
// We use this to keep the value of the fifth argument temporarily.
@@ -165,32 +163,32 @@ void Deoptimizer::EntryGenerator::Generate() {
Register arg5 = r11;
// Get the bailout id from the stack.
- __ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
+ __ movp(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- __ movq(arg_reg_4,
- Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
+ __ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
+ __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
+ kPCOnStackSize));
- __ subq(arg5, rbp);
- __ neg(arg5);
+ __ subp(arg5, rbp);
+ __ negp(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(arg_reg_1, rax);
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(arg_reg_1, rax);
__ Set(arg_reg_2, type());
// Args 3 and 4 are already in the right registers.
  // On Windows, put the arguments on the stack (PrepareCallCFunction
  // has created space for this). On Linux, pass the arguments in r8 and r9.
#ifdef _WIN64
- __ movq(Operand(rsp, 4 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 4 * kRegisterSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate()));
- __ movq(Operand(rsp, 5 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 5 * kRegisterSize), arg5);
#else
- __ movq(r8, arg5);
+ __ movp(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#endif
@@ -199,54 +197,54 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
- __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+ __ movp(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(rbx, offset));
+ __ PopQuad(Operand(rbx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
- __ pop(Operand(rbx, dst_offset));
+ __ popq(Operand(rbx, dst_offset));
}
// Remove the bailout id and return address from the stack.
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ addp(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
+ __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ addp(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
- __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ __ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
- __ pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ Pop(Operand(rdx, 0));
+ __ addp(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
+ __ cmpp(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
- __ push(rax);
+ __ pushq(rax);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, rax);
+ __ movp(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate()));
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 2);
}
- __ pop(rax);
+ __ popq(rax);
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -254,23 +252,23 @@ void Deoptimizer::EntryGenerator::Generate() {
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
+ __ movp(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ leap(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movq(rbx, Operand(rax, 0));
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ movp(rbx, Operand(rax, 0));
+ __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ subp(rcx, Immediate(sizeof(intptr_t)));
+ __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kPointerSize));
+ __ addp(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
@@ -280,14 +278,14 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- __ push(Operand(rbx, FrameDescription::state_offset()));
- __ push(Operand(rbx, FrameDescription::pc_offset()));
- __ push(Operand(rbx, FrameDescription::continuation_offset()));
+ __ Push(Operand(rbx, FrameDescription::state_offset()));
+ __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
+ __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(rbx, offset));
+ __ PushQuad(Operand(rbx, offset));
}
// Restore the registers from the stack.
@@ -299,7 +297,7 @@ void Deoptimizer::EntryGenerator::Generate() {
ASSERT(i > 0);
r = Register::from_code(i - 1);
}
- __ pop(r);
+ __ popq(r);
}
// Set up the roots register.
@@ -317,7 +315,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ push_imm32(i);
+ __ pushq_imm32(i);
__ jmp(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
@@ -326,15 +324,29 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ if (kPCOnStackSize == 2 * kPointerSize) {
+ // Zero out the high 32 bits of the PC for the x32 port.
+ SetFrameSlot(offset + kPointerSize, 0);
+ }
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ if (kFPOnStackSize == 2 * kPointerSize) {
+ // Zero out the high 32 bits of the FP for the x32 port.
+ SetFrameSlot(offset + kPointerSize, 0);
+ }
SetFrameSlot(offset, value);
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
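
SetCallerPc and SetCallerFp above encode an x32 invariant: the PC and FP each occupy a full machine word on the stack, so when pointers are only four bytes (kPCOnStackSize == 2 * kPointerSize) the upper half of the slot must be cleared before the 32-bit value is written. A standalone sketch under those assumed sizes:

#include <cstdint>
#include <cstdio>
#include <cstring>

const unsigned kPointerSize = 4;    // x32 assumption: 4-byte pointers
const unsigned kPCOnStackSize = 8;  // the PC still takes an 8-byte stack slot

void SetCallerPc(uint8_t* frame, unsigned offset, uint32_t value) {
  if (kPCOnStackSize == 2 * kPointerSize) {
    // Clear the high 32 bits so the slot reads back as a clean 64-bit value.
    memset(frame + offset + kPointerSize, 0, kPointerSize);
  }
  memcpy(frame + offset, &value, kPointerSize);
}

int main() {
  uint8_t frame[8];
  memset(frame, 0xFF, sizeof frame);  // garbage left over from earlier use
  SetCallerPc(frame, 0, 0x12345678u);
  uint64_t slot;
  memcpy(&slot, frame, sizeof slot);  // little-endian layout, as on x86
  printf("0x%016llx\n", (unsigned long long)slot);  // 0x0000000012345678
}
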
diff --git a/chromium/v8/src/x64/disasm-x64.cc b/chromium/v8/src/x64/disasm-x64.cc
index 76b541c0100..f4c5de88d62 100644
--- a/chromium/v8/src/x64/disasm-x64.cc
+++ b/chromium/v8/src/x64/disasm-x64.cc
@@ -1,40 +1,17 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "disasm.h"
-#include "lazy-instance.h"
+#include "src/base/lazy-instance.h"
+#include "src/disasm.h"
namespace disasm {
@@ -271,7 +248,7 @@ void InstructionTable::AddJumpConditionalShort() {
}
-static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
+static v8::base::LazyInstance<InstructionTable>::type instruction_table =
LAZY_INSTANCE_INITIALIZER;
@@ -453,7 +430,7 @@ void DisassemblerX64::AppendToBuffer(const char* format, ...) {
v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
va_list args;
va_start(args, format);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
+ int result = v8::internal::VSNPrintF(buf, format, args);
va_end(args);
tmp_buffer_pos_ += result;
}
@@ -485,9 +462,11 @@ int DisassemblerX64::PrintRightOperandHelper(
} else if (base == 5) {
// base == rbp means no base register (when mod == 0).
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
+ AppendToBuffer("[%s*%d%s0x%x]",
NameOfCPURegister(index),
- 1 << scale, disp);
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return 6;
} else if (index != 4 && base != 5) {
// [base+index*scale]
@@ -512,38 +491,29 @@ int DisassemblerX64::PrintRightOperandHelper(
int scale, index, base;
get_sib(sib, &scale, &index, &base);
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<char*>(modrmp + 2);
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(base), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(base), disp);
- }
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(base),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
} else {
- if (-disp > 0) {
- AppendToBuffer("[%s+%s*%d-0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- -disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- disp);
- }
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<char*>(modrmp + 1);
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(rm), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
- }
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return (mod == 2) ? 5 : 2;
}
break;
@@ -934,6 +904,7 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
+ case 0xFC: mnem = "frndint"; break;
case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
@@ -956,6 +927,8 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
has_register = true;
} else if (modrm_byte == 0xE2) {
mnem = "fclex";
+ } else if (modrm_byte == 0xE3) {
+ mnem = "fninit";
} else {
UnimplementedInstruction();
}
@@ -1093,6 +1066,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x73) {
+ current += 1;
+ ASSERT(regop == 6);
+ AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f);
+ current += 1;
} else {
const char* mnemonic = "?";
if (opcode == 0x54) {
@@ -1323,6 +1301,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (opcode == 0xBD) {
+ AppendToBuffer("%s%c ", mnemonic, operand_size_code());
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("%s,", NameOfCPURegister(regop));
+ current += PrintRightOperand(current);
} else {
UnimplementedInstruction();
}
@@ -1365,6 +1349,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "movzxb";
case 0xB7:
return "movzxw";
+ case 0xBD:
+ return "bsr";
case 0xBE:
return "movsxb";
case 0xBF:
@@ -1448,7 +1434,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 3;
break;
case OPERAND_DOUBLEWORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+ addr =
+ reinterpret_cast<byte*>(*reinterpret_cast<uint32_t*>(data + 1));
data += 5;
break;
case OPERAND_QUADWORD_SIZE:
@@ -1806,14 +1793,14 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
int outp = 0;
// Instruction bytes.
for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, "%02x", *bp);
+ outp += v8::internal::SNPrintF(out_buffer + outp, "%02x", *bp);
}
for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " ");
+ outp += v8::internal::SNPrintF(out_buffer + outp, " ");
}
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " %s",
- tmp_buffer_.start());
+ outp += v8::internal::SNPrintF(out_buffer + outp, " %s",
+ tmp_buffer_.start());
return instr_len;
}
@@ -1840,7 +1827,7 @@ static const char* xmm_regs[16] = {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
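
Two related fixes run through the disassembler hunks above: 8-bit displacements are read through int8_t so they sign-extend portably (plain char may be unsigned), and the sign is formatted separately from the magnitude so a negative offset prints as [rbp-0x8] rather than [rbp+0xfffffff8]. A standalone sketch of the combined pattern:

#include <cstdint>
#include <cstdio>

void PrintOperand(const char* reg, const uint8_t* modrmp) {
  // int8_t guarantees sign extension; char's signedness is ABI-dependent.
  int disp = *reinterpret_cast<const int8_t*>(modrmp);
  printf("[%s%s0x%x]\n", reg, disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
}

int main() {
  uint8_t neg = 0xF8;  // -8 as a signed byte
  uint8_t pos = 0x10;  // +16
  PrintOperand("rbp", &neg);  // prints: [rbp-0x8]
  PrintOperand("rbp", &pos);  // prints: [rbp+0x10]
}
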
diff --git a/chromium/v8/src/x64/frames-x64.cc b/chromium/v8/src/x64/frames-x64.cc
index 5cc27a6e12b..5513308828e 100644
--- a/chromium/v8/src/x64/frames-x64.cc
+++ b/chromium/v8/src/x64/frames-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "assembler.h"
-#include "assembler-x64.h"
-#include "assembler-x64-inl.h"
-#include "frames.h"
+#include "src/assembler.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/assembler-x64-inl.h"
+#include "src/frames.h"
namespace v8 {
namespace internal {
@@ -40,10 +17,24 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return rbp; }
Register JavaScriptFrame::context_register() { return rsi; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
Register StubFailureTrampolineFrame::fp_register() { return rbp; }
Register StubFailureTrampolineFrame::context_register() { return rsi; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
} } // namespace v8::internal
diff --git a/chromium/v8/src/x64/frames-x64.h b/chromium/v8/src/x64/frames-x64.h
index fb17964adae..88130302849 100644
--- a/chromium/v8/src/x64/frames-x64.h
+++ b/chromium/v8/src/x64/frames-x64.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_FRAMES_X64_H_
#define V8_X64_FRAMES_X64_H_
@@ -41,8 +18,6 @@ const RegList kJSCallerSaved =
const int kNumJSCallerSaved = 5;
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 16;
@@ -56,16 +31,19 @@ class EntryFrameConstants : public AllStatic {
static const int kXMMRegistersBlockSize =
kXMMRegisterSize * kCalleeSaveXMMRegisters;
static const int kCallerFPOffset =
- -10 * kPointerSize - kXMMRegistersBlockSize;
+ -3 * kPointerSize + -7 * kRegisterSize - kXMMRegistersBlockSize;
#else
- static const int kCallerFPOffset = -8 * kPointerSize;
+ // There are 3 Push and 5 pushq instructions in JSEntryStub::GenerateBody.
+ static const int kCallerFPOffset = -3 * kPointerSize + -5 * kRegisterSize;
#endif
- static const int kArgvOffset = 6 * kPointerSize;
+ static const int kArgvOffset = 6 * kPointerSize;
};
class ExitFrameConstants : public AllStatic {
public:
+ static const int kFrameSize = 2 * kPointerSize;
+
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
@@ -75,6 +53,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
+
+ static const int kConstantPoolOffset = 0; // Not used
};
@@ -128,6 +108,10 @@ inline Object* JavaScriptFrame::function_slot_object() const {
inline void StackHandler::SetFp(Address slot, Address fp) {
+ if (kFPOnStackSize == 2 * kPointerSize) {
+ // Zero out the high-32 bit of FP for x32 port.
+ Memory::Address_at(slot + kPointerSize) = 0;
+ }
Memory::Address_at(slot) = fp;
}
diff --git a/chromium/v8/src/x64/full-codegen-x64.cc b/chromium/v8/src/x64/full-codegen-x64.cc
index e4793683ee7..fa1eee6e517 100644
--- a/chromium/v8/src/x64/full-codegen-x64.cc
+++ b/chromium/v8/src/x64/full-codegen-x64.cc
@@ -1,43 +1,20 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -74,7 +51,7 @@ class JumpPatchSite BASE_EMBEDDED {
void EmitPatchInfo() {
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
+ ASSERT(is_uint8(delta_to_patch_site));
__ testl(rax, Immediate(delta_to_patch_site));
#ifdef DEBUG
info_emitted_ = true;
@@ -118,6 +95,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -132,17 +110,23 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). rcx is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetReceiverOperand(), kScratchRegister);
+ __ movp(rcx, args.GetReceiverOperand());
+
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+
+ __ movp(args.GetReceiverOperand(), rcx);
+
__ bind(&ok);
}
@@ -152,7 +136,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(BUILD_FUNCTION_FRAME);
+ __ Prologue(info->IsCodePreAgingActive());
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -162,9 +146,34 @@ void FullCodeGenerator::Generate() {
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ Label ok;
+ __ movp(rcx, rsp);
+ __ subp(rcx, Immediate(locals_count * kPointerSize));
+ __ CompareRoot(rcx, Heap::kRealStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ movp(rcx, Immediate(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ Push(rdx);
+ }
+ // Continue loop if not done.
+ __ decp(rcx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ Push(rdx);
}
}
}
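// [Editorial note, not part of the upstream diff] The new prologue first
// probes the real stack limit when 128 or more locals are allocated, then
// initializes locals in batches of kMaxPushes to bound the emitted code size.
// A worked example of the split, assuming locals_count = 100:
//   int loop_iterations = 100 / 32;  // 3 loop iterations of 32 pushes each
//   int remaining       = 100 % 32;  // 4 straight-line pushes afterwards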
@@ -175,21 +184,26 @@ void FullCodeGenerator::Generate() {
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(rdi);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ // Context is returned in rax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in rsi.
+ __ movp(rsi, rax);
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
@@ -199,13 +213,20 @@ void FullCodeGenerator::Generate() {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
+ __ movp(rax, Operand(rbp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
+ __ movp(Operand(rsi, context_offset), rax);
// Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(
- rsi, context_offset, rax, rbx, kDontSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ rsi, context_offset, rax, rbx, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
}
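// [Editorial note, not part of the upstream diff] need_write_barrier can be
// cleared only on the FastNewContextStub path because that stub allocates the
// context in new space, where the generational write barrier is unnecessary;
// the FLAG_debug_code branch above asserts exactly that invariant. The two
// runtime fallbacks may return an old-space context, so they keep the barrier.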
@@ -217,30 +238,30 @@ void FullCodeGenerator::Generate() {
// case the "arguments" or ".arguments" variables are in the context.
Comment cmnt(masm_, "[ Allocate arguments object");
if (function_in_register) {
- __ push(rdi);
+ __ Push(rdi);
} else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ lea(rdx,
+ __ leap(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
+ __ Push(rdx);
__ Push(Smi::FromInt(num_parameters));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(type);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, rax, rbx, rdx);
@@ -264,7 +285,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -273,11 +294,11 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
}
{ Comment cmnt(masm_, "[ Body");
@@ -302,7 +323,7 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
+ __ Move(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ SmiAddConstant(FieldOperand(rbx, Cell::kValueOffset),
Smi::FromInt(-delta));
}
@@ -310,40 +331,41 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing; if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
+ __ Move(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ Move(kScratchRegister, Smi::FromInt(reset_value));
- __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
+ __ movp(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
}
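// [Editorial note, not part of the upstream diff] The profiling counter is a
// Smi stored in a heap Cell; decrementing it by a code-size-derived weight
// and checking the sign is what drives interrupt checks and tier-up. A rough
// sketch of the bookkeeping these two helpers implement, in plain C++:
//   counter -= delta;                 // EmitProfilingCounterDecrement
//   if (counter <= 0) Interrupt();    // back-edge / return check
//   counter = FLAG_interrupt_budget;  // EmitProfilingCounterReset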
+static const byte kJnsOffset = kPointerSize == kInt64Size ? 0x1d : 0x14;
+
+
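// [Editorial note, not part of the upstream diff] kJnsOffset is the byte
// offset of the jns ("jump if not sign") instruction inside the back-edge
// sequence: 0x1d on x64 and 0x14 on x32, where the shorter pointer-sized
// encodings shrink the preceding code. It sizes the PredictableCodeSizeScope
// below so back-edge patching can rely on a fixed instruction layout.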
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
-  RecordBackEdge(stmt->OsrEntryId());
-  EmitProfilingCounterReset();
+  __ j(positive, &ok, Label::kNear);
+  {
+    PredictableCodeSizeScope predictable_code_size_scope(masm_, kJnsOffset);
+    DontEmitDebugCodeScope dont_emit_debug_code_scope(masm_);
+    __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+ EmitProfilingCounterReset();
+ }
__ bind(&ok);
+
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
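// [Editorial note, not part of the upstream diff] With the weighted-back-edge
// flag removed, every back edge now derives its interrupt weight from the
// code distance it spans. A worked example, assuming distance = 480 bytes,
// kCodeSizeMultiplier = 162 and kMaxBackEdgeWeight = 127 (values this sketch
// assumes rather than quotes):
//   int weight = Min(127, Max(1, 480 / 162));  // == 2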
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
@@ -359,34 +381,27 @@ void FullCodeGenerator::EmitReturnSequence() {
} else {
__ bind(&return_label_);
if (FLAG_trace) {
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(rax);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(rax);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ __ Push(rax);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(rax);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
@@ -396,18 +411,18 @@ void FullCodeGenerator::EmitReturnSequence() {
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
- __ movq(rsp, rbp);
- __ pop(rbp);
+ __ movp(rsp, rbp);
+ __ popq(rbp);
int no_frame_start = masm_->pc_offset();
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, rcx);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated at least 7 bytes: "movq rsp, rbp; pop rbp; ret k"
- // (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
+ // have just generated at least 7 bytes: "movp rsp, rbp; pop rbp; ret k"
+ // (3 + 1 + 3) for x64 and at least 6 (2 + 1 + 3) bytes for x32.
+  const int kPadding = Assembler::kJSReturnSequenceLength -
+      (kPointerSize == kInt64Size ? 7 : 6);
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
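// [Editorial note, not part of the upstream diff] The padding must extend the
// just-emitted "mov rsp,rbp; pop rbp; ret k" sequence (7 bytes on x64, 6 on
// x32) up to Assembler::kJSReturnSequenceLength so that a debugger breakpoint
// can be patched over it. Spelled out with explicit grouping:
//   const int kEmitted = (kPointerSize == kInt64Size) ? 7 : 6;
//   const int kPadding = Assembler::kJSReturnSequenceLength - kEmitted;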
@@ -415,7 +430,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
+
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -435,7 +450,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
- __ push(operand);
+ __ Push(operand);
}
@@ -553,7 +568,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
ASSERT(count > 0);
if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
+ __ movp(Operand(rsp, 0), reg);
}
@@ -644,8 +659,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
- __ testq(result_register(), result_register());
+ CallIC(ic, condition->test_id());
+ __ testp(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -696,7 +711,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
void FullCodeGenerator::GetVar(Register dest, Variable* var) {
ASSERT(var->IsContextSlot() || var->IsStackAllocated());
MemOperand location = VarOperand(var, dest);
- __ movq(dest, location);
+ __ movp(dest, location);
}
@@ -709,7 +724,7 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch0.is(scratch1));
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
- __ movq(location, src);
+ __ movp(location, src);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
@@ -744,7 +759,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
- __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
__ Check(not_equal, kDeclarationInWithContext);
__ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
@@ -761,7 +776,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -776,7 +791,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(StackOperand(variable), kScratchRegister);
+ __ movp(StackOperand(variable), kScratchRegister);
}
break;
@@ -785,7 +800,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
+ __ movp(ContextOperand(rsi, variable->index()), kScratchRegister);
// No write barrier since the hole value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -793,7 +808,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
// Declaration nodes are always introduced in one of four modes.
ASSERT(IsDeclaredVariableMode(mode));
@@ -809,7 +824,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -835,7 +850,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
- __ movq(StackOperand(variable), result_register());
+ __ movp(StackOperand(variable), result_register());
break;
}
@@ -843,7 +858,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ movq(ContextOperand(rsi, variable->index()), result_register());
+ __ movp(ContextOperand(rsi, variable->index()), result_register());
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(rsi,
@@ -859,11 +874,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
__ Push(Smi::FromInt(NONE));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -880,11 +895,11 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
// Load instance object.
__ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
- __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
+ __ movp(rax, ContextOperand(rax, variable->interface()->Index()));
+ __ movp(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
// Assign it.
- __ movq(ContextOperand(rsi, variable->index()), rax);
+ __ movp(ContextOperand(rsi, variable->index()), rax);
// We know that we have written a module, which is not a smi.
__ RecordWriteContextSlot(rsi,
Context::SlotOffset(variable->index()),
@@ -930,10 +945,10 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -941,7 +956,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -978,16 +993,16 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
+ __ movp(rdx, Operand(rsp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
+ __ movp(rcx, rdx);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -997,10 +1012,19 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
- __ testq(rax, rax);
+ Label skip;
+ __ jmp(&skip, Label::kNear);
+ PrepareForBailout(clause, TOS_REG);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
+ __ testp(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -1032,6 +1056,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1045,7 +1070,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &exit);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
__ j(equal, &exit);
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
@@ -1056,10 +1081,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
- __ push(rax);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
- __ push(rax);
+ __ Push(rax);
// Check for proxies.
Label call_runtime;
@@ -1076,12 +1101,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Label use_cache;
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax); // Duplicate the enumerable object on the stack.
+ __ Push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
// If we got a map from the runtime call, we can do a fast
@@ -1102,69 +1127,65 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
+ __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(rax); // Map.
- __ push(rcx); // Enumeration cache.
- __ push(rdx); // Number of valid entries for the map in the enum cache.
+ __ Push(rax); // Map.
+ __ Push(rcx); // Enumeration cache.
+ __ Push(rdx); // Number of valid entries for the map in the enum cache.
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
__ bind(&no_descriptors);
- __ addq(rsp, Immediate(kPointerSize));
+ __ addp(rsp, Immediate(kPointerSize));
__ jmp(&exit);
// We got a fixed array in register rax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(rbx, cell);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
-
+ // No need for a write barrier, we are storing a Smi in the feedback vector.
+ __ Move(rbx, FeedbackVector());
+ __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate()));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
- __ movq(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
+ __ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
__ j(above, &non_proxy);
__ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
__ bind(&non_proxy);
- __ push(rbx); // Smi
- __ push(rax); // Array
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ push(rax); // Fixed array length (as smi).
+ __ Push(rbx); // Smi
+ __ Push(rax); // Array
+ __ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ __ Push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
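// [Editorial note, not part of the upstream diff] The for-in fast/slow-case
// marker used to live in a dedicated type-feedback Cell; it now occupies slot
// stmt->ForInFeedbackSlot() of the shared feedback vector. Since the sentinel
// written there is a Smi, no write barrier is required for that store.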
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- __ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
- __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
+ __ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
+ __ cmpp(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
// Get the current entry of the array into register rbx.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movp(rbx, Operand(rsp, 2 * kPointerSize));
SmiIndex index = masm()->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rbx,
+ __ movp(rbx, FieldOperand(rbx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
// Get the expected map from the stack or a smi in the
// permanent slow case into register rdx.
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movp(rdx, Operand(rsp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ movq(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, Operand(rsp, 4 * kPointerSize));
+ __ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// For proxies, no filtering is done.
@@ -1175,17 +1196,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
- __ push(rcx); // Enumerable.
- __ push(rbx); // Current entry.
+ __ Push(rcx); // Enumerable.
+ __ Push(rbx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ Cmp(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_label());
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
// Update the 'each' property or variable from the possibly filtered
// entry in register rbx.
__ bind(&update_each);
- __ movq(result_register(), rbx);
+ __ movp(result_register(), rbx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
@@ -1204,7 +1225,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ addq(rsp, Immediate(5 * kPointerSize));
+ __ addp(rsp, Immediate(5 * kPointerSize));
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1220,24 +1241,17 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Iteration loop_statement(this, stmt);
increment_loop_depth();
- // var iterator = iterable[@@iterator]()
- VisitForAccumulatorValue(stmt->assign_iterator());
+ // var iterable = subject
+ VisitForAccumulatorValue(stmt->assign_iterable());
- // As with for-in, skip the loop if the iterator is null or undefined.
+ // As with for-in, skip the loop if the iterable is null or undefined.
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, loop_statement.break_label());
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, loop_statement.break_label());
- // Convert the iterator to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(rax, &convert);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &done_convert);
- __ bind(&convert);
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
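// [Editorial note, not part of the upstream diff] The desugaring changes
// shape here: ForOfStatement now exposes two expressions, assign_iterable
// (evaluated into the accumulator for the null/undefined check above) and
// assign_iterator (evaluated purely for effect to cache
// iterable[Symbol.iterator]()), replacing the open-coded ToObject conversion
// that was deleted.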
// Loop entry.
__ bind(loop_statement.continue_label());
@@ -1284,16 +1298,18 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
__ Move(rbx, info);
__ CallStub(&stub);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(info);
__ Push(pretenure
? isolate()->factory()->true_value()
: isolate()->factory()->false_value());
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(rax);
}
@@ -1314,21 +1330,21 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
// Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1337,32 +1353,31 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// safe to use raw labels here.
Label next, fast;
if (!context.is(temp)) {
- __ movq(temp, context);
+ __ movp(temp, context);
}
// Load map for comparison into register, outside loop.
__ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ bind(&next);
// Terminate at native context.
- __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
- __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
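// [Editorial note, not part of the upstream diff] A scope that calls sloppy
// eval may grow a context extension object holding eval-introduced bindings
// that would shadow the global. The walk above therefore checks that every
// context on the chain has a NULL extension before falling through to the
// global load IC; the loop handles the part of the chain that is not
// statically known. In pseudo-C++:
//   for (Context* c = current; !c->IsNativeContext(); c = c->previous())
//     if (c->extension() != NULL) goto slow;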
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ movq(rax, GlobalObjectOperand());
+ __ movp(rax, GlobalObjectOperand());
__ Move(rcx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
}
@@ -1374,19 +1389,19 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
}
// Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// This function is used only for loads, not stores, so it's safe to
@@ -1410,17 +1425,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
- __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ __ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1437,13 +1451,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
- __ movq(rax, GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ movp(rax, GlobalObjectOperand());
+ CallLoadIC(CONTEXTUAL);
context()->Plug(rax);
break;
}
@@ -1451,7 +1464,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
+ : "[ Stack slot");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1483,7 +1497,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1493,14 +1507,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(rax, var);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
} else {
        // Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
__ bind(&done);
@@ -1513,15 +1527,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ push(rsi); // Context.
+ __ Push(rsi); // Context.
__ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(rax);
break;
@@ -1538,22 +1552,22 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// rcx = literals array.
// rbx = regexp literal.
// rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->pattern());
__ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ movq(rbx, rax);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+ __ movp(rbx, rax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
@@ -1562,23 +1576,23 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(rbx);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ __ movp(rdx, FieldOperand(rbx, i));
+ __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movp(FieldOperand(rax, i), rdx);
+ __ movp(FieldOperand(rax, i + kPointerSize), rcx);
}
if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movp(FieldOperand(rax, size - kPointerSize), rdx);
}
context()->Plug(rax);
}
@@ -1605,23 +1619,22 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
- FastCloneShallowObjectStub stub(properties_count);
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
@@ -1642,7 +1655,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key();
Expression* value = property->value();
if (!result_saved) {
- __ push(rax); // Save result on the stack
+ __ Push(rax); // Save result on the stack
result_saved = true;
}
switch (property->kind()) {
@@ -1656,18 +1669,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->value());
- __ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ __ movp(rdx, Operand(rsp, 0));
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
}
break;
}
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1678,7 +1688,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kSetPrototype, 2);
@@ -1700,7 +1710,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
@@ -1710,7 +1720,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
ASSERT(result_saved);
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ CallRuntime(Runtime::kToFastProperties, 1);
}
@@ -1741,54 +1751,26 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
// If the only customer of allocation sites is transitioning, then
// we can turn it off if we don't have anywhere else to transition to.
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
- Heap* heap = isolate()->heap();
- if (has_constant_fast_elements &&
- constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Move(rbx, Smi::FromInt(expr->literal_index()));
- __ Move(rcx, constant_elements);
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode,
- length);
- __ CallStub(&stub);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- if (has_constant_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- }
-
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
@@ -1803,7 +1785,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(rax); // array literal
+ __ Push(rax); // array literal
__ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
@@ -1813,10 +1795,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
- __ movq(FieldOperand(rbx, offset), result_register());
+ __ movp(FieldOperand(rbx, offset), result_register());
// Update the write barrier for the array store.
__ RecordWriteField(rbx, offset, result_register(), rcx,
kDontSaveFPRegs,
@@ -1825,7 +1807,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
// Store the subexpression value in the array's elements.
__ Move(rcx, Smi::FromInt(i));
- StoreArrayLiteralElementStub stub;
+ StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1833,7 +1815,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
- __ addq(rsp, Immediate(kPointerSize)); // literal index
+ __ addp(rsp, Immediate(kPointerSize)); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -1842,13 +1824,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1870,7 +1848,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
- __ push(result_register());
+ __ Push(result_register());
} else {
VisitForStackValue(property->obj());
}
@@ -1879,8 +1857,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
- __ movq(rdx, Operand(rsp, 0));
- __ push(rax);
+ __ movp(rdx, Operand(rsp, 0));
+ __ Push(rax);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1910,7 +1888,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(rax); // Left operand goes on the stack.
+ __ Push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1964,7 +1942,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
case Yield::SUSPEND:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
- __ push(result_register());
+ __ Push(result_register());
// Fall through.
case Yield::INITIAL: {
Label suspend, continuation, post_runtime, resume;
@@ -1979,20 +1957,20 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(continuation.pos()));
- __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movq(rcx, rsi);
+ __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
- __ cmpq(rsp, rbx);
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+ __ cmpp(rsp, rbx);
__ j(equal, &post_runtime);
- __ push(rax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movq(context_register(),
+ __ Push(rax); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ __ Pop(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -2029,37 +2007,37 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(rcx, Heap::kthrow_stringRootIndex); // "throw"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // exception
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // exception
__ jmp(&l_call);
// try { received = %yield result }
// Shuffle the received result above a try handler and yield it without
// re-boxing.
__ bind(&l_try);
- __ pop(rax); // result
+ __ Pop(rax); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(rax); // result
+ __ Push(rax); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
- __ movq(rax, Operand(rsp, generator_object_depth));
- __ push(rax); // g
+ __ movp(rax, Operand(rsp, generator_object_depth));
+ __ Push(rax); // g
ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(l_continuation.pos()));
- __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movq(rcx, rsi);
+ __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movq(context_register(),
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
- __ pop(rax); // result
+ __ Pop(rax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in rax
__ PopTryHandler();
@@ -2067,33 +2045,38 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
__ LoadRoot(rcx, Heap::knext_stringRootIndex); // "next"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // received
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // received
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The key is still on the stack; drop it.
+ __ movp(rdx, Operand(rsp, kPointerSize));
+ __ movp(rax, Operand(rsp, 2 * kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ movp(rdi, rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rdi);
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
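// [Editorial note, not part of the upstream diff] The call protocol changes
// here: instead of a one-shot KeyedCallIC for iter[f](arg), the code now does
// a KeyedLoadIC (receiver iter in rdx, key "next"/"throw" in rax) to fetch
// the method, writes the result over the stack slot that held the key, and
// invokes it through a generic CallFunctionStub with one argument; that is
// why the Drop(1) above now discards the function rather than the key.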
// if (!result.done) goto l_try;
__ bind(&l_loop);
- __ push(rax); // save result
+ __ Push(rax); // save result
__ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in rax
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in rax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ testq(result_register(), result_register());
+ __ testp(result_register(), result_register());
__ j(zero, &l_try);
// result.value
- __ pop(rax); // result
+ __ Pop(rax); // result
__ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in rax
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
break;
}
@@ -2105,38 +2088,39 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in rax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. rbx
- // will hold the generator object until the activation has been resumed.
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // rbx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(rbx);
+ __ Pop(rbx);
// Check generator state.
- Label wrong_state, done;
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ Label wrong_state, closed_state, done;
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
__ SmiCompare(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(0));
- __ j(less_equal, &wrong_state);
+ __ j(equal, &closed_state);
+ __ j(less, &wrong_state);
// Load suspended function and context.
- __ movq(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
- __ movq(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+ __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
// Push receiver.
- __ push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+ __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
// Push holes for arguments to generator function.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rdx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(rdx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
__ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &push_frame);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_argument_holes);
// Enter a new JavaScript frame, and initialize its slots as they were when
@@ -2146,26 +2130,26 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ call(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ pushq(rbp); // Caller's frame pointer.
+ __ movp(rbp, rsp);
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
// Load the operand stack size.
- __ movq(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
- __ movq(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ movp(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
+ __ movp(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
__ SmiToInteger32(rdx, rdx);
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
- __ cmpq(rdx, Immediate(0));
+ __ cmpp(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
__ jmp(rdx);
@@ -2176,22 +2160,36 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// up the stack and the handlers.
Label push_operand_holes, call_resume;
__ bind(&push_operand_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &call_resume);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_operand_holes);
__ bind(&call_resume);
- __ push(rbx);
- __ push(result_register());
+ __ Push(rbx);
+ __ Push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ Push(rax);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
- __ push(rbx);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ Push(rbx);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
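The tightened STATIC_ASSERTs turn the continuation field into a complete three-way state encoding: negative means the generator is executing, zero means closed, and any positive value is the offset at which a suspended activation resumes. A standalone sketch of the same dispatch; the executing marker's concrete value is an assumption here, since the code above only asserts that it is negative:

#include <cstdio>

enum : int {
  kGeneratorExecuting = -1,  // assumed concrete value; only "< 0" is asserted
  kGeneratorClosed = 0,      // asserted "== 0" above
  // any value > 0 is the resume offset of a suspended generator
};

const char* Classify(int continuation) {
  if (continuation == kGeneratorClosed) return "closed";     // j(equal, ...)
  if (continuation < kGeneratorClosed) return "executing";   // j(less, ...)
  return "suspended";                                        // fall through
}

int main() {
  std::printf("%s %s %s\n", Classify(-1), Classify(0), Classify(42));
  return 0;
}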
@@ -2202,30 +2200,30 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
- Handle<Map> map(isolate()->native_context()->generator_result_map());
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
__ Allocate(map->instance_size(), rax, rcx, rdx, &gc_required, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ movq(context_register(),
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ Move(rbx, map);
- __ pop(rcx);
+ __ Pop(rcx);
__ Move(rdx, isolate()->factory()->ToBoolean(done));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
isolate()->factory()->empty_fixed_array());
__ Move(FieldOperand(rax, JSObject::kElementsOffset),
isolate()->factory()->empty_fixed_array());
- __ movq(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
+ __ movp(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
rcx);
- __ movq(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
+ __ movp(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
rdx);
// Only the value field needs a write barrier, as the other values are in the
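The ASSERT_EQ above pins an iterator result object to exactly five pointer-sized fields, which is what allows the code to fill it with raw movp stores and skip the write barrier for everything but the value. A sketch of that layout; the field names are illustrative, only the offsets mirror the constants used above:

#include <cstddef>

struct IteratorResultLayout {
  void* map;         // HeapObject::kMapOffset
  void* properties;  // JSObject::kPropertiesOffset, set to empty_fixed_array
  void* elements;    // JSObject::kElementsOffset, set to empty_fixed_array
  void* value;       // kResultValuePropertyOffset, the only barriered store
  void* done;        // kResultDonePropertyOffset, a shared true/false oddball
};

static_assert(sizeof(IteratorResultLayout) == 5 * sizeof(void*),
              "mirrors ASSERT_EQ(map->instance_size(), 5 * kPointerSize)");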
@@ -2239,15 +2237,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->value());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2260,17 +2257,16 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
Label done, stub_call, smi_case;
- __ pop(rdx);
- __ movq(rcx, rax);
- __ or_(rax, rdx);
+ __ Pop(rdx);
+ __ movp(rcx, rax);
+ __ orp(rax, rdx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
- __ movq(rax, rcx);
- BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ __ movp(rax, rcx);
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2280,7 +2276,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ SmiShiftArithmeticRight(rax, rdx, rcx);
break;
case Token::SHL:
- __ SmiShiftLeft(rax, rdx, rcx);
+ __ SmiShiftLeft(rax, rdx, rcx, &stub_call);
break;
case Token::SHR:
__ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call);
@@ -2316,23 +2312,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
- __ pop(rdx);
- BinaryOpICStub stub(op, mode);
+ __ Pop(rdx);
+ BinaryOpICStub stub(isolate(), op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
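Both binary-op paths rely on V8's pointer tagging: a smi carries a zero low bit, so ORing the two operands and testing that bit classifies both in one step before the patchable jump. A self-contained model of that check, with the tag constants assumed from the x64 convention:

#include <cstdint>

constexpr std::intptr_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

// Mirrors: orp(rax, rdx); patch_site.EmitJumpIfSmi(rax, &smi_case).
bool BothSmis(std::intptr_t lhs, std::intptr_t rhs) {
  return ((lhs | rhs) & kSmiTagMask) == 0;
}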
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2353,25 +2343,22 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
case NAMED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ movq(rdx, rax);
- __ pop(rax); // Restore value.
+ __ movp(rdx, rax);
+ __ Pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->value());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- __ pop(rax); // Restore value.
- Handle<Code> ic = is_classic_mode()
+ __ movp(rcx, rax);
+ __ Pop(rdx);
+ __ Pop(rax); // Restore value.
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2382,90 +2369,86 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ movp(location, rax);
+ if (var->IsContextSlot()) {
+ __ movp(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ Push(rax); // Value.
+ __ Push(rsi); // Context.
+ __ Push(name);
+ __ Push(Smi::FromInt(strict_mode));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
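The newly factored helper pairs every context-slot store with RecordWriteContextSlot because a generational collector has to learn about old-to-new pointers it would otherwise miss. A toy model of that contract, with hypothetical names and none of V8's page-level filtering:

#include <unordered_set>

struct Heap {
  std::unordered_set<void*> young;            // objects living in new space
  std::unordered_set<void**> remembered_set;  // old slots that may point young
  bool InNewSpace(void* p) const { return young.count(p) != 0; }
};

// The store itself (movp(location, rax)) plus the bookkeeping half
// (RecordWriteContextSlot) that keeps the remembered set conservative.
void StoreWithBarrier(Heap& heap, void** slot, void* value) {
  *slot = value;
  if (heap.InNewSpace(value)) heap.remembered_set.insert(slot);
}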
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
- __ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (op == Token::INIT_CONST) {
+ __ movp(rdx, GlobalObjectOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
+ if (var->IsLookupSlot()) {
+ __ Push(rax);
+ __ Push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
Label skip;
- __ movq(rdx, StackOperand(var));
+ MemOperand location = VarOperand(var, rcx);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
- __ movq(StackOperand(var), rax);
+ EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(rax);
- __ push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
MemOperand location = VarOperand(var, rcx);
- __ movq(rdx, location);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &assign, Label::kNear);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&assign);
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
- __ movq(rdx, location);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
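The hole checks in the let and legacy-const paths implement initialization tracking: a slot holds a unique sentinel until its declaration has executed, and only the let path turns a premature assignment into a ReferenceError. A compact sketch of the throwing case, using stand-in types:

#include <stdexcept>

struct Value { const void* ptr; };
const Value kTheHole{&kTheHole};  // unique sentinel standing in for the hole

// Mirrors the let path above: CompareRoot against the hole, throw if the
// binding is uninitialized, otherwise perform the store.
void AssignToLet(Value& slot, Value v) {
  if (slot.ptr == kTheHole.ptr)
    throw std::runtime_error("ReferenceError");  // kHiddenThrowReferenceError
  slot = v;  // EmitStoreToStackLocalOrContextSlot
}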
@@ -2476,16 +2459,13 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->IsLiteral());
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2495,14 +2475,14 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- __ pop(rcx);
- __ pop(rdx);
+ __ Pop(rcx);
+ __ Pop(rdx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2521,7 +2501,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(rdx);
+ __ Pop(rdx);
EmitKeyedPropertyLoad(expr);
context()->Plug(rax);
}
@@ -2529,73 +2509,67 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- __ call(code, rmode, ast_id);
+ __ call(code, RelocInfo::CODE_TARGET, ast_id);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
}
- __ Move(rcx, name);
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ movp(rax, Operand(rsp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ Push(Operand(rsp, 0));
+ __ movp(Operand(rsp, kPointerSize), rax);
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
+
+ EmitCall(expr, call_type);
}
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
+// Common code for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(rcx);
- __ push(rax);
- __ push(rcx);
+ Expression* callee = expr->expression();
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax); // Drop the key still on the stack.
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ movp(rdx, Operand(rsp, 0));
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ Push(Operand(rsp, 0));
+ __ movp(Operand(rsp, kPointerSize), rax);
+
+ EmitCall(expr, CallIC::METHOD);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2603,23 +2577,21 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
VisitForStackValue(args->at(i));
}
}
- // Record source position for debugger.
+
+ // Record source position of the IC call.
SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot()));
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ Move(rbx, cell);
-
- CallFunctionStub stub(arg_count, flags);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
+
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
context()->DropAndPlug(1, rax);
}
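EmitCall now hands the IC a feedback slot in rdx instead of materializing a per-call-site cell, so the recorded call targets live in a vector owned by the function. A rough model of how one slot degrades from uninitialized to monomorphic to megamorphic; the names and sentinel scheme are illustrative, not V8's:

#include <vector>

struct FeedbackVector {
  std::vector<void*> slots;  // one entry per call site, numbered at compile time
};

void RecordCallTarget(FeedbackVector& vector, int slot, void* target,
                      void* megamorphic_sentinel) {
  void*& entry = vector.slots[slot];
  if (entry == nullptr) {
    entry = target;                // first call: go monomorphic
  } else if (entry != target && entry != megamorphic_sentinel) {
    entry = megamorphic_sentinel;  // saw a second target: give up
  }
}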
@@ -2628,23 +2600,23 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
+ __ Push(Operand(rsp, arg_count * kPointerSize));
} else {
__ PushRoot(Heap::kUndefinedValueRootIndex);
}
  // Push the receiver of the enclosing function and do the runtime call.
StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
- __ push(args.GetReceiverOperand());
+ __ Push(args.GetReceiverOperand());
// Push the language mode.
- __ Push(Smi::FromInt(language_mode()));
+ __ Push(Smi::FromInt(strict_mode()));
  // Push the start position of the scope the call resides in.
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2657,12 +2629,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2677,30 +2648,29 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Push a copy of the function (found below the arguments) and resolve
// eval.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
- __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
- __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+ __ movp(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
+ __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, rax);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Call to a global variable. Push global object as receiver for the
- // call IC lookup.
- __ push(GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2711,11 +2681,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in rax) and
// the object holding it (returned in rdx).
- __ push(context_register());
+ __ Push(context_register());
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Push(rax); // Function.
+ __ Push(rdx); // Receiver.
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2724,38 +2694,35 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ jmp(&call, Label::kNear);
__ bind(&done);
// Push function.
- __ push(rax);
+ __ Push(rax);
// The receiver is implicitly the global receiver. Indicate this by
      // passing undefined to the call function stub.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
__ bind(&call);
}
// The receiver is either the global receiver or an object found by
- // LoadContextSlot. That object could be the hole if the receiver is
- // implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
+ // LoadContextSlot.
+ EmitCall(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithIC(expr, property->key());
+ EmitKeyedCallWithLoadIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCall(expr);
}
#ifdef DEBUG
@@ -2789,17 +2756,20 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
- __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
+ __ movp(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ Move(rbx, cell);
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot()));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(rax);
}
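The ASSERT records a slot-pairing convention: with --pretenuring-call-new enabled, every construct site reserves the slot immediately after its call-feedback slot for an AllocationSite. A one-struct sketch of that convention, names illustrative:

struct CallNewSite {
  int call_new_feedback_slot;  // where the constructor target is recorded
  int allocation_site_slot() const {
    return call_new_feedback_slot + 1;  // the adjacency asserted above
  }
};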
@@ -2863,15 +2833,15 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
- __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ movzxbp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2915,7 +2885,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -2943,14 +2913,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Check whether this map has already been checked to be safe for default
// valueOf.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ j(not_zero, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
- __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
__ j(equal, if_false);
@@ -2961,49 +2931,46 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(rcx, rbx);
- __ cmpq(rcx, Immediate(0));
+ __ cmpp(rcx, Immediate(0));
__ j(equal, &done);
__ LoadInstanceDescriptors(rbx, r8);
// rbx: descriptor array.
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
- __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
- SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
- __ lea(rcx,
- Operand(
- r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
+ __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
+ __ leap(rcx, Operand(r8, rcx, times_8, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
+ __ addp(r8, Immediate(DescriptorArray::kFirstOffset));
  // Loop through all the keys in the descriptor array. If one of these is the
  // internalized string "valueOf", the result is false.
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, FieldOperand(r8, 0));
+ __ movp(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
- __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
// Set the bit in the map to indicate that there is no local valueOf field.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ __ orp(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ bind(&skip_lookup);
  // If a valueOf property is not found on the object, check that its
  // prototype is the unmodified String prototype. If not, the result is false.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testq(rcx, Immediate(kSmiTagMask));
+ __ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ testp(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ cmpq(rcx,
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
+ __ cmpp(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3050,8 +3017,8 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(rax, map, if_false, DO_SMI_CHECK);
__ cmpl(FieldOperand(rax, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- __ j(not_equal, if_false);
+ Immediate(0x1));
+ __ j(no_overflow, if_false);
__ cmpl(FieldOperand(rax, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
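The new minus-zero test is a flags trick: cmpl of the exponent word against 1 sets the overflow flag only when that word is exactly 0x80000000, because INT_MIN minus one is the sole signed-overflow case, and 0x80000000 is precisely the upper half of -0.0's bit pattern. With the mantissa word then required to be zero, the whole predicate is equivalent to this portable check:

#include <cstdint>
#include <cstdio>
#include <cstring>

bool IsMinusZero(double d) {
  std::uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits == 0x8000000000000000ull;  // sign bit set, everything else zero
}

int main() {
  std::printf("%d %d %d\n", IsMinusZero(-0.0), IsMinusZero(0.0),
              IsMinusZero(-1.0));  // prints: 1 0 0
  return 0;
}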
@@ -3117,14 +3084,14 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
- __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker);
- __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
@@ -3152,8 +3119,8 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ pop(rbx);
- __ cmpq(rax, rbx);
+ __ Pop(rbx);
+ __ cmpp(rax, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3168,9 +3135,9 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
// ArgumentsAccessStub expects the key in rdx and the formal
// parameter count in rax.
VisitForAccumulatorValue(args->at(0));
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3184,14 +3151,14 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &exit, Label::kNear);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
- __ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
__ AssertSmi(rax);
@@ -3229,14 +3196,14 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
+ __ movp(rax, FieldOperand(rax, Map::kConstructorOffset));
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &non_function_constructor);
// rax now contains the constructor function. Grab the
// instance class name from there.
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ movp(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
__ jmp(&done);
// Functions have class 'Function'.
@@ -3260,30 +3227,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- SubStringStub stub;
+ SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -3296,7 +3242,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
+ RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
@@ -3320,7 +3266,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
// If the object is not a value type, return the object.
__ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
__ j(not_equal, &done);
- __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
+ __ movp(rax, FieldOperand(rax, JSValue::kValueOffset));
__ bind(&done);
context()->Plug(rax);
@@ -3345,30 +3291,30 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ j(not_equal, &not_date_object);
if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset));
__ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
- __ movq(scratch, stamp_operand);
- __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ movp(scratch, stamp_operand);
+ __ cmpp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, object);
- __ movq(arg_reg_2, index, RelocInfo::NONE64);
+ __ movp(arg_reg_1, object);
+ __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(rax);
}
@@ -3385,12 +3331,12 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
- __ ThrowIf(NegateCondition(__ CheckSmi(value)), kNonSmiValue);
- __ ThrowIf(NegateCondition(__ CheckSmi(index)), kNonSmiValue);
+ __ Check(__ CheckSmi(value), kNonSmiValue);
+ __ Check(__ CheckSmi(index), kNonSmiValue);
}
__ SmiToInteger32(value, value);
@@ -3418,12 +3364,12 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
- __ ThrowIf(NegateCondition(__ CheckSmi(value)), kNonSmiValue);
- __ ThrowIf(NegateCondition(__ CheckSmi(index)), kNonSmiValue);
+ __ Check(__ CheckSmi(value), kNonSmiValue);
+ __ Check(__ CheckSmi(index), kNonSmiValue);
}
__ SmiToInteger32(value, value);
@@ -3446,7 +3392,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub(MathPowStub::ON_STACK);
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3458,7 +3404,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(rbx); // rax = value. rbx = object.
+ __ Pop(rbx); // rax = value. rbx = object.
Label done;
// If the object is a smi, return the value.
@@ -3469,10 +3415,10 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
__ j(not_equal, &done);
// Store the value.
- __ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
+ __ movp(FieldOperand(rbx, JSValue::kValueOffset), rax);
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
__ bind(&done);
@@ -3487,7 +3433,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
// Load the argument into rax and call the stub.
VisitForAccumulatorValue(args->at(0));
- NumberToStringStub stub;
+ NumberToStringStub stub(isolate());
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3523,7 +3469,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = rax;
Register result = rdx;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3570,7 +3516,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = rdx;
Register result = rax;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3609,21 +3555,12 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- if (FLAG_new_string_add) {
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(rdx);
- NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- } else {
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
- }
+ __ Pop(rdx);
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
context()->Plug(rax);
}
@@ -3635,34 +3572,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
+ StringCompareStub stub(isolate());
__ CallStub(&stub);
context()->Plug(rax);
}
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
@@ -3680,15 +3595,14 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ j(not_equal, &runtime);
// InvokeFunction requires the function in rdi. Move it in there.
- __ movq(rdi, result_register());
+ __ movp(rdi, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ InvokeFunction(rdi, count, CALL_FUNCTION, NullCallWrapper());
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
__ bind(&runtime);
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3697,12 +3611,14 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
+ RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ Pop(rbx);
+ __ Pop(rcx);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3729,26 +3645,26 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = rax;
Register cache = rbx;
Register tmp = rcx;
- __ movq(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(cache,
+ __ movp(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movp(cache,
FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ movq(cache,
+ __ movp(cache,
ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache,
+ __ movp(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
Label done, not_found;
- // tmp now holds finger offset as a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ __ movp(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // tmp now holds finger offset as a smi.
SmiIndex index =
__ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpq(key, FieldOperand(cache,
+ __ cmpp(key, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ j(not_equal, &not_found, Label::kNear);
- __ movq(rax, FieldOperand(cache,
+ __ movp(rax, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize + kPointerSize));
@@ -3756,50 +3672,11 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = rax;
- Register left = rbx;
- Register tmp = rcx;
+ __ Push(cache);
+ __ Push(key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmpq(left, right);
- __ j(equal, &ok, Label::kNear);
- // Fail if either is a non-HeapObject.
- Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
- __ j(either_smi, &fail, Label::kNear);
- __ j(zero, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(JS_REGEXP_TYPE));
- __ j(not_equal, &fail, Label::kNear);
- __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Move(rax, isolate()->factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&ok);
- __ Move(rax, isolate()->factory()->true_value());
__ bind(&done);
-
context()->Plug(rax);
}
@@ -3873,7 +3750,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Separator operand is already pushed. Make room for the two
// other stack fields, and clear the direction flag in anticipation
// of calling CopyBytes.
- __ subq(rsp, Immediate(2 * kPointerSize));
+ __ subp(rsp, Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
@@ -3885,7 +3762,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Array has fast elements, so its length must be a smi.
// If the array has length zero, return the empty string.
- __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ movp(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ SmiCompare(array_length, Smi::FromInt(0));
__ j(not_zero, &non_trivial_array);
__ LoadRoot(rax, Heap::kempty_stringRootIndex);
@@ -3899,7 +3776,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Save the FixedArray containing array's elements.
// End of array's live range.
elements = array;
- __ movq(elements, FieldOperand(array, JSArray::kElementsOffset));
+ __ movp(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
@@ -3911,16 +3788,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
- __ cmpq(index, array_length);
+ __ cmpp(index, array_length);
__ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
- __ movq(string, FieldOperand(elements,
+ __ movp(string, FieldOperand(elements,
index,
times_pointer_size,
FixedArray::kHeaderSize));
__ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
@@ -3942,7 +3819,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// If array_length is 1, return elements[0], a string.
__ cmpl(array_length, Immediate(1));
__ j(not_equal, &not_size_one_array);
- __ movq(rax, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ movp(rax, FieldOperand(elements, FixedArray::kHeaderSize));
__ jmp(&return_result);
__ bind(&not_size_one_array);
@@ -3957,9 +3834,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// index: Array length.
// Check that the separator is a sequential ASCII string.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
@@ -3986,10 +3863,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements: FixedArray of strings.
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
- __ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
+ __ movp(result_operand, result_pos);
+ __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
Smi::FromInt(1));
__ j(equal, &one_char_separator);
@@ -4010,12 +3887,12 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch: array length.
// Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
+ __ movp(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4050,16 +3927,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Copy the separator character to the result.
__ movb(Operand(result_pos, 0), scratch);
- __ incq(result_pos);
+ __ incp(result_pos);
__ bind(&loop_2_entry);
// Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
+ __ movp(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4075,18 +3952,18 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// count from -array_length to zero, so we don't need to maintain
// a loop limit.
__ movl(index, array_length_operand);
- __ lea(elements, FieldOperand(elements, index, times_pointer_size,
+ __ leap(elements, FieldOperand(elements, index, times_pointer_size,
FixedArray::kHeaderSize));
- __ neg(index);
+ __ negq(index);
// Replace separator string with pointer to its first character, and
// make scratch be its length.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ movq(separator_operand, string);
+ __ movp(separator_operand, string);
  // Jump into the loop after the code that copies the separator, so the first
  // element is not preceded by a separator.
@@ -4101,35 +3978,35 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator_operand (rsp[0x10]): Address of first char of separator.
// Copy the separator to the result.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ movl(string_length, scratch);
__ CopyBytes(result_pos, string, string_length, 2);
__ bind(&loop_3_entry);
// Get string = array[index].
- __ movq(string, Operand(elements, index, times_pointer_size, 0));
+ __ movp(string, Operand(elements, index, times_pointer_size, 0));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
__ j(not_equal, &loop_3); // Loop while (index < 0).
__ bind(&done);
- __ movq(rax, result_operand);
+ __ movp(rax, result_operand);
__ bind(&return_result);
// Drop temp values from the stack, and restore context register.
- __ addq(rsp, Immediate(3 * kPointerSize));
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ addp(rsp, Immediate(3 * kPointerSize));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
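Stripped of its bailouts and flat-ASCII checks, EmitFastAsciiArrayJoin is the classic two-pass join: sum the lengths, allocate the result once, then copy elements and separators straight through. A portable sketch of that strategy:

#include <cstddef>
#include <string>
#include <vector>

std::string FastJoin(const std::vector<std::string>& parts,
                     const std::string& separator) {
  if (parts.empty()) return std::string();  // the empty-array early return
  std::size_t total = separator.size() * (parts.size() - 1);
  for (const std::string& s : parts) total += s.size();  // pass 1: lengths
  std::string result;
  result.reserve(total);                    // the single AllocateAsciiString
  result += parts[0];                       // first element, no separator
  for (std::size_t i = 1; i < parts.size(); ++i) {  // pass 2: copy through
    result += separator;
    result += parts[i];
  }
  return result;
}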
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4137,32 +4014,47 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ movq(rax, GlobalObjectOperand());
- __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
if (expr->is_jsruntime()) {
- // Call the JS runtime function using a call IC.
+ // Push the builtins object as receiver.
+ __ movp(rax, GlobalObjectOperand());
+ __ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+
+ // Load the function from the receiver.
+ __ movp(rax, Operand(rsp, 0));
__ Move(rcx, expr->name());
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ Push(Operand(rsp, 0));
+ __ movp(Operand(rsp, kPointerSize), rax);
+
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, rax);
+
} else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime.
__ CallRuntime(expr->function(), arg_count);
+ context()->Plug(rax);
}
- context()->Plug(rax);
}
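
The rewritten JS-runtime path drops the ComputeCallInitialize call IC in favor of an explicit sequence: push the builtins object as receiver, load the target function with a load IC, slot the function under the receiver, push the arguments, then invoke CallFunctionStub. A toy model of that stack shuffle, with illustrative strings standing in for tagged values:

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> stack;            // index 0 is the deepest slot
  stack.push_back("builtins");               // Push builtins object as receiver.
  std::string rax = "runtime_fn";            // CallLoadIC leaves the function in rax.
  std::string top = stack.back();
  stack.push_back(top);                      // Push(Operand(rsp, 0)): duplicate TOS.
  stack[stack.size() - 2] = rax;             // movp(Operand(rsp, kPointerSize), rax).
  stack.push_back("arg0");                   // VisitForStackValue per argument.
  assert(stack[0] == "runtime_fn");          // function now sits under the receiver
  assert(stack[1] == "builtins");
  return 0;
}
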
@@ -4176,20 +4068,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ Push(Smi::FromInt(strict_mode_flag));
+ __ Push(Smi::FromInt(strict_mode()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
- __ push(GlobalObjectOperand());
+ __ Push(GlobalObjectOperand());
__ Push(var->name());
- __ Push(Smi::FromInt(kNonStrictMode));
+ __ Push(Smi::FromInt(SLOPPY));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
@@ -4200,9 +4090,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ __ Push(context_register());
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(rax);
}
} else {
@@ -4283,16 +4173,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand-sides are rewritten to have a 'throw
- // ReferenceError' as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4317,13 +4202,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
if (assign_type == NAMED_PROPERTY) {
VisitForAccumulatorValue(prop->obj());
- __ push(rax); // Copy of receiver, needed for later store.
+ __ Push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
- __ push(rax); // Copy of key, needed for later store.
+ __ movp(rdx, Operand(rsp, 0)); // Leave receiver on stack
+ __ Push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
}
@@ -4351,13 +4236,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
+ __ movp(Operand(rsp, kPointerSize), rax);
break;
case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
}
}
@@ -4375,7 +4260,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&slow);
}
- ToNumberStub convert_stub;
+ ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
// Save result for postfix expressions.
@@ -4386,13 +4271,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
+ __ movp(Operand(rsp, kPointerSize), rax);
break;
case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
}
}
@@ -4403,12 +4288,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub for +1/-1.
__ bind(&stub_call);
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
- BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4438,11 +4321,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4454,12 +4334,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(rcx);
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
+ __ Pop(rcx);
+ __ Pop(rdx);
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4480,16 +4360,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ Move(rcx, proxy->name());
- __ movq(rax, GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ __ movp(rax, GlobalObjectOperand());
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4497,9 +4377,9 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- __ push(rsi);
+ __ Push(rsi);
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4526,12 +4406,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(rax, if_true);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
+ } else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => false.
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
@@ -4539,36 +4420,36 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ } else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, SYMBOL_TYPE, rdx);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ } else if (String::Equals(check, factory->boolean_string())) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kFalseValueRootIndex);
Split(equal, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
+ String::Equals(check, factory->null_string())) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ } else if (String::Equals(check, factory->undefined_string())) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => true.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
+ } else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(rax, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
__ j(equal, if_true);
__ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
+ } else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(rax, if_false);
if (!FLAG_harmony_typeof) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
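
This hunk only changes how the literal is compared (String::Equals against factory strings instead of raw heap-string pointer equality); the branch structure is unchanged. A rough C++ mirror of the classification the generated code implements, using illustrative predicates rather than V8's object model (under FLAG_harmony_typeof, null would additionally report its own type):

#include <string>

struct Value {            // illustrative predicates, not V8's object model
  bool is_smi, is_heap_number, is_string, is_symbol, is_boolean,
       is_undefined, is_undetectable, is_function;
};

std::string TypeofResult(const Value& v) {
  if (v.is_smi || v.is_heap_number) return "number";
  if (v.is_string) return "string";            // undetectable objects excluded
  if (v.is_symbol) return "symbol";
  if (v.is_boolean) return "boolean";
  if (v.is_undefined || v.is_undetectable) return "undefined";
  if (v.is_function) return "function";        // includes function proxies
  return "object";
}
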
@@ -4619,10 +4500,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
@@ -4631,16 +4512,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
- __ pop(rdx);
+ __ Pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
+ __ movp(rcx, rdx);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
@@ -4648,11 +4529,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
}
@@ -4683,8 +4564,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
- __ testq(rax, rax);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ testp(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -4692,7 +4573,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
context()->Plug(rax);
}
@@ -4709,12 +4590,12 @@ Register FullCodeGenerator::context_register() {
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
- __ movq(Operand(rbp, frame_offset), value);
+ __ movp(Operand(rbp, frame_offset), value);
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ movq(dst, ContextOperand(rsi, context_index));
+ __ movp(dst, ContextOperand(rsi, context_index));
}
@@ -4731,10 +4612,10 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+ __ Push(ContextOperand(rsi, Context::CLOSURE_INDEX));
} else {
ASSERT(declaration_scope->is_function_scope());
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4749,29 +4630,29 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
__ PopReturnAddressTo(rdx);
__ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
+ __ subp(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
// Store result register while executing finally block.
- __ push(result_register());
+ __ Push(result_register());
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Load(rdx, pending_message_obj);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Load(rdx, has_pending_message);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Load(rdx, pending_message_script);
- __ push(rdx);
+ __ Push(rdx);
}
@@ -4779,30 +4660,30 @@ void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore pending message from stack.
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Store(pending_message_script, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Store(has_pending_message, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Store(pending_message_obj, rdx);
// Restore result register from stack.
- __ pop(result_register());
+ __ Pop(result_register());
// Uncook return address.
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ jmp(rdx);
}
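
The "cook"/"uncook" pair stores the return address as a smi-encoded delta from the code object's start, so a moving GC can relocate the code while a finally block is on the stack. A minimal sketch of the arithmetic, assuming the x64 smi representation (payload in the upper 32 bits):

#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;  // assumption: x64 smi payload in upper 32 bits

uint64_t Cook(uint64_t return_address, uint64_t code_start) {
  return (return_address - code_start) << kSmiShift;  // Integer32ToSmi(delta)
}

uint64_t Uncook(uint64_t cooked, uint64_t code_start) {
  return code_start + (cooked >> kSmiShift);          // SmiToInteger32 + base
}

int main() {
  uint64_t code = 0x10000, ret = code + 0x42;
  uint64_t smi = Cook(ret, code);
  uint64_t moved = 0x20000;                  // GC relocated the code object
  assert(Uncook(smi, moved) == moved + 0x42);
  return 0;
}
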
@@ -4823,8 +4704,8 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
__ Drop(*stack_depth); // Down to the handler block.
if (*context_length > 0) {
// Restore the context to its dedicated register and the stack.
- __ movq(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ __ movp(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
}
__ PopTryHandler();
__ call(finally_entry_);
@@ -4839,10 +4720,11 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1d;
-static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
+#ifdef DEBUG
+static const byte kCallInstruction = 0xe8;
+#endif
void BackEdgeTable::PatchAt(Code* unoptimized_code,
@@ -4875,6 +4757,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
@@ -4892,20 +4775,23 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
if (*jns_instr_address == kJnsInstruction) {
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return INTERRUPT;
}
ASSERT_EQ(kNopByteOne, *jns_instr_address);
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address) ==
+ if (Assembler::target_address_at(call_target_address,
+ unoptimized_code) ==
isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return OSR_AFTER_STACK_CHECK;
}
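
GetBackEdgeState reads the patch site to classify a back edge: an intact jns (0x79) means the interrupt check is live; once the jns is overwritten with the two-byte nop 0x66 0x90, the following call is unconditional and its target distinguishes the two OSR states. A minimal sketch of that decision, with illustrative parameters rather than V8 API:

#include <cstdint>

enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

// first_byte is the byte at the jns/nop site; call_target is the resolved
// target of the following call; osr_entry is the OnStackReplacement entry.
BackEdgeState Classify(uint8_t first_byte, uintptr_t call_target,
                       uintptr_t osr_entry) {
  const uint8_t kJnsInstruction = 0x79;
  if (first_byte == kJnsInstruction) return INTERRUPT;  // unpatched back edge
  // The jns was replaced by the two-byte nop, so the call always executes
  // and its target tells the patched states apart.
  return call_target == osr_entry ? ON_STACK_REPLACEMENT
                                  : OSR_AFTER_STACK_CHECK;
}
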
diff --git a/chromium/v8/src/x64/ic-x64.cc b/chromium/v8/src/x64/ic-x64.cc
index 9448d3771a7..0cda1df16bd 100644
--- a/chromium/v8/src/x64/ic-x64.cc
+++ b/chromium/v8/src/x64/ic-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "codegen.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
+#include "src/codegen.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -74,7 +51,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
__ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
- __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
__ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
__ j(below, miss);
@@ -90,7 +67,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
(1 << Map::kHasNamedInterceptor)));
__ j(not_zero, miss);
- __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, miss);
@@ -150,7 +127,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(result,
+ __ movp(result,
Operand(elements, r1, times_pointer_size,
kValueOffset - kHeapObjectTag));
}
@@ -212,14 +189,14 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(scratch1, Operand(elements,
+ __ leap(scratch1, Operand(elements,
scratch1,
times_pointer_size,
kValueOffset - kHeapObjectTag));
- __ movq(Operand(scratch1, 0), value);
+ __ movp(Operand(scratch1, 0), value);
// Update write barrier. Make sure not to clobber the value.
- __ movq(scratch0, value);
+ __ movp(scratch0, value);
__ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
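
The dictionary store ends with the usual two-step pattern: write the value through the computed slot, then record the slot so the incremental and generational collectors can rescan it. A conceptual sketch, where RecordSlot stands in for RecordWrite and is not a real API:

void StoreWithBarrier(void** slot, void* value,
                      void (*record_slot)(void** slot, void* value)) {
  *slot = value;              // movp(Operand(scratch1, 0), value)
  record_slot(slot, value);   // RecordWrite(elements, scratch1, scratch0, ...)
}
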
@@ -284,7 +261,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
//
// scratch - used to hold elements of the receiver and the loaded value.
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
__ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
@@ -299,7 +276,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
__ j(above_equal, out_of_range);
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movq(scratch, FieldOperand(elements,
+ __ movp(scratch, FieldOperand(elements,
index.reg,
index.scale,
FixedArray::kHeaderSize));
@@ -308,7 +285,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
if (!result.is(scratch)) {
- __ movq(result, scratch);
+ __ movp(result, scratch);
}
}
@@ -384,7 +361,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
__ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rdx: receiver
@@ -412,21 +389,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise, probe the dictionary, leaving the result in rcx.
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ movl(rcx, rbx);
- __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
+ __ shrl(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
- __ shr(rdi, Immediate(String::kHashShift));
- __ xor_(rcx, rdi);
+ __ shrl(rdi, Immediate(String::kHashShift));
+ __ xorp(rcx, rdi);
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- __ and_(rcx, Immediate(mask));
+ __ andp(rcx, Immediate(mask));
// Load the key (consisting of map and internalized string) from the cache and
// check for match.
@@ -438,21 +415,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
- __ movq(rdi, rcx);
- __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+ __ movp(rdi, rcx);
+ __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &try_next_entry);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
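
The keyed lookup cache index computed above is an xor of the shifted low map-pointer bits and the shifted name hash, masked to the cache capacity. A sketch of that computation, with the shift and mask constants as placeholders for KeyedLookupCache's values:

#include <cstdint>

// map_low32 is the low 32 bits of the receiver's map pointer.
uint32_t CacheIndex(uint32_t map_low32, uint32_t name_hash_field,
                    int map_hash_shift, int hash_shift, uint32_t mask) {
  uint32_t hash = (map_low32 >> map_hash_shift) ^ (name_hash_field >> hash_shift);
  return hash & mask;  // bucket index in the (map, name) -> offset cache
}
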
@@ -467,8 +444,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
__ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subp(rdi, rcx);
__ j(above_equal, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
@@ -477,16 +454,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property.
__ bind(&load_in_object_property);
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rcx, rdi);
- __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ addp(rcx, rdi);
+ __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
__ bind(&property_array_property);
- __ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
+ __ movp(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rax, FieldOperand(rax, rdi, times_pointer_size,
FixedArray::kHeaderSize));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -498,7 +475,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// rax: key
// rbx: elements
- __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kMapOffset));
__ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
@@ -560,7 +537,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ JumpUnlessNonNegativeSmi(rax, &slow);
// Get the map of the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
@@ -571,8 +548,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ PopReturnAddressTo(rcx);
- __ push(rdx); // receiver
- __ push(rax); // key
+ __ Push(rdx); // receiver
+ __ Push(rax); // key
__ PushReturnAddressFrom(rcx);
// Perform tail call to the entry.
@@ -605,7 +582,7 @@ static void KeyedStoreGenerateGenericHelper(
// rdx: receiver (a JSArray)
// r9: map of receiver
if (check_map == kCheckMap) {
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, fast_double);
}
@@ -614,7 +591,7 @@ static void KeyedStoreGenerateGenericHelper(
// We have to go to the runtime if the current value is the hole, because
// there may be a callback on the element.
Label holecheck_passed1;
- __ movq(kScratchRegister, FieldOperand(rbx,
+ __ movp(kScratchRegister, FieldOperand(rbx,
rcx,
times_pointer_size,
FixedArray::kHeaderSize));
@@ -633,7 +610,7 @@ static void KeyedStoreGenerateGenericHelper(
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
// It's irrelevant whether the array is smi-only or not when writing a smi.
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
__ ret(0);
@@ -648,9 +625,9 @@ static void KeyedStoreGenerateGenericHelper(
__ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
- __ movq(rdx, rax); // Preserve the value which is returned.
+ __ movp(rdx, rax); // Preserve the value which is returned.
__ RecordWriteArray(
rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
@@ -683,10 +660,10 @@ static void KeyedStoreGenerateGenericHelper(
__ ret(0);
__ bind(&transition_smi_elements);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
- __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &non_double_value);
@@ -700,7 +677,7 @@ static void KeyedStoreGenerateGenericHelper(
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
@@ -713,14 +690,14 @@ static void KeyedStoreGenerateGenericHelper(
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
// and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
rbx,
@@ -728,13 +705,13 @@ static void KeyedStoreGenerateGenericHelper(
slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -748,7 +725,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ testb(FieldOperand(r9, Map::kBitFieldOffset),
@@ -768,7 +745,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rdx: JSObject
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check array bounds.
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
@@ -796,7 +773,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(below_equal, &slow);
// Increment index to get new length.
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_if_double_array);
__ jmp(&fast_object_grow);
@@ -814,7 +791,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rdx: receiver (a JSArray)
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
@@ -828,347 +805,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, rdx, rcx, rbx, rax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(rdx, &number);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(rbx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &boolean);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, rdx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, rdx, rcx, rbx, no_reg);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdi : function
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- __ JumpIfSmi(rdi, miss);
- // Check that the value is a JavaScript function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-// The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
-
- // rax: elements
- // Search the dictionary placing the result in rdi.
- GenerateDictionaryLoad(masm, &miss, rax, rcx, rbx, rdi, rdi);
-
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ push(rdx);
- __ push(rcx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ Set(rax, 2);
- __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
- __ CallStub(&stub);
-
- // Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ movq(rdx, args.GetReceiverOperand());
- __ JumpIfSmi(rdx, &invoke);
- __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
- __ j(equal, &global);
- __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- Label do_call, slow_call, slow_load;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &check_name);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in rdx is not used after this point.
- // rcx: key
- // rdi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // rax: elements
- // rcx: smi key
- // Check whether the elements is a number dictionary.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow_load);
- __ SmiToInteger32(rbx, rcx);
- // ebx: untagged index
- __ LoadFromNumberDictionary(&slow_load, rax, rcx, rbx, r9, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rcx); // save the key
- __ push(rdx); // pass the receiver
- __ push(rcx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(rcx); // restore the key
- }
- __ movq(rdi, rax);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, rcx, rax, rbx, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(rbx, rcx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(rcx, &miss);
- Condition cond = masm->IsObjectNameType(rcx, rax, rax);
- __ j(NegateCondition(cond), &miss);
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -1193,20 +829,20 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load the elements into scratch1 and check its map. If the map check
// fails, jump to the unmapped lookup with the parameter map in scratch1.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments.
- __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpq(key, scratch2);
+ __ cmpp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
__ SmiToInteger64(scratch3, key);
- __ movq(scratch2, FieldOperand(scratch1,
+ __ movp(scratch2, FieldOperand(scratch1,
scratch3,
times_pointer_size,
kHeaderSize));
@@ -1216,7 +852,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load value from context and return it. We can reuse scratch1 because
// we do not jump to the unmapped lookup (which requires the parameter
// map in scratch1).
- __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+ __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
__ SmiToInteger64(scratch3, scratch2);
return FieldOperand(scratch1,
scratch3,
@@ -1236,11 +872,11 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
// overwritten.
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
- __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpq(key, scratch);
+ __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmpp(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
return FieldOperand(backing_store,
@@ -1250,7 +886,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -1260,7 +896,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand mapped_location =
GenerateMappedArgumentsLookup(
masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
- __ movq(rax, mapped_location);
+ __ movp(rax, mapped_location);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in rbx.
@@ -1268,14 +904,14 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
__ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
__ j(equal, &slow);
- __ movq(rax, unmapped_location);
+ __ movp(rax, unmapped_location);
__ Ret();
__ bind(&slow);
GenerateMiss(masm);
}
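
The sloppy-arguments load consults a parameter map whose slots hold either a context slot index (an aliased parameter) or the hole, falling back to a backing store for unmapped elements. A toy model of that lookup, illustrative only:

#include <cstddef>
#include <optional>
#include <vector>

struct SloppyArguments {
  static constexpr int kHole = -1;
  std::vector<int> parameter_map;   // context slot index or kHole per element
  std::vector<int>* context;        // aliased storage for mapped parameters
  std::vector<int> backing_store;   // plain storage for unmapped elements

  std::optional<int> Load(std::size_t key) const {
    if (key < parameter_map.size() && parameter_map[key] != kHole)
      return (*context)[parameter_map[key]];  // mapped: read the context slot
    if (key < backing_store.size())
      return backing_store[key];               // unmapped: plain element
    return std::nullopt;                       // neither: slow path
  }
};
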
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1285,9 +921,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Label slow, notin;
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
- __ movq(mapped_location, rax);
- __ lea(r9, mapped_location);
- __ movq(r8, rax);
+ __ movp(mapped_location, rax);
+ __ leap(r9, mapped_location);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
@@ -1299,9 +935,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
// The unmapped lookup expects that the parameter map is in rbx.
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
- __ movq(unmapped_location, rax);
- __ lea(r9, unmapped_location);
- __ movq(r8, rax);
+ __ movp(unmapped_location, rax);
+ __ leap(r9, unmapped_location);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
@@ -1314,37 +950,6 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label slow, notin;
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
- __ movq(rdi, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
- __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow);
- __ movq(rdi, unmapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -1353,9 +958,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
@@ -1369,15 +972,19 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- rcx : name
// -- rsp[0] : return address
// -----------------------------------
- Label miss;
+ Label miss, slow;
GenerateNameDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
// rdx: elements
// Search the dictionary placing the result in rax.
- GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax);
+ GenerateDictionaryLoad(masm, &slow, rdx, rcx, rbx, rdi, rax);
__ ret(0);
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
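
Note the behavioral change in GenerateNormal: a failed dictionary probe now falls through to GenerateRuntimeGetProperty instead of recording an IC miss, so only the receiver check still misses. A stand-in outline of the control flow (names are illustrative, not V8 API):

enum Outcome { FOUND, SLOW_RUNTIME, MISS };

// receiver_ok models GenerateNameDictionaryReceiverCheck succeeding;
// found models GenerateDictionaryLoad hitting.
Outcome LoadICNormal(bool receiver_ok, bool found) {
  if (!receiver_ok) return MISS;     // cache miss: jump to runtime miss
  if (found) return FOUND;           // fast dictionary hit, ret(0)
  return SLOW_RUNTIME;               // new: GenerateRuntimeGetProperty
}
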
@@ -1395,8 +1002,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1414,8 +1021,8 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1434,8 +1041,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1453,8 +1060,8 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1462,8 +1069,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1472,9 +1078,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rdx, rcx, rbx, no_reg);
@@ -1492,9 +1096,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // name
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1528,7 +1132,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1536,9 +1140,9 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -- rsp[0] : return address
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx);
- __ push(rcx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rcx);
+ __ Push(rax);
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode));
__ PushReturnAddressFrom(rbx);
@@ -1549,7 +1153,7 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1558,9 +1162,9 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode)); // Strict mode.
__ PushReturnAddressFrom(rbx);
@@ -1579,9 +1183,9 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1599,9 +1203,9 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1619,9 +1223,9 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1680,7 +1284,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction and the
// condition code uses at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+ uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, test=%p, delta=%d\n",
address, test_instruction_address, delta);
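
The delta byte is now read unsigned: an offset of 128 or more would go negative through int8_t and point the patcher in the wrong direction. A minimal demonstration:

#include <cstdint>
#include <cstdio>

int main() {
  unsigned char raw = 0x90;                     // a delta byte >= 128
  int8_t as_signed = static_cast<int8_t>(raw);  // -112: offset goes backwards
  uint8_t as_unsigned = raw;                    // 144: the intended offset
  std::printf("%d vs %d\n", as_signed, as_unsigned);
  return 0;
}
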
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.cc b/chromium/v8/src/x64/lithium-codegen-x64.cc
index 80024e78e17..81e8e9b99a1 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.cc
+++ b/chromium/v8/src/x64/lithium-codegen-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "x64/lithium-codegen-x64.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-#include "hydrogen-osr.h"
+#include "src/x64/lithium-codegen-x64.h"
+#include "src/code-stubs.h"
+#include "src/stub-cache.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -50,9 +27,7 @@ class SafepointGenerator V8_FINAL : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {
- codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
- }
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
@@ -89,15 +64,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
- info()->CommitDependencies(code);
-}
-
-
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
}
@@ -105,7 +73,7 @@ void LChunkBuilder::Abort(BailoutReason reason) {
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
- __ movq(Operand(rsp, offset), rax);
+ __ movp(Operand(rsp, offset), rax);
}
}
#endif
@@ -156,17 +124,23 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
+ // Sloppy mode functions need to replace the receiver with the global proxy
+ // when called as functions (without an explicit receiver object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
StackArgumentsAccessor args(rsp, scope()->num_parameters());
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetReceiverOperand(), kScratchRegister);
+ __ movp(rcx, args.GetReceiverOperand());
+
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+
+ __ movp(args.GetReceiverOperand(), rcx);
+
__ bind(&ok);
}
}
@@ -175,7 +149,11 @@ bool LCodeGen::GeneratePrologue() {
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
- __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
info()->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -183,22 +161,22 @@ bool LCodeGen::GeneratePrologue() {
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
- __ push(rax);
+ __ Push(rax);
__ Set(rax, slots);
__ movq(kScratchRegister, kSlotsZapValue);
Label loop;
__ bind(&loop);
- __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
+ __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
- __ pop(rax);
+ __ Pop(rax);
} else {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
@@ -213,18 +191,22 @@ bool LCodeGen::GeneratePrologue() {
int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ // Context is returned in rax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in rsi.
+ __ movp(rsi, rax);
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
@@ -234,12 +216,19 @@ bool LCodeGen::GeneratePrologue() {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
+ __ movp(rax, Operand(rbp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
+ __ movp(Operand(rsi, context_offset), rax);
// Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
Comment(";;; End allocate local context");
@@ -264,17 +253,47 @@ void LCodeGen::GenerateOsrPrologue() {
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
ASSERT(slots >= 0);
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
}
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
}
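Moving the padding here (and out of CallCodeGeneric, further down) preserves the invariant that the lazy deoptimizer may overwrite the bytes following any call site: between two patchable calls there must be at least Deoptimizer::patch_size() bytes. A rough sketch of that kind of padding, assuming an emitter that exposes pc_offset() and nop() the way V8's macro assembler does:

    // Sketch only: pad with nops until the previous patch site has room.
    template <typename Assembler>
    void EnsurePatchableGap(Assembler* masm, int last_patch_pc,
                            int space_needed) {
      while (masm->pc_offset() < last_patch_pc + space_needed) {
        masm->nop();  // harmless filler the deopt patch may overwrite
      }
    }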
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
+ instr->hydrogen_value()->representation().IsInteger32() &&
+ instr->result()->IsRegister()) {
+ __ AssertZeroExtended(ToRegister(instr->result()));
+ }
+
+ if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
+ // We sign extend the dehoisted key at its definition point when the
+ // pointer size is 64-bit. For the x32 port, we sign extend the dehoisted
+ // key at its use points instead, so MustSignExtendResult is always false
+ // there. We can't use STATIC_ASSERT here because the pointer size is
+ // 32-bit for x32.
+ ASSERT(kPointerSize == kInt64Size);
+ if (instr->result()->IsRegister()) {
+ Register result_reg = ToRegister(instr->result());
+ __ movsxlq(result_reg, result_reg);
+ } else {
+ // Sign extend the 32-bit result in the stack slot.
+ ASSERT(instr->result()->IsStackSlot());
+ Operand src = ToOperand(instr->result());
+ __ movsxlq(kScratchRegister, src);
+ __ movq(src, kScratchRegister);
+ }
+ }
+}
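The sign-extension requirement above is about register width: a dehoisted key is a 32-bit value that is later used as a 64-bit memory offset, so it has to be widened arithmetically at its definition. In plain C++ terms:

    #include <cstdint>

    // A negative 32-bit key must become a negative 64-bit offset;
    // zero extension would instead produce an offset near 2^32.
    int64_t AsOffset(int32_t key) {
      return static_cast<int64_t>(key);   // sign-extends: -1 stays -1
    }
    uint64_t WrongAsOffset(int32_t key) {
      return static_cast<uint32_t>(key);  // zero-extends: -1 becomes 4294967295
    }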
+
+
bool LCodeGen::GenerateJumpTable() {
Label needs_frame;
if (jump_table_.length() > 0) {
@@ -297,17 +316,17 @@ bool LCodeGen::GenerateJumpTable() {
__ jmp(&needs_frame);
} else {
__ bind(&needs_frame);
- __ movq(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
+ __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
+ __ Push(rsi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ Push(rsi);
+ __ movp(rsi, MemOperand(rsp, kPointerSize));
__ call(kScratchRegister);
}
} else {
@@ -330,7 +349,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -344,10 +364,10 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(info()->IsStub());
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
- __ push(rbp); // Caller's frame pointer.
- __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ pushq(rbp); // Caller's frame pointer.
+ __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB));
- __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+ __ leap(rbp, Operand(rsp, 2 * kPointerSize));
Comment(";;; Deferred code");
}
code->Generate();
@@ -356,8 +376,8 @@ bool LCodeGen::GenerateDeferredCode() {
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
frame_is_built_ = false;
- __ movq(rsp, rbp);
- __ pop(rbp);
+ __ movp(rsp, rbp);
+ __ popq(rbp);
}
__ jmp(code->exit());
}
@@ -400,26 +420,33 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
-bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmi();
+ chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}
-bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsTagged();
+bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(SmiValuesAre31Bits() && r.IsSmiOrTagged());
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
}
@@ -572,10 +599,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -603,7 +626,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
int argc) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
ASSERT(instr != NULL);
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
@@ -640,10 +662,10 @@ void LCodeGen::CallRuntime(const Runtime::Function* function,
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
if (!ToRegister(context).is(rsi)) {
- __ movq(rsi, ToRegister(context));
+ __ movp(rsi, ToRegister(context));
}
} else if (context->IsStackSlot()) {
- __ movq(rsi, ToOperand(context));
+ __ movp(rsi, ToOperand(context));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
@@ -669,6 +691,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -721,7 +744,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfq();
- __ push(rax);
+ __ Push(rax);
Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
__ movl(rax, count_operand);
__ subl(rax, Immediate(1));
@@ -729,13 +752,13 @@ void LCodeGen::DeoptimizeIf(Condition cc,
if (FLAG_trap_on_deopt) __ int3();
__ movl(rax, Immediate(FLAG_deopt_every_n_times));
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
ASSERT(frame_is_built_);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
}
@@ -784,46 +807,24 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -985,30 +986,19 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->result()).is(rax));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -1022,281 +1012,376 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
-
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ testl(left_reg, left_reg);
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ negl(left_reg);
- __ andl(left_reg, Immediate(divisor - 1));
- __ negl(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ testl(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ negl(dividend);
+ __ andl(dividend, Immediate(mask));
+ __ negl(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
}
+ __ jmp(&done, Label::kNear);
+ }
- __ bind(&left_is_not_negative);
- __ andl(left_reg, Immediate(divisor - 1));
- __ bind(&done);
- } else {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(rax));
- Register right_reg = ToRegister(instr->right());
- ASSERT(!right_reg.is(rax));
- ASSERT(!right_reg.is(rdx));
- Register result_reg = ToRegister(instr->result());
- ASSERT(result_reg.is(rdx));
+ __ bind(&dividend_is_not_negative);
+ __ andl(dividend, Immediate(mask));
+ __ bind(&done);
+}
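Both strategies weighed in the comment above can be written out directly. A sketch assuming 32-bit two's-complement arithmetic with arithmetic right shifts (as C++20 guarantees); the mask is computed exactly as in the code so that divisor == kMinInt stays overflow-free:

    #include <cstdint>

    // Branching version (what the code above emits): negate, mask, negate.
    int32_t ModPowerOf2Branching(int32_t dividend, int32_t divisor) {
      int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
      if (dividend < 0) {
        // Go through uint32_t so negating kMinInt wraps like negl does.
        uint32_t neg = 0u - static_cast<uint32_t>(dividend);
        return -static_cast<int32_t>(neg & static_cast<uint32_t>(mask));
      }
      return dividend & mask;
    }

    // Branch-free version (what GCC/clang emit): bias, mask, unbias.
    int32_t ModPowerOf2BranchFree(int32_t dividend, int32_t divisor) {
      int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
      int32_t bias = (dividend >> 31) & mask;  // mask if negative, else 0
      return ((dividend + bias) & mask) - bias;
    }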
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (right->CanBeZero()) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label no_overflow_possible;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &no_overflow_possible, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Set(result_reg, 0);
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rax));
- // Sign extend dividend in eax into edx:eax, since we are using only the low
- // 32 bits of the values.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ testl(left_reg, left_reg);
- __ j(not_sign, &positive_left, Label::kNear);
- __ idivl(right_reg);
- __ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr->environment());
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imull(rdx, rdx, Immediate(Abs(divisor)));
+ __ movl(rax, dividend);
+ __ subl(rax, rdx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmpl(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
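TruncatingDiv produces the round-toward-zero quotient of the dividend and Abs(divisor) via a multiply-and-shift, and the imull/subl pair above reconstructs the remainder from it. The identity in plain C++, whose truncating / and % match the emitted sequence (assumes divisor != 0, already deopted above, and divisor != kMinInt in this sketch):

    #include <cstdint>

    int32_t ModByConst(int32_t n, int32_t d) {
      int32_t ad = d < 0 ? -d : d;  // Abs(divisor)
      int32_t q = n / ad;           // what TruncatingDiv computes
      int32_t r = n - q * ad;       // the imull + subl above; equals n % d
      // r == 0 with n < 0 is the -0 case the kBailoutOnMinusZero
      // check above guards against.
      return r;
    }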
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(rax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(rax));
+ ASSERT(!right_reg.is(rdx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(rdx));
+
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmpl(left_reg, Immediate(kMinInt));
+ __ j(not_zero, &no_overflow_possible, Label::kNear);
+ __ cmpl(right_reg, Immediate(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Set(result_reg, 0);
__ jmp(&done, Label::kNear);
- __ bind(&positive_left);
}
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax, since we are using only the low
+ // 32 bits of the values.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
- __ bind(&done);
+ __ testl(result_reg, result_reg);
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
}
+ __ idivl(right_reg);
+ __ bind(&done);
}
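Both deopt checks in DoModI exist because x86 idiv raises a divide-error fault (#DE) rather than setting a flag: once for a zero divisor, and once for kMinInt % -1, whose quotient does not fit in 32 bits even though the remainder would be 0. The semantics being implemented, sketched with the deopt as an early return:

    #include <cstdint>

    bool SafeMod(int32_t left, int32_t right, int32_t* result) {
      if (right == 0) return false;       // idivl would fault: deopt
      if (left == INT32_MIN && right == -1) {
        *result = 0;                      // idivl would fault here as well
        return true;                      // (deopt instead if -0 matters)
      }
      *result = left % right;
      return true;
    }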
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
- const Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- const Register result = ToRegister(instr->result());
-
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sarl(dividend, Immediate(shift));
return;
+ }
- case 1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- return;
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ negl(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
- case -1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ negl(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(overflow, instr->environment());
}
return;
}
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- __ movsxlq(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ sar(result, Immediate(power));
- } else {
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ sarl(result, Immediate(power));
- }
- } else {
- Register reg1 = ToRegister(instr->temp());
- Register reg2 = ToRegister(instr->result());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- // The multiply is int64, so sign-extend to r64.
- __ movsxlq(reg1, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ neg(reg1);
- DeoptimizeIf(zero, instr->environment());
- }
- __ Set(reg2, multiplier);
- // Result just fit in r64, because it's int32 * uint32.
- __ imul(reg2, reg1);
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ sarl(dividend, Immediate(shift));
+ return;
+ }
+
+ Label not_kmin_int, done;
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ __ movl(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
+ __ bind(&not_kmin_int);
+ __ sarl(dividend, Immediate(shift));
+ __ bind(&done);
+}
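The positive case above needs no checks because an arithmetic right shift already is flooring division when the divisor is a positive power of two; only the negated path has to worry about -0 and kMinInt. For example, under C++20 shift semantics:

    static_assert((-5 >> 2) == -2, "arithmetic shift floors: floor(-5/4)");
    static_assert((-5 / 4) == -1, "C++ division truncates toward zero");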
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
+ Label needs_adjustment, done;
+ __ cmpl(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ decl(rdx);
+ __ bind(&done);
+}
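The needs_adjustment path implements flooring division on top of the truncating TruncatingDiv: when dividend and divisor have strictly opposite signs, bias the dividend one step toward zero, divide, and subtract one. Equivalently, in plain C++ (assumes divisor != 0 and that kMinInt-style edge cases were rejected earlier, as in the code above):

    #include <cstdint>

    int32_t FlooringDivByConst(int32_t n, int32_t d) {
      bool needs_adjustment = (d > 0) ? (n < 0) : (n > 0);
      if (!needs_adjustment) return n / d;    // truncation == floor here
      return (n + (d > 0 ? 1 : -1)) / d - 1;  // the leal/TruncatingDiv/decl path
    }

For instance, floor(-7 / 2): the biased quotient is (-7 + 1) / 2 = -3, and -3 - 1 = -4, whereas plain truncation would give -3.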
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(rax));
+ ASSERT(remainder.is(rdx));
+ ASSERT(result.is(rax));
+ ASSERT(!divisor.is(rax));
+ ASSERT(!divisor.is(rdx));
- __ addq(reg2, Immediate(1 << 30));
- __ sar(reg2, Immediate(shift));
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(divisor, divisor);
+ DeoptimizeIf(zero, instr->environment());
}
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ testl(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ testl(divisor, divisor);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmpl(dividend, Immediate(kMinInt));
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmpl(divisor, Immediate(-1));
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&dividend_not_min_int);
+ }
+
+ // Sign extend to rdx (= remainder).
+ __ cdq();
+ __ idivl(divisor);
+
+ Label done;
+ __ testl(remainder, remainder);
+ __ j(zero, &done, Label::kNear);
+ __ xorl(remainder, divisor);
+ __ sarl(remainder, Immediate(31));
+ __ addl(result, remainder);
+ __ bind(&done);
}
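The three instructions after the idivl turn the truncating quotient into a flooring one without branching on the sign comparison itself: when the remainder is non-zero and its sign differs from the divisor's, (remainder ^ divisor) >> 31 is -1, otherwise 0. The same fixup in C++ (arithmetic right shift assumed, per C++20):

    #include <cstdint>

    int32_t FloorFixup(int32_t quotient, int32_t remainder, int32_t divisor) {
      if (remainder != 0) {                       // testl / j(zero, &done)
        quotient += (remainder ^ divisor) >> 31;  // xorl, sarl 31, addl
      }
      return quotient;
    }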
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ testl(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift > 0) {
+ // The arithmetic shift is always OK; the 'if' is only an optimization.
+ if (shift > 1) __ sarl(result, Immediate(31));
+ __ shrl(result, Immediate(32 - shift));
+ __ addl(result, dividend);
+ __ sarl(result, Immediate(shift));
+ }
+ if (divisor < 0) __ negl(result);
+}
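The shift sequence above is the classic bias trick for truncating signed division by 2^k: an arithmetic shift floors, so a negative dividend first has 2^k - 1 added, which makes the subsequent shift round toward zero instead. In outline, for 1 <= k <= 31 under C++20 shift semantics (negate the result afterwards for a negative divisor, as the code does):

    #include <cstdint>

    int32_t TruncatingDivByPowerOf2(int32_t n, int k) {
      // sarl(result, 31) + shrl(result, 32 - k) materialize this bias:
      uint32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - k);  // 2^k-1 if n < 0
      return (n + static_cast<int32_t>(bias)) >> k;
    }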
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmpl(dividend, Immediate(0));
- __ j(less, &negative, Label::kNear);
- __ sarl(dividend, Immediate(power));
- if (divisor < 0) __ negl(dividend);
- __ jmp(&done, Label::kNear);
-
- __ bind(&negative);
- __ negl(dividend);
- __ sarl(dividend, Immediate(power));
- if (divisor > 0) __ negl(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
- }
- }
- if (divisor < 0) __ negl(dividend);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->left()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rdx));
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
- Register left_reg = rax;
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ movl(rax, rdx);
+ __ imull(rax, rax, Immediate(divisor));
+ __ subl(rax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
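TruncatingDiv stands in for the inline magic-number computation the removed DoMathFloorOfDiv carried: division by a constant becomes a multiply by a precomputed reciprocal plus shifts, and the imull/subl tail above deopts when the division was inexact. A simplified unsigned illustration of the reciprocal trick, using the well-known multiplier for 3 (the signed helper additionally fixes up negative dividends):

    #include <cstdint>

    // q = (n * m) >> 33 with m = ceil(2^33 / 3) = 0xAAAAAAAB is exact for
    // every 32-bit unsigned n; no divide instruction is needed.
    uint32_t DivBy3(uint32_t n) {
      return static_cast<uint32_t>(
          (static_cast<uint64_t>(n) * 0xAAAAAAABull) >> 33);
    }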
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ ASSERT(dividend.is(rax));
+ ASSERT(remainder.is(rdx));
+ ASSERT(ToRegister(instr->result()).is(rax));
+ ASSERT(!divisor.is(rax));
+ ASSERT(!divisor.is(rdx));
// Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(divisor, divisor);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ testl(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ testl(divisor, divisor);
DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
+ __ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmpl(dividend, Immediate(kMinInt));
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ __ bind(&dividend_not_min_int);
}
- // Sign extend to rdx.
+ // Sign extend to rdx (= remainder).
__ cdq();
- __ idivl(right_reg);
+ __ idivl(divisor);
- if (instr->is_flooring()) {
- Label done;
- __ testl(rdx, rdx);
- __ j(zero, &done, Label::kNear);
- __ xorl(rdx, right_reg);
- __ sarl(rdx, Immediate(31));
- __ addl(rax, rdx);
- __ bind(&done);
- } else if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
+ __ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
}
}
@@ -1308,7 +1393,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ movq(kScratchRegister, left);
+ __ movp(kScratchRegister, left);
} else {
__ movl(kScratchRegister, left);
}
@@ -1360,14 +1445,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToOperand(right));
+ __ imulp(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToRegister(right));
+ __ imulp(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
}
@@ -1381,14 +1466,17 @@ void LCodeGen::DoMulI(LMulI* instr) {
// Bail out if the result is supposed to be negative zero.
Label done;
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ testq(left, left);
+ __ testp(left, left);
} else {
__ testl(left, left);
}
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
- // Constant can't be represented as Smi due to immediate size limit.
- ASSERT(!instr->hydrogen_value()->representation().IsSmi());
+ // Constant can't be represented as 32-bit Smi due to immediate size
+ // limit.
+ ASSERT(SmiValuesAre32Bits()
+ ? !instr->hydrogen_value()->representation().IsSmi()
+ : SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
@@ -1397,7 +1485,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToOperand(right));
+ __ orp(kScratchRegister, ToOperand(right));
} else {
__ orl(kScratchRegister, ToOperand(right));
}
@@ -1405,7 +1493,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToRegister(right));
+ __ orp(kScratchRegister, ToRegister(right));
} else {
__ orl(kScratchRegister, ToRegister(right));
}
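These hunks cover the one case where an int32 multiply loses information relative to doubles: a zero product should be -0 whenever the other operand was negative, and -0 is not representable as a smi or int32. The bailout predicate, stated as a sketch of the condition rather than V8 API:

    #include <cstdint>

    // Deopt iff the integer product is 0 but the double product would be -0.
    bool MulNeedsMinusZeroBailout(int32_t result, int32_t other_operand) {
      return result == 0 && other_operand < 0;
    }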
@@ -1423,7 +1511,9 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
- int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation());
switch (instr->op()) {
case Token::BIT_AND:
__ andl(ToRegister(left), Immediate(right_operand));
@@ -1445,13 +1535,25 @@ void LCodeGen::DoBitI(LBitI* instr) {
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
+ if (instr->IsInteger32()) {
+ __ andl(ToRegister(left), ToOperand(right));
+ } else {
+ __ andp(ToRegister(left), ToOperand(right));
+ }
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
+ if (instr->IsInteger32()) {
+ __ orl(ToRegister(left), ToOperand(right));
+ } else {
+ __ orp(ToRegister(left), ToOperand(right));
+ }
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
+ if (instr->IsInteger32()) {
+ __ xorl(ToRegister(left), ToOperand(right));
+ } else {
+ __ xorp(ToRegister(left), ToOperand(right));
+ }
break;
default:
UNREACHABLE();
@@ -1461,13 +1563,25 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToRegister(right));
+ if (instr->IsInteger32()) {
+ __ andl(ToRegister(left), ToRegister(right));
+ } else {
+ __ andp(ToRegister(left), ToRegister(right));
+ }
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToRegister(right));
+ if (instr->IsInteger32()) {
+ __ orl(ToRegister(left), ToRegister(right));
+ } else {
+ __ orp(ToRegister(left), ToRegister(right));
+ }
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToRegister(right));
+ if (instr->IsInteger32()) {
+ __ xorl(ToRegister(left), ToRegister(right));
+ } else {
+ __ xorp(ToRegister(left), ToRegister(right));
+ }
break;
default:
UNREACHABLE();
@@ -1521,17 +1635,30 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
}
break;
case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
+ if (shift_count != 0) {
+ __ shrl(ToRegister(left), Immediate(shift_count));
+ } else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
DeoptimizeIf(negative, instr->environment());
- } else {
- __ shrl(ToRegister(left), Immediate(shift_count));
}
break;
case Token::SHL:
if (shift_count != 0) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ shl(ToRegister(left), Immediate(shift_count));
+ if (SmiValuesAre32Bits()) {
+ __ shlp(ToRegister(left), Immediate(shift_count));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ if (instr->can_deopt()) {
+ if (shift_count != 1) {
+ __ shll(ToRegister(left), Immediate(shift_count - 1));
+ }
+ __ Integer32ToSmi(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(overflow, instr->environment());
+ } else {
+ __ shll(ToRegister(left), Immediate(shift_count));
+ }
+ }
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
@@ -1551,17 +1678,19 @@ void LCodeGen::DoSubI(LSubI* instr) {
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
- __ subl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation());
+ __ subl(ToRegister(left), Immediate(right_operand));
} else if (right->IsRegister()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToRegister(right));
+ __ subp(ToRegister(left), ToRegister(right));
} else {
__ subl(ToRegister(left), ToRegister(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToOperand(right));
+ __ subp(ToRegister(left), ToOperand(right));
} else {
__ subl(ToRegister(left), ToOperand(right));
}
@@ -1574,7 +1703,12 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- __ Set(ToRegister(instr->result()), instr->value());
+ Register dst = ToRegister(instr->result());
+ if (instr->value() == 0) {
+ __ xorl(dst, dst);
+ } else {
+ __ movl(dst, Immediate(instr->value()));
+ }
}
@@ -1606,8 +1740,9 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value(isolate());
- __ Move(ToRegister(instr->result()), value);
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ Move(ToRegister(instr->result()), object);
}
@@ -1618,40 +1753,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte.
- __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(result, Immediate(Map::kElementsKindMask));
- __ shr(result, Immediate(Map::kElementsKindShift));
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
- }
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
- __ j(not_equal, &done, Label::kNear);
- __ movq(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -1666,23 +1767,23 @@ void LCodeGen::DoDateField(LDateField* instr) {
DeoptimizeIf(not_equal, instr->environment());
if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
- __ movq(kScratchRegister, stamp_operand);
- __ cmpq(kScratchRegister, FieldOperand(object,
+ __ movp(kScratchRegister, stamp_operand);
+ __ cmpp(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, object);
- __ movq(arg_reg_2, index, RelocInfo::NONE64);
+ __ movp(arg_reg_1, object);
+ __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
}
@@ -1713,17 +1814,17 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
Register string = ToRegister(instr->string());
if (FLAG_debug_code) {
- __ push(string);
- __ movq(string, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
+ __ Push(string);
+ __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(string);
+ __ Pop(string);
}
Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
@@ -1772,61 +1873,56 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->value()));
- ASSERT(ToRegister(instr->context()).is(rsi));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
- }
-}
-
-
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
Representation target_rep = instr->hydrogen()->representation();
- bool is_q = target_rep.IsSmi() || target_rep.IsExternal();
+ bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
- int32_t offset = ToInteger32(LConstantOperand::cast(right));
- if (is_q) {
- __ lea(ToRegister(instr->result()),
- MemOperand(ToRegister(left), offset));
+ // Smi immediates are not supported when smis are 32 bits wide.
+ ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+ int32_t offset =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation());
+ if (is_p) {
+ __ leap(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
} else {
__ leal(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset));
}
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- if (is_q) {
- __ lea(ToRegister(instr->result()), address);
+ if (is_p) {
+ __ leap(ToRegister(instr->result()), address);
} else {
__ leal(ToRegister(instr->result()), address);
}
}
} else {
if (right->IsConstantOperand()) {
- if (is_q) {
- __ addq(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
+ // Smi immediates are not supported when smis are 32 bits wide.
+ ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation());
+ if (is_p) {
+ __ addp(ToRegister(left), Immediate(right_operand));
} else {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
+ __ addl(ToRegister(left), Immediate(right_operand));
}
} else if (right->IsRegister()) {
- if (is_q) {
- __ addq(ToRegister(left), ToRegister(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToRegister(right));
}
} else {
- if (is_q) {
- __ addq(ToRegister(left), ToOperand(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToOperand(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
}
@@ -1850,30 +1946,33 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
: greater_equal;
Register left_reg = ToRegister(left);
if (right->IsConstantOperand()) {
- Immediate right_imm =
- Immediate(ToInteger32(LConstantOperand::cast(right)));
- ASSERT(!instr->hydrogen_value()->representation().IsSmi());
+ Immediate right_imm = Immediate(
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation()));
+ ASSERT(SmiValuesAre32Bits()
+ ? !instr->hydrogen()->representation().IsSmi()
+ : SmiValuesAre31Bits());
__ cmpl(left_reg, right_imm);
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_imm);
+ __ movp(left_reg, right_imm);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_reg);
+ __ cmpp(left_reg, right_reg);
} else {
__ cmpl(left_reg, right_reg);
}
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_reg);
+ __ movp(left_reg, right_reg);
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_op);
+ __ cmpp(left_reg, right_op);
} else {
__ cmpl(left_reg, right_op);
}
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_op);
+ __ movp(left_reg, right_op);
}
__ bind(&return_left);
} else {
@@ -1941,7 +2040,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ movaps(xmm_scratch, left);
ASSERT(right.is(xmm1));
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
+ ExternalReference::mod_two_doubles_operation(isolate()), 2);
__ movaps(result, xmm_scratch);
break;
}
@@ -1958,8 +2057,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2007,7 +2106,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
- __ testq(reg, reg);
+ __ testp(reg, reg);
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
@@ -2039,7 +2138,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
@@ -2078,7 +2177,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
const Register map = kScratchRegister;
if (expected.NeedsMap()) {
- __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
@@ -2099,7 +2198,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
__ j(not_zero, instr->TrueLabel(chunk_));
__ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
@@ -2181,7 +2280,11 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ bool is_unsigned =
+ instr->is_double() ||
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cc = TokenToCondition(instr->op(), is_unsigned);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
@@ -2218,13 +2321,13 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
} else {
__ cmpl(ToOperand(right), Immediate(value));
}
- // We transposed the operands. Reverse the condition.
- cc = ReverseCondition(cc);
+ // We commuted the operands, so commute the condition.
+ cc = CommuteCondition(cc);
} else if (instr->hydrogen_value()->representation().IsSmi()) {
if (right->IsRegister()) {
- __ cmpq(ToRegister(left), ToRegister(right));
+ __ cmpp(ToRegister(left), ToRegister(right));
} else {
- __ cmpq(ToRegister(left), ToOperand(right));
+ __ cmpp(ToRegister(left), ToOperand(right));
}
} else {
if (right->IsRegister()) {
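CommuteCondition (the renamed ReverseCondition) is the swap-the-operands mapping, which is not logical negation: with the operands exchanged, 'less' must become 'greater', not 'greater_equal'. Sketched over a reduced condition set:

    enum Condition { kLess, kLessEqual, kGreater, kGreaterEqual, kEqual };

    // Swapping cmp operands mirrors the ordering; (in)equality is its
    // own mirror image.
    Condition CommuteCondition(Condition cc) {
      switch (cc) {
        case kLess:         return kGreater;
        case kLessEqual:    return kGreaterEqual;
        case kGreater:      return kLess;
        case kGreaterEqual: return kLessEqual;
        default:            return cc;  // kEqual
      }
    }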
@@ -2247,7 +2350,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
__ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
- __ cmpq(left, right);
+ __ cmpp(left, right);
}
EmitBranch(instr, equal);
}
@@ -2265,9 +2368,9 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
__ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
@@ -2293,8 +2396,8 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- EmitFalseBranch(instr, not_equal);
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
__ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
EmitBranch(instr, equal);
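
The switch from cmpl against 0x80000000 with an equality branch to cmpl against 1 with an overflow branch rests on a flags identity: subtracting 1 from a 32-bit value sets the signed-overflow flag exactly when the value is INT32_MIN (0x80000000), which is both the upper word of -0.0 and the value cvttsd2si produces on conversion failure, so the same idiom recurs in the Math.floor/round hunks below. A standalone sketch (the overflow builtin is a GCC/Clang extension, assumed available):

#include <cstdint>
#include <cassert>

// True exactly when v == INT32_MIN, mirroring "cmpl reg, 1 / j(overflow)".
bool is_int32_min_via_overflow(int32_t v) {
  int32_t discarded;
  return __builtin_sub_overflow(v, 1, &discarded);
}

int main() {
  assert(is_int32_min_via_overflow(INT32_MIN));  // 0x80000000
  assert(!is_int32_min_via_overflow(INT32_MAX));
  assert(!is_int32_min_via_overflow(0));
  assert(!is_int32_min_via_overflow(1));
}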
@@ -2312,7 +2415,7 @@ Condition LCodeGen::EmitIsObject(Register input,
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, is_object);
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -2356,7 +2459,7 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register temp = ToRegister(instr->temp());
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond = EmitIsString(
@@ -2383,10 +2486,10 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
EmitBranch(instr, not_zero);
@@ -2401,7 +2504,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
EmitBranch(instr, condition);
}
@@ -2430,7 +2533,7 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
@@ -2492,17 +2595,17 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
} else {
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
+ __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
@@ -2514,8 +2617,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// temp now contains the constructor function. Grab the
// instance class name from there.
- __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ movq(temp, FieldOperand(temp,
+ __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
// The class name we are testing against is internalized since it's a literal.
// The name in the constructor is internalized because of the way the context
@@ -2552,12 +2655,12 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ __ Push(ToRegister(instr->left()));
+ __ Push(ToRegister(instr->right()));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2599,11 +2702,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Label cache_miss;
// Use a temp register to avoid memory operands with variable lengths.
Register map = ToRegister(instr->temp());
- __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
+ __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
- __ movq(kScratchRegister, cache_cell, RelocInfo::CELL);
- __ cmpq(map, Operand(kScratchRegister, 0));
+ __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
+ __ cmpp(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
__ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
@@ -2638,22 +2741,22 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
- InstanceofStub stub(flags);
+ InstanceofStub stub(isolate(), flags);
- __ push(ToRegister(instr->value()));
+ __ Push(ToRegister(instr->value()));
__ Push(instr->function());
static const int kAdditionalDelta = 10;
int delta =
masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
ASSERT(delta >= 0);
- __ push_imm32(delta);
+ __ PushImm32(delta);
  // We are pushing three values on the stack but recording a
  // safepoint with two arguments because the stub is going to
  // remove the third argument from the stack before jumping
  // to the instanceof builtin on the slow path.
- CallCodeGeneric(stub.GetCode(isolate()),
+ CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
@@ -2663,9 +2766,9 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Move result to a register that survives the end of the
// PushSafepointRegisterScope.
- __ movq(kScratchRegister, rax);
+ __ movp(kScratchRegister, rax);
}
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
__ j(not_zero, &load_false, Label::kNear);
@@ -2686,7 +2789,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Condition condition = TokenToCondition(op, false);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2702,8 +2805,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
// to return the value in the same register. We're leaving the code
  // managed by the register allocator and tearing down the frame, so it's
// safe to write to the context register.
- __ push(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Push(rax);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
@@ -2711,8 +2814,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
- __ movq(rsp, rbp);
- __ pop(rbp);
+ __ movp(rsp, rbp);
+ __ popq(rbp);
no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
@@ -2724,8 +2827,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ SmiToInteger32(reg, reg);
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
__ PopReturnAddressTo(return_addr_reg);
- __ shl(reg, Immediate(kPointerSizeLog2));
- __ addq(rsp, reg);
+ __ shlp(reg, Immediate(kPointerSizeLog2));
+ __ addp(rsp, reg);
__ jmp(return_addr_reg);
}
if (no_frame_start != -1) {
@@ -2750,10 +2853,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rcx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2769,37 +2871,24 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// We have a temp because CompareRoot might clobber kScratchRegister.
Register cell = ToRegister(instr->temp());
ASSERT(!value.is(cell));
- __ movq(cell, cell_handle, RelocInfo::CELL);
+ __ Move(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
// Store the value.
- __ movq(Operand(cell, 0), value);
+ __ movp(Operand(cell, 0), value);
} else {
// Store the value.
- __ movq(kScratchRegister, cell_handle, RelocInfo::CELL);
- __ movq(Operand(kScratchRegister, 0), value);
+ __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
+ __ movp(Operand(kScratchRegister, 0), value);
}
// Cells are always rescanned, so no write barrier here.
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(rsi));
- ASSERT(ToRegister(instr->global_object()).is(rdx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- __ Move(rcx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ movq(result, ContextOperand(context, instr->slot_index()));
+ __ movp(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
@@ -2829,11 +2918,11 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ j(not_equal, &skip_assignment);
}
}
- __ movq(target, value);
+ __ movp(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
Register scratch = ToRegister(instr->temp());
@@ -2867,8 +2956,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register object = ToRegister(instr->object());
- if (FLAG_track_double_fields &&
- instr->hydrogen()->representation().IsDouble()) {
+ if (instr->hydrogen()->representation().IsDouble()) {
XMMRegister result = ToDoubleRegister(instr->result());
__ movsd(result, FieldOperand(object, offset));
return;
@@ -2876,10 +2964,26 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register result = ToRegister(instr->result());
if (!access.IsInobject()) {
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
object = result;
}
- __ Load(result, FieldOperand(object, offset), access.representation());
+
+ Representation representation = access.representation();
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(object, offset), representation);
+ __ AssertSmi(scratch);
+ }
+
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ representation = Representation::Integer32();
+ }
+ __ Load(result, FieldOperand(object, offset), representation);
}
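
The new fast path exploits the 64-bit smi layout when SmiValuesAre32Bits(): a smi stores its 32-bit payload shifted left by 32, so on a little-endian target the untagged integer sits in the upper four bytes of the slot and can be read with a plain 32-bit load at offset + kPointerSize / 2. The same layout argument underpins the direct upper-half stores in DoStoreNamedField further down. A little-endian sketch, assuming the 32-bit-smi configuration (not V8 code):

#include <cstdint>
#include <cstring>
#include <cassert>

// Read the int32 payload of a 64-bit smi (payload << 32) without untagging.
int32_t read_smi_upper_half(const uint64_t* slot) {
  int32_t payload;
  std::memcpy(&payload, reinterpret_cast<const char*>(slot) + 4, 4);
  return payload;
}

int main() {
  // Build the smi in unsigned arithmetic to avoid shifting a negative value.
  uint64_t smi = static_cast<uint64_t>(static_cast<uint32_t>(-7)) << 32;
  assert(read_smi_upper_half(&smi) == -7);
}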
@@ -2889,7 +2993,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rcx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2909,7 +3013,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
__ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
- __ movq(result,
+ __ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
@@ -2922,13 +3026,13 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
__ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
- __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
__ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in the function's map.
__ bind(&non_instance);
- __ movq(result, FieldOperand(result, Map::kConstructorOffset));
+ __ movp(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
__ bind(&done);
@@ -2941,15 +3045,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input,
- ExternalPixelArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
@@ -2958,9 +3053,13 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
instr->index()->IsConstantOperand()) {
int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- StackArgumentsAccessor args(arguments, const_length,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(result, args.GetArgumentOperand(const_index));
+ if (const_index >= 0 && const_index < const_length) {
+ StackArgumentsAccessor args(arguments, const_length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(result, args.GetArgumentOperand(const_index));
+ } else if (FLAG_debug_code) {
+ __ int3();
+ }
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
@@ -2972,7 +3071,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
StackArgumentsAccessor args(arguments, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(result, args.GetArgumentOperand(0));
+ __ movp(result, args.GetArgumentOperand(0));
}
}
@@ -2980,14 +3079,13 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed (in this case) instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index argument
- // to the bounds check, which can be tagged, so that case must be
- // handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
+ Representation key_representation =
+ instr->hydrogen()->key()->representation();
+ if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -2996,44 +3094,55 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
elements_kind,
- 0,
- instr->additional_index()));
+ instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(result, operand);
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ movsxbl(result, operand);
break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ movzxbq(result, operand);
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ movzxbl(result, operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(result, operand);
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ movsxwl(result, operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(result, operand);
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ movzxwl(result, operand);
break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(result, operand);
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ movl(result, operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
DeoptimizeIf(negative, instr->environment());
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -3041,7 +3150,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3052,28 +3161,19 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+ instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(ToRegister(key), ToRegister(key));
}
-
if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
+ instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
@@ -3081,41 +3181,58 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
__ movsd(result, double_load_operand);
}
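
The hole check above compares only the upper 32 bits of the double slot against kHoleNanUpper32, at base_offset + sizeof(kHoleNanLower32): the hole is a single canonical NaN bit pattern, and its upper word alone distinguishes it from any value user code can store. A sketch of the idea with a made-up pattern (kHypotheticalHoleUpper is illustrative, not V8's actual constant):

#include <cstdint>
#include <cstring>
#include <cassert>

const uint32_t kHypotheticalHoleUpper = 0x7FF7ABCD;  // stand-in hole NaN word

bool is_hole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  // Comparing the upper word alone suffices for a fixed bit pattern.
  return static_cast<uint32_t>(bits >> 32) == kHypotheticalHoleUpper;
}

int main() {
  uint64_t hole_bits = static_cast<uint64_t>(kHypotheticalHoleUpper) << 32;
  double hole;
  std::memcpy(&hole, &hole_bits, sizeof hole);
  assert(is_hole(hole));
  assert(!is_hole(1.0));
}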
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ HLoadKeyed* hinstr = instr->hydrogen();
Register result = ToRegister(instr->result());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that
- // case must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
+ bool requires_hole_check = hinstr->RequiresHoleCheck();
+ Representation representation = hinstr->representation();
+ int offset = instr->base_offset();
+
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+ instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(ToRegister(key), ToRegister(key));
+ }
+ if (representation.IsInteger32() && SmiValuesAre32Bits() &&
+ hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+ ASSERT(!requires_hole_check);
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ offset),
+ Representation::Smi());
+ __ AssertSmi(scratch);
}
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
}
- // Load the result.
- __ movq(result,
+ __ Load(result,
BuildFastArrayOperand(instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
+ offset),
+ representation);
// Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ if (requires_hole_check) {
+ if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
DeoptimizeIf(NegateCondition(smi), instr->environment());
} else {
@@ -3127,7 +3244,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3140,9 +3257,9 @@ void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
+ uint32_t offset) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
@@ -3151,14 +3268,18 @@ Operand LCodeGen::BuildFastArrayOperand(
Abort(kArrayIndexConstantValueTooBig);
}
return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
+ (constant_value << shift_size) + offset);
} else {
+ // Take the tag bit into account while computing the shift size.
+ if (key_representation.IsSmi() && (shift_size >= 1)) {
+ ASSERT(SmiValuesAre31Bits());
+ shift_size -= kSmiTagSize;
+ }
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
- offset + (additional_index << shift_size));
+ offset);
}
}
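
The new key_representation parameter lets BuildFastArrayOperand fold smi untagging into the addressing mode: with 31-bit smis the tag is a single low bit, so a smi-tagged key is already index * 2, and shrinking the scale by kSmiTagSize lands on the same byte offset as scaling the untagged index. A plain-arithmetic sketch (kSmiTagSize == 1 assumed, matching the SmiValuesAre31Bits() configuration):

#include <cassert>

int main() {
  const int kSmiTagSize = 1;
  const int element_shift = 3;            // 8-byte elements
  for (int index = 0; index < 16; index++) {
    int tagged = index << kSmiTagSize;    // smi-tagged key == index * 2
    // Scaling the tagged key by 1 << (shift - tag) equals scaling the
    // untagged index by 1 << shift, so no separate untag is needed.
    assert((tagged << (element_shift - kSmiTagSize)) ==
           (index << element_shift));
  }
}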
@@ -3177,22 +3298,22 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
+ __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
- __ movq(result, rbp);
+ __ movp(result, rbp);
__ jmp(&done, Label::kNear);
// Arguments adaptor frame present.
__ bind(&adapted);
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
@@ -3208,15 +3329,15 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
// If no arguments adaptor frame the number of arguments is fixed.
if (instr->elements()->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->elements()));
+ __ cmpp(rbp, ToRegister(instr->elements()));
} else {
- __ cmpq(rbp, ToOperand(instr->elements()));
+ __ cmpp(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
// Arguments adaptor frame present. Get argument length from there.
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiToInteger32(result,
Operand(result,
ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -3236,20 +3357,22 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Label global_object, receiver_ok;
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- // Do not transform the receiver to object for strict mode
- // functions.
- __ movq(kScratchRegister,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
-
- // Do not transform the receiver to object for builtins.
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ movp(kScratchRegister,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
+
+ // Do not transform the receiver to object for builtins.
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
+ }
// Normal function. Replace undefined or null with global receiver.
__ CompareRoot(receiver, Heap::kNullValueRootIndex);
@@ -3262,16 +3385,16 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
DeoptimizeIf(is_smi, instr->environment());
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok, Label::kNear);
+ __ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
- __ movq(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
+ __ movp(receiver,
+ Operand(receiver,
+ Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(receiver,
+ FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+
__ bind(&receiver_ok);
}
@@ -3288,11 +3411,11 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpq(length, Immediate(kArgumentsLimit));
+ __ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr->environment());
- __ push(receiver);
- __ movq(receiver, length);
+ __ Push(receiver);
+ __ movp(receiver, length);
// Loop through the arguments pushing them onto the execution
// stack.
@@ -3303,7 +3426,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&loop);
StackArgumentsAccessor args(elements, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ push(args.GetArgumentOperand(0));
+ __ Push(args.GetArgumentOperand(0));
__ decl(length);
__ j(not_zero, &loop);
@@ -3314,8 +3437,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3332,14 +3454,14 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
if (info()->IsOptimizing()) {
- __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
// If there is no frame, the context must be in rsi.
ASSERT(result.is(rsi));
@@ -3347,35 +3469,12 @@ void LCodeGen::DoContext(LContext* instr) {
}
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result,
- Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result,
- Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3383,7 +3482,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
RDIState rdi_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3398,7 +3496,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Change context.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
  // Set rax to the arguments count if adaptation is not needed. Assumes that rax
// is available to write to at this point.
@@ -3407,11 +3505,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ SetCallKind(rcx, call_kind);
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
// Set up deoptimization.
@@ -3422,20 +3519,63 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- RDI_UNINITIALIZED);
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(rdi));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ Set(rax, instr->arity());
+ }
+
+ // Change context.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ bool is_self_call = false;
+ if (instr->hydrogen()->function()->IsConstant()) {
+ Handle<JSFunction> jsfun = Handle<JSFunction>::null();
+ HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+ jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
+ is_self_call = jsfun.is_identical_to(info()->closure());
+ }
+
+ if (is_self_call) {
+ __ CallSelf();
+ } else {
+ Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
+ generator.BeforeCall(__ CallSize(target));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
@@ -3466,17 +3606,17 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
CallRuntimeFromDeferred(
- Runtime::kAllocateHeapNumber, 0, instr, instr->context());
+ Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
// Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) __ movq(tmp, rax);
+ if (!tmp.is(rax)) __ movp(tmp, rax);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
- __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ shl(tmp2, Immediate(1));
- __ shr(tmp2, Immediate(1));
- __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ shlq(tmp2, Immediate(1));
+ __ shrq(tmp2, Immediate(1));
+ __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
@@ -3496,10 +3636,10 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
- __ testq(input_reg, input_reg);
+ __ testp(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
- __ neg(input_reg); // Sets flags.
+ __ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
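
EmitSmiMathAbs leans on negp setting flags: negating any ordinary negative value yields a positive result, but negating the most negative representable value wraps back to itself and leaves the sign flag set, which is exactly the overflow case the following DeoptimizeIf(negative) catches. A sketch of the wraparound (unsigned arithmetic keeps the C++ version free of signed-overflow UB):

#include <cstdint>
#include <cassert>

int main() {
  int64_t ok = -5;
  ok = -ok;                 // negp on an ordinary value: sign flag clears
  assert(ok == 5);
  int64_t min = INT64_MIN;  // two's-complement negation wraps to itself
  int64_t negated = static_cast<int64_t>(-static_cast<uint64_t>(min));
  assert(negated < 0);      // the "negative" condition triggering the deopt
}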
@@ -3559,8 +3699,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
} else {
Label negative_sign, done;
// Deoptimize on unordered.
@@ -3577,15 +3717,15 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ testq(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
__ Set(output_reg, 0);
- __ jmp(&done, Label::kNear);
+ __ jmp(&done);
__ bind(&positive_sign);
}
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3622,9 +3762,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3639,9 +3779,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -3665,9 +3805,14 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ sqrtsd(input_reg, input_reg);
+ XMMRegister output = ToDoubleRegister(instr->result());
+ if (instr->value()->IsDoubleRegister()) {
+ XMMRegister input = ToDoubleRegister(instr->value());
+ __ sqrtsd(output, input);
+ } else {
+ Operand input = ToOperand(instr->value());
+ __ sqrtsd(output, input);
+ }
}
@@ -3717,7 +3862,7 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
@@ -3725,14 +3870,14 @@ void LCodeGen::DoPower(LPower* instr) {
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
@@ -3757,7 +3902,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
- __ j(equal, &zero, Label::kNear);
+ __ j(not_carry, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
Operand nan_operand = masm()->ExternalOperand(nan);
@@ -3771,47 +3916,28 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), input_reg);
__ fld_d(Operand(rsp, 0));
__ fyl2x();
__ fstp_d(Operand(rsp, 0));
__ movsd(input_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ bind(&done);
}
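
The change from j(equal, &zero) to j(not_carry, &zero) is about NaN: ucomisd sets ZF, PF and CF all to 1 on an unordered compare, so branching on equal would send a NaN input down the zero path, whereas carry-clear means the compare was ordered and not-below, and once j(above) has peeled off the positive case only a true zero remains. A behavioral sketch of the branch ladder (a plain C++ emulation, not the flag semantics themselves):

#include <cmath>
#include <cassert>

enum Path { POSITIVE, ZERO, NAN_RESULT };

// Emulates "ucomisd(input, 0.0); j(above); j(not_carry)" in DoMathLog.
Path classify(double x) {
  bool unordered = std::isnan(x);
  bool carry = unordered || x < 0.0;           // CF after ucomisd
  if (!unordered && x > 0.0) return POSITIVE;  // j(above, &positive)
  if (!carry) return ZERO;                     // j(not_carry, &zero)
  return NAN_RESULT;                           // log of NaN or negative
}

int main() {
  assert(classify(2.0) == POSITIVE);
  assert(classify(0.0) == ZERO);
  assert(classify(-0.0) == ZERO);
  assert(classify(std::nan("")) == NAN_RESULT);
  assert(classify(-1.0) == NAN_RESULT);
}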
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(rsi, 0);
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(rsi, 0);
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label not_zero_input;
+ __ bsrl(result, input);
+ __ j(not_zero, &not_zero_input);
+ __ Set(result, 63); // 63^31 == 32
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(rsi, 0);
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ __ bind(&not_zero_input);
+ __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
}
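
DoMathClz32 packs the whole of Math.clz32 into bsrl plus one xor: for a non-zero input bsrl yields the index b of the highest set bit, and 31 - b equals 31 ^ b whenever b is in [0, 31]; seeding the result with 63 makes the zero-input case fall out as 63 ^ 31 == 32 without a separate store. A standalone check of the identity (using the GCC/Clang clz builtin in place of bsrl):

#include <cstdint>
#include <cassert>
#include <initializer_list>

int clz32_like(uint32_t x) {
  int result = 63;                             // zero input: 63 ^ 31 == 32
  if (x != 0) result = 31 - __builtin_clz(x);  // highest-set-bit index, as bsrl
  return result ^ 31;                          // 31 ^ b == 31 - b on [0, 31]
}

int main() {
  for (uint32_t x : {0u, 1u, 2u, 0x20u, 0x80000000u}) {
    unsigned expected = 32;
    for (uint32_t probe = x; probe != 0; probe >>= 1) expected--;
    assert(static_cast<unsigned>(clz32_like(x)) == expected);
  }
}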
@@ -3825,79 +3951,25 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
RDI_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(rsi));
- ASSERT(ToRegister(instr->key()).is(rcx));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(rsi));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ leave();
- __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
- } else {
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->context()).is(rsi));
- ASSERT(ToRegister(instr->result()).is(rax));
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- RDI_UNINITIALIZED);
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3908,10 +3980,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ Set(rax, instr->arity());
// No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_value);
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3921,41 +3992,41 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
- __ Move(rbx, instr->hydrogen()->property_cell());
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
Label packed_case;
      // We might need a change here:
      // look at the first argument.
- __ movq(rcx, Operand(rsp, 0));
- __ testq(rcx, rcx);
+ __ movp(rcx, Operand(rsp, 0));
+ __ testp(rcx, rcx);
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -3969,8 +4040,8 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
- __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
- __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+ __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
@@ -3979,26 +4050,26 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register base = ToRegister(instr->base_object());
if (instr->offset()->IsConstantOperand()) {
LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ lea(result, Operand(base, ToInteger32(offset)));
+ __ leap(result, Operand(base, ToInteger32(offset)));
} else {
Register offset = ToRegister(instr->offset());
- __ lea(result, Operand(base, offset, times_1, 0));
+ __ leap(result, Operand(base, offset, times_1, 0));
}
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ HStoreNamedField* hinstr = instr->hydrogen();
Representation representation = instr->representation();
- HObjectAccess access = instr->hydrogen()->access();
+ HObjectAccess access = hinstr->access();
int offset = access.offset();
if (access.IsExternalMemory()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ ASSERT(!hinstr->NeedsWriteBarrier());
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
ASSERT(value.is(rax));
- ASSERT(!access.representation().IsSpecialization());
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
@@ -4009,86 +4080,84 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Register object = ToRegister(instr->object());
- Handle<Map> transition = instr->transition();
+ __ AssertNotSmi(object);
- if (FLAG_track_fields && representation.IsSmi()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsSmiConstant(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32Constant(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- } else {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- Register value = ToRegister(instr->value());
- Condition cc = masm()->CheckSmi(value);
- DeoptimizeIf(cc, instr->environment());
- }
- }
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- ASSERT(transition.is_null());
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsInteger32Constant(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ ASSERT(!hinstr->has_transition());
+ ASSERT(!hinstr->NeedsWriteBarrier());
XMMRegister value = ToDoubleRegister(instr->value());
__ movsd(FieldOperand(object, offset), value);
return;
}
- if (!transition.is_null()) {
- if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ if (hinstr->has_transition()) {
+ Handle<Map> transition = hinstr->transition_map();
+ AddDeprecationDependency(transition);
+ if (!hinstr->NeedsWriteBarrierForMap()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
__ Move(kScratchRegister, transition);
- __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- kScratchRegister,
- temp,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteForMap(object,
+ kScratchRegister,
+ temp,
+ kSaveFPRegs);
}
}
// Do the store.
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
- __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
- if (instr->value()->IsConstantOperand()) {
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
+ hinstr->value()->representation().IsInteger32()) {
+ ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(write_register, offset), representation);
+ __ AssertSmi(scratch);
+ }
+ // Store int value directly to upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ representation = Representation::Integer32();
+ }
+
+ Operand operand = FieldOperand(write_register, offset);
+
+ if (instr->value()->IsRegister()) {
+ Register value = ToRegister(instr->value());
+ __ Store(operand, value, representation);
+ } else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (operand_value->IsRegister()) {
- Register value = ToRegister(operand_value);
- __ Store(FieldOperand(write_register, offset), value, representation);
- } else if (representation.IsInteger32()) {
+ if (IsInteger32Constant(operand_value)) {
+ ASSERT(!hinstr->NeedsWriteBarrier());
int32_t value = ToInteger32(operand_value);
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- __ movl(FieldOperand(write_register, offset), Immediate(value));
+ if (representation.IsSmi()) {
+ __ Move(operand, Smi::FromInt(value));
+
+ } else {
+ __ movl(operand, Immediate(value));
+ }
+
} else {
Handle<Object> handle_value = ToHandle(operand_value);
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- __ Move(FieldOperand(write_register, offset), handle_value);
+ ASSERT(!hinstr->NeedsWriteBarrier());
+ __ Move(operand, handle_value);
}
- } else {
- Register value = ToRegister(instr->value());
- __ Store(FieldOperand(write_register, offset), value, representation);
}
- if (instr->hydrogen()->NeedsWriteBarrier()) {
+ if (hinstr->NeedsWriteBarrier()) {
Register value = ToRegister(instr->value());
Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
// Update the write barrier for the object for in-object properties.
@@ -4098,7 +4167,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
temp,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ hinstr->SmiCheckForWriteBarrier(),
+ hinstr->PointersToHereCheckForValue());
}
}
@@ -4109,86 +4179,82 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
- Label done;
- __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, check->environment());
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- HBoundsCheck* hinstr = instr->hydrogen();
- if (hinstr->skip_check()) return;
-
- Representation representation = hinstr->length()->representation();
- ASSERT(representation.Equals(hinstr->index()->representation()));
+ Representation representation = instr->hydrogen()->length()->representation();
+ ASSERT(representation.Equals(instr->hydrogen()->index()->representation()));
ASSERT(representation.IsSmiOrInteger32());
- if (instr->length()->IsRegister()) {
- Register reg = ToRegister(instr->length());
-
- if (instr->index()->IsConstantOperand()) {
- int32_t constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
+ Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
+ if (instr->length()->IsConstantOperand()) {
+ int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+ Register index = ToRegister(instr->index());
+ if (representation.IsSmi()) {
+ __ Cmp(index, Smi::FromInt(length));
+ } else {
+ __ cmpl(index, Immediate(length));
+ }
+ cc = CommuteCondition(cc);
+ } else if (instr->index()->IsConstantOperand()) {
+ int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->length()->IsRegister()) {
+ Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ Cmp(reg, Smi::FromInt(constant_index));
+ __ Cmp(length, Smi::FromInt(index));
} else {
- __ cmpl(reg, Immediate(constant_index));
+ __ cmpl(length, Immediate(index));
}
} else {
- Register reg2 = ToRegister(instr->index());
+ Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
- __ cmpq(reg, reg2);
+ __ Cmp(length, Smi::FromInt(index));
} else {
- __ cmpl(reg, reg2);
+ __ cmpl(length, Immediate(index));
}
}
} else {
- Operand length = ToOperand(instr->length());
- if (instr->index()->IsConstantOperand()) {
- int32_t constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
+ Register index = ToRegister(instr->index());
+ if (instr->length()->IsRegister()) {
+ Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ Cmp(length, Smi::FromInt(constant_index));
+ __ cmpp(length, index);
} else {
- __ cmpl(length, Immediate(constant_index));
+ __ cmpl(length, index);
}
} else {
+ Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
- __ cmpq(length, ToRegister(instr->index()));
+ __ cmpp(length, index);
} else {
- __ cmpl(length, ToRegister(instr->index()));
+ __ cmpl(length, index);
}
}
}
- Condition condition = hinstr->allow_equality() ? below : below_equal;
- ApplyCheckIf(condition, instr);
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr->environment());
+ }
}
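
A note on the condition handling above: when the length is a constant, the emitted comparison is length-vs-index, the mirror image of the canonical index-vs-length check, so the condition must be commuted (operands swapped), not negated (outcome inverted). An illustrative sketch of what CommuteCondition does for the unsigned conditions used here (not the real helper):

// a < b  <=>  b > a, whereas negation would give a < b  <=>  !(a >= b).
enum Condition { below, below_equal, above, above_equal };

Condition Commute(Condition cc) {
  switch (cc) {
    case below:       return above;
    case below_equal: return above_equal;
    case above:       return below;
    case above_equal: return below_equal;
  }
  return cc;  // not reached
}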
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
+ Representation key_representation =
+ instr->hydrogen()->key()->representation();
+ if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
// Sign-extend the key because it could be a 32-bit negative value
// and the dehoisted address computation happens in 64 bits.
__ movsxlq(key_reg, key_reg);
@@ -4197,34 +4263,45 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
elements_kind,
- 0,
- instr->additional_index()));
+ instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
__ cvtsd2ss(value, value);
__ movss(operand, value);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ movb(operand, value);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ movw(operand, value);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ movl(operand, value);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -4232,7 +4309,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4243,20 +4320,12 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+ instr->hydrogen()->IsDehoisted()) {
+ // Sign-extend the key because it could be a 32-bit negative value
+ // and the dehoisted address computation happens in 64 bits.
+ __ movsxlq(ToRegister(key), ToRegister(key));
}
-
if (instr->NeedsCanonicalization()) {
Label have_value;
@@ -4273,72 +4342,93 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
__ movsd(double_store_operand, value);
}
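
On the NeedsCanonicalization branch a few lines up: a sketch of the intended effect, under the assumption that canonicalization here means collapsing every NaN payload to the single canonical quiet NaN, so that no stored bit pattern can alias the hole marker in a FixedDoubleArray:

#include <cmath>
#include <limits>

double CanonicalizeForStore(double v) {
  // All NaNs (including ones with hole-like payloads) become the one
  // canonical quiet NaN before being written to the backing store.
  return std::isnan(v) ? std::numeric_limits<double>::quiet_NaN() : v;
}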
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register elements = ToRegister(instr->elements());
+ HStoreKeyed* hinstr = instr->hydrogen();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
+ int offset = instr->base_offset();
+ Representation representation = hinstr->value()->representation();
+
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+ instr->hydrogen()->IsDehoisted()) {
+ // Sign-extend the key because it could be a 32-bit negative value
+ // and the dehoisted address computation happens in 64 bits.
+ __ movsxlq(ToRegister(key), ToRegister(key));
+ }
+ if (representation.IsInteger32() && SmiValuesAre32Bits()) {
+ ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ offset),
+ Representation::Smi());
+ __ AssertSmi(scratch);
}
+ // Store the int value directly to the upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
}
Operand operand =
BuildFastArrayOperand(instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ offset);
if (instr->value()->IsRegister()) {
- __ movq(operand, ToRegister(instr->value()));
+ __ Store(operand, ToRegister(instr->value()), representation);
} else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (IsInteger32Constant(operand_value)) {
- Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
- __ Move(operand, smi_value);
+ int32_t value = ToInteger32(operand_value);
+ if (representation.IsSmi()) {
+ __ Move(operand, Smi::FromInt(value));
+ } else {
+ __ movl(operand, Immediate(value));
+ }
} else {
Handle<Object> handle_value = ToHandle(operand_value);
__ Move(operand, handle_value);
}
}
- if (instr->hydrogen()->NeedsWriteBarrier()) {
+ if (hinstr->NeedsWriteBarrier()) {
+ Register elements = ToRegister(instr->elements());
ASSERT(instr->value()->IsRegister());
Register value = ToRegister(instr->value());
- ASSERT(!instr->key()->IsConstantOperand());
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ ASSERT(!key->IsConstantOperand());
+ SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key));
- __ lea(key_reg, operand);
+ __ leap(key_reg, operand);
__ RecordWrite(elements,
key_reg,
value,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ check_needed,
+ hinstr->PointersToHereCheckForValue());
}
}
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4354,7 +4444,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4374,23 +4464,20 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ j(not_equal, &not_applicable);
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
+ __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
- ASSERT_NE(instr->temp(), NULL);
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- ToRegister(instr->temp()), kDontSaveFPRegs);
+ __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
+ kDontSaveFPRegs);
} else {
+ ASSERT(object_reg.is(rax));
ASSERT(ToRegister(instr->context()).is(rsi));
PushSafepointRegistersScope scope(this);
- if (!object_reg.is(rax)) {
- __ movq(rax, object_reg);
- }
__ Move(rbx, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
}
__ bind(&not_applicable);
}
@@ -4408,18 +4495,12 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- if (FLAG_new_string_add) {
- ASSERT(ToRegister(instr->left()).is(rdx));
- ASSERT(ToRegister(instr->right()).is(rax));
- NewStringAddStub stub(instr->hydrogen()->flags(),
- isolate()->heap()->GetPretenureMode());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
+ ASSERT(ToRegister(instr->left()).is(rdx));
+ ASSERT(ToRegister(instr->right()).is(rax));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4458,7 +4539,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ Set(result, 0);
PushSafepointRegistersScope scope(this);
- __ push(string);
+ __ Push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
@@ -4468,10 +4549,10 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
} else {
Register index = ToRegister(instr->index());
__ Integer32ToSmi(index, index);
- __ push(index);
+ __ Push(index);
}
CallRuntimeFromDeferred(
- Runtime::kStringCharCodeAt, 2, instr, instr->context());
+ Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
__ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
@@ -4503,7 +4584,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
__ j(above, deferred->entry());
__ movsxlq(char_code, char_code);
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ movq(result, FieldOperand(result,
+ __ movp(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
__ CompareRoot(result, Heap::kUndefinedValueRootIndex);
@@ -4523,7 +4604,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
- __ push(char_code);
+ __ Push(char_code);
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4542,51 +4623,41 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- LOperand* temp = instr->temp();
-
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
-}
-
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange() ||
- instr->hydrogen()->value()->range()->upper() == kMaxInt) {
- // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
- // interval, so we treat kMaxInt as a sentinel for this entire interval.
- __ testl(ToRegister(input), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
+ __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), SIGNED_INT32);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagI* instr_;
+ };
+
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- __ Integer32ToSmi(reg, reg);
+ if (SmiValuesAre32Bits()) {
+ __ Integer32ToSmi(reg, reg);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
+ __ Integer32ToSmi(reg, reg);
+ __ j(overflow, deferred->entry());
+ __ bind(deferred->exit());
+ }
}
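
The 31-bit-smi branch above needs a deferred path because tagging is a one-bit left shift in a 32-bit register, which overflows exactly when the value is outside [-2^30, 2^30 - 1]. A sketch of that condition (TagAsSmi31 is hypothetical; the generated code simply branches on the overflow flag after Integer32ToSmi):

#include <cstdint>

bool TagAsSmi31(int32_t value, int32_t* tagged) {
  int64_t shifted = static_cast<int64_t>(value) << 1;
  if (shifted < INT32_MIN || shifted > INT32_MAX) {
    return false;  // would take the deferred heap-number path
  }
  *tagged = static_cast<int32_t>(shifted);
  return true;
}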
@@ -4596,7 +4667,8 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagU(instr_);
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4615,21 +4687,31 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
- Label slow;
- Register reg = ToRegister(instr->value());
- Register tmp = reg.is(rax) ? rcx : rax;
- XMMRegister temp_xmm = ToDoubleRegister(instr->temp());
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
+ Register reg = ToRegister(value);
+ Register tmp = ToRegister(temp1);
+ XMMRegister temp_xmm = ToDoubleRegister(temp2);
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label done;
// Load the value into temp_xmm, which will be preserved across a potential
// call to the runtime (MacroAssembler::EnterExitFrameEpilogue preserves only
// allocatable XMM registers on x64).
- XMMRegister xmm_scratch = double_scratch0();
- __ LoadUint32(temp_xmm, reg, xmm_scratch);
+ if (signedness == SIGNED_INT32) {
+ ASSERT(SmiValuesAre31Bits());
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value there. If that fails, call the runtime system.
+ __ SmiToInteger32(reg, reg);
+ __ xorl(reg, Immediate(0x80000000));
+ __ cvtlsi2sd(temp_xmm, reg);
+ } else {
+ ASSERT(signedness == UNSIGNED_INT32);
+ __ LoadUint32(temp_xmm, reg);
+ }
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, &slow);
@@ -4638,29 +4720,31 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains
+ // an integer value.
+ __ Set(reg, 0);
- // Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- // NumberTagU uses the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
- if (!reg.is(rax)) __ movq(reg, rax);
+ // NumberTagIU uses the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, rax);
+ }
// Done. Store the value held in temp_xmm into the value field of the
// allocated heap number.
__ bind(&done);
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
- __ StoreToSafepointRegisterSlot(reg, reg);
}
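
Why the SmiToInteger32/xorl pair in the SIGNED_INT32 case recovers the original value: after the failed tag the register holds value << 1 truncated to 32 bits; shifting back arithmetically restores every bit except bit 31, which now duplicates bit 30, and overflow means those two bits disagreed, so flipping bit 31 is exactly the missing correction. A sketch, assuming arithmetic right shift of negative values (true on x64 and mainstream compilers):

#include <cassert>
#include <cstdint>

int32_t RecoverAfterTagOverflow(int32_t value) {
  int32_t shifted = static_cast<int32_t>(
      static_cast<uint32_t>(value) << 1);  // what the register holds
  int32_t untagged = shifted >> 1;         // SmiToInteger32: bit 31 == bit 30 now
  return untagged ^ INT32_MIN;             // the xorl with 0x80000000
}

int main() {
  const int32_t samples[] = {0x40000000, -0x40000001, 0x7fffffff};
  for (int32_t v : samples) assert(RecoverAfterTagOverflow(v) == v);
  return 0;
}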
@@ -4703,24 +4787,33 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this);
// NumberTagD uses the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ movq(kScratchRegister, rax);
+ __ movp(kScratchRegister, rax);
}
- __ movq(reg, kScratchRegister);
+ __ movp(reg, kScratchRegister);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->value()->Equals(instr->result()));
+ HChange* hchange = instr->hydrogen();
Register input = ToRegister(instr->value());
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ Integer32ToSmi(input, input);
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
+ DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ }
+ __ Integer32ToSmi(output, input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
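
The added kUint32 guard in DoSmiTag reflects that an unsigned 32-bit value only fits in a smi when it is also in range as a signed payload. A sketch of the predicate CheckUInteger32ValidSmiValue presumably implements (UInt32FitsInSmi is hypothetical; payload_bits stands for the 31- or 32-bit smi value size):

#include <cstdint>

bool UInt32FitsInSmi(uint32_t v, int payload_bits) {
  // The payload is signed, so the largest representable unsigned value
  // is 2^(payload_bits - 1) - 1.
  return v < (UINT32_C(1) << (payload_bits - 1));
}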
@@ -4950,7 +5043,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(cc, instr->environment());
@@ -4961,7 +5054,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->value());
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
if (instr->hydrogen()->is_interval_check()) {
InstanceType first;
@@ -5014,13 +5107,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this);
- __ push(object);
+ __ Push(object);
__ Set(rsi, 0);
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ testq(rax, Immediate(kSmiTagMask));
+ __ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr->environment());
}
@@ -5044,29 +5137,35 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register object_;
};
- if (instr->hydrogen()->CanOmitMapChecks()) return;
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- for (int i = 0; i < map_set.size() - 1; i++) {
- Handle<Map> map = map_set.at(i).handle();
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
__ CompareMap(reg, map);
__ j(equal, &success, Label::kNear);
}
- Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
__ CompareMap(reg, map);
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr->environment());
@@ -5109,7 +5208,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ movq(input_reg, Immediate(0));
+ __ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
// Heap number
@@ -5127,6 +5226,30 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ XMMRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ movq(result_reg, value_reg);
+ __ shrq(result_reg, Immediate(32));
+ } else {
+ __ movd(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ movd(result_reg, hi_reg);
+ __ psllq(result_reg, 32);
+ __ movd(xmm_scratch, lo_reg);
+ __ orps(result_reg, xmm_scratch);
+}
+
+
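
A host-side equivalent of the new DoubleBits/ConstructDouble pair, for reference; the emitted code does the same splitting and recombination with movq/shrq and movd/psllq/orps on XMM registers:

#include <cstdint>
#include <cstring>

uint32_t DoubleHighBits(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);
}

uint32_t DoubleLowBits(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  return static_cast<uint32_t>(bits);
}

double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double v;
  std::memcpy(&v, &bits, sizeof(v));
  return v;
}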
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5180,7 +5303,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ movl(temp, Immediate((size / kPointerSize) - 1));
} else {
temp = ToRegister(instr->size());
- __ sar(temp, Immediate(kPointerSizeLog2));
+ __ sarp(temp, Immediate(kPointerSizeLog2));
__ decl(temp);
}
Label loop;
@@ -5206,7 +5329,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
ASSERT(!size.is(result));
__ Integer32ToSmi(size, size);
- __ push(size);
+ __ Push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Push(Smi::FromInt(size));
@@ -5226,14 +5349,14 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
@@ -5248,18 +5371,18 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
__ Move(rcx, instr->hydrogen()->literals());
- __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
__ Push(instr->hydrogen()->pattern());
__ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ movq(rbx, rax);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+ __ movp(rbx, rax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
@@ -5268,23 +5391,23 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(rbx);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ __ movp(rdx, FieldOperand(rbx, i));
+ __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movp(FieldOperand(rax, i), rdx);
+ __ movp(FieldOperand(rax, i + kPointerSize), rcx);
}
if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movp(FieldOperand(rax, size - kPointerSize), rdx);
}
}
@@ -5295,16 +5418,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ Move(rbx, instr->hydrogen()->shared_info());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(instr->hydrogen()->shared_info());
__ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
Heap::kFalseValueRootIndex);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5322,9 +5446,9 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
if (operand->IsConstantOperand()) {
__ Push(ToHandle(LConstantOperand::cast(operand)));
} else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
+ __ Push(ToRegister(operand));
} else {
- __ push(ToOperand(operand));
+ __ Push(ToOperand(operand));
}
}
@@ -5351,14 +5475,15 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Label::Distance false_distance = right_block == next_block ? Label::kNear
: Label::kFar;
Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label, true_distance);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->string_string())) {
+ } else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label, false_distance);
@@ -5366,32 +5491,33 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
- } else if (type_name->Equals(heap()->symbol_string())) {
+ } else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->boolean_string())) {
+ } else if (String::Equals(type_name, factory->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label, true_distance);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->undefined_string())) {
+ } else if (String::Equals(type_name, factory->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label, true_distance);
__ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
- } else if (type_name->Equals(heap()->function_string())) {
+ } else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
@@ -5399,7 +5525,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->object_string())) {
+ } else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
@@ -5432,14 +5558,14 @@ void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
void LCodeGen::EmitIsConstructCall(Register temp) {
// Get the frame pointer for the calling frame.
- __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker, Label::kNear);
- __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
@@ -5449,19 +5575,20 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
}
+ last_lazy_deopt_pc_ = masm()->pc_offset();
}
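
The rewrite above keeps the padding rule and merely records the pc unconditionally afterwards. The rule itself, sketched: emit nops until the previous lazy-deopt site is at least space_needed bytes behind, so the deoptimizer can later patch a call sequence here without clobbering the previous patch site (illustrative interface, not the real one):

// Returns the pc to record as the new last lazy-deopt site.
int EnsureSpace(int last_lazy_deopt_pc, int current_pc, int space_needed) {
  if (current_pc < last_lazy_deopt_pc + space_needed) {
    int padding = last_lazy_deopt_pc + space_needed - current_pc;
    current_pc += padding;  // stands in for __ Nop(padding)
  }
  return current_pc;
}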
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5497,8 +5624,8 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5534,11 +5661,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -5547,7 +5670,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(below, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5580,7 +5702,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
DeoptimizeIf(equal, instr->environment());
Condition cc = masm()->CheckSmi(rax);
@@ -5593,12 +5715,12 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
@@ -5619,9 +5741,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ jmp(&done, Label::kNear);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ movq(result,
+ __ movp(result,
FieldOperand(result, DescriptorArray::kEnumCacheOffset));
- __ movq(result,
+ __ movp(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
@@ -5631,38 +5753,98 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
- __ cmpq(ToRegister(instr->map()),
+ __ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr->environment());
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ Push(object);
+ __ Push(index);
+ __ xorp(rsi, rsi);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(object, rax);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
+
Label out_of_object, done;
+ __ Move(kScratchRegister, Smi::FromInt(1));
+ __ testp(index, kScratchRegister);
+ __ j(not_zero, deferred->entry());
+
+ __ sarp(index, Immediate(1));
+
__ SmiToInteger32(index, index);
__ cmpl(index, Immediate(0));
__ j(less, &out_of_object, Label::kNear);
- __ movq(object, FieldOperand(object,
+ __ movp(object, FieldOperand(object,
index,
times_pointer_size,
JSObject::kHeaderSize));
__ jmp(&done, Label::kNear);
__ bind(&out_of_object);
- __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
__ negl(index);
// Index is now equal to the out-of-object property index plus 1.
- __ movq(object, FieldOperand(object,
+ __ movp(object, FieldOperand(object,
index,
times_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
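
The new low-bit test above decodes the incoming field index before the pre-existing in-object/out-of-object split. A sketch of the assumed encoding: bit 0 marks a mutable heap-number field (served through the deferred Runtime::kLoadMutableDouble call), the remaining bits are the signed property index, and a negative index selects the out-of-object properties array:

struct DecodedFieldIndex {
  bool is_mutable_double;  // deferred runtime path
  bool in_object;          // otherwise: properties backing store
  int slot;                // word index within the chosen store
};

DecodedFieldIndex DecodeFieldIndex(int encoded) {
  DecodedFieldIndex d;
  d.is_mutable_double = (encoded & 1) != 0;
  int index = encoded >> 1;  // the sarp in the generated code
  d.in_object = index >= 0;
  // Mirrors "index is now equal to the out-of-object property index plus 1".
  d.slot = d.in_object ? index : -index - 1;
  return d;
}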
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ Push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.h b/chromium/v8/src/x64/lithium-codegen-x64.h
index 63bfe187f14..5621a3d367b 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.h
+++ b/chromium/v8/src/x64/lithium-codegen-x64.h
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
#define V8_X64_LITHIUM_CODEGEN_X64_H_
-#include "x64/lithium-x64.h"
+#include "src/x64/lithium-x64.h"
-#include "checks.h"
-#include "deoptimizer.h"
-#include "lithium-codegen.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "v8utils.h"
-#include "x64/lithium-gap-resolver-x64.h"
+#include "src/checks.h"
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
+#include "src/x64/lithium-gap-resolver-x64.h"
namespace v8 {
namespace internal {
@@ -86,12 +63,13 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsDehoistedKeyConstant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
@@ -106,7 +84,14 @@ class LCodeGen: public LCodeGenBase {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagU(LNumberTagU* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
+
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -116,6 +101,9 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -130,9 +118,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -149,8 +135,6 @@ class LCodeGen: public LCodeGenBase {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(BailoutReason reason);
-
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -160,6 +144,7 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
@@ -214,7 +199,6 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
RDIState rdi_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -226,7 +210,6 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void ApplyCheckIf(Condition cc, LBoundsCheck* check);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -239,7 +222,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -250,9 +232,9 @@ class LCodeGen: public LCodeGenBase {
Operand BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
+ uint32_t base_offset);
Operand BuildSeqStringOperand(Register string,
LOperand* index,
diff --git a/chromium/v8/src/x64/lithium-gap-resolver-x64.cc b/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
index 6059c50b726..93c1512625a 100644
--- a/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "x64/lithium-gap-resolver-x64.h"
-#include "x64/lithium-codegen-x64.h"
+#include "src/x64/lithium-gap-resolver-x64.h"
+#include "src/x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
@@ -172,23 +149,23 @@ void LGapResolver::EmitMove(int index) {
Register src = cgen_->ToRegister(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
}
} else if (source->IsStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movq(kScratchRegister, src);
- __ movq(dst, kScratchRegister);
+ __ movp(kScratchRegister, src);
+ __ movp(dst, kScratchRegister);
}
} else if (source->IsConstantOperand()) {
@@ -198,7 +175,14 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ int32_t constant = cgen_->ToInteger32(constant_source);
+ // Do sign extension only for constants used as de-hoisted array keys.
+ // Other constants only need zero extension, which saves 2 bytes.
+ if (cgen_->IsDehoistedKeyConstant(constant_source)) {
+ __ Set(dst, constant);
+ } else {
+ __ Set(dst, static_cast<uint32_t>(constant));
+ }
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
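
Why the gap resolver now distinguishes the two constant flavours, sketched: writing a 32-bit register on x64 zero-extends into the full 64-bit register and encodes two bytes shorter, but a de-hoisted array key can be a negative 32-bit offset that later feeds 64-bit address arithmetic, so such a constant must be materialized sign-extended:

#include <cstdint>

uint64_t MaterializeConstant(int32_t c, bool is_dehoisted_key) {
  return is_dehoisted_key
      ? static_cast<uint64_t>(static_cast<int64_t>(c))    // movq-style sign extension
      : static_cast<uint64_t>(static_cast<uint32_t>(c));  // movl-style zero extension
}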
@@ -218,12 +202,11 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
- // value.
- __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ // Do sign extension to 64 bits when storing into a stack slot.
+ __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(kScratchRegister, cgen_->ToHandle(constant_source));
- __ movq(dst, kScratchRegister);
+ __ movp(dst, kScratchRegister);
}
}
@@ -271,9 +254,9 @@ void LGapResolver::EmitSwap(int index) {
cgen_->ToRegister(source->IsRegister() ? source : destination);
Operand mem =
cgen_->ToOperand(source->IsRegister() ? destination : source);
- __ movq(kScratchRegister, mem);
- __ movq(mem, reg);
- __ movq(reg, kScratchRegister);
+ __ movp(kScratchRegister, mem);
+ __ movp(mem, reg);
+ __ movp(reg, kScratchRegister);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
@@ -281,9 +264,9 @@ void LGapResolver::EmitSwap(int index) {
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
__ movsd(xmm0, src);
- __ movq(kScratchRegister, dst);
+ __ movp(kScratchRegister, dst);
__ movsd(dst, xmm0);
- __ movq(src, kScratchRegister);
+ __ movp(src, kScratchRegister);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
diff --git a/chromium/v8/src/x64/lithium-gap-resolver-x64.h b/chromium/v8/src/x64/lithium-gap-resolver-x64.h
index f218455b675..fd4b91ab348 100644
--- a/chromium/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/chromium/v8/src/x64/lithium-gap-resolver-x64.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium.h"
+#include "src/lithium.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/x64/lithium-x64.cc b/chromium/v8/src/x64/lithium-x64.cc
index 449eb2b6a11..325f2c0da30 100644
--- a/chromium/v8/src/x64/lithium-x64.cc
+++ b/chromium/v8/src/x64/lithium-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "lithium-allocator-inl.h"
-#include "x64/lithium-x64.h"
-#include "x64/lithium-codegen-x64.h"
-#include "hydrogen-osr.h"
+#include "src/lithium-allocator-inl.h"
+#include "src/x64/lithium-x64.h"
+#include "src/x64/lithium-codegen-x64.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -175,6 +152,16 @@ bool LGoto::HasInterestingComment(LCodeGen* gen) const {
}
+template<int R>
+bool LTemplateResultInstruction<R>::MustSignExtendResult(
+ LPlatformChunk* chunk) const {
+ HValue* hvalue = this->hydrogen_value();
+ return hvalue != NULL &&
+ hvalue->representation().IsInteger32() &&
+ chunk->GetDehoistedKeyIds()->Contains(hvalue->id());
+}
+
+
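A note on the new MustSignExtendResult() predicate above: when a bounds-checked key has been dehoisted into an addressing expression on x64, an int32 result that may be negative must be sign-extended to 64 bits before it is scaled into a pointer, otherwise stale or zeroed upper bits turn a small negative index into a huge positive offset. A standalone C++ sketch of the difference (illustration only, not part of this patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t key = -1;  // a dehoisted key that went negative
      uint64_t zero_extended = static_cast<uint32_t>(key);  // 0x00000000ffffffff
      int64_t sign_extended = static_cast<int64_t>(key);    // 0xffffffffffffffff
      // Scaling the zero-extended form by the element size addresses memory
      // far past the backing store instead of one element before it.
      std::printf("zext=0x%016llx sext=0x%016llx\n",
                  static_cast<unsigned long long>(zero_extended),
                  static_cast<unsigned long long>(sign_extended));
      return 0;
    }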
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -259,7 +246,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -280,7 +267,18 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -305,28 +303,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[rcx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -384,7 +360,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -395,7 +371,7 @@ void LLoadKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
@@ -407,7 +383,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
+ stream->Add(" + %d] <-", base_offset());
} else {
stream->Add("] <- ");
}
@@ -463,7 +439,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(BailoutReason reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -508,6 +484,13 @@ LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
}
+LOperand* LChunkBuilder::UseTempRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseTempRegister(value);
+}
+
+
LOperand* LChunkBuilder::Use(HValue* value) {
return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
}
@@ -569,8 +552,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -578,41 +560,36 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
XMMRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -649,6 +626,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
!hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
+      // We can't tell at this point whether the environment will actually
+      // be needed, so conservatively mark it as used.
+ instr->environment()->set_has_been_used();
}
return instr;
@@ -720,17 +699,23 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
+ bool does_deopt = false;
if (right_value->IsConstant()) {
HConstant* constant = HConstant::cast(right_value);
right = chunk_->DefineConstantOperand(constant);
constant_value = constant->Integer32Value() & 0x1f;
+ if (SmiValuesAre31Bits() && instr->representation().IsSmi() &&
+ constant_value > 0) {
+ // Left shift can deoptimize if we shift by > 0 and the result
+ // cannot be truncated to smi.
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
} else {
right = UseFixed(right_value, rcx);
}
// Shift operations can only deoptimize if we do a logical shift by 0 and
// the result cannot be truncated to int32.
- bool does_deopt = false;
if (op == Token::SHR && constant_value == 0) {
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
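The hunk above moves the does_deopt computation so left shifts can also request a deopt: with 31-bit smis, shifting left by a nonzero constant can leave the smi range even though the int32 arithmetic does not overflow, while the pre-existing case is a logical shift right by 0 whose uint32 result may not fit an int32. A sketch of both hazards (assumed 31-bit smi payload range, illustration only):

    #include <cstdint>
    #include <cstdio>

    // Assumed smi range when SmiValuesAre31Bits(): [-2^30, 2^30 - 1].
    bool FitsSmi31(int64_t v) {
      return v >= -(INT64_C(1) << 30) && v < (INT64_C(1) << 30);
    }

    int main() {
      int32_t x = 1 << 29;  // fits in a 31-bit smi
      std::printf("x << 1 still a smi: %d\n",
                  FitsSmi31(static_cast<int64_t>(x) << 1));            // 0
      uint32_t u = 0x80000000u;  // top bit set
      std::printf("u >> 0 fits int32: %d\n", (u >> 0) <= 0x7fffffffu);  // 0
      return 0;
    }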
@@ -858,169 +843,102 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
chunk_->AddInstruction(dummy, current_block_);
}
} else {
- instr = current->CompileToLithium(this);
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
}
argument_count_ += current->argument_delta();
ASSERT(argument_count_ >= 0);
if (instr != NULL) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(current);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, The register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // the it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- ASSERT(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- if (instr->IsCall()) {
- HValue* hydrogen_value_for_lazy_bailout = current;
- LInstruction* instruction_needing_environment = NULL;
- if (current->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(current->next());
- instruction_needing_environment = instr;
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
- }
+ AddInstruction(instr, current);
}
+
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
}
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
}
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
}
+ ASSERT(fixed == 0 || used_at_start == 0);
}
+#endif
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
}
+ chunk_->AddInstruction(instr, current_block_);
- return result;
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
}
@@ -1035,22 +953,21 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
HValue* value = instr->value();
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
- ToBooleanStub::Types expected = instr->expected_input_types();
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
- !expected.IsGeneric()) {
- return AssignEnvironment(result);
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
- return result;
+ return branch;
}
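DoBranch now attaches a deopt environment only when the value is tagged and its type falls outside the cheap-to-test set; the untagged representations have branch-free truthiness tests. A sketch of the JavaScript semantics those fast paths implement (not the stub's actual code):

    #include <cmath>
    #include <cstdio>

    bool ToBooleanInt32(int v) { return v != 0; }
    bool ToBooleanDouble(double d) { return d != 0.0 && !std::isnan(d); }

    int main() {
      // 0, -0 and NaN are the only falsy numbers.
      std::printf("%d %d %d\n", ToBooleanInt32(0), ToBooleanDouble(-0.0),
                  ToBooleanDouble(std::nan("")));
      return 0;
    }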
@@ -1113,9 +1030,13 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
}
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- LOperand* argument = UseOrConstant(instr->argument());
- return new(zone()) LPushArgument(argument);
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = UseOrConstant(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
}
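DoPushArguments expands one hydrogen instruction into argc lithium pushes, emitting each through AddInstruction itself and returning NULL so the generic visitor adds nothing further. A hypothetical mini-builder (invented names, illustration only) showing the same contract:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Builder {
      std::vector<std::string> chunk;
      void Add(const std::string& instr) { chunk.push_back(instr); }
      const char* DoPushArguments(int argc) {
        for (int i = 0; i < argc; ++i) Add("push-argument");
        return nullptr;  // already emitted; nothing for the caller to add
      }
    };

    int main() {
      Builder b;
      if (const char* rest = b.DoPushArguments(3)) b.Add(rest);
      std::printf("emitted %zu instructions\n", b.chunk.size());  // 3
      return 0;
    }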
@@ -1154,33 +1075,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), rdi);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1198,12 +1124,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1229,8 +1153,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
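The reworked DoMathAbs assigns an environment only for non-double inputs: the integer and smi paths can still deoptimize because the absolute value of the most negative representable integer does not fit, while the double path never needs to. A sketch of the overflow (illustration only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Widen before negating: -INT32_MIN is undefined behaviour in int32.
      int64_t wide_abs = -static_cast<int64_t>(INT32_MIN);
      std::printf("|INT32_MIN| = %lld > INT32_MAX = %d\n",
                  static_cast<long long>(wide_abs), INT32_MAX);
      return 0;
    }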
@@ -1238,29 +1166,14 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathLog* result = new(zone()) LMathLog(input);
- return DefineSameAsFirst(result);
+ return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
}
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
}
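The new kMathClz32 case counts leading zeros of the operand coerced to uint32; on x64 it lowers to a single bit scan, so DoMathClz32 needs no fixed registers and no environment. A portable sketch of the semantics:

    #include <cstdint>
    #include <cstdio>

    uint32_t Clz32(uint32_t x) {
      if (x == 0) return 32;  // defined result for zero, unlike a raw bsr
      uint32_t n = 0;
      while ((x & 0x80000000u) == 0) {
        x <<= 1;
        ++n;
      }
      return n;
    }

    int main() {
      std::printf("%u %u %u\n", Clz32(0), Clz32(1), Clz32(0x80000000u));  // 32 31 0
      return 0;
    }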
@@ -1276,9 +1189,8 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathSqrt* result = new(zone()) LMathSqrt(input);
- return DefineSameAsFirst(result);
+ LOperand* input = UseAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathSqrt(input));
}
@@ -1289,34 +1201,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* key = UseFixed(instr->key(), rcx);
- LCallKeyed* result = new(zone()) LCallKeyed(context, key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LCallNamed* result = new(zone()) LCallNamed(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LCallGlobal* result = new(zone()) LCallGlobal(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
@@ -1337,9 +1221,7 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
LCallFunction* call = new(zone()) LCallFunction(context, function);
- LInstruction* result = DefineFixed(call, rax);
- if (instr->IsTailCall()) return result;
- return MarkAsCall(result, instr);
+ return MarkAsCall(DefineFixed(call, rax), instr);
}
@@ -1385,24 +1267,71 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
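DoDivByPowerOf2I replaces the generic idiv with a shift sequence; the flag checks above enumerate exactly what a shift cannot express without deopting: a -0 result for negative divisors, kMinInt / -1 overflow, and a discarded remainder when uses are not truncating. A standalone sketch of the underlying shift trick:

    #include <cstdint>
    #include <cstdio>

    // Truncating division by 2^k: bias negative dividends by 2^k - 1 so the
    // arithmetic shift rounds toward zero instead of toward -infinity.
    int32_t DivByPowerOf2(int32_t dividend, int k) {
      int32_t bias = dividend < 0 ? (1 << k) - 1 : 0;
      return (dividend + bias) >> k;
    }

    int main() {
      std::printf("%d %d\n", DivByPowerOf2(-7, 1), -7 / 2);  // both -3
      return 0;
    }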
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
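DoDivByConstI pins rax and rdx because the constant-divisor lowering multiplies by a precomputed "magic" reciprocal with a widening imul, whose high half lands in rdx. A sketch for divisor 3, using the standard Granlund-Montgomery magic constant (illustration, not the patch's code):

    #include <cstdint>
    #include <cstdio>

    int32_t DivBy3(int32_t n) {
      int64_t product = INT64_C(0x55555556) * n;         // magic = (2^32 + 2) / 3
      int32_t hi = static_cast<int32_t>(product >> 32);  // the rdx half on x64
      return hi - (n >> 31);                             // fix up negative n
    }

    int main() {
      const int32_t tests[] = {7, -7, 6, -6, 2147483647};
      for (int32_t n : tests) {
        std::printf("%d / 3 = %d (expect %d)\n", n, DivBy3(n), n / 3);
      }
      return 0;
    }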
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), rax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, rax));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1411,93 +1340,132 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
}
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
}
- return NULL;
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
+ dividend, divisor, temp), rax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
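The three flooring-div lowerings implement Math.floor(a / b) over integers, which differs from C-style truncating division whenever the quotient is negative and inexact; that is also why kBailoutOnMinusZero only matters for negative divisors here. A sketch of the semantic difference:

    #include <cstdio>

    // Flooring integer division rounds toward -infinity, not toward zero.
    int FloorDiv(int a, int b) {
      int q = a / b;                                  // truncating quotient
      if ((a % b != 0) && ((a < 0) != (b < 0))) --q;  // inexact, signs differ
      return q;
    }

    int main() {
      std::printf("trunc(-7/2)=%d floor(-7/2)=%d\n",
                  -7 / 2, FloorDiv(-7, 2));  // -3 -4
      return 0;
    }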
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, rax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
} else {
- // use two r64
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LOperand* temp = TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
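DoModByPowerOf2I masks instead of dividing. JavaScript's % takes the dividend's sign, so a negative dividend with zero remainder yields -0, the one value the integer mask cannot represent and the reason kBailoutOnMinusZero forces an environment. A sketch (ignoring the INT32_MIN edge case):

    #include <cstdint>
    #include <cstdio>

    // JS-style truncated modulus by 2^k via masking; the result carries the
    // dividend's sign.
    int32_t ModByPowerOf2(int32_t dividend, int32_t pow2) {
      int32_t mask = pow2 - 1;
      return dividend >= 0 ? (dividend & mask) : -(-dividend & mask);
    }

    int main() {
      // -8 % 4 is integer 0 here, but JavaScript's answer is -0, which the
      // optimized code can only produce by deoptimizing.
      std::printf("%d %d\n", ModByPowerOf2(-7, 4), ModByPowerOf2(-8, 4));  // -3 0
      return 0;
    }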
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), rax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
}
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), rdx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(left->representation().Equals(instr->representation()));
- ASSERT(right->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right),
- NULL);
- LInstruction* result = DefineSameAsFirst(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LModI* mod = new(zone()) LModI(UseFixed(left, rax),
- UseRegister(right),
- FixedTemp(rdx));
- LInstruction* result = DefineFixed(mod, rdx);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1558,14 +1526,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
HValue* right_candidate = instr->BetterRightOperand();
- LOperand* right = use_lea
- ? UseRegisterOrConstantAtStart(right_candidate)
- : UseOrConstantAtStart(right_candidate);
+ LOperand* right;
+ if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
+ // We cannot add a tagged immediate to a tagged value,
+ // so we request it in a register.
+ right = UseRegisterAtStart(right_candidate);
+ } else {
+ right = use_lea ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ }
LAddI* add = new(zone()) LAddI(left, right);
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- LInstruction* result = use_lea
- ? DefineAsRegister(add)
- : DefineSameAsFirst(add);
+ LInstruction* result = use_lea ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
if (can_overflow) {
result = AssignEnvironment(result);
}
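The new smi branch in DoAdd exists because, with 32-bit smis on x64, the payload occupies the upper word while x64 arithmetic immediates are 32-bit sign-extended, so a tagged smi constant cannot be encoded as an immediate and must be materialized in a register. A sketch of the assumed tagging scheme:

    #include <cstdint>
    #include <cstdio>

    // Assumed 32-bit-smi tagging: the integer payload sits in bits 63..32.
    int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }

    int main() {
      // Any nonzero payload exceeds the signed 32-bit range an x64 add
      // immediate can encode.
      std::printf("smi(5) = 0x%016llx\n",
                  static_cast<unsigned long long>(SmiTag(5)));
      return 0;
    }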
@@ -1670,8 +1643,6 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1687,8 +1658,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* value = UseRegister(instr->value());
return new(zone()) LCompareMinusZeroAndBranch(value);
}
@@ -1778,19 +1747,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object);
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), rax);
LDateField* result = new(zone()) LDateField(object, instr->index());
@@ -1824,9 +1780,16 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = Use(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseOrConstantAtStart(instr->length())
+ : UseAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1844,13 +1807,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* value = UseFixed(instr->value(), rax);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -1867,23 +1823,21 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ HValue* val = instr->value();
if (from.IsSmi()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
from = Representation::Tagged();
}
- // Only mark conversions that might need to allocate as calling rather than
- // all changes. This makes simple, non-allocating conversion not have to force
- // building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineSameAsFirst(new(zone()) LDummyUse(value));
@@ -1891,77 +1845,70 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
+ LOperand* value = UseRegister(val);
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
}
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
LOperand* temp = TempRegister();
-
- // Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else if (to.IsSmi()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return AssignEnvironment(
DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value)));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegister(val);
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = FixedTemp(xmm1);
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineSameAsFirst(result));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = SmiValuesAre32Bits() ? NULL : TempRegister();
+ LOperand* temp2 = SmiValuesAre32Bits() ? NULL : FixedTemp(xmm1);
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
+ return AssignPointerMap(DefineSameAsFirst(result));
}
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = NULL;
- if (val->CheckFlag(HInstruction::kUint32)) {
- result = DefineAsRegister(new(zone()) LUint32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange() &&
- val->range()->upper() != kMaxInt) {
- return result;
- }
- } else {
- result = DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
- }
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
+ ASSERT(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
} else {
- ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
+ LOperand* value = Use(val);
return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
}
}
@@ -1973,7 +1920,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1997,15 +1948,12 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) {
- value = UseRegisterAtStart(instr->value());
- if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
- }
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (!instr->CanOmitMapChecks()) {
- AssignEnvironment(result);
- if (instr->has_migration_target()) return AssignPointerMap(result);
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
}
return result;
}
@@ -2030,6 +1978,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2085,21 +2047,14 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* global_object = UseFixed(instr->global_object(), rdx);
- LOperand* value = UseFixed(instr->value(), rax);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2116,7 +2071,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
temp = NULL;
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2157,40 +2115,69 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
+void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) {
+ // We sign extend the dehoisted key at the definition point when the pointer
+  // size is 64-bit. For the x32 port, we sign extend the dehoisted key at the
+  // use points and should not invoke this function. We can't use STATIC_ASSERT
+ // here as the pointer size is 32-bit for x32.
+ ASSERT(kPointerSize == kInt64Size);
+ BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds();
+ if (dehoisted_key_ids->Contains(candidate->id())) return;
+ dehoisted_key_ids->Add(candidate->id());
+ if (!candidate->IsPhi()) return;
+ for (int i = 0; i < candidate->OperandCount(); ++i) {
+ FindDehoistedKeyDefinitions(candidate->OperandAt(i));
+ }
}
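FindDehoistedKeyDefinitions records every value id that can flow into a dehoisted key, recursing through phi inputs, so MustSignExtendResult can later consult the set at each definition point. A hypothetical miniature of the same walk (invented names, illustration only):

    #include <cstdio>
    #include <set>
    #include <vector>

    struct Value {
      int id;
      bool is_phi;
      std::vector<const Value*> inputs;
    };

    void MarkDehoisted(const Value* v, std::set<int>* ids) {
      if (!ids->insert(v->id).second) return;  // already visited
      if (!v->is_phi) return;                  // only phis have inputs to chase
      for (const Value* input : v->inputs) MarkDehoisted(input, ids);
    }

    int main() {
      Value a{1, false, {}};
      Value b{2, false, {}};
      Value phi{3, true, {&a, &b}};
      std::set<int> ids;
      MarkDehoisted(&phi, &ids);
      std::printf("marked %zu value ids\n", ids.size());  // 3
      return 0;
    }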
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT((kPointerSize == kInt64Size &&
+ instr->key()->representation().IsInteger32()) ||
+ (kPointerSize == kInt32Size &&
+ instr->key()->representation().IsSmiOrInteger32()));
ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LOperand* key = NULL;
+ LInstruction* result = NULL;
- if (!instr->is_external()) {
+ if (kPointerSize == kInt64Size) {
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ }
+
+ if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
+
+ if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !(IsDoubleOrFloatElementsKind(elements_kind))) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ (IsDoubleOrFloatElementsKind(elements_kind))));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2208,24 +2195,31 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- if (!instr->is_external()) {
+ if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
+
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
LOperand* key = NULL;
LOperand* val = NULL;
- if (instr->value()->representation().IsDouble()) {
+ Representation value_representation = instr->value()->representation();
+ if (value_representation.IsDouble()) {
object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
+ val = UseRegisterAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
- ASSERT(instr->value()->representation().IsSmiOrTagged());
- object = UseTempRegister(instr->elements());
+ ASSERT(value_representation.IsSmiOrTagged() ||
+ value_representation.IsInteger32());
if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
+ object = UseRegisterAtStart(instr->elements());
val = UseRegisterOrConstantAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
}
@@ -2235,21 +2229,32 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ (instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS;
LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ LOperand* key = NULL;
+ if (kPointerSize == kInt64Size) {
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ }
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
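In the typed-array branch above, clamped-uint8 and float32 element kinds keep the value in a temp register because the store sequence rewrites it in place (clamping to [0, 255], or narrowing a double to float). A sketch of the clamping semantics:

    #include <cstdint>
    #include <cstdio>

    // Uint8ClampedArray stores clamp to [0, 255] rather than truncating
    // modulo 256.
    uint8_t ClampToUint8(int32_t v) {
      if (v < 0) return 0;
      if (v > 255) return 255;
      return static_cast<uint8_t>(v);
    }

    int main() {
      std::printf("%d %d %d\n", ClampToUint8(-5), ClampToUint8(300),
                  ClampToUint8(128));  // 0 255 128
      return 0;
    }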
@@ -2271,7 +2276,6 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
@@ -2280,10 +2284,11 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
object, NULL, new_map_reg, temp_reg);
return result;
} else {
+ LOperand* object = UseFixed(instr->object(), rax);
LOperand* context = UseFixed(instr->context(), rsi);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return AssignPointerMap(result);
+ return MarkAsCall(result, instr);
}
}
@@ -2324,7 +2329,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool can_be_constant = instr->value()->IsConstant() &&
HConstant::cast(instr->value())->NotInNewSpace() &&
- !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+ !instr->field_representation().IsDouble();
LOperand* val;
if (needs_write_barrier) {
@@ -2333,10 +2338,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseFixed(instr->value(), rax);
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
- val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsSmi()) {
+ val = UseRegister(instr->value());
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2347,14 +2351,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = (!is_in_object || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2371,12 +2368,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* left = FLAG_new_string_add
- ? UseFixed(instr->left(), rdx)
- : UseOrConstantAtStart(instr->left());
- LOperand* right = FLAG_new_string_add
- ? UseFixed(instr->right(), rax)
- : UseOrConstantAtStart(instr->right());
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
return MarkAsCall(
DefineFixed(new(zone()) LStringAdd(context, left, right), rax), instr);
}
@@ -2388,7 +2381,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
@@ -2443,7 +2436,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ info()->code_stub()->GetInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
@@ -2525,9 +2518,6 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2560,13 +2550,13 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2622,7 +2612,25 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, rsi), instr);
}
diff --git a/chromium/v8/src/x64/lithium-x64.h b/chromium/v8/src/x64/lithium-x64.h
index dc15c97c44c..9609cfc9dce 100644
--- a/chromium/v8/src/x64/lithium-x64.h
+++ b/chromium/v8/src/x64/lithium-x64.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_LITHIUM_X64_H_
#define V8_X64_LITHIUM_X64_H_
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
+#include "src/hydrogen.h"
+#include "src/lithium-allocator.h"
+#include "src/lithium.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -44,6 +21,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
+ V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -52,12 +30,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -83,24 +58,28 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(DummyUse) \
V(Dummy) \
- V(ElementsKind) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -109,7 +88,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -119,7 +97,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
@@ -131,17 +108,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -149,7 +125,6 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
@@ -163,8 +138,8 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
+ V(StoreFrameContext) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -176,16 +151,13 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
@@ -264,7 +236,9 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
@@ -279,6 +253,10 @@ class LInstruction : public ZoneObject {
virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+ virtual bool MustSignExtendResult(LPlatformChunk* chunk) const {
+ return false;
+ }
+
#ifdef DEBUG
void VerifyCall();
#endif
@@ -303,10 +281,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -316,8 +292,20 @@ class LTemplateInstruction : public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
+ virtual bool MustSignExtendResult(
+ LPlatformChunk* chunk) const V8_FINAL V8_OVERRIDE;
+
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
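
The split above separates result bookkeeping from operand storage: fixed-arity instructions keep compile-time-sized embedded arrays, while variable-arity calls (see LCallWithDescriptor below) can carry a runtime-sized operand list yet reuse the same single-result plumbing. A minimal sketch of the resulting shape, using simplified stand-in types rather than the real V8 declarations:

#include <array>
#include <vector>

// Illustrative shape of the refactoring, not the actual V8 classes.
template <int R>
struct ResultBase {                  // owns only the (0 or 1) result slot
  std::array<void*, R> results_;
};

template <int R, int I, int T>
struct FixedArity : ResultBase<R> {  // operand counts fixed at compile time
  std::array<void*, I> inputs_;
  std::array<void*, T> temps_;
};

struct VariableArityCall : ResultBase<1> {  // cf. LCallWithDescriptor
  std::vector<void*> inputs_;        // sized at runtime from the descriptor
};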
@@ -442,6 +430,7 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -492,10 +481,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -563,6 +548,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
};
@@ -624,6 +610,49 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
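
As a worked sketch of what the ModByPowerOf2I path stands for (an assumption based on the standard signed-remainder identity, not the exact sequence the backend emits): for divisor 2^k the remainder is the low k bits, plus a sign fix-up for negative dividends.

#include <cstdint>

// Signed n mod 2^k without a div instruction; mask == 2^k - 1.
// Matches truncating semantics: the result takes the sign of n.
int32_t ModByPowerOf2(int32_t n, int32_t mask) {
  int32_t r = n & mask;                 // low k bits
  if (n < 0 && r != 0) r -= mask + 1;   // sign fix-up for negative n
  return r;                             // e.g. -7 mod 4 -> -3, 7 mod 4 -> 3
}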
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -641,40 +670,126 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
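
DivByConstI corresponds to the classic multiply-by-reciprocal ("magic number") division from Hacker's Delight, which is presumably what its two temp registers serve. A hedged illustration for divisor 3, not the exact code the backend emits:

#include <cstdint>

// Truncating n / 3 without a div instruction: multiply by the magic
// constant M = (2^32 + 2) / 3, keep the high 32 bits of the product,
// then correct for negative n (n >> 31 is -1 there, 0 otherwise).
int32_t DivBy3(int32_t n) {
  const int64_t kMagic = 0x55555556;   // == (2^32 + 2) / 3
  int32_t q = static_cast<int32_t>((kMagic * n) >> 32);  // arithmetic shift
  return q - (n >> 31);                // DivBy3(7) == 2, DivBy3(-7) == -2
}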
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
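
The FlooringDiv* family computes Math.floor(a / b), which differs from the truncating DivI result exactly when the operand signs differ. A short worked sketch of that distinction (assuming b != 0):

// Truncating vs. flooring division, the reason both families exist:
//   DivI:         -7 / 2 == -3       (rounds toward zero)
//   FlooringDivI: floor(-7 / 2) == -4 (rounds toward minus infinity)
int32_t FlooringDiv(int32_t a, int32_t b) {
  int32_t q = a / b;                              // truncating
  if ((a % b != 0) && ((a < 0) != (b < 0))) q--;  // step down if inexact
  return q;
}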
@@ -772,39 +887,15 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathSin(LOperand* value) {
+ explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathTan(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
@@ -1137,6 +1228,9 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* right() { return inputs_[1]; }
Token::Value op() const { return hydrogen()->op(); }
+ bool IsInteger32() const {
+ return hydrogen()->representation().IsInteger32();
+ }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
DECLARE_HYDROGEN_ACCESSOR(Bitwise)
@@ -1282,32 +1376,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LValueOf(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
@@ -1361,20 +1429,6 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LThrow(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
@@ -1554,18 +1608,20 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
+inline static bool ExternalArrayOpRequiresTemp(
+ Representation key_representation,
+ ElementsKind elements_kind) {
+ // Operations that require the key to be divided by two to be converted into
+ // an index cannot fold the scale operation into a load and need an extra
+ // temp register to do the work.
+ return SmiValuesAre31Bits() && key_representation.IsSmi() &&
+ (elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS);
+}
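
The predicate above singles out byte-sized element kinds: with 31-bit smis the tagged key is key << 1, and a byte access would need the key shifted right first, a division by two that cannot be folded into an x64 addressing mode (scales are 1, 2, 4 or 8, never 1/2). A minimal sketch with illustrative names:

// Illustrative: reading uint8 elements with a 31-bit smi key, where
// tagged_key == key << 1.  Byte accesses need the explicit shift below,
// which clobbers the key -- hence UseTempRegister() on those paths.
uint8_t LoadByteElement(const uint8_t* elements, int32_t tagged_key) {
  int32_t index = tagged_key >> 1;  // untag; no 1/2 scale exists to fold
  return elements[index];
}
// For 2-byte elements &elements[key] is base + key * 2 == base + tagged_key,
// so the smi tag folds into the addressing mode and no temp is needed.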
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
@@ -1581,10 +1637,16 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1646,28 +1708,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1731,15 +1771,15 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
- temps_[0] = code_object;
+ inputs_[1] = code_object;
}
LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return temps_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
@@ -1778,18 +1818,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1803,94 +1831,69 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- LOperand* global() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
+ LOperand* function() { return inputs_[0]; }
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
+ LOperand* target() const { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
+ ZoneList<LOperand*> inputs_;
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
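
The iterator overrides above are what let a runtime-sized operand list masquerade as the embedded arrays of fixed-arity instructions. A hedged standalone sketch of the contract, with simplified stand-in types and accessor visibility:

#include <vector>

// Consumers walk inputs through virtual accessors, so they never learn
// whether operands live in an embedded array or a growable list.
struct Operand {};
struct Instruction {
  virtual ~Instruction() {}
  virtual int InputCount() = 0;
  virtual Operand* InputAt(int i) = 0;
};

struct VariadicCall : Instruction {
  std::vector<Operand*> inputs_;
  int InputCount() override { return static_cast<int>(inputs_.size()); }
  Operand* InputAt(int i) override { return inputs_[i]; }
};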
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNamed(LOperand* context) {
+ LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1911,35 +1914,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -1989,7 +1963,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2011,67 +1985,45 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LInteger32ToSmi(LOperand* value) {
+ explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value, LOperand* temp) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2148,6 +2100,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2198,7 +2151,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
@@ -2223,7 +2175,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2236,6 +2188,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2246,7 +2204,7 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
@@ -2272,7 +2230,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2401,7 +2359,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMaps(LOperand* value) {
+ explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
}
@@ -2476,6 +2434,33 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2664,53 +2649,94 @@ class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
+ : LChunk(info, graph),
+ dehoisted_key_ids_(graph->GetMaximumValueID(), graph->zone()) { }
int GetNextSpillIndex(RegisterKind kind);
LOperand* GetNextSpillSlot(RegisterKind kind);
+ BitVector* GetDehoistedKeyIds() { return &dehoisted_key_ids_; }
+ bool IsDehoistedKey(HValue* value) {
+ return dehoisted_key_ids_.Contains(value->id());
+ }
+
+ private:
+ BitVector dehoisted_key_ids_;
};
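
The new dehoisted_key_ids_ bit set is indexed by HValue id. A usage sketch against the members declared above; the marking site is an assumption, since FindDehoistedKeyDefinitions' body lies outside this hunk:

// Builder side, when a dehoisted array key is discovered:
//   chunk()->GetDehoistedKeyIds()->Add(candidate->id());
// Consumer side, e.g. when deciding whether a value needs sign extension:
//   if (chunk->IsDehoistedKey(value)) { /* treat as a zero-based index */ }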
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
allocator_(allocator) { }
+ Isolate* isolate() const { return graph_->isolate(); }
+
// Build the sequence for the graph.
LPlatformChunk* Build();
- LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
-
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2723,7 +2749,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2756,6 +2781,9 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in a register that may be trashed.
MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+ // An input operand in a register that may be trashed or a constant operand.
+ MUST_USE_RESULT LOperand* UseTempRegisterOrConstant(HValue* value);
+
// An input operand in a register or stack slot.
MUST_USE_RESULT LOperand* Use(HValue* value);
MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
@@ -2773,7 +2801,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2782,22 +2810,16 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ XMMRegister reg);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);
@@ -2815,11 +2837,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
@@ -2827,16 +2846,15 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
+ void FindDehoistedKeyDefinitions(HValue* candidate);
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
diff --git a/chromium/v8/src/x64/macro-assembler-x64.cc b/chromium/v8/src/x64/macro-assembler-x64.cc
index 6c3f50163ef..39acf800340 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/macro-assembler-x64.cc
@@ -1,43 +1,20 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "assembler-x64.h"
-#include "macro-assembler-x64.h"
-#include "serialize.h"
-#include "debug.h"
-#include "heap.h"
-#include "isolate-inl.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/macro-assembler-x64.h"
+#include "src/serialize.h"
+#include "src/debug.h"
+#include "src/heap.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -54,10 +31,10 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
-static const int kInvalidRootRegisterDelta = -1;
+static const int64_t kInvalidRootRegisterDelta = -1;
-intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
if (predictable_code_size() &&
(other.address() < reinterpret_cast<Address>(isolate()) ||
other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
@@ -65,17 +42,27 @@ intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
}
Address roots_register_value = kRootRegisterBias +
reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
- intptr_t delta = other.address() - roots_register_value;
+
+ int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
+ if (kPointerSize == kInt64Size) {
+ delta = other.address() - roots_register_value;
+ } else {
+ // For x32, zero extend the address to 64-bit and calculate the delta.
+ uint64_t o = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(other.address()));
+ uint64_t r = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(roots_register_value));
+ delta = o - r;
+ }
return delta;
}
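
The x32 branch zero-extends both 32-bit addresses into 64 bits before subtracting, so the delta is computed in a domain where it cannot wrap. A standalone illustration of the same arithmetic, using plain integers instead of the V8 types:

#include <cstdint>

// Mirror of the x32 branch of RootRegisterDelta above: zero-extend,
// never sign-extend, a 32-bit pointer value before taking the difference.
int64_t Delta32(uint32_t other, uint32_t roots_base) {
  uint64_t o = other;
  uint64_t r = roots_base;
  return static_cast<int64_t>(o - r);  // usable only if is_int32(delta)
}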
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(target);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
}
@@ -85,11 +72,10 @@ Operand MacroAssembler::ExternalOperand(ExternalReference target,
void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -98,17 +84,16 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
load_rax(source);
} else {
Move(kScratchRegister, source);
- movq(destination, Operand(kScratchRegister, 0));
+ movp(destination, Operand(kScratchRegister, 0));
}
}
void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
+ movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
}
}
@@ -117,18 +102,17 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
store_rax(destination);
} else {
Move(kScratchRegister, destination);
- movq(Operand(kScratchRegister, 0), source);
+ movp(Operand(kScratchRegister, 0), source);
}
}
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -138,14 +122,13 @@ void MacroAssembler::LoadAddress(Register destination,
int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !serializer_enabled()) {
// This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
// instruction below.
- intptr_t delta = RootRegisterDelta(source);
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
+ // Operand is leap(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
int size = 4;
if (!is_int8(static_cast<int32_t>(delta))) {
@@ -154,28 +137,28 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
return size;
}
}
- // Size of movq(destination, src);
+ // Size of movp(destination, src);
return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}
void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
- if (is_int32(address) && !Serializer::enabled()) {
+ if (is_int32(address) && !serializer_enabled()) {
if (emit_debug_code()) {
- movq(kScratchRegister, kZapValue, RelocInfo::NONE64);
+ Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
- push(Immediate(static_cast<int32_t>(address)));
+ Push(Immediate(static_cast<int32_t>(address)));
return;
}
LoadAddress(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- movq(destination, Operand(kRootRegister,
+ movp(destination, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -184,7 +167,7 @@ void MacroAssembler::LoadRootIndexed(Register destination,
Register variable_offset,
int fixed_offset) {
ASSERT(root_array_available_);
- movq(destination,
+ movp(destination,
Operand(kRootRegister,
variable_offset, times_pointer_size,
(fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
@@ -193,20 +176,20 @@ void MacroAssembler::LoadRootIndexed(Register destination,
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
+ movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
+ Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
+ cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -216,7 +199,7 @@ void MacroAssembler::CompareRoot(const Operand& with,
ASSERT(root_array_available_);
ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
+ cmpp(with, kScratchRegister);
}
@@ -234,15 +217,15 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Load store buffer top.
LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Store pointer to buffer.
- movq(Operand(scratch, 0), addr);
+ movp(Operand(scratch, 0), addr);
// Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
+ addp(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
j(not_equal, &buffer_overflowed, Label::kNear);
@@ -253,7 +236,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
j(equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
+ StoreBufferOverflowStub(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
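
In C terms, RememberedSetHelper's fast path is a bump-pointer append into the store buffer followed by a test of an overflow bit that becomes set in the top address when the buffer fills. A hedged sketch; names are illustrative and the real layout lives in StoreBuffer:

#include <cstdint>

// Fast path: append the slot address, bump the top pointer, then test
// the overflow bit encoded in the new top address.
void RememberSlot(uintptr_t** top, uintptr_t* slot, uintptr_t overflow_bit) {
  **top = reinterpret_cast<uintptr_t>(slot);     // store pointer to buffer
  *top += 1;                                     // increment buffer top
  if (reinterpret_cast<uintptr_t>(*top) & overflow_bit) {
    // slow path: call the StoreBufferOverflow stub (illustrative)
  }
}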
@@ -269,33 +252,35 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch,
Label::Distance distance) {
- if (Serializer::enabled()) {
+ if (serializer_enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
// and the running system.
if (scratch.is(object)) {
Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
+ andp(scratch, kScratchRegister);
} else {
Move(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
+ andp(scratch, object);
}
Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
+ cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
- ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
+ ASSERT(kPointerSize == kInt64Size
+ ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
+ : kPointerSize == kInt32Size);
intptr_t new_space_start =
reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
- movq(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
- RelocInfo::NONE64);
+ Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
+ Assembler::RelocInfoNone());
if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
+ addp(scratch, kScratchRegister);
} else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ leap(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch,
+ andp(scratch,
Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
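
The non-serializer fast path works because new space occupies a single aligned region, so membership reduces to a rebase-and-mask test. A sketch of that arithmetic; the mask semantics are inferred from the code above, not spelled out in this hunk:

#include <cstdint>

// Zero iff addr lies inside the new-space region starting at start.
bool InNewSpaceFast(uintptr_t addr, uintptr_t start, uintptr_t mask) {
  return ((addr - start) & mask) == 0;
}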
@@ -309,7 +294,8 @@ void MacroAssembler::RecordWriteField(
Register dst,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -323,7 +309,7 @@ void MacroAssembler::RecordWriteField(
  // of the object, so the offset must be a multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize));
- lea(dst, FieldOperand(object, offset));
+ leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
@@ -332,26 +318,28 @@ void MacroAssembler::RecordWriteField(
bind(&ok);
}
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, kZapValue, RelocInfo::NONE64);
- movq(dst, kZapValue, RelocInfo::NONE64);
+ Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(dst, kZapValue, Assembler::RelocInfoNone());
}
}
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWriteArray(
+ Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -363,29 +351,102 @@ void MacroAssembler::RecordWriteArray(Register object,
// Array access: calculate the destination address. Index is not a smi.
Register dst = index;
- lea(dst, Operand(object, index, times_pointer_size,
+ leap(dst, Operand(object, index, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, kZapValue, RelocInfo::NONE64);
- movq(index, kZapValue, RelocInfo::NONE64);
+ Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(index, kZapValue, Assembler::RelocInfoNone());
}
}
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ SaveFPRegsMode fp_mode) {
+ ASSERT(!object.is(kScratchRegister));
+ ASSERT(!object.is(map));
+ ASSERT(!object.is(dst));
+ ASSERT(!map.is(dst));
+ AssertNotSmi(object);
+
+ if (emit_debug_code()) {
+ Label ok;
+ if (map.is(kScratchRegister)) pushq(map);
+ CompareMap(map, isolate()->factory()->meta_map());
+ if (map.is(kScratchRegister)) popq(map);
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ Label ok;
+ if (map.is(kScratchRegister)) pushq(map);
+ cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
+ if (map.is(kScratchRegister)) popq(map);
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ // Compute the address.
+ leap(dst, FieldOperand(object, HeapObject::kMapOffset));
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ // A single check of the map's page's interesting flag suffices, since it is
+ // only set during incremental collection, and then it is guaranteed that the
+ // from-object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Move(dst, kZapValue, Assembler::RelocInfoNone());
+ Move(map, kZapValue, Assembler::RelocInfoNone());
+ }
+}
+
+
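
Aside: the barrier above is gated on a per-page flag word reachable by masking the address. A rough sketch of that gating in plain C++, with an assumed page size and flag layout — not V8's actual MemoryChunk types:

#include <cassert>
#include <cstdint>

constexpr int kPageSizeBits = 12;  // assumed page size (V8 uses larger chunks)
constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << kPageSizeBits) - 1;
constexpr uint32_t kPointersToHereAreInterestingMask = 1u << 0;  // assumed bit

struct PageHeader { uint32_t flags; };

inline PageHeader* PageFromAddress(uintptr_t addr) {
  // Masking off the low bits recovers the page header at the start of the
  // aligned page -- the same trick CheckPageFlag relies on.
  return reinterpret_cast<PageHeader*>(addr & ~kPageAlignmentMask);
}

bool NeedsWriteBarrier(uintptr_t value_addr) {
  return (PageFromAddress(value_addr)->flags &
          kPointersToHereAreInterestingMask) != 0;
}

int main() {
  alignas(1 << kPageSizeBits) static PageHeader page{};  // one fake page
  page.flags = kPointersToHereAreInterestingMask;
  uintptr_t value_addr = reinterpret_cast<uintptr_t>(&page) + 0x10;
  assert(NeedsWriteBarrier(value_addr));
}
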
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
@@ -398,7 +459,7 @@ void MacroAssembler::RecordWrite(Register object,
if (emit_debug_code()) {
Label ok;
- cmpq(value, Operand(address, 0));
+ cmpp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -417,12 +478,14 @@ void MacroAssembler::RecordWrite(Register object,
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+ }
CheckPageFlag(object,
value, // Used as scratch.
@@ -431,7 +494,8 @@ void MacroAssembler::RecordWrite(Register object,
&done,
Label::kNear);
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
bind(&done);
@@ -439,8 +503,8 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, kZapValue, RelocInfo::NONE64);
- movq(value, kZapValue, RelocInfo::NONE64);
+ Move(address, kZapValue, Assembler::RelocInfoNone());
+ Move(value, kZapValue, Assembler::RelocInfoNone());
}
}
@@ -483,7 +547,7 @@ void MacroAssembler::CheckStackAlignment() {
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
+ testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
@@ -505,17 +569,8 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -527,20 +582,17 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- push(rax);
- movq(kScratchRegister, reinterpret_cast<Smi*>(p0), RelocInfo::NONE64);
- push(kScratchRegister);
- movq(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
- RelocInfo::NONE64);
- push(kScratchRegister);
+ Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
+ Assembler::RelocInfoNone());
+ Push(kScratchRegister);
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Control will not return here.
int3();
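
Aside: with the old two-value encoding gone, the bailout reason now travels to Runtime::kAbort as a single smi-tagged integer. A small sketch of that tagging round trip, assuming the 64-bit layout with a 32-bit payload:

#include <cassert>
#include <cstdint>

constexpr int kSmiTag = 0;
constexpr int kSmiShift = 32;  // 32-bit payload; assumes a 64-bit build

inline intptr_t SmiFromInt(int value) {
  // Shift the payload above the tag bits; done in unsigned to stay defined.
  return static_cast<intptr_t>(static_cast<uintptr_t>(value) << kSmiShift);
}

inline int SmiToInt(intptr_t smi) {
  return static_cast<int>(smi >> kSmiShift);  // arithmetic shift assumed
}

int main() {
  int reason = 42;  // stand-in for a BailoutReason enumerator
  intptr_t tagged = SmiFromInt(reason);
  assert(SmiToInt(tagged) == reason);
  assert((tagged & 1) == kSmiTag);  // low bit carries the (zero) smi tag
}
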
@@ -549,12 +601,12 @@ void MacroAssembler::Abort(BailoutReason reason) {
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
@@ -569,30 +621,16 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
- }
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-}
-
-
void MacroAssembler::IndexFromHash(Register hash, Register index) {
// The assert checks that the constants for the maximum number of digits
// for an array index cached in the hash field and the number of bits
// reserved for it do not conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
- shr(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- Integer32ToSmi(index, hash);
+ if (!hash.is(index)) {
+ movl(index, hash);
+ }
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}
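
Aside: DecodeFieldToSmi extracts a BitField-encoded value from the hash word and leaves it smi-tagged in one step. A sketch with assumed field geometry — the real shift and width live in String's hash-field definitions:

#include <cassert>
#include <cstdint>

constexpr int kFieldShift = 2;  // assumed position of the index bits
constexpr int kFieldBits = 24;  // assumed width of the index bits
constexpr int kSmiShift = 32;   // assumes the 64-bit smi layout

inline intptr_t DecodeFieldToSmi(uint32_t hash) {
  uint32_t value = (hash >> kFieldShift) & ((1u << kFieldBits) - 1);
  // Retag: park the decoded value in the smi payload bits.
  return static_cast<intptr_t>(static_cast<uintptr_t>(value) << kSmiShift);
}

int main() {
  uint32_t hash = (1234u << kFieldShift) | 0x3u;  // index 1234 plus flag bits
  assert((DecodeFieldToSmi(hash) >> kSmiShift) == 1234);
}
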
@@ -602,10 +640,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -613,7 +648,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(rax, num_arguments);
LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size, save_doubles);
+ CEntryStub ces(isolate(), f->result_size, save_doubles);
CallStub(&ces);
}
@@ -623,7 +658,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
Set(rax, num_arguments);
LoadAddress(rbx, ext);
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
@@ -670,8 +705,8 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
void MacroAssembler::CallApiFunctionAndReturn(
- Address function_address,
- Address thunk_address,
+ Register function_address,
+ ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
Operand return_value_operand,
@@ -696,13 +731,14 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate());
+ ASSERT(rdx.is(function_address) || r8.is(function_address));
// Allocate HandleScope in callee-save registers.
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
Register base_reg = r15;
Move(base_reg, next_address);
- movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
- movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
+ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
if (FLAG_log_timer_events) {
@@ -717,22 +753,18 @@ void MacroAssembler::CallApiFunctionAndReturn(
Label profiler_disabled;
Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- movq(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, ExternalReference::is_profiling_address(isolate()));
cmpb(Operand(rax, 0), Immediate(0));
j(zero, &profiler_disabled);
// Third parameter is the address of the actual getter function.
- movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE);
- movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
+ Move(thunk_last_arg, function_address);
+ Move(rax, thunk_ref);
jmp(&end_profiler_check);
bind(&profiler_disabled);
// Call the api function!
- movq(rax, reinterpret_cast<Address>(function_address),
- RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, function_address);
bind(&end_profiler_check);
@@ -749,14 +781,14 @@ void MacroAssembler::CallApiFunctionAndReturn(
}
// Load the value from ReturnValue
- movq(rax, return_value_operand);
+ movp(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
- movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
+ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
@@ -773,7 +805,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
Register map = rcx;
JumpIfSmi(return_value, &ok, Label::kNear);
- movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
+ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
CmpInstanceType(map, FIRST_NONSTRING_TYPE);
j(below, &ok, Label::kNear);
@@ -803,7 +835,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
- movq(rsi, *context_restore_operand);
+ movp(rsi, *context_restore_operand);
}
LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
@@ -811,19 +843,19 @@ void MacroAssembler::CallApiFunctionAndReturn(
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
}
jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
- movq(prev_limit_reg, rax);
+ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
+ movp(prev_limit_reg, rax);
LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
LoadAddress(rax,
ExternalReference::delete_handle_scope_extensions(isolate()));
call(rax);
- movq(rax, prev_limit_reg);
+ movp(rax, prev_limit_reg);
jmp(&leave_exit_frame);
}
@@ -832,8 +864,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), result_size);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -848,16 +880,16 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
+ InvokeCode(rdx, expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- movq(target, FieldOperand(target,
+ movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ movp(target, FieldOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
@@ -866,7 +898,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
ASSERT(!target.is(rdi));
// Load the JavaScript builtin function from the builtins object.
GetBuiltinFunction(rdi, id);
- movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
@@ -892,12 +924,12 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
+ pushq(reg);
}
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
@@ -915,12 +947,12 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
+ popq(reg);
}
}
}
@@ -951,7 +983,7 @@ void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
- movq(dst, src);
+ movp(dst, src);
}
}
@@ -965,7 +997,12 @@ void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
- movq(dst, src);
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
+ movp(dst, src);
}
}
@@ -983,12 +1020,16 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
+void MacroAssembler::Set(const Operand& dst, intptr_t x) {
+ if (kPointerSize == kInt64Size) {
+ if (is_int32(x)) {
+ movp(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Set(kScratchRegister, x);
+ movp(dst, kScratchRegister);
+ }
} else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
+ movp(dst, Immediate(static_cast<int32_t>(x)));
}
}
@@ -1004,11 +1045,18 @@ bool MacroAssembler::IsUnsafeInt(const int32_t x) {
void MacroAssembler::SafeMove(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
- ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(dst, kScratchRegister);
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(dst, kScratchRegister);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ movp(dst, Immediate(value ^ jit_cookie()));
+ xorp(dst, Immediate(jit_cookie()));
+ }
} else {
Move(dst, src);
}
@@ -1016,11 +1064,18 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
void MacroAssembler::SafePush(Smi* src) {
- ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Push(Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(Operand(rsp, 0), kScratchRegister);
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Push(Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(Operand(rsp, 0), kScratchRegister);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ Push(Immediate(value ^ jit_cookie()));
+ xorp(Operand(rsp, 0), Immediate(jit_cookie()));
+ }
} else {
Push(src);
}
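
Aside: SafeMove/SafePush above XOR-obfuscate immediates that could double as attacker-controlled machine code; the cookie is applied twice so the plain value never appears in the emitted instruction stream. A tiny sketch of the round trip — the cookie value here is made up:

#include <cassert>
#include <cstdint>

int32_t Obfuscate(int32_t value, int32_t cookie) { return value ^ cookie; }

int main() {
  const int32_t cookie = 0x5A5A5A5A;      // per-isolate secret in V8; made up here
  const int32_t unsafe_imm = 0x0F05C3C3;  // stand-in "unsafe" constant
  int32_t in_code = Obfuscate(unsafe_imm, cookie);  // immediate as emitted
  int32_t at_runtime = Obfuscate(in_code, cookie);  // second xor restores it
  assert(at_runtime == unsafe_imm);
}
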
@@ -1043,7 +1098,8 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
- movq(dst, Smi::FromInt(kSmiConstantRegisterValue), RelocInfo::NONE64);
+ Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
+ Assembler::RelocInfoNone());
cmpq(dst, kSmiConstantRegister);
Assert(equal, kUninitializedKSmiConstantRegister);
}
@@ -1057,37 +1113,41 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
switch (uvalue) {
case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
break;
case 8:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
break;
case 4:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
break;
case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
break;
case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
break;
case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
- movq(dst, kSmiConstantRegister);
+ movp(dst, kSmiConstantRegister);
break;
case 0:
UNREACHABLE();
return;
default:
- movq(dst, source, RelocInfo::NONE64);
+ Move(dst, source, Assembler::RelocInfoNone());
return;
}
if (negative) {
- neg(dst);
+ negp(dst);
}
}
@@ -1097,7 +1157,7 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
if (!dst.is(src)) {
movl(dst, src);
}
- shl(dst, Immediate(kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
}
@@ -1109,8 +1169,15 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
bind(&ok);
}
- ASSERT(kSmiShift % kBitsPerByte == 0);
- movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+
+ if (SmiValuesAre32Bits()) {
+ ASSERT(kSmiShift % kBitsPerByte == 0);
+ movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ Integer32ToSmi(kScratchRegister, src);
+ movp(dst, kScratchRegister);
+ }
}
@@ -1122,48 +1189,70 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
} else {
leal(dst, Operand(src, constant));
}
- shl(dst, Immediate(kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
}
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
+ }
+
+ if (SmiValuesAre32Bits()) {
+ shrp(dst, Immediate(kSmiShift));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ sarl(dst, Immediate(kSmiShift));
}
- shr(dst, Immediate(kSmiShift));
}
void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
- movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movl(dst, src);
+ sarl(dst, Immediate(kSmiShift));
+ }
}
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
+ }
+ sarp(dst, Immediate(kSmiShift));
+ if (kPointerSize == kInt32Size) {
+ // Sign extend to 64-bit.
+ movsxlq(dst, dst);
}
- sar(dst, Immediate(kSmiShift));
}
void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
- movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movp(dst, src);
+ SmiToInteger64(dst, dst);
+ }
}
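
Aside: most of the rewrites in this hunk exist because the two smi layouts untag differently. With 32-bit payloads the value lives in the upper half of a 64-bit word; with 31-bit payloads it sits above a single tag bit and a 32-bit arithmetic shift untags it. A sketch of both, assuming arithmetic right shifts (which the emitted sarl/sarp also rely on):

#include <cassert>
#include <cstdint>

int32_t UntagSmi32(int64_t smi) {  // SmiValuesAre32Bits()
  return static_cast<int32_t>(smi >> 32);  // arithmetic shift assumed
}

int32_t UntagSmi31(int32_t smi) {  // SmiValuesAre31Bits()
  return smi >> 1;  // arithmetic shift keeps the sign, like sarl above
}

int main() {
  int64_t tagged64 = -7 * (INT64_C(1) << 32);  // -7 with a 32-bit payload
  assert(UntagSmi32(tagged64) == -7);
  assert(UntagSmi31(-7 * 2) == -7);  // -14 is -7 behind a one-bit tag
}
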
void MacroAssembler::SmiTest(Register src) {
AssertSmi(src);
- testq(src, src);
+ testp(src, src);
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
- cmpq(smi1, smi2);
+ cmpp(smi1, smi2);
}
@@ -1176,10 +1265,10 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::Cmp(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
if (src->value() == 0) {
- testq(dst, dst);
+ testp(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
+ cmpp(dst, constant_reg);
}
}
@@ -1187,20 +1276,25 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
AssertSmi(dst);
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ cmpl(dst, Immediate(src));
+ }
}
@@ -1208,12 +1302,18 @@ void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
+ cmpp(dst, smi_reg);
}
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, dst);
+ cmpl(kScratchRegister, src);
+ }
}
@@ -1227,12 +1327,12 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
return;
}
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
if (power < kSmiShift) {
- sar(dst, Immediate(kSmiShift - power));
+ sarp(dst, Immediate(kSmiShift - power));
} else if (power > kSmiShift) {
- shl(dst, Immediate(power - kSmiShift));
+ shlp(dst, Immediate(power - kSmiShift));
}
}
@@ -1242,7 +1342,7 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
int power) {
ASSERT((0 <= power) && (power < 32));
if (dst.is(src)) {
- shr(dst, Immediate(power + kSmiShift));
+ shrp(dst, Immediate(power + kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
@@ -1255,13 +1355,13 @@ void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
if (dst.is(src1) || dst.is(src2)) {
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
- movq(kScratchRegister, src1);
- or_(kScratchRegister, src2);
+ movp(kScratchRegister, src1);
+ orp(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
} else {
- movq(dst, src1);
- or_(dst, src2);
+ movp(dst, src1);
+ orp(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
}
@@ -1284,8 +1384,8 @@ Condition MacroAssembler::CheckSmi(const Operand& src) {
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
// Test that both bits of the mask 0x8000000000000001 are zero.
- movq(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
+ movp(kScratchRegister, src);
+ rolp(kScratchRegister, Immediate(1));
testb(kScratchRegister, Immediate(3));
return zero;
}
@@ -1296,8 +1396,15 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
return CheckSmi(first);
}
STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
- leal(kScratchRegister, Operand(first, second, times_1, 0));
- testb(kScratchRegister, Immediate(0x03));
+ if (SmiValuesAre32Bits()) {
+ leal(kScratchRegister, Operand(first, second, times_1, 0));
+ testb(kScratchRegister, Immediate(0x03));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ }
return zero;
}
@@ -1307,9 +1414,9 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
if (first.is(second)) {
return CheckNonNegativeSmi(first);
}
- movq(kScratchRegister, first);
- or_(kScratchRegister, second);
- rol(kScratchRegister, Immediate(1));
+ movp(kScratchRegister, first);
+ orp(kScratchRegister, second);
+ rolp(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
return zero;
}
@@ -1337,22 +1444,34 @@ Condition MacroAssembler::CheckEitherSmi(Register first,
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
+ cmpp(src, kSmiConstantRegister);
return overflow;
}
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
- // A 32-bit integer value can always be converted to a smi.
- return always;
+ if (SmiValuesAre32Bits()) {
+ // A 32-bit integer value can always be converted to a smi.
+ return always;
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ cmpl(src, Immediate(0xc0000000));
+ return positive;
+ }
}
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
- // An unsigned 32-bit integer value is valid as long as the high bit
- // is not set.
- testl(src, src);
- return positive;
+ if (SmiValuesAre32Bits()) {
+ // An unsigned 32-bit integer value is valid as long as the high bit
+ // is not set.
+ testl(src, src);
+ return positive;
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ testl(src, Immediate(0xc0000000));
+ return zero;
+ }
}
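
Aside: the 0xc0000000 tricks above test 31-bit representability without branching. A signed 32-bit value fits a 31-bit smi iff it lies in [-2^30, 2^30); adding 2^30 (what `cmpl src, 0xc0000000` computes as src - (-2^30)) leaves the sign bit clear exactly in that range. An unsigned value fits iff its top two bits are clear, which is what `testl src, 0xc0000000` checks. A sketch of both predicates:

#include <cassert>
#include <cstdint>

bool IsValidSmi31(int32_t v) {
  // v + 2^30 has the sign bit clear exactly when v is in [-2^30, 2^30).
  return ((static_cast<uint32_t>(v) + (1u << 30)) & (1u << 31)) == 0;
}

bool IsValidUSmi31(uint32_t v) {
  return (v & 0xC0000000u) == 0;  // top two bits clear <=> v < 2^30
}

int main() {
  assert(IsValidSmi31((1 << 30) - 1) && !IsValidSmi31(1 << 30));
  assert(IsValidSmi31(-(1 << 30)) && !IsValidSmi31(-(1 << 30) - 1));
  assert(IsValidUSmi31((1u << 30) - 1) && !IsValidUSmi31(1u << 30));
}
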
@@ -1377,6 +1496,14 @@ void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
}
+void MacroAssembler::JumpIfValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
@@ -1385,6 +1512,14 @@ void MacroAssembler::JumpIfNotValidSmiValue(Register src,
}
+void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
@@ -1447,46 +1582,46 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
return;
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
switch (constant->value()) {
case 1:
- addq(dst, kSmiConstantRegister);
+ addp(dst, kSmiConstantRegister);
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
+ addp(dst, constant_reg);
return;
}
} else {
switch (constant->value()) {
case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
return;
}
}
@@ -1495,7 +1630,13 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
- addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
+ if (SmiValuesAre32Bits()) {
+ addl(Operand(dst, kSmiShift / kBitsPerByte),
+ Immediate(constant->value()));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ addp(dst, Immediate(constant));
+ }
}
}
@@ -1508,21 +1649,21 @@ void MacroAssembler::SmiAddConstant(Register dst,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1536,7 +1677,7 @@ void MacroAssembler::SmiAddConstant(Register dst,
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1545,22 +1686,22 @@ void MacroAssembler::SmiAddConstant(Register dst,
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
+ subp(dst, constant_reg);
} else {
if (constant->value() == Smi::kMinValue) {
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ addp(dst, src);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
+ addp(dst, src);
}
}
}
@@ -1574,21 +1715,21 @@ void MacroAssembler::SmiSubConstant(Register dst,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1603,14 +1744,14 @@ void MacroAssembler::SmiSubConstant(Register dst,
ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
if (constant->value() == Smi::kMinValue) {
ASSERT(!dst.is(kScratchRegister));
- movq(dst, src);
+ movp(dst, src);
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1623,16 +1764,16 @@ void MacroAssembler::SmiNeg(Register dst,
Label::Distance near_jump) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
+ movp(kScratchRegister, src);
+ negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
- movq(src, kScratchRegister);
+ movp(src, kScratchRegister);
} else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
+ movp(dst, src);
+ negp(dst);
+ cmpp(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
j(not_equal, on_smi_result, near_jump);
}
@@ -1648,15 +1789,15 @@ static void SmiAddHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
- masm->movq(dst, src1);
- masm->addq(dst, src2);
+ masm->movp(dst, src1);
+ masm->addp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1691,13 +1832,13 @@ void MacroAssembler::SmiAdd(Register dst,
// overflowing is impossible.
if (!dst.is(src1)) {
if (emit_debug_code()) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
+ movp(kScratchRegister, src1);
+ addp(kScratchRegister, src2);
Check(no_overflow, kSmiAdditionOverflow);
}
- lea(dst, Operand(src1, src2, times_1, 0));
+ leap(dst, Operand(src1, src2, times_1, 0));
} else {
- addq(dst, src2);
+ addp(dst, src2);
Assert(no_overflow, kSmiAdditionOverflow);
}
}
@@ -1712,15 +1853,15 @@ static void SmiSubHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
- masm->movq(dst, src1);
- masm->subq(dst, src2);
+ masm->movp(dst, src1);
+ masm->subp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1756,9 +1897,9 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm,
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
if (!dst.is(src1)) {
- masm->movq(dst, src1);
+ masm->movp(dst, src1);
}
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->Assert(no_overflow, kSmiSubtractionOverflow);
}
@@ -1788,24 +1929,24 @@ void MacroAssembler::SmiMul(Register dst,
if (dst.is(src1)) {
Label failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
+ movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, &failure, Label::kNear);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
- movq(dst, kScratchRegister);
- xor_(dst, src2);
+ movp(dst, kScratchRegister);
+ xorp(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&zero_correct_result);
@@ -1814,17 +1955,17 @@ void MacroAssembler::SmiMul(Register dst,
bind(&correct_result);
} else {
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, on_not_smi_result, near_jump);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
// One of src1 and src2 is zero, so check whether the other is
// negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
+ movp(kScratchRegister, src1);
+ xorp(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
}
@@ -1844,11 +1985,11 @@ void MacroAssembler::SmiDiv(Register dst,
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
// We need to rule out dividing Smi::kMinValue by -1, since that would
@@ -1859,12 +2000,12 @@ void MacroAssembler::SmiDiv(Register dst,
// We overshoot a little and go to slow case if we divide min-value
// by any negative value, not just -1.
Label safe_div;
- testl(rax, Immediate(0x7fffffff));
+ testl(rax, Immediate(~Smi::kMinValue));
j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
+ testp(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
} else {
j(negative, on_not_smi_result, near_jump);
@@ -1881,14 +2022,14 @@ void MacroAssembler::SmiDiv(Register dst,
if (src1.is(rax)) {
Label smi_result;
j(zero, &smi_result, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&smi_result);
} else {
j(not_zero, on_not_smi_result, near_jump);
}
if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
Integer32ToSmi(dst, rax);
}
@@ -1907,11 +2048,11 @@ void MacroAssembler::SmiMod(Register dst,
ASSERT(!src1.is(rdx));
ASSERT(!src1.is(src2));
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
SmiToInteger32(src2, src2);
@@ -1925,7 +2066,7 @@ void MacroAssembler::SmiMod(Register dst,
// Retag inputs and go slow case.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
jmp(on_not_smi_result, near_jump);
bind(&safe_div);
@@ -1936,14 +2077,14 @@ void MacroAssembler::SmiMod(Register dst,
// Restore smi tags on inputs.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
// Check for a negative zero result. If the result is zero, and the
// dividend is negative, go slow to return a floating point negative zero.
Label smi_result;
testl(rdx, rdx);
j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
+ testp(src1, src1);
j(negative, on_not_smi_result, near_jump);
bind(&smi_result);
Integer32ToSmi(dst, rdx);
@@ -1953,23 +2094,29 @@ void MacroAssembler::SmiMod(Register dst,
void MacroAssembler::SmiNot(Register dst, Register src) {
ASSERT(!dst.is(kScratchRegister));
ASSERT(!src.is(kScratchRegister));
- // Set tag and padding bits before negating, so that they are zero afterwards.
- movl(kScratchRegister, Immediate(~0));
+ if (SmiValuesAre32Bits()) {
+ // Set tag and padding bits before negating, so that they are zero
+ // afterwards.
+ movl(kScratchRegister, Immediate(~0));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movl(kScratchRegister, Immediate(1));
+ }
if (dst.is(src)) {
- xor_(dst, kScratchRegister);
+ xorp(dst, kScratchRegister);
} else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
+ leap(dst, Operand(src, kScratchRegister, times_1, 0));
}
- not_(dst);
+ notp(dst);
}
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
ASSERT(!dst.is(src2));
if (!dst.is(src1)) {
- movq(dst, src1);
+ movp(dst, src1);
}
- and_(dst, src2);
+ andp(dst, src2);
}
@@ -1979,10 +2126,10 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
+ andp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- and_(dst, src);
+ andp(dst, src);
}
}
@@ -1990,9 +2137,9 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
ASSERT(!src1.is(src2));
- movq(dst, src1);
+ movp(dst, src1);
}
- or_(dst, src2);
+ orp(dst, src2);
}
@@ -2000,10 +2147,10 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
+ orp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- or_(dst, src);
+ orp(dst, src);
}
}
@@ -2011,9 +2158,9 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
ASSERT(!src1.is(src2));
- movq(dst, src1);
+ movp(dst, src1);
}
- xor_(dst, src2);
+ xorp(dst, src2);
}
@@ -2021,10 +2168,10 @@ void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
+ xorp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- xor_(dst, src);
+ xorp(dst, src);
}
}
@@ -2035,8 +2182,8 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
ASSERT(is_uint5(shift_value));
if (shift_value > 0) {
if (dst.is(src)) {
- sar(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ sarp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
@@ -2046,12 +2193,27 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
- int shift_value) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift_value > 0) {
- shl(dst, Immediate(shift_value));
+ int shift_value,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift_value > 0) {
+ // The shift amount is specified by the lower 5 bits, not six as in the
+ // 64-bit shl opcode.
+ shlq(dst, Immediate(shift_value & 0x1f));
+ }
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ SmiToInteger32(dst, src);
+ shll(dst, Immediate(shift_value));
+ JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
@@ -2063,29 +2225,73 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
- movq(dst, src);
if (shift_value == 0) {
- testq(dst, dst);
+ testp(src, src);
j(negative, on_not_smi_result, near_jump);
}
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ if (SmiValuesAre32Bits()) {
+ movp(dst, src);
+ shrp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ SmiToInteger32(dst, src);
+ shrp(dst, Immediate(shift_value));
+ JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
- Register src2) {
- ASSERT(!dst.is(rcx));
- // Untag shift amount.
- if (!dst.is(src1)) {
- movq(dst, src1);
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ ASSERT(!dst.is(rcx));
+ if (!dst.is(src1)) {
+ movp(dst, src1);
+ }
+ // Untag shift amount.
+ SmiToInteger32(rcx, src2);
+ // The shift amount is specified by the lower 5 bits, not six as in the
+ // 64-bit shl opcode.
+ andp(rcx, Immediate(0x1f));
+ shlq_cl(dst);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src2));
+ ASSERT(!dst.is(rcx));
+
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shll_cl(dst);
+ JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
+ // Since neither src1 nor src2 can be dst, we do not need to restore them
+ // after clobbering dst.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
+ }
}
- SmiToInteger32(rcx, src2);
- // Shift amount specified by lower 5 bits, not six as the shl opcode.
- and_(rcx, Immediate(0x1f));
- shl_cl(dst);
}
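
Aside: the 31-bit SmiShiftLeft path unbundles into untag, 32-bit shift, range check, retag. A sketch of that sequence, with a bool return standing in for the bail-out jump:

#include <cassert>
#include <cstdint>

bool SmiShiftLeft31(int32_t lhs_untagged, int32_t shift,
                    int32_t* result_tagged) {
  // shll only looks at the low 5 bits of the count, as the masking above does.
  int32_t shifted = static_cast<int32_t>(
      static_cast<uint32_t>(lhs_untagged) << (shift & 0x1f));
  // JumpIfValidSmiValue: the result must fit in 31 bits to be retaggable.
  bool fits = ((static_cast<uint32_t>(shifted) + (1u << 30)) & (1u << 31)) == 0;
  if (!fits) return false;  // on_not_smi_result
  // Integer32ToSmi: retag by shifting the payload over the tag bit.
  *result_tagged = static_cast<int32_t>(static_cast<uint32_t>(shifted) << 1);
  return true;
}

int main() {
  int32_t out = 0;
  assert(SmiShiftLeft31(3, 2, &out) && out == 12 * 2);  // 3 << 2 fits
  assert(!SmiShiftLeft31(1, 30, &out));                 // 1 << 30 does not
}
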
@@ -2097,33 +2303,31 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
ASSERT(!dst.is(kScratchRegister));
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src2));
ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
if (src1.is(rcx) || src2.is(rcx)) {
movq(kScratchRegister, rcx);
}
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- Label positive_result;
- j(positive, &positive_result, Label::kNear);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result, near_jump);
- bind(&positive_result);
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
} else {
- // src2 was zero and src1 negative.
- j(negative, on_not_smi_result, near_jump);
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shrl_cl(dst);
+ JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
+ // Since neither src1 nor src2 can be dst, we do not need to restore them
+ // after clobbering dst.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
}
}
@@ -2135,23 +2339,14 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
- if (src1.is(rcx)) {
- movq(kScratchRegister, src1);
- } else if (src2.is(rcx)) {
- movq(kScratchRegister, src2);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
+
SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- sar_cl(dst); // Shift 32 + original rcx & 0x1f.
- shl(dst, Immediate(kSmiShift));
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else if (src2.is(rcx)) {
- movq(src2, kScratchRegister);
+ if (!dst.is(src1)) {
+ movp(dst, src1);
}
+ SmiToInteger32(dst, dst);
+ sarl_cl(dst);
+ Integer32ToSmi(dst, dst);
}
@@ -2173,7 +2368,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
+ andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
// If non-zero then both are smis.
j(not_zero, on_not_smis, near_jump);
@@ -2181,13 +2376,13 @@ void MacroAssembler::SelectNonSmi(Register dst,
// Exactly one operand is a smi.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
+ subp(kScratchRegister, Immediate(1));
// If src1 is a smi, the scratch register is all 1s, else it is all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
+ movp(dst, src1);
+ xorp(dst, src2);
+ andp(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
+ xorp(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
@@ -2195,81 +2390,125 @@ void MacroAssembler::SelectNonSmi(Register dst,
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
- ASSERT(is_uint6(shift));
- // There is a possible optimization if shift is in the range 60-63, but that
- // will (and must) never happen.
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ ASSERT(is_uint6(shift));
+ // There is a possible optimization if shift is in the range 60-63, but that
+ // will (and must) never happen.
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ // We have to sign-extend the index register to 64 bits, as the smi might
+ // be negative.
+ movsxlq(dst, dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
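
Aside: the 31-bit SmiToIndex path leans on the tag itself being a factor of two — the still-tagged, sign-extended value can feed a hardware scale factor one step smaller than requested. A sketch of the arithmetic, assuming arithmetic right shifts:

#include <cassert>
#include <cstdint>

int64_t ScaledIndex31(int32_t tagged_smi, int shift) {
  int64_t wide = tagged_smi;         // movsxlq: sign-extend, keep the tag
  if (shift == 0) return wide >> 1;  // times_1: must untag after all (sarq)
  // The tag supplies one factor of two, so scale by 2^(shift-1), not 2^shift.
  return wide * (INT64_C(1) << (shift - 1));
}

int main() {
  int32_t tagged = -5 * 2;  // smi -5 under the one-bit tag
  assert(ScaledIndex31(tagged, 3) == -5 * 8);  // a times_8 access
  assert(ScaledIndex31(tagged, 0) == -5);
}
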
+
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
Register src,
int shift) {
- // Register src holds a positive smi.
- ASSERT(is_uint6(shift));
- if (!dst.is(src)) {
- movq(dst, src);
- }
- neg(dst);
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ // Register src holds a positive smi.
+ ASSERT(is_uint6(shift));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negp(dst);
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negq(dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
- ASSERT_EQ(0, kSmiShift % kBitsPerByte);
- addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ ASSERT_EQ(0, kSmiShift % kBitsPerByte);
+ addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, src);
+ addl(dst, kScratchRegister);
+ }
}
void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
+ Push(Immediate(static_cast<int32_t>(smi)));
} else {
Register constant = GetSmiConstant(source);
- push(constant);
+ Push(constant);
}
}
-void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
- movq(scratch, src);
+void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
+ ASSERT(!src.is(scratch));
+ movp(scratch, src);
// High bits.
- shr(src, Immediate(64 - kSmiShift));
- shl(src, Immediate(kSmiShift));
- push(src);
+ shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
+ shlp(src, Immediate(kSmiShift));
+ Push(src);
// Low bits.
- shl(scratch, Immediate(kSmiShift));
- push(scratch);
+ shlp(scratch, Immediate(kSmiShift));
+ Push(scratch);
}
-void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
- pop(scratch);
+void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
+ ASSERT(!dst.is(scratch));
+ Pop(scratch);
// Low bits.
- shr(scratch, Immediate(kSmiShift));
- pop(dst);
- shr(dst, Immediate(kSmiShift));
+ shrp(scratch, Immediate(kSmiShift));
+ Pop(dst);
+ shrp(dst, Immediate(kSmiShift));
// High bits.
- shl(dst, Immediate(64 - kSmiShift));
- or_(dst, scratch);
+ shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
+ orp(dst, scratch);
}
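
Aside: PushRegisterAsTwoSmis parks an arbitrary machine word on the stack as two GC-safe smis. A sketch of the split and rejoin for the 64-bit layout (kSmiShift == 32):

#include <cassert>
#include <cstdint>

void SplitAsTwoSmis(uint64_t value, uint64_t* high_smi, uint64_t* low_smi) {
  *high_smi = (value >> 32) << 32;  // high half parked in the smi payload bits
  *low_smi = value << 32;           // low half likewise; tag bits end up zero
}

uint64_t JoinFromTwoSmis(uint64_t high_smi, uint64_t low_smi) {
  return ((high_smi >> 32) << 32) | (low_smi >> 32);
}

int main() {
  uint64_t v = 0x0123456789ABCDEFull;
  uint64_t hi = 0, lo = 0;
  SplitAsTwoSmis(v, &hi, &lo);
  assert(JoinFromTwoSmis(hi, lo) == v);
  assert((hi & 1) == 0 && (lo & 1) == 0);  // both look like valid smis to the GC
}
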
void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
+ if (SmiValuesAre32Bits()) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ testl(src, Immediate(source));
+ }
}
@@ -2294,7 +2533,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
SmiToInteger32(
mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
shrl(mask, Immediate(1));
- subq(mask, Immediate(1)); // Make mask.
+ subp(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@@ -2310,17 +2549,17 @@ void MacroAssembler::LookupNumberStringCache(Register object,
STATIC_ASSERT(8 == kDoubleSize);
movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- and_(scratch, mask);
+ xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ andp(scratch, mask);
// Each entry in the string cache consists of two pointer-sized fields, but
// the times_twice_pointer_size (multiplication by 16) scale factor is not
// supported by the addressing modes on the x64 platform, so we have to
// premultiply the entry index before the lookup.
- shl(scratch, Immediate(kPointerSizeLog2 + 1));
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
Register index = scratch;
Register probe = mask;
- movq(probe,
+ movp(probe,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2334,15 +2573,15 @@ void MacroAssembler::LookupNumberStringCache(Register object,
bind(&is_smi);
SmiToInteger32(scratch, object);
- and_(scratch, mask);
+ andp(scratch, mask);
// Each entry in the string cache consists of two pointer-sized fields, but
// the times_twice_pointer_size (multiplication by 16) scale factor is not
// supported by the addressing modes on the x64 platform, so we have to
// premultiply the entry index before the lookup.
- shl(scratch, Immediate(kPointerSizeLog2 + 1));
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
- cmpq(object,
+ cmpp(object,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2351,7 +2590,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
// Get the result from the cache.
bind(&load_result_from_cache);
- movq(result,
+ movp(result,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2383,8 +2622,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
j(either_smi, on_fail, near_jump);
// Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
@@ -2399,7 +2638,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
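
The `leap(scratch1, Operand(scratch1, scratch2, times_8, 0))` trick folds two masked type bytes into one comparison: it computes scratch1 + scratch2 * 8, which cannot carry between the two fields because the mask leaves the overlapping bits clear (the ASSERT guarantees mask & (mask << 3) == 0). A small C++ check of that property, using illustrative mask/tag values rather than the real V8 constants:

  #include <cassert>
  #include <cstdint>

  int main() {
    // Hypothetical stand-ins for kFlatAsciiStringMask / kFlatAsciiStringTag.
    const uint32_t kMask = 0x7, kTag = 0x4;
    assert((kMask & (kMask << 3)) == 0);   // fields cannot overlap after * 8
    uint32_t t1 = 0x4, t2 = 0x4;           // two instance types, pre-masked
    // lea scratch1, [scratch1 + scratch2*8] interleaves both checks:
    uint32_t combined = (t1 & kMask) + (t2 & kMask) * 8;
    assert(combined == kTag + (kTag << 3)); // both flat ASCII in one compare
    return 0;
  }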
@@ -2432,8 +2671,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Label* on_fail,
Label::Distance near_jump) {
// Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
+ movp(scratch1, first_object_instance_type);
+ movp(scratch2, second_object_instance_type);
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
@@ -2446,7 +2685,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
@@ -2486,7 +2725,7 @@ void MacroAssembler::JumpIfNotUniqueName(Register reg,
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
}
@@ -2507,7 +2746,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
Move(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
}
}
@@ -2518,7 +2757,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2529,7 +2768,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2540,7 +2779,7 @@ void MacroAssembler::Push(Handle<Object> source) {
Push(Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
}
@@ -2551,10 +2790,10 @@ void MacroAssembler::MoveHeapObject(Register result,
ASSERT(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
- movq(result, cell, RelocInfo::CELL);
- movq(result, Operand(result, 0));
+ Move(result, cell, RelocInfo::CELL);
+ movp(result, Operand(result, 0));
} else {
- movq(result, object, RelocInfo::EMBEDDED_OBJECT);
+ Move(result, object, RelocInfo::EMBEDDED_OBJECT);
}
}
@@ -2564,23 +2803,155 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
AllowDeferredHandleDereference embedding_raw_address;
load_rax(cell.location(), RelocInfo::CELL);
} else {
- movq(dst, cell, RelocInfo::CELL);
- movq(dst, Operand(dst, 0));
+ Move(dst, cell, RelocInfo::CELL);
+ movp(dst, Operand(dst, 0));
}
}
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
+ addp(rsp, Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::DropUnderReturnAddress(int stack_elements,
+ Register scratch) {
+ ASSERT(stack_elements > 0);
+ if (kPointerSize == kInt64Size && stack_elements == 1) {
+ popq(MemOperand(rsp, 0));
+ return;
+ }
+
+ PopReturnAddressTo(scratch);
+ Drop(stack_elements);
+ PushReturnAddressFrom(scratch);
+}
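
On 64-bit targets with one element to drop, `popq(MemOperand(rsp, 0))` does the whole job in one instruction: it loads the return address from the old stack top, bumps rsp by one slot, and stores the return address into the new top slot. A hedged C++ model of the general path (pop the return address, drop, push it back), with a toy array standing in for the stack:

  #include <cassert>
  #include <cstdint>

  int main() {
    // Toy downward-growing stack: sp indexes into 'slots'.
    uint64_t slots[4] = {0xBEEF /* return address */, 111, 222, 333};
    int sp = 0;
    // PopReturnAddressTo(scratch); Drop(2); PushReturnAddressFrom(scratch);
    uint64_t scratch = slots[sp++];   // pop return address
    sp += 2;                          // drop two stack elements
    slots[--sp] = scratch;            // push return address back on top
    assert(sp == 2 && slots[2] == 0xBEEF && slots[3] == 333);
    return 0;
  }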
+
+
+void MacroAssembler::Push(Register src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ // x32 uses 64-bit push for rbp in the prologue.
+ ASSERT(src.code() != rbp.code());
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), src);
+ }
+}
+
+
+void MacroAssembler::Push(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ movp(kScratchRegister, src);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::PushQuad(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ movp(kScratchRegister, src);
+ pushq(kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::Push(Immediate value) {
+ if (kPointerSize == kInt64Size) {
+ pushq(value);
+ } else {
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), value);
+ }
+}
+
+
+void MacroAssembler::PushImm32(int32_t imm32) {
+ if (kPointerSize == kInt64Size) {
+ pushq_imm32(imm32);
+ } else {
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), Immediate(imm32));
}
}
-void MacroAssembler::TestBit(const Operand& src, int bits) {
+void MacroAssembler::Pop(Register dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ // x32 uses 64-bit pop for rbp in the epilogue.
+ ASSERT(dst.code() != rbp.code());
+ movp(dst, Operand(rsp, 0));
+ leal(rsp, Operand(rsp, 4));
+ }
+}
+
+
+void MacroAssembler::Pop(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ Register scratch = dst.AddressUsesRegister(kScratchRegister)
+ ? kSmiConstantRegister : kScratchRegister;
+ movp(scratch, Operand(rsp, 0));
+ movp(dst, scratch);
+ leal(rsp, Operand(rsp, 4));
+ if (scratch.is(kSmiConstantRegister)) {
+ // Restore kSmiConstantRegister.
+ movp(kSmiConstantRegister,
+ reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
+ Assembler::RelocInfoNone());
+ }
+ }
+}
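
These Push/Pop wrappers exist because x32 pointers are four bytes while the hardware push/pop move eight: the wrappers adjust rsp by kPointerSize manually and move the value themselves (and Pop(const Operand&) additionally dodges kScratchRegister when the destination operand addresses through it). A runnable C++ sketch of the x32 rsp arithmetic, with a byte array standing in for the stack:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  // Toy model of the x32 Push: rsp -= 4, then store a 4-byte pointer.
  void PushX32(uint8_t* stack, int& rsp, uint32_t value) {
    rsp -= 4;                               // leal(rsp, Operand(rsp, -4))
    std::memcpy(stack + rsp, &value, 4);    // movp(Operand(rsp, 0), src)
  }

  uint32_t PopX32(uint8_t* stack, int& rsp) {
    uint32_t value;
    std::memcpy(&value, stack + rsp, 4);    // movp(dst, Operand(rsp, 0))
    rsp += 4;                               // leal(rsp, Operand(rsp, 4))
    return value;
  }

  int main() {
    uint8_t stack[64];
    int rsp = 64;
    PushX32(stack, rsp, 0xCAFE);
    assert(rsp == 60);
    assert(PopX32(stack, rsp) == 0xCAFE && rsp == 64);
    return 0;
  }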
+
+
+void MacroAssembler::PopQuad(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ popq(kScratchRegister);
+ movp(dst, kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
+ Register base,
+ int offset) {
+ ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt64Size) {
+ movsxlq(dst, FieldOperand(base, offset));
+ } else {
+ movp(dst, FieldOperand(base, offset));
+ SmiToInteger32(dst, dst);
+ }
+}
+
+
+void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
+ int offset,
+ int bits) {
+ ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt32Size) {
+    // On x32, this field is represented as a Smi.
+ bits += kSmiShift;
+ }
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
- testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+ testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
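
The byte/bit split above turns a flat bit index into a byte offset plus a bit within that byte, so a single testb suffices; on x32 the field is a Smi, so the index is first biased past the tag bit. Worked numbers, assuming kBitsPerByte == 8 and a hypothetical bit index (the kSmiShift value shown is the 31-bit-smi tag shift):

  #include <cassert>

  int main() {
    const int kBitsPerByte = 8;
    const int kSmiShift = 1;     // x32: 31-bit smis, one tag bit
    int bits = 10;               // hypothetical compiler-hints bit index
    bits += kSmiShift;           // x32 only: skip the smi tag bit
    int byte_offset = bits / kBitsPerByte;        // -> byte 1
    int bit_in_byte = bits & (kBitsPerByte - 1);  // -> bit 3
    assert(byte_offset == 1 && bit_in_byte == 3);
    // testb(FieldOperand(base, offset + byte_offset),
    //       Immediate(1 << bit_in_byte)) then tests exactly that bit.
    return 0;
  }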
@@ -2590,8 +2961,18 @@ void MacroAssembler::Jump(ExternalReference ext) {
}
+void MacroAssembler::Jump(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ jmp(op);
+ } else {
+ movp(kScratchRegister, op);
+ jmp(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
- movq(kScratchRegister, destination, rmode);
+ Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
}
@@ -2621,11 +3002,21 @@ void MacroAssembler::Call(ExternalReference ext) {
}
+void MacroAssembler::Call(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ call(op);
+ } else {
+ movp(kScratchRegister, op);
+ call(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
- int end_position = pc_offset() + CallSize(destination, rmode);
+ int end_position = pc_offset() + CallSize(destination);
#endif
- movq(kScratchRegister, destination, rmode);
+ Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
#ifdef DEBUG
CHECK_EQ(pc_offset(), end_position);
@@ -2649,26 +3040,26 @@ void MacroAssembler::Call(Handle<Code> code_object,
void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
+ Push(rax);
+ Push(rcx);
+ Push(rdx);
+ Push(rbx);
// Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
+ Push(rsi);
+ Push(rdi);
+ Push(r8);
+ Push(r9);
// r10 is kScratchRegister.
- push(r11);
+ Push(r11);
// r12 is kSmiConstantRegister.
// r13 is kRootRegister.
- push(r14);
- push(r15);
+ Push(r14);
+ Push(r15);
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
+ leap(rsp, Operand(rsp, -sp_delta));
}
@@ -2676,23 +3067,23 @@ void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
+ leap(rsp, Operand(rsp, sp_delta));
+ Pop(r15);
+ Pop(r14);
+ Pop(r11);
+ Pop(r9);
+ Pop(r8);
+ Pop(rdi);
+ Pop(rsi);
+ Pop(rbx);
+ Pop(rdx);
+ Pop(rcx);
+ Pop(rax);
}
void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
+ addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
@@ -2721,17 +3112,17 @@ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
const Immediate& imm) {
- movq(SafepointRegisterSlot(dst), imm);
+ movp(SafepointRegisterSlot(dst), imm);
}
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- movq(SafepointRegisterSlot(dst), src);
+ movp(SafepointRegisterSlot(dst), src);
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- movq(dst, SafepointRegisterSlot(src));
+ movp(dst, SafepointRegisterSlot(src));
}
@@ -2757,33 +3148,33 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// The frame pointer does not point to a JS frame so we save NULL for
// rbp. We expect the code throwing an exception to check rbp before
// dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
+ pushq(Immediate(0)); // NULL frame pointer.
Push(Smi::FromInt(0)); // No context.
} else {
- push(rbp);
- push(rsi);
+ pushq(rbp);
+ Push(rsi);
}
// Push the state and the code object.
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
- push(Immediate(state));
+ Push(Immediate(state));
Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(ExternalOperand(handler_address));
+ Push(ExternalOperand(handler_address));
// Set this new handler as the current one.
- movq(ExternalOperand(handler_address), rsp);
+ movp(ExternalOperand(handler_address), rsp);
}
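
PushTryHandler lays the handler out top-down, so the last push (the next-handler link) ends up at offset 0, followed by the code object, the packed state, the context, and the saved frame pointer. A hedged mirror of that record as a C++ struct, lowest address first (offsets are illustrative, not the real StackHandlerConstants):

  #include <cstdint>

  // Illustrative mirror of the stack handler record, field order matching
  // the pushes above in reverse. Not the real V8 constants.
  struct StackHandlerRecord {
    uint64_t next;      // link to previous handler (kNextOffset == 0)
    uint64_t code;      // code object of the handler
    uint64_t state;     // IndexField | KindField, pushed as an immediate
    uint64_t context;   // rsi, or Smi 0 for JS_ENTRY handlers
    uint64_t frame_fp;  // rbp, or 0 for JS_ENTRY handlers
  };

  static_assert(sizeof(StackHandlerRecord) == 5 * sizeof(uint64_t),
                "five slots, as pushed by PushTryHandler");
  int main() { return 0; }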
void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ Pop(ExternalOperand(handler_address));
+ addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -2791,12 +3182,12 @@ void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// rax = exception, rdi = code object, rdx = state.
- movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
- shr(rdx, Immediate(StackHandler::kKindWidth));
- movq(rdx,
+ movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
+ shrp(rdx, Immediate(StackHandler::kKindWidth));
+ movp(rdx,
FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
- lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
jmp(rdi);
}
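
JumpToHandlerEntry strips the handler kind bits from the packed state, indexes a FixedArray of smi-tagged code offsets, untags the offset, and jumps into the code object at that offset. A runnable C++ analogue of the index-and-untag step (a plain array stands in for the FixedArray; kSmiShift == 32 and the kind width are assumptions):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int kSmiShift = 32;          // assumed 64-bit smi layout
    const int kKindWidth = 2;          // illustrative StackHandler::kKindWidth
    // Handler table: smi-tagged byte offsets into the code object.
    int64_t table[] = {int64_t(0x40) << kSmiShift, int64_t(0x80) << kSmiShift};
    int64_t state  = (1 << kKindWidth) | 0x1;    // index 1, kind 1, packed
    int64_t index  = state >> kKindWidth;        // shrp(rdx, kKindWidth)
    int64_t offset = table[index] >> kSmiShift;  // SmiToInteger64(rdx, rdx)
    assert(offset == 0x80);            // jump target = code start + 0x80
    return 0;
  }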
@@ -2813,29 +3204,29 @@ void MacroAssembler::Throw(Register value) {
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- movq(rsp, ExternalOperand(handler_address));
+ movp(rsp, ExternalOperand(handler_address));
// Restore the next handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Restore the context and frame pointer.
- pop(rsi); // Context.
- pop(rbp); // Frame pointer.
+ Pop(rsi); // Context.
+ popq(rbp); // Frame pointer.
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
// rbp or rsi.
Label skip;
- testq(rsi, rsi);
+ testp(rsi, rsi);
j(zero, &skip, Label::kNear);
- movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
JumpToHandlerEntry();
@@ -2854,7 +3245,7 @@ void MacroAssembler::ThrowUncatchable(Register value) {
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top stack handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
@@ -2864,7 +3255,7 @@ void MacroAssembler::ThrowUncatchable(Register value) {
Label fetch_next, check_kind;
jmp(&check_kind, Label::kNear);
bind(&fetch_next);
- movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
+ movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
@@ -2873,15 +3264,15 @@ void MacroAssembler::ThrowUncatchable(Register value) {
j(not_zero, &fetch_next);
// Set the top handler address to next handler past the top ENTRY handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(rsi);
- pop(rbp);
+ Pop(rsi);
+ popq(rbp);
JumpToHandlerEntry();
}
@@ -2897,7 +3288,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
PopReturnAddressTo(scratch);
- addq(rsp, Immediate(bytes_dropped));
+ addp(rsp, Immediate(bytes_dropped));
PushReturnAddressFrom(scratch);
ret(0);
}
@@ -2913,7 +3304,7 @@ void MacroAssembler::FCmp() {
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
@@ -3057,10 +3448,10 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmpl(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
+ cmpl(result_reg, Immediate(1));
+ j(overflow, &conv_failure, Label::kNear);
movl(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
subl(result_reg, Immediate(1));
andl(result_reg, Immediate(255));
jmp(&done, Label::kNear);
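
The rewritten failure check leans on a cvtsd2si quirk: an out-of-range conversion returns INT32_MIN (0x80000000), and `cmpl reg, 1` sets the overflow flag for exactly that value, since INT32_MIN - 1 overflows. The sign-based sequence that follows then clamps without branches: negatives map to 0, large positives to 255. A C++ rendering of that branch-free clamp, covering the inputs that reach the slow path:

  #include <cassert>
  #include <cstdint>

  // Mirrors movl(0) / setcc(sign) / subl(1) / andl(255) above; the sign
  // flag comes from the preceding cmpl(result_reg, Immediate(1)).
  uint8_t ClampSlow(int32_t converted) {
    int32_t r = (converted < 0) ? 1 : 0;  // setcc(sign, result_reg)
    r -= 1;                               // negative -> 0, else 0xFFFFFFFF
    r &= 255;                             // negative -> 0, overflow -> 255
    return static_cast<uint8_t>(r);
  }

  int main() {
    assert(ClampSlow(-5) == 0);     // below range clamps to 0
    assert(ClampSlow(300) == 255);  // above range clamps to 255
    return 0;
  }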
@@ -3074,8 +3465,7 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
void MacroAssembler::LoadUint32(XMMRegister dst,
- Register src,
- XMMRegister scratch) {
+ Register src) {
if (FLAG_debug_code) {
cmpq(src, Immediate(0xffffffff));
Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
@@ -3087,8 +3477,8 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
void MacroAssembler::SlowTruncateToI(Register result_reg,
Register input_reg,
int offset) {
- DoubleToIStub stub(input_reg, result_reg, offset, true);
- call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
+ call(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -3097,21 +3487,22 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
Label done;
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2siq(result_reg, xmm0);
- Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
// Slow case.
if (input_reg.is(result_reg)) {
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), xmm0);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
} else {
SlowTruncateToI(result_reg, input_reg);
}
bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
}
@@ -3119,16 +3510,17 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), input_reg);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
}
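
Both truncation paths now detect conversion failure the same way: cvttsd2siq yields the single sentinel INT64_MIN on overflow or NaN, and `cmpq reg, 1` raises the overflow flag only for that sentinel, replacing the old load-and-compare of the 0x8000000000000000 constant. A self-contained check of the flag arithmetic in C++, using the GCC/Clang overflow builtin as a stand-in for the CPU's OF bit:

  #include <cassert>
  #include <cstdint>
  #include <limits>

  // cmpq(reg, Immediate(1)) computes reg - 1 and sets OF iff that
  // subtraction overflows in 64 bits, true only for reg == INT64_MIN.
  bool CmpOneOverflows(int64_t reg) {
    int64_t dummy;
    return __builtin_sub_overflow(reg, int64_t{1}, &dummy);
  }

  int main() {
    assert(CmpOneOverflows(std::numeric_limits<int64_t>::min()));  // sentinel
    assert(!CmpOneOverflows(0) && !CmpOneOverflows(-1));           // normal
    return 0;
  }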
@@ -3193,56 +3585,23 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
-void MacroAssembler::Throw(BailoutReason reason) {
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
- RecordComment("Throw message: ");
- RecordComment(msg);
- }
-#endif
-
- push(rax);
- Push(Smi::FromInt(reason));
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kThrowMessage, 1);
- }
- // Control will not return here.
- int3();
-}
-
-
-void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
- Label L;
- j(NegateCondition(cc), &L);
- Throw(reason);
- // will not return here
- bind(&L);
-}
-
-
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+ movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
- Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- and_(dst, kScratchRegister);
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
+ andl(dst, Immediate(Map::EnumLengthBits::kMask));
+ Integer32ToSmi(dst, dst);
}
@@ -3313,10 +3672,10 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAString);
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ Push(object);
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
+ Pop(object);
Check(below, kOperandIsNotAString);
}
}
@@ -3326,22 +3685,35 @@ void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAName);
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ Push(object);
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
- pop(object);
+ Pop(object);
Check(below_equal, kOperandIsNotAName);
}
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ Cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
BailoutReason reason) {
if (emit_debug_code()) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
+ cmpp(src, kScratchRegister);
Check(equal, reason);
}
}
@@ -3351,7 +3723,7 @@ void MacroAssembler::AssertRootValue(Register src,
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
testb(instance_type, Immediate(kIsNotStringMask));
@@ -3362,7 +3734,7 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
Condition MacroAssembler::IsObjectNameType(Register heap_object,
Register map,
Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
return below_equal;
@@ -3382,13 +3754,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, miss);
if (miss_on_bound_function) {
- movq(kScratchRegister,
+ movp(kScratchRegister,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
// It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
// field).
- TestBit(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kCompilerHintsOffset),
- SharedFunctionInfo::kBoundFunction);
+ TestBitSharedFunctionInfoSpecialField(kScratchRegister,
+ SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
j(not_zero, miss);
}
@@ -3399,7 +3771,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
- movq(result,
+ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// If the prototype or initial map is the hole, don't return it and
@@ -3414,13 +3786,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
- movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ movp(result, FieldOperand(result, Map::kPrototypeOffset));
jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- movq(result, FieldOperand(result, Map::kConstructorOffset));
+ movp(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
bind(&done);
@@ -3461,28 +3833,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
+ CEntryStub ces(isolate(), 1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be rcx to
- // follow the calling convention which requires the call type to be
- // in rcx.
- ASSERT(dst.is(rcx));
- if (call_kind == CALL_AS_FUNCTION) {
- LoadSmiConstant(dst, Smi::FromInt(1));
- } else {
- LoadSmiConstant(dst, Smi::FromInt(0));
- }
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
@@ -3490,8 +3846,7 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3505,17 +3860,14 @@ void MacroAssembler::InvokeCode(Register code,
&definitely_mismatches,
flag,
Label::kNear,
- call_wrapper,
- call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
jmp(code);
}
bind(&done);
@@ -3523,64 +3875,24 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- Register dummy = rax;
- InvokePrologue(expected,
- actual,
- code,
- dummy,
- &done,
- &definitely_mismatches,
- flag,
- Label::kNear,
- call_wrapper,
- call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- Call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- Jump(code, rmode);
- }
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function.is(rdi));
- movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ LoadSharedFunctionInfoSpecialField(rbx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper);
}
@@ -3588,18 +3900,17 @@ void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function.is(rdi));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper);
}
@@ -3607,10 +3918,9 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
Move(rdi, function);
- InvokeFunction(rdi, expected, actual, flag, call_wrapper, call_kind);
+ InvokeFunction(rdi, expected, actual, flag, call_wrapper);
}
@@ -3622,8 +3932,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label invoke;
@@ -3650,14 +3959,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
+ cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
ASSERT(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
+ cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
ASSERT(actual.reg().is(rax));
ASSERT(expected.reg().is(rbx));
@@ -3667,22 +3976,20 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
+ movp(rdx, code_register);
}
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(rcx, call_kind);
Call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
jmp(done, near_jump);
}
} else {
- SetCallKind(rcx, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&invoke);
@@ -3690,42 +3997,43 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
- push(rbp); // Caller's frame pointer.
- movq(rbp, rsp);
- push(rsi); // Callee's context.
+void MacroAssembler::StubPrologue() {
+ pushq(rbp); // Caller's frame pointer.
+ movp(rbp, rsp);
+ Push(rsi); // Callee's context.
Push(Smi::FromInt(StackFrame::STUB));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ PredictableCodeSizeScope predictible_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
- PredictableCodeSizeScope predictible_code_size_scope(this,
- kNoCodeAgeSequenceLength);
- if (isolate()->IsCodePreAgingActive()) {
- // Pre-age the code.
- Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
- RelocInfo::CODE_AGE_SEQUENCE);
- Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
- } else {
- push(rbp); // Caller's frame pointer.
- movq(rbp, rsp);
- push(rsi); // Callee's context.
- push(rdi); // Callee's JS function.
- }
+ pushq(rbp); // Caller's frame pointer.
+ movp(rbp, rsp);
+ Push(rsi); // Callee's context.
+ Push(rdi); // Callee's JS function.
}
}
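
After either prologue runs, the frame has the standard V8 shape: return address above the saved rbp, then the context, then the stub marker or JS function below it. A compact C++ sketch of the slot offsets relative to the new rbp (8-byte slots assumed; illustrative, not the real StandardFrameConstants):

  #include <cstdint>

  // Illustrative frame-slot map after the prologue (8-byte slots assumed).
  constexpr int kReturnAddressOffset = +8;   // pushed by the call itself
  constexpr int kSavedFpOffset       =  0;   // pushq(rbp); movp(rbp, rsp)
  constexpr int kContextOffset       = -8;   // Push(rsi)
  constexpr int kFunctionOrMarker    = -16;  // Push(rdi), or the STUB smi

  int main() { return kSavedFpOffset; }      // offsets relative to new rbp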
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
- movq(rbp, rsp);
- push(rsi); // Context.
+ pushq(rbp);
+ movp(rbp, rsp);
+ Push(rsi); // Context.
Push(Smi::FromInt(type));
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
+ Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ Push(kScratchRegister);
if (emit_debug_code()) {
- movq(kScratchRegister,
+ Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
+ cmpp(Operand(rsp, 0), kScratchRegister);
Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -3734,11 +4042,11 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
- movq(rsp, rbp);
- pop(rbp);
+ movp(rsp, rbp);
+ popq(rbp);
}
@@ -3749,18 +4057,18 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
kFPOnStackSize + kPCOnStackSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
- movq(rbp, rsp);
+ pushq(rbp);
+ movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from EditFrame::code_slot.
+ Push(Immediate(0)); // Saved entry sp, patched before call.
+ Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ Push(kScratchRegister); // Accessed from EditFrame::code_slot.
// Save the frame pointer and the context in top.
if (save_rax) {
- movq(r14, rax); // Backup rax in callee-save register.
+ movp(r14, rax); // Backup rax in callee-save register.
}
Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
@@ -3777,15 +4085,15 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
// Optionally save all XMM registers.
if (save_doubles) {
int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
- arg_stack_space * kPointerSize;
- subq(rsp, Immediate(space));
+ arg_stack_space * kRegisterSize;
+ subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kPointerSize));
+ subp(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
@@ -3793,11 +4101,11 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
+ andp(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
- movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
+ movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
@@ -3807,7 +4115,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ leap(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
@@ -3830,12 +4138,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
}
}
// Get the return address from the stack and restore the frame pointer.
- movq(rcx, Operand(rbp, 1 * kPointerSize));
- movq(rbp, Operand(rbp, 0 * kPointerSize));
+ movp(rcx, Operand(rbp, kFPOnStackSize));
+ movp(rbp, Operand(rbp, 0 * kPointerSize));
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ leap(rsp, Operand(r15, 1 * kPointerSize));
PushReturnAddressFrom(rcx);
@@ -3844,8 +4152,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
- movq(rsp, rbp);
- pop(rbp);
+ movp(rsp, rbp);
+ popq(rbp);
LeaveExitFrameEpilogue(restore_context);
}
@@ -3856,17 +4164,17 @@ void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
if (restore_context) {
- movq(rsi, context_operand);
+ movp(rsi, context_operand);
}
#ifdef DEBUG
- movq(context_operand, Immediate(0));
+ movp(context_operand, Immediate(0));
#endif
// Clear the top frame.
ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
isolate());
Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
- movq(c_entry_fp_operand, Immediate(0));
+ movp(c_entry_fp_operand, Immediate(0));
}
@@ -3878,18 +4186,18 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
ASSERT(!holder_reg.is(scratch));
ASSERT(!scratch.is(kScratchRegister));
// Load current lexical context from the stack frame.
- movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
+ movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
+ cmpp(scratch, Immediate(0));
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, offset));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -3899,7 +4207,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
@@ -3910,25 +4218,25 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
// Preserve original value of holder_reg.
- push(holder_reg);
- movq(holder_reg,
+ Push(holder_reg);
+ movp(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
    // Read the first word and compare to native_context_map().
- movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
+ movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg);
+ Pop(holder_reg);
}
- movq(kScratchRegister,
+ movp(kScratchRegister,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+ movp(scratch, FieldOperand(scratch, token_offset));
+ cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
bind(&same_contexts);
@@ -4014,19 +4322,19 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
- movq(r2, r0);
+ movp(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
- and_(r2, r1);
+ andp(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+ leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
- cmpq(key, FieldOperand(elements,
+ cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
@@ -4049,7 +4357,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Get the value at the masked, scaled index.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
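
The unrolled loop probes the dictionary with a quadratic sequence masked into the table, then scales each index by the 3-slot entry size via the lea r2, [r2 + r2*2] trick. A runnable C++ walk of the probe offsets, following the "(hash + i + i * i) & mask" formula from the comment above (the exact bias lives in SeededNumberDictionary::GetProbeOffset; values here are hypothetical):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t mask = 15;        // capacity 16, minus 1
    const uint32_t hash = 39;        // hypothetical key hash
    const uint32_t expected[4] = {7, 9, 13, 3};
    for (uint32_t i = 0; i < 4; i++) {
      uint32_t index = (hash + i + i * i) & mask;   // masked probe
      assert(index == expected[i]);
      uint32_t entry_slot = index * 3;  // lea r2, [r2 + r2*2]: 3 slots/entry
      (void)entry_slot;  // entry_slot * kPointerSize indexes the elements
    }
    return 0;
  }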
@@ -4066,7 +4374,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
#ifdef DEBUG
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
- cmpq(result, top_operand);
+ cmpp(result, top_operand);
Check(equal, kUnexpectedAllocationTop);
#endif
return;
@@ -4076,18 +4384,53 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
// and keep address in scratch until call to UpdateAllocationTopHelper.
if (scratch.is_valid()) {
LoadAddress(scratch, allocation_top);
- movq(result, Operand(scratch, 0));
+ movp(result, Operand(scratch, 0));
} else {
Load(result, allocation_top);
}
}
+void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (kPointerSize == kDoubleSize) {
+ if (FLAG_debug_code) {
+ testl(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, kAllocationIsNotDoubleAligned);
+ }
+ } else {
+ // Align the next allocation. Storing the filler map without checking top
+ // is safe in new-space because the limit of the heap is aligned there.
+ ASSERT(kPointerSize * 2 == kDoubleSize);
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ // Make sure scratch is not clobbered by this function as it might be
+ // used in UpdateAllocationTopHelper later.
+ ASSERT(!scratch.is(kScratchRegister));
+ Label aligned;
+ testl(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ cmpp(result, ExternalOperand(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
+ movp(Operand(result, 0), kScratchRegister);
+ addp(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+}
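
On x32 (4-byte pointers, 8-byte doubles) the helper aligns the allocation top by writing a one-word filler and bumping the pointer half a double forward whenever the top lands on an odd word. A C++ model of the bump logic, with illustrative sizes and addresses:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uintptr_t kPointerSize = 4, kDoubleSize = 8;   // x32-style layout
    const uintptr_t kDoubleAlignmentMask = kDoubleSize - 1;
    uintptr_t top = 0x1004;           // hypothetical misaligned allocation top
    if (top & kDoubleAlignmentMask) {
      // The real code first stores the one-pointer filler map at 'top'
      // so the skipped word stays iterable by the GC.
      top += kDoubleSize / 2;         // addp(result, Immediate(kDoubleSize/2))
    }
    assert((top & kDoubleAlignmentMask) == 0 && top == 0x1008);
    (void)kPointerSize;
    return 0;
  }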
+
+
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags) {
if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
+ testp(result_end, Immediate(kObjectAlignmentMask));
Check(zero, kUnalignedAllocationInNewSpace);
}
@@ -4097,7 +4440,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
// Update new top.
if (scratch.is_valid()) {
// Scratch already contains address of allocation top.
- movq(Operand(scratch, 0), result_end);
+ movp(Operand(scratch, 0), result_end);
} else {
Store(allocation_top, result_end);
}
@@ -4111,7 +4454,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4131,11 +4474,8 @@ void MacroAssembler::Allocate(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, kAllocationIsNotDoubleAligned);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
// Calculate new top and bail out if new space is exhausted.
@@ -4145,12 +4485,12 @@ void MacroAssembler::Allocate(int object_size,
Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
- movq(top_reg, result);
+ movp(top_reg, result);
}
- addq(top_reg, Immediate(object_size));
+ addp(top_reg, Immediate(object_size));
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(top_reg, limit_operand);
+ cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4159,14 +4499,14 @@ void MacroAssembler::Allocate(int object_size,
bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
if (tag_result) {
- subq(result, Immediate(object_size - kHeapObjectTag));
+ subp(result, Immediate(object_size - kHeapObjectTag));
} else {
- subq(result, Immediate(object_size));
+ subp(result, Immediate(object_size));
}
} else if (tag_result) {
// Tag the result if requested.
ASSERT(kHeapObjectTag == 1);
- incq(result);
+ incp(result);
}
}
@@ -4180,7 +4520,7 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
- lea(result_end, Operand(element_count, element_size, header_size));
+ leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
@@ -4210,23 +4550,20 @@ void MacroAssembler::Allocate(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, kAllocationIsNotDoubleAligned);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
// Calculate new top and bail out if new space is exhausted.
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
if (!object_size.is(result_end)) {
- movq(result_end, object_size);
+ movp(result_end, object_size);
}
- addq(result_end, result);
+ addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(result_end, limit_operand);
+ cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4234,7 +4571,7 @@ void MacroAssembler::Allocate(Register object_size,
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
+ addp(result, Immediate(kHeapObjectTag));
}
}
@@ -4244,13 +4581,13 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
+ andp(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, top_operand);
+ cmpp(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
- movq(top_operand, object);
+ movp(top_operand, object);
}
@@ -4262,7 +4599,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
// Set the map.
LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4278,11 +4615,11 @@ void MacroAssembler::AllocateTwoByteString(Register result,
kObjectAlignmentMask;
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+ leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate two byte string in new space.
@@ -4297,10 +4634,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -4317,10 +4654,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate ASCII string in new space.
@@ -4335,10 +4672,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -4353,7 +4690,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4361,37 +4698,16 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
-
- Load(scratch1, high_promotion_mode);
- testb(scratch1, Immediate(1));
- j(zero, &allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- jmp(&install_map);
-
- bind(&allocate_new_space);
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- flags);
-
- bind(&install_map);
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4405,7 +4721,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4419,7 +4735,7 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4464,30 +4780,30 @@ void MacroAssembler::CopyBytes(Register destination,
// Because source is 8-byte aligned in our uses of this function,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
- movq(scratch, length);
+ movp(scratch, length);
shrl(length, Immediate(kPointerSizeLog2));
- repmovsq();
+ repmovsp();
// Move remaining bytes of length.
andl(scratch, Immediate(kPointerSize - 1));
- movq(length, Operand(source, scratch, times_1, -kPointerSize));
- movq(Operand(destination, scratch, times_1, -kPointerSize), length);
- addq(destination, scratch);
+ movp(length, Operand(source, scratch, times_1, -kPointerSize));
+ movp(Operand(destination, scratch, times_1, -kPointerSize), length);
+ addp(destination, scratch);
if (min_length <= kLongStringLimit) {
jmp(&done, Label::kNear);
bind(&len24);
- movq(scratch, Operand(source, 2 * kPointerSize));
- movq(Operand(destination, 2 * kPointerSize), scratch);
+ movp(scratch, Operand(source, 2 * kPointerSize));
+ movp(Operand(destination, 2 * kPointerSize), scratch);
bind(&len16);
- movq(scratch, Operand(source, kPointerSize));
- movq(Operand(destination, kPointerSize), scratch);
+ movp(scratch, Operand(source, kPointerSize));
+ movp(Operand(destination, kPointerSize), scratch);
bind(&len8);
- movq(scratch, Operand(source, 0));
- movq(Operand(destination, 0), scratch);
+ movp(scratch, Operand(source, 0));
+ movp(Operand(destination, 0), scratch);
// Move remaining bytes of length.
- movq(scratch, Operand(source, length, times_1, -kPointerSize));
- movq(Operand(destination, length, times_1, -kPointerSize), scratch);
- addq(destination, length);
+ movp(scratch, Operand(source, length, times_1, -kPointerSize));
+ movp(Operand(destination, length, times_1, -kPointerSize), scratch);
+ addp(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
@@ -4499,8 +4815,8 @@ void MacroAssembler::CopyBytes(Register destination,
bind(&short_loop);
movb(scratch, Operand(source, 0));
movb(Operand(destination, 0), scratch);
- incq(source);
- incq(destination);
+ incp(source);
+ incp(destination);
decl(length);
j(not_zero, &short_loop);
}
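
CopyBytes moves length/8 words with rep movs, then finishes the remaining 1-7 bytes with a single overlapping word move that ends exactly at destination+length, instead of a byte loop. A runnable C++ demonstration of the overlapping-tail idea (memcpy of the last word stands in for the movp pair; assumes length >= 8 with a nonzero tail, as on that path above):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    const size_t kWord = 8;
    uint8_t src[19], dst[19];
    for (size_t i = 0; i < sizeof(src); i++) src[i] = uint8_t(i * 7 + 1);
    size_t len = sizeof(src);
    size_t words = len / kWord, tail = len & (kWord - 1);
    std::memcpy(dst, src, words * kWord);          // repmovsp
    // One overlapping word move covers the last 'tail' bytes: it ends
    // exactly at offset len, re-copying a few already-copied bytes.
    std::memcpy(dst + words * kWord + tail - kWord,
                src + words * kWord + tail - kWord, kWord);
    assert(std::memcmp(dst, src, len) == 0);
    return 0;
  }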
@@ -4515,10 +4831,10 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
Label loop, entry;
jmp(&entry);
bind(&loop);
- movq(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
+ movp(Operand(start_offset, 0), filler);
+ addp(start_offset, Immediate(kPointerSize));
bind(&entry);
- cmpq(start_offset, end_offset);
+ cmpp(start_offset, end_offset);
j(less, &loop);
}
@@ -4526,15 +4842,15 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
// destination register in case we store into it (the write barrier
// cannot be allowed to destroy the context in rsi).
- movq(dst, rsi);
+ movp(dst, rsi);
}
// We should not have found a with context by walking the context
@@ -4556,50 +4872,26 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- movq(scratch,
+ movp(scratch,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- movq(scratch, Operand(scratch,
+ movp(scratch, Operand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
int offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
+ cmpp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- movq(map_in_out, FieldOperand(scratch, offset));
+ movp(map_in_out, FieldOperand(scratch, offset));
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- movq(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@@ -4608,28 +4900,19 @@ static const int kRegisterPassedArguments = 6;
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- movq(function,
+ movp(function,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
- movq(function, Operand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadArrayFunction(Register function) {
- movq(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movq(function,
- Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ movp(function, Operand(function, Context::SlotOffset(index)));
}
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
- movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
@@ -4666,27 +4949,27 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
uint32_t encoding_mask) {
Label is_object;
JumpIfNotSmi(string, &is_object);
- Throw(kNonObject);
+ Abort(kNonObject);
bind(&is_object);
- push(value);
- movq(value, FieldOperand(string, HeapObject::kMapOffset));
- movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ Push(value);
+ movp(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- cmpq(value, Immediate(encoding_mask));
- pop(value);
- ThrowIf(not_equal, kUnexpectedStringType);
+ cmpp(value, Immediate(encoding_mask));
+ Pop(value);
+ Check(equal, kUnexpectedStringType);
  // The index is assumed to be untagged coming in; tag it to compare with the
  // string length without using a temp register. It is restored at the end of
  // this function.
Integer32ToSmi(index, index);
SmiCompare(index, FieldOperand(string, String::kLengthOffset));
- ThrowIf(greater_equal, kIndexIsTooLarge);
+ Check(less, kIndexIsTooLarge);
SmiCompare(index, Smi::FromInt(0));
- ThrowIf(less, kIndexIsNegative);
+ Check(greater_equal, kIndexIsNegative);
// Restore the index
SmiToInteger32(index, index);
@@ -4699,13 +4982,13 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
ASSERT(num_arguments >= 0);
// Make stack end at alignment and allocate space for arguments and old rsp.
- movq(kScratchRegister, rsp);
+ movp(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
- and_(rsp, Immediate(-frame_alignment));
- movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
+ subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
+ andp(rsp, Immediate(-frame_alignment));
+ movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
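The switch from kPointerSize to kRegisterSize slots above does not change the alignment scheme of PrepareCallCFunction: reserve one slot per stack argument plus one for the caller's rsp, round rsp down to the frame alignment, and store the old rsp in the highest reserved slot so CallCFunction can restore it with a single load. A scalar sketch of the arithmetic, assuming 8-byte slots and a 16-byte ABI alignment:

#include <cassert>
#include <cstdint>

static const uint64_t kRegisterSize = 8;

// Returns the new rsp; *saved_rsp_slot is where the old rsp would be stored.
static uint64_t PrepareStack(uint64_t rsp, int argument_slots,
                             uint64_t frame_alignment,
                             uint64_t* saved_rsp_slot) {
  rsp -= (argument_slots + 1) * kRegisterSize;             // subp(rsp, ...)
  rsp &= ~(frame_alignment - 1);                           // andp(rsp, -alignment)
  *saved_rsp_slot = rsp + argument_slots * kRegisterSize;  // movp(Operand(...), old rsp)
  return rsp;
}

int main() {
  uint64_t slot;
  uint64_t rsp = PrepareStack(0x7ffd1234567aULL, 3, 16, &slot);
  assert(rsp % 16 == 0);  // aligned for the C call
  assert(slot >= rsp && slot < rsp + 4 * kRegisterSize);
  return 0;
}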
@@ -4728,7 +5011,7 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
ASSERT(num_arguments >= 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
+ movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
@@ -4773,10 +5056,10 @@ void MacroAssembler::CheckPageFlag(
Label::Distance condition_met_distance) {
ASSERT(cc == zero || cc == not_zero);
if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
- movq(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+ movp(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
@@ -4793,9 +5076,8 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
Move(scratch, map);
- movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
- SmiToInteger32(scratch, scratch);
- and_(scratch, Immediate(Map::Deprecated::kMask));
+ movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ andl(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
}
}
@@ -4812,13 +5094,13 @@ void MacroAssembler::JumpIfBlack(Register object,
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
// and a 0 at all other positions, including the position of the second bit.
- movq(rcx, mask_scratch);
+ movp(rcx, mask_scratch);
// Make rcx into a mask that covers both marking bits using the operation
// rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
// Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
+ andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
@@ -4832,7 +5114,7 @@ void MacroAssembler::JumpIfDataObject(
Label* not_data_object,
Label::Distance not_data_object_distance) {
Label is_data_object;
- movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+ movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
j(equal, &is_data_object, Label::kNear);
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
@@ -4850,23 +5132,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
- movq(bitmap_reg, addr_reg);
+ movp(bitmap_reg, addr_reg);
  // Sign-extended 32-bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- movq(rcx, addr_reg);
+ andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
- and_(rcx,
+ andp(rcx,
Immediate((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
- addq(bitmap_reg, rcx);
- movq(rcx, addr_reg);
+ addp(bitmap_reg, rcx);
+ movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
- shl_cl(mask_reg);
+ shlp_cl(mask_reg);
}
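GetMarkBits turns an object address into a bitmap cell address plus a one-bit mask: the page base comes from masking off the page offset, the cell offset from the word index divided by the bits per cell, and the mask from the word index modulo the bits per cell. A scalar sketch under assumed constants (1 MB pages, 32-bit cells, one mark bit per 8-byte word; the real code reads the cell at an extra MemoryChunk::kHeaderSize offset from the page base):

#include <cstdint>

static const int kPointerSizeLog2 = 3;   // 8-byte words
static const int kBitsPerCellLog2 = 5;   // 32-bit cells
static const int kBytesPerCellLog2 = 2;
static const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;  // 1 MB pages (assumed)

static void GetMarkBits(uintptr_t addr, uintptr_t* cell, uint32_t* mask) {
  uintptr_t page = addr & ~kPageAlignmentMask;  // andp(bitmap_reg, ~mask)
  const int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
  uintptr_t cell_offset =
      (addr >> shift) & ((kPageAlignmentMask >> shift) &
                         ~((uintptr_t(1) << kBytesPerCellLog2) - 1));
  *cell = page + cell_offset;                   // addp(bitmap_reg, rcx)
  *mask = 1u << ((addr >> kPointerSizeLog2) & ((1 << kBitsPerCellLog2) - 1));
}

int main() {
  uintptr_t cell;
  uint32_t mask;
  GetMarkBits(0x100008, &cell, &mask);  // second word on its page
  return (cell == 0x100000 && mask == 2u) ? 0 : 1;
}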
@@ -4889,20 +5171,20 @@ void MacroAssembler::EnsureNotWhite(
// Since both black and grey have a 1 in the first position and white does
  // not have a 1 there, we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
- push(mask_scratch);
+ Push(mask_scratch);
  // shl. May overflow, making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ addp(mask_scratch, mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
- pop(mask_scratch);
+ Pop(mask_scratch);
}
// Value is white. We check whether it is data that doesn't need scanning.
@@ -4913,10 +5195,10 @@ void MacroAssembler::EnsureNotWhite(
Label is_data_object;
// Check for heap-number
- movq(map, FieldOperand(value, HeapObject::kMapOffset));
+ movp(map, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(map, Heap::kHeapNumberMapRootIndex);
j(not_equal, &not_heap_number, Label::kNear);
- movq(length, Immediate(HeapNumber::kSize));
+ movp(length, Immediate(HeapNumber::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_heap_number);
@@ -4939,27 +5221,27 @@ void MacroAssembler::EnsureNotWhite(
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
testb(instance_type, Immediate(kExternalStringTag));
j(zero, &not_external, Label::kNear);
- movq(length, Immediate(ExternalString::kSize));
+ movp(length, Immediate(ExternalString::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_external);
// Sequential string, either ASCII or UC16.
ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
+ andp(length, Immediate(kStringEncodingMask));
+ xorp(length, Immediate(kStringEncodingMask));
+ addp(length, Immediate(0x04));
  // Value is now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
+ imulp(length, FieldOperand(value, String::kLengthOffset));
+ shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+ addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ andp(length, Immediate(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white, we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
bind(&done);
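The encoding arithmetic above is compact: AND then XOR with kStringEncodingMask (0x04), followed by adding 0x04, maps a one-byte string to 4 and a two-byte string to 8, i.e. the character size shifted left by 2. Multiplying by the character count and shifting right by 2 (plus the smi untagging shift in the emitted code) yields the payload size, which is then rounded up to object alignment. A scalar sketch using an untagged count; the header size here is an assumption:

#include <cassert>
#include <cstdint>

static const uint32_t kStringEncodingMask = 0x04;  // one-byte tag is 0x04
static const uintptr_t kHeaderSize = 24;           // SeqString header (assumed)
static const uintptr_t kObjectAlignmentMask = 7;   // 8-byte alignment

static uintptr_t SeqStringSize(uint32_t instance_type, uintptr_t char_count) {
  uint32_t enc = instance_type & kStringEncodingMask;
  uint32_t char_size_x4 = (enc ^ kStringEncodingMask) + 0x04;  // 4 (ASCII) or 8 (UC16)
  uintptr_t size = (char_size_x4 * char_count) >> 2;           // payload bytes
  return (size + kHeaderSize + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}

int main() {
  assert(SeqStringSize(0x04, 5) == 32);  // one-byte: 5 + 24, rounded to 32
  assert(SeqStringSize(0x00, 5) == 40);  // two-byte: 10 + 24, rounded to 40
  return 0;
}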
@@ -4970,11 +5252,11 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Label next, start;
Register empty_fixed_array_value = r8;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- movq(rcx, rax);
+ movp(rcx, rax);
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
EnumLength(rdx, rbx);
Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
@@ -4984,7 +5266,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
bind(&next);
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
EnumLength(rdx, rbx);
@@ -4995,12 +5277,19 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
- cmpq(empty_fixed_array_value,
+ Label no_elements;
+ cmpp(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
+ j(equal, &no_elements);
+
+  // Second chance: the object may be using the empty slow element dictionary.
+ LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
+ cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
- movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
+ bind(&no_elements);
+ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ cmpp(rcx, null_value);
j(not_equal, &next);
}
@@ -5013,12 +5302,12 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- lea(scratch_reg, Operand(receiver_reg,
+ leap(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
Move(kScratchRegister, new_space_start);
- cmpq(scratch_reg, kScratchRegister);
+ cmpp(scratch_reg, kScratchRegister);
j(less, no_memento_found);
- cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
+ cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
@@ -5035,22 +5324,36 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register current = scratch0;
Label loop_again;
- movq(current, object);
+ movp(current, object);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
- movq(current, FieldOperand(current, HeapObject::kMapOffset));
- movq(scratch1, FieldOperand(current, Map::kBitField2Offset));
- and_(scratch1, Immediate(Map::kElementsKindMask));
- shr(scratch1, Immediate(Map::kElementsKindShift));
- cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ movp(current, FieldOperand(current, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
- movq(current, FieldOperand(current, Map::kPrototypeOffset));
+ movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
j(not_equal, &loop_again);
}
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(rax));
+ ASSERT(!dividend.is(rdx));
+ MultiplierAndShift ms(divisor);
+ movl(rax, Immediate(ms.multiplier()));
+ imull(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
+ if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
+ movl(rax, dividend);
+ shrl(rax, Immediate(31));
+ addl(rdx, rax);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
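The new TruncatingDiv replaces an idiv with a multiply-high sequence: MultiplierAndShift (defined elsewhere, not in this diff) picks a magic multiplier and shift, imull leaves the high 32 bits of the product in rdx, and the final shrl/addl adds the dividend's sign bit so the floor computed by the arithmetic shift becomes truncation toward zero. That last correction is valid for positive divisors, which appears to be how callers use the helper (passing the absolute divisor and negating afterwards). A self-contained sketch, assuming the standard Hacker's Delight magic construction for divisors >= 2:

#include <cassert>
#include <cstdint>

struct MultiplierAndShift {  // stand-in for the real v8::internal class
  int32_t multiplier;
  int shift;
};

// Magic multiplier/shift for signed division by d >= 2 (Hacker's Delight 10-1).
static MultiplierAndShift ComputeMS(int32_t d) {
  const uint32_t two31 = 0x80000000u;
  uint32_t ad = static_cast<uint32_t>(d);
  uint32_t anc = two31 - 1 - (two31 % ad);
  int p = 31;
  uint32_t q1 = two31 / anc, r1 = two31 - q1 * anc;
  uint32_t q2 = two31 / ad, r2 = two31 - q2 * ad;
  uint32_t delta;
  do {
    p++;
    q1 *= 2; r1 *= 2;
    if (r1 >= anc) { q1++; r1 -= anc; }
    q2 *= 2; r2 *= 2;
    if (r2 >= ad) { q2++; r2 -= ad; }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));
  MultiplierAndShift ms = { static_cast<int32_t>(q2 + 1), p - 32 };
  return ms;
}

// Mirrors the emitted sequence one instruction at a time.
static int32_t TruncatingDiv(int32_t dividend, int32_t divisor) {
  MultiplierAndShift ms = ComputeMS(divisor);
  int32_t rdx = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * ms.multiplier) >> 32);  // imull: high half
  if (divisor > 0 && ms.multiplier < 0) rdx += dividend;        // addl(rdx, dividend)
  if (ms.shift > 0) rdx >>= ms.shift;                           // sarl(rdx, shift)
  rdx += static_cast<uint32_t>(dividend) >> 31;                 // shrl(rax, 31); addl
  return rdx;
}

int main() {
  static const int32_t kDivisors[] = {3, 7, 10, 641};
  for (int i = 0; i < 4; i++) {
    for (int32_t n = -1000; n <= 1000; n++) {
      assert(TruncatingDiv(n, kDivisors[i]) == n / kDivisors[i]);
    }
  }
  return 0;
}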
diff --git a/chromium/v8/src/x64/macro-assembler-x64.h b/chromium/v8/src/x64/macro-assembler-x64.h
index 98808a86722..8a0ffa61540 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.h
+++ b/chromium/v8/src/x64/macro-assembler-x64.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -52,6 +29,10 @@ typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
enum SmiOperationConstraint {
PRESERVE_SOURCE_REGISTER,
@@ -243,7 +224,9 @@ class MacroAssembler: public Assembler {
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// Operand(reg, off).
@@ -254,14 +237,17 @@ class MacroAssembler: public Assembler {
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
scratch,
save_fp,
remembered_set_action,
- smi_check);
+ smi_check,
+ pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a pointer into a fixed array.
@@ -276,7 +262,15 @@ class MacroAssembler: public Assembler {
Register index,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ void RecordWriteForMap(
+ Register object,
+ Register map,
+ Register dst,
+ SaveFPRegsMode save_fp);
  // For the page containing |object|, mark the region covering |address|
// dirty. |object| is the object being stored into, |value| is the
@@ -289,17 +283,18 @@ class MacroAssembler: public Assembler {
Register value,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
void DebugBreak();
-#endif
// Generates function and stub prologue code.
- void Prologue(PrologueFrameMode frame_mode);
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
@@ -336,54 +331,37 @@ class MacroAssembler: public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Move(kRootRegister, roots_array_start);
- addq(kRootRegister, Immediate(kRootRegisterBias));
+ addp(kRootRegister, Immediate(kRootRegisterBias));
}
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Set up call kind marking in rcx. The method takes rcx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -407,8 +385,8 @@ class MacroAssembler: public Assembler {
void SafePush(Smi* src);
void InitializeSmiConstantRegister() {
- movq(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
- RelocInfo::NONE64);
+ Move(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
+ Assembler::RelocInfoNone());
}
// Conversions between tagged smi values and non-tagged integer values.
@@ -511,10 +489,18 @@ class MacroAssembler: public Assembler {
// Test-and-jump functions. Typically combines a check function
// above with a conditional jump.
+ // Jump if the value can be represented by a smi.
+ void JumpIfValidSmiValue(Register src, Label* on_valid,
+ Label::Distance near_jump = Label::kFar);
+
// Jump if the value cannot be represented by a smi.
void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
Label::Distance near_jump = Label::kFar);
+ // Jump if the unsigned integer value can be represented by a smi.
+ void JumpIfUIntValidSmiValue(Register src, Label* on_valid,
+ Label::Distance near_jump = Label::kFar);
+
// Jump if the unsigned integer value cannot be represented by a smi.
void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
Label::Distance near_jump = Label::kFar);
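The added JumpIfValidSmiValue/JumpIfUIntValidSmiValue are the positive-polarity twins of the existing checks: branch when the value fits in a smi payload. In scalar form the predicate is just a range test, shown here for a 31-bit payload (with 32-bit payloads every int32 qualifies):

#include <cassert>
#include <cstdint>

// 31-bit smi payload: representable values are [-2^30, 2^30 - 1].
static bool IsValidSmiValue(int32_t value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}

// Unsigned twin: anything at or above 2^30 would not survive tagging.
static bool IsUIntValidSmiValue(uint32_t value) {
  return value < (1u << 30);
}

int main() {
  assert(IsValidSmiValue((1 << 30) - 1));
  assert(!IsValidSmiValue(1 << 30));
  assert(IsValidSmiValue(-(1 << 30)));
  assert(!IsUIntValidSmiValue(1u << 30));
  return 0;
}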
@@ -672,12 +658,14 @@ class MacroAssembler: public Assembler {
void SmiShiftLeftConstant(Register dst,
Register src,
- int shift_value);
+ int shift_value,
+ Label* on_not_smi_result = NULL,
+ Label::Distance near_jump = Label::kFar);
void SmiShiftLogicalRightConstant(Register dst,
- Register src,
- int shift_value,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ Register src,
+ int shift_value,
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
@@ -686,7 +674,9 @@ class MacroAssembler: public Assembler {
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftLeft(Register dst,
Register src1,
- Register src2);
+ Register src2,
+ Label* on_not_smi_result = NULL,
+ Label::Distance near_jump = Label::kFar);
// Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
@@ -738,17 +728,17 @@ class MacroAssembler: public Assembler {
void Move(const Operand& dst, Smi* source) {
Register constant = GetSmiConstant(source);
- movq(dst, constant);
+ movp(dst, constant);
}
void Push(Smi* smi);
- // Save away a 64-bit integer on the stack as two 32-bit integers
+  // Save away a pointer-sized raw integer on the stack as two integers
// masquerading as smis so that the garbage collector skips visiting them.
- void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister);
- // Reconstruct a 64-bit integer from two 32-bit integers masquerading as
- // smis on the top of stack.
- void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister);
+ void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
+  // Reconstruct a pointer-sized raw integer from two integers masquerading
+ // as smis on the top of stack.
+ void PopRegisterAsTwoSmis(Register dst, Register scratch = kScratchRegister);
void Test(const Operand& dst, Smi* source);
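The renamed PushRegisterAsTwoSmis/PopRegisterAsTwoSmis pair hides a raw word from the GC by splitting it into two halves, each parked in the payload bits of its own stack slot so the slot reads as a smi rather than a heap pointer. A scalar sketch of the split and join, assuming the x64 32-bit-payload layout (kSmiShift = 32):

#include <cassert>
#include <cstdint>

static const int kSmiShift = 32;  // x64: smi payload lives in the high word

static void SplitAsTwoSmis(uint64_t raw, uint64_t* high_slot, uint64_t* low_slot) {
  *high_slot = (raw >> kSmiShift) << kSmiShift;  // top 32 bits, tag bits zero
  *low_slot = raw << kSmiShift;                  // bottom 32 bits, tag bits zero
}

static uint64_t JoinFromTwoSmis(uint64_t high_slot, uint64_t low_slot) {
  return high_slot | (low_slot >> kSmiShift);
}

int main() {
  uint64_t hi, lo;
  SplitAsTwoSmis(0x123456789abcdef1ull, &hi, &lo);
  assert((hi & 1) == 0 && (lo & 1) == 0);  // both slots look like smis
  assert(JoinFromTwoSmis(hi, lo) == 0x123456789abcdef1ull);
  return 0;
}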
@@ -819,7 +809,7 @@ class MacroAssembler: public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(const Operand& dst, int64_t x);
+ void Set(const Operand& dst, intptr_t x);
  // The cvtsi2sd instruction only writes to the low 64 bits of the dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -830,8 +820,13 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
- // Bit-field support.
- void TestBit(const Operand& dst, int bit_index);
+  // Test a bit in, or load, a SharedFunctionInfo special field.
+ void TestBitSharedFunctionInfoSpecialField(Register base,
+ int offset,
+ int bit_index);
+ void LoadSharedFunctionInfoSpecialField(Register dst,
+ Register base,
+ int offset);
// Handle support
void Move(Register dst, Handle<Object> source);
@@ -852,32 +847,59 @@ class MacroAssembler: public Assembler {
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
+ // Emit code to discard a positive number of pointer-sized elements
+  // from the stack under the return address, which remains on top,
+ // clobbering the rsp register.
+ void DropUnderReturnAddress(int stack_elements,
+ Register scratch = kScratchRegister);
void Call(Label* target) { call(target); }
- void Push(Register src) { push(src); }
- void Pop(Register dst) { pop(dst); }
- void PushReturnAddressFrom(Register src) { push(src); }
- void PopReturnAddressTo(Register dst) { pop(dst); }
- void MoveDouble(Register dst, const Operand& src) { movq(dst, src); }
- void MoveDouble(const Operand& dst, Register src) { movq(dst, src); }
+ void Push(Register src);
+ void Push(const Operand& src);
+ void PushQuad(const Operand& src);
+ void Push(Immediate value);
+ void PushImm32(int32_t imm32);
+ void Pop(Register dst);
+ void Pop(const Operand& dst);
+ void PopQuad(const Operand& dst);
+ void PushReturnAddressFrom(Register src) { pushq(src); }
+ void PopReturnAddressTo(Register dst) { popq(dst); }
void Move(Register dst, ExternalReference ext) {
- movq(dst, reinterpret_cast<Address>(ext.address()),
+ movp(dst, reinterpret_cast<void*>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
}
+ // Loads a pointer into a register with a relocation mode.
+ void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
+ // This method must not be used with heap object references. The stored
+ // address is not GC safe. Use the handle version instead.
+ ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
+ movp(dst, ptr, rmode);
+ }
+
+ void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
+ AllowDeferredHandleDereference using_raw_address;
+ ASSERT(!RelocInfo::IsNone(rmode));
+ ASSERT(value->IsHeapObject());
+ ASSERT(!isolate()->heap()->InNewSpace(*value));
+ movp(dst, reinterpret_cast<void*>(value.location()), rmode);
+ }
+
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
+ void Jump(const Operand& op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
+ void Call(const Operand& op);
void Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
// The size of the code generated for different call instructions.
- int CallSize(Address destination, RelocInfo::Mode rmode) {
+ int CallSize(Address destination) {
return kCallSequenceLength;
}
int CallSize(ExternalReference ext);
@@ -1013,7 +1035,7 @@ class MacroAssembler: public Assembler {
MinusZeroMode minus_zero_mode, Label* lost_precision,
Label::Distance dst = Label::kFar);
- void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+ void LoadUint32(XMMRegister dst, Register src);
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
@@ -1021,11 +1043,32 @@ class MacroAssembler: public Assembler {
template<typename Field>
void DecodeField(Register reg) {
- static const int shift = Field::kShift + kSmiShift;
+ static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift;
- shr(reg, Immediate(shift));
- and_(reg, Immediate(mask));
- shl(reg, Immediate(kSmiShift));
+ if (shift != 0) {
+ shrp(reg, Immediate(shift));
+ }
+ andp(reg, Immediate(mask));
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register reg) {
+ if (SmiValuesAre32Bits()) {
+ andp(reg, Immediate(Field::kMask));
+ shlp(reg, Immediate(kSmiShift - Field::kShift));
+ } else {
+ static const int shift = Field::kShift;
+ static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(kSmiShift == kSmiTagSize);
+ ASSERT((mask & 0x80000000u) == 0);
+ if (shift < kSmiShift) {
+ shlp(reg, Immediate(kSmiShift - shift));
+ } else if (shift > kSmiShift) {
+ sarp(reg, Immediate(shift - kSmiShift));
+ }
+ andp(reg, Immediate(mask));
+ }
}
// Abort execution if argument is not a number, enabled via --debug-code.
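DecodeField now works on untagged values (shift right by Field::kShift, then mask), and DecodeFieldToSmi folds the re-tagging into the shifts. A minimal sketch of the untagged variant against a hypothetical BitField layout (the real class is V8's BitField template; the field position here is made up):

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for V8's BitField<T, shift, size> template.
template <class T, int kShiftBits, int kSizeBits>
struct BitField {
  static const int kShift = kShiftBits;
  static const uint32_t kMask = ((1u << kSizeBits) - 1) << kShiftBits;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
};

// What DecodeField emits: shrp by kShift (skipped when zero), then andp.
template <class Field>
uint32_t DecodeField(uint32_t reg) {
  const uint32_t mask = Field::kMask >> Field::kShift;
  if (Field::kShift != 0) reg >>= Field::kShift;
  return reg & mask;
}

int main() {
  typedef BitField<uint32_t, 3, 5> ElementsKindBits;          // assumed layout
  uint32_t bit_field2 = ElementsKindBits::encode(11) | 0x7;   // unrelated low bits
  assert(DecodeField<ElementsKindBits>(bit_field2) == 11u);
  return 0;
}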
@@ -1048,6 +1091,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
// Abort execution if argument is not the root value with the given index,
// enabled via --debug-code.
void AssertRootValue(Register src,
@@ -1070,12 +1117,6 @@ class MacroAssembler: public Assembler {
// Propagate an uncatchable exception out of the current JS stack.
void ThrowUncatchable(Register value);
- // Throw a message string as an exception.
- void Throw(BailoutReason reason);
-
- // Throw a message string as an exception if a condition is not true.
- void ThrowIf(Condition cc, BailoutReason reason);
-
// ---------------------------------------------------------------------------
// Inline caching support
@@ -1211,10 +1252,6 @@ class MacroAssembler: public Assembler {
Label* miss,
bool miss_on_bound_function = false);
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
@@ -1235,15 +1272,8 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1309,8 +1339,8 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Clobbers r14, r15, rbx and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
Operand return_value_operand,
@@ -1371,6 +1401,10 @@ class MacroAssembler: public Assembler {
Register filler);
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in rdx, and rax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1456,7 +1490,7 @@ class MacroAssembler: public Assembler {
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);
- intptr_t RootRegisterDelta(ExternalReference other);
+ int64_t RootRegisterDelta(ExternalReference other);
// Moves the smi value to the destination register.
void LoadSmiConstant(Register dst, Smi* value);
@@ -1473,8 +1507,7 @@ class MacroAssembler: public Assembler {
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump = Label::kFar,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
+ const CallWrapper& call_wrapper = NullCallWrapper());
void EnterExitFramePrologue(bool save_rax);
@@ -1492,19 +1525,17 @@ class MacroAssembler: public Assembler {
Register scratch,
AllocationFlags flags);
+ void MakeSureDoubleAlignedHelper(Register result,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
// Update allocation top with value in result_end register.
// If scratch is valid, it contains the address of the allocation top.
void UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags);
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- Object* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
@@ -1609,9 +1640,9 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
masm->pushfq(); \
masm->Pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
+ masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
- masm->pop(rax); \
+ masm->Pop(rax); \
masm->Popad(); \
masm->popfq(); \
} \
diff --git a/chromium/v8/src/x64/regexp-macro-assembler-x64.cc b/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
index 3e65a68b831..a8c1cb47a7f 100644
--- a/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "cpu-profiler.h"
-#include "serialize.h"
-#include "unicode.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "x64/regexp-macro-assembler-x64.h"
+#include "src/cpu-profiler.h"
+#include "src/serialize.h"
+#include "src/unicode.h"
+#include "src/log.h"
+#include "src/regexp-stack.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/x64/regexp-macro-assembler-x64.h"
namespace v8 {
namespace internal {
@@ -166,7 +143,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
ASSERT(reg >= 0);
ASSERT(reg < num_registers_);
if (by != 0) {
- __ addq(register_location(reg), Immediate(by));
+ __ addp(register_location(reg), Immediate(by));
}
}
@@ -175,7 +152,7 @@ void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(rbx);
- __ addq(rbx, code_object_pointer());
+ __ addp(rbx, code_object_pointer());
__ jmp(rbx);
}
@@ -203,8 +180,8 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(equal, on_at_start);
__ bind(&not_at_start);
}
@@ -215,8 +192,8 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -241,9 +218,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
int start_reg,
Label* on_no_match) {
Label fallthrough;
- __ movq(rdx, register_location(start_reg)); // Offset of start of capture
- __ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
- __ subq(rbx, rdx); // Length of capture.
+ ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
+ ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
+ __ subp(rbx, rdx); // Length of capture.
// -----------------------
// rdx = Start offset of capture.
@@ -273,9 +250,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
on_no_match = &backtrack_label_;
}
- __ lea(r9, Operand(rsi, rdx, times_1, 0));
- __ lea(r11, Operand(rsi, rdi, times_1, 0));
- __ addq(rbx, r9); // End of capture
+ __ leap(r9, Operand(rsi, rdx, times_1, 0));
+ __ leap(r11, Operand(rsi, rdi, times_1, 0));
+ __ addp(rbx, r9); // End of capture
// ---------------------
// r11 - current input character address
// r9 - current capture character address
@@ -293,8 +270,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Mismatch, try case-insensitive match (converting letters to lower-case).
// I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
// a match.
- __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
+ __ orp(rax, Immediate(0x20)); // Convert match character to lower-case.
+ __ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ cmpb(rax, rdx);
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
@@ -308,24 +285,24 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(equal, on_no_match);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
- __ addq(r11, Immediate(1));
- __ addq(r9, Immediate(1));
+ __ addp(r11, Immediate(1));
+ __ addp(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
- __ cmpq(r9, rbx);
+ __ cmpp(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
- __ movq(rdi, r11);
+ __ movp(rdi, r11);
__ subq(rdi, rsi);
} else {
ASSERT(mode_ == UC16);
// Save important/volatile registers before calling C function.
#ifndef _WIN64
// Caller save on Linux and callee save in Windows.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
- __ push(backtrack_stackpointer());
+ __ pushq(backtrack_stackpointer());
static const int num_arguments = 4;
__ PrepareCallCFunction(num_arguments);
@@ -337,22 +314,22 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Isolate* isolate
#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
- __ lea(rcx, Operand(rsi, rdx, times_1, 0));
+ __ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ lea(rdx, Operand(rsi, rdi, times_1, 0));
+ __ leap(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length.
- __ movq(r8, rbx);
+ __ movp(r8, rbx);
// Isolate.
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention
// Compute byte_offset2 (current position = rsi+rdi).
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture).
- __ lea(rdi, Operand(rsi, rdx, times_1, 0));
+ __ leap(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ movq(rsi, rax);
+ __ movp(rsi, rax);
// Set byte_length.
- __ movq(rdx, rbx);
+ __ movp(rdx, rbx);
// Isolate.
__ LoadAddress(rcx, ExternalReference::isolate_address(isolate()));
#endif
@@ -367,14 +344,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(backtrack_stackpointer());
+ __ popq(backtrack_stackpointer());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
// Check if function returned non-zero for success or zero for failure.
- __ testq(rax, rax);
+ __ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
@@ -390,9 +367,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
Label fallthrough;
// Find length of back-referenced capture.
- __ movq(rdx, register_location(start_reg));
- __ movq(rax, register_location(start_reg + 1));
- __ subq(rax, rdx); // Length to check.
+ ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
+ ReadPositionFromRegister(rax, start_reg + 1); // Offset of end of capture
+ __ subp(rax, rdx); // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
// This must not happen (no back-reference can reference a capture that wasn't
@@ -412,9 +389,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
BranchOrBacktrack(greater, on_no_match);
// Compute pointers to match string and capture string
- __ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
- __ addq(rdx, rsi); // Start of capture.
- __ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
+ __ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
+ __ addp(rdx, rsi); // Start of capture.
+ __ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
// -----------------------
// rbx - current capture character address.
@@ -433,15 +410,15 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
}
BranchOrBacktrack(not_equal, on_no_match);
// Increment pointers into capture and match string.
- __ addq(rbx, Immediate(char_size()));
- __ addq(rdx, Immediate(char_size()));
+ __ addp(rbx, Immediate(char_size()));
+ __ addp(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmpq(rdx, r9);
+ __ cmpp(rdx, r9);
__ j(below, &loop);
// Success.
// Set current character position to position after match.
- __ movq(rdi, rbx);
+ __ movp(rdi, rbx);
__ subq(rdi, rsi);
__ bind(&fallthrough);
@@ -462,7 +439,7 @@ void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(equal, on_equal);
@@ -476,7 +453,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(not_equal, on_not_equal);
@@ -489,8 +466,8 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ lea(rax, Operand(current_character(), -minus));
- __ and_(rax, Immediate(mask));
+ __ leap(rax, Operand(current_character(), -minus));
+ __ andp(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal);
}
@@ -522,8 +499,8 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
__ Move(rax, table);
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ movq(rbx, current_character());
- __ and_(rbx, Immediate(kTableMask));
+ __ movp(rbx, current_character());
+ __ andp(rbx, Immediate(kTableMask));
index = rbx;
}
__ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
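CheckBitInTable tests a character class with a single byte load: the class is precompiled into a byte table, the character is masked down to the table size, and the loaded entry decides the branch. A scalar sketch, assuming the 128-entry table of RegExpMacroAssembler::kTableSize:

#include <cassert>
#include <cstdint>

static const int kTableSize = 128;  // assumed table size
static const int kTableMask = kTableSize - 1;

static bool BitInTable(const uint8_t table[kTableSize], uint32_t c) {
  return table[c & kTableMask] != 0;  // cmpb(FieldOperand(...), Immediate(0))
}

int main() {
  uint8_t digits[kTableSize] = {0};
  for (int c = '0'; c <= '9'; c++) digits[c] = 1;
  assert(BitInTable(digits, '7'));
  assert(!BitInTable(digits, 'a'));
  return 0;
}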
@@ -536,7 +513,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check, using the sequence:
- // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
+ // leap(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
// cmp(rax, Immediate(max - min))
switch (type) {
case 's':
@@ -547,7 +524,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear);
// Check range 0x09..0x0d
- __ lea(rax, Operand(current_character(), -'\t'));
+ __ leap(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
@@ -562,20 +539,20 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
      // Match anything but ASCII digits ('0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
@@ -593,7 +570,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
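The min..max trick described in the comment above exploits unsigned wraparound: subtracting min sends any character below min to a huge unsigned value, so a single unsigned comparison against max - min covers both bounds. For example:

#include <cassert>

// One unsigned compare covers both bounds of min <= c <= max.
static bool InRange(unsigned c, unsigned min, unsigned max) {
  return c - min <= max - min;
}

int main() {
  // The 0x09..0x0d whitespace check from the 's' case above.
  assert(InRange('\n', '\t', '\r'));
  assert(!InRange(' ', '\t', '\r'));
  assert(!InRange('\x07', '\t', '\r'));  // below min wraps to a huge value
  return 0;
}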
@@ -674,8 +651,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
FrameScope scope(&masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
- __ push(rbp);
- __ movq(rbp, rsp);
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
  // to order of kBackup_rbx etc.
#ifdef _WIN64
@@ -686,30 +663,30 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(Operand(rbp, kInputStart), r8);
__ movq(Operand(rbp, kInputEnd), r9);
// Callee-save on Win64.
- __ push(rsi);
- __ push(rdi);
- __ push(rbx);
+ __ pushq(rsi);
+ __ pushq(rdi);
+ __ pushq(rbx);
#else
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
// Push register parameters on stack for reference.
- ASSERT_EQ(kInputString, -1 * kPointerSize);
- ASSERT_EQ(kStartIndex, -2 * kPointerSize);
- ASSERT_EQ(kInputStart, -3 * kPointerSize);
- ASSERT_EQ(kInputEnd, -4 * kPointerSize);
- ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
- __ push(rdi);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx);
- __ push(r8);
- __ push(r9);
-
- __ push(rbx); // Callee-save
+ ASSERT_EQ(kInputString, -1 * kRegisterSize);
+ ASSERT_EQ(kStartIndex, -2 * kRegisterSize);
+ ASSERT_EQ(kInputStart, -3 * kRegisterSize);
+ ASSERT_EQ(kInputEnd, -4 * kRegisterSize);
+ ASSERT_EQ(kRegisterOutput, -5 * kRegisterSize);
+ ASSERT_EQ(kNumOutputRegisters, -6 * kRegisterSize);
+ __ pushq(rdi);
+ __ pushq(rsi);
+ __ pushq(rdx);
+ __ pushq(rcx);
+ __ pushq(r8);
+ __ pushq(r9);
+
+ __ pushq(rbx); // Callee-save
#endif
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ Push(Immediate(0)); // Number of successful matches in a global regexp.
+ __ Push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -717,14 +694,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
- __ movq(rcx, rsp);
+ __ movp(rcx, rsp);
__ Move(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
+ __ subp(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
+ __ cmpp(rcx, Immediate(num_registers_ * kPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
@@ -734,32 +711,32 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &return_rax);
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ subq(rsp, Immediate(num_registers_ * kPointerSize));
+ __ subp(rsp, Immediate(num_registers_ * kPointerSize));
// Load string length.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
- __ movq(rdi, Operand(rbp, kInputStart));
+ __ movp(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
__ subq(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
- __ movq(rbx, Operand(rbp, kStartIndex));
- __ neg(rbx);
+ __ movp(rbx, Operand(rbp, kStartIndex));
+ __ negq(rbx);
if (mode_ == UC16) {
- __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
- __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_1, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ movq(Operand(rbp, kInputStartMinusOne), rax);
+ __ movp(Operand(rbp, kInputStartMinusOne), rax);
#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
@@ -769,7 +746,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
- __ movq(register_location(i), rax); // One write every page.
+ __ movp(register_location(i), rax); // One write every page.
}
#endif // V8_OS_WIN
@@ -798,20 +775,20 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ Set(rcx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
+ __ movp(Operand(rbp, rcx, times_1, 0), rax);
__ subq(rcx, Immediate(kPointerSize));
__ cmpq(rcx,
Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(register_location(i), rax);
+ __ movp(register_location(i), rax);
}
}
}
// Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ movp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
__ jmp(&start_label_);
@@ -821,24 +798,24 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ movq(rdx, Operand(rbp, kStartIndex));
- __ movq(rbx, Operand(rbp, kRegisterOutput));
- __ movq(rcx, Operand(rbp, kInputEnd));
- __ subq(rcx, Operand(rbp, kInputStart));
+ __ movp(rdx, Operand(rbp, kStartIndex));
+ __ movp(rbx, Operand(rbp, kRegisterOutput));
+ __ movp(rcx, Operand(rbp, kInputEnd));
+ __ subp(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
- __ lea(rcx, Operand(rcx, rdx, times_2, 0));
+ __ leap(rcx, Operand(rcx, rdx, times_2, 0));
} else {
- __ addq(rcx, rdx);
+ __ addp(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(rax, register_location(i));
+ __ movp(rax, register_location(i));
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in rdx for the zero-length check later.
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
}
- __ addq(rax, rcx); // Convert to index from start, not end.
+ __ addp(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
- __ sar(rax, Immediate(1)); // Convert byte index to character index.
+ __ sarp(rax, Immediate(1)); // Convert byte index to character index.
}
__ movl(Operand(rbx, i * kIntSize), rax);
}
@@ -847,31 +824,31 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
+ __ incp(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
- __ subq(rcx, Immediate(num_saved_registers_));
+ __ subp(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
- __ cmpq(rcx, Immediate(num_saved_registers_));
+ __ cmpp(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
- __ movq(Operand(rbp, kNumOutputRegisters), rcx);
+ __ movp(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
- __ addq(Operand(rbp, kRegisterOutput),
+ __ addp(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kInputStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
- __ cmpq(rdi, rdx);
+ __ cmpp(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
- __ testq(rdi, rdi);
+ __ testp(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
@@ -883,32 +860,32 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ jmp(&load_char_start_regexp);
} else {
- __ movq(rax, Immediate(SUCCESS));
+ __ movp(rax, Immediate(SUCCESS));
}
}
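The zero-length handling above is what keeps a global regexp from spinning on
an empty match: if the capture start equals the current position and the end
of input has not been reached, the position is bumped by one character (two
bytes in UC16) before restarting. A hedged scalar sketch of that rule, with
illustrative names:

    #include <cstddef>

    // Where a global matcher should resume scanning after a match.
    size_t NextScanStart(size_t match_start, size_t match_end,
                         size_t char_size) {
      // A zero-length match must advance by one character, or the matcher
      // would keep finding the same empty match forever.
      return (match_end == match_start) ? match_end + char_size : match_end;
    }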
__ bind(&exit_label_);
if (global()) {
// Return the number of successful captures.
- __ movq(rax, Operand(rbp, kSuccessfulCaptures));
+ __ movp(rax, Operand(rbp, kSuccessfulCaptures));
}
__ bind(&return_rax);
#ifdef _WIN64
// Restore callee save registers.
- __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rsi);
+ __ leap(rsp, Operand(rbp, kLastCalleeSaveRegister));
+ __ popq(rbx);
+ __ popq(rdi);
+ __ popq(rsi);
// Stack now at rbp.
#else
// Restore callee save register.
- __ movq(rbx, Operand(rbp, kBackup_rbx));
+ __ movp(rbx, Operand(rbp, kBackup_rbx));
// Move rsp back up to rbp.
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
#endif
// Exit function frame, restore previous one.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
// Backtrack code (branch target for conditional backtracks).
@@ -923,21 +900,21 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- __ push(backtrack_stackpointer());
- __ push(rdi);
+ __ pushq(backtrack_stackpointer());
+ __ pushq(rdi);
CallCheckStackGuardState();
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If the returned value is non-zero, we should end execution with that
// value as the result.
__ j(not_zero, &return_rax);
// Restore registers.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(rdi);
- __ pop(backtrack_stackpointer());
+ __ popq(rdi);
+ __ popq(backtrack_stackpointer());
// String might have moved: Reload rsi from frame.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movp(rsi, Operand(rbp, kInputEnd));
SafeReturn();
}
@@ -950,8 +927,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Save registers before calling C function
#ifndef _WIN64
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
// Call GrowStack(backtrack_stackpointer())
@@ -960,12 +937,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
- __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
+ __ leap(rdx, Operand(rbp, kStackHighEnd)); // Second argument
__ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movq(rdi, backtrack_stackpointer()); // First argument.
- __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
+ __ movp(rdi, backtrack_stackpointer()); // First argument.
+ __ leap(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
ExternalReference grow_stack =
@@ -973,15 +950,15 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If it returned NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
- __ movq(backtrack_stackpointer(), rax);
+ __ movp(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
SafeReturn();
}
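From the register assignments above, the C helper being invoked plausibly has
a signature along these lines (the exact types are an assumption, not V8's
declaration):

    // First argument: current backtrack stack pointer.
    // Second argument: address of the slot holding the stack's high end.
    // Third argument: the isolate.
    // Returns the new stack pointer, or NULL if the stack could not grow.
    extern "C" void* GrowStack(void* backtrack_sp, void** stack_base,
                               void* isolate);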
@@ -1015,7 +992,7 @@ void RegExpMacroAssemblerX64::GoTo(Label* to) {
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(greater_equal, if_ge);
}
@@ -1023,14 +1000,14 @@ void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
Label* if_eq) {
- __ cmpq(rdi, register_location(reg));
+ __ cmpp(rdi, register_location(reg));
BranchOrBacktrack(equal, if_eq);
}
@@ -1061,7 +1038,7 @@ void RegExpMacroAssemblerX64::PopCurrentPosition() {
void RegExpMacroAssemblerX64::PopRegister(int register_index) {
Pop(rax);
- __ movq(register_location(register_index), rax);
+ __ movp(register_location(register_index), rax);
}
@@ -1078,26 +1055,44 @@ void RegExpMacroAssemblerX64::PushCurrentPosition() {
void RegExpMacroAssemblerX64::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
- __ movq(rax, register_location(register_index));
+ __ movp(rax, register_location(register_index));
Push(rax);
if (check_stack_limit) CheckStackLimit();
}
+STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
+
+
void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
- __ movq(rdi, register_location(reg));
+ if (kPointerSize == kInt64Size) {
+ __ movq(rdi, register_location(reg));
+ } else {
+ // Need sign extension for x32 as rdi might be used as an index register.
+ __ movsxlq(rdi, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) {
+ if (kPointerSize == kInt64Size) {
+ __ movq(dst, register_location(reg));
+ } else {
+ // Need sign extension for x32 as dst might be used as an index register.
+ __ movsxlq(dst, register_location(reg));
+ }
}
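The x32 branch matters because current positions are stored as negative
offsets from the string end: a 32-bit slot holding -2 must widen to a 64-bit
-2 before the register is used for indexing, which is what movsxlq provides
and a plain 32-bit load would not. A hedged illustration:

    #include <cstdint>

    // The bool stands in for the kPointerSize == kInt64Size check; the slot
    // layout is assumed for illustration.
    int64_t LoadPosition(const void* slot, bool pointers_are_64_bit) {
      if (pointers_are_64_bit) {
        return *static_cast<const int64_t*>(slot);  // movq
      }
      // Sign-extend as movsxlq does: 0xFFFFFFFE becomes -2, not 4294967294.
      return static_cast<int64_t>(*static_cast<const int32_t*>(slot));
    }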
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
- __ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ movp(backtrack_stackpointer(), register_location(reg));
+ __ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
}
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
- __ cmpq(rdi, Immediate(-by * char_size()));
+ __ cmpp(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
@@ -1110,7 +1105,7 @@ void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ movq(register_location(register_index), Immediate(to));
+ __ movp(register_location(register_index), Immediate(to));
}
@@ -1123,27 +1118,27 @@ bool RegExpMacroAssemblerX64::Succeed() {
void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
if (cp_offset == 0) {
- __ movq(register_location(reg), rdi);
+ __ movp(register_location(reg), rdi);
} else {
- __ lea(rax, Operand(rdi, cp_offset * char_size()));
- __ movq(register_location(reg), rax);
+ __ leap(rax, Operand(rdi, cp_offset * char_size()));
+ __ movp(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
ASSERT(reg_from <= reg_to);
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kInputStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
- __ movq(register_location(reg), rax);
+ __ movp(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movq(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
- __ movq(register_location(reg), rax);
+ __ movp(rax, backtrack_stackpointer());
+ __ subp(rax, Operand(rbp, kStackHighEnd));
+ __ movp(register_location(reg), rax);
}
@@ -1156,20 +1151,20 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
- __ movq(rdx, code_object_pointer());
+ __ movp(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
- __ movq(r8, rbp);
+ __ movp(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rcx, Operand(rsp, -kPointerSize));
+ __ leap(rcx, Operand(rsp, -kPointerSize));
#else
// Third argument: RegExp code frame pointer.
- __ movq(rdx, rbp);
+ __ movp(rdx, rbp);
// Second argument: Code* of self.
- __ movq(rsi, code_object_pointer());
+ __ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rdi, Operand(rsp, -kPointerSize));
+ __ leap(rdi, Operand(rsp, -kRegisterSize));
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state(isolate());
@@ -1188,7 +1183,8 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
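The replacement guard is presumably a direct comparison of the current C++
stack position against the isolate's JS stack limit, rather than a test of a
previously latched overflow flag. A one-line sketch of that predicate, with
the addresses passed in explicitly (a stand-in, not V8's StackLimitCheck):

    #include <cstdint>

    // The stack grows downward, so "overflowed" means the stack position
    // has fallen below the limit.
    bool JsHasOverflowed(uintptr_t current_stack_position,
                         uintptr_t js_stack_limit) {
      return current_stack_position < js_stack_limit;
    }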
@@ -1215,7 +1211,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+ Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid
intptr_t delta = code_handle->address() - re_code->address();
@@ -1323,12 +1319,12 @@ void RegExpMacroAssemblerX64::SafeCall(Label* to) {
void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
__ bind(label);
- __ subq(Operand(rsp, 0), code_object_pointer());
+ __ subp(Operand(rsp, 0), code_object_pointer());
}
void RegExpMacroAssemblerX64::SafeReturn() {
- __ addq(Operand(rsp, 0), code_object_pointer());
+ __ addp(Operand(rsp, 0), code_object_pointer());
__ ret(0);
}
@@ -1336,14 +1332,14 @@ void RegExpMacroAssemblerX64::SafeReturn() {
void RegExpMacroAssemblerX64::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerX64::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), value);
}
@@ -1367,7 +1363,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
MarkPositionForCodeRelativeFixup();
}
@@ -1377,12 +1373,12 @@ void RegExpMacroAssemblerX64::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
__ movsxlq(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
void RegExpMacroAssemblerX64::Drop() {
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
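Push, Pop and Drop all treat the backtrack stack as a downward-growing stack
of 32-bit entries (kIntSize), even on 64-bit targets, with Pop sign-extending
through movsxlq. A compact model under those assumptions:

    #include <cstdint>

    struct BacktrackStack {
      int32_t* sp;  // models backtrack_stackpointer()

      void Push(int32_t value) { *--sp = value; }  // subp kIntSize; movl
      int64_t Pop() { return *sp++; }  // movsxlq widens, then addp kIntSize
      void Drop() { ++sp; }            // addp kIntSize, value discarded
    };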
@@ -1392,7 +1388,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(rsp, rax);
+ __ cmpp(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
@@ -1406,7 +1402,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
+ __ cmpp(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
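Both guards share one shape: load a limit, compare a stack pointer against
it, and take the SafeCall slow path only once the limit has been crossed. A
minimal predicate capturing the comparison (the <= direction mirrors the
j(above, ...) fall-through above):

    #include <cstdint>

    // True when the guard helper must run; the generated code skips the
    // SafeCall only while the stack pointer is strictly above the limit.
    bool NeedsGuardCall(uintptr_t stack_pointer, uintptr_t limit) {
      return stack_pointer <= limit;
    }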
diff --git a/chromium/v8/src/x64/regexp-macro-assembler-x64.h b/chromium/v8/src/x64/regexp-macro-assembler-x64.h
index b230ea47fc6..89d8d3b5cc7 100644
--- a/chromium/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/chromium/v8/src/x64/regexp-macro-assembler-x64.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#include "macro-assembler.h"
-#include "code.h"
-#include "x64/macro-assembler-x64.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/assembler-x64-inl.h"
+#include "src/macro-assembler.h"
+#include "src/code.h"
+#include "src/x64/macro-assembler-x64.h"
namespace v8 {
namespace internal {
@@ -135,8 +112,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Offsets from rbp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
+ static const int kReturn_eip = kFramePointer + kRegisterSize;
+ static const int kFrameAlign = kReturn_eip + kRegisterSize;
#ifdef _WIN64
// Parameters (first four passed as registers, but with room on stack).
@@ -145,49 +122,50 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// use this space to store the register passed parameters.
static const int kInputString = kFrameAlign;
// StartIndex is passed as 32 bit int.
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
+ static const int kStartIndex = kInputString + kRegisterSize;
+ static const int kInputStart = kStartIndex + kRegisterSize;
+ static const int kInputEnd = kInputStart + kRegisterSize;
+ static const int kRegisterOutput = kInputEnd + kRegisterSize;
// For a global regular expression, we have room to store at least
// one set of capture results. For a non-global regexp, we ignore
// this value. NumOutputRegisters is passed as a 32-bit value. The upper
// 32 bits of this 64-bit stack slot may contain garbage.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput + kRegisterSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kRegisterSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kRegisterSize;
+ static const int kIsolate = kDirectCall + kRegisterSize;
#else
// In the AMD64 ABI calling convention, the first six integer parameters
// are passed in registers, and the caller must allocate space on the stack
// if it wants them stored. We push the parameters after the frame pointer.
- static const int kInputString = kFramePointer - kPointerSize;
- static const int kStartIndex = kInputString - kPointerSize;
- static const int kInputStart = kStartIndex - kPointerSize;
- static const int kInputEnd = kInputStart - kPointerSize;
- static const int kRegisterOutput = kInputEnd - kPointerSize;
+ static const int kInputString = kFramePointer - kRegisterSize;
+ static const int kStartIndex = kInputString - kRegisterSize;
+ static const int kInputStart = kStartIndex - kRegisterSize;
+ static const int kInputEnd = kInputStart - kRegisterSize;
+ static const int kRegisterOutput = kInputEnd - kRegisterSize;
+
// For a global regular expression, we have room to store at least
// one set of capture results. For a non-global regexp, we ignore
// this value.
- static const int kNumOutputRegisters = kRegisterOutput - kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput - kRegisterSize;
static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kRegisterSize;
+ static const int kIsolate = kDirectCall + kRegisterSize;
#endif
#ifdef _WIN64
// The Microsoft calling convention has three callee-saved registers
// (that we use). We push these after the frame pointer.
- static const int kBackup_rsi = kFramePointer - kPointerSize;
- static const int kBackup_rdi = kBackup_rsi - kPointerSize;
- static const int kBackup_rbx = kBackup_rdi - kPointerSize;
+ static const int kBackup_rsi = kFramePointer - kRegisterSize;
+ static const int kBackup_rdi = kBackup_rsi - kRegisterSize;
+ static const int kBackup_rbx = kBackup_rdi - kRegisterSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#else
// The AMD64 calling convention has only one callee-saved register that
// we use. We push it after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kNumOutputRegisters - kPointerSize;
+ static const int kBackup_rbx = kNumOutputRegisters - kRegisterSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
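The systematic kPointerSize -> kRegisterSize swap in these offsets is the x32
accommodation: on x32 a tagged pointer is 4 bytes, but push/pop and the
argument slots around a call still occupy full 8-byte machine words. A short
sketch of the distinction, reusing the constant names from this header:

    const int kInt32Size = 4;
    const int kInt64Size = 8;
    const int kPointerSize = static_cast<int>(sizeof(void*));  // 4 on x32
    const int kRegisterSize = kInt64Size;  // stack slots stay 64-bit

    // Frame-slot offsets therefore step by register size, for example:
    const int kFramePointer = 0;
    const int kReturn_eip = kFramePointer + kRegisterSize;
    const int kFrameAlign = kReturn_eip + kRegisterSize;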
@@ -268,6 +246,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
+ inline void ReadPositionFromRegister(Register dst, int reg);
+
Isolate* isolate() const { return masm_.isolate(); }
MacroAssembler masm_;
diff --git a/chromium/v8/src/x64/simulator-x64.cc b/chromium/v8/src/x64/simulator-x64.cc
index 448b025a6bf..f7f2fb4bb4f 100644
--- a/chromium/v8/src/x64/simulator-x64.cc
+++ b/chromium/v8/src/x64/simulator-x64.cc
@@ -1,26 +1,3 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
diff --git a/chromium/v8/src/x64/simulator-x64.h b/chromium/v8/src/x64/simulator-x64.h
index 8aba70181f4..35cbdc78884 100644
--- a/chromium/v8/src/x64/simulator-x64.h
+++ b/chromium/v8/src/x64/simulator-x64.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_SIMULATOR_X64_H_
#define V8_X64_SIMULATOR_X64_H_
-#include "allocation.h"
+#include "src/allocation.h"
namespace v8 {
namespace internal {
@@ -47,9 +24,6 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
-
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on x64 uses the C stack, we
// just use the C stack limit.
diff --git a/chromium/v8/src/x64/stub-cache-x64.cc b/chromium/v8/src/x64/stub-cache-x64.cc
index c87f00fc4db..422ef2e0626 100644
--- a/chromium/v8/src/x64/stub-cache-x64.cc
+++ b/chromium/v8/src/x64/stub-cache-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "arguments.h"
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
+#include "src/arguments.h"
+#include "src/ic-inl.h"
+#include "src/codegen.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -49,10 +26,12 @@ static void ProbeTable(Isolate* isolate,
// The offset is scaled by 4, based on
// kHeapObjectTagSize, which is two bits
Register offset) {
- // We need to scale up the pointer by 2 because the offset is scaled by less
+ // We need to scale up the pointer by 2 when the offset is scaled by less
// than the pointer size.
- ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
- ScaleFactor scale_factor = times_2;
+ ASSERT(kPointerSize == kInt64Size
+ ? kPointerSizeLog2 == kHeapObjectTagSize + 1
+ : kPointerSizeLog2 == kHeapObjectTagSize);
+ ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
@@ -62,7 +41,7 @@ static void ProbeTable(Isolate* isolate,
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
+ __ leap(offset, Operand(offset, offset, times_2, 0));
__ LoadAddress(kScratchRegister, key_offset);
@@ -75,19 +54,19 @@ static void ProbeTable(Isolate* isolate,
// Get the map entry from the cache.
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
__ LoadAddress(kScratchRegister, value_offset);
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, 0));
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
@@ -100,7 +79,7 @@ static void ProbeTable(Isolate* isolate,
#endif
// Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(kScratchRegister);
__ bind(&miss);
@@ -119,7 +98,7 @@ void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
- __ movq(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
@@ -135,7 +114,7 @@ void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
// Load properties array.
Register properties = scratch0;
- __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
@@ -193,10 +172,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
+ __ xorp(scratch, Immediate(flags));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
@@ -204,11 +183,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ xorp(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ subl(scratch, name);
__ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
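Condensed to scalar form, the probe sequence above hashes the name's hash
field with the low 32 bits of the map word and the code flags, and on a
primary miss re-mixes with the name and flags for the secondary table. Table
sizes are powers of two, so the shifted masks double as the modulo. A sketch
under those assumptions:

    #include <cstdint>

    uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits,
                           uint32_t flags, uint32_t table_size,
                           uint32_t tag_bits) {
      // The low tag bits (always 01 for maps) are masked out of the result.
      return ((name_hash + map_bits) ^ flags) &
             ((table_size - 1) << tag_bits);
    }

    uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_bits,
                             uint32_t flags, uint32_t table_size,
                             uint32_t tag_bits) {
      return (primary_offset - name_bits + flags) &
             ((table_size - 1) << tag_bits);
    }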
@@ -224,18 +203,18 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
- __ movq(prototype,
+ __ movp(prototype,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- __ movq(prototype,
+ __ movp(prototype,
FieldOperand(prototype, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
- __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
+ __ movp(prototype, Operand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
- __ movq(prototype,
+ __ movp(prototype,
FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
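Written as straight-line pseudo-C++, the five movp loads above walk from the
current context to the target prototype. The structs below are illustrative
stand-ins for raw field offsets, not V8's types:

    struct Map { Map* prototype; };
    struct JSFunction { Map* initial_map; };
    struct NativeContext { JSFunction** slots; };
    struct GlobalObject { NativeContext* native_context; };
    struct Context { GlobalObject* global_object; };

    Map* LoadGlobalFunctionPrototype(Context* ctx, int index) {
      GlobalObject* global = ctx->global_object;       // context slot load
      NativeContext* native = global->native_context;  // native context
      JSFunction* fn = native->slots[index];           // function slot
      Map* initial_map = fn->initial_map;              // initial map
      return initial_map->prototype;                   // prototype
    }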
@@ -245,18 +224,22 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ Move(prototype, isolate->global_object());
- __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- prototype);
- __ j(not_equal, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ movp(scratch, Operand(rsi, offset));
+ __ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ Cmp(Operand(scratch, Context::SlotOffset(index)), function);
+ __ j(not_equal, miss);
+
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -272,55 +255,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
__ j(not_equal, miss_label);
// Load length directly from the JS array.
- __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
- __ ret(0);
-}
-
-
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ testl(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ movp(rax, FieldOperand(receiver, JSArray::kLengthOffset));
__ ret(0);
}
@@ -331,7 +266,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, result, miss_label);
- if (!result.is(rax)) __ movq(rax, result);
+ if (!result.is(rax)) __ movp(rax, result);
__ ret(0);
}
@@ -342,15 +277,15 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
offset = offset + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ movp(dst, FieldOperand(src, JSObject::kPropertiesOffset));
src = dst;
}
- __ movq(dst, FieldOperand(src, offset));
+ __ movp(dst, FieldOperand(src, offset));
}
@@ -364,13 +299,13 @@ static void PushInterceptorArguments(MacroAssembler* masm,
STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
- __ push(name);
+ __ Push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
__ Move(kScratchRegister, interceptor);
- __ push(kScratchRegister);
- __ push(receiver);
- __ push(holder);
+ __ Push(kScratchRegister);
+ __ Push(receiver);
+ __ Push(holder);
}
@@ -388,444 +323,85 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : last argument in the internal frame of the caller
- // -----------------------------------
- __ movq(scratch, StackOperandForReturnAddress(0));
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- __ movq(StackOperandForReturnAddress(0), scratch);
- __ Move(scratch, Smi::FromInt(0));
- StackArgumentsAccessor args(rsp, kFastApiCallArguments,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ movq(args.GetArgumentOperand(i), scratch);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address.
- // -- rsp[8] : last fast api call extra argument.
- // -- ...
- // -- rsp[kFastApiCallArguments * 8] : first fast api call extra
- // argument.
- // -- rsp[kFastApiCallArguments * 8 + 8] : last argument in the internal
- // frame.
- // -----------------------------------
- __ movq(scratch, StackOperandForReturnAddress(0));
- __ movq(StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize),
- scratch);
- __ addq(rsp, Immediate(kPointerSize * kFastApiCallArguments));
-}
-
-
-static void GenerateFastApiCallBody(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context);
-
-
-// Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- typedef FunctionCallbackArguments FCA;
- StackArgumentsAccessor args(rsp, argc + kFastApiCallArguments);
-
- // Save calling context.
- int offset = argc + kFastApiCallArguments;
- __ movq(args.GetArgumentOperand(offset - FCA::kContextSaveIndex), rsi);
-
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ Move(rdi, function);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Construct the FunctionCallbackInfo on the stack.
- __ movq(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi);
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(rcx, api_call_info);
- __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(args.GetArgumentOperand(offset - FCA::kDataIndex), rbx);
- } else {
- __ Move(args.GetArgumentOperand(offset - FCA::kDataIndex), call_data);
- }
- __ Move(kScratchRegister,
- ExternalReference::isolate_address(masm->isolate()));
- __ movq(args.GetArgumentOperand(offset - FCA::kIsolateIndex),
- kScratchRegister);
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueDefaultValueIndex),
- kScratchRegister);
- __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueOffset),
- kScratchRegister);
-
- // Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 7);
- __ lea(rax, Operand(rsp, 1 * kPointerSize));
-
- GenerateFastApiCallBody(masm, optimization, argc, false);
-}
-
-
// Generate call to api function.
-// This function uses push() to generate smaller, faster code than
-// the version above. It is an optimization that will be removed
-// when api call ICs are generated in hydrogen.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(optimization.is_simple_api_call());
- // Copy return value.
- __ pop(scratch1);
-
+ __ PopReturnAddressTo(scratch_in);
// receiver
- __ push(receiver);
-
+ __ Push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
Register arg = values[argc-1-i];
ASSERT(!receiver.is(arg));
- ASSERT(!scratch1.is(arg));
- ASSERT(!scratch2.is(arg));
- ASSERT(!scratch3.is(arg));
- __ push(arg);
+ ASSERT(!scratch_in.is(arg));
+ __ Push(arg);
+ }
+ __ PushReturnAddressFrom(scratch_in);
+ // Stack now matches the JSFunction ABI.
+
+ // ABI for CallApiFunctionStub.
+ Register callee = rax;
+ Register call_data = rbx;
+ Register holder = rcx;
+ Register api_function_address = rdx;
+ Register scratch = rdi; // scratch_in is no longer valid.
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ Move(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
}
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kArgsLength == 7);
-
- // context save
- __ push(rsi);
-
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ Move(scratch2, function);
- __ push(scratch2);
Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), isolate);
- // Push data from ExecutableAccessorInfo.
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ Move(callee, function);
+
bool call_data_undefined = false;
- if (isolate->heap()->InNewSpace(*call_data)) {
- __ Move(scratch2, api_call_info);
- __ movq(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
- } else if (call_data->IsUndefined()) {
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ Move(scratch, api_call_info);
+ __ movp(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
call_data_undefined = true;
- __ LoadRoot(scratch3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
} else {
- __ Move(scratch3, call_data);
- }
- // call data
- __ push(scratch3);
- if (!call_data_undefined) {
- __ LoadRoot(scratch3, Heap::kUndefinedValueRootIndex);
+ __ Move(call_data, call_data_obj);
}
- // return value
- __ push(scratch3);
- // return value default
- __ push(scratch3);
- // isolate
- __ Move(scratch3,
- ExternalReference::isolate_address(masm->isolate()));
- __ push(scratch3);
- // holder
- __ push(receiver);
-
- ASSERT(!scratch1.is(rax));
- // store receiver address for GenerateFastApiCallBody
- __ movq(rax, rsp);
-
- // return address
- __ push(scratch1);
-
- GenerateFastApiCallBody(masm, optimization, argc, true);
-}
-
-static void GenerateFastApiCallBody(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] - rsp[56] : FunctionCallbackInfo, incl.
- // : object passing the type check
- // (set by CheckPrototypes)
- // -- rsp[64] : last argument
- // -- ...
- // -- rsp[(argc + 7) * 8] : first argument
- // -- rsp[(argc + 8) * 8] : receiver
- //
- // rax : receiver address
- // -----------------------------------
- typedef FunctionCallbackArguments FCA;
-
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Function address is a foreign pointer outside V8's heap.
+ // Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ Move(
+ api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- __ PrepareCallApiFunction(kApiStackSpace);
-
- __ movq(StackSpaceOperand(0), rax); // FunctionCallbackInfo::implicit_args_.
- __ addq(rax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
- __ movq(StackSpaceOperand(1), rax); // FunctionCallbackInfo::values_.
- __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
- // FunctionCallbackInfo::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
-
-#if defined(__MINGW64__) || defined(_WIN64)
- Register arguments_arg = rcx;
- Register callback_arg = rdx;
-#else
- Register arguments_arg = rdi;
- Register callback_arg = rsi;
-#endif
-
- // v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
-
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
-
- StackArgumentsAccessor args_from_rbp(rbp, kFastApiCallArguments,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
- kFastApiCallArguments - 1 - FCA::kContextSaveIndex);
- Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- kFastApiCallArguments - 1 - FCA::kReturnValueOffset);
- __ CallApiFunctionAndReturn(
- function_address,
- thunk_address,
- callback_arg,
- argc + kFastApiCallArguments + 1,
- return_value_operand,
- restore_context ? &context_restore_operand : NULL);
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
}
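The rewritten helper no longer builds the FunctionCallbackInfo inline; it
loads four fixed registers and tail-calls CallApiFunctionStub. A struct
summarizing that register contract as read off the code above (documentation
only, not a V8 type):

    struct CallApiFunctionStubAbi {
      void* callee;                // rax: the JSFunction being called
      void* call_data;             // rbx: call data, or undefined
      void* holder;                // rcx: receiver or expected API holder
      void* api_function_address;  // rdx: address of the C++ callback
    };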
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(CallStubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
- handle(lookup->holder()), scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- } else {
- Handle<JSFunction> fun = optimization.constant_function();
- stub_compiler_->GenerateJumpFunction(object, fun);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, interceptor_holder,
- IC::kLoadPropertyWithInterceptorForCall);
-
- // Restore the name_ register.
- __ pop(name_);
-
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(receiver);
- __ push(holder);
- __ push(name_);
-
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, holder_obj,
- IC::kLoadPropertyWithInterceptorOnly);
-
- __ pop(name_);
- __ pop(holder);
- __ pop(receiver);
- // Leave the internal frame.
- }
-
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(not_equal, interceptor_succeeded);
- }
-
- CallStubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- ExtraICState extra_ic_state_;
-};
-
-
void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
Label* label,
Handle<Name> name) {
@@ -892,11 +468,26 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ Cmp(value_reg, constant);
__ j(not_equal, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, slow);
@@ -924,9 +515,9 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ PopReturnAddressTo(scratch1);
- __ push(receiver_reg);
+ __ Push(receiver_reg);
__ Push(transition);
- __ push(value_reg);
+ __ Push(value_reg);
__ PushReturnAddressFrom(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -938,7 +529,7 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Update the map of the object.
__ Move(scratch1, transition);
- __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+ __ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
@@ -969,16 +560,16 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
- __ movq(FieldOperand(receiver_reg, offset), storage_reg);
+ if (representation.IsDouble()) {
+ __ movp(FieldOperand(receiver_reg, offset), storage_reg);
} else {
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ __ movp(FieldOperand(receiver_reg, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(storage_reg, value_reg);
+ if (!representation.IsDouble()) {
+ __ movp(storage_reg, value_reg);
}
__ RecordWriteField(
receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs,
@@ -988,17 +579,17 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
- __ movq(FieldOperand(scratch1, offset), storage_reg);
+ __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (representation.IsDouble()) {
+ __ movp(FieldOperand(scratch1, offset), storage_reg);
} else {
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ __ movp(FieldOperand(scratch1, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(storage_reg, value_reg);
+ if (!representation.IsDouble()) {
+ __ movp(storage_reg, value_reg);
}
__ RecordWriteField(
scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs,
@@ -1027,29 +618,37 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- int index = lookup->GetFieldIndex().field_index();
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties are not going to change.
- index -= object->map()->inobject_properties();
+ FieldIndex index = lookup->GetFieldIndex();
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
// Load the double storage.
- if (index < 0) {
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(scratch1, FieldOperand(receiver_reg, offset));
+ if (index.is_inobject()) {
+ __ movp(scratch1, FieldOperand(receiver_reg, index.offset()));
} else {
- __ movq(scratch1,
+ __ movp(scratch1,
FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ movq(scratch1, FieldOperand(scratch1, offset));
+ __ movp(scratch1, FieldOperand(scratch1, index.offset()));
}
// Store the value into the storage.
@@ -1074,32 +673,30 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
+ if (index.is_inobject()) {
// Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ __ movp(FieldOperand(receiver_reg, index.offset()), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ __ movp(name_reg, value_reg);
__ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs,
+ receiver_reg, index.offset(), name_reg, scratch1, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movp(FieldOperand(scratch1, index.offset()), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ __ movp(name_reg, value_reg);
__ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs,
+ scratch1, index.offset(), name_reg, receiver_reg, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
}
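
Both store paths above pick between the same two locations, now encapsulated by FieldIndex::offset(): in-object fields live at a fixed offset inside the receiver, out-of-object fields live in the separate properties FixedArray. A standalone sketch of the offset computation (not V8 source; the constants are illustrative, not V8's real header sizes):

const int kPointerSize = 8;                          // x64
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // assumed value

int FieldByteOffset(bool is_inobject, int index, int instance_size) {
  // The removed code reached in-object fields with a negative index, so
  // instance_size + index * kPointerSize landed inside the object.
  return is_inobject
      ? instance_size + index * kPointerSize            // inside the object
      : index * kPointerSize + kFixedArrayHeaderSize;   // in properties array
}
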
@@ -1119,20 +716,16 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM((masm()))
-Register StubCompiler::CheckPrototypes(Handle<Type> type,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ Move(scratch1, receiver_map);
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1145,17 +738,10 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register reg = object_reg;
int depth = 0;
- StackArgumentsAccessor args(rsp, kFastApiCallArguments,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- const int kHolderIndex = kFastApiCallArguments - 1 -
- FunctionCallbackArguments::kHolderIndex;
-
- if (save_at_depth == depth) {
- __ movq(args.GetArgumentOperand(kHolderIndex), object_reg);
- }
-
Handle<JSObject> current = Handle<JSObject>::null();
- if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ }
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder->map());
@@ -1178,20 +764,20 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
- current->property_dictionary()->FindEntry(*name) ==
+ current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
if (in_new_space) {
// Save the map in scratch1 for later.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
if (depth != 1 || check == CHECK_ALL_MAPS) {
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
@@ -1212,17 +798,13 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
if (in_new_space) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ Move(reg, prototype);
}
}
- if (save_at_depth == depth) {
- __ movq(args.GetArgumentOperand(kHolderIndex), reg);
- }
-
// Go to the next object in the prototype chain.
current = prototype;
current_map = handle(current->map());
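
CheckPrototypes walks from the receiver toward the holder one prototype at a time, validating the map of every object on the way so that any shape change sends execution to the miss label. The shape of that loop as a standalone C++ sketch (not V8 source; Obj and the map predicate are illustrative):

struct Obj { const void* map; const Obj* prototype; };

const Obj* WalkToHolder(const Obj* receiver, const Obj* holder,
                        bool (*map_is_expected)(const Obj*)) {
  const Obj* current = receiver;
  while (current != holder) {
    if (!map_is_expected(current)) return nullptr;  // __ CheckMap(...) -> miss
    current = current->prototype;  // load via Map::kPrototypeOffset, or embed
  }
  return current;  // ends up in holder_reg
}
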
@@ -1271,7 +853,7 @@ void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<Type> type,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1287,7 +869,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
// Load the properties dictionary.
Register dictionary = scratch4();
- __ movq(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
+ __ movp(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
// Probe the dictionary.
Label probe_done;
@@ -1307,11 +889,11 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(scratch2(),
+ __ movp(scratch2(),
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
- __ movq(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2(), scratch3());
+ __ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
+ __ cmpp(scratch2(), scratch3());
__ j(not_equal, &miss);
}
@@ -1322,32 +904,20 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex field,
+ FieldIndex field,
Representation representation) {
- if (!reg.is(receiver())) __ movq(receiver(), reg);
+ if (!reg.is(receiver())) __ movp(receiver(), reg);
if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
} else {
- KeyedLoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ KeyedLoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
}
}
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(),
- scratch1(), scratch2(), name(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -1361,70 +931,34 @@ void LoadStubCompiler::GenerateLoadCallback(
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- __ push(receiver()); // receiver
+ __ Push(receiver()); // receiver
if (heap()->InNewSpace(callback->data())) {
ASSERT(!scratch2().is(reg));
__ Move(scratch2(), callback);
- __ push(FieldOperand(scratch2(),
+ __ Push(FieldOperand(scratch2(),
ExecutableAccessorInfo::kDataOffset)); // data
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
ASSERT(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ push(kScratchRegister); // return value
- __ push(kScratchRegister); // return value default
+ __ Push(kScratchRegister); // return value
+ __ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
- __ push(reg); // holder
- __ push(name()); // name
+ __ Push(reg); // holder
+ __ Push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const PropertyAccessorInfo& to the C++ callback.
- Address getter_address = v8::ToCData<Address>(callback->getter());
-
-#if defined(__MINGW64__) || defined(_WIN64)
- Register getter_arg = r8;
- Register accessor_info_arg = rdx;
- Register name_arg = rcx;
-#else
- Register getter_arg = rdx;
- Register accessor_info_arg = rsi;
- Register name_arg = rdi;
-#endif
-
- ASSERT(!name_arg.is(scratch4()));
- __ movq(name_arg, rsp);
__ PushReturnAddressFrom(scratch4());
- // v8::Arguments::values_ and handler for name.
- const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Allocate v8::AccessorInfo in non-GCed stack space.
- const int kArgStackSpace = 1;
-
- __ PrepareCallApiFunction(kArgStackSpace);
- __ lea(rax, Operand(name_arg, 1 * kPointerSize));
-
- // v8::PropertyAccessorInfo::args_.
- __ movq(StackSpaceOperand(0), rax);
-
- // The context register (rsi) has been saved in PrepareCallApiFunction and
- // could be used to pass arguments.
- __ lea(accessor_info_arg, StackSpaceOperand(0));
-
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ // ABI for CallApiGetter.
+ Register api_function_address = r8;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
- // The name handler is counted as an argument.
- StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
- Operand return_value_operand = args.GetArgumentOperand(
- PropertyCallbackArguments::kArgsLength - 1 -
- PropertyCallbackArguments::kReturnValueOffset);
- __ CallApiFunctionAndReturn(getter_address,
- thunk_address,
- getter_arg,
- kStackSpace,
- return_value_operand,
- NULL);
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
}
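
After the pushes above and PushReturnAddressFrom, the stack holds the name handle plus the six PropertyCallbackArguments slots in reverse push order. An illustrative layout (not V8 source; only kDataIndex == 4, kThisIndex == 5 and kArgsLength == 6 appear in the asserts above, so the other index assignments are assumptions inferred from the push sequence):

struct PushedApiGetterFrame {   // top of stack first
  const void* return_address;   // restored by PushReturnAddressFrom
  const void* name;             // handle for the property name
  const void* args[6];          // [0] holder, [1] isolate,
                                // [2] return value default,
                                // [3] return value, [4] data, [5] this
};
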
@@ -1480,10 +1014,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ push(receiver());
+ __ Push(receiver());
}
- __ push(holder_reg);
- __ push(this->name());
+ __ Push(holder_reg);
+ __ Push(this->name());
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see a caller
@@ -1501,10 +1035,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
__ ret(0);
__ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
+ __ Pop(this->name());
+ __ Pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver());
+ __ Pop(receiver());
}
// Leave the internal frame.
@@ -1520,1067 +1054,32 @@ void LoadStubCompiler::GenerateLoadInterceptor(
__ PushReturnAddressFrom(scratch2());
ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
__ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Cmp(rcx, name);
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateFunctionCheck(Register function,
- Register scratch,
- Label* miss) {
- __ JumpIfSmi(function, miss);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ Move(rdi, cell);
- __ movq(rdi, FieldOperand(rdi, Cell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- GenerateFunctionCheck(rdi, rax, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
- __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
- } else {
- __ Cmp(rdi, function);
- }
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state());
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(
- object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- GenerateFastPropertyLoad(masm(), rdi, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
- GenerateJumpFunction(object, rdi, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->SetElementsKind(GetInitialFastElementsKind());
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- const int argc = arguments().immediate();
- __ movq(rax, Immediate(argc));
- __ Move(rbx, site_feedback_cell);
- __ Move(rdi, function);
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- if (argc == 0) {
- // Noop, return the length.
- __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- // Get the elements array of the object.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_array_map());
- __ j(not_equal, &check_double);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ movq(rcx, args.GetArgumentOperand(1));
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ movq(FieldOperand(rdi,
- rax,
- times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- rcx);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&check_double);
-
- // Check that the elements are in double mode.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_double_array_map());
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &call_builtin);
-
- __ movq(rcx, args.GetArgumentOperand(1));
- __ StoreNumberToDoubleElements(
- rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(rbx, &call_builtin);
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &call_builtin);
- // rdx: receiver
- // rbx: map
-
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore rdi.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- rbx,
- rdi,
- &call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(rbx, &call_builtin);
- }
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ movq(Operand(rdx, 0), rcx);
-
- __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- __ movq(rbx, args.GetArgumentOperand(1));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(rbx, &no_fast_elements_check);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top.
- __ Load(rcx, new_space_allocation_top);
-
- // Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
- __ j(not_equal, &call_builtin);
- __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
- Operand limit_operand =
- masm()->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ Store(new_space_allocation_top, rcx);
-
- // Push the argument...
- __ movq(Operand(rdx, 0), rbx);
- // ... and fill the rest with holes.
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
- }
-
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
- // Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, args.GetReceiverOperand());
-
- // Increment the elements' and array's sizes.
- __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(kAllocationDelta));
-
- // Make new length a smi before returning it.
- __ Integer32ToSmi(rax, rax);
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- isolate()),
- argc + 1,
- 1);
- }
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
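
The removed attempt_to_grow_elements path works only when the elements array is the most recently allocated new-space object: if it ends exactly at the allocation top and the limit allows it, the stub claims kAllocationDelta extra slots by bumping the top pointer instead of copying. A standalone model of that check (not V8 source):

#include <cstdint>

const int kAllocationDelta = 4;
const int kPointerSize = 8;

// top points at the new-space allocation top; limit is the space's end.
bool TryGrowInPlace(uintptr_t elements_end, uintptr_t* top, uintptr_t limit) {
  if (elements_end != *top) return false;  // something was allocated after us
  uintptr_t new_top = *top + kAllocationDelta * kPointerSize;
  if (new_top > limit) return false;       // __ j(above, &call_builtin)
  *top = new_top;                          // __ Store(new_space_allocation_top, rcx)
  return true;                             // caller fills the new slots with holes
}
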
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss, return_undefined, call_builtin;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- // Get the elements array of the object.
- __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rcx and calculate new length.
- __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ subl(rcx, Immediate(1));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
- __ movq(rax, FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- // Check if element is already the hole.
- __ cmpq(rax, r9);
- // If so, call slow-case to also check prototypes for value.
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
-
- // Fill with the hole and return original value.
- __ movq(FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- const int argc = arguments().immediate();
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = rbx;
- Register index = rdi;
- Register result = rax;
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
- __ movq(receiver, args.GetReceiverOperand());
- if (argc > 0) {
- __ movq(index, args.GetArgumentOperand(1));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = rax;
- Register index = rdi;
- Register scratch = rdx;
- Register result = rax;
- __ movq(receiver, args.GetReceiverOperand());
- if (argc > 0) {
- __ movq(index, args.GetArgumentOperand(1));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kempty_stringRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = rbx;
- __ movq(code, args.GetArgumentOperand(1));
-
- // Check the code is a smi.
- Label slow;
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
-
- StringCharFromCodeGenerator generator(code, rax);
- generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
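
The SmiAndConstant mask in the removed stub implements the ToUint16 conversion that String.fromCharCode performs on its argument. A standalone equivalent (not V8 source):

#include <cstdint>

uint16_t ToUint16(int32_t char_code) {
  return static_cast<uint16_t>(char_code & 0xffff);  // Smi::FromInt(0xffff) mask
}
// ToUint16(0x10041) == 0x41, so fromCharCode(65601) still yields "A".
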
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss, slow;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into rax.
- __ movq(rax, args.GetArgumentOperand(1));
-
- // Check if the argument is a smi.
- Label smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &smi);
-
- // Check if the argument is a heap number and load its value into xmm0.
- __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Check if the argument is strictly positive. Note this also discards NaN.
- __ xorpd(xmm1, xmm1);
- __ ucomisd(xmm0, xmm1);
- __ j(below_equal, &slow);
-
- // Do a truncating conversion.
- __ cvttsd2si(rax, xmm0);
-
- // Check for 0x80000000, which signals a failed conversion.
- Label conversion_failure;
- __ cmpl(rax, Immediate(0x80000000));
- __ j(equal, &conversion_failure);
-
- // Smi tag and return.
- __ Integer32ToSmi(rax, rax);
- __ bind(&smi);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is < 2^kMantissaBits.
- Label already_round;
- __ bind(&conversion_failure);
- int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
- __ movq(rbx, kTwoMantissaBits);
- __ movq(xmm1, rbx);
- __ ucomisd(xmm0, xmm1);
- __ j(above_equal, &already_round);
-
- // Save a copy of the argument.
- __ movaps(xmm2, xmm0);
-
- // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
- __ addsd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
-
- // Compare the argument and the tentative result to get the right mask:
- // if xmm2 < xmm0:
- // xmm2 = 1...1
- // else:
- // xmm2 = 0...0
- __ cmpltsd(xmm2, xmm0);
-
- // Subtract 1 if the argument was less than the tentative result.
- int64_t kOne = V8_INT64_C(0x3ff0000000000000);
- __ movq(rbx, kOne);
- __ movq(xmm1, rbx);
- __ andpd(xmm1, xmm2);
- __ subsd(xmm0, xmm1);
-
- // Return a new heap number.
- __ AllocateHeapNumber(rax, rbx, &slow);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(2 * kPointerSize);
-
- // Return the argument (when it's an already round heap number).
- __ bind(&already_round);
- __ movq(rax, args.GetArgumentOperand(1));
- __ ret(2 * kPointerSize);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
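
When cvttsd2si fails, the removed Math.floor fast path falls back to the 2^52 trick: adding and subtracting 0x4330000000000000 (2^52 as a double) forces the value to round to an integer, and one masked subtraction turns round-to-nearest into floor. A standalone demonstration, assuming IEEE double arithmetic in the default round-to-nearest mode (not V8 source):

#include <cstdio>

double FloorViaTwoPow52(double x) {             // valid for 0 < x < 2^52
  const double kTwoPow52 = 4503599627370496.0;  // 0x4330000000000000
  double rounded = (x + kTwoPow52) - kTwoPow52; // rounds to nearest integer
  if (x < rounded) rounded -= 1.0;  // the cmpltsd/andpd/subsd mask sequence
  return rounded;
}

int main() {
  printf("%.1f %.1f\n", FloorViaTwoPow52(2.7), FloorViaTwoPow52(3.2));
  // prints 2.0 3.0
}
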
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into rax.
- __ movq(rax, args.GetArgumentOperand(1));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(rax, &not_smi);
-
- // Branchless abs implementation; see:
- // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
- // Set rbx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ movq(rbx, rax);
- __ sar(rbx, Immediate(kBitsPerPointer - 1));
-
- // Do bitwise not or do nothing depending on rbx.
- __ xor_(rax, rbx);
-
- // Add 1 or do nothing depending on rbx.
- __ subq(rax, rbx);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its value.
- __ bind(&not_smi);
- __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- const int sign_mask_shift =
- (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
- __ Set(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift);
- __ testq(rbx, rdi);
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number. We still have the sign mask in rdi.
- __ bind(&negative_sign);
- __ xor_(rbx, rdi);
- __ AllocateHeapNumber(rax, rdx, &slow);
- __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
- __ ret(2 * kPointerSize);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
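
The removed Math.abs fast path uses the classic branchless idiom from the bithacks page cited above: an arithmetic shift smears the sign bit into a mask, xor flips negative inputs, and subtracting the mask adds the final one. A standalone version, assuming arithmetic right shift of signed values (not V8 source):

#include <cstdint>
#include <cstdio>

int64_t BranchlessAbs(int64_t x) {
  int64_t mask = x >> 63;    // __ sar(rbx, Immediate(kBitsPerPointer - 1))
  return (x ^ mask) - mask;  // __ xor_(rax, rbx); __ subq(rax, rbx)
}

int main() {
  printf("%lld %lld\n",
         (long long)BranchlessAbs(-42), (long long)BranchlessAbs(7));
  // prints 42 7; INT64_MIN stays negative, hence the stub's slow-path check
}
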
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(IC::CurrentTypeOf(object, isolate()), rdx, holder,
- rbx, rax, rdi, name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ movq(rax,
- StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize));
- __ movq(StackOperandForReturnAddress(0), rax);
-
- GenerateFastApiCall(masm(), optimization, argc);
-
- __ bind(&miss);
- __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- HandlerFrontendFooter(&miss_before_stack_reserved);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
- Label success;
- // Check that the object is a boolean.
- __ CompareRoot(object, Heap::kTrueValueRootIndex);
- __ j(equal, &success);
- __ CompareRoot(object, Heap::kFalseValueRootIndex);
- __ j(not_equal, miss);
- __ bind(&success);
-}
-
-
-void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
- if (object->IsGlobalObject()) {
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
-}
-
-
-Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* miss) {
- GenerateNameCheck(name, miss);
-
- Register reg = rdx;
-
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(reg, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(reg, miss);
- }
-
- // Make sure that it's okay not to patch the on-stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- Counters* counters = isolate()->counters();
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(counters->call_const(), 1);
-
- // Check that the maps haven't changed.
- reg = CheckPrototypes(IC::CurrentTypeOf(object, isolate()), reg, holder,
- rbx, rax, rdi, name, miss);
- break;
-
- case STRING_CHECK: {
- // Check that the object is a string.
- __ CmpObjectType(reg, FIRST_NONSTRING_TYPE, rax);
- __ j(above_equal, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax, miss);
- break;
- }
- case SYMBOL_CHECK: {
- // Check that the object is a symbol.
- __ CmpObjectType(reg, SYMBOL_TYPE, rax);
- __ j(not_equal, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, rax, miss);
- break;
- }
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(reg, &fast);
- __ CmpObjectType(reg, HEAP_NUMBER_TYPE, rax);
- __ j(not_equal, miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax, miss);
- break;
- }
- case BOOLEAN_CHECK: {
- GenerateBooleanCheck(reg, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, miss);
- break;
- }
- }
-
- if (check != RECEIVER_MAP_CHECK) {
- Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
- reg = CheckPrototypes(
- IC::CurrentTypeOf(prototype, isolate()),
- rax, holder, rbx, rdx, rdi, name, miss);
- }
-
- return reg;
-}
-
-
-void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
- Register function,
- Label* miss) {
- // Check that the function really is a function.
- GenerateFunctionCheck(function, rbx, miss);
-
- if (!function.is(rdi)) __ movq(rdi, function);
- PatchGlobalProxy(object);
-
- // Invoke the function.
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind());
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- Label miss;
- GenerateNameCheck(name, &miss);
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
-
- CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state());
- compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
- &miss);
-
- // Restore receiver.
- __ movq(rdx, args.GetReceiverOperand());
-
- GenerateJumpFunction(object, rax, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- // Potentially loads a closure that matches the shared function info of the
- // function, rather than the function itself.
- GenerateLoadFunctionFromCell(cell, function, &miss);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- GenerateJumpFunction(object, rdi, function);
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
+ __ Push(receiver());
+ __ Push(holder_reg);
__ Push(callback); // callback info
__ Push(name);
- __ push(value());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch1(),
- scratch2(), this->name(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -2593,27 +1092,31 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
- __ push(rax);
+ __ Push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ push(rdx);
- __ push(rax);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver);
+ __ Push(value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2621,10 +1124,10 @@ void StoreStubCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
- __ pop(rax);
+ __ Pop(rax);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
}
@@ -2638,9 +1141,9 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
- __ push(this->name());
- __ push(value());
+ __ Push(receiver());
+ __ Push(this->name());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
@@ -2653,6 +1156,20 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ PopReturnAddressTo(scratch1());
+ __ Push(receiver());
+ __ Push(value());
+ __ PushReturnAddressFrom(scratch1());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
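
The new GenerateStoreArrayLength shows the tail-call pattern this file uses throughout: pop the return address into a scratch register, push the runtime arguments in its place, then push the return address back so the runtime entry returns directly to the IC's caller. A toy model of that stack shuffle (not V8 source; the vector's back() models the top of the machine stack):

#include <vector>

void PrepareTailCall(std::vector<const void*>* stack,
                     const void* receiver, const void* value) {
  const void* ret = stack->back();  // __ PopReturnAddressTo(scratch1())
  stack->pop_back();
  stack->push_back(receiver);       // __ Push(receiver())
  stack->push_back(value);          // __ Push(value())
  stack->push_back(ret);            // __ PushReturnAddressFrom(scratch1())
}
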
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -2660,7 +1177,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
Label miss;
__ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ movq(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
for (int i = 0; i < receiver_count; ++i) {
// Check map and tail call if there's a match
@@ -2670,7 +1187,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
} else {
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
- __ movq(transition_map(),
+ __ Move(transition_map(),
transitioned_maps->at(i),
RelocInfo::EMBEDDED_OBJECT);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
@@ -2688,7 +1205,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name) {
NonexistentHandlerFrontend(type, last, name);
@@ -2717,33 +1234,22 @@ Register* KeyedLoadStubCompiler::registers() {
}
-Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
- return registers;
+Register StoreStubCompiler::value() {
+ return rax;
}
-Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+Register* StoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
return registers;
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Cmp(name_reg, name);
- __ j(not_equal, miss);
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Cmp(name_reg, name);
- __ j(not_equal, miss);
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
+ return registers;
}
@@ -2752,6 +1258,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
@@ -2764,11 +1271,16 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(receiver);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2776,7 +1288,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
}
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
}
@@ -2787,7 +1299,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
@@ -2800,7 +1312,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// Get the value from the cell.
__ Move(rbx, cell);
- __ movq(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
+ __ movp(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
@@ -2811,13 +1323,13 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
- HandlerFrontendFooter(name, &miss);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
__ ret(0);
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
@@ -2831,8 +1343,10 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ Cmp(this->name(), name);
+ __ j(not_equal, &miss);
}
Label number_case;
@@ -2840,17 +1354,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
__ JumpIfSmi(receiver(), smi_target);
Register map_reg = scratch1();
- __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = types->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<Type> type = types->at(current);
+ Handle<HeapType> type = types->at(current);
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
// Check map and tail call if there's a match
__ Cmp(map_reg, map);
- if (type->Is(Type::Number())) {
+ if (type->Is(HeapType::Number())) {
ASSERT(!number_case.is_unused());
__ bind(&number_case);
}
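
CompilePolymorphicIC reduces to one map load followed by a compare-and-tail-call per handled map, with smi receivers routed to the entry whose type covers Number. Equivalent logic as a standalone C++ sketch (not V8 source):

#include <cstddef>

struct Handler { const void* map; void (*code)(); };

void PolymorphicDispatch(const void* receiver_map, const Handler* handlers,
                         size_t count, void (*miss)()) {
  for (size_t i = 0; i < count; ++i) {
    if (receiver_map == handlers[i].map) {  // __ Cmp(map_reg, map)
      handlers[i].code();                   // __ j(equal, handler) tail call
      return;
    }
  }
  miss();  // no map matched: fall through to the miss label
}
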
@@ -2887,7 +1401,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
__ JumpIfNotSmi(rax, &miss);
__ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rdx: receiver
diff --git a/chromium/v8/src/x87/OWNERS b/chromium/v8/src/x87/OWNERS
new file mode 100644
index 00000000000..dd9998b2610
--- /dev/null
+++ b/chromium/v8/src/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/chromium/v8/src/x87/assembler-x87-inl.h b/chromium/v8/src/x87/assembler-x87-inl.h
new file mode 100644
index 00000000000..4a5583c44d6
--- /dev/null
+++ b/chromium/v8/src/x87/assembler-x87-inl.h
@@ -0,0 +1,561 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_X87_ASSEMBLER_X87_INL_H_
+#define V8_X87_ASSEMBLER_X87_INL_H_
+
+#include "src/x87/assembler-x87.h"
+
+#include "src/cpu.h"
+#include "src/debug.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsCrankshaft() { return false; }
+
+
+static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 5;
+
+
+// The modes possibly affected by apply must be in kApplyMask.
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+ bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
+ if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_);
+ *p -= delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ if (*pc_ == kCallOpcode) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ }
+ } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
+ // Special handling of js_return when a break point is set (call
+ // instruction has been inserted).
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
+ // Special handling of a debug break slot when a break point is set (call
+ // instruction has been inserted).
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ } else if (IsInternalReference(rmode_)) {
+ // absolute code pointer inside code object moves with the code object.
+ int32_t* p = reinterpret_cast<int32_t*>(pc_);
+ *p += delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ }
+}
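+
+// Note the sign convention above: PC-relative entries are adjusted by -delta
+// because their targets stay put while the code object moves by delta,
+// whereas absolute internal references move with the code object and are
+// adjusted by +delta.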
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+int RelocInfo::target_address_size() {
+ return Assembler::kSpecialTargetSize;
+}
+
+
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
+ Memory::Object_at(pc_) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
+}
+
+
+Address RelocInfo::target_reference() {
+ ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ return Memory::Address_at(pc_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) {
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+ }
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = Memory::Address_at(pc_);
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
+}
+
+
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
+ Memory::Address_at(pc_) = address;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because the cell can never be
+ // on an evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
+}
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Memory::Object_Handle_at(pc_ + 1);
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + 1, host_));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(*pc_ == kCallOpcode);
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
+ icache_flush_mode);
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ return Assembler::target_address_at(pc_ + 1, host_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ Assembler::set_target_address_at(pc_ + 1, host_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+Object* RelocInfo::call_object() {
+ return *call_object_address();
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ *call_object_address() = target;
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ return reinterpret_cast<Object**>(pc_ + 1);
+}
+
+
+void RelocInfo::WipeOut() {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+ // Effectively write zero into the relocation.
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ return *pc_ == kCallOpcode;
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ return !Assembler::IsNop(pc());
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+ CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ isolate->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+ } else if (IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+ CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+ } else if (IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
+Immediate::Immediate(int x) {
+ x_ = x;
+ rmode_ = RelocInfo::NONE32;
+}
+
+
+Immediate::Immediate(const ExternalReference& ext) {
+ x_ = reinterpret_cast<int32_t>(ext.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+
+Immediate::Immediate(Label* internal_offset) {
+ x_ = reinterpret_cast<int32_t>(internal_offset);
+ rmode_ = RelocInfo::INTERNAL_REFERENCE;
+}
+
+
+Immediate::Immediate(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ x_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // no relocation needed
+ x_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE32;
+ }
+}
+
+
+Immediate::Immediate(Smi* value) {
+ x_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = RelocInfo::NONE32;
+}
+
+
+Immediate::Immediate(Address addr) {
+ x_ = reinterpret_cast<int32_t>(addr);
+ rmode_ = RelocInfo::NONE32;
+}
+
+
+void Assembler::emit(uint32_t x) {
+ *reinterpret_cast<uint32_t*>(pc_) = x;
+ pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emit(Handle<Object> handle) {
+ AllowDeferredHandleDereference heap_object_check;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!isolate()->heap()->InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ emit(reinterpret_cast<intptr_t>(handle.location()),
+ RelocInfo::EMBEDDED_OBJECT);
+ } else {
+ // no relocation needed
+ emit(reinterpret_cast<intptr_t>(obj));
+ }
+}
+
+
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
+ if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
+ } else if (!RelocInfo::IsNone(rmode)
+ && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
+ RecordRelocInfo(rmode);
+ }
+ emit(x);
+}
+
+
+void Assembler::emit(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id) {
+ AllowDeferredHandleDereference embedding_raw_address;
+ emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
+}
+
+
+void Assembler::emit(const Immediate& x) {
+ if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
+ Label* label = reinterpret_cast<Label*>(x.x_);
+ emit_code_relative_offset(label);
+ return;
+ }
+ if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
+ emit(x.x_);
+}
+
+
+void Assembler::emit_code_relative_offset(Label* label) {
+ if (label->is_bound()) {
+ int32_t pos;
+ pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
+ emit(pos);
+ } else {
+ emit_disp(label, Displacement::CODE_RELATIVE);
+ }
+}
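+
+// A bound code-relative offset is measured from the tagged Code pointer:
+// the label position plus Code::kHeaderSize, minus kHeapObjectTag.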
+
+
+void Assembler::emit_w(const Immediate& x) {
+ ASSERT(RelocInfo::IsNone(x.rmode_));
+ uint16_t value = static_cast<uint16_t>(x.x_);
+ reinterpret_cast<uint16_t*>(pc_)[0] = value;
+ pc_ += sizeof(uint16_t);
+}
+
+
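+// The 32-bit operand of a PC-relative call or jmp is a displacement counted
+// from the first byte after the displacement itself, hence the
+// pc + sizeof(int32_t) bias in target_address_at() and
+// set_target_address_at() below.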
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
+ return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc);
+ *p = target - (pc + sizeof(int32_t));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(p, sizeof(int32_t));
+ }
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
+Displacement Assembler::disp_at(Label* L) {
+ return Displacement(long_at(L->pos()));
+}
+
+
+void Assembler::disp_at_put(Label* L, Displacement disp) {
+ long_at_put(L->pos(), disp.data());
+}
+
+
+void Assembler::emit_disp(Label* L, Displacement::Type type) {
+ Displacement disp(L, type);
+ L->link_to(pc_offset());
+ emit(static_cast<int>(disp.data()));
+}
+
+
+void Assembler::emit_near_disp(Label* L) {
+ byte disp = 0x00;
+ if (L->is_near_linked()) {
+ int offset = L->near_link_pos() - pc_offset();
+ ASSERT(is_int8(offset));
+ disp = static_cast<byte>(offset & 0xFF);
+ }
+ L->link_to(pc_offset(), Label::kNear);
+ *pc_++ = disp;
+}
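+
+// Unbound near uses of a label form a chain through the code: each 8-bit
+// slot holds the non-positive offset to the previous near use, with 0
+// terminating the chain. bind_to() later walks the chain and patches in the
+// real displacements.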
+
+
+void Operand::set_modrm(int mod, Register rm) {
+ ASSERT((mod & -4) == 0);
+ buf_[0] = mod << 6 | rm.code();
+ len_ = 1;
+}
+
+
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+ ASSERT(len_ == 1);
+ ASSERT((scale & -4) == 0);
+ // Use SIB with no index register only for base esp.
+ ASSERT(!index.is(esp) || base.is(esp));
+ buf_[1] = scale << 6 | index.code() << 3 | base.code();
+ len_ = 2;
+}
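+
+// ModRM layout: mod in bits 7..6, reg (or opcode digit) in bits 5..3, r/m in
+// bits 2..0. SIB layout: scale, index and base in the same bit positions. An
+// index of esp in the SIB byte encodes "no index", which is why esp is only
+// accepted as index for the [esp]-based case above.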
+
+
+void Operand::set_disp8(int8_t disp) {
+ ASSERT(len_ == 1 || len_ == 2);
+ *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
+}
+
+
+void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
+ ASSERT(len_ == 1 || len_ == 2);
+ int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+ *p = disp;
+ len_ += sizeof(int32_t);
+ rmode_ = rmode;
+}
+
+Operand::Operand(Register reg) {
+ // reg
+ set_modrm(3, reg);
+}
+
+
+Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
+ // [disp/r]
+ set_modrm(0, ebp);
+ set_dispr(disp, rmode);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_X87_ASSEMBLER_X87_INL_H_
diff --git a/chromium/v8/src/x87/assembler-x87.cc b/chromium/v8/src/x87/assembler-x87.cc
new file mode 100644
index 00000000000..f2082c2a7f6
--- /dev/null
+++ b/chromium/v8/src/x87/assembler-x87.cc
@@ -0,0 +1,2028 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/disassembler.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatures
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ CPU cpu;
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+}
+
+
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Displacement
+
+void Displacement::init(Label* L, Type type) {
+ ASSERT(!L->is_bound());
+ int next = 0;
+ if (L->is_linked()) {
+ next = L->pos();
+ ASSERT(next > 0); // Displacements must be at positions > 0
+ }
+ // Ensure that we _never_ overflow the next field.
+ ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
+ data_ = NextField::encode(next) | TypeField::encode(type);
+}
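+
+// A Displacement packs the position of the label's previous link (NextField)
+// and the kind of use (TypeField) into the 32-bit value that occupies the
+// instruction's displacement slot until the label is bound.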
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on IA32 means that it is a relative address, as used by
+ // branch instructions. These are also the ones that need changing when a
+ // code object moves.
+ return (1 << rmode_) & kApplyMask;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc_ + i) = *(instructions + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Call instruction takes up 5 bytes and int3 takes up one byte.
+ static const int kCallCodeSize = 5;
+ int code_size = kCallCodeSize + guard_bytes;
+
+ // Create a code patcher.
+ CodePatcher patcher(pc_, code_size);
+
+ // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+#endif
+
+ // Patch the code.
+ patcher.masm()->call(target, RelocInfo::NONE32);
+
+ // Check that the size of the code generated is as expected.
+ ASSERT_EQ(kCallCodeSize,
+ patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+
+ // Add the requested number of int3 instructions after the call.
+ ASSERT_GE(guard_bytes, 0);
+ for (int i = 0; i < guard_bytes; i++) {
+ patcher.masm()->int3();
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
+ // [base + disp/r]
+ if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
+ // [base]
+ set_modrm(0, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
+ // [base + disp8]
+ set_modrm(1, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ set_disp8(disp);
+ } else {
+ // [base + disp/r]
+ set_modrm(2, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ set_dispr(disp, rmode);
+ }
+}
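+
+// The mod field selects the displacement width: 0 = none, 1 = disp8,
+// 2 = disp32. A base of ebp cannot use mod 0, since modrm 00 xxx 101 means
+// [disp32] with no base; a base of esp always needs a SIB byte, since
+// r/m = 100 means "SIB follows".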
+
+
+Operand::Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocInfo::Mode rmode) {
+ ASSERT(!index.is(esp)); // illegal addressing mode
+ // [base + index*scale + disp/r]
+ if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
+ // [base + index*scale]
+ set_modrm(0, esp);
+ set_sib(scale, index, base);
+ } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
+ // [base + index*scale + disp8]
+ set_modrm(1, esp);
+ set_sib(scale, index, base);
+ set_disp8(disp);
+ } else {
+ // [base + index*scale + disp/r]
+ set_modrm(2, esp);
+ set_sib(scale, index, base);
+ set_dispr(disp, rmode);
+ }
+}
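+
+// For example, mov(eax, Operand(ebx, ecx, times_4, 0x12345678)) encodes as
+// 8B 84 8B 78 56 34 12: opcode 8B, ModRM 84 (mod = 10, reg = eax,
+// r/m = SIB), SIB 8B (scale = 4, index = ecx, base = ebx), then the
+// little-endian disp32.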
+
+
+Operand::Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocInfo::Mode rmode) {
+ ASSERT(!index.is(esp)); // illegal addressing mode
+ // [index*scale + disp/r]
+ set_modrm(0, esp);
+ set_sib(scale, index, ebp);
+ set_dispr(disp, rmode);
+}
+
+
+bool Operand::is_reg(Register reg) const {
+ return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
+ && ((buf_[0] & 0x07) == reg.code()); // register codes match.
+}
+
+
+bool Operand::is_reg_only() const {
+ return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
+}
+
+
+Register Operand::reg() const {
+ ASSERT(is_reg_only());
+ return Register::from_code(buf_[0] & 0x07);
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler.
+
+// Emit a single byte. Must always be inlined.
+#define EMIT(x) \
+ *pc_++ = (x)
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ positions_recorder_(this) {
+ // Clear the buffer in debug mode unless it was provided by the caller,
+ // in which case we can't be sure it's okay to overwrite existing code
+ // in it; see CodePatcher::CodePatcher(...).
+#ifdef DEBUG
+ if (own_buffer_) {
+ memset(buffer_, 0xCC, buffer_size_); // int3
+ }
+#endif
+
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+
+#ifdef GENERATED_CODE_COVERAGE
+ InitCoverageLog();
+#endif
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // Finalize code (at this point overflow() may be true, but the gap ensures
+ // that we are still not overlapping instructions and relocation info).
+ ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
+ // Set up code descriptor.
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(IsPowerOf2(m));
+ int mask = m - 1;
+ int addr = pc_offset();
+ Nop((m - (addr & mask)) & mask);
+}
+
+
+bool Assembler::IsNop(Address addr) {
+ Address a = addr;
+ while (*a == 0x66) a++;
+ if (*a == 0x90) return true;
+ if (a[0] == 0xf && a[1] == 0x1f) return true;
+ return false;
+}
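+
+// IsNop() accepts 0x90 preceded by any number of 0x66 operand-size prefixes,
+// as well as the multi-byte 0F 1F NOP encoding.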
+
+
+void Assembler::Nop(int bytes) {
+ EnsureSpace ensure_space(this);
+
+ // Older CPUs that do not support SSE2 may not support multibyte NOP
+ // instructions.
+ for (; bytes > 0; bytes--) {
+ EMIT(0x90);
+ }
+ return;
+}
+
+
+void Assembler::CodeTargetAlign() {
+ Align(16); // Preferred alignment of jump targets on ia32.
+}
+
+
+void Assembler::cpuid() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xA2);
+}
+
+
+void Assembler::pushad() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x60);
+}
+
+
+void Assembler::popad() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x61);
+}
+
+
+void Assembler::pushfd() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x9C);
+}
+
+
+void Assembler::popfd() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x9D);
+}
+
+
+void Assembler::push(const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ if (x.is_int8()) {
+ EMIT(0x6a);
+ EMIT(x.x_);
+ } else {
+ EMIT(0x68);
+ emit(x);
+ }
+}
+
+
+void Assembler::push_imm32(int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x68);
+ emit(imm32);
+}
+
+
+void Assembler::push(Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x50 | src.code());
+}
+
+
+void Assembler::push(const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(esi, src);
+}
+
+
+void Assembler::pop(Register dst) {
+ ASSERT(reloc_info_writer.last_pc() != NULL);
+ EnsureSpace ensure_space(this);
+ EMIT(0x58 | dst.code());
+}
+
+
+void Assembler::pop(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x8F);
+ emit_operand(eax, dst);
+}
+
+
+void Assembler::enter(const Immediate& size) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC8);
+ emit_w(size);
+ EMIT(0);
+}
+
+
+void Assembler::leave() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC9);
+}
+
+
+void Assembler::mov_b(Register dst, const Operand& src) {
+ CHECK(dst.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x8A);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov_b(const Operand& dst, int8_t imm8) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC6);
+ emit_operand(eax, dst);
+ EMIT(imm8);
+}
+
+
+void Assembler::mov_b(const Operand& dst, Register src) {
+ CHECK(src.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x88);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::mov_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov_w(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::mov_w(const Operand& dst, int16_t imm16) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ EMIT(static_cast<int8_t>(imm16 & 0xff));
+ EMIT(static_cast<int8_t>(imm16 >> 8));
+}
+
+
+void Assembler::mov(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xB8 | dst.code());
+ emit(imm32);
+}
+
+
+void Assembler::mov(Register dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xB8 | dst.code());
+ emit(x);
+}
+
+
+void Assembler::mov(Register dst, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xB8 | dst.code());
+ emit(handle);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x89);
+ EMIT(0xC0 | src.code() << 3 | dst.code());
+}
+
+
+void Assembler::mov(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(x);
+}
+
+
+void Assembler::mov(const Operand& dst, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(handle);
+}
+
+
+void Assembler::mov(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::movsx_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBE);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movsx_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBF);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xB6);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xB7);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cld() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFC);
+}
+
+
+void Assembler::rep_movs() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0xA5);
+}
+
+
+void Assembler::rep_stos() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0xAB);
+}
+
+
+void Assembler::stos() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xAB);
+}
+
+
+void Assembler::xchg(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
+ EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
+ } else {
+ EMIT(0x87);
+ EMIT(0xC0 | src.code() << 3 | dst.code());
+ }
+}
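+
+// The single-byte form 0x90 | reg is xchg eax, reg; 0x90 itself, i.e.
+// xchg eax, eax, doubles as the one-byte nop emitted by Assembler::nop().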
+
+
+void Assembler::adc(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ emit_arith(2, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::adc(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x13);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::add(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x03);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::add(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x01);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::add(const Operand& dst, const Immediate& x) {
+ ASSERT(reloc_info_writer.last_pc() != NULL);
+ EnsureSpace ensure_space(this);
+ emit_arith(0, dst, x);
+}
+
+
+void Assembler::and_(Register dst, int32_t imm32) {
+ and_(dst, Immediate(imm32));
+}
+
+
+void Assembler::and_(Register dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(4, Operand(dst), x);
+}
+
+
+void Assembler::and_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x23);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::and_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(4, dst, x);
+}
+
+
+void Assembler::and_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x21);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::cmpb(const Operand& op, int8_t imm8) {
+ EnsureSpace ensure_space(this);
+ if (op.is_reg(eax)) {
+ EMIT(0x3C);
+ } else {
+ EMIT(0x80);
+ emit_operand(edi, op); // edi == 7
+ }
+ EMIT(imm8);
+}
+
+
+void Assembler::cmpb(const Operand& op, Register reg) {
+ CHECK(reg.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x38);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::cmpb(Register reg, const Operand& op) {
+ CHECK(reg.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x3A);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::cmpw(const Operand& op, Immediate imm16) {
+ ASSERT(imm16.is_int16());
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x81);
+ emit_operand(edi, op);
+ emit_w(imm16);
+}
+
+
+void Assembler::cmp(Register reg, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ emit_arith(7, Operand(reg), Immediate(imm32));
+}
+
+
+void Assembler::cmp(Register reg, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ emit_arith(7, Operand(reg), Immediate(handle));
+}
+
+
+void Assembler::cmp(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x3B);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::cmp(const Operand& op, const Immediate& imm) {
+ EnsureSpace ensure_space(this);
+ emit_arith(7, op, imm);
+}
+
+
+void Assembler::cmp(const Operand& op, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ emit_arith(7, op, Immediate(handle));
+}
+
+
+void Assembler::cmpb_al(const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x38); // CMP r/m8, r8
+ emit_operand(eax, op); // eax has same code as register al.
+}
+
+
+void Assembler::cmpw_ax(const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x39); // CMP r/m16, r16
+ emit_operand(eax, op); // eax has same code as register ax.
+}
+
+
+void Assembler::dec_b(Register dst) {
+ CHECK(dst.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0xFE);
+ EMIT(0xC8 | dst.code());
+}
+
+
+void Assembler::dec_b(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFE);
+ emit_operand(ecx, dst);
+}
+
+
+void Assembler::dec(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x48 | dst.code());
+}
+
+
+void Assembler::dec(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(ecx, dst);
+}
+
+
+void Assembler::cdq() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x99);
+}
+
+
+void Assembler::idiv(Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xF8 | src.code());
+}
+
+
+void Assembler::imul(Register reg) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xE8 | reg.code());
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAF);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::imul(Register dst, Register src, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ if (is_int8(imm32)) {
+ EMIT(0x6B);
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+ EMIT(imm32);
+ } else {
+ EMIT(0x69);
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+ emit(imm32);
+ }
+}
+
+
+void Assembler::inc(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x40 | dst.code());
+}
+
+
+void Assembler::inc(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(eax, dst);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x8D);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mul(Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xE0 | src.code());
+}
+
+
+void Assembler::neg(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xD8 | dst.code());
+}
+
+
+void Assembler::not_(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xD0 | dst.code());
+}
+
+
+void Assembler::or_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ emit_arith(1, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::or_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::or_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(1, dst, x);
+}
+
+
+void Assembler::or_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x09);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xD0 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xD0 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::rcr(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xD8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xD8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::ror(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xC8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xC8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::ror_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xC8 | dst.code());
+}
+
+
+void Assembler::sar(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xF8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xF8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::sar_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xF8 | dst.code());
+}
+
+
+void Assembler::sbb(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x1B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shld(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xA5);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xE0 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xE0 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::shl_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xE0 | dst.code());
+}
+
+
+void Assembler::shrd(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAD);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shr(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xE8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xE8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::shr_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xE8 | dst.code());
+}
+
+
+void Assembler::sub(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(5, dst, x);
+}
+
+
+void Assembler::sub(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x2B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::sub(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x29);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::test(Register reg, const Immediate& imm) {
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ test_b(reg, imm.x_);
+ return;
+ }
+
+ EnsureSpace ensure_space(this);
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ }
+ emit(imm);
+}
+
+
+void Assembler::test(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x85);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::test_b(Register reg, const Operand& op) {
+ CHECK(reg.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x84);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::test(const Operand& op, const Immediate& imm) {
+ if (op.is_reg_only()) {
+ test(op.reg(), imm);
+ return;
+ }
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ return test_b(op, imm.x_);
+ }
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ emit_operand(eax, op);
+ emit(imm);
+}
+
+
+void Assembler::test_b(Register reg, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else if (reg.is_byte_register()) {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ emit(imm8);
+ }
+}
+
+
+void Assembler::test_b(const Operand& op, uint8_t imm8) {
+ if (op.is_reg_only()) {
+ test_b(op.reg(), imm8);
+ return;
+ }
+ EnsureSpace ensure_space(this);
+ EMIT(0xF6);
+ emit_operand(eax, op);
+ EMIT(imm8);
+}
+
+
+void Assembler::xor_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ emit_arith(6, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::xor_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x33);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x31);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::xor_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(6, dst, x);
+}
+
+
+void Assembler::bt(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xA3);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::bts(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAB);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::bsr(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBD);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::hlt() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF4);
+}
+
+
+void Assembler::int3() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xCC);
+}
+
+
+void Assembler::nop() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x90);
+}
+
+
+void Assembler::ret(int imm16) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint16(imm16));
+ if (imm16 == 0) {
+ EMIT(0xC3);
+ } else {
+ EMIT(0xC2);
+ EMIT(imm16 & 0xFF);
+ EMIT((imm16 >> 8) & 0xFF);
+ }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the 32-bit
+// Displacement of the last instruction using the label.
+
+
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L;
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ Displacement disp = disp_at(&l);
+ PrintF("@ %d ", l.pos());
+ disp.print();
+ PrintF("\n");
+ disp.next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ EnsureSpace ensure_space(this);
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ while (L->is_linked()) {
+ Displacement disp = disp_at(L);
+ int fixup_pos = L->pos();
+ if (disp.type() == Displacement::CODE_RELATIVE) {
+ // Relative to Code* heap object pointer.
+ long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
+ ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
+ }
+ // Relative address, relative to point after address.
+ int imm32 = pos - (fixup_pos + sizeof(int32_t));
+ long_at_put(fixup_pos, imm32);
+ }
+ disp.next(L);
+ }
+ while (L->is_near_linked()) {
+ int fixup_pos = L->near_link_pos();
+ int offset_to_next =
+ static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
+ ASSERT(offset_to_next <= 0);
+ // Relative address, relative to point after address.
+ int disp = pos - fixup_pos - sizeof(int8_t);
+ CHECK(0 <= disp && disp <= 127);
+ set_byte_at(fixup_pos, disp);
+ if (offset_to_next < 0) {
+ L->link_to(fixup_pos + offset_to_next, Label::kNear);
+ } else {
+ L->UnuseNear();
+ }
+ }
+ L->bind_to(pos);
+}
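+
+// Each unbound far use of a label keeps a Displacement in its 32-bit slot
+// that records the position of the previous use, so the uses form a chain
+// that bind_to() walks and patches: CODE_RELATIVE fixups get an offset from
+// the tagged Code pointer, everything else a PC-relative displacement.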
+
+
+void Assembler::bind(Label* L) {
+ EnsureSpace ensure_space(this);
+ ASSERT(!L->is_bound()); // label can only be bound once
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::call(Label* L) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ if (L->is_bound()) {
+ const int long_size = 5;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ // 1110 1000 #32-bit disp.
+ EMIT(0xE8);
+ emit(offs - long_size);
+ } else {
+ // 1110 1000 #32-bit disp.
+ EMIT(0xE8);
+ emit_disp(L, Displacement::OTHER);
+ }
+}
+
+
+void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ EMIT(0xE8);
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ emit(reinterpret_cast<uint32_t>(entry), rmode);
+ } else {
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ }
+}
+
+
+int Assembler::CallSize(const Operand& adr) {
+ // Call size is 1 (opcode) + adr.len_ (operand).
+ return 1 + adr.len_;
+}
+
+
+void Assembler::call(const Operand& adr) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(edx, adr);
+}
+
+
+int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
+ return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
+}
+
+
+void Assembler::call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ ASSERT(RelocInfo::IsCodeTarget(rmode)
+ || rmode == RelocInfo::CODE_AGE_SEQUENCE);
+ EMIT(0xE8);
+ emit(code, rmode, ast_id);
+}
+
+
+void Assembler::jmp(Label* L, Label::Distance distance) {
+ EnsureSpace ensure_space(this);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ const int long_size = 5;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ if (is_int8(offs - short_size)) {
+ // 1110 1011 #8-bit disp.
+ EMIT(0xEB);
+ EMIT((offs - short_size) & 0xFF);
+ } else {
+ // 1110 1001 #32-bit disp.
+ EMIT(0xE9);
+ emit(offs - long_size);
+ }
+ } else if (distance == Label::kNear) {
+ EMIT(0xEB);
+ emit_near_disp(L);
+ } else {
+ // 1110 1001 #32-bit disp.
+ EMIT(0xE9);
+ emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
+ }
+}
+
+
+void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ EMIT(0xE9);
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ emit(reinterpret_cast<uint32_t>(entry), rmode);
+ } else {
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ }
+}
+
+
+void Assembler::jmp(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(esp, adr);
+}
+
+
+void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ EMIT(0xE9);
+ emit(code, rmode);
+}
+
+
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
+ EnsureSpace ensure_space(this);
+ ASSERT(0 <= cc && static_cast<int>(cc) < 16);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ const int long_size = 6;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ if (is_int8(offs - short_size)) {
+ // 0111 tttn #8-bit disp
+ EMIT(0x70 | cc);
+ EMIT((offs - short_size) & 0xFF);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit(offs - long_size);
+ }
+ } else if (distance == Label::kNear) {
+ EMIT(0x70 | cc);
+ emit_near_disp(L);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ // Note: we could eliminate conditional jumps to this jump if the
+ // condition is the same; however, that seems to be a rather unlikely case.
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit_disp(L, Displacement::OTHER);
+ }
+}
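+
+// Backward jumps to bound labels use the 2-byte short form when the
+// displacement fits in 8 bits, and the 6-byte 0F 8x form otherwise; unbound
+// far uses go through the Displacement chain, unbound near uses through the
+// 8-bit chain maintained by emit_near_disp().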
+
+
+void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
+ // 0000 1111 1000 tttn #32-bit disp.
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ emit(reinterpret_cast<uint32_t>(entry), rmode);
+ } else {
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ }
+}
+
+
+void Assembler::j(Condition cc, Handle<Code> code) {
+ EnsureSpace ensure_space(this);
+ // 0000 1111 1000 tttn #32-bit disp
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit(code, RelocInfo::CODE_TARGET);
+}
+
+
+// FPU instructions.
+
+void Assembler::fld(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD9, 0xC0, i);
+}
+
+
+void Assembler::fstp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDD, 0xD8, i);
+}
+
+
+void Assembler::fld1() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xE8);
+}
+
+
+void Assembler::fldpi() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xEB);
+}
+
+
+void Assembler::fldz() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xEE);
+}
+
+
+void Assembler::fldln2() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xED);
+}
+
+
+void Assembler::fld_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(eax, adr);
+}
+
+
+void Assembler::fld_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(eax, adr);
+}
+
+
+void Assembler::fstp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(ebx, adr);
+}
+
+
+void Assembler::fst_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(edx, adr);
+}
+
+
+void Assembler::fstp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(ebx, adr);
+}
+
+
+void Assembler::fst_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(edx, adr);
+}
+
+
+void Assembler::fild_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ emit_operand(eax, adr);
+}
+
+
+void Assembler::fild_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDF);
+ emit_operand(ebp, adr);
+}
+
+
+void Assembler::fistp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ emit_operand(ebx, adr);
+}
+
+
+void Assembler::fisttp_s(const Operand& adr) {
+ ASSERT(IsEnabled(SSE3));
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ emit_operand(ecx, adr);
+}
+
+
+void Assembler::fisttp_d(const Operand& adr) {
+ ASSERT(IsEnabled(SSE3));
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(ecx, adr);
+}
+
+
+void Assembler::fist_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ emit_operand(edx, adr);
+}
+
+
+void Assembler::fistp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDF);
+ emit_operand(edi, adr);
+}
+
+
+void Assembler::fabs() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xE1);
+}
+
+
+void Assembler::fchs() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xE0);
+}
+
+
+void Assembler::fcos() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFF);
+}
+
+
+void Assembler::fsin() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFE);
+}
+
+
+void Assembler::fptan() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF2);
+}
+
+
+void Assembler::fyl2x() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF1);
+}
+
+
+void Assembler::f2xm1() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF0);
+}
+
+
+void Assembler::fscale() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFD);
+}
+
+
+void Assembler::fninit() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ EMIT(0xE3);
+}
+
+
+void Assembler::fadd(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDC, 0xC0, i);
+}
+
+
+void Assembler::fadd_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC0, i);
+}
+
+
+void Assembler::fsub(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDC, 0xE8, i);
+}
+
+
+void Assembler::fsub_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xE0, i);
+}
+
+
+void Assembler::fisub_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDA);
+ emit_operand(esp, adr);
+}
+
+
+void Assembler::fmul_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC8, i);
+}
+
+
+void Assembler::fmul(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDC, 0xC8, i);
+}
+
+
+void Assembler::fdiv(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDC, 0xF8, i);
+}
+
+
+void Assembler::fdiv_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xF0, i);
+}
+
+
+void Assembler::faddp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xC0, i);
+}
+
+
+void Assembler::fsubp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xE8, i);
+}
+
+
+void Assembler::fsubrp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xE0, i);
+}
+
+
+void Assembler::fmulp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xC8, i);
+}
+
+
+void Assembler::fdivp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xF8, i);
+}
+
+
+void Assembler::fprem() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF8);
+}
+
+
+void Assembler::fprem1() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF5);
+}
+
+
+void Assembler::fxch(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD9, 0xC8, i);
+}
+
+
+void Assembler::fincstp() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF7);
+}
+
+
+void Assembler::ffree(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDD, 0xC0, i);
+}
+
+
+void Assembler::ftst() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xE4);
+}
+
+
+void Assembler::fucomp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDD, 0xE8, i);
+}
+
+
+void Assembler::fucompp() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDA);
+ EMIT(0xE9);
+}
+
+
+void Assembler::fucomi(int i) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ EMIT(0xE8 + i);
+}
+
+
+void Assembler::fucomip() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDF);
+ EMIT(0xE9);
+}
+
+
+void Assembler::fcompp() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDE);
+ EMIT(0xD9);
+}
+
+
+void Assembler::fnstsw_ax() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDF);
+ EMIT(0xE0);
+}
+
+
+void Assembler::fwait() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x9B);
+}
+
+
+void Assembler::frndint() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFC);
+}
+
+
+void Assembler::fnclex() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ EMIT(0xE2);
+}
+
+
+void Assembler::sahf() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x9E);
+}
+
+
+void Assembler::setcc(Condition cc, Register reg) {
+ ASSERT(reg.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x90 | cc);
+ EMIT(0xC0 | reg.code());
+}
+
+
+void Assembler::Print() {
+ Disassembler::Decode(isolate(), stdout, buffer_, pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordComment(const char* msg, bool force) {
+ if (FLAG_code_comments || force) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::GrowBuffer() {
+ ASSERT(buffer_overflow());
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else {
+ desc.buffer_size = 2*buffer_size_;
+ }
+ // Some internal data structures overflow for very large buffers, so
+ // kMaximalBufferSize must be kept small enough to avoid that.
+ if ((desc.buffer_size > kMaximalBufferSize) ||
+ (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+  // Clear the buffer in debug mode. Fill it with 'int3' instructions so
+  // that any attempt to execute uninitialized code traps immediately.
+#ifdef DEBUG
+ memset(desc.buffer, 0xCC, desc.buffer_size);
+#endif
+
+ // Copy the data.
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ MemMove(desc.buffer, buffer_, desc.instr_size);
+ MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(),
+ desc.reloc_size);
+
+ // Switch buffers.
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+  // Relocate internal references.
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ if (*p != 0) { // 0 means uninitialized.
+ *p += pc_delta;
+ }
+ }
+ }
+
+ ASSERT(!buffer_overflow());
+}
+
+
+void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
+ ASSERT(is_uint8(op1) && is_uint8(op2)); // wrong opcode
+ ASSERT(is_uint8(imm8));
+ ASSERT((op1 & 0x01) == 0); // should be 8bit operation
+ EMIT(op1);
+ EMIT(op2 | dst.code());
+ EMIT(imm8);
+}
+
+
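+// With sel == 0 (the x86 /0 group, used for add) this picks the shortest of
+// three encodings:
+//   add(ebx, Immediate(16))      -> 83 C3 10         (sign-extended imm8)
+//   add(eax, Immediate(0x1234))  -> 05 34 12 00 00   (eax short form)
+//   add(ebx, Immediate(0x1234))  -> 81 C3 34 12 00 00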
+void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
+ ASSERT((0 <= sel) && (sel <= 7));
+ Register ireg = { sel };
+ if (x.is_int8()) {
+ EMIT(0x83); // using a sign-extended 8-bit immediate.
+ emit_operand(ireg, dst);
+ EMIT(x.x_ & 0xFF);
+ } else if (dst.is_reg(eax)) {
+ EMIT((sel << 3) | 0x05); // short form if the destination is eax.
+ emit(x);
+ } else {
+ EMIT(0x81); // using a literal 32-bit immediate.
+ emit_operand(ireg, dst);
+ emit(x);
+ }
+}
+
+
+void Assembler::emit_operand(Register reg, const Operand& adr) {
+ const unsigned length = adr.len_;
+ ASSERT(length > 0);
+
+ // Emit updated ModRM byte containing the given register.
+ pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
+
+ // Emit the rest of the encoded operand.
+ for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
+ pc_ += length;
+
+ // Emit relocation information if necessary.
+ if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
+ pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
+ RecordRelocInfo(adr.rmode_);
+ pc_ += sizeof(int32_t);
+ }
+}
+
+
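+// For example, faddp(1) above becomes emit_farith(0xDE, 0xC0, 1) and emits
+// DE C1, i.e. "faddp st(1), st".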
+void Assembler::emit_farith(int b1, int b2, int i) {
+ ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
+ ASSERT(0 <= i && i < 8); // illegal stack offset
+ EMIT(b1);
+ EMIT(b2 + i);
+}
+
+
+void Assembler::db(uint8_t data) {
+ EnsureSpace ensure_space(this);
+ EMIT(data);
+}
+
+
+void Assembler::dd(uint32_t data) {
+ EnsureSpace ensure_space(this);
+ emit(data);
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ ASSERT(!RelocInfo::IsNone(rmode));
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
+ }
+ RelocInfo rinfo(pc_, rmode, data, NULL);
+ reloc_info_writer.Write(&rinfo);
+}
+
+
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitCoverageLog() {
+ char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+ if (file_name != NULL) {
+ coverage_log = fopen(file_name, "aw+");
+ }
+}
+
+
+void LogGeneratedCodeCoverage(const char* file_line) {
+ const char* return_address = (&file_line)[-1];
+ char* push_insn = const_cast<char*>(return_address - 12);
+ push_insn[0] = 0xeb; // Relative branch insn.
+ push_insn[1] = 13; // Skip over coverage insns.
+ if (coverage_log != NULL) {
+ fprintf(coverage_log, "%s\n", file_line);
+ fflush(coverage_log);
+ }
+}
+
+#endif
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/assembler-x87.h b/chromium/v8/src/x87/assembler-x87.h
new file mode 100644
index 00000000000..162416735b0
--- /dev/null
+++ b/chromium/v8/src/x87/assembler-x87.h
@@ -0,0 +1,1031 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2011 the V8 project authors. All rights reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_X87_ASSEMBLER_X87_H_
+#define V8_X87_ASSEMBLER_X87_H_
+
+#include "src/isolate.h"
+#include "src/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+struct Register {
+ static const int kMaxNumAllocatableRegisters = 6;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
+ static const int kNumRegisters = 8;
+
+ static inline const char* AllocationIndexToString(int index);
+
+ static inline int ToAllocationIndex(Register reg);
+
+ static inline Register FromAllocationIndex(int index);
+
+ static Register from_code(int code) {
+ ASSERT(code >= 0);
+ ASSERT(code < kNumRegisters);
+ Register r = { code };
+ return r;
+ }
+ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ // eax, ebx, ecx and edx are byte registers, the rest are not.
+ bool is_byte_register() const { return code_ <= 3; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
+const int kRegister_eax_Code = 0;
+const int kRegister_ecx_Code = 1;
+const int kRegister_edx_Code = 2;
+const int kRegister_ebx_Code = 3;
+const int kRegister_esp_Code = 4;
+const int kRegister_ebp_Code = 5;
+const int kRegister_esi_Code = 6;
+const int kRegister_edi_Code = 7;
+const int kRegister_no_reg_Code = -1;
+
+const Register eax = { kRegister_eax_Code };
+const Register ecx = { kRegister_ecx_Code };
+const Register edx = { kRegister_edx_Code };
+const Register ebx = { kRegister_ebx_Code };
+const Register esp = { kRegister_esp_Code };
+const Register ebp = { kRegister_ebp_Code };
+const Register esi = { kRegister_esi_Code };
+const Register edi = { kRegister_edi_Code };
+const Register no_reg = { kRegister_no_reg_Code };
+
+
+inline const char* Register::AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ // This is the mapping of allocation indices to registers.
+ const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
+ return kNames[index];
+}
+
+
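+// esp and ebp are not allocatable, so esi (code 6) and edi (code 7) map to
+// allocation indices 4 and 5, while eax through ebx keep their own codes.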
+inline int Register::ToAllocationIndex(Register reg) {
+ ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
+ return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
+}
+
+
+inline Register Register::FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ return (index >= 4) ? from_code(index + 2) : from_code(index);
+}
+
+
+struct X87Register {
+ static const int kMaxNumAllocatableRegisters = 8;
+ static const int kMaxNumRegisters = 8;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
+
+ static int ToAllocationIndex(X87Register reg) {
+ return reg.code_;
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "stX_0", "stX_1", "stX_2", "stX_3", "stX_4",
+ "stX_5", "stX_6", "stX_7"
+ };
+ return names[index];
+ }
+
+ static X87Register FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ X87Register result;
+ result.code_ = index;
+ return result;
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kMaxNumRegisters;
+ }
+
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ bool is(X87Register reg) const {
+ return code_ == reg.code_;
+ }
+
+ int code_;
+};
+
+
+typedef X87Register DoubleRegister;
+
+
+const X87Register stX_0 = { 0 };
+const X87Register stX_1 = { 1 };
+const X87Register stX_2 = { 2 };
+const X87Register stX_3 = { 3 };
+const X87Register stX_4 = { 4 };
+const X87Register stX_5 = { 5 };
+const X87Register stX_6 = { 6 };
+const X87Register stX_7 = { 7 };
+
+
+enum Condition {
+ // any value < 0 is considered no_condition
+ no_condition = -1,
+
+ overflow = 0,
+ no_overflow = 1,
+ below = 2,
+ above_equal = 3,
+ equal = 4,
+ not_equal = 5,
+ below_equal = 6,
+ above = 7,
+ negative = 8,
+ positive = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
+ greater_equal = 13,
+ less_equal = 14,
+ greater = 15,
+
+ // aliases
+ carry = below,
+ not_carry = above_equal,
+ zero = equal,
+ not_zero = not_equal,
+ sign = negative,
+ not_sign = positive
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
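+// For example, NegateCondition(equal) == not_equal (4 ^ 1 == 5) and
+// NegateCondition(below) == above_equal (2 ^ 1 == 3).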
+inline Condition NegateCondition(Condition cc) {
+ return static_cast<Condition>(cc ^ 1);
+}
+
+
+// Commute a condition such that {a cond b == b cond' a}.
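+// For example, since (a < b) == (b > a), CommuteCondition(less) == greater;
+// symmetric conditions such as equal fall through unchanged.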
+inline Condition CommuteCondition(Condition cc) {
+ switch (cc) {
+ case below:
+ return above;
+ case above:
+ return below;
+ case above_equal:
+ return below_equal;
+ case below_equal:
+ return above_equal;
+ case less:
+ return greater;
+ case greater:
+ return less;
+ case greater_equal:
+ return less_equal;
+ case less_equal:
+ return greater_equal;
+ default:
+ return cc;
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Immediates
+
+class Immediate BASE_EMBEDDED {
+ public:
+ inline explicit Immediate(int x);
+ inline explicit Immediate(const ExternalReference& ext);
+ inline explicit Immediate(Handle<Object> handle);
+ inline explicit Immediate(Smi* value);
+ inline explicit Immediate(Address addr);
+
+ static Immediate CodeRelativeOffset(Label* label) {
+ return Immediate(label);
+ }
+
+ bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); }
+ bool is_int8() const {
+ return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
+ }
+ bool is_int16() const {
+ return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
+ }
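+  // For example, Immediate(-128).is_int8() is true, Immediate(128).is_int8()
+  // is false, and an immediate carrying relocation info satisfies none of
+  // these predicates.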
+
+ private:
+ inline explicit Immediate(Label* value);
+
+ int x_;
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+enum ScaleFactor {
+ times_1 = 0,
+ times_2 = 1,
+ times_4 = 2,
+ times_8 = 3,
+ times_int_size = times_4,
+ times_half_pointer_size = times_2,
+ times_pointer_size = times_4,
+ times_twice_pointer_size = times_8
+};
+
+
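+// An Operand combines a base register, an optional scaled index and a
+// displacement; e.g. Operand(ebx, ecx, times_4, 0) addresses [ebx + ecx*4]
+// and Operand(ebp, -4) addresses [ebp - 4].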
+class Operand BASE_EMBEDDED {
+ public:
+ // [disp/r]
+ INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
+  // A disp-only operand must always be relocated.
+
+ // [base + disp/r]
+ explicit Operand(Register base, int32_t disp,
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
+
+ // [base + index*scale + disp/r]
+ explicit Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
+
+ // [index*scale + disp/r]
+ explicit Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
+
+ static Operand StaticVariable(const ExternalReference& ext) {
+ return Operand(reinterpret_cast<int32_t>(ext.address()),
+ RelocInfo::EXTERNAL_REFERENCE);
+ }
+
+ static Operand StaticArray(Register index,
+ ScaleFactor scale,
+ const ExternalReference& arr) {
+ return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
+ RelocInfo::EXTERNAL_REFERENCE);
+ }
+
+ static Operand ForCell(Handle<Cell> cell) {
+ AllowDeferredHandleDereference embedding_raw_address;
+ return Operand(reinterpret_cast<int32_t>(cell.location()),
+ RelocInfo::CELL);
+ }
+
+ // Returns true if this Operand is a wrapper for the specified register.
+ bool is_reg(Register reg) const;
+
+ // Returns true if this Operand is a wrapper for one register.
+ bool is_reg_only() const;
+
+ // Asserts that this Operand is a wrapper for one register and returns the
+ // register.
+ Register reg() const;
+
+ private:
+ // reg
+ INLINE(explicit Operand(Register reg));
+
+ // Set the ModRM byte without an encoded 'reg' register. The
+ // register is encoded later as part of the emit_operand operation.
+ inline void set_modrm(int mod, Register rm);
+
+ inline void set_sib(ScaleFactor scale, Register index, Register base);
+ inline void set_disp8(int8_t disp);
+ inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
+
+ byte buf_[6];
+ // The number of bytes in buf_.
+ unsigned int len_;
+ // Only valid if len_ > 4.
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+ friend class LCodeGen;
+};
+
+
+// -----------------------------------------------------------------------------
+// A Displacement describes the 32bit immediate field of an instruction which
+// may be used together with a Label in order to refer to a yet unknown code
+// position. Displacements stored in the instruction stream are used to describe
+// the instruction and to chain a list of instructions using the same Label.
+// A Displacement contains 2 different fields:
+//
+// next field: position of next displacement in the chain (0 = end of list)
+// type field: instruction type
+//
+// A next value of null (0) indicates the end of a chain (note that there can
+// be no displacement at position zero, because there is always at least one
+// instruction byte before the displacement).
+//
+// Displacement _data field layout
+//
+// |31.....2|1......0|
+// |  next  |  type  |
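+//
+// For example, a displacement of type CODE_RELATIVE (1) whose next link is at
+// position 8 has data() == (8 << 2) | 1 == 0x21; a next field of 0 ends the
+// chain.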
+
+class Displacement BASE_EMBEDDED {
+ public:
+ enum Type {
+ UNCONDITIONAL_JUMP,
+ CODE_RELATIVE,
+ OTHER
+ };
+
+ int data() const { return data_; }
+ Type type() const { return TypeField::decode(data_); }
+ void next(Label* L) const {
+ int n = NextField::decode(data_);
+ n > 0 ? L->link_to(n) : L->Unuse();
+ }
+ void link_to(Label* L) { init(L, type()); }
+
+ explicit Displacement(int data) { data_ = data; }
+
+ Displacement(Label* L, Type type) { init(L, type); }
+
+ void print() {
+ PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
+ NextField::decode(data_));
+ }
+
+ private:
+ int data_;
+
+ class TypeField: public BitField<Type, 0, 2> {};
+ class NextField: public BitField<int, 2, 32-2> {};
+
+ void init(Label* L, Type type);
+};
+
+
+class Assembler : public AssemblerBase {
+ private:
+ // We check before assembling an instruction that there is sufficient
+ // space to write an instruction and its relocation information.
+ // The relocation writer's position must be kGap bytes above the end of
+ // the generated instructions. This leaves enough space for the
+ // longest possible ia32 instruction, 15 bytes, and the longest possible
+ // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
+ // (There is a 15 byte limit on ia32 instruction length that rules out some
+ // otherwise valid instructions.)
+ // This allows for a single, fast space check per instruction.
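+  // (The worst case is 15 + 16 == 31 bytes, which kGap == 32 still covers.)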
+ static const int kGap = 32;
+
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ // TODO(vitalyr): the assembler does not need an isolate.
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
+ virtual ~Assembler() { }
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Read/Modify the code target in the branch/call instruction at pc.
+ inline static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ inline static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
+
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ // This sets the branch destination (which is in the instruction on x86).
+ // This is for calls and branches within generated code.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
+ }
+
+ static const int kSpecialTargetSize = kPointerSize;
+
+ // Distance between the address of the code target in the call instruction
+ // and the return address
+ static const int kCallTargetAddressOffset = kPointerSize;
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
+
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
+ static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
+
+ static const int kCallInstructionLength = 5;
+ static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
+ static const int kJSReturnSequenceLength = 6;
+
+ // The debug break slot must be able to contain a call instruction.
+ static const int kDebugBreakSlotLength = kCallInstructionLength;
+
+ // One byte opcode for test al, 0xXX.
+ static const byte kTestAlByte = 0xA8;
+ // One byte opcode for nop.
+ static const byte kNopByte = 0x90;
+
+ // One byte opcode for a short unconditional jump.
+ static const byte kJmpShortOpcode = 0xEB;
+ // One byte prefix for a short conditional jump.
+ static const byte kJccShortPrefix = 0x70;
+ static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
+ static const byte kJcShortOpcode = kJccShortPrefix | carry;
+ static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+ static const byte kJzShortOpcode = kJccShortPrefix | zero;
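+  // With the condition codes above, kJzShortOpcode == 0x74 and
+  // kJnzShortOpcode == 0x75, for example.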
+
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+ //
+ // - function names correspond one-to-one to ia32 instruction mnemonics
+ // - unless specified otherwise, instructions operate on 32bit operands
+ // - instructions on 8bit (byte) operands/registers have a trailing '_b'
+ // - instructions on 16bit (word) operands/registers have a trailing '_w'
+ // - naming conflicts with C++ keywords are resolved via a trailing '_'
+
+ // NOTE ON INTERFACE: Currently, the interface is not very consistent
+ // in the sense that some operations (e.g. mov()) can be called in more
+  // than one way to generate the same instruction: the Register argument
+  // can in some cases be replaced with an Operand(Register) argument.
+  // This should be cleaned up and made more orthogonal. The question
+ // is: should we always use Operands instead of Registers where an
+ // Operand is possible, or should we have a Register (overloaded) form
+ // instead? We must be careful to make sure that the selected instruction
+ // is obvious from the parameters to avoid hard-to-find code generation
+ // bugs.
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2.
+ void Align(int m);
+ void Nop(int bytes = 1);
+  // Aligns code to whatever is optimal for a jump target on this platform.
+ void CodeTargetAlign();
+
+ // Stack
+ void pushad();
+ void popad();
+
+ void pushfd();
+ void popfd();
+
+ void push(const Immediate& x);
+ void push_imm32(int32_t imm32);
+ void push(Register src);
+ void push(const Operand& src);
+
+ void pop(Register dst);
+ void pop(const Operand& dst);
+
+ void enter(const Immediate& size);
+ void leave();
+
+ // Moves
+ void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
+ void mov_b(Register dst, const Operand& src);
+ void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
+ void mov_b(const Operand& dst, int8_t imm8);
+ void mov_b(const Operand& dst, Register src);
+
+ void mov_w(Register dst, const Operand& src);
+ void mov_w(const Operand& dst, Register src);
+ void mov_w(const Operand& dst, int16_t imm16);
+
+ void mov(Register dst, int32_t imm32);
+ void mov(Register dst, const Immediate& x);
+ void mov(Register dst, Handle<Object> handle);
+ void mov(Register dst, const Operand& src);
+ void mov(Register dst, Register src);
+ void mov(const Operand& dst, const Immediate& x);
+ void mov(const Operand& dst, Handle<Object> handle);
+ void mov(const Operand& dst, Register src);
+
+ void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
+ void movsx_b(Register dst, const Operand& src);
+
+ void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
+ void movsx_w(Register dst, const Operand& src);
+
+ void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
+ void movzx_b(Register dst, const Operand& src);
+
+ void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
+ void movzx_w(Register dst, const Operand& src);
+
+ // Flag management.
+ void cld();
+
+ // Repetitive string instructions.
+ void rep_movs();
+ void rep_stos();
+ void stos();
+
+ // Exchange two registers
+ void xchg(Register dst, Register src);
+
+ // Arithmetics
+ void adc(Register dst, int32_t imm32);
+ void adc(Register dst, const Operand& src);
+
+ void add(Register dst, Register src) { add(dst, Operand(src)); }
+ void add(Register dst, const Operand& src);
+ void add(const Operand& dst, Register src);
+ void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
+ void add(const Operand& dst, const Immediate& x);
+
+ void and_(Register dst, int32_t imm32);
+ void and_(Register dst, const Immediate& x);
+ void and_(Register dst, Register src) { and_(dst, Operand(src)); }
+ void and_(Register dst, const Operand& src);
+ void and_(const Operand& dst, Register src);
+ void and_(const Operand& dst, const Immediate& x);
+
+ void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
+ void cmpb(const Operand& op, int8_t imm8);
+ void cmpb(Register reg, const Operand& op);
+ void cmpb(const Operand& op, Register reg);
+ void cmpb_al(const Operand& op);
+ void cmpw_ax(const Operand& op);
+ void cmpw(const Operand& op, Immediate imm16);
+ void cmp(Register reg, int32_t imm32);
+ void cmp(Register reg, Handle<Object> handle);
+ void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
+ void cmp(Register reg, const Operand& op);
+ void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
+ void cmp(const Operand& op, const Immediate& imm);
+ void cmp(const Operand& op, Handle<Object> handle);
+
+ void dec_b(Register dst);
+ void dec_b(const Operand& dst);
+
+ void dec(Register dst);
+ void dec(const Operand& dst);
+
+ void cdq();
+
+ void idiv(Register src);
+
+ // Signed multiply instructions.
+ void imul(Register src); // edx:eax = eax * src.
+ void imul(Register dst, Register src) { imul(dst, Operand(src)); }
+ void imul(Register dst, const Operand& src); // dst = dst * src.
+ void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
+
+ void inc(Register dst);
+ void inc(const Operand& dst);
+
+ void lea(Register dst, const Operand& src);
+
+ // Unsigned multiply instruction.
+ void mul(Register src); // edx:eax = eax * reg.
+
+ void neg(Register dst);
+
+ void not_(Register dst);
+
+ void or_(Register dst, int32_t imm32);
+ void or_(Register dst, Register src) { or_(dst, Operand(src)); }
+ void or_(Register dst, const Operand& src);
+ void or_(const Operand& dst, Register src);
+ void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
+ void or_(const Operand& dst, const Immediate& x);
+
+ void rcl(Register dst, uint8_t imm8);
+ void rcr(Register dst, uint8_t imm8);
+ void ror(Register dst, uint8_t imm8);
+ void ror_cl(Register dst);
+
+ void sar(Register dst, uint8_t imm8);
+ void sar_cl(Register dst);
+
+ void sbb(Register dst, const Operand& src);
+
+ void shld(Register dst, Register src) { shld(dst, Operand(src)); }
+ void shld(Register dst, const Operand& src);
+
+ void shl(Register dst, uint8_t imm8);
+ void shl_cl(Register dst);
+
+ void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
+ void shrd(Register dst, const Operand& src);
+
+ void shr(Register dst, uint8_t imm8);
+ void shr_cl(Register dst);
+
+ void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
+ void sub(const Operand& dst, const Immediate& x);
+ void sub(Register dst, Register src) { sub(dst, Operand(src)); }
+ void sub(Register dst, const Operand& src);
+ void sub(const Operand& dst, Register src);
+
+ void test(Register reg, const Immediate& imm);
+ void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
+ void test(Register reg, const Operand& op);
+ void test_b(Register reg, const Operand& op);
+ void test(const Operand& op, const Immediate& imm);
+ void test_b(Register reg, uint8_t imm8);
+ void test_b(const Operand& op, uint8_t imm8);
+
+ void xor_(Register dst, int32_t imm32);
+ void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
+ void xor_(Register dst, const Operand& src);
+ void xor_(const Operand& dst, Register src);
+ void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
+ void xor_(const Operand& dst, const Immediate& x);
+
+ // Bit operations.
+ void bt(const Operand& dst, Register src);
+ void bts(Register dst, Register src) { bts(Operand(dst), src); }
+ void bts(const Operand& dst, Register src);
+ void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
+ void bsr(Register dst, const Operand& src);
+
+ // Miscellaneous
+ void hlt();
+ void int3();
+ void nop();
+ void ret(int imm16);
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Calls
+ void call(Label* L);
+ void call(byte* entry, RelocInfo::Mode rmode);
+ int CallSize(const Operand& adr);
+ void call(Register reg) { call(Operand(reg)); }
+ void call(const Operand& adr);
+ int CallSize(Handle<Code> code, RelocInfo::Mode mode);
+ void call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None());
+
+ // Jumps
+ // unconditional jump to L
+ void jmp(Label* L, Label::Distance distance = Label::kFar);
+ void jmp(byte* entry, RelocInfo::Mode rmode);
+ void jmp(Register reg) { jmp(Operand(reg)); }
+ void jmp(const Operand& adr);
+ void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+
+ // Conditional jumps
+ void j(Condition cc,
+ Label* L,
+ Label::Distance distance = Label::kFar);
+ void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
+ void j(Condition cc, Handle<Code> code);
+
+ // Floating-point operations
+ void fld(int i);
+ void fstp(int i);
+
+ void fld1();
+ void fldz();
+ void fldpi();
+ void fldln2();
+
+ void fld_s(const Operand& adr);
+ void fld_d(const Operand& adr);
+
+ void fstp_s(const Operand& adr);
+ void fst_s(const Operand& adr);
+ void fstp_d(const Operand& adr);
+ void fst_d(const Operand& adr);
+
+ void fild_s(const Operand& adr);
+ void fild_d(const Operand& adr);
+
+ void fist_s(const Operand& adr);
+
+ void fistp_s(const Operand& adr);
+ void fistp_d(const Operand& adr);
+
+ // The fisttp instructions require SSE3.
+ void fisttp_s(const Operand& adr);
+ void fisttp_d(const Operand& adr);
+
+ void fabs();
+ void fchs();
+ void fcos();
+ void fsin();
+ void fptan();
+ void fyl2x();
+ void f2xm1();
+ void fscale();
+ void fninit();
+
+ void fadd(int i);
+ void fadd_i(int i);
+ void fsub(int i);
+ void fsub_i(int i);
+ void fmul(int i);
+ void fmul_i(int i);
+ void fdiv(int i);
+ void fdiv_i(int i);
+
+ void fisub_s(const Operand& adr);
+
+ void faddp(int i = 1);
+ void fsubp(int i = 1);
+ void fsubrp(int i = 1);
+ void fmulp(int i = 1);
+ void fdivp(int i = 1);
+ void fprem();
+ void fprem1();
+
+ void fxch(int i = 1);
+ void fincstp();
+ void ffree(int i = 0);
+
+ void ftst();
+ void fucomp(int i);
+ void fucompp();
+ void fucomi(int i);
+ void fucomip();
+ void fcompp();
+ void fnstsw_ax();
+ void fwait();
+ void fnclex();
+
+ void frndint();
+
+ void sahf();
+ void setcc(Condition cc, Register reg);
+
+ void cpuid();
+
+ // TODO(lrn): Need SFENCE for movnt?
+
+ // Debugging
+ void Print();
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --code-comments to enable, or provide "force = true" flag to always
+ // write a comment.
+ void RecordComment(const char* msg, bool force = false);
+
+  // Writes a single byte or 32-bit word of data in the code stream. Used
+  // for inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data);
+
+ // Check if there is less than kGap bytes available in the buffer.
+ // If this is the case, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool buffer_overflow() const {
+ return pc_ >= reloc_info_writer.pos() - kGap;
+ }
+
+ // Get the number of bytes available in the buffer.
+ inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+ static bool IsNop(Address addr);
+
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+ int relocation_writer_size() {
+ return (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ }
+
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512*MB;
+
+ byte byte_at(int pos) { return buffer_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ protected:
+ byte* addr_at(int pos) { return buffer_ + pos; }
+
+
+ private:
+ uint32_t long_at(int pos) {
+ return *reinterpret_cast<uint32_t*>(addr_at(pos));
+ }
+ void long_at_put(int pos, uint32_t x) {
+ *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+ }
+
+ // code emission
+ void GrowBuffer();
+ inline void emit(uint32_t x);
+ inline void emit(Handle<Object> handle);
+ inline void emit(uint32_t x,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None());
+ inline void emit(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None());
+ inline void emit(const Immediate& x);
+ inline void emit_w(const Immediate& x);
+
+ // Emit the code-object-relative offset of the label's position
+ inline void emit_code_relative_offset(Label* label);
+
+ // instruction generation
+ void emit_arith_b(int op1, int op2, Register dst, int imm8);
+
+ // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
+ // with a given destination expression and an immediate operand. It attempts
+ // to use the shortest encoding possible.
+ // sel specifies the /n in the modrm byte (see the Intel PRM).
+ void emit_arith(int sel, Operand dst, const Immediate& x);
+
+ void emit_operand(Register reg, const Operand& adr);
+
+ void emit_farith(int b1, int b2, int i);
+
+ // labels
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+
+ // displacements
+ inline Displacement disp_at(Label* L);
+ inline void disp_at_put(Label* L, Displacement disp);
+ inline void emit_disp(Label* L, Displacement::Type type);
+ inline void emit_near_disp(Label* L);
+
+ // record reloc info for current pc_
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ friend class CodePatcher;
+ friend class EnsureSpace;
+
+ // code generation
+ RelocInfoWriter reloc_info_writer;
+
+ PositionsRecorder positions_recorder_;
+ friend class PositionsRecorder;
+};
+
+
+// Helper class that ensures that there is enough space for generating
+// instructions and relocation information. The constructor makes
+// sure that there is enough space and (in debug mode) the destructor
+// checks that we did not generate too much.
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+ if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
+#ifdef DEBUG
+ space_before_ = assembler_->available_space();
+#endif
+ }
+
+#ifdef DEBUG
+ ~EnsureSpace() {
+ int bytes_generated = space_before_ - assembler_->available_space();
+ ASSERT(bytes_generated < assembler_->kGap);
+ }
+#endif
+
+ private:
+ Assembler* assembler_;
+#ifdef DEBUG
+ int space_before_;
+#endif
+};
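+
+// For example, Assembler::fwait() in the matching .cc file reserves space
+// first and then emits its single opcode byte:
+//
+//   void Assembler::fwait() {
+//     EnsureSpace ensure_space(this);
+//     EMIT(0x9B);
+//   }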
+
+} } // namespace v8::internal
+
+#endif // V8_X87_ASSEMBLER_X87_H_
diff --git a/chromium/v8/src/x87/builtins-x87.cc b/chromium/v8/src/x87/builtins-x87.cc
new file mode 100644
index 00000000000..8db42d8e560
--- /dev/null
+++ b/chromium/v8/src/x87/builtins-x87.cc
@@ -0,0 +1,1452 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments excluding receiver
+ // -- edi : called function (only guaranteed when
+ // extra_args requires it)
+ // -- esi : context
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[4 * argc] : first argument (argc == eax)
+  //  -- esp[4 * (argc + 1)] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ Register scratch = ebx;
+ __ pop(scratch); // Save return address.
+ __ push(edi);
+ __ push(scratch); // Restore return address.
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects eax to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ add(eax, Immediate(num_extra_args + 1));
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function.
+ __ push(edi);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
+
+ __ CallRuntime(function_id, 1);
+ // Restore receiver.
+ __ pop(edi);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+  // the stack limit as a cue for an interrupt signal.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool create_memento) {
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments
+ // -- edi: constructor function
+ // -- ebx: allocation site or undefined
+ // -----------------------------------
+
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(ebx);
+ __ push(ebx);
+ }
+
+ // Store a smi-tagged arguments count on the stack.
+ __ SmiTag(eax);
+ __ push(eax);
+
+ // Push the function to invoke on the stack.
+ __ push(edi);
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+ __ j(not_equal, &rt_call);
+
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // edi: constructor
+ __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+      // A Smi check covers both a NULL value and a Smi.
+ __ JumpIfSmi(eax, &rt_call);
+ // edi: constructor
+ // eax: initial map (if proven valid below)
+ __ CmpObjectType(eax, MAP_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // edi: constructor
+ // eax: initial map
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ if (!is_api_function) {
+ Label allocate;
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCount::kShift +
+ Map::ConstructionCount::kSize == 32);
+ // Check if slack tracking is enabled.
+ __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
+ __ shr(esi, Map::ConstructionCount::kShift);
+ __ j(zero, &allocate); // JSFunction::kNoSlackTracking
+ // Decrease generous allocation count.
+ __ sub(FieldOperand(eax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCount::kShift));
+
+ __ cmp(esi, JSFunction::kFinishSlackTracking);
+ __ j(not_equal, &allocate);
+
+ __ push(eax);
+ __ push(edi);
+
+ __ push(edi); // constructor
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+
+ __ pop(edi);
+ __ pop(eax);
+ __ xor_(esi, esi); // JSFunction::kNoSlackTracking
+
+ __ bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ // edi: constructor
+ // eax: initial map
+ __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ shl(edi, kPointerSizeLog2);
+ if (create_memento) {
+ __ add(edi, Immediate(AllocationMemento::kSize));
+ }
+
+ __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+ Factory* factory = masm->isolate()->factory();
+
+ // Allocated the JSObject, now initialize the fields.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object (including memento if create_memento)
+ __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+ __ mov(ecx, factory->empty_fixed_array());
+ __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+ __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+ // Set extra fields in the newly allocated object.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object (including memento if create_memento)
+ // esi: slack tracking counter (non-API function case)
+ __ mov(edx, factory->undefined_value());
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ cmp(esi, JSFunction::kNoSlackTracking);
+ __ j(equal, &no_inobject_slack_tracking);
+
+ // Allocate object with a slack.
+ __ movzx_b(esi,
+ FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ lea(esi,
+ Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
+ // esi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(esi, edi);
+ __ Assert(less_equal,
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+        // Fill the remaining fields with the one pointer filler map.
+        __ mov(edx, factory->one_pointer_filler_map());
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+
+ if (create_memento) {
+ __ lea(esi, Operand(edi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+ // Fill in memento fields if necessary.
+ // esi: points to the allocated but uninitialized memento.
+ __ mov(Operand(esi, AllocationMemento::kMapOffset),
+ factory->allocation_memento_map());
+ // Get the cell or undefined.
+ __ mov(edx, Operand(esp, kPointerSize*2));
+ __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
+ edx);
+ } else {
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ or_(ebx, Immediate(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ // Calculate the total number of properties described by the map.
+ __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ movzx_b(ecx,
+ FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ add(edx, ecx);
+ // Calculate unused properties past the end of the in-object properties.
+ __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+ __ sub(edx, ecx);
+ // Done if no extra properties are to be allocated.
+ __ j(zero, &allocated);
+ __ Assert(positive, kPropertyAllocationCountFailed);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // ebx: JSObject
+ // edi: start of next object (will be start of FixedArray)
+ // edx: number of elements in properties array
+ __ Allocate(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ REGISTER_VALUE_IS_INT32,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
+
+ // Initialize the FixedArray.
+ // ebx: JSObject
+ // edi: FixedArray
+ // edx: number of elements
+ // ecx: start of next object
+ __ mov(eax, factory->fixed_array_map());
+ __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
+ __ SmiTag(edx);
+ __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
+
+ // Initialize the fields to undefined.
+ // ebx: JSObject
+ // edi: FixedArray
+ // ecx: start of next object
+ { Label loop, entry;
+ __ mov(edx, factory->undefined_value());
+ __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(eax, 0), edx);
+ __ add(eax, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(eax, ecx);
+ __ j(below, &loop);
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // ebx: JSObject
+ // edi: FixedArray
+ __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+
+
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+ // ebx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(ebx);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ __ bind(&rt_call);
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ mov(edi, Operand(esp, kPointerSize * 2));
+ __ push(edi);
+ offset = kPointerSize;
+ }
+
+ // Must restore esi (context) and edi (constructor) before calling runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ // edi: function (constructor)
+ __ push(edi);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
+ __ mov(ebx, eax); // store result in ebx
+
+ // If we ended up using the runtime, and we want a memento, then the
+    // runtime call made it for us, and we shouldn't increment the memento
+    // create count.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
+ // New object allocated.
+ // ebx: newly allocated object
+ __ bind(&allocated);
+
+ if (create_memento) {
+ __ mov(ecx, Operand(esp, kPointerSize * 2));
+ __ cmp(ecx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // ecx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ add(FieldOperand(ecx, AllocationSite::kPretenureCreateCountOffset),
+ Immediate(Smi::FromInt(1)));
+ __ bind(&count_incremented);
+ }
+
+ // Retrieve the function from the stack.
+ __ pop(edi);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(esp, 0));
+ __ SmiUntag(eax);
+
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(ebx);
+ __ push(ebx);
+
+ // Set up pointer to last argument.
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(ecx, eax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(ebx, ecx, times_4, 0));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+
+ // Call the function.
+ if (is_api_function) {
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0));
+
+ // Restore the arguments count and leave the construct frame.
+ __ bind(&exit);
+ __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count.
+
+ // Leave construct frame.
+ }
+
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(ecx);
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ __ ret(0);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Clear the context before we push it when entering the internal frame.
+ __ Move(esi, Immediate(0));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load the previous frame pointer (ebx) to access C arguments
+ __ mov(ebx, Operand(ebp, 0));
+
+ // Get the function from the frame and setup the context.
+ __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+ __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+
+ // Push the function and the receiver onto the stack.
+ __ push(ecx);
+ __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+
+ // Load the number of arguments and setup pointer to the arguments.
+ __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+ __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+
+ // Copy arguments to the stack in a loop.
+ Label loop, entry;
+ __ Move(ecx, Immediate(0));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
+ __ push(Operand(edx, 0)); // dereference handle
+ __ inc(ecx);
+ __ bind(&entry);
+ __ cmp(ecx, eax);
+ __ j(not_equal, &loop);
+
+ // Get the function from the stack and call it.
+ // kPointerSize for the receiver.
+ __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
+
+ // Invoke the code.
+ if (is_construct) {
+ // No type feedback cell is available
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ }
+
+    // Exit the internal frame. Notice that this also removes the empty
+ // context and the function left on the stack by the code
+ // invocation.
+ }
+ __ ret(kPointerSize); // Remove receiver.
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function.
+ __ push(edi);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+ // Restore receiver.
+ __ pop(edi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Re-execute the code that was patched back to the young age when
+ // the stub returns.
+ __ sub(Operand(esp, 0), Immediate(5));
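+  // (The return address on top of the stack is moved back by 5 bytes, the
+  // length of the call instruction that entered this stub; cf. the use of
+  // Assembler::kCallInstructionLength below.)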
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ }
+ __ popad();
+ __ ret(0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
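+  // eax now points at the start of the call instruction that entered this
+  // stub, i.e. the beginning of the patched code-age sequence.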
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ }
+ __ popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ pop(eax); // Pop return address into scratch register.
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+ __ push(eax); // Push return address after frame prologue.
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ pushad();
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0);
+ __ popad();
+ // Tear down internal frame.
+ }
+
+ __ pop(MemOperand(esp, 0)); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  // SaveDoubles is meaningless for X87; it is only used by deoptimizer.cc.
+ Generate_NotifyStubFailureHelper(masm);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass deoptimization type to the runtime system.
+ __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+
+ // Tear down internal frame.
+ }
+
+ // Get the full codegen state from the stack and untag it.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ SmiUntag(ecx);
+
+ // Switch on the state.
+ Label not_no_registers, not_tos_eax;
+ __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+ __ j(not_equal, &not_no_registers, Label::kNear);
+ __ ret(1 * kPointerSize); // Remove state.
+
+ __ bind(&not_no_registers);
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ cmp(ecx, FullCodeGenerator::TOS_REG);
+ __ j(not_equal, &not_tos_eax, Label::kNear);
+ __ ret(2 * kPointerSize); // Remove state, eax.
+
+ __ bind(&not_tos_eax);
+ __ Abort(kNoCasesLeft);
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ Factory* factory = masm->isolate()->factory();
+
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ test(eax, eax);
+ __ j(not_zero, &done);
+ __ pop(ebx);
+ __ push(Immediate(factory->undefined_value()));
+ __ push(ebx);
+ __ inc(eax);
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ Label slow, non_function;
+ // 1 ~ return address.
+ __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
+ __ JumpIfSmi(edi, &non_function);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ Label shift_arguments;
+ __ Move(edx, Immediate(0)); // indicate regular JS_FUNCTION
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &shift_arguments);
+
+ // Do not transform the receiver for natives (shared already in ebx).
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &shift_arguments);
+
+ // Compute the receiver in sloppy mode.
+ __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
+
+ // Call ToObject on the receiver if it is not an object, or use the
+ // global object if it is null or undefined.
+ __ JumpIfSmi(ebx, &convert_to_object);
+ __ cmp(ebx, factory->null_value());
+ __ j(equal, &use_global_receiver);
+ __ cmp(ebx, factory->undefined_value());
+ __ j(equal, &use_global_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &shift_arguments);
+
+ __ bind(&convert_to_object);
+
+ { // In order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ push(eax);
+
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(ebx, eax);
+ __ Move(edx, Immediate(0)); // restore
+
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+
+ // Restore the function to edi.
+ __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
+ __ jmp(&patch_receiver);
+
+ __ bind(&use_global_receiver);
+ __ mov(ebx,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+
+ __ bind(&patch_receiver);
+ __ mov(Operand(esp, eax, times_4, 0), ebx);
+
+ __ jmp(&shift_arguments);
+ }
+
+ // 3b. Check for function proxy.
+ __ bind(&slow);
+ __ Move(edx, Immediate(1)); // indicate function proxy
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &shift_arguments);
+ __ bind(&non_function);
+ __ Move(edx, Immediate(2)); // indicate non-function
+
+ // 3c. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ __ mov(Operand(esp, eax, times_4, 0), edi);
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ __ bind(&shift_arguments);
+ { Label loop;
+ __ mov(ecx, eax);
+ __ bind(&loop);
+ __ mov(ebx, Operand(esp, ecx, times_4, 0));
+ __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
+ __ dec(ecx);
+ __ j(not_sign, &loop); // While non-negative (to copy return address).
+ __ pop(ebx); // Discard copy of return address.
+ __ dec(eax); // One fewer argument (first argument is new receiver).
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
+ { Label function, non_proxy;
+ __ test(edx, edx);
+ __ j(zero, &function);
+ __ Move(ebx, Immediate(0));
+ __ cmp(edx, Immediate(1));
+ __ j(not_equal, &non_proxy);
+
+ __ pop(edx); // return address
+ __ push(edi); // re-add proxy object as additional argument
+ __ push(edx);
+ __ inc(eax);
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+ __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&non_proxy);
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ bind(&function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code in register edx without checking arguments.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ SmiUntag(ebx);
+ __ cmp(eax, ebx);
+ __ j(not_equal,
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
+
+ ParameterCount expected(0);
+ __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ static const int kArgumentsOffset = 2 * kPointerSize;
+ static const int kReceiverOffset = 3 * kPointerSize;
+ static const int kFunctionOffset = 4 * kPointerSize;
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+
+ __ push(Operand(ebp, kFunctionOffset)); // push this
+ __ push(Operand(ebp, kArgumentsOffset)); // push arguments
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
+    // Make ecx the space we have left. The stack might already have
+    // overflowed here, which will cause ecx to become negative.
+ __ mov(ecx, esp);
+ __ sub(ecx, edi);
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, eax);
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
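+    // eax holds the argument count as a smi, so shifting by
+    // kPointerSizeLog2 - kSmiTagSize both untags the count and scales it
+    // to bytes.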
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, edx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+    __ push(Operand(ebp, kFunctionOffset));  // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(eax); // limit
+ __ push(Immediate(0)); // index
+
+ // Get the receiver.
+ __ mov(ebx, Operand(ebp, kReceiverOffset));
+
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver, use_global_receiver;
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &push_receiver);
+
+ Factory* factory = masm->isolate()->factory();
+
+ // Do not transform the receiver for natives (shared already in ecx).
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &push_receiver);
+
+ // Compute the receiver in sloppy mode.
+ // Call ToObject on the receiver if it is not an object, or use the
+ // global object if it is null or undefined.
+ __ JumpIfSmi(ebx, &call_to_object);
+ __ cmp(ebx, factory->null_value());
+ __ j(equal, &use_global_receiver);
+ __ cmp(ebx, factory->undefined_value());
+ __ j(equal, &use_global_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &push_receiver);
+
+ __ bind(&call_to_object);
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(ebx, eax);
+ __ jmp(&push_receiver);
+
+ __ bind(&use_global_receiver);
+ __ mov(ebx,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(ebx);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ mov(ecx, Operand(ebp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
+
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+
+ // Push the nth argument.
+ __ push(eax);
+
+ // Update the index on the stack and in register eax.
+ __ mov(ecx, Operand(ebp, kIndexOffset));
+ __ add(ecx, Immediate(1 << kSmiTagSize));
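+    // 1 << kSmiTagSize is Smi::FromInt(1); the index stays smi-tagged.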
+ __ mov(Operand(ebp, kIndexOffset), ecx);
+
+ __ bind(&entry);
+ __ cmp(ecx, Operand(ebp, kLimitOffset));
+ __ j(not_equal, &loop);
+
+ // Call the function.
+ Label call_proxy;
+ __ mov(eax, ecx);
+ ParameterCount actual(eax);
+ __ SmiUntag(eax);
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &call_proxy);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
+
+ frame_scope.GenerateLeaveFrame();
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+
+ // Call the function proxy.
+ __ bind(&call_proxy);
+ __ push(edi); // add function proxy as last argument
+ __ inc(eax);
+ __ Move(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+ __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ // Leave internal frame.
+ }
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The test below catches both a NULL pointer and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
+ }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function by tail calling its constructor stub.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The test below catches both a NULL pointer and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ }
+
+  // Run the native code for the Array function called as a normal function
+  // by tail calling its constructor stub.
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1);
+
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ Assert(equal, kUnexpectedStringFunction);
+ }
+
+ // Load the first argument into eax and get rid of the rest
+ // (including the receiver).
+ Label no_arguments;
+ __ test(eax, eax);
+ __ j(zero, &no_arguments);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ push(ecx);
+ __ mov(eax, ebx);
+
+ // Lookup the argument in the number to string cache.
+ Label not_cached, argument_is_string;
+ __ LookupNumberStringCache(eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1);
+ __ bind(&argument_is_string);
+ // ----------- S t a t e -------------
+ // -- ebx : argument converted to string
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate a JSValue and put the tagged pointer into eax.
+ Label gc_required;
+ __ Allocate(JSValue::kSize,
+ eax, // Result.
+ ecx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ __ LoadGlobalFunctionInitialMap(edi, ecx);
+ if (FLAG_debug_code) {
+ __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
+ JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
+ __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
+ __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
+ }
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
+
+ // Set properties and elements.
+ Factory* factory = masm->isolate()->factory();
+ __ Move(ecx, Immediate(factory->empty_fixed_array()));
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
+
+ // Set the value.
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ // We're done. Return.
+ __ ret(0);
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(eax, &convert_argument);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
+ __ j(NegateCondition(is_string), &convert_argument);
+ __ mov(ebx, eax);
+ __ IncrementCounter(counters->string_ctor_string_value(), 1);
+ __ jmp(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into ebx.
+ __ bind(&convert_argument);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edi); // Preserve the function.
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ pop(edi);
+ }
+ __ mov(ebx, eax);
+ __ jmp(&argument_is_string);
+
+ // Load the empty string into ebx, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ Move(ebx, Immediate(factory->empty_string()));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, kPointerSize));
+ __ push(ecx);
+ __ jmp(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
+ __ ret(0);
+}
+
+
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- eax : actual number of arguments
+ // -- ebx : expected number of arguments
+ // -- edi : function (passed through to callee)
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(edx, Operand::StaticVariable(real_stack_limit));
+  // Make ecx the space we have left. The stack might already have
+  // overflowed here, which will cause ecx to become negative.
+ __ mov(ecx, esp);
+ __ sub(ecx, edx);
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, ebx);
+ __ shl(edx, kPointerSizeLog2);
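+  // Unlike the smi-tagged count in Generate_FunctionApply's stack check,
+  // ebx holds an untagged count here, so the full kPointerSizeLog2 shift
+  // converts it to bytes.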
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, edx);
+ __ j(less_equal, stack_overflow); // Signed comparison.
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ push(ebp);
+ __ mov(ebp, esp);
+
+ // Store the arguments adaptor context sentinel.
+ __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Push the function on the stack.
+ __ push(edi);
+
+ // Preserve the number of arguments on the stack. Must preserve eax,
+ // ebx and ecx because these registers are used when copying the
+ // arguments and the receiver.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
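+  // edi = eax + eax * 1 + kSmiTag, i.e. the argument count as a smi.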
+ __ push(edi);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // Retrieve the number of arguments from the stack.
+ __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ // Leave the frame.
+ __ leave();
+
+ // Remove caller arguments from the stack.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(ecx);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : actual number of arguments
+ // -- ebx : expected number of arguments
+ // -- edi : function (passed through to callee)
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+ __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
+
+ Label stack_overflow;
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
+ Label enough, too_few;
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ cmp(eax, ebx);
+ __ j(less, &too_few);
+ __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ j(equal, &dont_adapt_arguments);
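+  // (The sentinel marks functions that handle any actual argument count
+  // themselves, so no adaptation is needed.)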
+
+ { // Enough parameters: Actual >= expected.
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all expected arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(eax, Operand(ebp, eax, times_4, offset));
+ __ mov(edi, -1); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ inc(edi);
+ __ push(Operand(eax, 0));
+ __ sub(eax, Immediate(kPointerSize));
+ __ cmp(edi, ebx);
+ __ j(less, &copy);
+ __ jmp(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected.
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all actual arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(edi, Operand(ebp, eax, times_4, offset));
+ // ebx = expected - actual.
+ __ sub(ebx, eax);
+ // eax = -actual - 1
+ __ neg(eax);
+ __ sub(eax, Immediate(1));
+
+ Label copy;
+ __ bind(&copy);
+ __ inc(eax);
+ __ push(Operand(edi, 0));
+ __ sub(edi, Immediate(kPointerSize));
+ __ test(eax, eax);
+ __ j(not_zero, &copy);
+
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ bind(&fill);
+ __ inc(eax);
+ __ push(Immediate(masm->isolate()->factory()->undefined_value()));
+ __ cmp(eax, ebx);
+ __ j(less, &fill);
+ }
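+  // (For example, a callee expecting three arguments invoked with one ends
+  // up with the receiver, the single actual argument, and two undefined
+  // values in the adaptor frame.)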
+
+ // Call the entry point.
+ __ bind(&invoke);
+ // Restore function pointer.
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ call(edx);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Leave frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ ret(0);
+
+ // -------------------------------------------
+  // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ jmp(edx);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ int3();
+ }
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ push(eax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ Label skip;
+ // If the code object is null, just return to the unoptimized code.
+ __ cmp(eax, Immediate(0));
+ __ j(not_equal, &skip, Label::kNear);
+ __ ret(0);
+
+ __ bind(&skip);
+
+ // Load deoptimization data from the code object.
+ __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiUntag(ebx);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+ // Overwrite the return address on the stack.
+ __ mov(Operand(esp, 0), eax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+ }
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ ret(0);
+}
+
+#undef __
+} }  // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/code-stubs-x87.cc b/chromium/v8/src/x87/code-stubs-x87.cc
new file mode 100644
index 00000000000..a194780fae1
--- /dev/null
+++ b/chromium/v8/src/x87/code-stubs-x87.cc
@@ -0,0 +1,4734 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
+#include "src/codegen.h"
+#include "src/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edi };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void ToNumberStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
+}
+
+
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->register_param_representations_ = representations;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
+}
+
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
+}
+
+
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx, edx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx, ebx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
+}
+
+
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void StringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ // ebx -- allocation site with elements kind
+ static Register registers_variable_args[] = { edi, ebx, eax };
+ static Register registers_no_args[] = { edi, ebx };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
+    // Stack param count needs the constructor pointer and the single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // eax -- number of arguments
+ // edi -- constructor function
+ static Register registers_variable_args[] = { edi, eax };
+ static Register registers_no_args[] = { edi };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
+    // Stack param count needs the constructor pointer and the single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx, edx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { edi, // JSFunction
+ esi, // context
+ eax, // actual number of arguments
+ ebx, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { esi, // context
+ ecx, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { esi, // context
+ ecx, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { esi, // context
+ edx, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { eax, // callee
+ ebx, // call_data
+ ecx, // holder
+ edx, // api_function_address
+ esi, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ isolate()->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT(descriptor->register_param_count_ == 0 ||
+ eax.is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ ret(0);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ pushad();
+ const int argument_count = 1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, ecx);
+ __ mov(Operand(esp, 0 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(isolate()),
+ argument_count);
+ __ popad();
+ __ ret(0);
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ enum ArgLocation {
+ ARGS_ON_STACK,
+ ARGS_IN_REGISTERS
+ };
+
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand in register number. Returns operand as floating point number
+ // on FPU stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register number);
+
+ // Test if operands are smi or number objects (fp). Requirements:
+ // operand_1 in eax, operand_2 in edx; falls through on float
+ // operands, jumps to the non_float label otherwise.
+ static void CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch);
+};
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Register input_reg = this->source();
+ Register final_result_reg = this->destination();
+ ASSERT(is_truncating());
+
+ Label check_negative, process_64_bits, done, done_no_stash;
+
+ int double_offset = offset();
+
+ // Account for return address and saved regs if input is esp.
+ if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
+
+ MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
+ MemOperand exponent_operand(MemOperand(input_reg,
+ double_offset + kDoubleSize / 2));
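+  // kDoubleSize / 2 skips the mantissa word: on little-endian ia32 the word
+  // holding the sign and exponent is the high word of the double.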
+
+ Register scratch1;
+ {
+ Register scratch_candidates[3] = { ebx, edx, edi };
+ for (int i = 0; i < 3; i++) {
+ scratch1 = scratch_candidates[i];
+ if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
+ }
+ }
+ // Since we must use ecx for shifts below, use some other register (eax)
+ // to calculate the result if ecx is the requested return register.
+ Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
+  // Save the register we clobber as a temporary: ecx (needed for the shifts
+  // below) unless ecx is the requested return register, in which case we
+  // save eax, the register used in its stead for the result.
+ Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
+ __ push(scratch1);
+ __ push(save_reg);
+
+ bool stash_exponent_copy = !input_reg.is(esp);
+ __ mov(scratch1, mantissa_operand);
+ __ mov(ecx, exponent_operand);
+ if (stash_exponent_copy) __ push(ecx);
+
+ __ and_(ecx, HeapNumber::kExponentMask);
+ __ shr(ecx, HeapNumber::kExponentShift);
+ __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
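+  // result_reg now holds the unbiased exponent of the input double.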
+ __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
+ __ j(below, &process_64_bits);
+
+ // Result is entirely in lower 32-bits of mantissa
+ int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+ __ sub(ecx, Immediate(delta));
+ __ xor_(result_reg, result_reg);
+ __ cmp(ecx, Immediate(31));
+ __ j(above, &done);
+ __ shl_cl(scratch1);
+ __ jmp(&check_negative);
+
+ __ bind(&process_64_bits);
+ // Result must be extracted from shifted 32-bit mantissa
+ __ sub(ecx, Immediate(delta));
+ __ neg(ecx);
+ if (stash_exponent_copy) {
+ __ mov(result_reg, MemOperand(esp, 0));
+ } else {
+ __ mov(result_reg, exponent_operand);
+ }
+ __ and_(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
+ __ add(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
+ __ shrd(result_reg, scratch1);
+ __ shr_cl(result_reg);
+ __ test(ecx, Immediate(32));
+ {
+ Label skip_mov;
+ __ j(equal, &skip_mov, Label::kNear);
+ __ mov(scratch1, result_reg);
+ __ bind(&skip_mov);
+ }
+
+ // If the double was negative, negate the integer result.
+ __ bind(&check_negative);
+ __ mov(result_reg, scratch1);
+ __ neg(result_reg);
+ if (stash_exponent_copy) {
+ __ cmp(MemOperand(esp, 0), Immediate(0));
+ } else {
+ __ cmp(exponent_operand, Immediate(0));
+ }
+ {
+ Label skip_mov;
+ __ j(less_equal, &skip_mov, Label::kNear);
+ __ mov(result_reg, scratch1);
+ __ bind(&skip_mov);
+ }
+
+ // Restore registers
+ __ bind(&done);
+ if (stash_exponent_copy) {
+ __ add(esp, Immediate(kDoubleSize / 2));
+ }
+ __ bind(&done_no_stash);
+ if (!final_result_reg.is(result_reg)) {
+ ASSERT(final_result_reg.is(ecx));
+ __ mov(final_result_reg, result_reg);
+ }
+ __ pop(save_reg);
+ __ pop(scratch1);
+ __ ret(0);
+}
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register number) {
+ Label load_smi, done;
+
+ __ JumpIfSmi(number, &load_smi, Label::kNear);
+ __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&load_smi);
+ __ SmiUntag(number);
+ __ push(number);
+ __ fild_s(Operand(esp, 0));
+ __ pop(number);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch) {
+ Label test_other, done;
+  // Test if both operands are smis or heap numbers; jump to the non_float
+  // label otherwise. Clobbers scratch with the operand's map.
+ __ JumpIfSmi(edx, &test_other, Label::kNear);
+ __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(scratch, factory->heap_number_map());
+ __ j(not_equal, non_float); // argument in edx is not a number -> NaN
+
+ __ bind(&test_other);
+ __ JumpIfSmi(eax, &done, Label::kNear);
+ __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(scratch, factory->heap_number_map());
+ __ j(not_equal, non_float); // argument in eax is not a number -> NaN
+
+ // Fall-through: Both operands are numbers.
+ __ bind(&done);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ // No SSE2 support
+ UNREACHABLE();
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(isolate()->factory()->prototype_string()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in edx and the parameter count is in eax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(edx, &slow, Label::kNear);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor, Label::kNear);
+
+ // Check index against formal parameters count limit passed in
+ // through register eax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmp(edx, eax);
+ __ j(above_equal, &slow, Label::kNear);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
+ __ lea(ebx, Operand(ebp, eax, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+ __ ret(0);
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(edx, ecx);
+ __ j(above_equal, &slow, Label::kNear);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
+ __ lea(ebx, Operand(ebx, ecx, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+ __ ret(0);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(ebx); // Return address.
+ __ push(edx);
+ __ push(ebx);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime, Label::kNear);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters (tagged)
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // ebx = parameter count (tagged)
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ // TODO(rossberg): Factor out some of the bits that are shared with the other
+ // Generate* functions.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame, Label::kNear);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(ecx, ebx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // ebx = parameter count (tagged)
+ // ecx = argument count (tagged)
+ // esp[4] = parameter count (tagged)
+ // esp[8] = address of receiver argument
+ // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+ __ cmp(ebx, ecx);
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ mov(ebx, ecx);
+
+ __ bind(&try_allocate);
+
+ // Save mapped parameter count.
+ __ push(ebx);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ test(ebx, ebx);
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
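+  // ebx is smi-tagged, so scaling by times_2 yields count * kPointerSize;
+  // the displacement adds the fixed parameter map header.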
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+
+ // eax = address of new object(s) (tagged)
+ // ecx = argument count (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Get the arguments boilerplate from the current native context into edi.
+ Label has_mapped_parameters, copy;
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ test(ebx, ebx);
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX)));
+ __ jmp(&copy, Label::kNear);
+
+ __ bind(&has_mapped_parameters);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
+ __ bind(&copy);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of boilerplate object (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(edx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), edx);
+ }
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ edx);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ ecx);
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, edi will point there, otherwise to the
+ // backing store.
+ __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of parameter map or backing store (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Free a register.
+ __ push(eax);
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ test(ebx, ebx);
+ __ j(zero, &skip_parameter_map);
+
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
+ __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+ __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
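+ // Loop sketch (illustrative): for j = mapped_count - 1 down to 0,
+ //   parameter_map[j] = MIN_CONTEXT_SLOTS + parameter_count - 1 - j;
+ //   backing_store[j] = the_hole;
+ // so the rightmost mapped parameter gets the lowest context index.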
+ Label parameters_loop, parameters_test;
+ __ push(ecx);
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ add(ebx, Operand(esp, 4 * kPointerSize));
+ __ sub(ebx, eax);
+ __ mov(ecx, isolate()->factory()->the_hole_value());
+ __ mov(edx, edi);
+ __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+ // eax = loop variable (tagged)
+ // ebx = mapping index (tagged)
+ // ecx = the hole value
+ // edx = address of parameter map (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = argument count (tagged)
+ // esp[4] = address of new object (tagged)
+ // esp[8] = mapped parameter count (tagged)
+ // esp[16] = parameter count (tagged)
+ // esp[20] = address of receiver argument
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+ __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+ __ add(ebx, Immediate(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ test(eax, eax);
+ __ j(not_zero, &parameters_loop, Label::kNear);
+ __ pop(ecx);
+
+ __ bind(&skip_parameter_map);
+
+ // ecx = argument count (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = address of new object (tagged)
+ // esp[4] = mapped parameter count (tagged)
+ // esp[12] = parameter count (tagged)
+ // esp[16] = address of receiver argument
+ // Copy arguments header and remaining slots (if there are any).
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+ Label arguments_loop, arguments_test;
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
+ __ sub(edx, ebx);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ sub(edx, Immediate(kPointerSize));
+ __ mov(eax, Operand(edx, 0));
+ __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+ __ add(ebx, Immediate(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(ebx, ecx);
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Restore.
+ __ pop(eax); // Address of arguments object.
+ __ pop(ebx); // Parameter count.
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ pop(eax); // Remove saved parameter count.
+ __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame, Label::kNear);
+
+ // Get the length from the frame.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ jmp(&try_allocate, Label::kNear);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // Try the new space allocation. Start out with computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ test(ecx, ecx);
+ __ j(zero, &add_arguments_object, Label::kNear);
+ __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ add(ecx, Immediate(Heap::kStrictArgumentsObjectSize));
+
+ // Do the allocation of both objects in one go.
+ __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current native context.
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ const int offset =
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
+ __ mov(edi, Operand(edi, offset));
+
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ ecx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ test(ecx, ecx);
+ __ j(zero, &done, Label::kNear);
+
+ // Get the parameters pointer from the stack.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->fixed_array_map()));
+
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+ // Untag the length for the loop below.
+ __ SmiUntag(ecx);
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
+ __ add(edi, Immediate(kPointerSize));
+ __ sub(edx, Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
+}
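+
+// Layout sketch (illustrative, not part of this patch): the strict-mode
+// arguments object built above is one contiguous allocation,
+//
+//   eax -> [ JSObject header | length | ... ]        (arguments object)
+//   edi -> [ map | length | arg[0] .. arg[n-1] ]     (FixedArray elements)
+//
+// where arg[i] is copied from the caller's stack by walking edx downwards,
+// starting one slot below the receiver.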
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to the runtime if native RegExp is not selected at
+ // compile time, or if the regexp entry in generated code has been turned
+ // off (by a runtime switch or at compilation).
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: last_match_info (expected JSArray)
+ // esp[8]: previous index
+ // esp[12]: subject string
+ // esp[16]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime;
+ Factory* factory = isolate()->factory();
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
+ __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(ebx, ebx);
+ __ j(zero, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
+ __ j(not_equal, &runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Check(not_zero, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
+ __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ }
+
+ // ecx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
+ __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ j(not_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check that the number of captures fits in the static offsets vector
+ // buffer.
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since edx is smi-tagged.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
+ __ j(above, &runtime);
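+
+ // Arithmetic sketch (illustrative): edx holds the smi-tagged capture
+ // count n, i.e. edx == 2 * n, so the unsigned compare above implements
+ //   (n + 1) * 2 <= kJSRegexpStaticOffsetsVectorSize
+ //   <=> 2 * n <= kJSRegexpStaticOffsetsVectorSize - 2
+ // without untagging; a negative count is also sent to the runtime, since
+ // it reads as a huge unsigned value.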
+
+ // Reset offset for possibly sliced string.
+ __ Move(edi, Immediate(0));
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ mov(edx, eax); // Make a copy of the original subject string.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // eax: subject string
+ // edx: subject string
+ // ebx: subject string instance type
+ // ecx: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential two byte? If yes, go to (9).
+ // (2) Sequential one byte? If yes, go to (6).
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // (4) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bail out.
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ // (6) One byte sequential. Load regexp code for one byte.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (7) Not a long external string? If yes, go to (10).
+ // (8) External string. Make it, offset-wise, look like a sequential string.
+ // (8a) Is the external string one byte? If yes, go to (6).
+ // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ // (10) Short external string or not a string? If yes, bail out to runtime.
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+
+ Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
+ external_string /* 8 */, check_underlying /* 5a */,
+ not_seq_nor_cons /* 7 */, check_code /* E */,
+ not_long_external /* 10 */;
+
+ // (1) Sequential two byte? If yes, go to (9).
+ __ and_(ebx, kIsNotStringMask |
+ kStringRepresentationMask |
+ kStringEncodingMask |
+ kShortExternalStringMask);
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+
+ // (2) Sequential one byte? If yes, go to (6).
+ // Any other sequential string must be one byte.
+ __ and_(ebx, Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask));
+ __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
+
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // We check whether the subject string is a cons, since sequential strings
+ // have already been covered.
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ __ cmp(ebx, Immediate(kExternalStringTag));
+ __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
+
+ // (4) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
+ __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+ __ bind(&check_underlying);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ __ test_b(ebx, kStringRepresentationMask);
+ // The underlying external string is never a short external string.
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ j(not_zero, &external_string); // Go to (8).
+
+ // eax: sequential subject string (or look-alike, external string)
+ // edx: original subject string
+ // ecx: RegExp data (FixedArray)
+ // (6) One byte sequential. Load regexp code for one byte.
+ __ bind(&seq_one_byte_string);
+ // Load previous index and check range before edx is overwritten. We have
+ // to use edx instead of eax here because eax might merely have been made
+ // to look like a sequential string when it is actually an external string.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(ebx, &runtime);
+ __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ Move(ecx, Immediate(1)); // Type is one byte.
+
+ // (E) Carry on. String handling is done.
+ __ bind(&check_code);
+ // edx: irregexp code
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains a smi (code flushing support).
+ __ JumpIfSmi(edx, &runtime);
+
+ // eax: subject string
+ // ebx: previous index (smi)
+ // edx: code
+ // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // All checks done. Now push arguments for native regexp code.
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->regexp_entry_native(), 1);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 9;
+ __ EnterApiExitFrame(kRegExpExecuteArguments);
+
+ // Argument 9: Pass current isolate address.
+ __ mov(Operand(esp, 8 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
+
+ // Argument 7: Start (high end) of backtracking stack memory area.
+ __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+ __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ mov(Operand(esp, 6 * kPointerSize), esi);
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
+
+ // Argument 5: static offsets vector buffer.
+ __ mov(Operand(esp, 4 * kPointerSize),
+ Immediate(ExternalReference::address_of_static_offsets_vector(
+ isolate())));
+
+ // Argument 2: Previous index.
+ __ SmiUntag(ebx);
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+
+ // Argument 1: Original subject string.
+ // The original subject is in the previous stack frame. Therefore we have to
+ // use ebp, which points exactly to one pointer size below the previous esp.
+ // (Because creating a new stack frame pushes the previous ebp onto the stack
+ // and thereby moves up esp by one kPointerSize.)
+ __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), esi);
+
+ // esi: original subject string
+ // eax: underlying subject string
+ // ebx: previous index
+ // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // edx: code
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ // Prepare start and end index of the input.
+ // Load the length from the original sliced string if that is the case.
+ __ mov(esi, FieldOperand(esi, String::kLengthOffset));
+ __ add(esi, edi); // Calculate input end wrt offset.
+ __ SmiUntag(edi);
+ __ add(ebx, edi); // Calculate input start wrt offset.
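+
+ // Index sketch (illustrative): with the slice offset in edi,
+ //   end_index = subject_length + offset     (esi, still smi-tagged)
+ //   start_index = previous_index + offset   (ebx, untagged)
+ // Both are scaled by the character size and biased by the sequential
+ // string header below to form the raw start/end data pointers.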
+
+ // ebx: start index of the input string
+ // esi: end index of the input string
+ Label setup_two_byte, setup_rest;
+ __ test(ecx, ecx);
+ __ j(zero, &setup_two_byte, Label::kNear);
+ __ SmiUntag(esi);
+ __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+ __ jmp(&setup_rest, Label::kNear);
+
+ __ bind(&setup_two_byte);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1); // esi is a smi, already scaled by 2.
+ __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+
+ __ bind(&setup_rest);
+
+ // Locate the code entry and call it.
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(edx);
+
+ // Drop arguments and come back to JS mode.
+ __ LeaveApiExitFrame(true);
+
+ // Check the result.
+ Label success;
+ __ cmp(eax, 1);
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
+ __ j(equal, &success);
+ Label failure;
+ __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
+ __ j(equal, &failure);
+ __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
+ // If not exception, it can only be retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+ // Result must now be exception. If there is no pending exception
+ // already, a stack overflow (on the backtrack stack) was detected in
+ // RegExp code, but the exception has not been created yet. Handle that
+ // in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+ isolate());
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ __ mov(eax, Operand::StaticVariable(pending_exception));
+ __ cmp(edx, eax);
+ __ j(equal, &runtime);
+ // For exception, throw the exception again.
+
+ // Clear the pending exception variable.
+ __ mov(Operand::StaticVariable(pending_exception), edx);
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ cmp(eax, factory->termination_exception());
+ Label throw_termination_exception;
+ __ j(equal, &throw_termination_exception, Label::kNear);
+
+ // Handle normal exception by following handler chain.
+ __ Throw(eax);
+
+ __ bind(&throw_termination_exception);
+ __ ThrowUncatchable(eax);
+
+ __ bind(&failure);
+ // For failure to match, return null.
+ __ mov(eax, factory->null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(edx, Immediate(2)); // edx was a smi.
+
+ // edx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ // Check that the fourth argument is a JSArray object.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+ __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(eax, factory->fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiUntag(eax);
+ __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, eax);
+ __ j(greater, &runtime);
+
+ // ebx: last_match_info backing store (FixedArray)
+ // edx: number of capture registers
+ // Store the capture count.
+ __ SmiTag(edx); // Number of capture registers to smi.
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+ __ SmiUntag(edx); // Number of capture registers back from smi.
+ // Store last subject and last input.
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(ecx, eax);
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
+ __ RecordWriteField(ebx,
+ RegExpImpl::kLastSubjectOffset,
+ eax,
+ edi);
+ __ mov(eax, ecx);
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
+ __ RecordWriteField(ebx,
+ RegExpImpl::kLastInputOffset,
+ eax,
+ edi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate());
+ __ mov(ecx, Immediate(address_of_static_offsets_vector));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // ecx: offsets vector
+ // edx: number of capture registers
+ Label next_capture, done;
+ // The capture register counter starts from the number of capture
+ // registers and counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ sub(edx, Immediate(1));
+ __ j(negative, &done, Label::kNear);
+ // Read the value from the static offsets vector buffer.
+ __ mov(edi, Operand(ecx, edx, times_int_size, 0));
+ __ SmiTag(edi);
+ // Store the smi value in the last match info.
+ __ mov(FieldOperand(ebx,
+ edx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ edi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (7) Not a long external string? If yes, go to (10).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set from (3).
+ __ j(greater, &not_long_external, Label::kNear); // Go to (10).
+
+ // (8) External string. Short external strings have been ruled out.
+ __ bind(&external_string);
+ // Reload instance type.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ test_b(ebx, kIsIndirectStringMask);
+ __ Assert(zero, kExternalStringExpectedButNotFound);
+ }
+ __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // (8a) Is the external string one byte? If yes, go to (6).
+ __ test_b(ebx, kStringEncodingMask);
+ __ j(not_zero, &seq_one_byte_string); // Goto (6).
+
+ // eax: sequential subject string (or look-alike, external string)
+ // edx: original subject string
+ // ecx: RegExp data (FixedArray)
+ // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ __ bind(&seq_two_byte_string);
+ // Load previous index and check range before edx is overwritten. We have
+ // to use edx instead of eax here because eax might merely have been made
+ // to look like a sequential string when it is actually an external string.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(ebx, &runtime);
+ __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Move(ecx, Immediate(0)); // Type is two byte.
+ __ jmp(&check_code); // Go to (E).
+
+ // (10) Not a string or a short external string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
+ __ j(not_zero, &runtime);
+
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+ // Load offset into edi and replace subject string with parent.
+ __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
+ __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (5a).
+#endif // V8_INTERPRETED_REGEXP
+}
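+
+// Result dispatch sketch (illustrative, not part of this patch): the
+// native entry point returns an int that the code above decodes roughly as
+//
+//   switch (result) {
+//     case 1:  // exactly one match; global behaviour was forced off
+//       /* fill in last_match_info and return it */ break;
+//     case NativeRegExpMacroAssembler::FAILURE:
+//       /* return null */ break;
+//     case NativeRegExpMacroAssembler::EXCEPTION:
+//       /* rethrow the pending exception */ break;
+//     default:  // RETRY
+//       /* bail out to the runtime */ break;
+//   }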
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
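+
+// Example (illustrative): for cc == greater, i.e. "x > y", an unordered
+// operand such as NaN must make the branch fail, so the stub materializes
+// LESS; symmetrically, cc == less or less_equal yields GREATER.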
+
+
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(not_equal, fail);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+static void BranchIfNotInternalizedString(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ test(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, label);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Label check_unequal_objects;
+ Condition cc = GetCondition();
+
+ Label miss;
+ CheckInputType(masm, edx, left_, &miss);
+ CheckInputType(masm, eax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
+ __ sub(edx, eax); // Return the result of the subtraction.
+ __ j(no_overflow, &smi_done, Label::kNear);
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
+ __ bind(&smi_done);
+ __ mov(eax, edx);
+ __ ret(0);
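+ // Overflow-fix sketch (illustrative): the fast path above computes
+ // edx - eax, which is exact unless the smis have opposite signs; on
+ // overflow the truncated sign bit is wrong and not_() restores a value
+ // of the correct sign. In plain C++:
+ //
+ //   int32_t CompareSmis(int32_t l, int32_t r) {  // both smi-tagged
+ //     int64_t exact = static_cast<int64_t>(l) - r;
+ //     int32_t diff = static_cast<int32_t>(exact);
+ //     if (exact != diff) diff = ~diff;  // overflow; diff is never 0 here
+ //     return diff;  // <0, ==0, >0 exactly as l <=> r
+ //   }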
+ __ bind(&non_smi);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Identical objects can be compared fast, but there are some tricky cases
+ // for NaN and undefined.
+ Label generic_heap_number_comparison;
+ {
+ Label not_identical;
+ __ cmp(eax, edx);
+ __ j(not_equal, &not_identical);
+
+ if (cc != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ cmp(edx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &check_for_nan, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Compare heap numbers in a general way,
+ // to handle NaNs correctly.
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->heap_number_map()));
+ __ j(equal, &generic_heap_number_comparison, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+
+ __ bind(&not_identical);
+ }
+
+ // Strict equality can quickly decide whether objects are equal.
+ // Non-strict object equality is slower, so it is handled later in the stub.
+ if (cc == equal && strict()) {
+ Label slow; // Fallthrough label.
+ Label not_smis;
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ mov(ecx, Immediate(kSmiTagMask));
+ __ and_(ecx, eax);
+ __ test(ecx, edx);
+ __ j(not_zero, &not_smis, Label::kNear);
+ // One operand is a smi.
+
+ // Check whether the non-smi is a heap number.
+ STATIC_ASSERT(kSmiTagMask == 1);
+ // ecx still holds eax & kSmiTagMask, which is either zero or one.
+ __ sub(ecx, Immediate(0x01));
+ __ mov(ebx, edx);
+ __ xor_(ebx, eax);
+ __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, eax);
+ // if eax was smi, ebx is now edx, else eax.
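+ // Branchless-select sketch (illustrative): let m = (eax & kSmiTagMask) - 1,
+ // so m == ~0 when eax is a smi (tag 0) and m == 0 when eax is a heap
+ // object. Then
+ //   ebx = ((edx ^ eax) & m) ^ eax
+ // is edx in the first case and eax in the second, leaving the unique
+ // non-smi operand in ebx without a branch.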
+
+ // Check if the non-smi operand is a heap number.
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->heap_number_map()));
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow, Label::kNear);
+ // Return non-equal (ebx is not zero)
+ __ mov(eax, ebx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+
+ // Get the type of the first operand.
+ // If the first object is a JS object, we have done pointer comparison.
+ Label first_non_object;
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object, Label::kNear);
+
+ // Return non-zero (eax is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ __ bind(&slow);
+ }
+
+ // Generate the number comparison code.
+ Label non_number_comparison;
+ Label unordered;
+ __ bind(&generic_heap_number_comparison);
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, Label::kNear);
+ __ j(above, &above_label, Label::kNear);
+
+ __ Move(eax, Immediate(0));
+ __ ret(0);
+
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
+
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(0);
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+
+ // Fast negative check for internalized-to-internalized equality.
+ Label check_for_strings;
+ if (cc == equal) {
+ BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx);
+ BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx);
+
+ // We've already checked for object identity, so if both operands
+ // are internalized they aren't equal. Register eax already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(0);
+ }
+
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);
+
+ // Inline comparison of ASCII strings.
+ if (cc == equal) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
+ edx,
+ eax,
+ ecx,
+ ebx);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ edi);
+ }
+#ifdef DEBUG
+ __ Abort(kUnexpectedFallThroughFromStringComparison);
+#endif
+
+ __ bind(&check_unequal_objects);
+ if (cc == equal && !strict()) {
+ // Non-strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects;
+ Label return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+ __ lea(ecx, Operand(eax, edx, times_1, 0));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects, Label::kNear);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &not_both_objects, Label::kNear);
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ j(below, &not_both_objects, Label::kNear);
+ // We do not bail out after this point. Both are JSObjects, and
+ // they are equal if and only if both are undetectable.
+ // The and of the undetectable flags is 1 if and only if they are equal.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal, Label::kNear);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal, Label::kNear);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Move(eax, Immediate(EQUAL));
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(0); // eax holds the comparison result.
+ __ bind(&not_both_objects);
+ }
+
+ // Push arguments below the return address.
+ __ pop(ecx);
+ __ push(edx);
+ __ push(eax);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ }
+
+ // Restore return address on the stack.
+ __ push(ecx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a feedback vector slot. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // eax : number of arguments to the construct function
+ // ebx : Feedback vector
+ // edx : slot in feedback vector (Smi)
+ // edi : the function to call
+ Isolate* isolate = masm->isolate();
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ // Load the cache state into ecx.
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(ecx, edi);
+ __ j(equal, &done, Label::kFar);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ j(equal, &done, Label::kFar);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in ecx.
+ Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done, Label::kFar);
+ }
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ j(equal, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kFar);
+
+ // An uninitialized cache is patched with the function, or with an
+ // AllocationSite (which tracks the ElementsKind) if the function is the
+ // Array constructor.
+ __ bind(&initialize);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor. Create an AllocationSite
+ // if we don't already have it, and store it in the slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ push(edi);
+ __ push(edx);
+ __ push(ebx);
+
+ CreateAllocationSiteStub create_stub(isolate);
+ __ CallStub(&create_stub);
+
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
+ }
+
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ // We won't need edx or ebx anymore, just save edi.
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
+ __ RecordWriteArray(ebx, edi, edx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(edx);
+ __ pop(ebx);
+ __ pop(edi);
+
+ __ bind(&done);
+}
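+
+// State-machine sketch (illustrative, not part of this patch): a feedback
+// vector slot written above moves through
+//
+//   uninitialized --first call--> monomorphic (the JSFunction, or an
+//                                 AllocationSite for the Array constructor)
+//   monomorphic --target mismatch--> megamorphic (sentinel, never patched
+//                                    back)
+//
+// and calls that match the cached state leave the slot untouched.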
+
+
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, cont);
+
+ // Do not transform the receiver for natives (shared already in ecx).
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, cont);
+}
+
+
+static void EmitSlowCase(Isolate* isolate,
+ MacroAssembler* masm,
+ int argc,
+ Label* non_function) {
+ // Check for function proxy.
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, non_function);
+ __ pop(ecx);
+ __ push(edi); // put proxy as additional argument under return address
+ __ push(ecx);
+ __ Move(eax, Immediate(argc + 1));
+ __ Move(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(non_function);
+ __ mov(Operand(esp, (argc + 1) * kPointerSize), edi);
+ __ Move(eax, Immediate(argc));
+ __ Move(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(edi);
+ }
+ __ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
+ __ jmp(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // edi : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(edi, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+ }
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Load the receiver from the stack.
+ __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
+
+ if (needs_checks) {
+ __ JumpIfSmi(eax, &wrap);
+
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // (non_function is bound in EmitSlowCase)
+ EmitSlowCase(masm->isolate(), masm, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // eax : number of arguments
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // edi : constructor function
+ Label slow, non_function_call;
+
+ // Check that function is not a smi.
+ __ JumpIfSmi(edi, &non_function_call);
+ // Check that function is a JSFunction.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into ebx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by edx + 1.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map =
+ isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(ebx);
+ }
+
+ // Jump to the function-specific construct stub.
+ Register jmp_reg = ecx;
+ __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(jmp_reg, FieldOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ jmp(jmp_reg);
+
+ // edi: called object
+ // eax: number of arguments
+ // ecx: object map
+ Label do_call;
+ __ bind(&slow);
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function_call);
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
+ __ bind(&non_function_call);
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
+ // Set expected number of arguments to zero (not changing eax).
+ __ Move(ebx, Immediate(0));
+ Handle<Code> arguments_adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(vector, FieldOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
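+
+// Load-chain sketch (illustrative): the vector register ends up holding
+//   frame.function->shared_function_info()->feedback_vector()
+// i.e. three dependent loads rooted at the current JavaScript frame; the
+// accessor names here are informal shorthand for the offsets used above.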
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // edi - function
+ // edx - slot id
+ Label miss;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, ebx);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &miss);
+
+ __ mov(eax, arg_count());
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ // Verify that ebx contains an AllocationSite or undefined.
+ __ AssertUndefinedOrAllocationSite(ebx);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+ // The slow case; we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // edi - function
+ // edx - slot id
+ Isolate* isolate = masm->isolate();
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, ebx);
+
+ // The checks. First, does edi match the recorded monomorphic target?
+ __ cmp(edi, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+
+ // Load the receiver from the stack.
+ __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
+
+ __ JumpIfSmi(eax, &wrap);
+
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &wrap);
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(isolate, masm, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ j(equal, &slow_start);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ j(equal, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ jmp(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Miss);
+
+ // The slow case.
+ __ bind(&slow_start);
+
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(edi, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+ __ jmp(&have_js_function);
+
+ // Unreachable
+ __ int3();
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(ecx, Operand(esp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ push(ecx);
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to edi and exit the internal frame.
+ __ mov(edi, eax);
+ }
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ return false;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ // It is important that the store buffer overflow stubs are generated first.
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Do nothing.
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(isolate, 1);
+ stub.GetCode();
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // eax: number of arguments including receiver
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // esi: current context (C callee-saved)
+ // edi: JS function of the caller (C callee-saved)
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame();
+
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // edi: number of arguments including receiver (C callee-saved)
+ // esi: pointer to the first argument (C callee-saved)
+
+ // Result returned in eax, or eax+edx if result_size_ is 2.
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
+ }
+
+ // Call C function.
+ __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ __ call(ebx);
+ // Result is in eax or edx:eax - do not destroy these registers!
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ cmp(eax, isolate()->factory()->exception());
+ __ j(equal, &exception_returned);
+
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ __ push(edx);
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ Label okay;
+ __ cmp(edx, Operand::StaticVariable(pending_exception_address));
+ // Cannot use Check() here, as it attempts to generate a call into the
+ // runtime.
+ __ j(equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ __ pop(edx);
+ }
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame();
+ __ ret(0);
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ // Retrieve the pending exception.
+ __ mov(eax, Operand::StaticVariable(pending_exception_address));
+
+ // Clear the pending exception.
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ __ mov(Operand::StaticVariable(pending_exception_address), edx);
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ Label throw_termination_exception;
+ __ cmp(eax, isolate()->factory()->termination_exception());
+ __ j(equal, &throw_termination_exception);
+
+ // Handle normal exception.
+ __ Throw(eax);
+
+ __ bind(&throw_termination_exception);
+ __ ThrowUncatchable(eax);
+}
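+
+// Calling-convention sketch (illustrative, not part of this patch): the C
+// function reached through ebx behaves like
+//
+//   Object* builtin(int argc, Object** argv, Isolate* isolate);
+//
+// returning its result in eax (edx:eax for two-word results). A returned
+// "exception" sentinel means a pending exception was stored on the isolate
+// and must be rethrown, which is what the code above does.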
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, handler_entry, exit;
+ Label not_outermost_js, not_outermost_js_2;
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Set up frame.
+ __ push(ebp);
+ __ mov(ebp, esp);
+
+ // Push marker in two places.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ push(Immediate(Smi::FromInt(marker))); // context slot
+ __ push(Immediate(Smi::FromInt(marker))); // function slot
+ // Save callee-saved registers (C calling conventions).
+ __ push(edi);
+ __ push(esi);
+ __ push(ebx);
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
+ __ push(Operand::StaticVariable(c_entry_fp));
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ j(not_equal, &not_outermost_js, Label::kNear);
+ __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+ __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ jmp(&invoke, Label::kNear);
+ __ bind(&not_outermost_js);
+ __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
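+
+ // At this point the entry frame built above looks like this (a sketch
+ // derived from the pushes; EntryFrameConstants is the authoritative
+ // layout):
+ //   ebp + 4  : return address
+ //   ebp + 0  : saved ebp
+ //   ebp - 4  : entry frame marker (context slot)
+ //   ebp - 8  : entry frame marker (function slot)
+ //   ebp - 12 : saved edi
+ //   ebp - 16 : saved esi
+ //   ebp - 20 : saved ebx
+ //   ebp - 24 : saved c_entry_fp
+ //   ebp - 28 : OUTERMOST_JSENTRY_FRAME or INNER_JSENTRY_FRAME marker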
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+ isolate());
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ __ mov(eax, Immediate(isolate()->factory()->exception()));
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
+ __ bind(&invoke);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+
+ // Clear any pending exceptions.
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ __ mov(Operand::StaticVariable(pending_exception), edx);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through the JS entry trampoline builtin
+ // and pop the faked function when we return. Notice that we cannot store
+ // a reference to the trampoline code directly in this stub, because the
+ // builtin stubs may not have been generated yet.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ isolate());
+ __ mov(edx, Immediate(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
+ __ mov(edx, Immediate(entry));
+ }
+ __ mov(edx, Operand(edx, 0)); // Deref the address to get the code object.
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ call(edx);
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+ __ bind(&exit);
+ // Check if the current stack frame is marked as the outermost JS frame.
+ __ pop(ebx);
+ __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ j(not_equal, &not_outermost_js_2);
+ __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ bind(&not_outermost_js_2);
+
+ // Restore the top frame descriptor from the stack.
+ __ pop(Operand::StaticVariable(ExternalReference(
+ Isolate::kCEntryFPAddress, isolate())));
+
+ // Restore callee-saved registers (C calling conventions).
+ __ pop(ebx);
+ __ pop(esi);
+ __ pop(edi);
+ __ add(esp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(ebp);
+ __ ret(0);
+}
+
+
+// Generate stub code for instanceof.
+// This code can patch a call site's inlined cache for the instanceof check,
+// which looks like this.
+//
+// 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
+// 75 0a jne <some near label>
+// b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
+//
+// If call site patching is requested, the stack will have the delta from the
+// return address to the cmp instruction just below the return address. This
+// also means that call site patching can only take place with arguments in
+// registers. TOS (top of stack) looks like this when call site patching is
+// requested:
+//
+// esp[0] : return address
+// esp[4] : delta from return address to cmp instruction
+//
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Call site inlining and patching implies arguments in registers.
+ ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+
+ // Fixed register usage throughout the stub.
+ Register object = eax; // Object (lhs).
+ Register map = ebx; // Map of the object.
+ Register function = edx; // Function (rhs).
+ Register prototype = edi; // Prototype of the function.
+ Register scratch = ecx;
+
+ // Constants describing the call site code to patch.
+ static const int kDeltaToCmpImmediate = 2;
+ static const int kDeltaToMov = 8;
+ static const int kDeltaToMovImmediate = 9;
+ static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
+ static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
+ static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+
+ ASSERT_EQ(object.code(), InstanceofStub::left().code());
+ ASSERT_EQ(function.code(), InstanceofStub::right().code());
+
+ // Get the object and function - they are always both needed.
+ Label slow, not_js_object;
+ if (!HasArgsInRegisters()) {
+ __ mov(object, Operand(esp, 2 * kPointerSize));
+ __ mov(function, Operand(esp, 1 * kPointerSize));
+ }
+
+ // Check that the left-hand side is a JS object.
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss, Label::kNear);
+ __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss, Label::kNear);
+ __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+ __ bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
+ __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ } else {
+ // The constants for the code patching assume that no push instructions
+ // were emitted at the call site.
+ ASSERT(HasArgsInRegisters());
+ // Get return address and delta to inlined map check.
+ __ mov(scratch, Operand(esp, 0 * kPointerSize));
+ __ sub(scratch, Operand(esp, 1 * kPointerSize));
+ if (FLAG_debug_code) {
+ __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1);
+ __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2);
+ }
+ __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
+ __ mov(Operand(scratch, 0), map);
+ }
+
+ // Loop through the prototype chain of the object looking for the function
+ // prototype.
+ __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
+ Label loop, is_instance, is_not_instance;
+ __ bind(&loop);
+ __ cmp(scratch, prototype);
+ __ j(equal, &is_instance, Label::kNear);
+ Factory* factory = isolate()->factory();
+ __ cmp(scratch, Immediate(factory->null_value()));
+ __ j(equal, &is_not_instance, Label::kNear);
+ __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
+ __ jmp(&loop);
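+
+ // The loop above walks the prototype chain; roughly (a sketch, with
+ // illustrative names rather than real V8 accessors):
+ //   for (p = map->prototype; ; p = p->map->prototype) {
+ //     if (p == prototype) goto is_instance;
+ //     if (p == null) goto is_not_instance;
+ //   }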
+
+ __ bind(&is_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(eax, Immediate(0));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Get return address and delta to inlined map check.
+ __ mov(eax, factory->true_value());
+ __ mov(scratch, Operand(esp, 0 * kPointerSize));
+ __ sub(scratch, Operand(esp, 1 * kPointerSize));
+ if (FLAG_debug_code) {
+ __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
+ }
+ __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
+ if (!ReturnTrueFalseObject()) {
+ __ Move(eax, Immediate(0));
+ }
+ }
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ __ bind(&is_not_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Get return address and delta to inlined map check.
+ __ mov(eax, factory->false_value());
+ __ mov(scratch, Operand(esp, 0 * kPointerSize));
+ __ sub(scratch, Operand(esp, 1 * kPointerSize));
+ if (FLAG_debug_code) {
+ __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
+ }
+ __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
+ if (!ReturnTrueFalseObject()) {
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ }
+ }
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ Label object_not_null, object_not_null_or_smi;
+ __ bind(&not_js_object);
+ // Before the null, smi and string value checks, check that the rhs is a
+ // function; for a non-function rhs an exception needs to be thrown.
+ __ JumpIfSmi(function, &slow, Label::kNear);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
+ __ j(not_equal, &slow, Label::kNear);
+
+ // Null is not an instance of anything.
+ __ cmp(object, factory->null_value());
+ __ j(not_equal, &object_not_null, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ __ bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ __ bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
+ __ j(NegateCondition(is_string), &slow, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ if (!ReturnTrueFalseObject()) {
+ // Tail call the builtin which returns 0 or 1.
+ if (HasArgsInRegisters()) {
+ // Push arguments below return address.
+ __ pop(scratch);
+ __ push(object);
+ __ push(function);
+ __ push(scratch);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+ } else {
+ // Call the builtin and convert 0/1 to true/false.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(object);
+ __ push(function);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ Label true_value, done;
+ __ test(eax, eax);
+ __ j(zero, &true_value, Label::kNear);
+ __ mov(eax, factory->false_value());
+ __ jmp(&done, Label::kNear);
+ __ bind(&true_value);
+ __ mov(eax, factory->true_value());
+ __ bind(&done);
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+ }
+}
+
+
+Register InstanceofStub::left() { return eax; }
+
+
+Register InstanceofStub::right() { return edx; }
+
+
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ // If the receiver is a smi trigger the non-string case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ test(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
+
+ __ SmiUntag(index_);
+
+ Factory* factory = masm->isolate()->factory();
+ StringCharLoadGenerator::Generate(
+ masm, factory, object_, index_, result_, &call_runtime_);
+
+ __ SmiTag(result_);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ masm->isolate()->factory()->heap_number_map(),
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+ }
+ if (!index_.is(eax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(index_, eax);
+ }
+ __ pop(object_);
+ // Reload the instance type.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code for getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ SmiTag(index_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ __ test(code_,
+ Immediate(kSmiTagMask |
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+ __ j(not_zero, &slow_case_);
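+ // This single test folds two checks into one: the smi tag bit must be
+ // clear, and no bit above String::kMaxOneByteCharCode may be set in the
+ // untagged value, i.e. code_ must be a smi in [0, kMaxOneByteCharCode].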
+
+ Factory* factory = masm->isolate()->factory();
+ __ Move(result_, Immediate(factory->single_character_string_cache()));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ // At this point the code register contains a smi-tagged ASCII char code.
+ __ mov(result_, FieldOperand(result_,
+ code_, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result_, factory->undefined_value());
+ __ j(equal, &slow_case_);
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ String::Encoding encoding) {
+ ASSERT(!scratch.is(dest));
+ ASSERT(!scratch.is(src));
+ ASSERT(!scratch.is(count));
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ test(count, count);
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ __ shl(count, 1);
+ }
+
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ inc(src);
+ __ inc(dest);
+ __ dec(count);
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash = (seed + character) + ((seed + character) << 10);
+ if (masm->serializer_enabled()) {
+ __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ __ SmiUntag(scratch);
+ __ add(scratch, character);
+ __ mov(hash, scratch);
+ __ shl(scratch, 10);
+ __ add(hash, scratch);
+ } else {
+ int32_t seed = masm->isolate()->heap()->HashSeed();
+ __ lea(scratch, Operand(character, seed));
+ __ shl(scratch, 10);
+ __ lea(hash, Operand(scratch, character, times_1, seed));
+ }
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ shr(scratch, 6);
+ __ xor_(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash += character;
+ __ add(hash, character);
+ // hash += hash << 10;
+ __ mov(scratch, hash);
+ __ shl(scratch, 10);
+ __ add(hash, scratch);
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ shr(scratch, 6);
+ __ xor_(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // hash += hash << 3;
+ __ mov(scratch, hash);
+ __ shl(scratch, 3);
+ __ add(hash, scratch);
+ // hash ^= hash >> 11;
+ __ mov(scratch, hash);
+ __ shr(scratch, 11);
+ __ xor_(hash, scratch);
+ // hash += hash << 15;
+ __ mov(scratch, hash);
+ __ shl(scratch, 15);
+ __ add(hash, scratch);
+
+ __ and_(hash, String::kHashBitMask);
+
+ // if (hash == 0) hash = 27;
+ Label hash_not_zero;
+ __ j(not_zero, &hash_not_zero, Label::kNear);
+ __ mov(hash, Immediate(StringHasher::kZeroHash));
+ __ bind(&hash_not_zero);
+}
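+
+
+// Taken together, GenerateHashInit, GenerateHashAddCharacter and
+// GenerateHashGetHash implement a Jenkins-style one-at-a-time hash.
+// A C sketch of the full computation (StringHasher is authoritative):
+//
+//   uint32_t hash = seed;
+//   for (each character c) {
+//     hash += c;
+//     hash += hash << 10;
+//     hash ^= hash >> 6;
+//   }
+//   hash += hash << 3;
+//   hash ^= hash >> 11;
+//   hash += hash << 15;
+//   hash &= String::kHashBitMask;
+//   if (hash == 0) hash = StringHasher::kZeroHash;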
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: to
+ // esp[8]: from
+ // esp[12]: string
+
+ // Make sure first argument is a string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(eax, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // eax: string
+ // ebx: instance type
+
+ // Calculate length of sub string using the smi values.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
+ __ JumpIfNotSmi(ecx, &runtime);
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
+ __ JumpIfNotSmi(edx, &runtime);
+ __ sub(ecx, edx);
+ __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
+ Label not_original_string;
+ // Shorter than original string's length: an actual substring.
+ __ j(below, &not_original_string, Label::kNear);
+ // Longer than original string's length or negative: unsafe arguments.
+ __ j(above, &runtime);
+ // Return original string.
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+ __ bind(&not_original_string);
+
+ Label single_char;
+ __ cmp(ecx, Immediate(Smi::FromInt(1)));
+ __ j(equal, &single_char);
+
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (smi)
+ // edx: from index (smi)
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into edi.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ test(ebx, Immediate(kIsIndirectStringMask));
+ __ j(zero, &seq_or_external_string, Label::kNear);
+
+ Factory* factory = isolate()->factory();
+ __ test(ebx, Immediate(kSlicedNotConsMask));
+ __ j(not_zero, &sliced_string, Label::kNear);
+ // Cons string. Check whether it is flat, then fetch first part.
+ // Flat cons strings have an empty second part.
+ __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
+ factory->empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and adjust start index by offset.
+ __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
+ __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ mov(edi, eax);
+
+ __ bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ // edi: underlying subject string
+ // ebx: instance type of underlying subject string
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
+ // Short slice. Copy instead of slicing.
+ __ j(less, &copy_routine);
+ // Allocate the new sliced string. At this point we do not reload the
+ // instance type including the string encoding because we simply rely on
+ // the info provided by the original string. It does not matter if the
+ // original string's encoding is wrong because we always have to recheck
+ // the encoding of the newly created string's parent anyway, due to
+ // externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ test(ebx, Immediate(kStringEncodingMask));
+ __ j(zero, &two_byte_slice, Label::kNear);
+ __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
+ __ jmp(&set_slice_header, Label::kNear);
+ __ bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
+ __ bind(&set_slice_header);
+ __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
+ __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
+ __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&copy_routine);
+ }
+
+ // edi: underlying subject string
+ // ebx: instance type of underlying subject string
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ // The subject string can only be external or sequential string of either
+ // encoding at this point.
+ Label two_byte_sequential, runtime_drop_two, sequential_string;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test_b(ebx, kExternalStringTag);
+ __ j(zero, &sequential_string);
+
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ test_b(ebx, kShortExternalStringMask);
+ __ j(not_zero, &runtime);
+ __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&sequential_string);
+ // Stash away (adjusted) index and (underlying) string.
+ __ push(edx);
+ __ push(edi);
+ __ SmiUntag(ecx);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ __ test_b(ebx, kStringEncodingMask);
+ __ j(zero, &two_byte_sequential);
+
+ // Sequential ASCII string. Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+
+ // eax: result string
+ // ecx: result string length
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ pop(edx);
+ __ pop(ebx);
+ __ SmiUntag(ebx);
+ __ lea(edx, FieldOperand(edx, ebx, times_1, SeqOneByteString::kHeaderSize));
+
+ // eax: result string
+ // ecx: result length
+ // edi: first character of result
+ // edx: character of sub string start
+ StringHelper::GenerateCopyCharacters(
+ masm, edi, edx, ecx, ebx, String::ONE_BYTE_ENCODING);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&two_byte_sequential);
+ // Sequential two-byte string. Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+
+ // eax: result string
+ // ecx: result string length
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(edi,
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ pop(edx);
+ __ pop(ebx);
+ // As the from index is a smi, it is already twice the untagged value,
+ // which matches the size of a two-byte character.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ lea(edx, FieldOperand(edx, ebx, times_1, SeqTwoByteString::kHeaderSize));
+
+ // eax: result string
+ // ecx: result length
+ // edi: first character of result
+ // edx: character of sub string start
+ StringHelper::GenerateCopyCharacters(
+ masm, edi, edx, ecx, ebx, String::TWO_BYTE_ENCODING);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ // Drop pushed values on the stack before tail call.
+ __ bind(&runtime_drop_two);
+ __ Drop(2);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+
+ __ bind(&single_char);
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (smi)
+ // edx: from index (smi)
+ StringCharAtGenerator generator(
+ eax, edx, ecx, eax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ ret(3 * kPointerSize);
+ generator.SkipSlow(masm, &runtime);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ mov(length, FieldOperand(left, String::kLengthOffset));
+ __ cmp(length, FieldOperand(right, String::kLengthOffset));
+ __ j(equal, &check_zero_length, Label::kNear);
+ __ bind(&strings_not_equal);
+ __ Move(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
+ __ ret(0);
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(length, length);
+ __ j(not_zero, &compare_chars, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ // Compare characters.
+ __ bind(&compare_chars);
+ GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal, Label::kNear);
+
+ // Characters are equal.
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_compare_native(), 1);
+
+ // Find minimum length.
+ Label left_shorter;
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ mov(scratch3, scratch1);
+ __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+ Register length_delta = scratch3;
+
+ __ j(less_equal, &left_shorter, Label::kNear);
+ // Right string is shorter. Change scratch1 to be length of right string.
+ __ sub(scratch1, length_delta);
+ __ bind(&left_shorter);
+
+ Register min_length = scratch1;
+
+ // If either length is zero, just compare lengths.
+ Label compare_lengths;
+ __ test(min_length, min_length);
+ __ j(zero, &compare_lengths, Label::kNear);
+
+ // Compare characters.
+ Label result_not_equal;
+ GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
+ &result_not_equal, Label::kNear);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ __ test(length_delta, length_delta);
+ Label length_not_equal;
+ __ j(not_zero, &length_not_equal, Label::kNear);
+
+ // Result is EQUAL.
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ Label result_greater;
+ Label result_less;
+ __ bind(&length_not_equal);
+ __ j(greater, &result_greater, Label::kNear);
+ __ jmp(&result_less, Label::kNear);
+ __ bind(&result_not_equal);
+ __ j(above, &result_greater, Label::kNear);
+ __ bind(&result_less);
+
+ // Result is LESS.
+ __ Move(eax, Immediate(Smi::FromInt(LESS)));
+ __ ret(0);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Move(eax, Immediate(Smi::FromInt(GREATER)));
+ __ ret(0);
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance chars_not_equal_near) {
+ // Change the index to run from -length to -1 by adding length to the
+ // string start. The loop then ends when the index reaches zero, which
+ // avoids an additional compare.
+ __ SmiUntag(length);
+ __ lea(left,
+ FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
+ __ neg(length);
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(left, index, times_1, 0));
+ __ cmpb(scratch, Operand(right, index, times_1, 0));
+ __ j(not_equal, chars_not_equal, chars_not_equal_near);
+ __ inc(index);
+ __ j(not_zero, &loop);
+}
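+
+
+// A C sketch of the comparison loop generated above (illustrative only):
+//
+//   left += length;  right += length;  // point just past both strings
+//   int index = -length;
+//   do {
+//     if (left[index] != right[index]) goto chars_not_equal;
+//   } while (++index != 0);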
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: right string
+ // esp[8]: left string
+
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(edx, eax);
+ __ j(not_equal, &not_same, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+
+ // Compare flat ASCII strings.
+ // Drop arguments from the stack.
+ __ pop(ecx);
+ __ add(esp, Immediate(2 * kPointerSize));
+ __ push(ecx);
+ GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : left
+ // -- eax : right
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Load ecx with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ mov(ecx, handle(isolate()->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_equal, kExpectedAllocationSite);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ isolate()->factory()->allocation_site_map());
+ __ Assert(equal, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SMI);
+ Label miss;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &miss, Label::kNear);
+
+ if (GetCondition() == equal) {
+ // For equality we do not care about the sign of the result.
+ __ sub(eax, edx);
+ } else {
+ Label done;
+ __ sub(edx, eax);
+ __ j(no_overflow, &done, Label::kNear);
+ // Correct sign of result in case of overflow.
+ __ not_(edx);
+ __ bind(&done);
+ __ mov(eax, edx);
+ }
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
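+
+
+// Note on the overflow correction in GenerateSmis above: ordered compares
+// only need the correct sign of the result. If edx - eax overflows, the
+// sign is inverted, and not_() flips the sign bit back while keeping the
+// value non-zero. For example (a sketch with 32-bit values): INT_MIN - 1
+// overflows to INT_MAX (positive); bitwise NOT yields INT_MIN (negative),
+// which is the required sign for the comparison.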
+
+
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
+
+ Label generic_stub;
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss;
+
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(edx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(eax, &miss);
+ }
+
+ // Inline the double comparison and fall back to the general compare
+ // stub if NaN is involved or SSE2/CMOV is unsupported.
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+
+ __ bind(&unordered);
+ __ bind(&generic_stub);
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ cmp(eax, Immediate(isolate()->factory()->undefined_value()));
+ __ j(not_equal, &miss);
+ __ JumpIfSmi(edx, &unordered);
+ __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ jmp(&unordered);
+ }
+
+ __ bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ cmp(edx, Immediate(isolate()->factory()->undefined_value()));
+ __ j(equal, &unordered);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ __ mov(tmp1, left);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, right);
+ __ JumpIfSmi(tmp1, &miss, Label::kNear);
+
+ // Check that both operands are internalized strings.
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, &miss, Label::kNear);
+
+ // Internalized strings are compared by identity.
+ Label done;
+ __ cmp(left, right);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ __ mov(tmp1, left);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, right);
+ __ JumpIfSmi(tmp1, &miss, Label::kNear);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+
+ __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
+ __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+
+ // Unique names are compared by identity.
+ Label done;
+ __ cmp(left, right);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRING);
+ Label miss;
+
+ bool equality = Token::IsEqualityOp(op_);
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+ Register tmp3 = edi;
+
+ // Check that both operands are heap objects.
+ __ mov(tmp1, left);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, right);
+ __ JumpIfSmi(tmp1, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ mov(tmp3, tmp1);
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ or_(tmp3, tmp2);
+ __ test(tmp3, Immediate(kIsNotStringMask));
+ __ j(not_zero, &miss);
+
+ // Fast check for identical strings.
+ Label not_same;
+ __ cmp(left, right);
+ __ j(not_equal, &not_same, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ // Handle not identical strings.
+ __ bind(&not_same);
+
+ // Check that both strings are internalized. If they are, we're done
+ // because we already know they are not identical. But in the case of
+ // non-equality compare, we still need to determine the order. We
+ // also know they are both strings.
+ if (equality) {
+ Label do_compare;
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, &do_compare, Label::kNear);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ ret(0);
+ __ bind(&do_compare);
+ }
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, left, right, tmp1, tmp2, tmp3);
+ }
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ pop(tmp1); // Return address.
+ __ push(left);
+ __ push(right);
+ __ push(tmp1);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::OBJECT);
+ Label miss;
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
+
+ __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
+ __ j(not_equal, &miss, Label::kNear);
+ __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
+ __ j(not_equal, &miss, Label::kNear);
+
+ ASSERT(GetCondition() == equal);
+ __ sub(eax, edx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
+
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ecx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+ __ cmp(ebx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ sub(eax, edx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ {
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+ isolate());
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx); // Preserve edx and eax.
+ __ push(eax);
+ __ push(edx); // And also use them as the arguments.
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+ __ pop(eax);
+ __ pop(edx);
+ }
+
+ // Do a tail call to the rewritten stub.
+ __ jmp(edi);
+}
+
+
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a unique name and receiver must be a heap object.
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<Name> name,
+ Register r0) {
+ ASSERT(name->IsUniqueName());
+
+ // If the names of the slots in the range from 1 to kProbes - 1 for the
+ // hash value are not equal to the name, and the kProbes-th slot is unused
+ // (its name is the undefined value), the hash table is guaranteed not to
+ // contain the property. This holds even if some slots represent deleted
+ // properties (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = r0;
+ // Capacity is smi 2^n.
+ __ mov(index, FieldOperand(properties, kCapacityOffset));
+ __ dec(index);
+ __ and_(index,
+ Immediate(Smi::FromInt(name->Hash() +
+ NameDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+ Register entity_name = r0;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
+ __ j(equal, done);
+
+ // Stop if found the property.
+ __ cmp(entity_name, Handle<Name>(name));
+ __ j(equal, miss);
+
+ Label good;
+ // Check for the hole and skip.
+ __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
+ __ j(equal, &good, Label::kNear);
+
+ // Check if the entry name is not a unique name.
+ __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ miss);
+ __ bind(&good);
+ }
+
+ NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
+ NEGATIVE_LOOKUP);
+ __ push(Immediate(Handle<Object>(name)));
+ __ push(Immediate(name->Hash()));
+ __ CallStub(&stub);
+ __ test(r0, r0);
+ __ j(not_zero, miss);
+ __ jmp(done);
+}
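+
+
+// A C-level sketch of the inlined probing above (illustrative names;
+// NameDictionary is the authoritative layout):
+//
+//   for (int i = 0; i < kInlinedProbes; i++) {
+//     int index = (hash + NameDictionary::GetProbeOffset(i)) & mask;
+//     Object* key = elements[kElementsStartIndex + index * kEntrySize];
+//     if (key == undefined) return kNotFound;  // empty slot: name absent
+//     if (key == name) return index;           // found the property
+//     // a deleted entry (the hole) or another name: keep probing
+//   }
+//   // otherwise fall back to the out-of-line NameDictionaryLookupStub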
+
+
+// Probe the name dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found, leaving the
+// index into the dictionary in |r0|. Jump to the |miss| label otherwise.
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
+ ASSERT(!elements.is(r0));
+ ASSERT(!elements.is(r1));
+ ASSERT(!name.is(r0));
+ ASSERT(!name.is(r1));
+
+ __ AssertName(name);
+
+ __ mov(r1, FieldOperand(elements, kCapacityOffset));
+ __ shr(r1, kSmiTagSize); // convert smi to int
+ __ dec(r1);
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
+ __ shr(r0, Name::kHashShift);
+ if (i > 0) {
+ __ add(r0, Immediate(NameDictionary::GetProbeOffset(i)));
+ }
+ __ and_(r0, r1);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
+
+ // Check if the key is identical to the name.
+ __ cmp(name, Operand(elements,
+ r0,
+ times_4,
+ kElementsStartOffset - kHeapObjectTag));
+ __ j(equal, done);
+ }
+
+ NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0,
+ POSITIVE_LOOKUP);
+ __ push(name);
+ __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
+ __ shr(r0, Name::kHashShift);
+ __ push(r0);
+ __ CallStub(&stub);
+
+ __ test(r1, r1);
+ __ j(zero, miss);
+ __ jmp(done);
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ // Stack frame on entry:
+ // esp[0 * kPointerSize]: return address.
+ // esp[1 * kPointerSize]: key's hash.
+ // esp[2 * kPointerSize]: key.
+ // Registers:
+ // dictionary_: NameDictionary to probe.
+ // result_: used as scratch.
+ // index_: will hold the index of the entry if the lookup is successful;
+ // may alias result_.
+ // Returns:
+ // result_ is zero if lookup failed, non zero otherwise.
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ Register scratch = result_;
+
+ __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
+ __ dec(scratch);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+
+ // If the names of the slots in the range from 1 to kProbes - 1 for the
+ // hash value are not equal to the name, and the kProbes-th slot is unused
+ // (its name is the undefined value), the hash table is guaranteed not to
+ // contain the property. This holds even if some slots represent deleted
+ // properties (their names are the hole value).
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ if (i > 0) {
+ __ add(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
+ }
+ __ and_(scratch, Operand(esp, 0));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ mov(scratch, Operand(dictionary_,
+ index_,
+ times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ cmp(scratch, isolate()->factory()->undefined_value());
+ __ j(equal, &not_in_dictionary);
+
+ // Stop if found the property.
+ __ cmp(scratch, Operand(esp, 3 * kPointerSize));
+ __ j(equal, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // If we hit a key that is not a unique name during negative
+ // lookup we have to bail out, as this key might be equal to the
+ // key we are looking for.
+
+ // Check if the entry name is not a unique name.
+ __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ &maybe_in_dictionary);
+ }
+ }
+
+ __ bind(&maybe_in_dictionary);
+ // If we are doing a negative lookup, then probing failure should be
+ // treated as a lookup success. For a positive lookup, probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ mov(result_, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
+ }
+
+ __ bind(&in_dictionary);
+ __ mov(result_, Immediate(1));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_in_dictionary);
+ __ mov(result_, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub(isolate);
+ stub.GetCode();
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_. A pointer
+// to the value has just been written into the object; now this stub makes
+// sure we keep the GC informed. The word in the object where the value has
+// been written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch them back
+ // and forth between compare instructions (nops in this position) and the
+ // real branches when we start and stop incremental heap marking.
+ __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+ __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ masm->set_byte_at(0, kTwoByteNopInstruction);
+ masm->set_byte_at(2, kFiveByteNopInstruction);
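+ // The two "nop" bytes are short cmp opcodes: each overwrites the first
+ // byte of a jump so the former jump offset is consumed as an immediate
+ // operand, giving a same-length instruction with no control-flow effect
+ // (an assumption about the encodings; the constants above are
+ // authoritative).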
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
+ mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm,
+ kReturnOnNoNeedToInformIncrementalMarker,
+ mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
+
+ regs_.RestoreCallerSaveRegisters(masm);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label object_is_black, need_incremental, need_incremental_pop_object;
+
+ __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ and_(regs_.scratch0(), regs_.object());
+ __ mov(regs_.scratch1(),
+ Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ sub(regs_.scratch1(), Immediate(1));
+ __ mov(Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset),
+ regs_.scratch1());
+ __ j(negative, &need_incremental);
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(),
+ regs_.scratch0(),
+ regs_.scratch1(),
+ &object_is_black,
+ Label::kNear);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&object_is_black);
+
+ // Get the value from the slot.
+ __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ not_zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ jmp(&need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need an extra register for this, so we push the object register
+ // temporarily.
+ __ push(regs_.object());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object,
+ Label::kNear);
+ __ pop(regs_.object());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&need_incremental_pop_object);
+ __ pop(regs_.object());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : element value to store
+ // -- ecx : element index as smi
+ // -- esp[0] : return address
+ // -- esp[4] : array literal index in function
+ // -- esp[8] : array literal
+ // clobbers ebx, edx, edi
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label slow_elements_from_double;
+ Label fast_elements;
+
+ // Get array literal index, array literal and its map.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
+
+ __ CheckFastElements(edi, &double_elements);
+
+ // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
+ __ JumpIfSmi(eax, &smi_element);
+ __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
+
+ // Storing into the array literal requires an elements transition. Call
+ // into the runtime.
+
+ __ bind(&slow_elements);
+ __ pop(edi); // Pop the return address; it is pushed back below for the
+ // tail call.
+ __ push(ebx);
+ __ push(ecx);
+ __ push(eax);
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(edx);
+ __ push(edi); // Restore the return address so that the tail call
+ // returns to the right place.
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ __ bind(&slow_elements_from_double);
+ __ pop(edx);
+ __ jmp(&slow_elements);
+
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
+ FixedArrayBase::kHeaderSize));
+ __ mov(Operand(ecx, 0), eax);
+ // Update the write barrier for the array store.
+ __ RecordWrite(ebx, ecx, eax,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
+ __ bind(&smi_element);
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
+ FixedArrayBase::kHeaderSize), eax);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+
+ __ push(edx);
+ __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(eax,
+ edx,
+ ecx,
+ edi,
+ &slow_elements_from_double,
+ false);
+ __ pop(edx);
+ __ ret(0);
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(isolate(), 1);
+ __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ mov(ebx, MemOperand(ebp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ pop(ecx);
+ int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
+ ? kPointerSize
+ : 0;
+ __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
+ __ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ ProfileEntryHookStub stub(masm->isolate());
+ masm->CallStub(&stub);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // Save volatile registers.
+ const int kNumSavedRegisters = 3;
+ __ push(eax);
+ __ push(ecx);
+ __ push(edx);
+
+ // Calculate and push the original stack pointer.
+ __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
+ __ push(eax);
+
+ // Retrieve our return address and use it to calculate the calling
+ // function's address.
+ __ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+ __ push(eax);
+
+ // Call the entry hook.
+ ASSERT(isolate()->function_entry_hook() != NULL);
+ __ call(FUNCTION_ADDR(isolate()->function_entry_hook()),
+ RelocInfo::RUNTIME_ENTRY);
+ __ add(esp, Immediate(2 * kPointerSize));
+
+  // Restore the saved registers.
+ __ pop(edx);
+ __ pop(ecx);
+ __ pop(eax);
+
+ __ ret(0);
+}
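+
+// A minimal sketch of the address arithmetic above, assuming the ia32 call
+// encoding whose length is Assembler::kCallInstructionLength: the entry
+// hook receives the call site's address, not the raw return address.
+// Hypothetical helper, shown for illustration only.
+static inline Address CallSiteFromReturnAddress(Address return_address) {
+  return return_address - Assembler::kCallInstructionLength;
+}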
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(masm->isolate(),
+ GetInitialFastElementsKind(),
+ mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // edx - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // eax - number of arguments
+ // edi - constructor?
+ // esp[0] - return address
+ // esp[4] - last argument
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+    // Is the low bit set? If so, we are holey and that is good.
+ __ test_b(edx, 1);
+ __ j(not_zero, &normal_sequence);
+ }
+
+  // Look at the first argument.
+ __ mov(ecx, Operand(esp, kPointerSize));
+ __ test(ecx, ecx);
+ __ j(zero, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry.
+ __ inc(edx);
+
+ if (FLAG_debug_code) {
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ Assert(equal, kExpectedAllocationSite);
+ }
+
+    // Save the resulting elements kind in type info. We can't just store
+    // the kind in the AllocationSite::transition_info field because elements
+    // kind is restricted to a portion of the field; the upper bits must be
+    // left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ add(FieldOperand(ebx, AllocationSite::kTransitionInfoOffset),
+ Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
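+
+// A sketch of the kind numbering the ASSERTs above rely on: packed fast
+// kinds are even and each holey variant is the next odd value, so the
+// packed -> holey transition is "set the low bit" (hence the inc of edx).
+// Hypothetical helper, not used by the stub:
+static inline int HoleyVariantOfSketch(int packed_kind) {
+  return packed_kind | 1;  // e.g. FAST_ELEMENTS (2) -> FAST_HOLEY_ELEMENTS (3)
+}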
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc (only if argument_count_ == ANY)
+ // -- ebx : AllocationSite or undefined
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // A zero result here indicates either a NULL or a Smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+
+ // We should either have undefined in ebx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(ebx);
+ }
+
+ Label no_info;
+  // If the feedback vector is the undefined value, call an array constructor
+  // that doesn't use AllocationSites.
+ __ cmp(ebx, isolate()->factory()->undefined_value());
+ __ j(equal, &no_info);
+
+  // Only look at the ElementsKindBits portion of the transition info.
+ __ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(edx);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
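+
+// A C++-side sketch of the decoding performed above, assuming the elements
+// kind occupies the low bits of the transition info (per the STATIC_ASSERT).
+// Hypothetical helper, for illustration only:
+static inline ElementsKind KindFromTransitionInfoSketch(int transition_info) {
+  return static_cast<ElementsKind>(
+      transition_info & AllocationSite::ElementsKindBits::kMask);
+}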
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+    // We might need to create a holey array; look at the first argument.
+ __ mov(ecx, Operand(esp, kPointerSize));
+ __ test(ecx, ecx);
+ __ j(zero, &normal_sequence);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // A zero result here indicates either a NULL or a Smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Load the map's "bit field 2" into ecx. We only need the first byte,
+  // but the following masking takes care of that anyway.
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(ecx);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &done);
+ __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ Assert(equal,
+ kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
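+
+// DecodeField<Map::ElementsKindBits> above is a plain mask-and-shift; a
+// sketch of the equivalent C++, assuming the usual BitField constants.
+// Hypothetical helper, for illustration only:
+static inline int ElementsKindFromBitField2Sketch(int bit_field2) {
+  return (bit_field2 & Map::ElementsKindBits::kMask) >>
+         Map::ElementsKindBits::kShift;
+}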
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : callee
+ // -- ebx : call_data
+ // -- ecx : holder
+ // -- edx : api_function_address
+ // -- esi : context
+ // --
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[argc * 4] : first argument
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ Register callee = eax;
+ Register call_data = ebx;
+ Register holder = ecx;
+ Register api_function_address = edx;
+ Register return_address = edi;
+ Register context = esi;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ __ pop(return_address);
+
+ // context save
+ __ push(context);
+ // load context from callee
+ __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ // return value
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // return value default
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ } else {
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ }
+ // isolate
+ __ push(Immediate(reinterpret_cast<int>(isolate())));
+ // holder
+ __ push(holder);
+
+ __ mov(scratch, esp);
+
+ // return address
+ __ push(return_address);
+
+  // The API function gets a reference to the v8::Arguments. If the CPU
+  // profiler is enabled, a wrapper function is called instead, and the
+  // address of the callback must be passed as an additional parameter, so
+  // always allocate space for it.
+ const int kApiArgc = 1 + 1;
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_.
+ __ mov(ApiParameterOperand(2), scratch);
+ __ add(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Move(ApiParameterOperand(4), Immediate(argc));
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Move(ApiParameterOperand(5), Immediate(0));
+
+ // v8::InvocationCallback's argument.
+ __ lea(scratch, ApiParameterOperand(2));
+ __ mov(ApiParameterOperand(0), scratch);
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ Operand context_restore_operand(ebp,
+ (2 + FCA::kContextSaveIndex) * kPointerSize);
+  // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ Operand return_value_operand(ebp, return_value_offset * kPointerSize);
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ ApiParameterOperand(1),
+ argc + FCA::kArgsLength + 1,
+ return_value_operand,
+ &context_restore_operand);
+}
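+
+// The pushes above lay out a FunctionCallbackArguments frame whose slots,
+// read upward from the implicit_args_ pointer, match the indices asserted
+// earlier; a sketch of that layout (lowest address first):
+//   [0] holder   [1] isolate   [2] return value default   [3] return value
+//   [4] call data   [5] callee   [6] saved context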
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : name
+ // -- esp[8 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- edx : api_function_address
+ // -----------------------------------
+
+  // Array for v8::Arguments::values_, a handle for the name, and a pointer
+  // to the values (treated as a smi by the GC).
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
+  // Allocate space for an optional callback address parameter in case the
+  // CPU profiler is active.
+ const int kApiArgc = 2 + 1;
+
+ Register api_function_address = edx;
+ Register scratch = ebx;
+
+ // load address of name
+ __ lea(scratch, Operand(esp, 1 * kPointerSize));
+
+ __ PrepareCallApiFunction(kApiArgc);
+ __ mov(ApiParameterOperand(0), scratch); // name.
+ __ add(scratch, Immediate(kPointerSize));
+ __ mov(ApiParameterOperand(1), scratch); // arguments pointer.
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ ApiParameterOperand(2),
+ kStackSpace,
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/code-stubs-x87.h b/chromium/v8/src/x87/code-stubs-x87.h
new file mode 100644
index 00000000000..13ab10ffbde
--- /dev/null
+++ b/chromium/v8/src/x87/code-stubs-x87.h
@@ -0,0 +1,413 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_CODE_STUBS_X87_H_
+#define V8_X87_CODE_STUBS_X87_H_
+
+#include "src/macro-assembler.h"
+#include "src/ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm,
+ bool construct_call,
+ Label* call_generic_code);
+
+
+class StoreBufferOverflowStub: public PlatformCodeStub {
+ public:
+ explicit StoreBufferOverflowStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return 0; }
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies ecx characters from esi to edi. Copying of overlapping regions is
+ // not supported.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ String::Encoding encoding);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class SubStringStub: public PlatformCodeStub {
+ public:
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public PlatformCodeStub {
+ public:
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
+
+ // Compares two flat ASCII strings and returns result in eax.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ // Compares two flat ASCII strings for equality and returns result
+ // in eax.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance chars_not_equal_near = Label::kFar);
+};
+
+
+class NameDictionaryLookupStub: public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ NameDictionaryLookupStub(Isolate* isolate,
+ Register dictionary,
+ Register result,
+ Register index,
+ LookupMode mode)
+ : PlatformCodeStub(isolate),
+ dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<Name> name,
+ Register r0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ Major MajorKey() { return NameDictionaryLookup; }
+
+ int MinorKey() {
+ return DictionaryBits::encode(dictionary_.code()) |
+ ResultBits::encode(result_.code()) |
+ IndexBits::encode(index_.code()) |
+ LookupModeBits::encode(mode_);
+ }
+
+ class DictionaryBits: public BitField<int, 0, 3> {};
+ class ResultBits: public BitField<int, 3, 3> {};
+ class IndexBits: public BitField<int, 6, 3> {};
+ class LookupModeBits: public BitField<LookupMode, 9, 1> {};
+
+ Register dictionary_;
+ Register result_;
+ Register index_;
+ LookupMode mode_;
+};
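+
+// MinorKey packing sketch: three 3-bit register codes plus a 1-bit mode,
+// laid out per the BitField declarations above; a hypothetical round trip:
+//   int key = DictionaryBits::encode(edi.code()) |
+//             ResultBits::encode(eax.code()) |
+//             IndexBits::encode(ebx.code()) |
+//             LookupModeBits::encode(POSITIVE_LOOKUP);
+//   LookupMode mode = LookupModeBits::decode(key);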
+
+
+class RecordWriteStub: public PlatformCodeStub {
+ public:
+ RecordWriteStub(Isolate* isolate,
+ Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action)
+ : PlatformCodeStub(isolate),
+ object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
+ static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
+
+ static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
+ static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
+
+ static Mode GetMode(Code* stub) {
+ byte first_instruction = stub->instruction_start()[0];
+ byte second_instruction = stub->instruction_start()[2];
+
+ if (first_instruction == kTwoByteJumpInstruction) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(first_instruction == kTwoByteNopInstruction);
+
+ if (second_instruction == kFiveByteJumpInstruction) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(second_instruction == kFiveByteNopInstruction);
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteNopInstruction;
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteJumpInstruction;
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteJumpInstruction;
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 7);
+ }
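+
+  // A byte-level sketch of the patchable prologue GetMode()/Patch() work on
+  // (offsets into instruction_start(); immediates shown as xx):
+  //   STORE_BUFFER_ONLY:      3c xx 3d xx xx xx xx  (2-byte nop, 5-byte nop)
+  //   INCREMENTAL:            eb xx ...             (2-byte jmp)
+  //   INCREMENTAL_COMPACTION: 3c xx e9 xx xx xx xx  (2-byte nop, 5-byte jmp)
+  // FlushICache covers the first 7 bytes, i.e. both patch slots.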
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers, where the third
+ // is always ecx (needed for shift operations). The input is two registers
+ // that must be preserved and one scratch register provided by the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_orig_(object),
+ address_orig_(address),
+ scratch0_orig_(scratch0),
+ object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
+ if (scratch0.is(ecx)) {
+ scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
+ }
+ if (object.is(ecx)) {
+ object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
+ }
+ if (address.is(ecx)) {
+ address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
+ }
+ ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!address_orig_.is(object_));
+ ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+ // We don't have to save scratch0_orig_ because it was given to us as
+ // a scratch register. But if we had to switch to a different reg then
+ // we should save the new scratch0_.
+ if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!ecx.is(scratch0_orig_) &&
+ !ecx.is(object_orig_) &&
+ !ecx.is(address_orig_)) {
+ masm->push(ecx);
+ }
+ masm->push(scratch1_);
+ if (!address_.is(address_orig_)) {
+ masm->push(address_);
+ masm->mov(address_, address_orig_);
+ }
+ if (!object_.is(object_orig_)) {
+ masm->push(object_);
+ masm->mov(object_, object_orig_);
+ }
+ }
+
+ void Restore(MacroAssembler* masm) {
+ // These will have been preserved the entire time, so we just need to move
+ // them back. Only in one case is the orig_ reg different from the plain
+ // one, since only one of them can alias with ecx.
+ if (!object_.is(object_orig_)) {
+ masm->mov(object_orig_, object_);
+ masm->pop(object_);
+ }
+ if (!address_.is(address_orig_)) {
+ masm->mov(address_orig_, address_);
+ masm->pop(address_);
+ }
+ masm->pop(scratch1_);
+ if (!ecx.is(scratch0_orig_) &&
+ !ecx.is(object_orig_) &&
+ !ecx.is(address_orig_)) {
+ masm->pop(ecx);
+ }
+ if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ }
+
+  // If we have to call into C then we need to save and restore all caller-
+  // saved registers that were not already preserved. The caller-saved
+  // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
+  // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm) {
+ if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
+ if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
+ }
+
+  inline void RestoreCallerSaveRegisters(MacroAssembler* masm) {
+ if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
+ if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_orig_;
+ Register address_orig_;
+ Register scratch0_orig_;
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ // Third scratch register is always ecx.
+
+ Register GetRegThatIsNotEcxOr(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(ecx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 3> {};
+ class ValueBits: public BitField<int, 3, 3> {};
+ class AddressBits: public BitField<int, 6, 3> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ RegisterAllocation regs_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_X87_CODE_STUBS_X87_H_
diff --git a/chromium/v8/src/x87/codegen-x87.cc b/chromium/v8/src/x87/codegen-x87.cc
new file mode 100644
index 00000000000..5091e88aa33
--- /dev/null
+++ b/chromium/v8/src/x87/codegen-x87.cc
@@ -0,0 +1,632 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/codegen.h"
+#include "src/heap.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+#define __ masm.
+
+
+UnaryMathFunction CreateExpFunction() {
+ // No SSE2 support
+ return &std::exp;
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ // No SSE2 support
+ return &std::sqrt;
+}
+
+
+// Helper functions for CreateMemMoveFunction.
+#undef __
+#define __ ACCESS_MASM(masm)
+
+enum Direction { FORWARD, BACKWARD };
+enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
+
+
+void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+}
+
+
+#undef __
+#define __ masm.
+
+
+class LabelConverter {
+ public:
+ explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
+ int32_t address(Label* l) const {
+ return reinterpret_cast<int32_t>(buffer_) + l->pos();
+ }
+ private:
+ byte* buffer_;
+};
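+
+// LabelConverter usage sketch: once a label is bound, l->pos() is a byte
+// offset into |buffer|; because the buffer is never relocated, the sum in
+// address() is the absolute address that position will execute at, so it
+// can be embedded directly in the generated code (e.g. for jump tables).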
+
+
+MemMoveFunction CreateMemMoveFunction() {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return NULL;
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ LabelConverter conv(buffer);
+
+ // Generated code is put into a fixed, unmovable buffer, and not into
+ // the V8 heap. We can't, and don't, refer to any relocatable addresses
+ // (e.g. the JavaScript nan-object).
+
+ // 32-bit C declaration function calls pass arguments on stack.
+
+ // Stack layout:
+ // esp[12]: Third argument, size.
+ // esp[8]: Second argument, source pointer.
+ // esp[4]: First argument, destination pointer.
+ // esp[0]: return address
+
+ const int kDestinationOffset = 1 * kPointerSize;
+ const int kSourceOffset = 2 * kPointerSize;
+ const int kSizeOffset = 3 * kPointerSize;
+
+ int stack_offset = 0; // Update if we change the stack height.
+
+ Label backward, backward_much_overlap;
+ Label forward_much_overlap, small_size, medium_size, pop_and_return;
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ __ cmp(dst, src);
+ __ j(equal, &pop_and_return);
+
+ // No SSE2.
+ Label forward;
+ __ cmp(count, 0);
+ __ j(equal, &pop_and_return);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+ __ jmp(&forward);
+ {
+ // Simple forward copier.
+ Label forward_loop_1byte, forward_loop_4byte;
+ __ bind(&forward_loop_4byte);
+ __ mov(eax, Operand(src, 0));
+ __ sub(count, Immediate(4));
+ __ add(src, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ add(dst, Immediate(4));
+ __ bind(&forward); // Entry point.
+ __ cmp(count, 3);
+ __ j(above, &forward_loop_4byte);
+ __ bind(&forward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(count);
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ jmp(&forward_loop_1byte);
+ }
+ {
+ // Simple backward copier.
+ Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
+ __ bind(&backward);
+ __ add(src, count);
+ __ add(dst, count);
+ __ cmp(count, 3);
+ __ j(below_equal, &entry_shortcut);
+
+ __ bind(&backward_loop_4byte);
+ __ sub(src, Immediate(4));
+ __ sub(count, Immediate(4));
+ __ mov(eax, Operand(src, 0));
+ __ sub(dst, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ cmp(count, 3);
+ __ j(above, &backward_loop_4byte);
+ __ bind(&backward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ bind(&entry_shortcut);
+ __ dec(src);
+ __ dec(count);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(dst);
+ __ mov_b(Operand(dst, 0), eax);
+ __ jmp(&backward_loop_1byte);
+ }
+
+ __ bind(&pop_and_return);
+ MemMoveEmitPopAndReturn(&masm);
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ // TODO(jkummerow): It would be nice to register this code creation event
+ // with the PROFILE / GDBJIT system.
+ return FUNCTION_CAST<MemMoveFunction>(buffer);
+}
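+
+// The generated copiers implement the classic memmove overlap rule (copy
+// backward when the destination lies above the source). A portable C++
+// sketch of the same policy, byte loop only, for illustration:
+static void MemMovePolicySketch(byte* dst, const byte* src, size_t count) {
+  if (dst == src || count == 0) return;
+  if (dst < src) {
+    for (size_t i = 0; i < count; i++) dst[i] = src[i];          // forward
+  } else {
+    for (size_t i = count; i > 0; i--) dst[i - 1] = src[i - 1];  // backward
+  }
+}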
+
+
+#undef __
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ebx : target map
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ebx : target map
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required, only_change_map;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(equal, &only_change_map);
+
+ __ push(eax);
+ __ push(ebx);
+
+ __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
+
+ // Allocate new FixedDoubleArray.
+ // edx: receiver
+ // edi: length of source FixedArray (smi-tagged)
+ AllocationFlags flags =
+ static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+ __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
+ REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
+
+ // eax: destination FixedDoubleArray
+ // edi: number of elements
+ // edx: receiver
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_double_array_map()));
+ __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
+ __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+ __ mov(ebx, eax);
+ __ RecordWriteField(edx,
+ JSObject::kElementsOffset,
+ ebx,
+ edi,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
+
+ // Prepare for conversion loop.
+ ExternalReference canonical_the_hole_nan_reference =
+ ExternalReference::address_of_the_hole_nan();
+ __ jmp(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ // Restore registers before jumping into runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(ebx);
+ __ pop(eax);
+ __ jmp(fail);
+
+ // Convert and copy elements
+ // esi: source FixedArray
+ __ bind(&loop);
+ __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
+ // ebx: current element from source
+ // edi: index of current element
+ __ JumpIfNotSmi(ebx, &convert_hole);
+
+ // Normal smi, convert it to double and store.
+ __ SmiUntag(ebx);
+ __ push(ebx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ebx);
+ __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+ __ jmp(&entry);
+
+ // Found hole, store hole_nan_as_double instead.
+ __ bind(&convert_hole);
+
+ if (FLAG_debug_code) {
+ __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+ __ Assert(equal, kObjectFoundInSmiOnlyArray);
+ }
+
+ __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
+ __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+
+ __ bind(&entry);
+ __ sub(edi, Immediate(Smi::FromInt(1)));
+ __ j(not_sign, &loop);
+
+ __ pop(ebx);
+ __ pop(eax);
+
+ // Restore esi.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&only_change_map);
+ // eax: value
+ // ebx: target map
+ // Set transitioned map.
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ebx : target map
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required, only_change_map, success;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(equal, &only_change_map);
+
+ __ push(eax);
+ __ push(edx);
+ __ push(ebx);
+
+ __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
+
+ // Allocate new FixedArray.
+ // ebx: length of source FixedDoubleArray (smi-tagged)
+ __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
+ __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+
+ // eax: destination FixedArray
+ // ebx: number of elements
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_array_map()));
+ __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ jmp(&entry);
+
+ // ebx: target map
+ // edx: receiver
+ // Set transitioned map.
+ __ bind(&only_change_map);
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&success);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(eax);
+ __ jmp(fail);
+
+ // Box doubles into heap numbers.
+ // edi: source FixedDoubleArray
+ // eax: destination FixedArray
+ __ bind(&loop);
+ // ebx: index of current element (smi-tagged)
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(equal, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
+ // edx: new heap number
+ __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+ __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
+ __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
+ __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
+ __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
+ __ mov(esi, ebx);
+ __ RecordWriteArray(eax,
+ edx,
+ esi,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&entry, Label::kNear);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
+ masm->isolate()->factory()->the_hole_value());
+
+ __ bind(&entry);
+ __ sub(ebx, Immediate(Smi::FromInt(1)));
+ __ j(not_sign, &loop);
+
+ __ pop(ebx);
+ __ pop(edx);
+ // ebx: target map
+ // edx: receiver
+ // Set transitioned map.
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+ __ RecordWriteField(edx,
+ JSObject::kElementsOffset,
+ eax,
+ edi,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Restore registers.
+ __ pop(eax);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&success);
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Factory* factory,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ test(result, Immediate(kIsIndirectStringMask));
+ __ j(zero, &check_sequential, Label::kNear);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ test(result, Immediate(kSlicedNotConsMask));
+ __ j(zero, &cons_string, Label::kNear);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
+ __ SmiUntag(result);
+ __ add(index, result);
+ __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded, Label::kNear);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ cmp(FieldOperand(string, ConsString::kSecondOffset),
+ Immediate(factory->empty_string()));
+ __ j(not_equal, call_runtime);
+ __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label seq_string;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &seq_string, Label::kNear);
+
+ // Handle external strings.
+ Label ascii_external, done;
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ test(result, Immediate(kIsIndirectStringMask));
+ __ Assert(zero, kExternalStringExpectedButNotFound);
+ }
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ test_b(result, kShortExternalStringMask);
+ __ j(not_zero, call_runtime);
+ // Check encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ test_b(result, kStringEncodingMask);
+ __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+ __ j(not_equal, &ascii_external, Label::kNear);
+ // Two-byte string.
+ __ movzx_w(result, Operand(result, index, times_2, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&ascii_external);
+ // Ascii string.
+ __ movzx_b(result, Operand(result, index, times_1, 0));
+ __ jmp(&done, Label::kNear);
+
+ // Dispatch on the encoding: ASCII or two-byte.
+ Label ascii;
+ __ bind(&seq_string);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ test(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii, Label::kNear);
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ __ movzx_w(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&done, Label::kNear);
+
+ // Ascii string.
+ // Load the byte into the result register.
+ __ bind(&ascii);
+ __ movzx_b(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqOneByteString::kHeaderSize));
+ __ bind(&done);
+}
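+
+// The dispatch above, summarized (a reading of the generated code):
+//   slice      -> add the SlicedString offset to index, continue with parent
+//   cons       -> continue with the first part if the second part is empty,
+//                 otherwise fall back to the runtime to flatten
+//   external   -> load from kResourceDataOffset (short externals -> runtime)
+//   sequential -> load relative to the Seq*String header (1- or 2-byte)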
+
+
+#undef __
+
+
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ patcher.masm()->push(ebp);
+ patcher.masm()->mov(ebp, esp);
+ patcher.masm()->push(esi);
+ patcher.masm()->push(edi);
+}
+
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return *candidate == kCallOpcode;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(isolate, sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ sequence++; // Skip the kCallOpcode byte
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
+ if (age == kNoAgeCodeAge) {
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(sequence, young_length);
+ patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
+ }
+}
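+
+// Aging sketch: a young prologue is the frame-setup sequence emitted by
+// CodeAgingHelper above; an aged one starts with kCallOpcode whose rel32
+// operand points at the age stub, which is why GetCodeAgeAndParity skips
+// one byte and then adds the 32-bit displacement to recover the stub.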
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/codegen-x87.h b/chromium/v8/src/x87/codegen-x87.h
new file mode 100644
index 00000000000..15b2702407f
--- /dev/null
+++ b/chromium/v8/src/x87/codegen-x87.h
@@ -0,0 +1,33 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_CODEGEN_X87_H_
+#define V8_X87_CODEGEN_X87_H_
+
+#include "src/ast.h"
+#include "src/ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Factory* factory,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X87_CODEGEN_X87_H_
diff --git a/chromium/v8/src/x87/cpu-x87.cc b/chromium/v8/src/x87/cpu-x87.cc
new file mode 100644
index 00000000000..469f58eea7e
--- /dev/null
+++ b/chromium/v8/src/x87/cpu-x87.cc
@@ -0,0 +1,44 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for x87 (ia32 without SSE2) independent of OS goes here.
+
+#ifdef __GNUC__
+#include "src/third_party/valgrind/valgrind.h"
+#endif
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/cpu.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::FlushICache(void* start, size_t size) {
+  // No need to flush the instruction cache on Intel. On Intel, instruction
+  // cache flushing is only necessary when multiple cores are running the
+  // same code simultaneously. V8 (and JavaScript) is single threaded, and
+  // when code is patched on an Intel CPU the core performing the patching
+  // will have its own instruction cache updated automatically.
+
+ // If flushing of the instruction cache becomes necessary Windows has the
+ // API function FlushInstructionCache.
+
+  // By default, valgrind only checks the stack for writes that might need to
+  // invalidate already cached translated code. This leads to random
+  // instability when code patches or moves go unnoticed. One solution is to
+  // run valgrind with --smc-check=all, but this comes at a big performance
+  // cost. Instead, we notify valgrind to invalidate its cache explicitly.
+#ifdef VALGRIND_DISCARD_TRANSLATIONS
+ unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ USE(res);
+#endif
+}
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/debug-x87.cc b/chromium/v8/src/x87/debug-x87.cc
new file mode 100644
index 00000000000..e3e91653c0f
--- /dev/null
+++ b/chromium/v8/src/x87/debug-x87.cc
@@ -0,0 +1,336 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/codegen.h"
+#include "src/debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+// Patch the JS frame exit code with a debug break call. See
+// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x87.cc
+// for the precise return instruction sequence.
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ ASSERT(Assembler::kJSReturnSequenceLength >=
+ Assembler::kCallInstructionLength);
+ rinfo()->PatchCodeWithCall(
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
+ Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSReturnSequenceLength);
+}
+
+
+// A debug break in the frame exit code is identified by the JS frame exit code
+// having been patched with a call instruction.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ Isolate* isolate = debug_info_->GetIsolate();
+ rinfo()->PatchCodeWithCall(
+ isolate->builtins()->Slot_DebugBreak()->entry(),
+ Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs,
+ bool convert_call_to_jmp) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+ }
+ __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis, which the GC leaves untouched.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ test(reg, Immediate(0xc0000000));
+ __ Assert(zero, kUnableToEncodeValueAsSmi);
+ }
+ __ SmiTag(reg);
+ __ push(reg);
+ }
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ Move(eax, Immediate(0)); // No arguments.
+ __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+    // Automatically find a register that can be used after the register
+    // restore. We need one register for the padding skip instructions.
+ Register unused_reg = { -1 };
+
+ // Restore the register values containing object pointers from the
+ // expression stack.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Move(reg, Immediate(kDebugZapValue));
+ }
+ bool taken = reg.code() == esi.code();
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ taken = true;
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ __ SmiUntag(reg);
+ taken = true;
+ }
+ if (!taken) {
+ unused_reg = reg;
+ }
+ }
+
+ ASSERT(unused_reg.code() != -1);
+
+ // Read current padding counter and skip corresponding number of words.
+ __ pop(unused_reg);
+  // We divide the stored value by 2 (untagging) and multiply it by the word
+  // size.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
+
+ // Get rid of the internal frame.
+ }
+
+ // If this call did not replace a call but patched other code then there will
+ // be an unwanted return address left on the stack. Here we get rid of that.
+ if (convert_call_to_jmp) {
+ __ add(esp, Immediate(kPointerSize));
+ }
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ jmp(Operand::StaticVariable(after_break_target));
+}
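+
+// Smi sketch for the helper above: non-object register values are pushed in
+// smi form (value << 1, with kSmiTagSize == 1), so the GC scans them as
+// integers rather than pointers; the 0xc0000000 test guarantees the top two
+// bits are clear, i.e. the tagging shift cannot lose information.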
+
+
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- edx : type feedback slot (smi)
+ // -- edi : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, edx.bit() | edi.bit(),
+ 0, false);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ // Register state for IC load call (from ic-x87.cc).
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+}
+
+
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ // Register state for IC store call (from ic-x87.cc).
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
+}
+
+
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // Register state for keyed IC load call (from ic-x87.cc).
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+}
+
+
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // Register state for keyed IC store call (from ic-x87.cc).
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
+}
+
+
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
+}
+
+
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
+ // Register state just before return from JS function (from codegen-x87.cc).
+ // ----------- S t a t e -------------
+ // -- eax: return value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
+}
+
+
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-x87.cc).
+ // ----------- S t a t e -------------
+ // -- edi: function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
+}
+
+
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallConstructStub (from code-stubs-x87.cc).
+ // eax is the actual number of arguments, not encoded as a smi; see the
+ // comment above the IC call.
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments (not smi)
+ // -- edi: constructor function
+ // -----------------------------------
+ // The number of arguments in eax is not smi encoded.
+ Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
+}
+
+
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
+ // Register state for CallConstructStub (from code-stubs-x87.cc).
+ // eax is the actual number of arguments, not encoded as a smi; see the
+ // comment above the IC call.
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments (not smi)
+ // -- ebx: feedback array
+ // -- edx: feedback slot (smi)
+ // -- edi: constructor function
+ // -----------------------------------
+ // The number of arguments in eax is not smi encoded.
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
+ eax.bit(), false);
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
+ // Generate enough nops to make space for a call instruction.
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ __ Nop(Assembler::kDebugBreakSlotLength);
+ ASSERT_EQ(Assembler::kDebugBreakSlotLength,
+ masm->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
+ // In the places where a debug break slot is inserted, no registers can
+ // contain object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0, true);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->ret(0);
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
+
+ // We do not know our frame height, but set esp based on ebp.
+ __ lea(esp, Operand(ebp, -1 * kPointerSize));
+
+ __ pop(edi); // Function.
+ __ pop(ebp);
+
+ // Load context from the function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+
+ // Re-run JSFunction, edi is function, esi is context.
+ __ jmp(edx);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/deoptimizer-x87.cc b/chromium/v8/src/x87/deoptimizer-x87.cc
new file mode 100644
index 00000000000..36d66497c8d
--- /dev/null
+++ b/chromium/v8/src/x87/deoptimizer-x87.cc
@@ -0,0 +1,406 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+const int Deoptimizer::table_entry_size_ = 10;
+
+
+int Deoptimizer::patch_size() {
+ return Assembler::kCallInstructionLength;
+}
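+
+// On ia32 the patched call is the 5-byte "call rel32" form, one 0xE8 opcode
+// byte followed by a 32-bit displacement; see the "1 after the call opcode"
+// reloc offset in PatchCodeForDeoptimization below.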
+
+
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+ Isolate* isolate = code->GetIsolate();
+ HandleScope scope(isolate);
+
+ // Compute the size of relocation information needed for the code
+ // patching in Deoptimizer::DeoptimizeFunction.
+ int min_reloc_size = 0;
+ int prev_pc_offset = 0;
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ int pc_offset = deopt_data->Pc(i)->value();
+ if (pc_offset == -1) continue;
+ ASSERT_GE(pc_offset, prev_pc_offset);
+ int pc_delta = pc_offset - prev_pc_offset;
+ // We use RUNTIME_ENTRY reloc info, which takes 2 bytes when the pc delta
+ // fits the small encoding and up to 6 bytes otherwise.
+ if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
+ min_reloc_size += 2;
+ } else {
+ min_reloc_size += 6;
+ }
+ prev_pc_offset = pc_offset;
+ }
+
+ // If the relocation information is not big enough, we create a new
+ // relocation info object that is padded with comments to make it
+ // big enough for lazy deoptimization.
+ int reloc_length = code->relocation_info()->length();
+ if (min_reloc_size > reloc_length) {
+ int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
+ // Padding needed.
+ int min_padding = min_reloc_size - reloc_length;
+ // Number of comments needed to take up at least that much space.
+ int additional_comments =
+ (min_padding + comment_reloc_size - 1) / comment_reloc_size;
+ // Actual padding size.
+ int padding = additional_comments * comment_reloc_size;
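+ // Worked example (illustrative): if 25 more bytes are needed and a
+ // comment entry occupies, say, 7 bytes, the ceiling division yields
+ // (25 + 7 - 1) / 7 == 4 comments, i.e. 28 bytes of padding.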
+ // Allocate new relocation info and copy old relocation to the end
+ // of the new relocation info array because relocation info is
+ // written and read backwards.
+ Factory* factory = isolate->factory();
+ Handle<ByteArray> new_reloc =
+ factory->NewByteArray(reloc_length + padding, TENURED);
+ MemCopy(new_reloc->GetDataStartAddress() + padding,
+ code->relocation_info()->GetDataStartAddress(), reloc_length);
+ // Create a relocation writer to write the comments in the padding
+ // space. Use position 0 for everything to ensure short encoding.
+ RelocInfoWriter reloc_info_writer(
+ new_reloc->GetDataStartAddress() + padding, 0);
+ intptr_t comment_string
+ = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
+ RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
+ for (int i = 0; i < additional_comments; ++i) {
+#ifdef DEBUG
+ byte* pos_before = reloc_info_writer.pos();
+#endif
+ reloc_info_writer.Write(&rinfo);
+ ASSERT(RelocInfo::kMinRelocCommentSize ==
+ pos_before - reloc_info_writer.pos());
+ }
+ // Replace relocation information on the code object.
+ code->set_relocation_info(*new_reloc);
+ }
+}
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ Address code_start_address = code->instruction_start();
+
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
+ // We will overwrite the code's relocation info in-place. Relocation info
+ // is written backward. The relocation info is the payload of a byte
+ // array. Later on we will slide this to the start of the byte array and
+ // create a filler object in the remaining space.
+ ByteArray* reloc_info = code->relocation_info();
+ Address reloc_end_address = reloc_info->address() + reloc_info->Size();
+ RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
+
+ // Since the patched calls use relative encodings, we have to write new
+ // reloc info. We do not need any of the existing reloc info because the
+ // existing code will not be used again (we zap it in debug builds).
+ //
+ // Emit call to lazy deoptimization at all lazy deopt points.
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ // Patch lazy deoptimization entry.
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ CodePatcher patcher(call_address, patch_size());
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+ patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
+ // We use RUNTIME_ENTRY for deoptimization bailouts.
+ RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
+ RelocInfo::RUNTIME_ENTRY,
+ reinterpret_cast<intptr_t>(deopt_entry),
+ NULL);
+ reloc_info_writer.Write(&rinfo);
+ ASSERT_GE(reloc_info_writer.pos(),
+ reloc_info->address() + ByteArray::kHeaderSize);
+ ASSERT(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+
+ // Move the relocation info to the beginning of the byte array.
+ int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
+ MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
+
+ // The relocation info is in place, update the size.
+ reloc_info->set_length(new_reloc_size);
+
+ // Handle the junk part after the new relocation info. We will create
+ // a non-live object in the extra space at the end of the former reloc info.
+ Address junk_address = reloc_info->address() + reloc_info->Size();
+ ASSERT(junk_address <= reloc_end_address);
+ isolate->heap()->CreateFillerObjectAt(junk_address,
+ reloc_end_address - junk_address);
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee-saved registers in JavaScript frames, so all registers are
+ // spilled. Registers ebp and esp are set to the correct values though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ intptr_t handler =
+ reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(eax.code(), params);
+ output_frame->SetRegister(ebx.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ // Do nothing for X87.
+ return;
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned alignment_state_offset =
+ input_frame_size - parameter_count * kPointerSize -
+ StandardFrameConstants::kFixedFrameSize -
+ kPointerSize;
+ ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+ JavaScriptFrameConstants::kLocal0Offset);
+ int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
+ return (alignment_state == kAlignmentPaddingPushed);
+}
+
+
+#define __ masm()->
+
+void Deoptimizer::EntryGenerator::Generate() {
+ GeneratePrologue();
+
+ // Save all general purpose registers before messing with them.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+ __ pushad();
+
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize;
+
+ // Get the bailout id from the stack.
+ __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
+
+ // Get the address of the location in the code object
+ // and compute the fp-to-sp delta in register edx.
+ __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+
+ __ sub(edx, ebp);
+ __ neg(edx);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, eax);
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
+ __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
+ __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
+ __ mov(Operand(esp, 5 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+ }
+
+ // Preserve deoptimizer object in register eax and get the input
+ // frame descriptor pointer.
+ __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
+
+ // Fill in the input registers.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ pop(Operand(ebx, offset));
+ }
+
+ // Clear all FPU exceptions.
+ // TODO(ulan): Find out why the TOP register is not zero here in some cases,
+ // and check that the generated code never deoptimizes with unbalanced stack.
+ __ fnclex();
+
+ // Remove the bailout id and the return address.
+ __ add(esp, Immediate(2 * kPointerSize));
+
+ // Compute a pointer to the unwinding limit in register ecx; that is
+ // the first stack slot not part of the input frame.
+ __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ __ add(ecx, esp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ pop(Operand(edx, 0));
+ __ add(edx, Immediate(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
+ __ cmp(ecx, esp);
+ __ j(not_equal, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(eax);
+ __ PrepareCallCFunction(1, ebx);
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate()), 1);
+ }
+ __ pop(eax);
+
+ // If frame was dynamically aligned, pop padding.
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ pop(ecx);
+ if (FLAG_debug_code) {
+ __ cmp(ecx, Immediate(kAlignmentZapValue));
+ __ Assert(equal, kAlignmentMarkerExpected);
+ }
+ __ bind(&no_padding);
+
+ // Replace the current frame with the output frames.
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ // Outer loop state: eax = current FrameDescription**, edx = one past the
+ // last FrameDescription**.
+ __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
+ __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
+ __ lea(edx, Operand(eax, edx, times_4, 0));
+ __ jmp(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
+ __ mov(ebx, Operand(eax, 0));
+ __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ sub(ecx, Immediate(sizeof(uint32_t)));
+ __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
+ __ test(ecx, ecx);
+ __ j(not_zero, &inner_push_loop);
+ __ add(eax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(eax, edx);
+ __ j(below, &outer_push_loop);
+
+ // Push state, pc, and continuation from the last output frame.
+ __ push(Operand(ebx, FrameDescription::state_offset()));
+ __ push(Operand(ebx, FrameDescription::pc_offset()));
+ __ push(Operand(ebx, FrameDescription::continuation_offset()));
+
+ // Push the registers from the last output frame.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ push(Operand(ebx, offset));
+ }
+
+ // Restore the registers from the stack.
+ __ popad();
+
+ // Return to the continuation point.
+ __ ret(0);
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ // Create a sequence of deoptimization entries.
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ push_imm32(i);
+ __ jmp(&done);
+ ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ }
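+ // Size check (illustrative): push_imm32 is one opcode byte plus a 4-byte
+ // immediate, so the jmp must take the matching 5-byte rel32 form for the
+ // ASSERT above to hold with table_entry_size_ == 10.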
+ __ bind(&done);
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/disasm-x87.cc b/chromium/v8/src/x87/disasm-x87.cc
new file mode 100644
index 00000000000..a7d473f922e
--- /dev/null
+++ b/chromium/v8/src/x87/disasm-x87.cc
@@ -0,0 +1,1757 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/disasm.h"
+
+namespace disasm {
+
+enum OperandOrder {
+ UNSET_OP_ORDER = 0,
+ REG_OPER_OP_ORDER,
+ OPER_REG_OP_ORDER
+};
+
+
+//------------------------------------------------------------------
+// Tables
+//------------------------------------------------------------------
+struct ByteMnemonic {
+ int b; // -1 terminates, otherwise must be in range (0..255)
+ const char* mnem;
+ OperandOrder op_order_;
+};
+
+
+static const ByteMnemonic two_operands_instr[] = {
+ {0x01, "add", OPER_REG_OP_ORDER},
+ {0x03, "add", REG_OPER_OP_ORDER},
+ {0x09, "or", OPER_REG_OP_ORDER},
+ {0x0B, "or", REG_OPER_OP_ORDER},
+ {0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER},
+ {0x23, "and", REG_OPER_OP_ORDER},
+ {0x29, "sub", OPER_REG_OP_ORDER},
+ {0x2A, "subb", REG_OPER_OP_ORDER},
+ {0x2B, "sub", REG_OPER_OP_ORDER},
+ {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER},
+ {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER},
+ {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER},
+ {0x87, "xchg", REG_OPER_OP_ORDER},
+ {0x8A, "mov_b", REG_OPER_OP_ORDER},
+ {0x8B, "mov", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+static const ByteMnemonic zero_operands_instr[] = {
+ {0xC3, "ret", UNSET_OP_ORDER},
+ {0xC9, "leave", UNSET_OP_ORDER},
+ {0x90, "nop", UNSET_OP_ORDER},
+ {0xF4, "hlt", UNSET_OP_ORDER},
+ {0xCC, "int3", UNSET_OP_ORDER},
+ {0x60, "pushad", UNSET_OP_ORDER},
+ {0x61, "popad", UNSET_OP_ORDER},
+ {0x9C, "pushfd", UNSET_OP_ORDER},
+ {0x9D, "popfd", UNSET_OP_ORDER},
+ {0x9E, "sahf", UNSET_OP_ORDER},
+ {0x99, "cdq", UNSET_OP_ORDER},
+ {0x9B, "fwait", UNSET_OP_ORDER},
+ {0xFC, "cld", UNSET_OP_ORDER},
+ {0xAB, "stos", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+static const ByteMnemonic call_jump_instr[] = {
+ {0xE8, "call", UNSET_OP_ORDER},
+ {0xE9, "jmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+static const ByteMnemonic short_immediate_instr[] = {
+ {0x05, "add", UNSET_OP_ORDER},
+ {0x0D, "or", UNSET_OP_ORDER},
+ {0x15, "adc", UNSET_OP_ORDER},
+ {0x25, "and", UNSET_OP_ORDER},
+ {0x2D, "sub", UNSET_OP_ORDER},
+ {0x35, "xor", UNSET_OP_ORDER},
+ {0x3D, "cmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+// Generally we don't want to generate these because they are subject to
+// partial register stalls. They are included for completeness, and because
+// the cmp variant is used by the RecordWrite stub; since cmp does not update
+// the register, it is not subject to partial register stalls.
+static const ByteMnemonic byte_immediate_instr[] = {
+ {0x0c, "or", UNSET_OP_ORDER},
+ {0x24, "and", UNSET_OP_ORDER},
+ {0x34, "xor", UNSET_OP_ORDER},
+ {0x3c, "cmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+static const char* const jump_conditional_mnem[] = {
+ /*0*/ "jo", "jno", "jc", "jnc",
+ /*4*/ "jz", "jnz", "jna", "ja",
+ /*8*/ "js", "jns", "jpe", "jpo",
+ /*12*/ "jl", "jnl", "jng", "jg"
+};
+
+
+static const char* const set_conditional_mnem[] = {
+ /*0*/ "seto", "setno", "setc", "setnc",
+ /*4*/ "setz", "setnz", "setna", "seta",
+ /*8*/ "sets", "setns", "setpe", "setpo",
+ /*12*/ "setl", "setnl", "setng", "setg"
+};
+
+
+static const char* const conditional_move_mnem[] = {
+ /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
+ /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
+ /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
+ /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
+};
+
+
+enum InstructionType {
+ NO_INSTR,
+ ZERO_OPERANDS_INSTR,
+ TWO_OPERANDS_INSTR,
+ JUMP_CONDITIONAL_SHORT_INSTR,
+ REGISTER_INSTR,
+ MOVE_REG_INSTR,
+ CALL_JUMP_INSTR,
+ SHORT_IMMEDIATE_INSTR,
+ BYTE_IMMEDIATE_INSTR
+};
+
+
+struct InstructionDesc {
+ const char* mnem;
+ InstructionType type;
+ OperandOrder op_order_;
+};
+
+
+class InstructionTable {
+ public:
+ InstructionTable();
+ const InstructionDesc& Get(byte x) const { return instructions_[x]; }
+ static InstructionTable* get_instance() {
+ static InstructionTable table;
+ return &table;
+ }
+
+ private:
+ InstructionDesc instructions_[256];
+ void Clear();
+ void Init();
+ void CopyTable(const ByteMnemonic bm[], InstructionType type);
+ void SetTableRange(InstructionType type,
+ byte start,
+ byte end,
+ const char* mnem);
+ void AddJumpConditionalShort();
+};
+
+
+InstructionTable::InstructionTable() {
+ Clear();
+ Init();
+}
+
+
+void InstructionTable::Clear() {
+ for (int i = 0; i < 256; i++) {
+ instructions_[i].mnem = "";
+ instructions_[i].type = NO_INSTR;
+ instructions_[i].op_order_ = UNSET_OP_ORDER;
+ }
+}
+
+
+void InstructionTable::Init() {
+ CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
+ CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
+ CopyTable(call_jump_instr, CALL_JUMP_INSTR);
+ CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+ CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
+ AddJumpConditionalShort();
+ SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
+ SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
+ SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
+ SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
+ SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop.
+ SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
+}
+
+
+void InstructionTable::CopyTable(const ByteMnemonic bm[],
+ InstructionType type) {
+ for (int i = 0; bm[i].b >= 0; i++) {
+ InstructionDesc* id = &instructions_[bm[i].b];
+ id->mnem = bm[i].mnem;
+ id->op_order_ = bm[i].op_order_;
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
+ id->type = type;
+ }
+}
+
+
+void InstructionTable::SetTableRange(InstructionType type,
+ byte start,
+ byte end,
+ const char* mnem) {
+ for (byte b = start; b <= end; b++) {
+ InstructionDesc* id = &instructions_[b];
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
+ id->mnem = mnem;
+ id->type = type;
+ }
+}
+
+
+void InstructionTable::AddJumpConditionalShort() {
+ for (byte b = 0x70; b <= 0x7F; b++) {
+ InstructionDesc* id = &instructions_[b];
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
+ id->mnem = jump_conditional_mnem[b & 0x0F];
+ id->type = JUMP_CONDITIONAL_SHORT_INSTR;
+ }
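+ // The low nibble of 0x70..0x7F selects the condition, so, for example,
+ // opcodes 0x74 and 0x75 map to entries 4 ("jz") and 5 ("jnz") of
+ // jump_conditional_mnem.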
+}
+
+
+// The X87 disassembler implementation.
+class DisassemblerX87 {
+ public:
+ DisassemblerX87(const NameConverter& converter,
+ bool abort_on_unimplemented = true)
+ : converter_(converter),
+ instruction_table_(InstructionTable::get_instance()),
+ tmp_buffer_pos_(0),
+ abort_on_unimplemented_(abort_on_unimplemented) {
+ tmp_buffer_[0] = '\0';
+ }
+
+ virtual ~DisassemblerX87() {}
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
+
+ private:
+ const NameConverter& converter_;
+ InstructionTable* instruction_table_;
+ v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
+ unsigned int tmp_buffer_pos_;
+ bool abort_on_unimplemented_;
+
+ enum {
+ eax = 0,
+ ecx = 1,
+ edx = 2,
+ ebx = 3,
+ esp = 4,
+ ebp = 5,
+ esi = 6,
+ edi = 7
+ };
+
+
+ enum ShiftOpcodeExtension {
+ kROL = 0,
+ kROR = 1,
+ kRCL = 2,
+ kRCR = 3,
+ kSHL = 4,
+ kSHR = 5,
+ kSAR = 7
+ };
+
+
+ const char* NameOfCPURegister(int reg) const {
+ return converter_.NameOfCPURegister(reg);
+ }
+
+
+ const char* NameOfByteCPURegister(int reg) const {
+ return converter_.NameOfByteCPURegister(reg);
+ }
+
+
+ const char* NameOfXMMRegister(int reg) const {
+ return converter_.NameOfXMMRegister(reg);
+ }
+
+
+ const char* NameOfAddress(byte* addr) const {
+ return converter_.NameOfAddress(addr);
+ }
+
+
+ // Disassembler helper functions.
+ static void get_modrm(byte data, int* mod, int* regop, int* rm) {
+ *mod = (data >> 6) & 3;
+ *regop = (data & 0x38) >> 3;
+ *rm = data & 7;
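+ // Example (illustrative): the modrm byte 0xD1 (binary 11 010 001)
+ // decodes to mod == 3, regop == 2, rm == 1.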
+ }
+
+
+ static void get_sib(byte data, int* scale, int* index, int* base) {
+ *scale = (data >> 6) & 3;
+ *index = (data >> 3) & 7;
+ *base = data & 7;
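+ // Example (illustrative): the sib byte 0x4C (binary 01 001 100) decodes
+ // to scale == 1, index == 1 (ecx), base == 4 (esp).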
+ }
+
+ typedef const char* (DisassemblerX87::*RegisterNameMapping)(int reg) const;
+
+ int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
+ int PrintRightOperand(byte* modrmp);
+ int PrintRightByteOperand(byte* modrmp);
+ int PrintRightXMMOperand(byte* modrmp);
+ int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
+ int PrintImmediateOp(byte* data);
+ int F7Instruction(byte* data);
+ int D1D3C1Instruction(byte* data);
+ int JumpShort(byte* data);
+ int JumpConditional(byte* data, const char* comment);
+ int JumpConditionalShort(byte* data, const char* comment);
+ int SetCC(byte* data);
+ int CMov(byte* data);
+ int FPUInstruction(byte* data);
+ int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+ int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
+ void AppendToBuffer(const char* format, ...);
+
+
+ void UnimplementedInstruction() {
+ if (abort_on_unimplemented_) {
+ UNIMPLEMENTED();
+ } else {
+ AppendToBuffer("'Unimplemented Instruction'");
+ }
+ }
+};
+
+
+void DisassemblerX87::AppendToBuffer(const char* format, ...) {
+ v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
+ va_list args;
+ va_start(args, format);
+ int result = v8::internal::VSNPrintF(buf, format, args);
+ va_end(args);
+ tmp_buffer_pos_ += result;
+}
+
+int DisassemblerX87::PrintRightOperandHelper(
+ byte* modrmp,
+ RegisterNameMapping direct_register_name) {
+ int mod, regop, rm;
+ get_modrm(*modrmp, &mod, &regop, &rm);
+ RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
+ &DisassemblerX87::NameOfCPURegister;
+ switch (mod) {
+ case 0:
+ if (rm == ebp) {
+ int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
+ AppendToBuffer("[0x%x]", disp);
+ return 5;
+ } else if (rm == esp) {
+ byte sib = *(modrmp + 1);
+ int scale, index, base;
+ get_sib(sib, &scale, &index, &base);
+ if (index == esp && base == esp && scale == 0 /*times_1*/) {
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
+ return 2;
+ } else if (base == ebp) {
+ int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+ AppendToBuffer("[%s*%d%s0x%x]",
+ (this->*register_name)(index),
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
+ return 6;
+ } else if (index != esp && base != ebp) {
+ // [base+index*scale]
+ AppendToBuffer("[%s+%s*%d]",
+ (this->*register_name)(base),
+ (this->*register_name)(index),
+ 1 << scale);
+ return 2;
+ } else {
+ UnimplementedInstruction();
+ return 1;
+ }
+ } else {
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
+ return 1;
+ }
+ break;
+ case 1: // fall through
+ case 2:
+ if (rm == esp) {
+ byte sib = *(modrmp + 1);
+ int scale, index, base;
+ get_sib(sib, &scale, &index, &base);
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2)
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
+ if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
+ } else {
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
+ (this->*register_name)(base),
+ (this->*register_name)(index),
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
+ }
+ return mod == 2 ? 6 : 3;
+ } else {
+ // No sib.
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1)
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
+ return mod == 2 ? 5 : 2;
+ }
+ break;
+ case 3:
+ AppendToBuffer("%s", (this->*register_name)(rm));
+ return 1;
+ default:
+ UnimplementedInstruction();
+ return 1;
+ }
+ UNREACHABLE();
+}
+
+
+int DisassemblerX87::PrintRightOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp, &DisassemblerX87::NameOfCPURegister);
+}
+
+
+int DisassemblerX87::PrintRightByteOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerX87::NameOfByteCPURegister);
+}
+
+
+int DisassemblerX87::PrintRightXMMOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerX87::NameOfXMMRegister);
+}
+
+
+// Returns number of bytes used including the current *data.
+// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
+int DisassemblerX87::PrintOperands(const char* mnem,
+ OperandOrder op_order,
+ byte* data) {
+ byte modrm = *data;
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, &regop, &rm);
+ int advance = 0;
+ switch (op_order) {
+ case REG_OPER_OP_ORDER: {
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+ advance = PrintRightOperand(data);
+ break;
+ }
+ case OPER_REG_OP_ORDER: {
+ AppendToBuffer("%s ", mnem);
+ advance = PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return advance;
+}
+
+
+// Returns number of bytes used by machine instruction, including *data byte.
+// Writes immediate instructions to 'tmp_buffer_'.
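+// Example (illustrative): for the bytes 83 F8 05, the sign-extension bit is
+// set and modrm 0xF8 decodes to regop 7 ("cmp") with rm eax, so this prints
+// "cmp eax,0x5" and returns 3.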
+int DisassemblerX87::PrintImmediateOp(byte* data) {
+ bool sign_extension_bit = (*data & 0x02) != 0;
+ byte modrm = *(data+1);
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, &regop, &rm);
+ const char* mnem = "Imm???";
+ switch (regop) {
+ case 0: mnem = "add"; break;
+ case 1: mnem = "or"; break;
+ case 2: mnem = "adc"; break;
+ case 4: mnem = "and"; break;
+ case 5: mnem = "sub"; break;
+ case 6: mnem = "xor"; break;
+ case 7: mnem = "cmp"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data+1);
+ if (sign_extension_bit) {
+ AppendToBuffer(",0x%x", *(data + 1 + count));
+ return 1 + count + 1 /*int8*/;
+ } else {
+ AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
+ return 1 + count + 4 /*int32_t*/;
+ }
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::F7Instruction(byte* data) {
+ ASSERT_EQ(0xF7, *data);
+ byte modrm = *(data+1);
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, &regop, &rm);
+ if (mod == 3 && regop != 0) {
+ const char* mnem = NULL;
+ switch (regop) {
+ case 2: mnem = "not"; break;
+ case 3: mnem = "neg"; break;
+ case 4: mnem = "mul"; break;
+ case 5: mnem = "imul"; break;
+ case 7: mnem = "idiv"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
+ return 2;
+ } else if (mod == 3 && regop == eax) {
+ int32_t imm = *reinterpret_cast<int32_t*>(data+2);
+ AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
+ return 6;
+ } else if (regop == eax) {
+ AppendToBuffer("test ");
+ int count = PrintRightOperand(data+1);
+ int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
+ AppendToBuffer(",0x%x", imm);
+ return 1+count+4 /*int32_t*/;
+ } else {
+ UnimplementedInstruction();
+ return 2;
+ }
+}
+
+
+int DisassemblerX87::D1D3C1Instruction(byte* data) {
+ byte op = *data;
+ ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
+ byte modrm = *(data+1);
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, &regop, &rm);
+ int imm8 = -1;
+ int num_bytes = 2;
+ if (mod == 3) {
+ const char* mnem = NULL;
+ switch (regop) {
+ case kROL: mnem = "rol"; break;
+ case kROR: mnem = "ror"; break;
+ case kRCL: mnem = "rcl"; break;
+ case kRCR: mnem = "rcr"; break;
+ case kSHL: mnem = "shl"; break;
+ case kSHR: mnem = "shr"; break;
+ case kSAR: mnem = "sar"; break;
+ default: UnimplementedInstruction();
+ }
+ if (op == 0xD1) {
+ imm8 = 1;
+ } else if (op == 0xC1) {
+ imm8 = *(data+2);
+ num_bytes = 3;
+ } else if (op == 0xD3) {
+ // Shift/rotate by cl.
+ }
+ ASSERT_NE(NULL, mnem);
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
+ if (imm8 >= 0) {
+ AppendToBuffer("%d", imm8);
+ } else {
+ AppendToBuffer("cl");
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ return num_bytes;
+}
+
+
+// Returns number of bytes used, including *data.
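+// Example (illustrative): for the bytes EB FE, the displacement is
+// static_cast<int8_t>(0xFE) == -2, so dest == data + (-2) + 2 == data, a
+// two-byte jump to itself.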
+int DisassemblerX87::JumpShort(byte* data) {
+ ASSERT_EQ(0xEB, *data);
+ byte b = *(data+1);
+ byte* dest = data + static_cast<int8_t>(b) + 2;
+ AppendToBuffer("jmp %s", NameOfAddress(dest));
+ return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::JumpConditional(byte* data, const char* comment) {
+ ASSERT_EQ(0x0F, *data);
+ byte cond = *(data+1) & 0x0F;
+ byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
+ const char* mnem = jump_conditional_mnem[cond];
+ AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+ if (comment != NULL) {
+ AppendToBuffer(", %s", comment);
+ }
+ return 6; // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::JumpConditionalShort(byte* data, const char* comment) {
+ byte cond = *data & 0x0F;
+ byte b = *(data+1);
+ byte* dest = data + static_cast<int8_t>(b) + 2;
+ const char* mnem = jump_conditional_mnem[cond];
+ AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+ if (comment != NULL) {
+ AppendToBuffer(", %s", comment);
+ }
+ return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::SetCC(byte* data) {
+ ASSERT_EQ(0x0F, *data);
+ byte cond = *(data+1) & 0x0F;
+ const char* mnem = set_conditional_mnem[cond];
+ AppendToBuffer("%s ", mnem);
+ PrintRightByteOperand(data+2);
+ return 3; // Includes 0x0F.
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::CMov(byte* data) {
+ ASSERT_EQ(0x0F, *data);
+ byte cond = *(data + 1) & 0x0F;
+ const char* mnem = conditional_move_mnem[cond];
+ int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
+ return 2 + op_size; // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::FPUInstruction(byte* data) {
+ byte escape_opcode = *data;
+ ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+ byte modrm_byte = *(data+1);
+
+ if (modrm_byte >= 0xC0) {
+ return RegisterFPUInstruction(escape_opcode, modrm_byte);
+ } else {
+ return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
+ }
+}
+
+int DisassemblerX87::MemoryFPUInstruction(int escape_opcode,
+ int modrm_byte,
+ byte* modrm_start) {
+ const char* mnem = "?";
+ int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
+ switch (escape_opcode) {
+ case 0xD9: switch (regop) {
+ case 0: mnem = "fld_s"; break;
+ case 2: mnem = "fst_s"; break;
+ case 3: mnem = "fstp_s"; break;
+ case 7: mnem = "fstcw"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB: switch (regop) {
+ case 0: mnem = "fild_s"; break;
+ case 1: mnem = "fisttp_s"; break;
+ case 2: mnem = "fist_s"; break;
+ case 3: mnem = "fistp_s"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD: switch (regop) {
+ case 0: mnem = "fld_d"; break;
+ case 1: mnem = "fisttp_d"; break;
+ case 2: mnem = "fst_d"; break;
+ case 3: mnem = "fstp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDF: switch (regop) {
+ case 5: mnem = "fild_d"; break;
+ case 7: mnem = "fistp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(modrm_start);
+ return count + 1;
+}
+
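+// Example (illustrative): the byte pair D8 C1 has escape opcode 0xD8 and a
+// modrm byte in the 0xC0 row, so it disassembles as "fadd_i st1" and
+// consumes the two bytes this function reports.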
+int DisassemblerX87::RegisterFPUInstruction(int escape_opcode,
+ byte modrm_byte) {
+ bool has_register = false; // Is the FPU register encoded in modrm_byte?
+ const char* mnem = "?";
+
+ switch (escape_opcode) {
+ case 0xD8:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd_i"; break;
+ case 0xE0: mnem = "fsub_i"; break;
+ case 0xC8: mnem = "fmul_i"; break;
+ case 0xF0: mnem = "fdiv_i"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xD9:
+ switch (modrm_byte & 0xF8) {
+ case 0xC0:
+ mnem = "fld";
+ has_register = true;
+ break;
+ case 0xC8:
+ mnem = "fxch";
+ has_register = true;
+ break;
+ default:
+ switch (modrm_byte) {
+ case 0xE0: mnem = "fchs"; break;
+ case 0xE1: mnem = "fabs"; break;
+ case 0xE4: mnem = "ftst"; break;
+ case 0xE8: mnem = "fld1"; break;
+ case 0xEB: mnem = "fldpi"; break;
+ case 0xED: mnem = "fldln2"; break;
+ case 0xEE: mnem = "fldz"; break;
+ case 0xF0: mnem = "f2xm1"; break;
+ case 0xF1: mnem = "fyl2x"; break;
+ case 0xF4: mnem = "fxtract"; break;
+ case 0xF5: mnem = "fprem1"; break;
+ case 0xF7: mnem = "fincstp"; break;
+ case 0xF8: mnem = "fprem"; break;
+ case 0xFC: mnem = "frndint"; break;
+ case 0xFD: mnem = "fscale"; break;
+ case 0xFE: mnem = "fsin"; break;
+ case 0xFF: mnem = "fcos"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDA:
+ if (modrm_byte == 0xE9) {
+ mnem = "fucompp";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB:
+ if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomi";
+ has_register = true;
+ } else if (modrm_byte == 0xE2) {
+ mnem = "fclex";
+ } else if (modrm_byte == 0xE3) {
+ mnem = "fninit";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDC:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd"; break;
+ case 0xE8: mnem = "fsub"; break;
+ case 0xC8: mnem = "fmul"; break;
+ case 0xF8: mnem = "fdiv"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "ffree"; break;
+ case 0xD0: mnem = "fst"; break;
+ case 0xD8: mnem = "fstp"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDE:
+ if (modrm_byte == 0xD9) {
+ mnem = "fcompp";
+ } else {
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "faddp"; break;
+ case 0xE8: mnem = "fsubp"; break;
+ case 0xC8: mnem = "fmulp"; break;
+ case 0xF8: mnem = "fdivp"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDF:
+ if (modrm_byte == 0xE0) {
+ mnem = "fnstsw_ax";
+ } else if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomip";
+ has_register = true;
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+
+ if (has_register) {
+ AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+ } else {
+ AppendToBuffer("%s", mnem);
+ }
+ return 2;
+}
+
+
+// Mnemonics for the instruction byte that follows a 0x0F prefix.
+// Returns NULL if the instruction is not handled here.
+static const char* F0Mnem(byte f0byte) {
+ switch (f0byte) {
+ case 0x18: return "prefetch";
+ case 0xA2: return "cpuid";
+ case 0xBE: return "movsx_b";
+ case 0xBF: return "movsx_w";
+ case 0xB6: return "movzx_b";
+ case 0xB7: return "movzx_w";
+ case 0xAF: return "imul";
+ case 0xA5: return "shld";
+ case 0xAD: return "shrd";
+ case 0xAC: return "shrd"; // 3-operand version.
+ case 0xAB: return "bts";
+ case 0xBD: return "bsr";
+ default: return NULL;
+ }
+}
+
+
+// Disassembles the instruction at '*instr' and writes it into 'out_buffer'.
+int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
+ byte* instr) {
+ tmp_buffer_pos_ = 0; // Start writing at position 0.
+ byte* data = instr;
+ // Check for hints.
+ const char* branch_hint = NULL;
+ // We use these two prefixes only as branch prediction hints.
+ if (*data == 0x3E /*ds*/) {
+ branch_hint = "predicted taken";
+ data++;
+ } else if (*data == 0x2E /*cs*/) {
+ branch_hint = "predicted not taken";
+ data++;
+ }
+ bool processed = true; // Will be set to false if the current instruction
+ // is not in the 'instructions' table.
+ const InstructionDesc& idesc = instruction_table_->Get(*data);
+ switch (idesc.type) {
+ case ZERO_OPERANDS_INSTR:
+ AppendToBuffer(idesc.mnem);
+ data++;
+ break;
+
+ case TWO_OPERANDS_INSTR:
+ data++;
+ data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+ break;
+
+ case JUMP_CONDITIONAL_SHORT_INSTR:
+ data += JumpConditionalShort(data, branch_hint);
+ break;
+
+ case REGISTER_INSTR:
+ AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
+ data++;
+ break;
+
+ case MOVE_REG_INSTR: {
+ byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+ AppendToBuffer("mov %s,%s",
+ NameOfCPURegister(*data & 0x07),
+ NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case CALL_JUMP_INSTR: {
+ byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
+ AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case SHORT_IMMEDIATE_INSTR: {
+ byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+ AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case BYTE_IMMEDIATE_INSTR: {
+ AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
+ data += 2;
+ break;
+ }
+
+ case NO_INSTR:
+ processed = false;
+ break;
+
+ default:
+ UNIMPLEMENTED(); // This type is not implemented.
+ }
+ //----------------------------
+ if (!processed) {
+ switch (*data) {
+ case 0xC2:
+ AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
+ data += 3;
+ break;
+
+ case 0x69: // fall through
+ case 0x6B:
+ { int mod, regop, rm;
+ get_modrm(*(data+1), &mod, &regop, &rm);
+ int32_t imm =
+ *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
+ AppendToBuffer("imul %s,%s,0x%x",
+ NameOfCPURegister(regop),
+ NameOfCPURegister(rm),
+ imm);
+ data += 2 + (*data == 0x6B ? 1 : 4);
+ }
+ break;
+
+ case 0xF6:
+ { data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (regop == eax) {
+ AppendToBuffer("test_b ");
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0x81: // fall through
+ case 0x83: // 0x81 with sign extension bit set
+ data += PrintImmediateOp(data);
+ break;
+
+ case 0x0F:
+ { byte f0byte = data[1];
+ const char* f0mnem = F0Mnem(f0byte);
+ if (f0byte == 0x18) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* suffix[] = {"nta", "1", "2", "3"};
+ AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
+ data += PrintRightOperand(data);
+ } else if (f0byte == 0x1F && data[2] == 0) {
+ AppendToBuffer("nop"); // 3 byte nop.
+ data += 3;
+ } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
+ AppendToBuffer("nop"); // 4 byte nop.
+ data += 4;
+ } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
+ data[4] == 0) {
+ AppendToBuffer("nop"); // 5 byte nop.
+ data += 5;
+ } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0) {
+ AppendToBuffer("nop"); // 7 byte nop.
+ data += 7;
+ } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0 &&
+ data[7] == 0) {
+ AppendToBuffer("nop"); // 8 byte nop.
+ data += 8;
+ } else if (f0byte == 0xA2 || f0byte == 0x31) {
+ AppendToBuffer("%s", f0mnem);
+ data += 2;
+ } else if (f0byte == 0x28) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movaps %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
+ const char* const pseudo_op[] = {
+ "rcpps",
+ "andps",
+ "andnps",
+ "orps",
+ "xorps",
+ "addps",
+ "mulps",
+ "cvtps2pd",
+ "cvtdq2ps",
+ "subps",
+ "minps",
+ "divps",
+ "maxps",
+ };
+
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,",
+ pseudo_op[f0byte - 0x53],
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (f0byte == 0x50) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movmskps %s,%s",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (f0byte == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("shufps %s,%s,%d",
+ NameOfXMMRegister(rm),
+ NameOfXMMRegister(regop),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if ((f0byte & 0xF0) == 0x80) {
+ data += JumpConditional(data, branch_hint);
+ } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
+ f0byte == 0xB7 || f0byte == 0xAF) {
+ data += 2;
+ data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
+ } else if ((f0byte & 0xF0) == 0x90) {
+ data += SetCC(data);
+ } else if ((f0byte & 0xF0) == 0x40) {
+ data += CMov(data);
+ } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+ // bts, shld, shrd.
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ if (f0byte == 0xAB) {
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else {
+ AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+ }
+ } else if (f0byte == 0xBD) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0x8F:
+ { data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (regop == eax) {
+ AppendToBuffer("pop ");
+ data += PrintRightOperand(data);
+ }
+ }
+ break;
+
+ case 0xFF:
+ { data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* mnem = NULL;
+ switch (regop) {
+ case esi: mnem = "push"; break;
+ case eax: mnem = "inc"; break;
+ case ecx: mnem = "dec"; break;
+ case edx: mnem = "call"; break;
+ case esp: mnem = "jmp"; break;
+ default: mnem = "???";
+ }
+ AppendToBuffer("%s ", mnem);
+ data += PrintRightOperand(data);
+ }
+ break;
+
+ case 0xC7: // imm32, fall through
+ case 0xC6: // imm8
+ { bool is_byte = *data == 0xC6;
+ data++;
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ int32_t imm = *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 4;
+ }
+ }
+ break;
+
+ case 0x80:
+ { data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* mnem = NULL;
+ switch (regop) {
+ case 5: mnem = "subb"; break;
+ case 7: mnem = "cmpb"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ }
+ break;
+
+ case 0x88: // 8bit, fall through
+ case 0x89: // 32bit
+ { bool is_byte = *data == 0x88;
+ int mod, regop, rm;
+ data++;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
+ data += PrintRightByteOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ }
+ }
+ break;
+
+ case 0x66: // prefix
+ while (*data == 0x66) data++;
+ if (*data == 0xf && data[1] == 0x1f) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x90) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x8B) {
+ data++;
+ data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+ } else if (*data == 0x89) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("mov_w ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else if (*data == 0xC7) {
+ data++;
+ AppendToBuffer("%s ", "mov_w");
+ data += PrintRightOperand(data);
+ int imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
+ } else if (*data == 0x0F) {
+ data++;
+ if (*data == 0x38) {
+ data++;
+ if (*data == 0x17) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("ptest %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x2A) {
+ // movntdqa
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*data == 0x3A) {
+ data++;
+ if (*data == 0x0B) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("roundsd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x16) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pextrd %s,%s,%d",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x17) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("extractps %s,%s,%d",
+ NameOfCPURegister(rm),
+ NameOfXMMRegister(regop),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x22) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pinsrd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfCPURegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*data == 0x2E || *data == 0x2F) {
+ const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (mod == 0x3) {
+ AppendToBuffer("%s %s,%s", mnem,
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ }
+ } else if (*data == 0x50) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movmskpd %s,%s",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x54) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("andpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x56) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("orpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x57) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("xorpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x6E) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else if (*data == 0x6F) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x70) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pshufd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x76) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pcmpeqd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x90) {
+ data++;
+ AppendToBuffer("nop"); // 2 byte nop.
+ } else if (*data == 0xF3) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("psllq %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x73) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ ASSERT(regop == esi || regop == edx);
+ AppendToBuffer("%s %s,%d",
+ (regop == esi) ? "psllq" : "psrlq",
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0xD3) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("psrlq %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x7F) {
+ AppendToBuffer("movdqa ");
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0x7E) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movd ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0xDB) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pand %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0xE7) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (mod == 3) {
+ AppendToBuffer("movntdq ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*data == 0xEF) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pxor %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0xEB) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("por %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else {
+ UnimplementedInstruction();
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xFE:
+ { data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (regop == ecx) {
+ AppendToBuffer("dec_b ");
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0x68:
+ AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
+ data += 5;
+ break;
+
+ case 0x6A:
+ AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ data += 2;
+ break;
+
+ case 0xA8:
+ AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+ data += 2;
+ break;
+
+ case 0xA9:
+ AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
+ data += 5;
+ break;
+
+ case 0xD1: // fall through
+ case 0xD3: // fall through
+ case 0xC1:
+ data += D1D3C1Instruction(data);
+ break;
+
+ case 0xD8: // fall through
+ case 0xD9: // fall through
+ case 0xDA: // fall through
+ case 0xDB: // fall through
+ case 0xDC: // fall through
+ case 0xDD: // fall through
+ case 0xDE: // fall through
+ case 0xDF:
+ data += FPUInstruction(data);
+ break;
+
+ case 0xEB:
+ data += JumpShort(data);
+ break;
+
+ case 0xF2:
+ if (*(data+1) == 0x0F) {
+ byte b2 = *(data+2);
+ if (b2 == 0x11) {
+ AppendToBuffer("movsd ");
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (b2 == 0x10) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x5A) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvtsd2ss %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else {
+ const char* mnem = "?";
+ switch (b2) {
+ case 0x2A: mnem = "cvtsi2sd"; break;
+ case 0x2C: mnem = "cvttsd2si"; break;
+ case 0x2D: mnem = "cvtsd2si"; break;
+ case 0x51: mnem = "sqrtsd"; break;
+ case 0x58: mnem = "addsd"; break;
+ case 0x59: mnem = "mulsd"; break;
+ case 0x5C: mnem = "subsd"; break;
+ case 0x5E: mnem = "divsd"; break;
+ }
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (b2 == 0x2A) {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else if (b2 == 0x2C || b2 == 0x2D) {
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0xC2) {
+ // Intel manual 2A, Table 3-18.
+ const char* const pseudo_op[] = {
+ "cmpeqsd",
+ "cmpltsd",
+ "cmplesd",
+ "cmpunordsd",
+ "cmpneqsd",
+ "cmpnltsd",
+ "cmpnlesd",
+ "cmpordsd"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[data[1]],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data += 2;
+ } else {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ }
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xF3:
+ if (*(data+1) == 0x0F) {
+ byte b2 = *(data+2);
+ if (b2 == 0x11) {
+ AppendToBuffer("movss ");
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (b2 == 0x10) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movss %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x2C) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x5A) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x6F) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x7F) {
+ AppendToBuffer("movdqu ");
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*(data+1) == 0xA5) {
+ data += 2;
+ AppendToBuffer("rep_movs");
+ } else if (*(data+1) == 0xAB) {
+ data += 2;
+ AppendToBuffer("rep_stos");
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xF7:
+ data += F7Instruction(data);
+ break;
+
+ default:
+ UnimplementedInstruction();
+ }
+ }
+
+ if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
+ tmp_buffer_[tmp_buffer_pos_] = '\0';
+ }
+
+ int instr_len = data - instr;
+ if (instr_len == 0) {
+ printf("%02x", *data);
+ }
+ ASSERT(instr_len > 0); // Ensure progress.
+
+ int outp = 0;
+ // Instruction bytes.
+ for (byte* bp = instr; bp < data; bp++) {
+ outp += v8::internal::SNPrintF(out_buffer + outp, "%02x", *bp);
+ }
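+  // Pad the hex column out to a fixed seven byte positions so the mnemonic
+  // column lines up; e.g. a 3-byte instruction gets 6 - 3 + 1 == 4 pads.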
+ for (int i = 6 - instr_len; i >= 0; i--) {
+ outp += v8::internal::SNPrintF(out_buffer + outp, " ");
+ }
+
+ outp += v8::internal::SNPrintF(out_buffer + outp, " %s", tmp_buffer_.start());
+ return instr_len;
+} // NOLINT (function is too long)
+
+
+//------------------------------------------------------------------------------
+
+
+static const char* cpu_regs[8] = {
+ "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
+};
+
+
+static const char* byte_cpu_regs[8] = {
+ "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
+};
+
+
+static const char* xmm_regs[8] = {
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+};
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ if (0 <= reg && reg < 8) return cpu_regs[reg];
+ return "noreg";
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
+ return "noreg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ if (0 <= reg && reg < 8) return xmm_regs[reg];
+ return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // X87 does not embed debug strings at the moment.
+ UNREACHABLE();
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ DisassemblerX87 d(converter_, false /*do not crash if unimplemented*/);
+ return d.InstructionDecode(buffer, instruction);
+}
+
+
+// The X87 (IA-32) assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+
+/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ fprintf(f, "%p", prev_pc);
+ fprintf(f, " ");
+
+ for (byte* bp = prev_pc; bp < pc; bp++) {
+ fprintf(f, "%02x", *bp);
+ }
+ for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+ fprintf(f, " ");
+ }
+ fprintf(f, " %s\n", buffer.start());
+ }
+}
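+// Minimal sketch of driving the decoder directly, mirroring Disassemble()
+// above (the 128-byte buffer matches what Disassemble() uses; any
+// sufficiently large vector works):
+//
+//   disasm::NameConverter converter;
+//   disasm::Disassembler d(converter);
+//   v8::internal::EmbeddedVector<char, 128> buffer;
+//   for (byte* pc = begin; pc < end;) {
+//     pc += d.InstructionDecode(buffer, pc);
+//     // buffer.start() now holds the disassembled text for one instruction.
+//   }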
+
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/frames-x87.cc b/chromium/v8/src/x87/frames-x87.cc
new file mode 100644
index 00000000000..cd4b724e840
--- /dev/null
+++ b/chromium/v8/src/x87/frames-x87.cc
@@ -0,0 +1,42 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/assembler.h"
+#include "src/x87/assembler-x87.h"
+#include "src/x87/assembler-x87-inl.h"
+#include "src/frames.h"
+
+namespace v8 {
+namespace internal {
+
+
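+// As on ia32, the x87 port does not use a constant pool, so the constant
+// pool accessors below exist only to satisfy the frames interface and are
+// never reached.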
+Register JavaScriptFrame::fp_register() { return ebp; }
+Register JavaScriptFrame::context_register() { return esi; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return ebp; }
+Register StubFailureTrampolineFrame::context_register() { return esi; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/frames-x87.h b/chromium/v8/src/x87/frames-x87.h
new file mode 100644
index 00000000000..5b91baf3858
--- /dev/null
+++ b/chromium/v8/src/x87/frames-x87.h
@@ -0,0 +1,125 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_FRAMES_X87_H_
+#define V8_X87_FRAMES_X87_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Register lists
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 8;
+
+
+// Caller-saved registers
+const RegList kJSCallerSaved =
+ 1 << 0 | // eax
+ 1 << 1 | // ecx
+ 1 << 2 | // edx
+ 1 << 3 | // ebx - used as a caller-saved register in JavaScript code
+ 1 << 7; // edi - callee function
+
+const int kNumJSCallerSaved = 5;
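+// Equivalently, kJSCallerSaved == 0x8F: bit i of a RegList corresponds to
+// the register whose instruction encoding is i.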
+
+
+// Number of registers for which space is reserved in safepoints.
+const int kNumSafepointRegisters = 8;
+
+const int kNoAlignmentPadding = 0;
+const int kAlignmentPaddingPushed = 2;
+const int kAlignmentZapValue = 0x12345678;  // Not tagged as a heap object.
+
+// ----------------------------------------------------
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset = -6 * kPointerSize;
+
+ static const int kFunctionArgOffset = +3 * kPointerSize;
+ static const int kReceiverArgOffset = +4 * kPointerSize;
+ static const int kArgcOffset = +5 * kPointerSize;
+ static const int kArgvOffset = +6 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+ static const int kFrameSize = 2 * kPointerSize;
+
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
+
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kPointerSize;
+
+ // FP-relative displacement of the caller's SP. It points just
+ // below the saved PC.
+ static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used
+};
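+// Exit frame shape implied by the offsets above (fp-relative, in units of
+// kPointerSize; a sketch derived from the constants, not a separate spec):
+//   fp + 2 : where the caller's SP points, just below the saved PC
+//   fp + 1 : caller PC (return address)
+//   fp + 0 : caller FP
+//   fp - 1 : SP
+//   fp - 2 : code object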
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLastParameterOffset = +2 * kPointerSize;
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+ // Caller SP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+
+ static const int kDynamicAlignmentStateOffset = kLocal0Offset;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -5 * kPointerSize;
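+  // kMinInt is an out-of-range sentinel: the constructor is apparently not
+  // kept in the construct frame on this port, so any use of this offset
+  // should fail loudly rather than read a plausible slot.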
+ static const int kConstructorOffset = kMinInt;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_X87_FRAMES_X87_H_
diff --git a/chromium/v8/src/x87/full-codegen-x87.cc b/chromium/v8/src/x87/full-codegen-x87.cc
new file mode 100644
index 00000000000..7006e7bdabc
--- /dev/null
+++ b/chromium/v8/src/x87/full-codegen-x87.cc
@@ -0,0 +1,4791 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+ void EmitJumpIfNotSmi(Register reg,
+ Label* target,
+ Label::Distance distance = Label::kFar) {
+ __ test(reg, Immediate(kSmiTagMask));
+ EmitJump(not_carry, target, distance); // Always taken before patched.
+ }
+
+ void EmitJumpIfSmi(Register reg,
+ Label* target,
+ Label::Distance distance = Label::kFar) {
+ __ test(reg, Immediate(kSmiTagMask));
+ EmitJump(carry, target, distance); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_uint8(delta_to_patch_site));
+ __ test(eax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+ }
+
+ private:
+ // jc will be patched with jz, jnc will become jnz.
+ void EmitJump(Condition cc, Label* target, Label::Distance distance) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ ASSERT(cc == carry || cc == not_carry);
+ __ bind(&patch_site_);
+ __ j(cc, target, distance);
+ }
+
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
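+// Patch-site protocol, in sketch form: EmitPatchInfo() stores the distance
+// back to the patch site in the immediate of a "test eax, imm" marker; the
+// IC patching machinery reads that delta to locate the jump and flip it
+// (jc -> jz, jnc -> jnz), while a lone nop means no inlined smi code.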
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right, with the
+// return address on top of them. The actual argument count matches the
+// formal parameter count expected by the function.
+//
+// The live registers are:
+// o edi: the JS function object being called (i.e. ourselves)
+// o esi: our context
+// o ebp: our caller's frame pointer
+// o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-x87.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
+#endif
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +1 for return address.
+ int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(ecx, Operand(esp, receiver_offset));
+
+ __ cmp(ecx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+
+ __ mov(Operand(esp, receiver_offset), ecx);
+
+ __ bind(&ok);
+ }
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(info->IsCodePreAgingActive());
+ info->AddNoFrameRange(0, masm_->pc_offset());
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
+ if (locals_count == 1) {
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ } else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ Label ok;
+ __ mov(ecx, esp);
+ __ sub(ecx, Immediate(locals_count * kPointerSize));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_real_stack_limit(isolate());
+ __ cmp(ecx, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
+ __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
+ const int kMaxPushes = 32;
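+      // E.g. locals_count == 70 emits a two-iteration loop of 32 pushes
+      // each (64 slots), followed by 70 % 32 == 6 straight-line pushes.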
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(ecx, loop_iterations);
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ push(eax);
+ }
+ __ dec(ecx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ push(eax);
+ }
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
+ // Argument to NewContext is the function, which is still in edi.
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(edi);
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(edi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in eax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in esi.
+ __ mov(esi, eax);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = info->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(var->index());
+ __ mov(Operand(esi, context_offset), eax);
+ // Update the write barrier. This clobbers eax and ebx.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(esi,
+ context_offset,
+ eax,
+ ebx);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(edi);
+ } else {
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ lea(edx,
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ push(edx);
+ __ push(Immediate(Smi::FromInt(num_parameters)));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (strict_mode() == STRICT) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
+ }
+ ArgumentsAccessStub stub(isolate(), type);
+ __ CallStub(&stub);
+
+ SetVar(arguments, eax, ebx, edx);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ VariableDeclaration* function = scope()->function();
+ ASSERT(function->proxy()->var()->mode() == CONST ||
+ function->proxy()->var()->mode() == CONST_LEGACY);
+ ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ VisitVariableDeclaration(function);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ ExternalReference stack_limit
+ = ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ __ mov(eax, isolate()->factory()->undefined_value());
+ EmitReturnSequence();
+ }
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ __ Move(eax, Immediate(Smi::FromInt(0)));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ mov(ebx, Immediate(profiling_counter_));
+ __ sub(FieldOperand(ebx, Cell::kValueOffset),
+ Immediate(Smi::FromInt(delta)));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ __ mov(ebx, Immediate(profiling_counter_));
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
+ Immediate(Smi::FromInt(reset_value)));
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ Label ok;
+
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
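+  // Weight the decrement by the back edge's code distance, clamped to
+  // [1, kMaxBackEdgeWeight], so larger loop bodies drain the profiling
+  // counter in fewer iterations.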
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+
+ EmitProfilingCounterReset();
+
+ __ bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ // Common return label
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ __ push(eax);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(eax);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ SetSourcePosition(function()->end_position() - 1);
+ __ RecordJSReturn();
+ // Do not use the leave instruction here because it is too short to
+ // patch with the code required by the debugger.
+ __ mov(esp, ebp);
+ int no_frame_start = masm_->pc_offset();
+ __ pop(ebp);
+
+ int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ __ Ret(arguments_bytes, ecx);
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceLength <=
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ MemOperand operand = codegen()->VarOperand(var, result_register());
+ // Memory operands can be pushed directly.
+ __ push(operand);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+ // For simplicity we always test the accumulator register.
+ codegen()->GetVar(result_register(), var);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on X87.
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on X87.
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on X87.
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on X87.
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ if (lit->IsSmi()) {
+ __ SafeMove(result_register(), Immediate(lit));
+ } else {
+ __ Move(result_register(), Immediate(lit));
+ }
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ if (lit->IsSmi()) {
+ __ SafePush(Immediate(lit));
+ } else {
+ __ push(Immediate(lit));
+ }
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ mov(result_register(), lit);
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ mov(Operand(esp, 0), reg);
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Move(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == materialize_false);
+ __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ mov(result_register(), isolate()->factory()->true_value());
+ __ jmp(&done, Label::kNear);
+ __ bind(materialize_false);
+ __ mov(result_register(), isolate()->factory()->false_value());
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ push(Immediate(isolate()->factory()->true_value()));
+ __ jmp(&done, Label::kNear);
+ __ bind(materialize_false);
+ __ push(Immediate(isolate()->factory()->false_value()));
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == true_label_);
+ ASSERT(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Handle<Object> value = flag
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
+ __ mov(result_register(), value);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Handle<Object> value = flag
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
+ __ push(Immediate(value));
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ test(result_register(), result_register());
+ // The stub returns nonzero for true.
+ Split(not_zero, if_true, if_false, fall_through);
+}
+
+
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ j(cc, if_true);
+ } else if (if_true == fall_through) {
+ __ j(NegateCondition(cc), if_false);
+ } else {
+ __ j(cc, if_true);
+ __ jmp(if_false);
+ }
+}
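+// For example, when fall_through == if_false this emits a single
+// "j(cc, if_true)" and lets control fall into the false block; the extra
+// jmp is only paid when neither label is the fall-through.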
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ ASSERT(var->IsStackAllocated());
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kPointerSize;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
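+  // Worked example: with n parameters, parameter i resolves to
+  // Operand(ebp, (n + 1 - i) * kPointerSize), so the last one matches
+  // kLastParameterOffset, and stack local i resolves to
+  // Operand(ebp, kLocal0Offset - i * kPointerSize).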
+ return Operand(ebp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ MemOperand location = VarOperand(var, dest);
+ __ mov(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+ Register src,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ ASSERT(!scratch0.is(src));
+ ASSERT(!scratch0.is(scratch1));
+ ASSERT(!scratch1.is(src));
+ MemOperand location = VarOperand(var, scratch0);
+ __ mov(location, src);
+
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ int offset = Context::SlotOffset(var->index());
+ ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
+ __ RecordWriteContextSlot(scratch0, offset, src, scratch1);
+ }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ Label skip;
+ if (should_normalize) __ jmp(&skip, Label::kNear);
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, NULL);
+ __ bind(&skip);
+ }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (generate_debug_code_) {
+ // Check that we're not inside a with or catch context.
+ __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+ __ cmp(ebx, isolate()->factory()->with_context_map());
+ __ Check(not_equal, kDeclarationInWithContext);
+ __ cmp(ebx, isolate()->factory()->catch_context_map());
+ __ Check(not_equal, kDeclarationInCatchContext);
+ }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(), zone());
+ break;
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ mov(StackOperand(variable),
+ Immediate(isolate()->factory()->the_hole_value()));
+ }
+ break;
+
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ mov(ContextOperand(esi, variable->index()),
+ Immediate(isolate()->factory()->the_hole_value()));
+ // No write barrier since the hole value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ push(esi);
+ __ push(Immediate(variable->name()));
+ // VariableDeclaration nodes are always introduced in one of four modes.
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
+ __ push(Immediate(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ push(Immediate(isolate()->factory()->the_hole_value()));
+ } else {
+ __ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
+ }
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), script());
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ mov(StackOperand(variable), result_register());
+ break;
+ }
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ mov(ContextOperand(esi, variable->index()), result_register());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(esi,
+ Context::SlotOffset(variable->index()),
+ result_register(),
+ ecx,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ __ push(esi);
+ __ push(Immediate(variable->name()));
+ __ push(Immediate(Smi::FromInt(NONE)));
+ VisitForStackValue(declaration->fun());
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
+
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+
+ // Load instance object.
+ __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
+ __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
+
+ // Assign it.
+ __ mov(ContextOperand(esi, variable->index()), eax);
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(esi,
+ Context::SlotOffset(variable->index()),
+ eax,
+ ecx,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ push(esi); // The context is the first argument.
+ __ Push(pairs);
+ __ Push(Smi::FromInt(DeclareGlobalsFlags()));
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as the final fall-through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+
+ // Perform the comparison as if via '==='.
+ __ mov(edx, Operand(esp, 0)); // Switch value.
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
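+      // Smi tag trick: kSmiTag == 0 with a one-bit mask, so the OR of two
+      // words has the tag bit set iff at least one of them is a heap
+      // object; a single test then covers both operands.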
+ patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
+
+ __ cmp(edx, eax);
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target());
+ __ bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ jmp(&skip, Label::kNear);
+ PrepareForBailout(clause, TOS_REG);
+ __ cmp(eax, isolate()->factory()->true_value());
+ __ j(not_equal, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
+ __ test(eax, eax);
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ jmp(nested_statement.break_label());
+ } else {
+ __ jmp(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ __ j(equal, &exit);
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, &exit);
+
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(eax, &convert, Label::kNear);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &done_convert, Label::kNear);
+ __ bind(&convert);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+ __ push(eax);
+
+ // Check for proxies.
+ Label call_runtime, use_cache, fixed_array;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ __ j(below_equal, &call_runtime);
+
+  // Check cache validity in generated code. This is a fast case for the
+  // JSObject::IsSimpleEnum checks; if we cannot guarantee validity here,
+  // call the runtime system to check the cache or to get the property
+  // names in a fixed array.
+ __ CheckEnumCache(&call_runtime);
+
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ jmp(&use_cache, Label::kNear);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(eax);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->meta_map());
+ __ j(not_equal, &fixed_array);
+
+ // We got a map in register eax. Get the enumeration cache from it.
+ Label no_descriptors;
+ __ bind(&use_cache);
+
+ __ EnumLength(edx, eax);
+ __ cmp(edx, Immediate(Smi::FromInt(0)));
+ __ j(equal, &no_descriptors);
+
+ __ LoadInstanceDescriptors(eax, ecx);
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ push(eax); // Map.
+ __ push(ecx); // Enumeration cache.
+ __ push(edx); // Number of valid entries for the map in the enum cache.
+ __ push(Immediate(Smi::FromInt(0))); // Initial index.
+ __ jmp(&loop);
+
+ __ bind(&no_descriptors);
+ __ add(esp, Immediate(kPointerSize));
+ __ jmp(&exit);
+
+ // We got a fixed array in register eax. Iterate through that.
+ Label non_proxy;
+ __ bind(&fixed_array);
+
+  // No need for a write barrier; we are storing a Smi in the feedback vector.
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+
+ __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
+ __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
+ __ j(above, &non_proxy);
+ __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ push(ebx); // Smi
+ __ push(eax); // Array
+ __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ push(eax); // Fixed array length (as smi).
+ __ push(Immediate(Smi::FromInt(0))); // Initial index.
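+  // Loop state, esp-relative from here until the break label:
+  //   esp[0 * kPointerSize] : current index (smi)
+  //   esp[1 * kPointerSize] : array length (smi)
+  //   esp[2 * kPointerSize] : fixed array of keys (or the enum cache)
+  //   esp[3 * kPointerSize] : expected map, or the zero smi in the
+  //                           slow/proxy case
+  //   esp[4 * kPointerSize] : the enumerable object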
+
+ // Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ __ bind(&loop);
+ __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
+ __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
+ __ j(above_equal, loop_statement.break_label());
+
+ // Get the current entry of the array into register ebx.
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
+
+ // Get the expected map from the stack or a smi in the
+ // permanent slow case into register edx.
+ __ mov(edx, Operand(esp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ mov(ecx, Operand(esp, 4 * kPointerSize));
+ __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ j(equal, &update_each, Label::kNear);
+
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ ASSERT(Smi::FromInt(0) == 0);
+ __ test(edx, edx);
+ __ j(zero, &update_each);
+
+ // Convert the entry to a string or null if it isn't a property
+ // anymore. If the property has been removed while iterating, we
+ // just skip it.
+ __ push(ecx); // Enumerable.
+ __ push(ebx); // Current entry.
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ test(eax, eax);
+ __ j(equal, loop_statement.continue_label());
+ __ mov(ebx, eax);
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register ebx.
+ __ bind(&update_each);
+ __ mov(result_register(), ebx);
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitAssignment(stmt->each());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Generate code for going to the next element by incrementing the
+ // index (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_label());
+ __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ jmp(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_label());
+ __ add(esp, Immediate(5 * kPointerSize));
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterable = subject
+ VisitForAccumulatorValue(stmt->assign_iterable());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+ __ CompareRoot(eax, Heap::kNullValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
+ __ mov(ebx, Immediate(info));
+ __ CallStub(&stub);
+ } else {
+ __ push(esi);
+ __ push(Immediate(info));
+ __ push(Immediate(pretenure
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value()));
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+ }
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register context = esi;
+ Register temp = edx;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ __ j(not_equal, slow);
+ }
+ // Load next context in chain.
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering esi.
+ context = temp;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions. If we have reached an eval scope, we check
+ // all extensions from this point.
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s != NULL && s->is_eval_scope()) {
+ // Loop up the context chain. There is no frame effect so it is
+ // safe to use raw labels here.
+ Label next, fast;
+ if (!context.is(temp)) {
+ __ mov(temp, context);
+ }
+ __ bind(&next);
+ // Terminate at native context.
+ __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->native_context_map()));
+ __ j(equal, &fast, Label::kNear);
+ // Check that extension is NULL.
+ __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
+ __ j(not_equal, slow);
+ // Load next context in chain.
+ __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
+ __ jmp(&next);
+ __ bind(&fast);
+ }
+
+ // All extension objects were empty and it is safe to use a global
+ // load IC call.
+ __ mov(edx, GlobalObjectOperand());
+ __ mov(ecx, var->name());
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+
+ CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ ASSERT(var->IsContextSlot());
+ Register context = esi;
+ Register temp = ebx;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ __ j(not_equal, slow);
+ }
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering esi.
+ context = temp;
+ }
+ }
+ // Check that last extension is NULL.
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ __ j(not_equal, slow);
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return an esi-based operand (the write barrier cannot be allowed to
+ // destroy the esi register).
+ return ContextOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ __ jmp(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, done);
+ if (local->mode() == CONST_LEGACY) {
+ __ mov(eax, isolate()->factory()->undefined_value());
+ } else { // LET || CONST
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ }
+ }
+ __ jmp(done);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ Global variable");
+ // Use inline caching. Variable name is passed in ecx and the global
+ // object in eax.
+ __ mov(edx, GlobalObjectOperand());
+ __ mov(ecx, var->name());
+ CallLoadIC(CONTEXTUAL);
+ context()->Plug(eax);
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ // var->location() == LOOKUP always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST_LEGACY &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(eax, var);
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST_LEGACY);
+ __ mov(eax, isolate()->factory()->undefined_value());
+ }
+ __ bind(&done);
+ context()->Plug(eax);
+ break;
+ }
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ bind(&slow);
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ bind(&done);
+ context()->Plug(eax);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // edi = JS function.
+ // ecx = literals array.
+ // ebx = regexp literal.
+ // eax = regexp literal clone.
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
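+ // If the literal slot still holds undefined, the regexp has not been
+ // materialized yet and must be created by the runtime call below.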
+ __ mov(ebx, FieldOperand(ecx, literal_offset));
+ __ cmp(ebx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &materialized, Label::kNear);
+
+ // Create the regexp literal using a runtime function.
+ // The result will be in eax.
+ __ push(ecx);
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(expr->pattern()));
+ __ push(Immediate(expr->flags()));
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+ __ mov(ebx, eax);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(ebx);
+ __ push(Immediate(Smi::FromInt(size)));
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ pop(ebx);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ mov(edx, FieldOperand(ebx, i));
+ __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
+ __ mov(FieldOperand(eax, i), edx);
+ __ mov(FieldOperand(eax, i + kPointerSize), ecx);
+ }
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ mov(edx, FieldOperand(ebx, size - kPointerSize));
+ __ mov(FieldOperand(eax, size - kPointerSize), edx);
+ }
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ push(Immediate(isolate()->factory()->null_value()));
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ int properties_count = constant_properties->length() / 2;
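+ // Literals that may contain doubles, are deeply nested, or otherwise
+ // cannot use the stub are created by a runtime call; simple shallow
+ // literals are cloned by FastCloneShallowObjectStub below.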
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() ||
+ flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_properties));
+ __ push(Immediate(Smi::FromInt(flags)));
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+ } else {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_properties));
+ __ mov(edx, Immediate(Smi::FromInt(flags)));
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
+ __ CallStub(&stub);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in eax.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(eax); // Save result on the stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ mov(ecx, Immediate(key->value()));
+ __ mov(edx, Operand(esp, 0));
+ CallStoreIC(key->LiteralFeedbackId());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ push(Operand(esp, 0));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(eax);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_constant_fast_elements =
+ IsFastObjectElementsKind(constant_elements_kind);
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
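+ // Deep or oversized array literals are created by a runtime call;
+ // shallow ones are cloned by FastCloneShallowArrayStub below.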
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_elements));
+ __ push(Immediate(Smi::FromInt(flags)));
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+ } else {
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_elements));
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ __ push(eax); // array literal.
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ // Fast-case array literals with ElementsKind of FAST_*_ELEMENTS cannot
+ // transition, so they don't need to call the runtime stub.
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ // Store the subexpression value in the array's elements.
+ __ mov(FieldOperand(ebx, offset), result_register());
+ // Update the write barrier for the array store.
+ __ RecordWriteField(ebx, offset, result_register(), ecx,
+ EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
+ } else {
+ // Store the subexpression value in the array's elements.
+ __ mov(ecx, Immediate(Smi::FromInt(i)));
+ StoreArrayLiteralElementStub stub(isolate());
+ __ CallStub(&stub);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ __ add(esp, Immediate(kPointerSize)); // literal index
+ context()->PlugTOS();
+ } else {
+ context()->Plug(eax);
+ }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ Assignment");
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in edx.
+ VisitForStackValue(property->obj());
+ __ mov(edx, Operand(esp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY: {
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ mov(edx, Operand(esp, kPointerSize)); // Object.
+ __ mov(ecx, Operand(esp, 0)); // Key.
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ AccumulatorValueContext result_context(this);
+ { AccumulatorValueContext left_operand_context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ push(eax); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
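+ // If esp still points at the first expression slot, the operand stack
+ // is empty and the runtime call that would copy it can be skipped.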
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
+ __ cmp(esp, ebx);
+ __ j(equal, &post_runtime);
+ __ push(eax); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ mov(FieldOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ // Initial send value is undefined.
+ __ mov(eax, isolate()->factory()->undefined_value());
+ __ jmp(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ mov(ecx, isolate()->factory()->throw_string()); // "throw"
+ __ push(ecx); // "throw"
+ __ push(Operand(esp, 2 * kPointerSize)); // iter
+ __ push(eax); // exception
+ __ jmp(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ bind(&l_try);
+ __ pop(eax); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(eax); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
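+ // The pushed result and the try handler sit between esp and the
+ // generator object.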
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ mov(eax, Operand(esp, generator_object_depth));
+ __ push(eax); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(l_continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(eax); // result
+ EmitReturnSequence();
+ __ bind(&l_resume); // received in eax
+ __ PopTryHandler();
+
+ // receiver = iter; f = iter.next; arg = received;
+ __ bind(&l_next);
+ __ mov(ecx, isolate()->factory()->next_string()); // "next"
+ __ push(ecx);
+ __ push(Operand(esp, 2 * kPointerSize)); // iter
+ __ push(eax); // received
+
+ // result = receiver[f](arg);
+ __ bind(&l_call);
+ __ mov(edx, Operand(esp, kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(edi, eax);
+ __ mov(Operand(esp, 2 * kPointerSize), edi);
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ __ push(eax); // save result
+ __ mov(edx, eax); // result
+ __ mov(ecx, isolate()->factory()->done_string()); // "done"
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ test(eax, eax);
+ __ j(zero, &l_try);
+
+ // result.value
+ __ pop(edx); // result
+ __ mov(ecx, isolate()->factory()->value_string()); // "value"
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
+ context()->DropAndPlug(2, eax); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in eax, and is ultimately read by the resumed generator, as
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // ebx will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ pop(ebx);
+
+ // Check generator state.
+ Label wrong_state, closed_state, done;
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(0)));
+ __ j(equal, &closed_state);
+ __ j(less, &wrong_state);
+
+ // Load suspended function and context.
+ __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+ // Push receiver.
+ __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+ // Push holes for arguments to generator function.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(ecx, isolate()->factory()->the_hole_value());
+ Label push_argument_holes, push_frame;
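+ // edx holds the smi-tagged formal parameter count; one hole is pushed
+ // per parameter until the subtraction below borrows past zero.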
+ __ bind(&push_argument_holes);
+ __ sub(edx, Immediate(Smi::FromInt(1)));
+ __ j(carry, &push_frame);
+ __ push(ecx);
+ __ jmp(&push_argument_holes);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ bind(&push_frame);
+ __ call(&resume_frame);
+ __ jmp(&done);
+ __ bind(&resume_frame);
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+
+ // Load the operand stack size.
+ __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+ __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset));
+ __ SmiUntag(edx);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ cmp(edx, Immediate(0));
+ __ j(not_zero, &slow_resume);
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(ecx);
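+ // The resume address is the function's code entry plus the recorded
+ // continuation offset.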
+ __ add(edx, ecx);
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ jmp(edx);
+ __ bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label push_operand_holes, call_resume;
+ __ bind(&push_operand_holes);
+ __ sub(edx, Immediate(1));
+ __ j(carry, &call_resume);
+ __ push(ecx);
+ __ jmp(&push_operand_holes);
+ __ bind(&call_resume);
+ __ push(ebx);
+ __ push(result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ Abort(kGeneratorFailedToResume);
+
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(eax);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ jmp(&done);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ bind(&wrong_state);
+ __ push(ebx);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
+
+ __ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&allocated);
+ __ mov(ebx, map);
+ __ pop(ecx);
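+ // ecx now holds the result value that the caller left on top of the
+ // stack.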
+ __ mov(edx, isolate()->factory()->ToBoolean(done));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx);
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx);
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
+ ecx, edx);
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ ASSERT(!key->value()->IsSmi());
+ __ mov(ecx, Immediate(key->value()));
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right) {
+ // Do combined smi check of the operands. Left operand is on the
+ // stack. Right operand is in eax.
+ Label smi_case, done, stub_call;
+ __ pop(edx);
+ __ mov(ecx, eax);
+ __ or_(eax, edx);
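+ // Or-ing the two operands combines their tag bits, so a single smi test
+ // on eax covers both the left and the right operand.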
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
+
+ __ bind(&stub_call);
+ __ mov(eax, ecx);
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ jmp(&done, Label::kNear);
+
+ // Smi case.
+ __ bind(&smi_case);
+ __ mov(eax, edx); // Copy left operand in case of a stub call.
+
+ switch (op) {
+ case Token::SAR:
+ __ SmiUntag(ecx);
+ __ sar_cl(eax); // No checks of result necessary
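+ // The arithmetic shift can drag bits into the smi tag position;
+ // clearing the tag bits leaves a correctly tagged smi.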
+ __ and_(eax, Immediate(~kSmiTagMask));
+ break;
+ case Token::SHL: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shl_cl(eax);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(positive, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::SHR: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shr_cl(eax);
+ __ test(eax, Immediate(0xc0000000));
+ __ j(zero, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::ADD:
+ __ add(eax, ecx);
+ __ j(overflow, &stub_call);
+ break;
+ case Token::SUB:
+ __ sub(eax, ecx);
+ __ j(overflow, &stub_call);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(eax);
+ __ imul(eax, ecx);
+ __ j(overflow, &stub_call);
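+ // A zero result may really be -0 if either operand was negative; smis
+ // cannot represent -0, so fall back to the stub in that case.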
+ __ test(eax, eax);
+ __ j(not_zero, &done, Label::kNear);
+ __ mov(ebx, edx);
+ __ or_(ebx, ecx);
+ __ j(negative, &stub_call);
+ break;
+ }
+ case Token::BIT_OR:
+ __ or_(eax, ecx);
+ break;
+ case Token::BIT_AND:
+ __ and_(eax, ecx);
+ break;
+ case Token::BIT_XOR:
+ __ xor_(eax, ecx);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ pop(edx);
+ BinaryOpICStub stub(isolate(), op, mode);
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ ASSERT(expr->IsValidReferenceExpression());
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ push(eax); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ __ mov(edx, eax);
+ __ pop(eax); // Restore value.
+ __ mov(ecx, prop->key()->AsLiteral()->value());
+ CallStoreIC();
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ push(eax); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(ecx, eax);
+ __ pop(edx); // Receiver.
+ __ pop(eax); // Restore value.
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ mov(location, eax);
+ if (var->IsContextSlot()) {
+ __ mov(edx, eax);
+ int offset = Context::SlotOffset(var->index());
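+ // ecx is expected to still hold the context object loaded by the
+ // caller's VarOperand computation; edx carries the value for the
+ // write barrier.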
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ push(eax); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(name));
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op) {
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ mov(ecx, var->name());
+ __ mov(edx, GlobalObjectOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ ASSERT(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ push(eax);
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &skip, Label::kNear);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &assign, Label::kNear);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ __ bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ MemOperand location = VarOperand(var, ecx);
+ if (generate_debug_code_ && op == Token::INIT_LET) {
+ // Check for an uninitialized let binding.
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ Check(equal, kLetBindingReInitialization);
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ }
+ // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ // eax : value
+ // esp[0] : receiver
+
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->IsLiteral());
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ mov(ecx, prop->key()->AsLiteral()->value());
+ __ pop(edx);
+ CallStoreIC(expr->AssignmentFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+ // eax : value
+ // esp[0] : key
+ // esp[kPointerSize] : receiver
+
+ __ pop(ecx); // Key.
+ __ pop(edx);
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ mov(edx, result_register());
+ EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(eax);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ pop(edx); // Object.
+ __ mov(ecx, result_register()); // Key.
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(eax);
+ }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ __ call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ mov(edx, Operand(esp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+ }
+
+ EmitCall(expr, call_type);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ mov(edx, Operand(esp, 0));
+ // Move the key into the right register for the keyed load IC.
+ __ mov(ecx, eax);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ EmitCall(expr, CallIC::METHOD);
+}
+
+
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
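+ // The call IC takes the feedback slot index in edx and the callee,
+ // loaded from below the arguments, in edi.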
+ __ Move(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot())));
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+
+ RecordJSReturnSite(expr);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, eax);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ push(Operand(esp, arg_count * kPointerSize));
+ } else {
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ }
+
+ // Push the receiver of the enclosing function.
+ __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+ // Push the language mode.
+ __ push(Immediate(Smi::FromInt(strict_mode())));
+
+ // Push the start position of the scope the call resides in.
+ __ push(Immediate(Smi::FromInt(scope()->start_position())));
+
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
+ // Then we call the resolved function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ // Reserved receiver slot.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // The runtime call returns a pair of values in eax (function) and
+ // edx (receiver). Touch up the stack with the right values.
+ __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
+ __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
+
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+ // Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
+ Label slow, done;
+ { PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+ }
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in eax) and
+ // the object holding it (returned in edx).
+ __ push(context_register());
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ push(eax); // Function.
+ __ push(edx); // Receiver.
+
+ // If fast case code has been generated, emit code to push the function
+ // and receiver and have the slow path jump around this code.
+ if (done.is_linked()) {
+ Label call;
+ __ jmp(&call, Label::kNear);
+ __ bind(&done);
+ // Push function.
+ __ push(eax);
+ // The receiver is implicitly the global receiver. Indicate this by
+ // passing undefined to the call function stub.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found by
+ // LoadContextSlot.
+ EmitCall(expr);
+
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (property->key()->IsPropertyName()) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
+
+ } else {
+ ASSERT(call_type == Call::OTHER_CALL);
+ // Call to an arbitrary expression not handled specially above.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ }
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // Emit function call.
+ EmitCall(expr);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into edi and eax.
+ __ Move(eax, Immediate(arg_count));
+ __ mov(edi, Operand(esp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ test(eax, Immediate(kSmiTagMask));
+ Split(zero, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
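+ // A non-negative smi has both the tag bit and the sign bit clear, so
+ // both are tested in one instruction.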
+ __ test(eax, Immediate(kSmiTagMask | 0x80000000));
+ Split(zero, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, if_true);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(below, if_false);
+ __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(below_equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(above_equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+ __ test(ebx, Immediate(1 << Map::kIsUndetectable));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(not_zero, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ AssertNotSmi(eax);
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
+ 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ j(not_zero, &skip_lookup);
+
+ // Check for fast case object. Return false for slow case objects.
+ __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ cmp(ecx, isolate()->factory()->hash_table_map());
+ __ j(equal, if_false);
+
+ // Look for the valueOf string in the descriptor array, and indicate false
+ // if found. Since we omit an enumeration index check, a valueOf property
+ // added later via a transition that shares this descriptor array yields a
+ // false positive here.
+ Label entry, loop, done;
+
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(ecx, ebx);
+ __ cmp(ecx, 0);
+ __ j(equal, &done);
+
+ __ LoadInstanceDescriptors(ebx, ebx);
+ // ebx: descriptor array.
+ // ecx: valid entries in the descriptor array.
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ imul(ecx, ecx, DescriptorArray::kDescriptorSize);
+ __ lea(ecx, Operand(ebx, ecx, times_4, DescriptorArray::kFirstOffset));
+ // Calculate location of the first key name.
+ __ add(ebx, Immediate(DescriptorArray::kFirstOffset));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // internalized string "valueOf" the result is false.
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, FieldOperand(ebx, 0));
+ __ cmp(edx, isolate()->factory()->value_of_string());
+ __ j(equal, if_false);
+ __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ bind(&entry);
+ __ cmp(ebx, ecx);
+ __ j(not_equal, &loop);
+
+ __ bind(&done);
+
+ // Reload map as register ebx was used as temporary above.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ or_(FieldOperand(ebx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+
+ __ bind(&skip_lookup);
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is
+ // false.
+ __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ __ JumpIfSmi(ecx, if_false);
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edx,
+ FieldOperand(edx, GlobalObject::kNativeContextOffset));
+ __ cmp(ecx,
+ ContextOperand(edx,
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
+ // Check if the exponent half is 0x80000000. Comparing against 1 and
+ // checking for overflow is the shortest possible encoding.
+ __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
+ __ j(no_overflow, if_false);
+ __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &check_frame_marker);
+ __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ pop(ebx);
+ __ cmp(eax, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in edx and the formal
+ // parameter count in eax.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(edx, eax);
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+
+ Label exit;
+ // Get the number of formal parameters.
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ __ AssertSmi(eax);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(eax, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
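+  // In the instance type enumeration the two callable types bracket the
+  // spec-object range: JS_FUNCTION_PROXY_TYPE opens it as
+  // FIRST_SPEC_OBJECT_TYPE and JS_FUNCTION_TYPE closes it as
+  // LAST_SPEC_OBJECT_TYPE, so one equality check at each end of the range
+  // comparison identifies both.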
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
+ // Map is now in eax.
+ __ j(below, &null);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ j(equal, &function);
+
+ __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ j(equal, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
+ __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &non_function_constructor);
+
+ // eax now contains the constructor function. Grab the
+ // instance class name from there.
+ __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ jmp(&done);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ mov(eax, isolate()->factory()->function_class_string());
+ __ jmp(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ mov(eax, isolate()->factory()->Object_string());
+ __ jmp(&done);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ mov(eax, isolate()->factory()->null_value());
+
+ // All done.
+ __ bind(&done);
+
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(eax, &done, Label::kNear);
+ // If the object is not a value type, return the object.
+ __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done, not_date_object;
+ Register object = eax;
+ Register result = eax;
+ Register scratch = ecx;
+
+ __ JumpIfSmi(object, &not_date_object);
+ __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+ __ j(not_equal, &not_date_object);
+
+ if (index->value() == 0) {
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
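+      // Fields below kFirstUncachedField are cached in the JSDate object
+      // itself. The isolate-wide date cache stamp is bumped whenever the
+      // date/timezone cache is invalidated, so a matching stamp means the
+      // cached field is still valid and can be read directly.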
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch, Operand::StaticVariable(stamp));
+ __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ j(not_equal, &runtime, Label::kNear);
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ mov(Operand(esp, 0), object);
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ jmp(&done);
+ }
+
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = eax;
+ Register index = ebx;
+ Register value = ecx;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ __ pop(value);
+ __ pop(index);
+
+ if (FLAG_debug_code) {
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ }
+
+ __ SmiUntag(value);
+ __ SmiUntag(index);
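+  // On ia32 a smi is the integer shifted left by one with a zero tag bit
+  // (e.g. 3 is encoded as 0b110), so untagging is a single arithmetic shift
+  // right. Both the index and the character value are needed untagged for
+  // the one-byte store below.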
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
+ __ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = eax;
+ Register index = ebx;
+ Register value = ecx;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ pop(value);
+ __ pop(index);
+
+ if (FLAG_debug_code) {
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ __ SmiUntag(index);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index);
+ }
+
+ __ SmiUntag(value);
+  // No need to untag the index: a smi is the value shifted left by one,
+  // which is exactly the byte offset of a two-byte character.
+ __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+ // Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ __ CallRuntime(Runtime::kHiddenMathPowSlow, 2);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ pop(ebx); // eax = value. ebx = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(ebx, &done, Label::kNear);
+
+ // If the object is not a value type, return the value.
+ __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
+ __ j(not_equal, &done, Label::kNear);
+
+ // Store the value.
+ __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
+
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ mov(edx, eax);
+ __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx);
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument into eax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+
+ NumberToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ StringCharFromCodeGenerator generator(eax, ebx);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(ebx);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = ebx;
+ Register index = eax;
+ Register result = edx;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ Move(result, Immediate(isolate()->factory()->nan_value()));
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ Move(result, Immediate(isolate()->factory()->undefined_value()));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = ebx;
+ Register index = eax;
+ Register scratch = edx;
+ Register result = eax;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ Move(result, Immediate(isolate()->factory()->empty_string()));
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Move(result, Immediate(Smi::FromInt(0)));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ __ pop(edx);
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; ++i) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &runtime);
+
+ // InvokeFunction requires the function in edi. Move it in there.
+ __ mov(edi, result_register());
+ ParameterCount count(arg_count);
+ __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper());
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&runtime);
+ __ push(eax);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpConstructResultStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(2));
+ __ pop(ebx);
+ __ pop(ecx);
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->native_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort(kAttemptToUseUndefinedCache);
+ __ mov(eax, isolate()->factory()->undefined_value());
+ context()->Plug(eax);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = eax;
+ Register cache = ebx;
+ Register tmp = ecx;
+ __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
+ __ mov(cache,
+ FieldOperand(cache, GlobalObject::kNativeContextOffset));
+ __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ mov(cache,
+ FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+ Label done, not_found;
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // tmp now holds finger offset as a smi.
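+  // A JSFunctionResultCache stores (key, value) pairs in consecutive
+  // elements, with the finger indexing the most recently hit key. This fast
+  // path probes only the finger entry; on a mismatch the runtime call below
+  // performs the full lookup and advances the finger. The value is read one
+  // element past the key, hence the extra offset of 1.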
+ __ cmp(key, FixedArrayElementOperand(cache, tmp));
+ __ j(not_equal, &not_found);
+
+ __ mov(eax, FixedArrayElementOperand(cache, tmp, 1));
+ __ jmp(&done);
+
+ __ bind(&not_found);
+ // Call runtime to perform the lookup.
+ __ push(cache);
+ __ push(key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(eax);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ test(FieldOperand(eax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(zero, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(eax);
+
+ __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+ __ IndexFromHash(eax, eax);
+
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ // We will leave the separator on the stack until the end of the function.
+ VisitForStackValue(args->at(1));
+  // Load the first argument (the array) into eax.
+ VisitForAccumulatorValue(args->at(0));
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = eax;
+ Register elements = no_reg; // Will be eax.
+
+ Register index = edx;
+
+ Register string_length = ecx;
+
+ Register string = esi;
+
+ Register scratch = ebx;
+
+ Register array_length = edi;
+ Register result_pos = no_reg; // Will be edi.
+
+ // Separator operand is already pushed.
+ Operand separator_operand = Operand(esp, 2 * kPointerSize);
+ Operand result_operand = Operand(esp, 1 * kPointerSize);
+ Operand array_length_operand = Operand(esp, 0);
+ __ sub(esp, Immediate(2 * kPointerSize));
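+  // Stack layout from here on: the separator (pushed above) sits at
+  // esp + 2 * kPointerSize, and the two slots just reserved hold the result
+  // string (esp + kPointerSize) and the untagged array length (esp + 0),
+  // matching the three operands defined above.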
+ __ cld();
+  // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &bailout);
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(scratch, &bailout);
+
+ // If the array has length zero, return the empty string.
+ __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length);
+ __ j(not_zero, &non_trivial_array);
+ __ mov(result_operand, isolate()->factory()->empty_string());
+ __ jmp(&done);
+
+ // Save the array length.
+ __ bind(&non_trivial_array);
+ __ mov(array_length_operand, array_length);
+
+ // Save the FixedArray containing array's elements.
+ // End of array's live range.
+ elements = array;
+ __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
+ array = no_reg;
+
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ Move(index, Immediate(0));
+ __ Move(string_length, Immediate(0));
+ // Loop condition: while (index < length).
+ // Live loop registers: index, array_length, string,
+ // scratch, string_length, elements.
+ if (generate_debug_code_) {
+ __ cmp(index, array_length);
+ __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ }
+ __ bind(&loop);
+ __ mov(string, FieldOperand(elements,
+ index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(string, &bailout);
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, Immediate(
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+ __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
+ __ j(not_equal, &bailout);
+ __ add(string_length,
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
+ __ j(overflow, &bailout);
+ __ add(index, Immediate(1));
+ __ cmp(index, array_length);
+ __ j(less, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, 1);
+ __ j(not_equal, &not_size_one_array);
+ __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ mov(result_operand, scratch);
+ __ jmp(&done);
+
+ __ bind(&not_size_one_array);
+
+ // End of array_length live range.
+ result_pos = array_length;
+ array_length = no_reg;
+
+ // Live registers:
+ // string_length: Sum of string lengths, as a smi.
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ mov(string, separator_operand);
+ __ JumpIfSmi(string, &bailout);
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, Immediate(
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+ __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
+ __ j(not_equal, &bailout);
+
+ // Add (separator length times array_length) - separator length
+ // to string_length.
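+  // For array_length strings joined by a separator of length sep, the
+  // result length is sum + sep * (array_length - 1). This is computed in
+  // smi form as (sum - sep) + sep * array_length, subtracting first so that
+  // the overflow checks on the imul and add cover the whole computation;
+  // the shift below then untags the smi total.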
+ __ mov(scratch, separator_operand);
+ __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
+ __ sub(string_length, scratch); // May be negative, temporarily.
+ __ imul(scratch, array_length_operand);
+ __ j(overflow, &bailout);
+ __ add(string_length, scratch);
+ __ j(overflow, &bailout);
+
+ __ shr(string_length, 1);
+ // Live registers and stack values:
+ // string_length
+ // elements
+ __ AllocateAsciiString(result_pos, string_length, scratch,
+ index, string, &bailout);
+ __ mov(result_operand, result_pos);
+ __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
+
+
+ __ mov(string, separator_operand);
+ __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ j(equal, &one_char_separator);
+ __ j(greater, &long_separator);
+
+
+ // Empty separator case
+ __ mov(index, Immediate(0));
+ __ jmp(&loop_1_condition);
+ // Loop condition: while (index < length).
+ __ bind(&loop_1);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // elements: the FixedArray of strings we are joining.
+
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(index, Immediate(1));
+ __ bind(&loop_1_condition);
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_1); // End while (index < length).
+ __ jmp(&done);
+
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its ASCII character value.
+ __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ mov_b(separator_operand, scratch);
+
+ __ Move(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+ __ jmp(&loop_2_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_2);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator character to the result.
+ __ mov_b(scratch, separator_operand);
+ __ mov_b(Operand(result_pos, 0), scratch);
+ __ inc(result_pos);
+
+ __ bind(&loop_2_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(index, Immediate(1));
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_2); // End while (index < length).
+ __ jmp(&done);
+
+
+ // Long separator case (separator is more than one character).
+ __ bind(&long_separator);
+
+ __ Move(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+ __ jmp(&loop_3_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_3);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator to the result.
+ __ mov(string, separator_operand);
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+
+ __ bind(&loop_3_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(index, Immediate(1));
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_3); // End while (index < length).
+ __ jmp(&done);
+
+
+ __ bind(&bailout);
+ __ mov(result_operand, isolate()->factory()->undefined_value());
+ __ bind(&done);
+ __ mov(eax, result_operand);
+ // Drop temp values from the stack, and restore context register.
+ __ add(esp, Immediate(3 * kPointerSize));
+
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+
+ if (expr->is_jsruntime()) {
+ // Push the builtins object as receiver.
+ __ mov(eax, GlobalObjectOperand());
+ __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
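+    // A JS runtime call targets a function implemented in JavaScript and
+    // installed on the builtins object: it compiles to a named load from
+    // that object (the IC below) followed by an ordinary call with the
+    // builtins object as the receiver.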
+
+ // Load the function from the receiver.
+ __ mov(edx, Operand(esp, 0));
+ __ mov(ecx, Immediate(expr->name()));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
+
+ } else {
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+
+ context()->Plug(eax);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ push(Immediate(Smi::FromInt(strict_mode())));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(eax);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is allowed.
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
+ if (var->IsUnallocated()) {
+ __ push(GlobalObjectOperand());
+ __ push(Immediate(var->name()));
+ __ push(Immediate(Smi::FromInt(SLOPPY)));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(eax);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global variables is false. 'this' is
+ // not really a variable, though we implement it as one. The
+ // subexpression does not have side effects.
+ context()->Plug(var->is_this());
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+ context()->Plug(eax);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+ break;
+ }
+
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(isolate()->factory()->undefined_value());
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ mov(eax, isolate()->factory()->true_value());
+ } else {
+ __ Push(isolate()->factory()->true_value());
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ mov(eax, isolate()->factory()->false_value());
+ } else {
+ __ Push(isolate()->factory()->false_value());
+ }
+ __ bind(&done);
+ }
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ { StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(eax);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ push(Immediate(Smi::FromInt(0)));
+ }
+ if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in edx.
+ VisitForAccumulatorValue(prop->obj());
+ __ push(eax);
+ __ mov(edx, eax);
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ __ mov(edx, Operand(esp, kPointerSize)); // Object.
+ __ mov(ecx, Operand(esp, 0)); // Key.
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label done, stub_call;
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ }
+ }
+
+ if (expr->op() == Token::INC) {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ j(no_overflow, &done, Label::kNear);
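+    // Adding the smi constant 1 adds raw 2 to the word; signed overflow
+    // means the result no longer fits in the 31-bit smi payload, so the
+    // operation is undone below and redirected to the generic stub.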
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ jmp(&stub_call, Label::kNear);
+ __ bind(&slow);
+ }
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ }
+ }
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ // Call stub for +1/-1.
+ __ bind(&stub_call);
+ __ mov(edx, eax);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ bind(&done);
+
+ // Store the value returned in eax.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(eax);
+ }
+        // For all contexts except EffectContext we have the result on
+ // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ // Perform the assignment as if via '='.
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(ecx, prop->key()->AsLiteral()->value());
+ __ pop(edx);
+ CallStoreIC(expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(eax);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ pop(ecx);
+ __ pop(edx);
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+        // Result is on the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(eax);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
+
+ if (proxy != NULL && proxy->var()->IsUnallocated()) {
+ Comment cmnt(masm_, "[ Global variable");
+ __ mov(edx, GlobalObjectOperand());
+ __ mov(ecx, Immediate(proxy->name()));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ CallLoadIC(NOT_CONTEXTUAL);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(eax);
+ } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ __ push(esi);
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ bind(&done);
+
+ context()->Plug(eax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInDuplicateContext(expr);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
+ __ JumpIfSmi(eax, if_true);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->string_string())) {
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+ __ j(above_equal, if_false);
+ // Check for undetectable objects => false.
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ Split(zero, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->symbol_string())) {
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, SYMBOL_TYPE, edx);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->boolean_string())) {
+ __ cmp(eax, isolate()->factory()->true_value());
+ __ j(equal, if_true);
+ __ cmp(eax, isolate()->factory()->false_value());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(check, factory->null_string())) {
+ __ cmp(eax, isolate()->factory()->null_value());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->undefined_string())) {
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ __ j(equal, if_true);
+ __ JumpIfSmi(eax, if_false);
+ // Check for undetectable objects => true.
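+    // Undetectable objects (e.g. document.all) carry a map bit that makes
+    // typeof report "undefined", so the bit test below counts them as true
+    // here; the string and object cases test the same bit with the
+    // opposite sense.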
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->function_string())) {
+ __ JumpIfSmi(eax, if_false);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
+ __ j(equal, if_true);
+ __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->object_string())) {
+ __ JumpIfSmi(eax, if_false);
+ if (!FLAG_harmony_typeof) {
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, if_true);
+ }
+ __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
+ __ j(below, if_false);
+ __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, if_false);
+ // Check for undetectable objects => false.
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ Split(zero, if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ test(eax, eax);
+ // The stub returns 0 for true.
+ Split(zero, if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cc = CompareIC::ComputeCondition(op);
+ __ pop(edx);
+
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
+ __ cmp(edx, eax);
+ Split(cc, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ test(eax, eax);
+ Split(cc, if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ Handle<Object> nil_value = nil == kNullValue
+ ? isolate()->factory()->null_value()
+ : isolate()->factory()->undefined_value();
+ if (expr->op() == Token::EQ_STRICT) {
+ __ cmp(eax, nil_value);
+ Split(equal, if_true, if_false, fall_through);
+ } else {
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ test(eax, eax);
+ Split(not_zero, if_true, if_false, fall_through);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(eax);
+}
+
+
+Register FullCodeGenerator::result_register() {
+ return eax;
+}
+
+
+Register FullCodeGenerator::context_register() {
+ return esi;
+}
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ mov(Operand(ebp, frame_offset), value);
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ mov(dst, ContextOperand(esi, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ push(Immediate(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts nested inside eval code have the same closure as the context
+ // calling eval, not the anonymous closure containing the eval code.
+ // Fetch it from the context.
+ __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ // Cook return address on top of stack (smi encoded Code* delta)
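+  // The return address is an interior pointer into this code object, which
+  // the GC must not encounter on the stack while the finally block runs.
+  // Storing it as a smi-tagged offset from the code object's start keeps
+  // the slot GC-safe; ExitFinallyBlock reverses the transformation.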
+ ASSERT(!result_register().is(edx));
+ __ pop(edx);
+ __ sub(edx, Immediate(masm_->CodeObject()));
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ SmiTag(edx);
+ __ push(edx);
+
+ // Store result register while executing finally block.
+ __ push(result_register());
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(edx, Operand::StaticVariable(pending_message_obj));
+ __ push(edx);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(edx, Operand::StaticVariable(has_pending_message));
+ __ SmiTag(edx);
+ __ push(edx);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(edx, Operand::StaticVariable(pending_message_script));
+ __ push(edx);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(edx));
+ // Restore pending message from stack.
+ __ pop(edx);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(Operand::StaticVariable(pending_message_script), edx);
+
+ __ pop(edx);
+ __ SmiUntag(edx);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(Operand::StaticVariable(has_pending_message), edx);
+
+ __ pop(edx);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(Operand::StaticVariable(pending_message_obj), edx);
+
+ // Restore result register from stack.
+ __ pop(result_register());
+
+ // Uncook return address.
+ __ pop(edx);
+ __ SmiUntag(edx);
+ __ add(edx, Immediate(masm_->CodeObject()));
+ __ jmp(edx);
+}
+
+
+#undef __
+
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+ int* stack_depth,
+ int* context_length) {
+ // The macros used here must preserve the result register.
+
+ // Because the handler block contains the context of the finally
+ // code, we can restore it directly from there for the finally code
+ // rather than iteratively unwinding contexts via their previous
+ // links.
+ __ Drop(*stack_depth); // Down to the handler block.
+ if (*context_length > 0) {
+ // Restore the context to its dedicated register and the stack.
+ __ mov(esi, Operand(esp, StackHandlerConstants::kContextOffset));
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ }
+ __ PopTryHandler();
+ __ call(finally_entry_);
+
+ *stack_depth = 0;
+ *context_length = 0;
+ return previous_;
+}
+
+#undef __
+
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+#ifdef DEBUG
+static const byte kCallInstruction = 0xe8;
+#endif
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
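+  // Layout at a back edge: a two-byte "jns <offset>" precedes the one-byte
+  // call opcode (0xe8) and its 32-bit target, so relative to the return
+  // address pc the jns opcode and its offset sit 3 and 2 bytes before the
+  // call's target operand. Patching for OSR replaces the jns with a
+  // two-byte nop (0x66 0x90) so execution always falls into the call.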
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+      // call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/ic-x87.cc b/chromium/v8/src/x87/ic-x87.cc
new file mode 100644
index 00000000000..6cd9ac4e683
--- /dev/null
+++ b/chromium/v8/src/x87/ic-x87.cc
@@ -0,0 +1,1290 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/codegen.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
+ __ j(equal, global_object);
+ __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
+ __ j(equal, global_object);
+ __ cmp(type, JS_GLOBAL_PROXY_TYPE);
+ __ j(equal, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Register r1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // r0: used to hold receiver instance type.
+ // Holds the property dictionary on fall through.
+  //  r1: used to hold the receiver's map.
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+ __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, miss);
+
+ // If this assert fails, we have to check upper bound too.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+ GenerateGlobalInstanceTypeCheck(masm, r0, miss);
+
+ // Check for non-global object that requires access check.
+ __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
+ (1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor));
+ __ j(not_zero, miss);
+
+ __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss,
+ DONT_DO_SMI_CHECK);
+}
+
+
+// Helper function used to load a property from a dictionary backing
+// storage. This function may fail to load a property even though it is
+// in the dictionary, so code at miss_label must always call a backup
+// property load that is complete. This function is safe to call if
+// name is not internalized, and will jump to the miss_label in that
+// case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is unchanged.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // Scratch registers:
+ //
+ // r0 - used for the index into the property dictionary
+ //
+ // r1 - used to hold the capacity of the property dictionary.
+ //
+ // result - holds the result on exit.
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
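+  // Each dictionary entry is a (key, value, details) triple; the details
+  // word is a smi whose type field must be zero (a normal data property)
+  // for this fast path. Accessors and other special property types fail
+  // the test and take the miss path.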
+ __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+ Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
+ __ j(not_zero, miss_label);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not internalized, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register value,
+ Register r0,
+ Register r1) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is clobbered.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // value - holds the value to store and is unchanged.
+ //
+  // r0 - used for the index into the property dictionary and is clobbered.
+ //
+ // r1 - used to hold the capacity of the property dictionary and is clobbered.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property that is not read only.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask =
+ (PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+ Immediate(kTypeAndReadOnlyMask));
+ __ j(not_zero, miss_label);
+
+ // Store the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+ __ mov(Operand(r0, 0), value);
+
+ // Update write barrier. Make sure not to clobber the value.
+ __ mov(r1, value);
+ __ RecordWrite(elements, r0, r1);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map,
+ int interceptor_bit,
+ Label* slow) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // Scratch registers:
+ // map - used to hold the map of the receiver.
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+
+ // Get the map of the receiver.
+ __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ // Check bit field.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+ (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
+ __ j(not_zero, slow);
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // If the object is a value-wrapper object, we enter the runtime
+  // system to make sure that indexing into string objects works as
+  // intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+
+ __ CmpInstanceType(map, JS_OBJECT_TYPE);
+ __ j(below, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register scratch,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+ // Scratch registers:
+ // scratch - used to hold elements of the receiver and the loaded value.
+ // result - holds the result on exit if the load succeeds and
+ // we fall through.
+
+ __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ CheckMap(scratch,
+ masm->isolate()->factory()->fixed_array_map(),
+ not_fast_array,
+ DONT_DO_SMI_CHECK);
+ } else {
+ __ AssertFastElements(scratch);
+ }
+ // Check that the key (index) is within bounds.
+ __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
+ __ j(above_equal, out_of_range);
+ // Fast case: Do the load.
+ STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
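+  // The key is a smi, i.e. the element index shifted left by one, so scaling
+  // it by times_2 yields an offset of index * kPointerSize.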
+ __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
+ __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
+  // If the loaded value is the_hole, we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+ __ j(equal, out_of_range);
+ if (!result.is(scratch)) {
+ __ mov(result, scratch);
+ }
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_unique) {
+ // Register use:
+ // key - holds the key and is unchanged. Assumed to be non-smi.
+ // Scratch registers:
+ // map - used to hold the map of the key.
+ // hash - used to hold the hash of the key.
+ Label unique;
+ __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
+ __ j(above, not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ j(equal, &unique);
+
+ // Is the string an array index, with cached numeric value?
+ __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
+ __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
+ __ j(zero, index_string);
+
+ // Is the string internalized? We already know it's a string so a single
+ // bit test is enough.
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
+ kIsNotInternalizedMask);
+ __ j(not_zero, not_unique);
+
+ __ bind(&unique);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+ Factory* factory = masm->isolate()->factory();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+ // Check that the key is a positive smi.
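+  // The mask covers the smi tag bit and the sign bit; both must be clear.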
+ __ test(key, Immediate(0x80000001));
+ __ j(not_zero, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
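+  // The first two elements of the parameter map hold the context and the
+  // arguments backing store, hence the subtraction of 2 below.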
+ __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(scratch2, Immediate(Smi::FromInt(2)));
+ __ cmp(key, scratch2);
+ __ j(above_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ mov(scratch2, FieldOperand(scratch1,
+ key,
+ times_half_pointer_size,
+ kHeaderSize));
+ __ cmp(scratch2, factory->the_hole_value());
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ const int kContextOffset = FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
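+  // scratch2 holds a smi index, so times_half_pointer_size scales it to a
+  // byte offset of index * kPointerSize.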
+ return FieldOperand(scratch1,
+ scratch2,
+ times_half_pointer_size,
+ Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, scratch);
+ __ j(greater_equal, slow_case);
+ return FieldOperand(backing_store,
+ key,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, check_name, index_smi, index_name, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &check_name);
+ __ bind(&index_smi);
+  // Now the key is known to be a smi. This point is also reached from
+  // below, where a numeric string key is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(eax, &check_number_dictionary);
+
+ GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
+ Isolate* isolate = masm->isolate();
+ Counters* counters = isolate->counters();
+ __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+ __ ret(0);
+
+ __ bind(&check_number_dictionary);
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+
+ // Check whether the elements is a number dictionary.
+ // edx: receiver
+ // ebx: untagged index
+ // ecx: key
+ // eax: elements
+ __ CheckMap(eax,
+ isolate->factory()->hash_table_map(),
+ &slow,
+ DONT_DO_SMI_CHECK);
+ Label slow_pop_receiver;
+ // Push receiver on the stack to free up a register for the dictionary
+ // probing.
+ __ push(edx);
+ __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
+ // Pop receiver before returning.
+ __ pop(edx);
+ __ ret(0);
+
+ __ bind(&slow_pop_receiver);
+ // Pop the receiver from the stack and jump to runtime.
+ __ pop(edx);
+
+ __ bind(&slow);
+ // Slow case: jump to runtime.
+ // edx: receiver
+ // ecx: key
+ __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow);
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, edx, eax, Map::kHasNamedInterceptor, &slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(isolate->factory()->hash_table_map()));
+ __ j(equal, &probe_dictionary);
+
+ // The receiver's map is still in eax, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ if (FLAG_debug_code) {
+ __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
+ __ Check(equal, kMapIsNoLongerInEax);
+ }
+ __ mov(ebx, eax); // Keep the map around for later.
+ __ shr(eax, KeyedLookupCache::kMapHashShift);
+ __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
+ __ shr(edi, String::kHashShift);
+ __ xor_(eax, edi);
+ __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
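+  // eax now holds the probe index derived from the map address and the name
+  // hash, masked to the cache size.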
+
+ // Load the key (consisting of map and internalized string) from the cache and
+  // check for a match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(masm->isolate());
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
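+    // Each cache entry is a (map, name) pair, so scale the index by
+    // 2 * kPointerSize.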
+ __ mov(edi, eax);
+ __ shl(edi, kPointerSizeLog2 + 1);
+ if (i != 0) {
+ __ add(edi, Immediate(kPointerSize * i * 2));
+ }
+ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &try_next_entry);
+ __ add(edi, Immediate(kPointerSize));
+ __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(equal, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ __ lea(edi, Operand(eax, 1));
+ __ shl(edi, kPointerSizeLog2 + 1);
+ __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
+ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &slow);
+ __ add(edi, Immediate(kPointerSize));
+ __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &slow);
+
+ // Get field offset.
+ // edx : receiver
+ // ebx : receiver's map
+ // ecx : key
+ // eax : lookup cache index
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ if (i != 0) {
+ __ add(eax, Immediate(i));
+ }
+ __ mov(edi,
+ Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
+ __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+ __ sub(edi, eax);
+ __ j(above_equal, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ bind(&load_in_object_property);
+ __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
+ __ add(eax, edi);
+ __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+ __ ret(0);
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
+ __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+ __ ret(0);
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+
+ __ mov(eax, FieldOperand(edx, JSObject::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
+
+ GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
+ __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+ __ ret(0);
+
+ __ bind(&index_name);
+ __ IndexFromHash(ebx, ecx);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key (index)
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ Register receiver = edx;
+ Register index = ecx;
+ Register scratch = ebx;
+ Register result = eax;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ ret(0);
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(edx, &slow);
+
+  // Check that the key is an array index, that is, a Uint32.
+ __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
+ __ j(not_zero, &slow);
+
+ // Get the map of the receiver.
+ __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
+ __ and_(eax, Immediate(kSlowCaseBitFieldMask));
+ __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
+ __ j(not_zero, &slow);
+
+ // Everything is fine, call runtime.
+ __ pop(eax);
+ __ push(edx); // receiver
+ __ push(ecx); // key
+ __ push(eax); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Factory* factory = masm->isolate()->factory();
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
+ __ mov(eax, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
+ __ cmp(unmapped_location, factory->the_hole_value());
+ __ j(equal, &slow);
+ __ mov(eax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
+ __ mov(mapped_location, eax);
+ __ lea(ecx, mapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, ecx, edx);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
+ __ mov(unmapped_location, eax);
+ __ lea(edi, unmapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, edi, edx);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+ // eax: value
+ // ecx: key (a smi)
+ // edx: receiver
+ // ebx: FixedArray receiver->elements
+ // edi: receiver map
+  // Fast case: Do the store; the value could be either an Object or a double.
+ __ bind(fast_object);
+ if (check_map == kCheckMap) {
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+ __ j(not_equal, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element.
+ Label holecheck_passed1;
+ __ cmp(FixedArrayElementOperand(ebx, ecx),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ bind(&holecheck_passed1);
+
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(eax, &non_smi_value);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+ __ mov(FixedArrayElementOperand(ebx, ecx), eax);
+ __ ret(0);
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(edi, &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ __ mov(FixedArrayElementOperand(ebx, ecx), eax);
+ // Update write barrier for the elements array address.
+ __ mov(edx, eax); // Preserve the value which is returned.
+ __ RecordWriteArray(
+ ebx, edx, ecx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ ret(0);
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+ __ j(not_equal, slow);
+ // If the value is a number, store it as a double in the FastDoubleElements
+ // array.
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+ // go to the runtime.
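+  // Only the upper 32 bits of the double are compared below; the hole NaN
+  // is identified by its distinguished upper word.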
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(ebx, ecx, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(not_equal, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(eax, ebx, ecx, edi,
+ &transition_double_elements, false);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ __ ret(0);
+
+ __ bind(&transition_smi_elements);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+ // Transition the array appropriately depending on the value type.
+ __ CheckMap(eax,
+ masm->isolate()->factory()->heap_number_map(),
+ &non_double_value,
+ DONT_DO_SMI_CHECK);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
+ // and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ ebx,
+ edi,
+ slow);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ ebx,
+ edi,
+ slow);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that's not
+  // a HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ ebx,
+ edi,
+ slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(edx, &slow);
+ // Get the map from the receiver.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
+ __ j(not_zero, &slow);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &slow);
+ __ CmpInstanceType(edi, JS_ARRAY_TYPE);
+ __ j(equal, &array);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+ __ j(below, &slow);
+
+ // Object case: Check key against length in the elements array.
+ // eax: value
+ // edx: JSObject
+ // ecx: key (a smi)
+ // edi: receiver map
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ j(below, &fast_object);
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // eax: value
+ // edx: receiver, a JSArray
+ // ecx: key, a smi.
+ // ebx: receiver->elements, a FixedArray
+ // edi: receiver map
+ // flags: compare (ecx, edx.length())
+  // Do not leave holes in the array: bail out unless key == length.
+ __ j(not_equal, &slow);
+ __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+ __ j(not_equal, &check_if_double_array);
+ __ jmp(&fast_object_grow);
+
+ __ bind(&check_if_double_array);
+ __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+ __ j(not_equal, &slow);
+ __ jmp(&fast_double_grow);
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+ __ bind(&array);
+ // eax: value
+ // edx: receiver, a JSArray
+ // ecx: key, a smi.
+ // edi: receiver map
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array and fall through to the
+ // common store code.
+ __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
+ __ j(above_equal, &extra);
+
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, edx, ecx, ebx, eax);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss, slow;
+
+ GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
+
+ // eax: elements
+ // Search the dictionary placing the result in eax.
+ GenerateDictionaryLoad(masm, &slow, eax, ecx, edi, ebx, eax);
+ __ ret(0);
+
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, edx, ecx, ebx, no_reg);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+  __ push(edx); // receiver
+  __ push(ecx); // name
+  __ push(eax); // value
+  __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ Label miss, restore_miss;
+
+ GenerateNameDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
+
+  // A lot of registers are needed for storing to slow-case
+  // objects. Push and restore the receiver, but rely on
+  // GenerateDictionaryStore preserving the value and name.
+ __ push(edx);
+ GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
+ __ Drop(1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1);
+ __ ret(0);
+
+ __ bind(&restore_miss);
+ __ pop(edx);
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ pop(ebx);
+  __ push(edx); // receiver
+  __ push(ecx); // name
+  __ push(eax); // value
+ __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+  __ push(edx); // receiver
+  __ push(ecx); // key
+  __ push(eax); // value
+ __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
+ __ push(Immediate(Smi::FromInt(strict_mode))); // Strict mode.
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+  __ push(edx); // receiver
+  __ push(ecx); // key
+  __ push(eax); // value
+  __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+  __ push(edx); // receiver
+  __ push(ecx); // key
+  __ push(eax); // value
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+  __ push(edx); // receiver
+  __ push(ecx); // key
+  __ push(eax); // value
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return equal;
+ case Token::LT:
+ return less;
+ case Token::GT:
+ return greater;
+ case Token::LTE:
+ return less_equal;
+ case Token::GTE:
+ return greater_equal;
+ default:
+ UNREACHABLE();
+ return no_condition;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al, nothing
+ // was inlined.
+ return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al, nothing
+ // was inlined.
+ if (*test_instruction_address != Assembler::kTestAlByte) {
+ ASSERT(*test_instruction_address == Assembler::kNopByte);
+ return;
+ }
+
+ Address delta_address = test_instruction_address + 1;
+ // The delta to the start of the map check instruction and the
+  // condition code to use at the patched jump.
+ uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n",
+ address, test_instruction_address, delta);
+ }
+
+ // Patch with a short conditional jump. Enabling means switching from a short
+ // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+  // reverse operation.
+ Address jmp_address = test_instruction_address - delta;
+ ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode)
+ : (*jmp_address == Assembler::kJnzShortOpcode ||
+ *jmp_address == Assembler::kJzShortOpcode));
+ Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+ : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+ *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/lithium-codegen-x87.cc b/chromium/v8/src/x87/lithium-codegen-x87.cc
new file mode 100644
index 00000000000..dab87688a3f
--- /dev/null
+++ b/chromium/v8/src/x87/lithium-codegen-x87.cc
@@ -0,0 +1,5707 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/x87/lithium-codegen-x87.h"
+#include "src/ic.h"
+#include "src/code-stubs.h"
+#include "src/deoptimizer.h"
+#include "src/stub-cache.h"
+#include "src/codegen.h"
+#include "src/hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+// When invoking builtins, we need to record the safepoint in the middle of
+// the invoke instruction sequence generated by the macro assembler.
+class SafepointGenerator V8_FINAL : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deopt_mode_(mode) {}
+ virtual ~SafepointGenerator() {}
+
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+
+ virtual void AfterCall() const V8_OVERRIDE {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+ support_aligned_spilled_doubles_ = info()->IsOptimizing();
+
+ dynamic_frame_alignment_ = info()->IsOptimizing() &&
+ ((chunk()->num_double_slots() > 2 &&
+ !chunk()->graph()->is_recursive()) ||
+ !info()->osr_ast_id().IsNone());
+
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateJumpTable() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
+ PopulateDeoptimizationData(code);
+ if (!info()->IsStub()) {
+ Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ }
+}
+
+
+#ifdef _MSC_VER
+void LCodeGen::MakeSureStackPagesMapped(int offset) {
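+  // On Windows, the stack may not be accessed more than one page below the
+  // most recently mapped page. Write to each page in turn (the written value
+  // is irrelevant) so the whole reserved area is mapped before use.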
+ const int kPageSize = 4 * KB;
+ for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
+ __ mov(Operand(esp, offset), eax);
+ }
+}
+#endif
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
+#endif
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
+ Label ok;
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(ecx, Operand(esp, receiver_offset));
+
+ __ cmp(ecx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+
+ __ mov(Operand(esp, receiver_offset), ecx);
+
+ __ bind(&ok);
+ }
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ // Move state of dynamic frame alignment into edx.
+ __ Move(edx, Immediate(kNoAlignmentPadding));
+
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp + 4 to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(not_zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ bind(&do_not_pad);
+ }
+ }
+
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
+ info()->AddNoFrameRange(0, masm_->pc_offset());
+ }
+
+ if (info()->IsOptimizing() &&
+ dynamic_frame_alignment_ &&
+ FLAG_debug_code) {
+ __ test(esp, Immediate(kPointerSize));
+ __ Assert(zero, kFrameIsExpectedToBeAligned);
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ ASSERT(slots != 0 || !info()->IsOptimizing());
+ if (slots > 0) {
+ if (slots == 1) {
+ if (dynamic_frame_alignment_) {
+ __ push(edx);
+ } else {
+ __ push(Immediate(kNoAlignmentPadding));
+ }
+ } else {
+ if (FLAG_debug_code) {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+ __ push(eax);
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ mov(MemOperand(esp, eax, times_4, 0),
+ Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ __ pop(eax);
+ } else {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+ }
+
+ if (support_aligned_spilled_doubles_) {
+ Comment(";;; Store dynamic frame alignment tag for spilled doubles");
+ // Store dynamic frame alignment state in the first local.
+ int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
+ if (dynamic_frame_alignment_) {
+ __ mov(Operand(ebp, offset), edx);
+ } else {
+ __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
+ }
+ }
+ }
+ }
+
+ // Possibly allocate a local context.
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
+ // Argument to NewContext is the function, which is still in edi.
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(edi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+ // Context is returned in eax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in esi.
+ __ mov(esi, eax);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(var->index());
+ __ mov(Operand(esi, context_offset), eax);
+ // Update the write barrier. This clobbers eax and ebx.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(esi,
+ context_offset,
+ eax,
+ ebx);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so esi still holds the
+ // incoming context.
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or, if
+  // there are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Move state of dynamic frame alignment into edx.
+ __ Move(edx, Immediate(kNoAlignmentPadding));
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ // Align ebp + 4 to a multiple of 2 * kPointerSize.
+ __ test(ebp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+ // Move all parts of the frame over one word. The frame consists of:
+ // unoptimized frame slots, alignment state, context, frame pointer, return
+ // address, receiver, and the arguments.
+ __ mov(ecx, Immediate(scope()->num_parameters() +
+ 5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ sub(Operand(ebp), Immediate(kPointerSize));
+ __ bind(&do_not_pad);
+ }
+
+ // Save the first local, which is overwritten by the alignment state.
+ Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
+ __ push(alignment_loc);
+
+ // Set the dynamic frame alignment state.
+ __ mov(alignment_loc, edx);
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 1);
+ __ sub(esp, Immediate((slots - 1) * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+ FlushX87StackIfNecessary(instr);
+}
+
+
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (instr->IsGoto()) {
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+ !instr->IsGap() && !instr->IsReturn()) {
+ if (instr->ClobbersDoubleRegisters(isolate())) {
+ if (instr->HasDoubleRegisterResult()) {
+ ASSERT_EQ(1, x87_stack_.depth());
+ } else {
+ ASSERT_EQ(0, x87_stack_.depth());
+ }
+ }
+ __ VerifyX87StackDepth(x87_stack_.depth());
+ }
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame;
+ if (jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
+ for (int i = 0; i < jump_table_.length(); i++) {
+ __ bind(&jump_table_[i].label);
+ Address entry = jump_table_[i].address;
+ Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
+ __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+ if (needs_frame.is_bound()) {
+ __ jmp(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push a PC inside the function so that the deopt code can find where
+ // the deopt comes from. It doesn't have to be the precise return
+ // address of a "calling" LAZY deopt, it only has to be somewhere
+ // inside the code body.
+ Label push_approx_pc;
+ __ call(&push_approx_pc);
+ __ bind(&push_approx_pc);
+        // Push the continuation which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 3 * kPointerSize));
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
+ }
+ } else {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ }
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ X87Stack copy(code->x87_stack());
+ x87_stack_ = copy;
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that esi isn't trashed.
+ __ push(ebp); // Caller's frame pointer.
+ __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ __ lea(ebp, Operand(esp, 2 * kPointerSize));
+ Comment(";;; Deferred code");
+ }
+ code->Generate();
+ if (NeedsDeferredFrame()) {
+ __ bind(code->done());
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
+ __ jmp(code->exit());
+ }
+ }
+
+ // Deferred code is the last part of the instruction sequence. Mark
+ // the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ if (!info()->IsStub()) {
+ // For lazy deoptimization we need space to patch a call after every call.
+ // Ensure there is always space for such patching, even if the code ends
+ // in a call.
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+ while (masm()->pc_offset() < target_offset) {
+ masm()->nop();
+ }
+ }
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+ return Register::FromAllocationIndex(index);
+}
+
+
+X87Register LCodeGen::ToX87Register(int index) const {
+ return X87Register::FromAllocationIndex(index);
+}
+
+
+void LCodeGen::X87LoadForUsage(X87Register reg) {
+ ASSERT(x87_stack_.Contains(reg));
+ x87_stack_.Fxch(reg);
+ x87_stack_.pop();
+}
+
+
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+ ASSERT(x87_stack_.Contains(reg1));
+ ASSERT(x87_stack_.Contains(reg2));
+ x87_stack_.Fxch(reg1, 1);
+ x87_stack_.Fxch(reg2);
+ x87_stack_.pop();
+ x87_stack_.pop();
+}
+
+
+void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg) && stack_depth_ > other_slot);
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
+ if (st != other_slot) {
+ int other_i = st2idx(other_slot);
+ X87Register other = stack_[other_i];
+ stack_[other_i] = reg;
+ stack_[i] = other;
+ if (st == 0) {
+ __ fxch(other_slot);
+ } else if (other_slot == 0) {
+ __ fxch(st);
+ } else {
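+      // Neither operand is at st(0): swap each with the top in turn; the
+      // final fxch restores st(0) itself.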
+ __ fxch(st);
+ __ fxch(other_slot);
+ __ fxch(st);
+ }
+ }
+}
+
+
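+// Converts between an index into stack_ (0 == bottom) and an x87 st() slot
+// (0 == top of the physical stack); the mapping is its own inverse.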
+int LCodeGen::X87Stack::st2idx(int pos) {
+ return stack_depth_ - pos - 1;
+}
+
+
+int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return i;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+bool LCodeGen::X87Stack::Contains(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return true;
+ }
+ return false;
+}
+
+
+void LCodeGen::X87Stack::Free(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg));
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
+ if (st > 0) {
+    // Keep track of how fstp(st) changes the order of the elements.
+ int tos_i = st2idx(0);
+ stack_[i] = stack_[tos_i];
+ }
+ pop();
+ __ fstp(st);
+}
+
+
+void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
+ if (x87_stack_.Contains(dst)) {
+ x87_stack_.Fxch(dst);
+ __ fstp(0);
+ } else {
+ x87_stack_.push(dst);
+ }
+ X87Fld(src, opts);
+}
+
+
+void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
+ ASSERT(!src.is_reg_only());
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fld_d(src);
+ break;
+ case kX87FloatOperand:
+ __ fld_s(src);
+ break;
+ case kX87IntOperand:
+ __ fild_s(src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
+ ASSERT(!dst.is_reg_only());
+ x87_stack_.Fxch(src);
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fst_d(dst);
+ break;
+ case kX87IntOperand:
+ __ fist_s(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
+ ASSERT(is_mutable_);
+ if (Contains(reg)) {
+ Free(reg);
+ }
+  // Mark this register as the next register to write to.
+ stack_[stack_depth_] = reg;
+}
+
+
+void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
+ ASSERT(is_mutable_);
+  // Assert the reg is prepared to write, but not on the virtual stack yet.
+ ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
+ stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
+ stack_depth_++;
+}
+
+
+void LCodeGen::X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result) {
+  // You need to use DefineSameAsFirst for x87 instructions.
+ ASSERT(result.is(left));
+ x87_stack_.Fxch(right, 1);
+ x87_stack_.Fxch(left);
+}
+
+
+void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
+ if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
+ bool double_inputs = instr->HasDoubleRegisterInput();
+
+    // Flush the stack from tos down, since Free() will mess with tos.
+ for (int i = stack_depth_-1; i >= 0; i--) {
+ X87Register reg = stack_[i];
+ // Skip registers which contain the inputs for the next instruction
+      // when flushing the stack.
+ if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
+ continue;
+ }
+ Free(reg);
+ if (i < stack_depth_-1) i++;
+ }
+ }
+ if (instr->IsReturn()) {
+ while (stack_depth_ > 0) {
+ __ fstp(0);
+ stack_depth_--;
+ }
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
+ }
+}
+
+
+void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
+ ASSERT(stack_depth_ <= 1);
+  // If this is ever used for new stubs producing two pairs of doubles joined
+  // into two phis, this assert will hit. That situation is not handled, since
+  // the two stacks might have st0 and st1 swapped.
+ if (current_block_id + 1 != goto_instr->block_id()) {
+ // If we have a value on the x87 stack on leaving a block, it must be a
+ // phi input. If the next block we compile is not the join block, we have
+ // to discard the stack state.
+ stack_depth_ = 0;
+ }
+}
+
+
+void LCodeGen::EmitFlushX87ForDeopt() {
+  // The deoptimizer does not support x87 registers. But as long as we only
+  // deopt from a stub it's not a problem, since we will re-materialize the
+  // original stub inputs, which can't be double registers.
+ ASSERT(info()->IsStub());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ pushfd();
+ __ VerifyX87StackDepth(x87_stack_.depth());
+ __ popfd();
+ }
+ for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ ASSERT(op->IsRegister());
+ return ToRegister(op->index());
+}
+
+
+X87Register LCodeGen::ToX87Register(LOperand* op) const {
+ ASSERT(op->IsDoubleRegister());
+ return ToX87Register(op->index());
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
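+ // On ia32 a Smi keeps the integer in the upper 31 bits over a zero tag
+ // bit, so the tagged value is just the input shifted left by one and still
+ // fits in an int32_t.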
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+
+ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasExternalReferenceValue());
+ return constant->ExternalReferenceValue();
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
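+// With no eager frame, esp points at the return address and the incoming
+// arguments sit above it, so parameter index -1 maps to esp + kPCOnStackSize
+// and each more negative index to one pointer-size slot higher.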
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+ if (op->IsRegister()) return Operand(ToRegister(op));
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()));
+ } else {
+ // Without an eager frame, retrieve the parameter relative to the stack
+ // pointer.
+ return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+
+Operand LCodeGen::HighOperand(LOperand* op) {
+ ASSERT(op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+ // Without an eager frame, retrieve the parameter relative to the stack
+ // pointer.
+ return Operand(
+ esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
+ }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ ASSERT(instr != NULL);
+ __ call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ if (code->kind() == Code::BINARY_OP_IC ||
+ code->kind() == Code::COMPARE_IC) {
+ __ nop();
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* fun,
+ int argc,
+ LInstruction* instr) {
+ ASSERT(instr != NULL);
+ ASSERT(instr->HasPointerMap());
+
+ __ CallRuntime(fun, argc);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+ ASSERT(info()->is_calling());
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ if (!ToRegister(context).is(esi)) {
+ __ mov(esi, ToRegister(context));
+ }
+ } else if (context->IsStackSlot()) {
+ __ mov(esi, ToOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
+
+ __ CallRuntime(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+ ASSERT(info()->is_calling());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(
+ LEnvironment* environment, Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
+ if (!environment->HasBeenRegistered()) {
+ // Physical stack frame layout:
+ // -x ............. -4 0 ..................................... y
+ // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+ // Layout of the environment:
+ // 0 ..................................................... size-1
+ // [parameters] [locals] [expression stack including arguments]
+
+ // Layout of the translation:
+ // 0 ........................................................ size - 1 + 4
+ // [expression stack including arguments] [locals] [4 words] [parameters]
+ // |>------------ translation_size ------------<|
+
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type) {
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ ASSERT(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ return;
+ }
+
+ if (DeoptEveryNTimes()) {
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+ __ pushfd();
+ __ push(eax);
+ __ mov(eax, Operand::StaticVariable(count));
+ __ sub(eax, Immediate(1));
+ __ j(not_zero, &no_deopt, Label::kNear);
+ if (FLAG_trap_on_deopt) __ int3();
+ __ mov(eax, Immediate(FLAG_deopt_every_n_times));
+ __ mov(Operand::StaticVariable(count), eax);
+ __ pop(eax);
+ __ popfd();
+ ASSERT(frame_is_built_);
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&no_deopt);
+ __ mov(Operand::StaticVariable(count), eax);
+ __ pop(eax);
+ __ popfd();
+ }
+
+ // Before instructions which can deopt, we normally flush the x87 stack.
+ // But the current instruction can still have inputs or outputs on it, so
+ // flush them from the physical stack here to leave it in a consistent
+ // state.
+ if (x87_stack_.depth() > 0) {
+ Label done;
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+ EmitFlushX87ForDeopt();
+ __ bind(&done);
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ Label done;
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+ if (cc == no_condition && frame_is_built_) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ // We often have several deopts to the same entry; reuse the last jump
+ // entry if this is the case.
+ if (jump_table_.is_empty() ||
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().bailout_type != bailout_type) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
+ jump_table_.Add(table_entry, zone());
+ }
+ if (cc == no_condition) {
+ __ jmp(&jump_table_.last().label);
+ } else {
+ __ j(cc, &jump_table_.last().label);
+ }
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(cc, environment, bailout_type);
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+ Handle<DeoptimizationInputData> data =
+ DeoptimizationInputData::New(isolate(), length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+ code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal, zone());
+ return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length();
+ i < length;
+ i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+ LInstruction* instr, SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ ASSERT(kind == expected_safepoint_kind_);
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint =
+ safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
+ __ bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+ resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) DoParallelMove(move);
+ }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
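+ // For example, with divisor == 8 the mask is 7: a dividend of -13 is
+ // negated to 13, masked to 5 and negated back to -5, matching the
+ // truncating semantics of % (-13 % 8 == -5).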
+ HMod* hmod = instr->hydrogen();
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ test(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ neg(dividend);
+ __ and_(dividend, mask);
+ __ neg(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ jmp(&done, Label::kNear);
+ }
+
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, mask);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
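+ // Compute dividend - trunc(dividend / |divisor|) * |divisor|: TruncatingDiv
+ // leaves the truncated quotient in edx; scaling it back and subtracting it
+ // from the dividend leaves the remainder (whose sign follows the dividend)
+ // in eax.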
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imul(edx, edx, Abs(divisor));
+ __ mov(eax, dividend);
+ __ sub(eax, edx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmp(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(eax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(eax));
+ ASSERT(!right_reg.is(edx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(edx));
+
+ Label done;
+ // Check for x % 0; idiv would signal a divide error. We have to deopt in
+ // this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(right_reg, Operand(right_reg));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1; idiv would signal a divide error. We have to
+ // deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmp(left_reg, kMinInt);
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ cmp(right_reg, -1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Move(result_reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &positive_left, Label::kNear);
+ __ idiv(right_reg);
+ __ test(result_reg, Operand(result_reg));
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
+ }
+ __ idiv(right_reg);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, kMinInt);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ test(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
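+ // For negative dividends, add a bias of 2^shift - 1 (extracted from the
+ // sign via sar/shr) before shifting, so that the arithmetic shift truncates
+ // towards zero instead of -infinity: e.g. -7 / 4 becomes
+ // (-7 + 3) >> 2 == -1 rather than -7 >> 2 == -2.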
+ if (shift > 0) {
+ // The arithmetic shift is always OK; the 'if' is an optimization only.
+ if (shift > 1) __ sar(result, 31);
+ __ shr(result, 32 - shift);
+ __ add(result, dividend);
+ __ sar(result, shift);
+ }
+ if (divisor < 0) __ neg(result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+
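+ // Unless all uses truncate, check that the division was exact by
+ // multiplying the quotient back: the sub sets ZF only if
+ // quotient * divisor recovers the original dividend.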
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(eax, edx);
+ __ imul(eax, eax, divisor);
+ __ sub(eax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ ASSERT(dividend.is(eax));
+ ASSERT(remainder.is(edx));
+ ASSERT(ToRegister(instr->result()).is(eax));
+ ASSERT(!divisor.is(eax));
+ ASSERT(!divisor.is(edx));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(divisor, divisor);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ test(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ test(divisor, divisor);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmp(dividend, kMinInt);
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmp(divisor, -1);
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&dividend_not_min_int);
+ }
+
+ // Sign extend to edx (= remainder).
+ __ cdq();
+ __ idiv(divisor);
+
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ // Deoptimize if remainder is not 0.
+ __ test(remainder, remainder);
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sar(dividend, shift);
+ return;
+ }
+
+ // If the divisor is negative, we have to negate and handle edge cases.
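+ // After the negation, floor(n / -2^shift) equals (-n) >> shift with an
+ // arithmetic shift, so a single sar finishes the division once the -0 and
+ // kMinInt cases have been dealt with.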
+ __ neg(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ return;
+ }
+
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ sar(dividend, shift);
+ return;
+ }
+
+ Label not_kmin_int, done;
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ __ mov(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
+ __ bind(&not_kmin_int);
+ __ sar(dividend, shift);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
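+ // E.g. for dividend == -7, divisor == 3 the truncating quotient would be
+ // -2; biasing the dividend towards zero first (-7 + 1 == -6, quotient -2)
+ // and then decrementing yields floor(-7 / 3) == -3.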
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ dec(edx);
+ __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(eax));
+ ASSERT(remainder.is(edx));
+ ASSERT(result.is(eax));
+ ASSERT(!divisor.is(eax));
+ ASSERT(!divisor.is(edx));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(divisor, divisor);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ test(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ test(divisor, divisor);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmp(dividend, kMinInt);
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmp(divisor, -1);
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&dividend_not_min_int);
+ }
+
+ // Sign extend to edx (= remainder).
+ __ cdq();
+ __ idiv(divisor);
+
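+ // Truncation and flooring differ only when the remainder is non-zero and
+ // dividend and divisor have opposite signs. remainder ^ divisor moves that
+ // sign difference into the sign bit, sar 31 turns it into 0 or -1, and the
+ // add lowers the quotient by one exactly in the opposite-sign case, e.g.
+ // -7 / 2: idiv yields -3 remainder -1, adjusted to floor(-7 / 2) == -4.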
+ Label done;
+ __ test(remainder, remainder);
+ __ j(zero, &done, Label::kNear);
+ __ xor_(remainder, divisor);
+ __ sar(remainder, 31);
+ __ add(result, remainder);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register left = ToRegister(instr->left());
+ LOperand* right = instr->right();
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ mov(ToRegister(instr->temp()), left);
+ }
+
+ if (right->IsConstantOperand()) {
+ // Try strength reductions on the multiplication.
+ // All replacement instructions are at most as long as the imul
+ // and have better latency.
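+ // E.g. the lea forms below compute left + left * scale in one
+ // flag-preserving instruction, multiplying by 3, 5 or 9.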
+ int constant = ToInteger32(LConstantOperand::cast(right));
+ if (constant == -1) {
+ __ neg(left);
+ } else if (constant == 0) {
+ __ xor_(left, Operand(left));
+ } else if (constant == 2) {
+ __ add(left, Operand(left));
+ } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // If we know that the multiplication can't overflow, it's safe to
+ // use instructions that don't set the overflow flag for the
+ // multiplication.
+ switch (constant) {
+ case 1:
+ // Do nothing.
+ break;
+ case 3:
+ __ lea(left, Operand(left, left, times_2, 0));
+ break;
+ case 4:
+ __ shl(left, 2);
+ break;
+ case 5:
+ __ lea(left, Operand(left, left, times_4, 0));
+ break;
+ case 8:
+ __ shl(left, 3);
+ break;
+ case 9:
+ __ lea(left, Operand(left, left, times_8, 0));
+ break;
+ case 16:
+ __ shl(left, 4);
+ break;
+ default:
+ __ imul(left, left, constant);
+ break;
+ }
+ } else {
+ __ imul(left, left, constant);
+ }
+ } else {
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(left);
+ }
+ __ imul(left, ToOperand(right));
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Bail out if the result is supposed to be negative zero.
+ Label done;
+ __ test(left, Operand(left));
+ __ j(not_zero, &done, Label::kNear);
+ if (right->IsConstantOperand()) {
+ if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
+ __ cmp(ToRegister(instr->temp()), Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ }
+ } else {
+ // Test the non-zero operand for negative sign.
+ __ or_(ToRegister(instr->temp()), ToOperand(right));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+
+ if (right->IsConstantOperand()) {
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ and_(ToRegister(left), right_operand);
+ break;
+ case Token::BIT_OR:
+ __ or_(ToRegister(left), right_operand);
+ break;
+ case Token::BIT_XOR:
+ if (right_operand == int32_t(~0)) {
+ __ not_(ToRegister(left));
+ } else {
+ __ xor_(ToRegister(left), right_operand);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ and_(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_OR:
+ __ or_(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_XOR:
+ __ xor_(ToRegister(left), ToOperand(right));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+ if (right->IsRegister()) {
+ ASSERT(ToRegister(right).is(ecx));
+
+ switch (instr->op()) {
+ case Token::ROR:
+ __ ror_cl(ToRegister(left));
+ if (instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ break;
+ case Token::SAR:
+ __ sar_cl(ToRegister(left));
+ break;
+ case Token::SHR:
+ __ shr_cl(ToRegister(left));
+ if (instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ break;
+ case Token::SHL:
+ __ shl_cl(ToRegister(left));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ int value = ToInteger32(LConstantOperand::cast(right));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count == 0 && instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ } else {
+ __ ror(ToRegister(left), shift_count);
+ }
+ break;
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ sar(ToRegister(left), shift_count);
+ }
+ break;
+ case Token::SHR:
+ if (shift_count != 0) {
+ __ shr(ToRegister(left), shift_count);
+ } else if (instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
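+ // Perform the last of the shift_count steps via SmiTag (an add of the
+ // register to itself), which reliably sets the overflow flag if a
+ // significant bit is shifted out, unlike a multi-bit shl.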
+ if (shift_count != 1) {
+ __ shl(ToRegister(left), shift_count - 1);
+ }
+ __ SmiTag(ToRegister(left));
+ DeoptimizeIf(overflow, instr->environment());
+ } else {
+ __ shl(ToRegister(left), shift_count);
+ }
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+
+ if (right->IsConstantOperand()) {
+ __ sub(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
+ } else {
+ __ sub(ToRegister(left), ToOperand(right));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ double v = instr->value();
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ ASSERT(instr->result()->IsDoubleRegister());
+
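+ // Materialize the double from its raw bit image: pushing the upper half
+ // first leaves the 64-bit value little-endian on the stack, where X87Mov
+ // loads it with fld_d.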
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ X87Register reg = ToX87Register(instr->result());
+ X87Mov(reg, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Register reg = ToRegister(instr->result());
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ LoadObject(reg, object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLength(result, map);
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->date());
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp());
+ Smi* index = instr->index();
+ Label runtime, done;
+ ASSERT(object.is(result));
+ ASSERT(object.is(eax));
+
+ __ test(object, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ if (index->value() == 0) {
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch, Operand::StaticVariable(stamp));
+ __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ j(not_equal, &runtime, Label::kNear);
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ mov(Operand(esp, 0), object);
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+}
+
+
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToRepresentation(LConstantOperand::cast(index),
+ Representation::Integer32());
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldOperand(string, SeqString::kHeaderSize + offset);
+ }
+ return FieldOperand(
+ string, ToRegister(index),
+ encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+ SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register result = ToRegister(instr->result());
+ Register string = ToRegister(instr->string());
+
+ if (FLAG_debug_code) {
+ __ push(string);
+ __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
+
+ __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, kUnexpectedStringType);
+ __ pop(string);
+ }
+
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ movzx_b(result, operand);
+ } else {
+ __ movzx_w(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+
+ if (FLAG_debug_code) {
+ Register value = ToRegister(instr->value());
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (instr->value()->IsConstantOperand()) {
+ int value = ToRepresentation(LConstantOperand::cast(instr->value()),
+ Representation::Integer32());
+ ASSERT_LE(0, value);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ ASSERT_LE(value, String::kMaxOneByteCharCode);
+ __ mov_b(operand, static_cast<int8_t>(value));
+ } else {
+ ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+ __ mov_w(operand, static_cast<int16_t>(value));
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov_w(operand, value);
+ }
+ }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+
+ if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+ if (right->IsConstantOperand()) {
+ int32_t offset = ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
+ __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+ } else {
+ Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+ __ lea(ToRegister(instr->result()), address);
+ }
+ } else {
+ if (right->IsConstantOperand()) {
+ __ add(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
+ } else {
+ __ add(ToRegister(left), ToOperand(right));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Label return_left;
+ Condition condition = (operation == HMathMinMax::kMathMin)
+ ? less_equal
+ : greater_equal;
+ if (right->IsConstantOperand()) {
+ Operand left_op = ToOperand(left);
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
+ instr->hydrogen()->representation());
+ __ cmp(left_op, immediate);
+ __ j(condition, &return_left, Label::kNear);
+ __ mov(left_op, immediate);
+ } else {
+ Register left_reg = ToRegister(left);
+ Operand right_op = ToOperand(right);
+ __ cmp(left_reg, right_op);
+ __ j(condition, &return_left, Label::kNear);
+ __ mov(left_reg, right_op);
+ }
+ __ bind(&return_left);
+ } else {
+ // TODO(weiliang) use X87 for double representation.
+ UNIMPLEMENTED();
+ }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ X87Register left = ToX87Register(instr->left());
+ X87Register right = ToX87Register(instr->right());
+ X87Register result = ToX87Register(instr->result());
+ if (instr->op() != Token::MOD) {
+ X87PrepareBinaryOp(left, right, result);
+ }
+ switch (instr->op()) {
+ case Token::ADD:
+ __ fadd_i(1);
+ break;
+ case Token::SUB:
+ __ fsub_i(1);
+ break;
+ case Token::MUL:
+ __ fmul_i(1);
+ break;
+ case Token::DIV:
+ __ fdiv_i(1);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ X87Mov(Operand(esp, 1 * kDoubleSize), right);
+ X87Mov(Operand(esp, 0), left);
+ X87Free(right);
+ ASSERT(left.is(result));
+ X87PrepareToWrite(result);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+
+ if (right_block == left_block || cc == no_condition) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ __ jmp(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
+ int false_block = instr->FalseDestination(chunk_);
+ if (cc == no_condition) {
+ __ jmp(chunk_->GetAssemblyLabel(false_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(false_block));
+ }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsSmiOrInteger32()) {
+ Register reg = ToRegister(instr->value());
+ __ test(reg, Operand(reg));
+ EmitBranch(instr, not_zero);
+ } else if (r.IsDouble()) {
+ UNREACHABLE();
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
+ __ cmp(reg, factory()->true_value());
+ EmitBranch(instr, equal);
+ } else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ __ test(reg, Operand(reg));
+ EmitBranch(instr, not_equal);
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, no_condition);
+ } else if (type.IsHeapNumber()) {
+ UNREACHABLE();
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ EmitBranch(instr, not_equal);
+ } else {
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ cmp(reg, factory()->undefined_value());
+ __ j(equal, instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // true -> true.
+ __ cmp(reg, factory()->true_value());
+ __ j(equal, instr->TrueLabel(chunk_));
+ // false -> false.
+ __ cmp(reg, factory()->false_value());
+ __ j(equal, instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ cmp(reg, factory()->null_value());
+ __ j(equal, instr->FalseLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all others -> true.
+ __ test(reg, Operand(reg));
+ __ j(equal, instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ test(reg, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ Register map = no_reg; // Keep the compiler happy.
+ if (expected.NeedsMap()) {
+ map = ToRegister(instr->temp());
+ ASSERT(!map.is(reg));
+ __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
+
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, instr->FalseLabel(chunk_));
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string, Label::kNear);
+ __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ j(not_zero, instr->TrueLabel(chunk_));
+ __ jmp(instr->FalseLabel(chunk_));
+ __ bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CmpInstanceType(map, SYMBOL_TYPE);
+ __ j(equal, instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number;
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ fldz();
+ __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ __ FCmp();
+ __ j(zero, instr->FalseLabel(chunk_));
+ __ jmp(instr->TrueLabel(chunk_));
+ __ bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ }
+ }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+ if (!IsNextEmittedBlock(block)) {
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+
+void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = no_condition;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = equal;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? below : less;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? above : greater;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? below_equal : less_equal;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? above_equal : greater_equal;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
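+ // Doubles use the unsigned condition codes: FCmp transfers the x87 result
+ // into EFLAGS in the same pattern an unsigned integer compare produces
+ // (with the parity flag signalling NaN), so below/above are the correct
+ // branches.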
+ bool is_unsigned =
+ instr->is_double() ||
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cc = TokenToCondition(instr->op(), is_unsigned);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ X87LoadForUsage(ToX87Register(right), ToX87Register(left));
+ __ FCmp();
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the false block.
+ __ j(parity_even, instr->FalseLabel(chunk_));
+ } else {
+ if (right->IsConstantOperand()) {
+ __ cmp(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
+ } else if (left->IsConstantOperand()) {
+ __ cmp(ToOperand(right),
+ ToImmediate(left, instr->hydrogen()->representation()));
+ // We commuted the operands, so commute the condition.
+ cc = CommuteCondition(cc);
+ } else {
+ __ cmp(ToRegister(left), ToOperand(right));
+ }
+ }
+ EmitBranch(instr, cc);
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+
+ if (instr->right()->IsConstantOperand()) {
+ Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
+ __ CmpObject(left, right);
+ } else {
+ Operand right = ToOperand(instr->right());
+ __ cmp(left, right);
+ }
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ cmp(input_reg, factory()->the_hole_value());
+ EmitBranch(instr, equal);
+ return;
+ }
+
+ // Put the value on top of the x87 stack.
+ X87Register src = ToX87Register(instr->object());
+ X87LoadForUsage(src);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+ Label ok;
+ __ j(parity_even, &ok, Label::kNear);
+ __ fstp(0);
+ EmitFalseBranch(instr, no_condition);
+ __ bind(&ok);
+
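+ // The hole is a NaN with a distinguished upper word. Spill the value as a
+ // double, pop the stack slot again and compare only the upper 32 bits of
+ // the just-written double (still in memory below esp) against
+ // kHoleNanUpper32.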
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+
+ __ add(esp, Immediate(kDoubleSize));
+ int offset = sizeof(kHoleNanUpper32);
+ __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+
+ if (rep.IsDouble()) {
+ UNREACHABLE();
+ } else {
+ Register value = ToRegister(instr->value());
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
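+ // -0 is the heap number with upper word 0x80000000 and a zero mantissa.
+ // cmp x, 1 sets the overflow flag only for x == 0x80000000 (kMinInt), so
+ // no_overflow cheaply rules out every other upper word.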
+ __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
+ __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
+ Immediate(0x00000000));
+ EmitBranch(instr, equal);
+ }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+ Register temp1,
+ Label* is_not_object,
+ Label* is_object) {
+ __ JumpIfSmi(input, is_not_object);
+
+ __ cmp(input, isolate()->factory()->null_value());
+ __ j(equal, is_object);
+
+ __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, is_not_object);
+
+ __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(below, is_not_object);
+ __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ return below_equal;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ Condition true_cond = EmitIsObject(
+ reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
+
+ EmitBranch(instr, true_cond);
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+
+ Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+ return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+ Condition true_cond = EmitIsString(
+ reg, temp, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Operand input = ToOperand(instr->value());
+
+ __ test(input, Immediate(kSmiTagMask));
+ EmitBranch(instr, zero);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ EmitBranch(instr, not_zero);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return equal;
+ case Token::LT:
+ return less;
+ case Token::GT:
+ return greater;
+ case Token::LTE:
+ return less_equal;
+ case Token::GTE:
+ return greater_equal;
+ default:
+ UNREACHABLE();
+ return no_condition;
+ }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = ComputeCompareCondition(op);
+ __ test(eax, Operand(eax));
+
+ EmitBranch(instr, condition);
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT(from == to || to == LAST_TYPE);
+ return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return equal;
+ if (to == LAST_TYPE) return above_equal;
+ if (from == FIRST_TYPE) return below_equal;
+ UNREACHABLE();
+ return equal;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+
+ __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ __ mov(result, FieldOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+
+ __ test(FieldOperand(input, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ EmitBranch(instr, equal);
+}
+
+
+// Branches to a label or falls through with the answer in the z flag. Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+ Label* is_false,
+ Handle<String> class_name,
+ Register input,
+ Register temp,
+ Register temp2) {
+ ASSERT(!input.is(temp));
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp.is(temp2));
+ __ JumpIfSmi(input, is_false);
+
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+ __ j(equal, is_true);
+ __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+ __ j(equal, is_true);
+ } else {
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do a signed compare with the width of the type range.
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ j(above, is_false);
+ }
+
+ // Now we are in the range from FIRST_NONCALLABLE_SPEC_OBJECT_TYPE to
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE.
+ // Check if the constructor in the map is a function.
+ __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+ // Objects with a non-function constructor have class 'Object'.
+ __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ __ j(not_equal, is_true);
+ } else {
+ __ j(not_equal, is_false);
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(temp, FieldOperand(temp,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ __ cmp(temp, class_name);
+ // End with the answer in the z flag.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
+
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ // Object and function are in fixed registers defined by the stub.
+ ASSERT(ToRegister(instr->context()).is(esi));
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+ Label true_value, done;
+ __ test(eax, Operand(eax));
+ __ j(zero, &true_value, Label::kNear);
+ __ mov(ToRegister(instr->result()), factory()->false_value());
+ __ jmp(&done, Label::kNear);
+ __ bind(&true_value);
+ __ mov(ToRegister(instr->result()), factory()->true_value());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ Label* map_check() { return &map_check_; }
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
+
+ Label done, false_result;
+ Register object = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &false_result, Label::kNear);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ Label cache_miss;
+ Register map = ToRegister(instr->temp());
+ __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
+ __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
+ __ j(not_equal, &cache_miss, Label::kNear);
+ __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
+ __ jmp(&done, Label::kNear);
+
+ // The inlined call site cache did not match. Check for null and string
+ // before calling the deferred code.
+ __ bind(&cache_miss);
+ // Null is not an instance of anything.
+ __ cmp(object, factory()->null_value());
+ __ j(equal, &false_result, Label::kNear);
+
+ // String values are not instances of anything.
+ Condition is_string = masm_->IsObjectStringType(object, temp, temp);
+ __ j(is_string, &false_result, Label::kNear);
+
+ // Go to the deferred code.
+ __ jmp(deferred->entry());
+
+ __ bind(&false_result);
+ __ mov(ToRegister(instr->result()), factory()->false_value());
+
+ // Here result has either true or false. Deferred code also produces true or
+ // false object.
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ PushSafepointRegistersScope scope(this);
+
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ InstanceofStub stub(isolate(), flags);
+
+ // Get the temp register reserved by the instruction. This needs to be a
+ // register which is pushed last by PushSafepointRegisters, as the top of
+ // the stack is used to pass the offset of the map check site to the stub.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
+ __ LoadHeapObject(InstanceofStub::right(), instr->function());
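+ // The stub locates the patchable map-check cmp relative to its return
+ // address; kAdditionalDelta accounts for the instructions emitted between
+ // this point and the call below.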
+ static const int kAdditionalDelta = 13;
+ int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
+ __ mov(temp, Immediate(delta));
+ __ StoreToSafepointRegisterSlot(temp, temp);
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ // Get the deoptimization index of the LLazyBailout-environment that
+ // corresponds to this instruction.
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+
+ // Put the result value into the eax slot and restore all registers.
+ __ StoreToSafepointRegisterSlot(eax, eax);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = ComputeCompareCondition(op);
+ Label true_value, done;
+ __ test(eax, Operand(eax));
+ __ j(condition, &true_value, Label::kNear);
+ __ mov(ToRegister(instr->result()), factory()->false_value());
+ __ jmp(&done, Label::kNear);
+ __ bind(&true_value);
+ __ mov(ToRegister(instr->result()), factory()->true_value());
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
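+ // One extra word for the return address, plus one more for the alignment
+ // padding slot when dynamic frame alignment is in effect.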
+ int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ if (dynamic_frame_alignment && FLAG_debug_code) {
+ __ cmp(Operand(esp,
+ (parameter_count + extra_value_count) * kPointerSize),
+ Immediate(kAlignmentZapValue));
+ __ Assert(equal, kExpectedAlignmentMarker);
+ }
+ __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi.
+ __ SmiUntag(reg);
+ Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
+ if (dynamic_frame_alignment && FLAG_debug_code) {
+ ASSERT(extra_value_count == 2);
+ __ cmp(Operand(esp, reg, times_pointer_size,
+ extra_value_count * kPointerSize),
+ Immediate(kAlignmentZapValue));
+ __ Assert(equal, kExpectedAlignmentMarker);
+ }
+
+ // Emit code to restore the stack based on instr->parameter_count().
+ __ pop(return_addr_reg); // Save the return address.
+ if (dynamic_frame_alignment) {
+ __ inc(reg); // 1 more for alignment
+ }
+ __ shl(reg, kPointerSizeLog2);
+ __ add(esp, reg);
+ __ jmp(return_addr_reg);
+ }
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Preserve the return value on the stack and rely on the runtime call
+ // to return the value in the same register. We're leaving the code
+ // managed by the register allocator and tearing down the frame, it's
+ // safe to write to the context register.
+ __ push(eax);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ if (dynamic_frame_alignment_) {
+ // Fetch the state of the dynamic frame alignment.
+ __ mov(edx, Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
+ }
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ no_frame_start = masm_->pc_offset();
+ }
+ if (dynamic_frame_alignment_) {
+ Label no_padding;
+ __ cmp(edx, Immediate(kNoAlignmentPadding));
+ __ j(equal, &no_padding, Label::kNear);
+
+ EmitReturn(instr, true);
+ __ bind(&no_padding);
+ }
+
+ EmitReturn(instr, false);
+ if (no_frame_start != -1) {
+ info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->global_object()).is(edx));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ mov(ecx, instr->name());
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->value());
+ Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
+
+ // If the cell we are storing to contains the hole, it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ // Store the value.
+ __ mov(Operand::ForCell(cell_handle), value);
+ // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ mov(result, ContextOperand(context, instr->slot_index()));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(result, factory()->the_hole_value());
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ Label is_not_hole;
+ __ j(not_equal, &is_not_hole, Label::kNear);
+ __ mov(result, factory()->undefined_value());
+ __ bind(&is_not_hole);
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+
+ Label skip_assignment;
+
+ Operand target = ContextOperand(context, instr->slot_index());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(target, factory()->the_hole_value());
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &skip_assignment, Label::kNear);
+ }
+ }
+
+ __ mov(target, value);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ Register temp = ToRegister(instr->temp());
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWriteContextSlot(context,
+ offset,
+ value,
+ temp,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+
+ __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(ToExternalReference(
+ LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ __ Load(result, operand, access.representation());
+ return;
+ }
+
+ Register object = ToRegister(instr->object());
+ if (instr->hydrogen()->representation().IsDouble()) {
+ X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ object = result;
+ }
+ __ Load(result, FieldOperand(object, offset), access.representation());
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+ ASSERT(!operand->IsDoubleRegister());
+ if (operand->IsConstantOperand()) {
+ Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+ AllowDeferredHandleDereference smi_check;
+ if (object->IsSmi()) {
+ __ Push(Handle<Smi>::cast(object));
+ } else {
+ __ PushHeapObject(Handle<HeapObject>::cast(object));
+ }
+ } else if (operand->IsRegister()) {
+ __ push(ToRegister(operand));
+ } else {
+ __ push(ToOperand(operand));
+ }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->object()).is(edx));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ mov(ecx, instr->name());
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register function = ToRegister(instr->function());
+ Register temp = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+
+ // Check that the function really is a function.
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ // Check whether the function has an instance prototype.
+ Label non_instance;
+ __ test_b(FieldOperand(result, Map::kBitFieldOffset),
+ 1 << Map::kHasNonInstancePrototype);
+ __ j(not_zero, &non_instance, Label::kNear);
+
+ // Get the prototype or initial map from the function.
+ __ mov(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
+ DeoptimizeIf(equal, instr->environment());
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CmpObjectType(result, MAP_TYPE, temp);
+ __ j(not_equal, &done, Label::kNear);
+
+ // Get the prototype from the initial map.
+ __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ jmp(&done, Label::kNear);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in the function's map.
+ __ bind(&non_instance);
+ __ mov(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int index = (const_length - const_index) + 1;
+ __ mov(result, Operand(arguments, index * kPointerSize));
+ } else {
+ Register length = ToRegister(instr->length());
+ Operand index = ToOperand(instr->index());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; add one more.
+ __ sub(length, index);
+ __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ instr->base_offset()));
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ X87Mov(ToX87Register(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ movsx_b(result, operand);
+ break;
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ movzx_b(result, operand);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ movsx_w(result, operand);
+ break;
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ movzx_w(result, operand);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ mov(result, operand);
+ break;
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ mov(result, operand);
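+ // A uint32 with the sign bit set is not representable as an int32;
+ // unless the instruction is marked kUint32, deoptimize on such values.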
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ test(result, Operand(result));
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ if (instr->hydrogen()->RequiresHoleCheck()) {
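+ // The hole is a NaN identified by its upper 32 bits, so read the word
+ // past the lower half (sizeof(kHoleNanLower32) bytes in) and compare it
+ // against kHoleNanUpper32.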
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ instr->base_offset() + sizeof(kHoleNanLower32));
+ __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ Operand double_load_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ instr->base_offset());
+ X87Mov(ToX87Register(instr->result()), double_load_operand);
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ Register result = ToRegister(instr->result());
+
+ // Load the result.
+ __ mov(result,
+ BuildFastArrayOperand(instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ instr->base_offset()));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_typed_elements()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+ LOperand* elements_pointer,
+ LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind,
+ uint32_t base_offset) {
+ Register elements_pointer_reg = ToRegister(elements_pointer);
+ int element_shift_size = ElementsKindToShiftSize(elements_kind);
+ int shift_size = element_shift_size;
+ if (key->IsConstantOperand()) {
+ int constant_value = ToInteger32(LConstantOperand::cast(key));
+ if (constant_value & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ return Operand(elements_pointer_reg,
+ ((constant_value) << shift_size)
+ + base_offset);
+ } else {
+ // Take the tag bit into account while computing the shift size.
+ if (key_representation.IsSmi() && (shift_size >= 1)) {
+ shift_size -= kSmiTagSize;
+ }
+ ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+ return Operand(elements_pointer_reg,
+ ToRegister(key),
+ scale_factor,
+ base_offset);
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->object()).is(edx));
+ ASSERT(ToRegister(instr->key()).is(ecx));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ __ lea(result, Operand(esp, -2 * kPointerSize));
+ } else {
+ // Check for arguments adapter frame.
+ Label done, adapted;
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(result),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted, Label::kNear);
+
+ // No arguments adaptor frame.
+ __ mov(result, Operand(ebp));
+ __ jmp(&done, Label::kNear);
+
+ // Arguments adaptor frame present.
+ __ bind(&adapted);
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Operand elem = ToOperand(instr->elements());
+ Register result = ToRegister(instr->result());
+
+ Label done;
+
+ // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ cmp(ebp, elem);
+ __ mov(result, Immediate(scope()->num_parameters()));
+ __ j(equal, &done, Label::kNear);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(result, Operand(result,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(result);
+
+ // Argument length is in result register.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label receiver_ok, global_object;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ Register scratch = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &receiver_ok, dist);
+
+ // Do not transform the receiver to object for builtins.
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &receiver_ok, dist);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ cmp(receiver, factory()->null_value());
+ __ j(equal, &global_object, Label::kNear);
+ __ cmp(receiver, factory()->undefined_value());
+ __ j(equal, &global_object, Label::kNear);
+
+ // The receiver should be a JS object.
+ __ test(receiver, Immediate(kSmiTagMask));
+ DeoptimizeIf(equal, instr->environment());
+ __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
+ DeoptimizeIf(below, instr->environment());
+
+ __ jmp(&receiver_ok, Label::kNear);
+ __ bind(&global_object);
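+ // Replace the receiver with the global receiver (the global proxy),
+ // fetched from the function's context.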
+ __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
+ const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ mov(receiver, Operand(receiver, global_offset));
+ const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
+ __ mov(receiver, FieldOperand(receiver, receiver_offset));
+ __ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ ASSERT(receiver.is(eax)); // Used for parameter count.
+ ASSERT(function.is(edi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
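+ // Deoptimize if there are too many arguments to push inline; the limit
+ // guards against overflowing the stack.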
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ cmp(length, kArgumentsLimit);
+ DeoptimizeIf(above, instr->environment());
+
+ __ push(receiver);
+ __ mov(receiver, length);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ test(length, Operand(length));
+ __ j(zero, &invoke, Label::kNear);
+ __ bind(&loop);
+ __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+ __ dec(length);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(eax);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ int3();
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ EmitPushTaggedOperand(argument);
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+ __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in esi.
+ ASSERT(result.is(esi));
+ }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ __ push(esi); // The context is the first argument.
+ __ push(Immediate(instr->hydrogen()->pairs()));
+ __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ EDIState edi_state) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+
+ if (can_invoke_directly) {
+ if (edi_state == EDI_UNINITIALIZED) {
+ __ LoadHeapObject(edi, function);
+ }
+
+ // Change context.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Set eax to the argument count if adaptation is not needed. Assumes
+ // that eax is available to write to at this point.
+ if (dont_adapt_arguments) {
+ __ mov(eax, arity);
+ }
+
+ // Invoke function directly.
+ if (function.is_identical_to(info()->closure())) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ // We need to adapt arguments.
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(Operand(target)));
+ __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ mov(eax, instr->arity());
+ }
+
+ // Change context.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ bool is_self_call = false;
+ if (instr->hydrogen()->function()->IsConstant()) {
+ HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+ Handle<JSFunction> jsfun =
+ Handle<JSFunction>::cast(fun_const->handle(isolate()));
+ is_self_call = jsfun.is_identical_to(info()->closure());
+ }
+
+ if (is_self_call) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ Register input_reg = ToRegister(instr->value());
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ DeoptimizeIf(not_equal, instr->environment());
+
+ Label slow, allocated, done;
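+ // Pick two scratch registers that are distinct from input_reg and from
+ // each other.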
+ Register tmp = input_reg.is(eax) ? ecx : eax;
+ Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it. We do not need to patch the stack since |input| and
+ // |result| are the same register and |input| will be restored
+ // unchanged by popping safepoint registers.
+ __ test(tmp, Immediate(HeapNumber::kSignMask));
+ __ j(zero, &done, Label::kNear);
+
+ __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
+ __ jmp(&allocated, Label::kNear);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
+ instr, instr->context());
+ // Set the pointer to the new heap number in tmp.
+ if (!tmp.is(eax)) __ mov(tmp, eax);
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+ __ bind(&allocated);
+ __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ and_(tmp2, ~HeapNumber::kSignMask);
+ __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
+ __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
+ __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+ Register input_reg = ToRegister(instr->value());
+ __ test(input_reg, Operand(input_reg));
+ Label is_positive;
+ __ j(not_sign, &is_positive, Label::kNear);
+ __ neg(input_reg); // Sets flags.
+ DeoptimizeIf(negative, instr->environment());
+ __ bind(&is_positive);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LMathAbs* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LMathAbs* instr_;
+ };
+
+ ASSERT(instr->value()->Equals(instr->result()));
+ Representation r = instr->hydrogen()->value()->representation();
+
+ if (r.IsDouble()) {
+ UNIMPLEMENTED();
+ } else if (r.IsSmiOrInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else { // Tagged case.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
+ Register input_reg = ToRegister(instr->value());
+ // Smi check.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->function()).is(edi));
+ ASSERT(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(),
+ instr,
+ EDI_CONTAINS_TARGET);
+ }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->function()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->constructor()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ // No cell in ebx for construct type feedback in optimized code.
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ __ Move(eax, Immediate(instr->arity()));
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->constructor()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ Move(eax, Immediate(instr->arity()));
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+ // A single non-zero length argument produces holes, so we may need the
+ // holey variant of the elements kind; look at the first argument.
+ __ mov(ecx, Operand(esp, 0));
+ __ test(ecx, ecx);
+ __ j(zero, &packed_case, Label::kNear);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done, Label::kNear);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ lea(result, Operand(base, ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ lea(result, Operand(base, offset, times_1, 0));
+ }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->hydrogen()->field_representation();
+
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(
+ ToExternalReference(LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ __ mov(operand, Immediate(ToInteger32(operand_value)));
+ } else {
+ Register value = ToRegister(instr->value());
+ __ Store(value, operand, representation);
+ }
+ return;
+ }
+
+ Register object = ToRegister(instr->object());
+ __ AssertNotSmi(object);
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->has_transition());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ X87Register value = ToX87Register(instr->value());
+ X87Mov(FieldOperand(object, offset), value);
+ return;
+ }
+
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
+ AddDeprecationDependency(transition);
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ Register temp = ToRegister(instr->temp());
+ Register temp_map = ToRegister(instr->temp_map());
+ __ mov(temp_map, transition);
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
+ // Update the write barrier for the map field.
+ __ RecordWriteForMap(object, transition, temp_map, temp);
+ }
+ }
+
+ // Do the store.
+ Register write_register = object;
+ if (!access.IsInobject()) {
+ write_register = ToRegister(instr->temp());
+ __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+ }
+
+ MemOperand operand = FieldOperand(write_register, offset);
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (operand_value->IsRegister()) {
+ Register value = ToRegister(operand_value);
+ __ Store(value, operand, representation);
+ } else if (representation.IsInteger32()) {
+ Immediate immediate = ToImmediate(operand_value, representation);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ mov(operand, immediate);
+ } else {
+ Handle<Object> handle_value = ToHandle(operand_value);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ mov(operand, handle_value);
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ __ Store(value, operand, representation);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ Register value = ToRegister(instr->value());
+ Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWriteField(write_register,
+ offset,
+ value,
+ temp,
+ EMIT_REMEMBERED_SET,
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->object()).is(edx));
+ ASSERT(ToRegister(instr->value()).is(eax));
+
+ __ mov(ecx, instr->name());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
+ if (instr->index()->IsConstantOperand()) {
+ __ cmp(ToOperand(instr->length()),
+ ToImmediate(LConstantOperand::cast(instr->index()),
+ instr->hydrogen()->length()->representation()));
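+ // The operands are swapped (length is on the left), so the condition
+ // must be commuted before use.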
+ cc = CommuteCondition(cc);
+ } else if (instr->length()->IsConstantOperand()) {
+ __ cmp(ToOperand(instr->index()),
+ ToImmediate(LConstantOperand::cast(instr->length()),
+ instr->hydrogen()->index()->representation()));
+ } else {
+ __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ instr->base_offset()));
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ fld(0);
+ __ fstp_s(operand);
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ X87Mov(operand, ToX87Register(instr->value()));
+ } else {
+ Register value = ToRegister(instr->value());
+ switch (elements_kind) {
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ mov_b(operand, value);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ mov_w(operand, value);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ mov(operand, value);
+ break;
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ instr->base_offset());
+
+ if (instr->hydrogen()->IsConstantHoleStore()) {
+ // We are storing the (double) hole: write its raw bit pattern as two
+ // 32-bit words, so no floating point registers are required.
+ double nan_double = FixedDoubleArray::hole_nan_as_double();
+ uint64_t int_val = BitCast<uint64_t, double>(nan_double);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+
+ __ mov(double_store_operand, Immediate(lower));
+ Operand double_store_operand2 = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ instr->base_offset() + kPointerSize);
+ __ mov(double_store_operand2, Immediate(upper));
+ } else {
+ Label no_special_nan_handling;
+ X87Register value = ToX87Register(instr->value());
+ X87Fxch(value);
+
+ if (instr->NeedsCanonicalization()) {
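+ // Duplicate the value and compare it with itself: only a NaN compares
+ // unordered, setting the parity flag.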
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+
+ __ j(parity_odd, &no_special_nan_handling, Label::kNear);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
+ Immediate(kHoleNanUpper32));
+ __ add(esp, Immediate(kDoubleSize));
+ Label canonicalize;
+ __ j(not_equal, &canonicalize, Label::kNear);
+ __ jmp(&no_special_nan_handling, Label::kNear);
+ __ bind(&canonicalize);
+ __ fstp(0);
+ __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ }
+
+ __ bind(&no_special_nan_handling);
+ __ fst_d(double_store_operand);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+ Operand operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ instr->base_offset());
+ if (instr->value()->IsRegister()) {
+ __ mov(operand, ToRegister(instr->value()));
+ } else {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (IsSmi(operand_value)) {
+ Immediate immediate = ToImmediate(operand_value, Representation::Smi());
+ __ mov(operand, immediate);
+ } else {
+ ASSERT(!IsInteger32(operand_value));
+ Handle<Object> handle_value = ToHandle(operand_value);
+ __ mov(operand, handle_value);
+ }
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ ASSERT(instr->value()->IsRegister());
+ Register value = ToRegister(instr->value());
+ ASSERT(!instr->key()->IsConstantOperand());
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ lea(key, operand);
+ __ RecordWrite(elements,
+ key,
+ value,
+ EMIT_REMEMBERED_SET,
+ check_needed,
+ instr->hydrogen()->PointersToHereCheckForValue());
+ }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // Dispatch by elements kind: external/typed, fast double, or fast.
+ if (instr->is_typed_elements()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->object()).is(edx));
+ ASSERT(ToRegister(instr->key()).is(ecx));
+ ASSERT(ToRegister(instr->value()).is(eax));
+
+ Handle<Code> ic = instr->strict_mode() == STRICT
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+ DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ bool is_simple_map_transition =
+ IsSimpleMapChangeTransition(from_kind, to_kind);
+ Label::Distance branch_distance =
+ is_simple_map_transition ? Label::kNear : Label::kFar;
+ __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+ __ j(not_equal, &not_applicable, branch_distance);
+ if (is_simple_map_transition) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
+ Immediate(to_map));
+ // Write barrier.
+ ASSERT_NE(instr->temp(), NULL);
+ __ RecordWriteForMap(object_reg, to_map, new_map_reg,
+ ToRegister(instr->temp()));
+ } else {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(object_reg.is(eax));
+ PushSafepointRegistersScope scope(this);
+ __ mov(ebx, to_map);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ }
+ __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen,
+ LStringCharCodeAt* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
+
+ StringCharLoadGenerator::Generate(masm(),
+ factory(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Move(result, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (instr->index()->IsConstantOperand()) {
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
+ Representation::Smi());
+ __ push(immediate);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
+ instr, instr->context());
+ __ AssertSmi(eax);
+ __ SmiUntag(eax);
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen,
+ LStringCharFromCode* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ ASSERT(!char_code.is(result));
+
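+ // Fast path: look one-byte char codes up in the single character string
+ // cache; a two-byte code or a cache miss (undefined) falls through to the
+ // deferred runtime call.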
+ __ cmp(char_code, String::kMaxOneByteCharCode);
+ __ j(above, deferred->entry());
+ __ Move(result, Immediate(factory()->single_character_string_cache()));
+ __ mov(result, FieldOperand(result,
+ char_code, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result, factory()->undefined_value());
+ __ j(equal, deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Move(result, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ ASSERT(output->IsDoubleRegister());
+ if (input->IsRegister()) {
+ Register input_reg = ToRegister(input);
+ __ push(input_reg);
+ X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
+ __ pop(input_reg);
+ } else {
+ X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
+ }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ X87Register res = ToX87Register(output);
+ X87PrepareToWrite(res);
+ __ LoadUint32NoSSE2(ToRegister(input));
+ X87CommitWrite(res);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagI(LCodeGen* codegen,
+ LNumberTagI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
+ SIGNED_INT32);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagI* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagI* deferred =
+ new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
+ __ SmiTag(reg);
+ __ j(overflow, deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen,
+ LNumberTagU* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
+ UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred =
+ new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
+ __ cmp(reg, Immediate(Smi::kMaxValue));
+ __ j(above, deferred->entry());
+ __ SmiTag(reg);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp,
+ IntegerSignedness signedness) {
+ Label done, slow;
+ Register reg = ToRegister(value);
+ Register tmp = ToRegister(temp);
+
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
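+    // Worked example (assuming 32-bit smis with a one-bit tag): tagging
+    // 0x40000000 overflows to 0x80000000; SmiUntag (sar by 1) yields
+    // 0xC0000000, and the xor below flips bit 31 back, recovering
+    // 0x40000000.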
+ __ SmiUntag(reg);
+ __ xor_(reg, 0x80000000);
+ __ push(reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(reg);
+ } else {
+ // There's no fild variant for unsigned values, so zero-extend to a 64-bit
+ // int manually.
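+    // After the two pushes below, esp points at the value with a zero
+    // dword above it, i.e. a little-endian 64-bit integer whose high half
+    // is zero, which fild_d then reads as a non-negative number.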
+ __ push(Immediate(0));
+ __ push(reg);
+ __ fild_d(Operand(esp, 0));
+ __ pop(reg);
+ __ pop(reg);
+ }
+
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
+ __ jmp(&done, Label::kNear);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ Move(reg, Immediate(0));
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, eax);
+ }
+
+ __ bind(&done);
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen,
+ LNumberTagD* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ Register reg = ToRegister(instr->result());
+
+  // Put the value on top of the stack.
+ X87Register src = ToX87Register(instr->value());
+ X87LoadForUsage(src);
+
+ DeferredNumberTagD* deferred =
+ new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
+ if (FLAG_inline_new) {
+ Register tmp = ToRegister(instr->temp());
+ __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
+ } else {
+ __ jmp(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ Move(reg, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, eax);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
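+    // A uint32 with bit 30 or 31 set is >= 2^30 and cannot fit in the
+    // 31-bit smi payload, so deopt in that case.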
+ __ test(input, Immediate(0xc0000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ SmiTag(input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ LOperand* input = instr->value();
+ Register result = ToRegister(input);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ if (instr->needs_check()) {
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ __ AssertSmi(result);
+ }
+ __ SmiUntag(result);
+}
+
+
+void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
+ Register temp_reg,
+ X87Register res_reg,
+ bool can_convert_undefined_to_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode) {
+ Label load_smi, done;
+
+ X87PrepareToWrite(res_reg);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ if (!can_convert_undefined_to_nan) {
+ DeoptimizeIf(not_equal, env);
+ } else {
+ Label heap_number, convert;
+ __ j(equal, &heap_number, Label::kNear);
+
+ // Convert undefined (or hole) to NaN.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ __ bind(&convert);
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ fld_d(Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&heap_number);
+ }
+ // Heap number to x87 conversion.
+ __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ fldz();
+ __ FCmp();
+ __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ j(not_zero, &done, Label::kNear);
+
+      // Use general-purpose registers to check whether we have -0.0.
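+      // -0.0 and +0.0 compare equal on the FPU; they differ only in the
+      // sign bit of the upper word, hence the kSignMask test below.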
+ __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ test(temp_reg, Immediate(HeapNumber::kSignMask));
+ __ j(zero, &done, Label::kNear);
+
+ // Pop FPU stack before deoptimizing.
+ __ fstp(0);
+ DeoptimizeIf(not_zero, env);
+ }
+ __ jmp(&done, Label::kNear);
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ }
+
+ __ bind(&load_smi);
+ // Clobbering a temp is faster than re-tagging the
+ // input register since we avoid dependencies.
+ __ mov(temp_reg, input_reg);
+ __ SmiUntag(temp_reg); // Untag smi before converting to float.
+ __ push(temp_reg);
+ __ fild_s(Operand(esp, 0));
+ __ add(esp, Immediate(kPointerSize));
+ __ bind(&done);
+ X87CommitWrite(res_reg);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
+ Register input_reg = ToRegister(instr->value());
+
+ // The input was optimistically untagged; revert it.
+ STATIC_ASSERT(kSmiTagSize == 1);
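+  // The lea computes input * 2 + kHeapObjectTag: only heap objects, whose
+  // tag bit is 1, reach this deferred path, so this restores the original
+  // pointer after the optimistic sar-by-one.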
+ __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
+
+ if (instr->truncating()) {
+ Label no_heap_number, check_bools, check_false;
+
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
+
+ __ bind(&no_heap_number);
+    // Check for oddballs. Undefined and false are converted to zero, and
+    // true to one, for truncating conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ __ j(not_equal, &check_bools, Label::kNear);
+ __ Move(input_reg, Immediate(0));
+ __ jmp(done);
+
+ __ bind(&check_bools);
+ __ cmp(input_reg, factory()->true_value());
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Move(input_reg, Immediate(1));
+ __ jmp(done);
+
+ __ bind(&check_false);
+ __ cmp(input_reg, factory()->false_value());
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
+ DeoptimizeIf(not_equal, instr->environment());
+ __ Move(input_reg, Immediate(0));
+ } else {
+ Label bailout;
+ __ TaggedToI(input_reg, input_reg,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout);
+ __ jmp(done);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen,
+ LTaggedToI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_, done());
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ Register input_reg = ToRegister(input);
+ ASSERT(input_reg.is(ToRegister(instr->result())));
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
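+    // (SmiUntag is an arithmetic shift right by one, so the tag bit, 0 for
+    // smis and 1 for heap objects, lands in the carry flag.)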
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ SmiUntag(input_reg);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ j(carry, deferred->entry());
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* temp = instr->temp();
+ ASSERT(temp->IsRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ bool deoptimize_on_minus_zero =
+ instr->hydrogen()->deoptimize_on_minus_zero();
+ Register temp_reg = ToRegister(temp);
+
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ EmitNumberUntagDNoSSE2(input_reg,
+ temp_reg,
+ ToX87Register(result),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
+ deoptimize_on_minus_zero,
+ instr->environment(),
+ mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+ Register result_reg = ToRegister(result);
+
+ if (instr->truncating()) {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ TruncateX87TOSToI(result_reg);
+ } else {
+ Label bailout, done;
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+ Register result_reg = ToRegister(result);
+
+ Label bailout, done;
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
+
+ __ SmiTag(result_reg);
+ DeoptimizeIf(overflow, instr->environment());
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ LOperand* input = instr->value();
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
+ DeoptimizeIf(not_zero, instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(first));
+
+    // If there is only one type in the interval, check for equality.
+ if (first == last) {
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(last));
+ DeoptimizeIf(above, instr->environment());
+ }
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT(tag == 0 || IsPowerOf2(tag));
+ __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ } else {
+ __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ and_(temp, mask);
+ __ cmp(temp, tag);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ if (instr->hydrogen()->object_in_new_space()) {
+ Register reg = ToRegister(instr->value());
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ cmp(reg, Operand::ForCell(cell));
+ } else {
+ Operand operand = ToOperand(instr->value());
+ __ cmp(operand, object);
+ }
+ DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ xor_(esi, esi);
+ __ CallRuntime(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ }
+ DeoptimizeIf(zero, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen,
+ LCheckMaps* instr,
+ Register object,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ Register reg = ToRegister(input);
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
+ __ bind(deferred->check_maps());
+ }
+
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ Label success;
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
+ __ CompareMap(reg, map);
+ __ j(equal, &success, Label::kNear);
+ }
+
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
+ __ CompareMap(reg, map);
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ __ j(not_equal, deferred->entry());
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+
+ __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ UNREACHABLE();
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ ASSERT(instr->unclamped()->Equals(instr->result()));
+ Register value_reg = ToRegister(instr->result());
+ __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->scratch());
+ Register scratch2 = ToRegister(instr->scratch2());
+ Register scratch3 = ToRegister(instr->scratch3());
+ Label is_smi, done, heap_number, valid_exponent,
+ largest_value, zero_result, maybe_nan_or_infinity;
+
+ __ JumpIfSmi(input_reg, &is_smi);
+
+ // Check for heap number
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ jmp(&zero_result, Label::kNear);
+
+ // Heap number
+ __ bind(&heap_number);
+
+  // Surprisingly, all of the hand-crafted bit manipulations below are much
+  // faster than the x86 FPU built-in instruction, especially since "banker's
+  // rounding" would additionally be very expensive.
+
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+
+ // Test for negative values --> clamp to zero
+ __ test(scratch, scratch);
+ __ j(negative, &zero_result, Label::kNear);
+
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ j(zero, &zero_result, Label::kNear);
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
+ __ j(negative, &zero_result, Label::kNear);
+
+ const uint32_t non_int8_exponent = 7;
+ __ cmp(scratch2, Immediate(non_int8_exponent + 1));
+ // If the exponent is too big, check for special values.
+ __ j(greater, &maybe_nan_or_infinity, Label::kNear);
+
+ __ bind(&valid_exponent);
+  // Exponent word in scratch, exponent in scratch2. We know that 0 <=
+  // exponent < 7. The shift bias is the number of bits to shift the mantissa
+  // such that, with an exponent of 7, the top-most one is in bit 30, allowing
+  // detection of the rounding overflow of 255.5 to 256 (bit 31 goes from 0
+  // to 1).
+ int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
+ __ lea(result_reg, MemOperand(scratch2, shift_bias));
+ // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
+ // top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1 of the mantissa
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up to round
+ __ shl_cl(scratch);
+ // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
+ // use the bit in the "ones" place and add it to the "halves" place, which has
+ // the effect of rounding to even.
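+  // For example, 254.5 rounds down to 254 (even) while 255.5 rounds up to
+  // 256, which the overflow check below clamps to 255.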
+ __ mov(scratch2, scratch);
+ const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
+ const uint32_t one_bit_shift = one_half_bit_shift + 1;
+ __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
+ __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
+ Label no_round;
+ __ j(less, &no_round, Label::kNear);
+ Label round_up;
+ __ mov(scratch2, Immediate(1 << one_half_bit_shift));
+ __ j(greater, &round_up, Label::kNear);
+ __ test(scratch3, scratch3);
+ __ j(not_zero, &round_up, Label::kNear);
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, Immediate(1 << one_bit_shift));
+ __ shr(scratch2, 1);
+ __ bind(&round_up);
+ __ add(scratch, scratch2);
+ __ j(overflow, &largest_value, Label::kNear);
+ __ bind(&no_round);
+ __ shr(scratch, 23);
+ __ mov(result_reg, scratch);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&maybe_nan_or_infinity);
+  // Check for NaN/Infinity; all other values map to 255.
+ __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
+ __ j(not_equal, &largest_value, Label::kNear);
+
+ // Check for NaN, which differs from Infinity in that at least one mantissa
+ // bit is set.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+ __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
+ // Infinity -> Fall through to map to 255.
+
+ __ bind(&largest_value);
+ __ mov(result_reg, Immediate(255));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&zero_result);
+ __ xor_(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+
+ // smi
+ __ bind(&is_smi);
+ if (!input_reg.is(result_reg)) {
+ __ mov(result_reg, input_reg);
+ }
+ __ SmiUntag(result_reg);
+ __ ClampUint8(result_reg);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ UNREACHABLE();
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ UNREACHABLE();
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen,
+ LAllocate* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr, x87_stack_);
+
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
+ } else {
+ Register size = ToRegister(instr->size());
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ }
+
+ __ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
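+    // Prefill the body with filler maps so the heap stays parseable if a
+    // GC happens before the caller initializes the object.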
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ mov(temp, (size / kPointerSize) - 1);
+ } else {
+ temp = ToRegister(instr->size());
+ __ shr(temp, kPointerSizeLog2);
+ __ dec(temp);
+ }
+ Label loop;
+ __ bind(&loop);
+ __ mov(FieldOperand(result, temp, times_pointer_size, 0),
+ isolate()->factory()->one_pointer_filler_map());
+ __ dec(temp);
+ __ j(not_zero, &loop);
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Move(result, Immediate(Smi::FromInt(0)));
+
+ PushSafepointRegistersScope scope(this);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(ToRegister(instr->size()));
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size >= 0 && size <= Smi::kMaxValue) {
+ __ push(Immediate(Smi::FromInt(size)));
+ } else {
+ // We should never get here at runtime => abort
+ __ int3();
+ return;
+ }
+ }
+
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ push(Immediate(Smi::FromInt(flags)));
+
+ CallRuntimeFromDeferred(
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->value()).is(eax));
+ __ push(eax);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ Label materialized;
+ // Registers will be used as follows:
+ // ecx = literals array.
+ // ebx = regexp literal.
+ // eax = regexp literal clone.
+ // esi = context.
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(ecx, instr->hydrogen()->literals());
+ __ mov(ebx, FieldOperand(ecx, literal_offset));
+ __ cmp(ebx, factory()->undefined_value());
+ __ j(not_equal, &materialized, Label::kNear);
+
+  // Create the regexp literal using a runtime function.
+  // The result will be in eax.
+ __ push(ecx);
+ __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ push(Immediate(instr->hydrogen()->pattern()));
+ __ push(Immediate(instr->hydrogen()->flags()));
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+ __ mov(ebx, eax);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated, Label::kNear);
+
+ __ bind(&runtime_allocate);
+ __ push(ebx);
+ __ push(Immediate(Smi::FromInt(size)));
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ pop(ebx);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
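+  // E.g. for a five-word object the loop copies words 0-3 pairwise and the
+  // tail below copies the final word.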
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ mov(edx, FieldOperand(ebx, i));
+ __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
+ __ mov(FieldOperand(eax, i), edx);
+ __ mov(FieldOperand(eax, i + kPointerSize), ecx);
+ }
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ mov(edx, FieldOperand(ebx, size - kPointerSize));
+ __ mov(FieldOperand(eax, size - kPointerSize), edx);
+ }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->is_generator());
+ __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ push(esi);
+ __ push(Immediate(instr->hydrogen()->shared_info()));
+ __ push(Immediate(pretenure ? factory()->true_value()
+ : factory()->false_value()));
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ LOperand* input = instr->value();
+ EmitPushTaggedOperand(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Condition final_branch_condition = EmitTypeofIs(instr, input);
+ if (final_branch_condition != no_condition) {
+ EmitBranch(instr, final_branch_condition);
+ }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Handle<String> type_name = instr->type_literal();
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
+
+ Label::Distance true_distance = left_block == next_block ? Label::kNear
+ : Label::kFar;
+ Label::Distance false_distance = right_block == next_block ? Label::kNear
+ : Label::kFar;
+ Condition final_branch_condition = no_condition;
+ if (String::Equals(type_name, factory()->number_string())) {
+ __ JumpIfSmi(input, true_label, true_distance);
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ final_branch_condition = equal;
+
+ } else if (String::Equals(type_name, factory()->string_string())) {
+ __ JumpIfSmi(input, false_label, false_distance);
+ __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
+ __ j(above_equal, false_label, false_distance);
+ __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ final_branch_condition = zero;
+
+ } else if (String::Equals(type_name, factory()->symbol_string())) {
+ __ JumpIfSmi(input, false_label, false_distance);
+ __ CmpObjectType(input, SYMBOL_TYPE, input);
+ final_branch_condition = equal;
+
+ } else if (String::Equals(type_name, factory()->boolean_string())) {
+ __ cmp(input, factory()->true_value());
+ __ j(equal, true_label, true_distance);
+ __ cmp(input, factory()->false_value());
+ final_branch_condition = equal;
+
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory()->null_string())) {
+ __ cmp(input, factory()->null_value());
+ final_branch_condition = equal;
+
+ } else if (String::Equals(type_name, factory()->undefined_string())) {
+ __ cmp(input, factory()->undefined_value());
+ __ j(equal, true_label, true_distance);
+ __ JumpIfSmi(input, false_label, false_distance);
+ // Check for undetectable objects => true.
+ __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ final_branch_condition = not_zero;
+
+ } else if (String::Equals(type_name, factory()->function_string())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ JumpIfSmi(input, false_label, false_distance);
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+ __ j(equal, true_label, true_distance);
+ __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+ final_branch_condition = equal;
+
+ } else if (String::Equals(type_name, factory()->object_string())) {
+ __ JumpIfSmi(input, false_label, false_distance);
+ if (!FLAG_harmony_typeof) {
+ __ cmp(input, factory()->null_value());
+ __ j(equal, true_label, true_distance);
+ }
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ __ j(below, false_label, false_distance);
+ __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, false_label, false_distance);
+ // Check for undetectable objects => false.
+ __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ final_branch_condition = zero;
+
+ } else {
+ __ jmp(false_label, false_distance);
+ }
+ return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp = ToRegister(instr->temp());
+
+ EmitIsConstructCall(temp);
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp) {
+ // Get the frame pointer for the calling frame.
+ __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &check_frame_marker, Label::kNear);
+ __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
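+    // E.g. if only 3 bytes have been emitted since the last lazy-deopt
+    // point and space_needed is 5, emit 2 bytes of nop padding.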
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ DeoptimizeIf(no_condition, instr->environment(), type);
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kHiddenStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen,
+ LStackCheck* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &done, Label::kNear);
+
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(esi));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new(zone()) DeferredStackCheck(this, instr, x87_stack_);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(below, deferred_stack_check->entry());
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting call and the safepoint in
+ // the deferred code.
+ }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ DeoptimizeIf(equal, instr->environment());
+
+ __ cmp(eax, isolate()->factory()->null_value());
+ DeoptimizeIf(equal, instr->environment());
+
+ __ test(eax, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ DeoptimizeIf(below_equal, instr->environment());
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(&call_runtime);
+
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ jmp(&use_cache, Label::kNear);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(eax);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->meta_map());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ cmp(result, Immediate(Smi::FromInt(0)));
+ __ j(not_equal, &load_cache, Label::kNear);
+ __ mov(result, isolate()->factory()->empty_fixed_array());
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ mov(result,
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ mov(result,
+ FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ bind(&done);
+ __ test(result, result);
+ DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ __ cmp(ToRegister(instr->map()),
+ FieldOperand(object, HeapObject::kMapOffset));
+ DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ push(index);
+ __ xor_(esi, esi);
+ __ CallRuntime(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(object, eax);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register object,
+ Register index,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack),
+ instr_(instr),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register object_;
+ Register index_;
+ };
+
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, object, index, x87_stack_);
+
+ Label out_of_object, done;
+ __ test(index, Immediate(Smi::FromInt(1)));
+ __ j(not_zero, deferred->entry());
+
+ __ sar(index, 1);
+
+ __ cmp(index, Immediate(0));
+ __ j(less, &out_of_object, Label::kNear);
+ __ mov(object, FieldOperand(object,
+ index,
+ times_half_pointer_size,
+ JSObject::kHeaderSize));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&out_of_object);
+ __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ neg(index);
+  // Index is now equal to the out-of-object property index plus 1.
+ __ mov(object, FieldOperand(object,
+ index,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/lithium-codegen-x87.h b/chromium/v8/src/x87/lithium-codegen-x87.h
new file mode 100644
index 00000000000..a84b49ce5ff
--- /dev/null
+++ b/chromium/v8/src/x87/lithium-codegen-x87.h
@@ -0,0 +1,504 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_LITHIUM_CODEGEN_X87_H_
+#define V8_X87_LITHIUM_CODEGEN_X87_H_
+
+#include "src/x87/lithium-x87.h"
+
+#include "src/checks.h"
+#include "src/deoptimizer.h"
+#include "src/x87/lithium-gap-resolver-x87.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class LGapNode;
+class SafepointGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ dynamic_frame_alignment_(false),
+ support_aligned_spilled_doubles_(false),
+ osr_pc_offset_(-1),
+ frame_is_built_(false),
+ x87_stack_(assembler),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ // Support for converting LOperands to assembler types.
+ Operand ToOperand(LOperand* op) const;
+ Register ToRegister(LOperand* op) const;
+ X87Register ToX87Register(LOperand* op) const;
+
+ bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+ Immediate ToImmediate(LOperand* op, const Representation& r) const {
+ return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
+ }
+ double ToDouble(LConstantOperand* op) const;
+
+ // Support for non-sse2 (x87) floating point stack handling.
+ // These functions maintain the mapping of physical stack registers to our
+ // virtual registers between instructions.
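+  // Conceptually, after X87Mov(reg, src) the model records reg as the new
+  // top of the physical stack, so a later X87Fxch(reg) knows which slot to
+  // exchange.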
+ enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };
+
+ void X87Mov(X87Register reg, Operand src,
+ X87OperandType operand = kX87DoubleOperand);
+ void X87Mov(Operand src, X87Register reg,
+ X87OperandType operand = kX87DoubleOperand);
+
+ void X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result);
+
+ void X87LoadForUsage(X87Register reg);
+ void X87LoadForUsage(X87Register reg1, X87Register reg2);
+ void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
+ void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
+
+ void X87Fxch(X87Register reg, int other_slot = 0) {
+ x87_stack_.Fxch(reg, other_slot);
+ }
+ void X87Free(X87Register reg) {
+ x87_stack_.Free(reg);
+ }
+
+
+ bool X87StackEmpty() {
+ return x87_stack_.depth() == 0;
+ }
+
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // The operand denoting the second word (the one with a higher address) of
+ // a double stack slot.
+ Operand HighOperand(LOperand* op);
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp,
+ IntegerSignedness signedness);
+
+ void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index);
+
+ // Parallel move support.
+ void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ void EnsureRelocSpaceForDeoptimization();
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ StrictMode strict_mode() const { return info()->strict_mode(); }
+
+ Scope* scope() const { return scope_; }
+
+ void EmitClassOfTest(Label* if_true,
+ Label* if_false,
+ Handle<String> class_name,
+ Register input,
+ Register temporary,
+ Register temporary2);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ // Code generation passes. Returns true if code generation should
+ // continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* fun,
+ int argc,
+ LInstruction* instr);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, argc, instr);
+ }
+
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context);
+
+ void LoadContextFromDeferred(LOperand* context);
+
+ enum EDIState {
+ EDI_UNINITIALIZED,
+ EDI_CONTAINS_TARGET
+ };
+
+ // Generate a direct call to a known function. Expects the function
+ // to be in edi.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ EDIState edi_state);
+
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
+ void DeoptimizeIf(Condition cc, LEnvironment* environment);
+
+ bool DeoptEveryNTimes() {
+ return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+ }
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ Register ToRegister(int index) const;
+ X87Register ToX87Register(int index) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ ExternalReference ToExternalReference(LConstantOperand* op) const;
+
+ Operand BuildFastArrayOperand(LOperand* elements_pointer,
+ LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind,
+ uint32_t base_offset);
+
+ Operand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
+ void EmitIntegerMathAbs(LMathAbs* instr);
+
+ // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition cc);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition cc);
+ void EmitNumberUntagDNoSSE2(
+ Register input,
+ Register temp,
+ X87Register res_reg,
+ bool allow_undefined_as_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+
+ // Emits optimized code for typeof x == "y". Modifies input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
+
+ // Emits optimized code for %_IsObject(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsObject(Register input,
+ Register temp1,
+ Label* is_not_object,
+ Label* is_object);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed);
+
+ // Emits optimized code for %_IsConstructCall().
+ // Caller should branch on equal condition.
+ void EmitIsConstructCall(Register temp);
+
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset,
+ AllocationSiteMode mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+ void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+
+ // Emits code for pushing either a tagged constant, a (non-double)
+ // register, or a stack slot operand.
+ void EmitPushTaggedOperand(LOperand* operand);
+
+ void X87Fld(Operand src, X87OperandType opts);
+
+ void EmitFlushX87ForDeopt();
+ void FlushX87StackIfNecessary(LInstruction* instr) {
+ x87_stack_.FlushIfNecessary(instr, this);
+ }
+ friend class LGapResolver;
+
+#ifdef _MSC_VER
+  // On Windows, you may not access the stack more than one page below
+  // the most recently mapped page. To make the allocated area randomly
+  // accessible, we write an arbitrary value to each page in the range
+  // esp + offset - page_size .. esp, in turn.
+ void MakeSureStackPagesMapped(int offset);
+#endif
+
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ bool dynamic_frame_alignment_;
+ bool support_aligned_spilled_doubles_;
+ int osr_pc_offset_;
+ bool frame_is_built_;
+
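+  // Tracks the contents of the x87 FPU register stack while code is
+  // generated, mapping allocated X87Registers to their current stack
+  // slots so values can be located, exchanged, and spilled as needed.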
+ class X87Stack {
+ public:
+ explicit X87Stack(MacroAssembler* masm)
+ : stack_depth_(0), is_mutable_(true), masm_(masm) { }
+    explicit X87Stack(const X87Stack& other)
+        : stack_depth_(other.stack_depth_), is_mutable_(false),
+          masm_(other.masm_) {
+ for (int i = 0; i < stack_depth_; i++) {
+ stack_[i] = other.stack_[i];
+ }
+ }
+ bool operator==(const X87Stack& other) const {
+ if (stack_depth_ != other.stack_depth_) return false;
+ for (int i = 0; i < stack_depth_; i++) {
+ if (!stack_[i].is(other.stack_[i])) return false;
+ }
+ return true;
+ }
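+    // Bookkeeping helpers for the simulated stack: querying, exchanging,
+    // and freeing tracked registers, bracketing writes, and flushing the
+    // stack when an instruction requires it.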
+ bool Contains(X87Register reg);
+ void Fxch(X87Register reg, int other_slot = 0);
+ void Free(X87Register reg);
+ void PrepareToWrite(X87Register reg);
+ void CommitWrite(X87Register reg);
+ void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
+ void LeavingBlock(int current_block_id, LGoto* goto_instr);
+ int depth() const { return stack_depth_; }
+ void pop() {
+ ASSERT(is_mutable_);
+ stack_depth_--;
+ }
+ void push(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
+ stack_[stack_depth_] = reg;
+ stack_depth_++;
+ }
+
+ MacroAssembler* masm() const { return masm_; }
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ private:
+ int ArrayIndex(X87Register reg);
+ int st2idx(int pos);
+
+ X87Register stack_[X87Register::kMaxNumAllocatableRegisters];
+ int stack_depth_;
+ bool is_mutable_;
+ MacroAssembler* masm_;
+ };
+ X87Stack x87_stack_;
+
+ // Builder that keeps track of safepoints in the code. The table
+ // itself is emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+ // Compiler from a set of parallel moves to a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
+ : codegen_(codegen) {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->masm_->PushSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ ASSERT(codegen_->info()->is_calling());
+ }
+
+ ~PushSafepointRegistersScope() {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ codegen_->masm_->PopSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class LEnvironment;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
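+// Deferred code is an out-of-line code sequence (typically a slow path)
+// emitted after the main body of the function.  It records the x87 stack
+// layout at the point where the main code branches to it, so the stack
+// can be restored to a consistent state when the deferred block is
+// generated.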
+class LDeferredCode : public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_),
+ x87_stack_(x87_stack) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() {}
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
+ int instruction_index() const { return instruction_index_; }
+ const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ Label done_;
+ int instruction_index_;
+ LCodeGen::X87Stack x87_stack_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X87_LITHIUM_CODEGEN_X87_H_
diff --git a/chromium/v8/src/x87/lithium-gap-resolver-x87.cc b/chromium/v8/src/x87/lithium-gap-resolver-x87.cc
new file mode 100644
index 00000000000..9ebfa3a22c2
--- /dev/null
+++ b/chromium/v8/src/x87/lithium-gap-resolver-x87.cc
@@ -0,0 +1,445 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/x87/lithium-gap-resolver-x87.h"
+#include "src/x87/lithium-codegen-x87.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner),
+ moves_(32, owner->zone()),
+ source_uses_(),
+ destination_uses_(),
+ spilled_register_(-1) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(HasBeenReset());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+    // Skip moves with constant sources so they are performed last.  They
+    // don't block other moves, and skipping such moves with register
+    // destinations keeps those registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ PerformMove(i);
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ Finish();
+ ASSERT(HasBeenReset());
+}
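+// Example: for the parallel move {eax -> ebx, ebx -> eax, #1 -> ecx} the
+// first loop above resolves the eax/ebx cycle with a swap, and the second
+// loop then materializes the constant into ecx.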
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) AddMove(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph. We use operand swaps to resolve cycles,
+ // which means that a call to PerformMove could change any source operand
+ // in the move graph.
+
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved on the side.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ // Though PerformMove can change any source operand in the move graph,
+ // this call cannot create a blocking move via a swap (this loop does
+ // not miss any). Assume there is a non-blocking move with source A
+ // and this move is blocked on source B and there is a swap of A and
+ // B. Then A and B must be involved in the same cycle (or they would
+ // not be swapped). Since this move's destination is B and there is
+ // only a single incoming edge to an operand, this move must also be
+ // involved in the same cycle. In that case, the blocking move will
+ // be created but will be "pending" when we return from PerformMove.
+ PerformMove(i);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // This move's source may have changed due to swaps to resolve cycles and
+ // so it may now be the last move in the cycle. If so remove it.
+ if (moves_[index].source()->Equals(destination)) {
+ RemoveMove(index);
+ return;
+ }
+
+  // The move may be blocked on at most one pending move, in which case
+ // we have a cycle. Search for such a blocking move and perform a swap to
+ // resolve it.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ EmitSwap(index);
+ return;
+ }
+ }
+
+ // This move is not blocked.
+ EmitMove(index);
+}
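+// Note that the recursion in PerformMove is bounded: each recursive call
+// targets a move that is not yet pending, and the caller's move is marked
+// pending before recursing, so the depth never exceeds the number of moves.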
+
+
+void LGapResolver::AddMove(LMoveOperands move) {
+ LOperand* source = move.source();
+ if (source->IsRegister()) ++source_uses_[source->index()];
+
+ LOperand* destination = move.destination();
+ if (destination->IsRegister()) ++destination_uses_[destination->index()];
+
+ moves_.Add(move, cgen_->zone());
+}
+
+
+void LGapResolver::RemoveMove(int index) {
+ LOperand* source = moves_[index].source();
+ if (source->IsRegister()) {
+ --source_uses_[source->index()];
+ ASSERT(source_uses_[source->index()] >= 0);
+ }
+
+ LOperand* destination = moves_[index].destination();
+ if (destination->IsRegister()) {
+ --destination_uses_[destination->index()];
+ ASSERT(destination_uses_[destination->index()] >= 0);
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+int LGapResolver::CountSourceUses(LOperand* operand) {
+ int count = 0;
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
+ ++count;
+ }
+ }
+ return count;
+}
+
+
+Register LGapResolver::GetFreeRegisterNot(Register reg) {
+ int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
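+  // A register is safely reusable only if no pending move still reads it
+  // (zero source uses) and some pending move will overwrite it anyway
+  // (nonzero destination uses); a register not involved in any move might
+  // hold a live value that must not be clobbered.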
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+ if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
+ return Register::FromAllocationIndex(i);
+ }
+ }
+ return no_reg;
+}
+
+
+bool LGapResolver::HasBeenReset() {
+ if (!moves_.is_empty()) return false;
+ if (spilled_register_ >= 0) return false;
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+ if (source_uses_[i] != 0) return false;
+ if (destination_uses_[i] != 0) return false;
+ }
+ return true;
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Finish() {
+ if (spilled_register_ >= 0) {
+ __ pop(Register::FromAllocationIndex(spilled_register_));
+ spilled_register_ = -1;
+ }
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::EnsureRestored(LOperand* operand) {
+ if (operand->IsRegister() && operand->index() == spilled_register_) {
+ __ pop(Register::FromAllocationIndex(spilled_register_));
+ spilled_register_ = -1;
+ }
+}
+
+
+Register LGapResolver::EnsureTempRegister() {
+ // 1. We may have already spilled to create a temp register.
+ if (spilled_register_ >= 0) {
+ return Register::FromAllocationIndex(spilled_register_);
+ }
+
+ // 2. We may have a free register that we can use without spilling.
+ Register free = GetFreeRegisterNot(no_reg);
+ if (!free.is(no_reg)) return free;
+
+ // 3. Prefer to spill a register that is not used in any remaining move
+ // because it will not need to be restored until the end.
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+ if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
+ Register scratch = Register::FromAllocationIndex(i);
+ __ push(scratch);
+ spilled_register_ = i;
+ return scratch;
+ }
+ }
+
+ // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
+ Register scratch = Register::FromAllocationIndex(0);
+ __ push(scratch);
+ spilled_register_ = 0;
+ return scratch;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+ EnsureRestored(source);
+ EnsureRestored(destination);
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Register src = cgen_->ToRegister(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(dst, src);
+
+ } else if (source->IsStackSlot()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ mov(dst, src);
+ } else {
+ // Spill on demand to use a temporary register for memory-to-memory
+ // moves.
+ Register tmp = EnsureTempRegister();
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(tmp, src);
+ __ mov(dst, tmp);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ double v = cgen_->ToDouble(constant_source);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
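+      // x87 cannot load a 64-bit immediate directly, so materialize the
+      // double through memory: push its two halves and load from the stack.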
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
+ } else {
+ Register tmp = EnsureTempRegister();
+ __ LoadObject(tmp, cgen_->ToHandle(constant_source));
+ __ mov(dst, tmp);
+ }
+ }
+
+ } else if (source->IsDoubleRegister()) {
+    // Store the value of the source x87 register into the destination,
+    // which must be a double stack slot in the non-SSE2 case.
+ ASSERT(destination->IsDoubleStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ X87Register src = cgen_->ToX87Register(source);
+ cgen_->X87Mov(dst, src);
+ } else if (source->IsDoubleStackSlot()) {
+    // Load the source stack slot onto the top of the floating-point stack,
+    // then store into the destination.  If the destination is a double
+    // register, it represents the top of the x87 stack, so the load alone
+    // completes the move.
+ if (destination->IsDoubleStackSlot()) {
+ Register tmp = EnsureTempRegister();
+ Operand src0 = cgen_->ToOperand(source);
+ Operand src1 = cgen_->HighOperand(source);
+ Operand dst0 = cgen_->ToOperand(destination);
+ Operand dst1 = cgen_->HighOperand(destination);
+      __ mov(tmp, src0);  // Copy the double one word at a time through tmp.
+ __ mov(dst0, tmp);
+ __ mov(tmp, src1);
+ __ mov(dst1, tmp);
+ } else {
+ Operand src = cgen_->ToOperand(source);
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, src);
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ RemoveMove(index);
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+ EnsureRestored(source);
+ EnsureRestored(destination);
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Register-register.
+ Register src = cgen_->ToRegister(source);
+ Register dst = cgen_->ToRegister(destination);
+ __ xchg(dst, src);
+
+ } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+ (source->IsStackSlot() && destination->IsRegister())) {
+ // Register-memory. Use a free register as a temp if possible. Do not
+ // spill on demand because the simple spill implementation cannot avoid
+ // spilling src at this point.
+ Register tmp = GetFreeRegisterNot(no_reg);
+ Register reg =
+ cgen_->ToRegister(source->IsRegister() ? source : destination);
+ Operand mem =
+ cgen_->ToOperand(source->IsRegister() ? destination : source);
+ if (tmp.is(no_reg)) {
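+      // No free register: swap the register and memory word in place with
+      // the triple-XOR trick (a ^= b; b ^= a; a ^= b).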
+ __ xor_(reg, mem);
+ __ xor_(mem, reg);
+ __ xor_(reg, mem);
+ } else {
+ __ mov(tmp, mem);
+ __ mov(mem, reg);
+ __ mov(reg, tmp);
+ }
+
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory. Spill on demand to use a temporary. If there is a
+ // free register after that, use it as a second temporary.
+ Register tmp0 = EnsureTempRegister();
+ Register tmp1 = GetFreeRegisterNot(tmp0);
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ if (tmp1.is(no_reg)) {
+ // Only one temp register available to us.
+ __ mov(tmp0, dst);
+ __ xor_(tmp0, src);
+ __ xor_(src, tmp0);
+ __ xor_(tmp0, src);
+ __ mov(dst, tmp0);
+ } else {
+ __ mov(tmp0, dst);
+ __ mov(tmp1, src);
+ __ mov(dst, tmp1);
+ __ mov(src, tmp0);
+ }
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+
+ // The swap of source and destination has executed a move from source to
+ // destination.
+ RemoveMove(index);
+
+ // Any unperformed (including pending) move with a source of either
+  // this move's source or destination needs to have its source
+ // changed to reflect the state of affairs after the swap.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(source)) {
+ moves_[i].set_source(destination);
+ } else if (other_move.Blocks(destination)) {
+ moves_[i].set_source(source);
+ }
+ }
+
+ // In addition to swapping the actual uses as sources, we need to update
+ // the use counts.
+ if (source->IsRegister() && destination->IsRegister()) {
+ int temp = source_uses_[source->index()];
+ source_uses_[source->index()] = source_uses_[destination->index()];
+ source_uses_[destination->index()] = temp;
+ } else if (source->IsRegister()) {
+ // We don't have use counts for non-register operands like destination.
+ // Compute those counts now.
+ source_uses_[source->index()] = CountSourceUses(source);
+ } else if (destination->IsRegister()) {
+ source_uses_[destination->index()] = CountSourceUses(destination);
+ }
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/lithium-gap-resolver-x87.h b/chromium/v8/src/x87/lithium-gap-resolver-x87.h
new file mode 100644
index 00000000000..737660c71ac
--- /dev/null
+++ b/chromium/v8/src/x87/lithium-gap-resolver-x87.h
@@ -0,0 +1,87 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
+#define V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
+
+#include "src/v8.h"
+
+#include "src/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // Emit any code necessary at the end of a gap move.
+ void Finish();
+
+ // Add or delete a move from the move graph without emitting any code.
+ // Used to build up the graph and remove trivial moves.
+ void AddMove(LMoveOperands move);
+ void RemoveMove(int index);
+
+  // Returns the count of uses of operand as a source in not-yet-performed
+  // moves.  Used to rebuild use counts.
+ int CountSourceUses(LOperand* operand);
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Execute a move by emitting a swap of two operands. The move from
+ // source to destination is removed from the move graph.
+ void EmitSwap(int index);
+
+ // Ensure that the given operand is not spilled.
+ void EnsureRestored(LOperand* operand);
+
+ // Return a register that can be used as a temp register, spilling
+ // something if necessary.
+ Register EnsureTempRegister();
+
+  // Return a known free register different from the given one (pass no_reg
+  // to accept any free register), or no_reg if there is no such register.
+ Register GetFreeRegisterNot(Register reg);
+
+ // Verify that the state is the initial one, ready to resolve a single
+ // parallel move.
+ bool HasBeenReset();
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ // Source and destination use counts for the general purpose registers.
+ int source_uses_[Register::kMaxNumAllocatableRegisters];
+ int destination_uses_[Register::kMaxNumAllocatableRegisters];
+
+ // If we had to spill on demand, the currently spilled register's
+ // allocation index.
+ int spilled_register_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
diff --git a/chromium/v8/src/x87/lithium-x87.cc b/chromium/v8/src/x87/lithium-x87.cc
new file mode 100644
index 00000000000..707783d7f66
--- /dev/null
+++ b/chromium/v8/src/x87/lithium-x87.cc
@@ -0,0 +1,2660 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/lithium-allocator-inl.h"
+#include "src/x87/lithium-x87.h"
+#include "src/x87/lithium-codegen-x87.h"
+#include "src/hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+ // a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+bool LInstruction::HasDoubleRegisterResult() {
+ return HasResult() && result()->IsDoubleRegister();
+}
+
+
+bool LInstruction::HasDoubleRegisterInput() {
+ for (int i = 0; i < InputCount(); i++) {
+ LOperand* op = InputAt(i);
+ if (op != NULL && op->IsDoubleRegister()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) {
+ for (int i = 0; i < InputCount(); i++) {
+ LOperand* op = InputAt(i);
+ if (op != NULL && op->IsDoubleRegister()) {
+ if (cgen->ToX87Register(op).is(reg)) return true;
+ }
+ }
+ return false;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
+ case Token::SHL: return "sal-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(),
+ true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // For a double-width slot, skip a slot if necessary so that the returned
+  // index is odd and the double occupies an aligned two-slot pair.
+ if (kind == DOUBLE_REGISTERS) {
+ spill_slot_count_++;
+ spill_slot_count_ |= 1;
+ num_double_slots_++;
+ }
+ return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ ASSERT(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ hydrogen()->access().PrintTo(stream);
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", base_offset());
+ } else {
+ stream->Add("]");
+ }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", base_offset());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // Reserve the first spill slot for the state of dynamic alignment.
+ if (info()->IsOptimizing()) {
+ int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ ASSERT_EQ(alignment_state_index, 0);
+ USE(alignment_state_index);
+ }
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* next = NULL;
+ if (i < blocks->length() - 1) next = blocks->at(i + 1);
+ DoBasicBlock(blocks->at(i), next);
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+void LChunkBuilder::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
+}
+
+
+static inline bool CanBeImmediateConstant(HValue* value) {
+ return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
+ Register fixed_register) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseFixed(value, fixed_register);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr,
+ int index) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator,
+ &objects_to_materialize));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+ return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ ASSERT(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseFixed(right_value, ecx);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
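+    // For example, "x >>> 0" can produce values up to 2^32 - 1, which do
+    // not fit in a signed int32 unless the uses truncate the result.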
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left_operand = UseFixed(left, edx);
+ LOperand* right_operand = UseFixed(right, eax);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+ ASSERT(is_building());
+ current_block_ = block;
+ next_block_ = next_block;
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment, it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while (current != NULL && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ next_block_ = NULL;
+ current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ AddInstruction(instr, current);
+ }
+
+ current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ if (instr->IsGoto() && LGoto::cast(instr)->jumps_to_join()) {
+ // TODO(olivf) Since phis of spilled values are joined as registers
+ // (not in the stack slot), we need to allow the goto gaps to keep one
+ // x87 register alive. To ensure all other values are still spilled, we
+    // insert an FPU register barrier right before it.
+ LClobberDoubles* clobber = new(zone()) LClobberDoubles(isolate());
+ clobber->set_hydrogen_value(hydrogen_val);
+ chunk_->AddInstruction(clobber, current_block_);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
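+    // Every call is followed by an LLazyBailout carrying the environment
+    // used for lazy deoptimization; if the call has observable side
+    // effects, that environment is replayed from the HSimulate after it.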
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
+ }
+ return branch;
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpMapAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+ info()->MarkAsRequiresFrame();
+ return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
+ return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
+ LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result =
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), esi),
+ UseFixed(instr->left(), InstanceofStub::left()),
+ FixedTemp(edi));
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegister(instr->function());
+ LOperand* temp = TempRegister();
+ LWrapReceiver* result =
+ new(zone()) LWrapReceiver(receiver, function, temp);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), edi);
+ LOperand* receiver = UseFixed(instr->receiver(), eax);
+ LOperand* length = UseFixed(instr->length(), ebx);
+ LOperand* elements = UseFixed(instr->elements(), ecx);
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = UseAny(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses()
+ ? NULL
+ : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, esi);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), edi);
+
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseFixed(instr->function(), edi);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ // Crankshaft is turned off when SSE2 is unavailable (nosse2).
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context()); // Deferred use.
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* value = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* constructor = UseFixed(instr->constructor(), edi);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* constructor = UseFixed(instr->constructor(), edi);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseFixed(instr->function(), edi);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ return DefineSameAsFirst(new(zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
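Why a dedicated LDivByPowerOf2I exists: dividing by a known 2^k needs no idiv. A
truncating signed divide is an arithmetic shift plus a bias for negative
dividends, and the AssignEnvironment conditions above cover exactly what the
shift cannot express (a -0 result for negative divisors, and kMinInt / -1
overflow). A minimal sketch, checked against C++'s truncating '/' and assuming
'>>' on negative values is an arithmetic shift (true on mainstream ia32
compilers):

    #include <cassert>
    #include <cstdint>

    // Truncating division by 2^k: the arithmetic shift rounds toward minus
    // infinity, so negative dividends first get a bias of (2^k - 1) to make
    // the result round toward zero like '/'.
    int32_t DivByPowerOf2(int32_t x, int k) {
      int32_t bias = (x >> 31) & ((int32_t{1} << k) - 1);
      return (x + bias) >> k;
    }

    int main() {
      for (int32_t x : {-100, -8, -7, -1, 0, 1, 7, 8, 100}) {
        for (int k : {1, 2, 3, 4}) {
          assert(DivByPowerOf2(x, k) == x / (int32_t{1} << k));
        }
      }
      return 0;
    }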
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
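The fixed eax/edx temps above exist because the strength-reduced form of a
constant division is a widening multiply by a precomputed reciprocal, and ia32's
mul always writes its 64-bit product into edx:eax. A self-contained sketch for
one well-known constant (0xCCCCCCCD is ceil(2^35 / 10); the identity holds for
every 32-bit input):

    #include <cassert>
    #include <cstdint>

    // Unsigned division by 10 via multiply-high: (x * 0xCCCCCCCD) >> 35
    // equals x / 10 for all uint32_t x, turning a div into mul + shr.
    uint32_t DivBy10(uint32_t x) {
      return static_cast<uint32_t>(
          (static_cast<uint64_t>(x) * 0xCCCCCCCDull) >> 35);
    }

    int main() {
      for (uint64_t x = 0; x <= 0xFFFFFFFFull; x += 12347) {  // sampled check
        assert(DivBy10(static_cast<uint32_t>(x)) ==
               static_cast<uint32_t>(x) / 10);
      }
      assert(DivBy10(0xFFFFFFFFu) == 429496729u);
      return 0;
    }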
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), eax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
+ dividend, divisor, temp), eax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
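The flooring variants are cheaper than the truncating ones for powers of two: on
a two's-complement machine an arithmetic right shift is exactly floor(x / 2^k),
so LFlooringDivByPowerOf2I needs no bias correction at all, only the -0 and
kMinInt / -1 deopts noted above. A quick check of the identity:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // floor(x / 2^k) == x >> k for two's-complement int32_t (assuming '>>'
    // on negative values is an arithmetic shift, as on mainstream compilers).
    int32_t FlooringDivByPowerOf2(int32_t x, int k) { return x >> k; }

    int main() {
      for (int32_t x : {-9, -8, -7, -1, 0, 1, 7, 8, 9}) {
        for (int k : {1, 2, 3}) {
          int32_t expected = static_cast<int32_t>(
              std::floor(static_cast<double>(x) / (1 << k)));
          assert(FlooringDivByPowerOf2(x, k) == expected);
        }
      }
      return 0;
    }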
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
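The kBailoutOnMinusZero deopt above is a JavaScript artifact: the remainder
takes the sign of the dividend, so an expression like -8 % 8 evaluates to -0,
which an int32 lowering cannot represent. The magnitude itself is just a mask
for power-of-two divisors; a sketch matching C++'s truncating '%':

    #include <cassert>
    #include <cstdint>

    // x % 2^k with the sign of the dividend: mask the magnitude, then
    // restore the sign. Negating via unsigned math avoids UB at INT32_MIN.
    int32_t ModByPowerOf2(int32_t x, int k) {
      uint32_t mask = (uint32_t{1} << k) - 1;
      uint32_t ux = static_cast<uint32_t>(x);
      uint32_t mag = (x < 0 ? 0u - ux : ux) & mask;
      return x < 0 ? -static_cast<int32_t>(mag) : static_cast<int32_t>(mag);
    }

    int main() {
      for (int32_t x : {-9, -8, -1, 0, 1, 7, 9}) {
        for (int k : {1, 2, 3}) {
          assert(ModByPowerOf2(x, k) == x % (int32_t{1} << k));
        }
      }
      assert(ModByPowerOf2(INT32_MIN, 3) == 0);  // no overflow on negation
      return 0;
    }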
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), eax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), edx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
+ } else {
+ return DoModI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
+ LOperand* temp = NULL;
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ temp = TempRegister();
+ }
+ LMulI* mul = new(zone()) LMulI(left, right, temp);
+ if (instr->CheckFlag(HValue::kCanOverflow) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineSameAsFirst(mul);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new(zone()) LSubI(left, right);
+ LInstruction* result = DefineSameAsFirst(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ // Check to see if it would be advantageous to use an lea instruction rather
+ // than an add. This is the case when no overflow check is needed and there
+ // are multiple uses of the add's inputs, so using a 3-register add will
+ // preserve all input values for later uses.
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ HValue* right_candidate = instr->BetterRightOperand();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ LAddI* add = new(zone()) LAddI(left, right);
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ if (can_overflow) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ HValue* right_candidate = instr->right();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ return result;
+ } else {
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+ return DefineSameAsFirst(minmax);
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ // Crankshaft is turned off when SSE2 is unavailable (nosse2).
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ LOperand* left;
+ LOperand* right;
+ if (CanBeImmediateConstant(instr->left()) &&
+ CanBeImmediateConstant(instr->right())) {
+ // The code generator requires either both inputs to be constant
+ // operands, or neither.
+ left = UseConstant(instr->left());
+ right = UseConstant(instr->right());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsUndetectableAndBranch(
+ UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+
+ LStringCompareAndBranch* result = new(zone())
+ LStringCompareAndBranch(context, left, right);
+
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LHasInstanceTypeAndBranch(
+ UseRegisterAtStart(instr->value()),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+ TempRegister(),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* date = UseFixed(instr->value(), eax);
+ LDateField* result =
+ new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
+ if (instr->encoding() == String::ONE_BYTE_ENCODING) {
+ if (FLAG_debug_code) {
+ return UseFixed(instr->value(), eax);
+ } else {
+ return UseFixedOrConstant(instr->value(), eax);
+ }
+ } else {
+ if (FLAG_debug_code) {
+ return UseRegisterAtStart(instr->value());
+ } else {
+ return UseRegisterOrConstantAtStart(instr->value());
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = GetSeqStringSetCharOperand(instr);
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
+ LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+ index, value);
+ if (FLAG_debug_code) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = index->IsConstantOperand()
+ ? UseAtStart(instr->length())
+ : UseOrConstantAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
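Independent of the deopt bookkeeping above, the comparison a bounds check
compiles down to is typically a single unsigned compare: reinterpreting a signed
index as unsigned folds the 'index < 0' and 'index >= length' tests into one
branch. A minimal sketch of the trick (stated as the usual codegen idiom, not
lifted from this file):

    #include <cassert>
    #include <cstdint>

    // One unsigned compare covers both failure modes: a negative index
    // reinterprets as a huge unsigned value and so also fails idx < length.
    bool InBounds(int32_t index, int32_t length) {
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }

    int main() {
      assert(InBounds(0, 4));
      assert(InBounds(3, 4));
      assert(!InBounds(4, 4));
      assert(!InBounds(-1, 4));  // wraps to 0xFFFFFFFF and fails the compare
      return 0;
    }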
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // This is the control instruction that marks the end of a block which
+ // completed abruptly (e.g., threw an exception); nothing specific to do.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+ HValue* val = instr->value();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(val);
+ LOperand* temp = TempRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineSameAsFirst(new(zone()) LTaggedToI(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
+ return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ bool truncating = instr->CanTruncateToInt32();
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!truncating) result = AssignEnvironment(result);
+ return result;
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegister(val);
+ LOperand* temp = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
+ return AssignPointerMap(DefineSameAsFirst(result));
+ } else {
+ LOperand* value = UseRegister(val);
+ LOperand* temp = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
+ return AssignPointerMap(DefineSameAsFirst(result));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else {
+ ASSERT(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+ } else {
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+ }
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
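Most of the Integer32-to-tagged cases above reduce to Smi tagging: on 32-bit
targets a small integer is stored shifted left one bit with tag bit 0 clear, so
only 31 bits of payload fit, and kCanOverflow decides whether the shift can
fail. A sketch of tag/untag with an explicit range check (the generated code
checks the CPU overflow flag instead):

    #include <cassert>
    #include <cstdint>

    // 32-bit Smi: value << 1 with tag bit 0 == 0; heap pointers have bit 0
    // set. Values outside [-2^30, 2^30 - 1] cannot be tagged and need a
    // heap number (LNumberTagI/LNumberTagU) or a deopt.
    bool TrySmiTag(int32_t value, int32_t* tagged) {
      if (value > 0x3FFFFFFF || value < -0x40000000) return false;
      *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      return true;
    }

    int32_t SmiUntag(int32_t tagged) {
      return tagged >> 1;  // arithmetic shift restores the sign
    }

    int main() {
      int32_t t;
      assert(TrySmiTag(12345, &t) && SmiUntag(t) == 12345);
      assert(TrySmiTag(-1, &t) && SmiUntag(t) == -1);
      assert(!TrySmiTag(0x40000000, &t));  // 2^30 does not fit
      return 0;
    }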
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseAtStart(instr->value());
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ // If the object is in new space, we'll emit a global cell compare and so
+ // want the value in a register. If the object gets promoted before we
+ // emit code, we will still get the register but will do an immediate
+ // compare instead of the cell compare. This is safe.
+ LOperand* value = instr->object_in_new_space()
+ ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ if (input_rep.IsDouble()) {
+ UNREACHABLE();
+ return NULL;
+ } else if (input_rep.IsInteger32()) {
+ LOperand* reg = UseFixed(value, eax);
+ return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
+ } else {
+ ASSERT(input_rep.IsSmiOrTagged());
+ LOperand* reg = UseRegister(instr->value());
+ LClampTToUint8NoSSE2* res =
+ new(zone()) LClampTToUint8NoSSE2(reg, TempRegister(),
+ TempRegister(), TempRegister());
+ return AssignEnvironment(DefineFixed(res, ecx));
+ }
+}
+
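Clamping is not the plain truncation used for other element kinds: UINT8_CLAMPED
stores saturate at the range ends instead of wrapping modulo 256 (doubles
additionally round to nearest even, which this sketch does not model). The
integer path that LClampIToUint8 implements:

    #include <cassert>
    #include <cstdint>

    // Saturating int32 -> uint8, per the UINT8_CLAMPED element kind:
    // out-of-range values pin to 0 or 255 rather than wrapping.
    uint8_t ClampToUint8(int32_t v) {
      if (v < 0) return 0;
      if (v > 255) return 255;
      return static_cast<uint8_t>(v);
    }

    int main() {
      assert(ClampToUint8(-5) == 0);
      assert(ClampToUint8(300) == 255);
      assert(ClampToUint8(128) == 128);
      return 0;
    }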
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(
+ UseFixed(instr->value(), eax), context, parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ double value = instr->DoubleValue();
+ bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
+ LOperand* temp = value_is_zero ? NULL : TempRegister();
+ return DefineAsRegister(new(zone()) LConstantD(temp));
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new(zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* global_object = UseFixed(instr->global_object(), edx);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LStoreGlobalCell* result =
+ new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* value;
+ LOperand* temp;
+ LOperand* context = UseRegister(instr->context());
+ if (instr->NeedsWriteBarrier()) {
+ value = UseTempRegister(instr->value());
+ temp = TempRegister();
+ } else {
+ value = UseRegister(instr->value());
+ temp = NULL;
+ }
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* obj = (instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0)
+ ? UseRegisterOrConstantAtStart(instr->object())
+ : UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
+ LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
+ TempRegister())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ LInstruction* result = NULL;
+
+ if (!instr->is_typed_elements()) {
+ LOperand* obj = UseRegisterAtStart(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
+ (instr->representation().IsDouble() &&
+ (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ }
+
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((instr->elements_kind() == EXTERNAL_UINT32_ELEMENTS ||
+ instr->elements_kind() == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
+ LOperand* key = UseFixed(instr->key(), ecx);
+
+ LLoadKeyedGeneric* result =
+ new(zone()) LLoadKeyedGeneric(context, object, key);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+
+ // Determine whether the value needs a byte register in this case.
+ bool val_is_fixed_register =
+ elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS;
+ if (val_is_fixed_register) {
+ return UseFixed(instr->value(), eax);
+ }
+
+ if (IsDoubleOrFloatElementsKind(elements_kind)) {
+ return UseRegisterAtStart(instr->value());
+ }
+
+ return UseRegister(instr->value());
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_typed_elements()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsSmi());
+
+ if (instr->value()->representation().IsDouble()) {
+ LOperand* object = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseRegisterAtStart(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyed(object, key, val);
+ } else {
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+ LOperand* obj = UseRegister(instr->elements());
+ LOperand* val;
+ LOperand* key;
+ if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ val = UseRegisterOrConstantAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ return new(zone()) LStoreKeyed(obj, key, val);
+ }
+ }
+
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
+
+ LOperand* backing_store = UseRegister(instr->elements());
+ LOperand* val = GetStoreKeyedValueOperand(instr);
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
+ LOperand* key = UseFixed(instr->key(), ecx);
+ LOperand* value = UseFixed(instr->value(), eax);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ LStoreKeyedGeneric* result =
+ new(zone()) LStoreKeyedGeneric(context, object, key, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LOperand* temp_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL,
+ new_map_reg, temp_reg);
+ return result;
+ } else {
+ LOperand* object = UseFixed(instr->object(), eax);
+ LOperand* context = UseFixed(instr->context(), esi);
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+ return MarkAsCall(result, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
+ bool is_external_location = instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0;
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ bool needs_write_barrier_for_map = instr->has_transition() &&
+ instr->NeedsWriteBarrierForMap();
+
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = is_in_object
+ ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else if (is_external_location) {
+ ASSERT(!is_in_object);
+ ASSERT(!needs_write_barrier);
+ ASSERT(!needs_write_barrier_for_map);
+ obj = UseRegisterOrConstant(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map
+ ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
+
+ bool can_be_constant = instr->value()->IsConstant() &&
+ HConstant::cast(instr->value())->NotInNewSpace() &&
+ !instr->field_representation().IsDouble();
+
+ LOperand* val;
+ if (instr->field_representation().IsInteger8() ||
+ instr->field_representation().IsUInteger8()) {
+ // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+ // Just force the value to be in eax and we're safe here.
+ val = UseFixed(instr->value(), eax);
+ } else if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ } else if (can_be_constant) {
+ val = UseRegisterOrConstant(instr->value());
+ } else if (instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
+ } else if (instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
+
+ // We need a scratch register only if we have a write barrier or we
+ // store into the properties array (i.e., not an in-object property).
+ LOperand* temp = (!is_in_object || needs_write_barrier ||
+ needs_write_barrier_for_map) ? TempRegister() : NULL;
+
+ // We need a temporary register for write barrier of the map field.
+ LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+ return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+}
+
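All the UseTempRegister/TempRegister pressure above serves the write barrier: a
generational collector must record stores that may create old-to-new pointers so
the next scavenge can find them. V8 of this era records slots in a store buffer
with remembered sets; the card table below is only a toy model of the same idea,
with assumed constants:

    #include <cstdint>
    #include <cstdio>

    // Toy write barrier: mark the "card" covering a written slot so the GC
    // rescans that region for old->new pointers. Real V8 instead records
    // precise slot addresses in a store buffer.
    constexpr int kCardShift = 9;             // assumed 512-byte cards
    constexpr uint32_t kNumCards = 1u << 10;  // toy 512 KiB heap
    static uint8_t card_table[kNumCards];

    void WriteBarrier(uintptr_t slot) {
      card_table[(slot >> kCardShift) % kNumCards] = 1;
    }

    int main() {
      uintptr_t slot = 0x12345;
      WriteBarrier(slot);
      std::printf("card %u marked\n",
                  static_cast<unsigned>((slot >> kCardShift) % kNumCards));
      return 0;
    }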
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
+ LOperand* value = UseFixed(instr->value(), eax);
+
+ LStoreNamedGeneric* result =
+ new(zone()) LStoreNamedGeneric(context, object, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
+ return MarkAsCall(DefineFixed(string_add, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseTempRegister(instr->string());
+ LOperand* index = UseTempRegister(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
+ LOperand* temp = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(context, size, temp);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor();
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor->GetParameterRegister(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kNotEnoughSpillSlotsForOsr);
+ spill_index = 0;
+ }
+ if (spill_index == 0) {
+ // The dynamic frame alignment state overwrites the first local.
+ // The first local is saved at the end of the unoptimized frame.
+ spill_index = graph()->osr()->UnoptimizedFrameSlots();
+ }
+ }
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LCallStub* result = new(zone()) LCallStub(context);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* args = UseRegister(instr->arguments());
+ LOperand* length;
+ LOperand* index;
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ length = UseRegisterOrConstant(instr->length());
+ index = UseOrConstant(instr->index());
+ } else {
+ length = UseTempRegister(instr->length());
+ index = Use(instr->index());
+ }
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), eax);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* value = UseAtStart(instr->value());
+ LTypeof* result = new(zone()) LTypeof(context, value);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new(zone()) LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
+ instr->function(),
+ undefined,
+ instr->inlining_kind());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LDrop(argument_count);
+ ASSERT(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+ return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->enumerable(), eax);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, esi), instr);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/lithium-x87.h b/chromium/v8/src/x87/lithium-x87.h
new file mode 100644
index 00000000000..8c992b8f6b7
--- /dev/null
+++ b/chromium/v8/src/x87/lithium-x87.h
@@ -0,0 +1,2888 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_LITHIUM_X87_H_
+#define V8_X87_LITHIUM_X87_H_
+
+#include "src/hydrogen.h"
+#include "src/lithium-allocator.h"
+#include "src/lithium.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(AllocateBlockContext) \
+ V(Allocate) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
+ V(CallFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CheckInstanceType) \
+ V(CheckMaps) \
+ V(CheckMapValue) \
+ V(CheckNonSmi) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8NoSSE2) \
+ V(ClassOfTestAndBranch) \
+ V(ClobberDoubles) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyed) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadRoot) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulI) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreFrameContext) \
+ V(StoreGlobalCell) \
+ V(StoreKeyed) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(hydrogen_value()); \
+ }
+
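The instruction list and the DECLARE_* macros above are the classic "X macro"
pattern: a single authoritative list of names is expanded with different
per-entry macros, so the opcode enum, the Is##type() predicates, and the cast
helpers can never drift out of sync. A compilable miniature of the same
machinery:

    #include <cstdio>

    // One central list of instruction names...
    #define INSTRUCTION_LIST(V) \
      V(AddI)                   \
      V(SubI)                   \
      V(MulI)

    // ...expanded once into an enum...
    enum Opcode {
    #define DECLARE_OPCODE(type) k##type,
      INSTRUCTION_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
      kNumberOfInstructions
    };

    // ...and again into a parallel mnemonic table.
    #define DECLARE_NAME(type) #type,
    static const char* const kMnemonics[] = {INSTRUCTION_LIST(DECLARE_NAME)};
    #undef DECLARE_NAME

    int main() {
      for (int i = 0; i < kNumberOfInstructions; ++i)
        std::printf("%d -> %s\n", i, kMnemonics[i]);
      return 0;
    }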
+
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ }
+
+ virtual ~LInstruction() {}
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall() ||
+ // We only have rudimentary X87Stack tracking, thus in general
+ // cannot handle phi-nodes.
+ (IsControl());
+ }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ bool HasDoubleRegisterResult();
+ bool HasDoubleRegisterInput();
+ bool IsDoubleInput(X87Register reg, LCodeGen* cgen);
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ private:
+ // Iterator support.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int bit_field_;
+};
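+
+// A sketch of how the IsCallBits field above is used. BitField<bool, 0, 1>
+// packs the flag into bit 0 of bit_field_:
+//
+//   int bits = IsCallBits::encode(false);    // bits == 0
+//   bits = IsCallBits::update(bits, true);   // set bit 0; bits == 1
+//   bool is_call = IsCallBits::decode(bits); // is_call == true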
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+};
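+
+// For illustration, a hypothetical LExampleOp with one result, two inputs
+// and one temp (not a real instruction in this file) would be declared as:
+//
+//   class LExampleOp V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+//    public:
+//     LExampleOp(LOperand* left, LOperand* right, LOperand* temp) {
+//       inputs_[0] = left;
+//       inputs_[1] = right;
+//       temps_[0] = temp;
+//     }
+//   };
+//
+// which is exactly the shape of concrete classes such as LMulI below.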
+
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block) : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ // Can't use the DECLARE-macro here because of sub-classes.
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
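+
+// Typical use when building gap moves (a sketch; `gap` and `zone` stand for
+// a gap instruction and the allocation zone in scope at the call site):
+//
+//   LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone);
+//   move->AddMove(source_operand, destination_operand, zone);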
+
+
+class LInstructionGap V8_FINAL : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LClobberDoubles(Isolate* isolate) { }
+
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return true;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
+};
+
+
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+
+ int block_id() const { return block_->block_id(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return false;
+ }
+
+ bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
+
+ private:
+ HBasicBlock* block_;
+};
+
+
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+};
+
+
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel V8_FINAL : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
+
+ Label* false_label_;
+ Label* true_label_;
+};
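+
+// In the code generator a control instruction is typically compiled into a
+// conditional jump against the lazily created labels (a sketch; `instr` and
+// `chunk_` stand for the instruction and LChunk in scope at the call site):
+//
+//   __ j(equal, instr->TrueLabel(chunk_));
+//   __ jmp(instr->FalseLabel(chunk_));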
+
+
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LWrapReceiver(LOperand* receiver,
+ LOperand* function,
+ LOperand* temp) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ temps_[0] = temp;
+ }
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+};
+
+
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+};
+
+
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LModI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->representation().IsDouble();
+ }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloor(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathRound(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathPowHalf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+};
+
+
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+  LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ LOperand* context() { return inputs_[0]; }
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp)
+      : lazy_deopt_env_(NULL) {
+    inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+ return lazy_deopt_env_;
+ }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
+ lazy_deopt_env_ = env;
+ }
+
+ private:
+ LEnvironment* lazy_deopt_env_;
+};
+
+
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ Token::Value op() const { return hydrogen()->op(); }
+};
+
+
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ Token::Value op() const { return op_; }
+ bool can_deopt() const { return can_deopt_; }
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+ public:
+ explicit LConstantD(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+
+class LBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpMapAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDateField(LOperand* date, LOperand* temp, Smi* index)
+ : index_(index) {
+ inputs_[0] = date;
+ temps_[0] = temp;
+ }
+
+ LOperand* date() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ Smi* index() const { return index_; }
+
+ private:
+ Smi* index_;
+};
+
+
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ static bool UseLea(HAdd* add) {
+ return !add->CheckFlag(HValue::kCanOverflow) &&
+ add->BetterLeftOperand()->UseCount() > 1;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
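+
+// Note on UseLea() above: `lea` computes left + right in one instruction
+// without clobbering either source and without setting flags. It therefore
+// cannot be used when overflow must be detected, and it only pays off when
+// the left operand is live afterwards (use count > 1), since a plain `add`
+// would overwrite it.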
+
+
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ Token::Value op() const { return op_; }
+
+ private:
+ Token::Value op_;
+};
+
+
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value,
+          LOperand* context,
+          LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+ DECLARE_HYDROGEN_ACCESSOR(Return)
+};
+
+
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ inputs_[0] = function;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+ bool is_external() const {
+ return hydrogen()->is_external();
+ }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+ bool key_is_smi() {
+ return hydrogen()->key()->representation().IsTagged();
+ }
+};
+
+
+inline static bool ExternalArrayOpRequiresTemp(
+ Representation key_representation,
+ ElementsKind elements_kind) {
+ // Operations that require the key to be divided by two to be converted into
+ // an index cannot fold the scale operation into a load and need an extra
+ // temp register to do the work.
+ return key_representation.IsSmi() &&
+ (elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS);
+}
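+
+// Worked example: on ia32 a smi stores the integer shifted left by one, so
+// the key 7 is represented as 14. For 4-byte elements the offset 7 * 4 can
+// be folded into the addressing mode as 14 * scale-2, but for the 1-byte
+// element kinds above the index would be 14 / 2, and x86 addressing modes
+// can only multiply by 1, 2, 4 or 8; hence the extra temp for the untagged
+// key.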
+
+
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+};
+
+
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreGlobalCell(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
+ LOperand* target() const { return inputs_[0]; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+};
+
+
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return true;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberTagI(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberTagU(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
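+
+// "Truncating" means rounding toward zero as in the ECMAScript ToInt32
+// operation, e.g. 1.9 -> 1 and -1.9 -> -1. A non-truncating conversion
+// typically deoptimizes instead when the double has no exact int32
+// representation.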
+
+
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LTaggedToI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberUntagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ bool needs_check() const { return needs_check_; }
+
+ private:
+ bool needs_check_;
+};
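+
+// Smi tagging on ia32 stores the integer shifted left by one (tag bit 0 is
+// clear), so untagging is an arithmetic shift right by one: the integer 5 is
+// tagged as 10 (0b1010), and `sar reg, 1` recovers 5. needs_check() is true
+// when the input might not be a smi, in which case the tag bit must be
+// tested before untagging.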
+
+
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LStoreNamedField(LOperand* obj,
+ LOperand* val,
+ LOperand* temp,
+ LOperand* temp_map) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ temps_[0] = temp;
+ temps_[1] = temp_map;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp_map() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+ bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+};
+
+
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* object,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* context,
+ LOperand* new_map_temp,
+ LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+ LOperand* temp() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
+};
+
+
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckValue(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LCheckInstanceType(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckMaps(LOperand* value = NULL) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+// Clamps a tagged value to a uint8 on the non-SSE2 (x87) code path.
+class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LClampTToUint8NoSSE2(LOperand* unclamped,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* scratch() { return temps_[0]; }
+ LOperand* scratch2() { return temps_[1]; }
+ LOperand* scratch3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2,
+ "clamp-t-to-uint8-nosse2")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LTypeofIsAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk V8_FINAL : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph),
+ num_double_slots_(0) { }
+
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+
+ int num_double_slots() const { return num_double_slots_; }
+
+ private:
+ int num_double_slots_;
+};
+
+
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ next_block_(NULL),
+ allocator_(allocator) { }
+
+ Isolate* isolate() const { return graph_->isolate(); }
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ LPlatformChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ void Abort(BailoutReason reason);
+
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(X87Register reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction, so the register allocator will not reuse its register
+  // for any other operand inside that instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only at
+  // the instruction start, so the register allocator is free to assign the
+  // same register to some other operand used inside the instruction (i.e. a
+  // temporary or the output). See the sketch below.
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
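+  // A hypothetical sketch of how a Do* method might choose between the two;
+  // the names are illustrative, not code from this file:
+  //   LOperand* left = UseRegisterAtStart(instr->left());  // start-only
+  //   LOperand* right = UseRegister(instr->right());       // whole lifetime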
+
+ // An input operand in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a fixed register or a constant operand.
+ MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
+ Register fixed_register);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+ // An input operand in register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr);
+ // Assigns an environment to an instruction. An instruction which can
+ // deoptimize must have an environment.
+ LInstruction* AssignEnvironment(LInstruction* instr);
+ // Assigns a pointer map to an instruction. An instruction which can
+ // trigger a GC or a lazy deoptimization must have a pointer map.
+ LInstruction* AssignPointerMap(LInstruction* instr);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
+
+ // Marks a call for the register allocator. Assigns a pointer map to
+ // support GC and lazy deoptimization. Assigns an environment to support
+ // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+ void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
+
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr);
+
+ LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
+
+ LPlatformChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ HBasicBlock* next_block_;
+ LAllocator* allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} } // namespace v8::internal
+
+#endif // V8_X87_LITHIUM_X87_H_
diff --git a/chromium/v8/src/x87/macro-assembler-x87.cc b/chromium/v8/src/x87/macro-assembler-x87.cc
new file mode 100644
index 00000000000..06bd774a552
--- /dev/null
+++ b/chromium/v8/src/x87/macro-assembler-x87.cc
@@ -0,0 +1,3301 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
+#include "src/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// MacroAssembler implementation.
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
+ generating_stub_(false),
+ has_frame_(false) {
+ if (isolate() != NULL) {
+ // TODO(titzer): should we just use a null handle here instead?
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
+}
+
+
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsx_b(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzx_b(dst, src);
+ } else if (r.IsInteger16()) {
+ movsx_w(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzx_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ mov_b(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ mov_w(dst, src);
+ } else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+ if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ mov(destination, value);
+ return;
+ }
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(destination, Immediate(index));
+ mov(destination, Operand::StaticArray(destination,
+ times_pointer_size,
+ roots_array_start));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Register scratch,
+ Heap::RootListIndex index) {
+ ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(index));
+ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+ source);
+}
+
+
+void MacroAssembler::CompareRoot(Register with,
+ Register scratch,
+ Heap::RootListIndex index) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(index));
+ cmp(with, Operand::StaticArray(scratch,
+ times_pointer_size,
+ roots_array_start));
+}
+
+
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+ ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+ ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
+void MacroAssembler::InNewSpace(
+ Register object,
+ Register scratch,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == equal || cc == not_equal);
+ if (scratch.is(object)) {
+ and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ } else {
+ mov(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, object);
+ }
+ // Check that we can use a test_b.
+ ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
+ ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+ int mask = (1 << MemoryChunk::IN_FROM_SPACE)
+ | (1 << MemoryChunk::IN_TO_SPACE);
+ // If non-zero, the page belongs to new-space.
+ test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+ static_cast<uint8_t>(mask));
+ j(cc, condition_met, condition_met_distance);
+}
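+// Rough C sketch of the test above (the accessor name is an assumption; the
+// real MemoryChunk layout lives in spaces.h):
+//   MemoryChunk* chunk = (MemoryChunk*)(object & ~Page::kPageAlignmentMask);
+//   bool in_new_space = chunk->flags() &
+//       ((1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE));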
+
+
+void MacroAssembler::RememberedSetHelper(
+ Register object, // Only used for debug checks.
+ Register addr,
+ Register scratch,
+ MacroAssembler::RememberedSetFinalAction and_then) {
+ Label done;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ mov(scratch, Operand::StaticVariable(store_buffer));
+ // Store pointer to buffer.
+ mov(Operand(scratch, 0), addr);
+ // Increment buffer top.
+ add(scratch, Immediate(kPointerSize));
+ // Write back new top of buffer.
+ mov(Operand::StaticVariable(store_buffer), scratch);
+ // Call stub on end of buffer.
+ // Check for end of buffer.
+ test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kReturnAtEnd) {
+ Label buffer_overflowed;
+ j(not_equal, &buffer_overflowed, Label::kNear);
+ ret(0);
+ bind(&buffer_overflowed);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ j(equal, &done, Label::kNear);
+ }
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(isolate());
+ CallStub(&store_buffer_overflow);
+ if (and_then == kReturnAtEnd) {
+ ret(0);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ bind(&done);
+ }
+}
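+// Summary of the fast path above: append the slot address at the store buffer
+// top, bump the top, and only call StoreBufferOverflowStub when the new top
+// has kStoreBufferOverflowBit set; otherwise return or fall through directly.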
+
+
+void MacroAssembler::ClampUint8(Register reg) {
+ Label done;
+ test(reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ setcc(negative, reg); // 1 if negative, 0 if positive.
+ dec_b(reg); // 0 if negative, 255 if positive.
+ bind(&done);
+}
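+// Equivalent C sketch of the branchless clamp above (assumes 32-bit two's
+// complement int):
+//   if ((v & 0xFFFFFF00) == 0) return (uint8_t)v;  // already in 0..255
+//   uint8_t r = (v < 0);                           // setcc(negative)
+//   return (uint8_t)(r - 1);                       // dec_b: 1 -> 0, 0 -> 255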
+
+
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
+ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
+ sub(esp, Immediate(kDoubleSize));
+ fst_d(MemOperand(esp, 0));
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+}
+
+
+void MacroAssembler::X87TOSToI(Register result_reg,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ Label done;
+ sub(esp, Immediate(kPointerSize));
+ fld(0);
+ fist_s(MemOperand(esp, 0));
+ fild_s(MemOperand(esp, 0));
+ pop(result_reg);
+ FCmp();
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst);
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ // To check for minus zero, we load the value again as float, and check
+ // if that is still 0.
+ sub(esp, Immediate(kPointerSize));
+ fst_s(MemOperand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(not_zero, conversion_failed, dst);
+ }
+ bind(&done);
+}
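+// The exactness test above rounds ST(0) to int32 (fist_s), reloads that int
+// as a double (fild_s), and compares with the original: inequality means
+// precision was lost, and parity (unordered) means NaN. The extra reload as a
+// float catches -0.0, whose single-precision bit pattern is non-zero.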
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+  Label done;
+
+ SlowTruncateToI(result_reg, input_reg);
+ bind(&done);
+}
+
+
+void MacroAssembler::TaggedToI(Register result_reg,
+ Register input_reg,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision) {
+ Label done;
+
+ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, lost_precision, Label::kNear);
+
+ // TODO(olivf) Converting a number on the fpu is actually quite slow. We
+ // should first try a fast conversion and then bailout to this slow case.
+ Label lost_precision_pop, zero_check;
+ Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
+ ? &lost_precision_pop : lost_precision;
+ sub(esp, Immediate(kPointerSize));
+ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
+ fist_s(MemOperand(esp, 0));
+ fild_s(MemOperand(esp, 0));
+ FCmp();
+ pop(result_reg);
+ j(not_equal, lost_precision_int, Label::kNear);
+ j(parity_even, lost_precision_int, Label::kNear); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(zero, &zero_check, Label::kNear);
+ fstp(0);
+ jmp(&done, Label::kNear);
+ bind(&zero_check);
+ // To check for minus zero, we load the value again as float, and check
+ // if that is still 0.
+ sub(esp, Immediate(kPointerSize));
+ fstp_s(Operand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(zero, &done, Label::kNear);
+ jmp(lost_precision, Label::kNear);
+
+ bind(&lost_precision_pop);
+ fstp(0);
+ jmp(lost_precision, Label::kNear);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadUint32NoSSE2(Register src) {
+ Label done;
+ push(src);
+ fild_s(Operand(esp, 0));
+ cmp(src, Immediate(0));
+ j(not_sign, &done, Label::kNear);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ fld_d(Operand::StaticVariable(uint32_bias));
+ faddp(1);
+ bind(&done);
+ add(esp, Immediate(kPointerSize));
+}
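+// fild_s interprets the 32 bits as signed, so a value with the sign bit set
+// comes in as (value - 2^32); adding the stored uint32 bias (presumably 2^32
+// as a double, see address_of_uint32_bias) corrects it back to the unsigned
+// value.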
+
+
+void MacroAssembler::RecordWriteArray(
+ Register object,
+ Register value,
+ Register index,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ test(value, Immediate(kSmiTagMask));
+ j(zero, &done);
+ }
+
+ // Array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+ // into an array of words.
+ Register dst = index;
+ lea(dst, Operand(object, index, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+
+ RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done, Label::kNear);
+ }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ lea(dst, FieldOperand(object, offset));
+ if (emit_debug_code()) {
+ Label ok;
+ test_b(dst, (1 << kPointerSizeLog2) - 1);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+void MacroAssembler::RecordWriteForMap(
+ Register object,
+ Handle<Map> map,
+ Register scratch1,
+ Register scratch2) {
+ Label done;
+
+ Register address = scratch1;
+ Register value = scratch2;
+ if (emit_debug_code()) {
+ Label ok;
+ lea(address, FieldOperand(object, HeapObject::kMapOffset));
+ test_b(address, (1 << kPointerSizeLog2) - 1);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
+ AssertNotSmi(object);
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ // Compute the address.
+ lea(address, FieldOperand(object, HeapObject::kMapOffset));
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
+  // A single check of the map's page's interesting flag suffices, since it is
+  // only set during incremental collection, and then the from-object's page's
+  // interesting flag is guaranteed to be set as well. This optimization
+  // relies on the fact that maps can never be in new space.
+ ASSERT(!isolate()->heap()->InNewSpace(*map));
+ CheckPageFlagForMap(map,
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET);
+ CallStub(&stub);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
+ AssertNotSmi(object);
+
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ Label ok;
+ cmp(value, Operand(address, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ // Skip barrier if writing a smi.
+ JumpIfSmi(value, &done, Label::kNear);
+ }
+
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+ }
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(isolate(), object, value, address,
+ remembered_set_action);
+ CallStub(&stub);
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(address, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+void MacroAssembler::DebugBreak() {
+ Move(eax, Immediate(0));
+ mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
+ CEntryStub ces(isolate(), 1);
+ call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+
+bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
+ static const int kMaxImmediateBits = 17;
+ if (!RelocInfo::IsNone(x.rmode_)) return false;
+ return !is_intn(x.x_, kMaxImmediateBits);
+}
+
+
+void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
+ if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
+ Move(dst, Immediate(x.x_ ^ jit_cookie()));
+ xor_(dst, jit_cookie());
+ } else {
+ Move(dst, x);
+ }
+}
+
+
+void MacroAssembler::SafePush(const Immediate& x) {
+ if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
+ push(Immediate(x.x_ ^ jit_cookie()));
+ xor_(Operand(esp, 0), Immediate(jit_cookie()));
+ } else {
+ push(x);
+ }
+}
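+// SafeMove/SafePush above defend against JIT spraying: untyped immediates
+// wider than 17 bits (see IsUnsafeImmediate) are XORed with the per-isolate
+// JIT cookie, and the XOR is undone by a second instruction, so the
+// attacker-chosen constant never appears verbatim in the instruction stream.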
+
+
+void MacroAssembler::CmpObjectType(Register heap_object,
+ InstanceType type,
+ Register map) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ CmpInstanceType(map, type);
+}
+
+
+void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
+ cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(type));
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastHoleyElementValue);
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastHoleySmiElementValue);
+ j(below_equal, fail, distance);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastHoleyElementValue);
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastHoleySmiElementValue);
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register maybe_number,
+ Register elements,
+ Register key,
+ Register scratch,
+ Label* fail,
+ int elements_offset) {
+ Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
+ JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+ CheckMap(maybe_number,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ cmp(FieldOperand(maybe_number, offset),
+ Immediate(kNaNOrInfinityLowerBoundUpper32));
+ j(greater_equal, &maybe_nan, Label::kNear);
+
+ bind(&not_nan);
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ bind(&have_double_value);
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ j(greater, &is_nan, Label::kNear);
+ cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+ j(zero, &not_nan);
+ bind(&is_nan);
+ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ jmp(&have_double_value, Label::kNear);
+
+ bind(&smi_value);
+ // Value is a smi. Convert to a double and store.
+ // Preserve original value.
+ mov(scratch, maybe_number);
+ SmiUntag(scratch);
+ push(scratch);
+ fild_s(Operand(esp, 0));
+ pop(scratch);
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+ bind(&done);
+}
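+// Note on the NaN path above: any NaN loaded from the heap number is replaced
+// with the canonical non-hole NaN before the store, so a user-provided NaN
+// bit pattern can never alias the hole marker used by FixedDoubleArray.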
+
+
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
+ cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+
+ CompareMap(obj, map);
+ j(not_equal, fail);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Register unused,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+ j(equal, success);
+
+ bind(&fail);
+}
+
+
+Condition MacroAssembler::IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ test(instance_type, Immediate(kIsNotStringMask));
+ return zero;
+}
+
+
+Condition MacroAssembler::IsObjectNameType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
+ return below_equal;
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
+ sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ cmp(scratch,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ j(above, fail);
+}
+
+
+void MacroAssembler::FCmp() {
+ fucompp();
+ push(eax);
+ fnstsw_ax();
+ sahf();
+ pop(eax);
+}
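+// FCmp compares ST(0) with ST(1) and pops both (fucompp), then routes the FPU
+// condition bits into EFLAGS via ax (fnstsw_ax + sahf) while preserving eax,
+// so callers can use ordinary j(equal/parity_even/...) afterwards.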
+
+
+void MacroAssembler::AssertNumber(Register object) {
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfSmi(object, &ok);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(equal, kOperandNotANumber);
+ bind(&ok);
+ }
+}
+
+
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(equal, kOperandIsNotASmi);
+ }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAString);
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(below, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAName);
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, LAST_NAME_TYPE);
+ pop(object);
+ Check(below_equal, kOperandIsNotAName);
+ }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ cmp(FieldOperand(object, 0),
+ Immediate(isolate()->factory()->allocation_site_map()));
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmi);
+ }
+}
+
+
+void MacroAssembler::StubPrologue() {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+  PredictableCodeSizeScope predictable_code_size_scope(this,
+                                          kNoCodeAgeSequenceLength);
+ if (code_pre_aging) {
+ // Pre-age the code.
+ call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+ } else {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(edi); // Callee's JS function.
+ }
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ push(ebp);
+ mov(ebp, esp);
+ push(esi);
+ push(Immediate(Smi::FromInt(type)));
+ push(Immediate(CodeObject()));
+ if (emit_debug_code()) {
+ cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
+ Check(not_equal, kCodeObjectNotProperlyPatched);
+ }
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ if (emit_debug_code()) {
+ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(type)));
+ Check(equal, kStackFrameTypesMustMatch);
+ }
+ leave();
+}
+
+
+void MacroAssembler::EnterExitFramePrologue() {
+ // Set up the frame structure on the stack.
+ ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ push(ebp);
+ mov(ebp, esp);
+
+ // Reserve room for entry stack pointer and push the code object.
+ ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ push(Immediate(0)); // Saved entry sp, patched before call.
+ push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
+
+ // Save the frame pointer and the context in top.
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
+ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
+ mov(Operand::StaticVariable(context_address), esi);
+}
+
+
+void MacroAssembler::EnterExitFrameEpilogue(int argc) {
+ sub(esp, Immediate(argc * kPointerSize));
+
+ // Get the required frame alignment for the OS.
+ const int kFrameAlignment = OS::ActivationFrameAlignment();
+ if (kFrameAlignment > 0) {
+ ASSERT(IsPowerOf2(kFrameAlignment));
+ and_(esp, -kFrameAlignment);
+ }
+
+ // Patch the saved entry sp.
+ mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
+}
+
+
+void MacroAssembler::EnterExitFrame() {
+ EnterExitFramePrologue();
+
+ // Set up argc and argv in callee-saved registers.
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ mov(edi, eax);
+ lea(esi, Operand(ebp, eax, times_4, offset));
+
+ // Reserve space for argc, argv and isolate.
+ EnterExitFrameEpilogue(3);
+}
+
+
+void MacroAssembler::EnterApiExitFrame(int argc) {
+ EnterExitFramePrologue();
+ EnterExitFrameEpilogue(argc);
+}
+
+
+void MacroAssembler::LeaveExitFrame() {
+ // Get the return address from the stack and restore the frame pointer.
+ mov(ecx, Operand(ebp, 1 * kPointerSize));
+ mov(ebp, Operand(ebp, 0 * kPointerSize));
+
+ // Pop the arguments and the receiver from the caller stack.
+ lea(esp, Operand(esi, 1 * kPointerSize));
+
+ // Push the return address to get ready to return.
+ push(ecx);
+
+ LeaveExitFrameEpilogue(true);
+}
+
+
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
+ // Restore current context from top and clear it in debug mode.
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
+ if (restore_context) {
+ mov(esi, Operand::StaticVariable(context_address));
+ }
+#ifdef DEBUG
+ mov(Operand::StaticVariable(context_address), Immediate(0));
+#endif
+
+ // Clear the top frame.
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
+ isolate());
+ mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
+}
+
+
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
+ mov(esp, ebp);
+ pop(ebp);
+
+ LeaveExitFrameEpilogue(restore_context);
+}
+
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+ int handler_index) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // We will build up the handler from the bottom by pushing on the stack.
+ // First push the frame pointer and context.
+ if (kind == StackHandler::JS_ENTRY) {
+ // The frame pointer does not point to a JS frame so we save NULL for
+ // ebp. We expect the code throwing an exception to check ebp before
+ // dereferencing it to restore the context.
+ push(Immediate(0)); // NULL frame pointer.
+ push(Immediate(Smi::FromInt(0))); // No context.
+ } else {
+ push(ebp);
+ push(esi);
+ }
+ // Push the state and the code object.
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+ push(Immediate(state));
+ Push(CodeObject());
+
+ // Link the current handler as the next handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ push(Operand::StaticVariable(handler_address));
+ // Set this new handler as the current one.
+ mov(Operand::StaticVariable(handler_address), esp);
+}
+
+
+void MacroAssembler::PopTryHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ pop(Operand::StaticVariable(handler_address));
+ add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ // eax = exception, edi = code object, edx = state.
+ mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
+ shr(edx, StackHandler::kKindWidth);
+ mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
+ SmiUntag(edx);
+ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
+ jmp(edi);
+}
+
+
+void MacroAssembler::Throw(Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in eax.
+ if (!value.is(eax)) {
+ mov(eax, value);
+ }
+ // Drop the stack pointer to the top of the top handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ mov(esp, Operand::StaticVariable(handler_address));
+ // Restore the next handler.
+ pop(Operand::StaticVariable(handler_address));
+
+ // Remove the code object and state, compute the handler address in edi.
+ pop(edi); // Code object.
+ pop(edx); // Index and state.
+
+ // Restore the context and frame pointer.
+ pop(esi); // Context.
+ pop(ebp); // Frame pointer.
+
+ // If the handler is a JS frame, restore the context to the frame.
+  // (kind == JS_ENTRY) == (ebp == 0) == (esi == 0), so we could test either
+ // ebp or esi.
+ Label skip;
+ test(esi, esi);
+ j(zero, &skip, Label::kNear);
+ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ bind(&skip);
+
+ JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in eax.
+ if (!value.is(eax)) {
+ mov(eax, value);
+ }
+ // Drop the stack pointer to the top of the top stack handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Unwind the handlers until the top ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind, Label::kNear);
+ bind(&fetch_next);
+ mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ test(Operand(esp, StackHandlerConstants::kStateOffset),
+ Immediate(StackHandler::KindField::kMask));
+ j(not_zero, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(Operand::StaticVariable(handler_address));
+
+ // Remove the code object and state, compute the handler address in edi.
+ pop(edi); // Code object.
+ pop(edx); // Index and state.
+
+ // Clear the context pointer and frame pointer (0 was saved in the handler).
+ pop(esi);
+ pop(ebp);
+
+ JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ Label same_contexts;
+
+ ASSERT(!holder_reg.is(scratch1));
+ ASSERT(!holder_reg.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+
+ // Load current lexical context from the stack frame.
+ mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ // When generating debug code, make sure the lexical context is set.
+ if (emit_debug_code()) {
+ cmp(scratch1, Immediate(0));
+ Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
+ }
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ mov(scratch1, FieldOperand(scratch1, offset));
+ mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Read the first word and compare to native_context_map.
+ cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
+ isolate()->factory()->native_context_map());
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ }
+
+ // Check if both contexts are the same.
+ cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ j(equal, &same_contexts);
+
+  // Compare security tokens: check that the security token in the calling
+  // global object is compatible with the security token in the receiving
+  // global object.
+ mov(scratch2,
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ cmp(scratch2, isolate()->factory()->null_value());
+ Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
+
+    // Read the first word and compare to native_context_map().
+ cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
+ isolate()->factory()->native_context_map());
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ }
+
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+ mov(scratch1, FieldOperand(scratch1, token_offset));
+ cmp(scratch1, FieldOperand(scratch2, token_offset));
+ j(not_equal, miss);
+
+ bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and with KeyedLoadGenericElementStub in
+// code-stub-hydrogen.cc.
+//
+// Note: r0 will contain the hash code on exit.
+void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
+ // Xor original key with a seed.
+ if (serializer_enabled()) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
+ mov(scratch,
+ Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+ SmiUntag(scratch);
+ xor_(r0, scratch);
+ } else {
+ int32_t seed = isolate()->heap()->HashSeed();
+ xor_(r0, Immediate(seed));
+ }
+
+ // hash = ~hash + (hash << 15);
+ mov(scratch, r0);
+ not_(r0);
+ shl(scratch, 15);
+ add(r0, scratch);
+ // hash = hash ^ (hash >> 12);
+ mov(scratch, r0);
+ shr(scratch, 12);
+ xor_(r0, scratch);
+ // hash = hash + (hash << 2);
+ lea(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ mov(scratch, r0);
+ shr(scratch, 4);
+ xor_(r0, scratch);
+ // hash = hash * 2057;
+ imul(r0, r0, 2057);
+ // hash = hash ^ (hash >> 16);
+ mov(scratch, r0);
+ shr(scratch, 16);
+ xor_(r0, scratch);
+}
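+// Reference sketch of the scrambling above as C (assumes uint32_t wraparound
+// semantics; keep in sync with ComputeIntegerHash in utils.h):
+//   hash = ~hash + (hash << 15);
+//   hash ^= hash >> 12;
+//   hash += hash << 2;
+//   hash ^= hash >> 4;
+//   hash *= 2057;
+//   hash ^= hash >> 16;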
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver and is unchanged.
+ //
+ // key - holds the smi key on entry and is unchanged.
+ //
+ // Scratch registers:
+ //
+ // r0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // r1 - used to hold the capacity mask of the dictionary
+ //
+ // r2 - used for the index into the dictionary.
+ //
+ // result - holds the result on exit if the load succeeds and we fall through.
+
+ Label done;
+
+ GetNumberHash(r0, r1);
+
+ // Compute capacity mask.
+ mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
+ shr(r1, kSmiTagSize); // convert smi to int
+ dec(r1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+ // Use r2 for index calculations and keep the hash intact in r0.
+ mov(r2, r0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
+ }
+ and_(r2, r1);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+
+ // Check if the key matches.
+ cmp(key, FieldOperand(elements,
+ r2,
+ times_pointer_size,
+ SeededNumberDictionary::kElementsStartOffset));
+ if (i != (kNumberDictionaryProbes - 1)) {
+ j(equal, &done);
+ } else {
+ j(not_equal, miss);
+ }
+ }
+
+ bind(&done);
+  // Check that the value is a normal property.
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ ASSERT_EQ(NORMAL, 0);
+ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+ Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
+ j(not_zero, miss);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
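+// The unrolled loop above probes the open-addressed dictionary: index_i =
+// (hash + GetProbeOffset(i)) & capacity_mask, scaled by the 3-word entry size
+// (key, value, details); after kNumberDictionaryProbes failed probes, control
+// jumps to the miss label instead of looping further.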
+
+
+void MacroAssembler::LoadAllocationTopHelper(Register result,
+ Register scratch,
+ AllocationFlags flags) {
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Just return if allocation top is already known.
+ if ((flags & RESULT_CONTAINS_TOP) != 0) {
+ // No use of scratch if allocation top is provided.
+ ASSERT(scratch.is(no_reg));
+#ifdef DEBUG
+ // Assert that result actually contains top on entry.
+ cmp(result, Operand::StaticVariable(allocation_top));
+ Check(equal, kUnexpectedAllocationTop);
+#endif
+ return;
+ }
+
+ // Move address of new object to result. Use scratch register if available.
+ if (scratch.is(no_reg)) {
+ mov(result, Operand::StaticVariable(allocation_top));
+ } else {
+ mov(scratch, Immediate(allocation_top));
+ mov(result, Operand(scratch, 0));
+ }
+}
+
+
+void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
+ Register scratch,
+ AllocationFlags flags) {
+ if (emit_debug_code()) {
+ test(result_end, Immediate(kObjectAlignmentMask));
+ Check(zero, kUnalignedAllocationInNewSpace);
+ }
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Update new top. Use scratch if available.
+ if (scratch.is(no_reg)) {
+ mov(Operand::StaticVariable(allocation_top), result_end);
+ } else {
+ mov(Operand(scratch, 0), result_end);
+ }
+}
+
+
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ mov(result, Immediate(0x7091));
+ if (result_end.is_valid()) {
+ mov(result_end, Immediate(0x7191));
+ }
+ if (scratch.is_valid()) {
+ mov(scratch, Immediate(0x7291));
+ }
+ }
+ jmp(gc_required);
+ return;
+ }
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, scratch, flags);
+
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ // Calculate new top and bail out if space is exhausted.
+ Register top_reg = result_end.is_valid() ? result_end : result;
+ if (!top_reg.is(result)) {
+ mov(top_reg, result);
+ }
+ add(top_reg, Immediate(object_size));
+ j(carry, gc_required);
+ cmp(top_reg, Operand::StaticVariable(allocation_limit));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
+
+ // Tag result if requested.
+ bool tag_result = (flags & TAG_OBJECT) != 0;
+ if (top_reg.is(result)) {
+ if (tag_result) {
+ sub(result, Immediate(object_size - kHeapObjectTag));
+ } else {
+ sub(result, Immediate(object_size));
+ }
+ } else if (tag_result) {
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
+ }
+}
+
+
+void MacroAssembler::Allocate(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ RegisterValueType element_count_type,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & SIZE_IN_WORDS) == 0);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ mov(result, Immediate(0x7091));
+ mov(result_end, Immediate(0x7191));
+ if (scratch.is_valid()) {
+ mov(scratch, Immediate(0x7291));
+ }
+ // Register element_count is not modified by the function.
+ }
+ jmp(gc_required);
+ return;
+ }
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, scratch, flags);
+
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ // Calculate new top and bail out if space is exhausted.
+ // We assume that element_count*element_size + header_size does not
+ // overflow.
+ if (element_count_type == REGISTER_VALUE_IS_SMI) {
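+    // The element count is smi-tagged, i.e. already shifted left by
+    // kSmiTagSize (1), so halving the scale factor compensates for the tag.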
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
+ ASSERT(element_size >= times_2);
+ ASSERT(kSmiTagSize == 1);
+ element_size = static_cast<ScaleFactor>(element_size - 1);
+ } else {
+ ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+ }
+ lea(result_end, Operand(element_count, element_size, header_size));
+ add(result_end, result);
+ j(carry, gc_required);
+ cmp(result_end, Operand::StaticVariable(allocation_limit));
+ j(above, gc_required);
+
+ if ((flags & TAG_OBJECT) != 0) {
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
+ }
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch, flags);
+}
+
+
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ mov(result, Immediate(0x7091));
+ mov(result_end, Immediate(0x7191));
+ if (scratch.is_valid()) {
+ mov(scratch, Immediate(0x7291));
+ }
+ // object_size is left unchanged by this function.
+ }
+ jmp(gc_required);
+ return;
+ }
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, scratch, flags);
+
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ // Calculate new top and bail out if space is exhausted.
+ if (!object_size.is(result_end)) {
+ mov(result_end, object_size);
+ }
+ add(result_end, result);
+ j(carry, gc_required);
+ cmp(result_end, Operand::StaticVariable(allocation_limit));
+ j(above, gc_required);
+
+ // Tag result if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
+ }
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch, flags);
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ and_(object, Immediate(~kHeapObjectTagMask));
+#ifdef DEBUG
+ cmp(object, Operand::StaticVariable(new_space_allocation_top));
+ Check(below, kUndoAllocationOfNonAllocatedMemory);
+#endif
+ mov(Operand::StaticVariable(new_space_allocation_top), object);
+}
+
+
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate heap number in new space.
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->heap_number_map()));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT(kShortSize == 2);
+ // scratch1 = length * 2 + kObjectAlignmentMask.
+ lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
+
+ // Allocate two byte string in new space.
+ Allocate(SeqTwoByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ REGISTER_VALUE_IS_INT32,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->string_map()));
+ mov(scratch1, length);
+ SmiTag(scratch1);
+ mov(FieldOperand(result, String::kLengthOffset), scratch1);
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ mov(scratch1, length);
+ ASSERT(kCharSize == 1);
+ add(scratch1, Immediate(kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
+
+ // Allocate ASCII string in new space.
+ Allocate(SeqOneByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ REGISTER_VALUE_IS_INT32,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->ascii_string_map()));
+ mov(scratch1, length);
+ SmiTag(scratch1);
+ mov(FieldOperand(result, String::kLengthOffset), scratch1);
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ int length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(length > 0);
+
+ // Allocate ASCII string in new space.
+ Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
+ gc_required, TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->ascii_string_map()));
+ mov(FieldOperand(result, String::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+  // Allocate cons string object in new space.
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->cons_string_map()));
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->cons_ascii_string_map()));
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+  // Allocate sliced string object in new space.
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->sliced_string_map()));
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+  // Allocate sliced string object in new space.
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->sliced_ascii_string_map()));
+}
+
+
+// Copy memory, byte-by-byte, from source to destination. Not optimized for
+// long or aligned copies. The contents of scratch and length are destroyed.
+// Source and destination are incremented by length.
+// Many variants of movsb, loop unrolling, word moves, and indexed operands
+// have been tried here already, and this is fastest.
+// A simpler loop is faster on small copies, but 30% slower on large ones.
+// The cld() instruction must have been emitted before calling this function,
+// to clear the direction flag so the copy runs forwards.
+void MacroAssembler::CopyBytes(Register source,
+ Register destination,
+ Register length,
+ Register scratch) {
+ Label short_loop, len4, len8, len12, done, short_string;
+ ASSERT(source.is(esi));
+ ASSERT(destination.is(edi));
+ ASSERT(length.is(ecx));
+ cmp(length, Immediate(4));
+ j(below, &short_string, Label::kNear);
+
+ // Because source is 4-byte aligned in our uses of this function,
+ // we keep source aligned for the rep_movs call by copying the odd bytes
+ // at the end of the ranges.
+ mov(scratch, Operand(source, length, times_1, -4));
+ mov(Operand(destination, length, times_1, -4), scratch);
+
+ cmp(length, Immediate(8));
+ j(below_equal, &len4, Label::kNear);
+ cmp(length, Immediate(12));
+ j(below_equal, &len8, Label::kNear);
+ cmp(length, Immediate(16));
+ j(below_equal, &len12, Label::kNear);
+
+ mov(scratch, ecx);
+ shr(ecx, 2);
+ rep_movs();
+ and_(scratch, Immediate(0x3));
+ add(destination, scratch);
+ jmp(&done, Label::kNear);
+
+ bind(&len12);
+ mov(scratch, Operand(source, 8));
+ mov(Operand(destination, 8), scratch);
+ bind(&len8);
+ mov(scratch, Operand(source, 4));
+ mov(Operand(destination, 4), scratch);
+ bind(&len4);
+ mov(scratch, Operand(source, 0));
+ mov(Operand(destination, 0), scratch);
+ add(destination, length);
+ jmp(&done, Label::kNear);
+
+ bind(&short_string);
+ test(length, length);
+ j(zero, &done, Label::kNear);
+
+ bind(&short_loop);
+ mov_b(scratch, Operand(source, 0));
+ mov_b(Operand(destination, 0), scratch);
+ inc(source);
+ inc(destination);
+ dec(length);
+ j(not_zero, &short_loop);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ jmp(&entry);
+ bind(&loop);
+ mov(Operand(start_offset, 0), filler);
+ add(start_offset, Immediate(kPointerSize));
+ bind(&entry);
+ cmp(start_offset, end_offset);
+ j(less, &loop);
+}
+
+
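+// Tests one bit of a smi-encoded bit field stored at field_offset. The smi
+// tag shift is added to bit_index, and the test is done on the enclosing
+// byte of the field.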
+void MacroAssembler::BooleanBitTest(Register object,
+ int field_offset,
+ int bit_index) {
+ bit_index += kSmiTagSize + kSmiShiftSize;
+ ASSERT(IsPowerOf2(kBitsPerByte));
+ int byte_index = bit_index / kBitsPerByte;
+ int byte_bit_index = bit_index & (kBitsPerByte - 1);
+ test_b(FieldOperand(object, field_offset + byte_index),
+ static_cast<byte>(1 << byte_bit_index));
+}
+
+
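+// The NegativeZeroTest variants jump to then_label when result is zero and
+// the sign of the operand(s) shows the exact result would have been -0.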
+void MacroAssembler::NegativeZeroTest(Register result,
+ Register op,
+ Label* then_label) {
+ Label ok;
+ test(result, result);
+ j(not_zero, &ok);
+ test(op, op);
+ j(sign, then_label);
+ bind(&ok);
+}
+
+
+void MacroAssembler::NegativeZeroTest(Register result,
+ Register op1,
+ Register op2,
+ Register scratch,
+ Label* then_label) {
+ Label ok;
+ test(result, result);
+ j(not_zero, &ok);
+ mov(scratch, op1);
+ or_(scratch, op2);
+ j(sign, then_label);
+ bind(&ok);
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ bool miss_on_bound_function) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function.
+ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ j(not_equal, miss);
+
+ if (miss_on_bound_function) {
+ // If a bound function, go to miss label.
+ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
+ j(not_zero, miss);
+ }
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
+ test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
+ j(not_zero, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ mov(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ cmp(result, Immediate(isolate()->factory()->the_hole_value()));
+ j(equal, miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ CmpObjectType(result, MAP_TYPE, scratch);
+ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ mov(result, FieldOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ mov(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+ ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
+ call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+ ASSERT(argc >= 1 && generating_stub());
+ ret((argc - 1) * kPointerSize);
+}
+
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // The assert checks that the constants for the maximum number of digits
+ // for an array index cached in the hash field and the number of bits
+  // reserved for it do not conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ if (!index.is(hash)) {
+ mov(index, hash);
+ }
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Move(eax, Immediate(num_arguments));
+ mov(ebx, Immediate(ExternalReference(f, isolate())));
+ CEntryStub ces(isolate(), 1);
+ CallStub(&ces);
+}
+
+
+void MacroAssembler::CallExternalReference(ExternalReference ref,
+ int num_arguments) {
+ mov(eax, Immediate(num_arguments));
+ mov(ebx, Immediate(ref));
+
+ CEntryStub stub(isolate(), 1);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Move(eax, Immediate(num_arguments));
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
+}
+
+
+Operand ApiParameterOperand(int index) {
+ return Operand(esp, index * kPointerSize);
+}
+
+
+void MacroAssembler::PrepareCallApiFunction(int argc) {
+ EnterApiExitFrame(argc);
+ if (emit_debug_code()) {
+ mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
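+// Calls an API function from an exit frame: opens a new HandleScope level,
+// calls the function (through the profiler thunk when profiling is active),
+// then closes the scope, propagates any scheduled exception, and returns,
+// dropping stack_space words of arguments.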
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate());
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address(isolate());
+ ExternalReference level_address =
+ ExternalReference::handle_scope_level_address(isolate());
+
+ ASSERT(edx.is(function_address));
+ // Allocate HandleScope in callee-save registers.
+ mov(ebx, Operand::StaticVariable(next_address));
+ mov(edi, Operand::StaticVariable(limit_address));
+ add(Operand::StaticVariable(level_address), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, eax);
+ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
+ cmpb(Operand(eax, 0), 0);
+ j(zero, &profiler_disabled);
+
+ // Additional parameter is the address of the actual getter function.
+ mov(thunk_last_arg, function_address);
+ // Call the api function.
+ mov(eax, Immediate(thunk_ref));
+ call(eax);
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ // Call the api function.
+ call(function_address);
+ bind(&end_profiler_check);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, eax);
+ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label prologue;
+ // Load the value from ReturnValue
+ mov(eax, return_value_operand);
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+ bind(&prologue);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ mov(Operand::StaticVariable(next_address), ebx);
+ sub(Operand::StaticVariable(level_address), Immediate(1));
+ Assert(above_equal, kInvalidHandleScopeLevel);
+ cmp(edi, Operand::StaticVariable(limit_address));
+ j(not_equal, &delete_allocated_handles);
+ bind(&leave_exit_frame);
+
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address(isolate());
+ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(isolate()->factory()->the_hole_value()));
+ j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
+
+#if ENABLE_EXTRA_CHECKS
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = eax;
+ Register map = ecx;
+
+ JumpIfSmi(return_value, &ok, Label::kNear);
+ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ j(below, &ok, Label::kNear);
+
+ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ j(above_equal, &ok, Label::kNear);
+
+ cmp(map, isolate()->factory()->heap_number_map());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->undefined_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->true_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->false_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->null_value());
+ j(equal, &ok, Label::kNear);
+
+ Abort(kAPICallReturnedInvalidObject);
+
+ bind(&ok);
+#endif
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ mov(esi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
+ ret(stack_space * kPointerSize);
+
+ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ ExternalReference delete_extensions =
+ ExternalReference::delete_handle_scope_extensions(isolate());
+ bind(&delete_allocated_handles);
+ mov(Operand::StaticVariable(limit_address), edi);
+ mov(edi, eax);
+ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ mov(eax, Immediate(delete_extensions));
+ call(eax);
+ mov(eax, edi);
+ jmp(&leave_exit_frame);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
+ // Set the entry point and jump to the C entry runtime stub.
+ mov(ebx, Immediate(ext));
+ CEntryStub ces(isolate(), 1);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
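+// Shared prologue for the InvokeCode/InvokeFunction family: compares the
+// expected and actual argument counts and, on a possible mismatch, routes
+// the call through the arguments adaptor trampoline.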
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ const Operand& code_operand,
+ Label* done,
+ bool* definitely_mismatches,
+ InvokeFlag flag,
+ Label::Distance done_near,
+ const CallWrapper& call_wrapper) {
+ bool definitely_matches = false;
+ *definitely_mismatches = false;
+ Label invoke;
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ mov(eax, actual.immediate());
+ const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ if (expected.immediate() == sentinel) {
+ // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip the adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ *definitely_mismatches = true;
+ mov(ebx, expected.immediate());
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ // Expected is in register, actual is immediate. This is the
+ // case when we invoke function values without going through the
+ // IC mechanism.
+ cmp(expected.reg(), actual.immediate());
+ j(equal, &invoke);
+ ASSERT(expected.reg().is(ebx));
+ mov(eax, actual.immediate());
+ } else if (!expected.reg().is(actual.reg())) {
+ // Both expected and actual are in (different) registers. This
+ // is the case when we invoke functions using call and apply.
+ cmp(expected.reg(), actual.reg());
+ j(equal, &invoke);
+ ASSERT(actual.reg().is(eax));
+ ASSERT(expected.reg().is(ebx));
+ }
+ }
+
+ if (!definitely_matches) {
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (!code_constant.is_null()) {
+ mov(edx, Immediate(code_constant));
+ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ } else if (!code_operand.is_reg(edx)) {
+ mov(edx, code_operand);
+ }
+
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ call(adaptor, RelocInfo::CODE_TARGET);
+ call_wrapper.AfterCall();
+ if (!*definitely_mismatches) {
+ jmp(done, done_near);
+ }
+ } else {
+ jmp(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&invoke);
+ }
+}
+
+
+void MacroAssembler::InvokeCode(const Operand& code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ Label done;
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code,
+ &done, &definitely_mismatches, flag, Label::kNear,
+ call_wrapper);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ jmp(code);
+ }
+ bind(&done);
+ }
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ ASSERT(fun.is(edi));
+ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ SmiUntag(ebx);
+
+ ParameterCount expected(ebx);
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ ASSERT(fun.is(edi));
+ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ LoadHeapObject(edi, function);
+ InvokeFunction(edi, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Rely on the assertion to check that the number of provided
+  // arguments matches the expected number of arguments. Fake a
+ // parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ GetBuiltinFunction(edi, id);
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, expected, flag, call_wrapper);
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the JavaScript builtin function from the builtins object.
+ mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ mov(target, FieldOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ ASSERT(!target.is(edi));
+ // Load the JavaScript builtin function from the builtins object.
+ GetBuiltinFunction(edi, id);
+ // Load the code entry point from the function into the target register.
+ mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in esi).
+ mov(dst, esi);
+ }
+
+ // We should not have found a with context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (emit_debug_code()) {
+ cmp(FieldOperand(dst, HeapObject::kMapOffset),
+ isolate()->factory()->with_context_map());
+ Check(not_equal, kVariableResolvedToWithContext);
+ }
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ mov(scratch, Operand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+
+ size_t offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ cmp(map_in_out, FieldOperand(scratch, offset));
+ j(not_equal, no_map_match);
+
+ // Use the transitioned cached map.
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ mov(map_in_out, FieldOperand(scratch, offset));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ mov(function,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ mov(function,
+ FieldOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ mov(function, Operand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map) {
+ // Load the initial map. The global functions all have initial maps.
+ mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
+ jmp(&ok);
+ bind(&fail);
+ Abort(kGlobalFunctionsMustHaveInitialMap);
+ bind(&ok);
+ }
+}
+
+
+// Store the value in register src in the safepoint register stack
+// slot for register dst.
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
+ mov(SafepointRegisterSlot(dst), src);
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
+ mov(SafepointRegisterSlot(dst), src);
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ mov(dst, SafepointRegisterSlot(src));
+}
+
+
+Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // The registers are pushed starting with the lowest encoding,
+ // which means that lowest encodings are furthest away from
+ // the stack pointer.
+ ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+ return kNumSafepointRegisters - reg_code - 1;
+}
+
+
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ AllowDeferredHandleDereference embedding_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ mov(result, Operand::ForCell(cell));
+ } else {
+ mov(result, object);
+ }
+}
+
+
+void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ cmp(reg, Operand::ForCell(cell));
+ } else {
+ cmp(reg, object);
+ }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ push(Operand::ForCell(cell));
+ } else {
+ Push(object);
+ }
+}
+
+
+void MacroAssembler::Ret() {
+ ret(0);
+}
+
+
+void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
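+  // The ret instruction only encodes a 16-bit immediate, so for larger
+  // values the return address is popped, esp adjusted directly, and the
+  // address pushed back before returning.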
+ if (is_uint16(bytes_dropped)) {
+ ret(bytes_dropped);
+ } else {
+ pop(scratch);
+ add(esp, Immediate(bytes_dropped));
+ push(scratch);
+ ret(0);
+ }
+}
+
+
+void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
+ // Make sure the floating point stack is either empty or has depth items.
+ ASSERT(depth <= 7);
+ // This is very expensive.
+ ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);
+
+ // The top-of-stack (tos) is 7 if there is one item pushed.
+ int tos = (8 - depth) % 8;
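+  // The TOP-of-stack field occupies bits 11..13 of the x87 status word.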
+ const int kTopMask = 0x3800;
+ push(eax);
+ fwait();
+ fnstsw_ax();
+ and_(eax, kTopMask);
+ shr(eax, 11);
+ cmp(eax, Immediate(tos));
+ Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
+ fnclex();
+ pop(eax);
+}
+
+
+void MacroAssembler::Drop(int stack_elements) {
+ if (stack_elements > 0) {
+ add(esp, Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::Move(Register dst, Register src) {
+ if (!dst.is(src)) {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::Move(Register dst, const Immediate& x) {
+ if (x.is_zero()) {
+ xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
+ } else {
+ mov(dst, x);
+ }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
+ mov(dst, x);
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Operand operand = Operand::StaticVariable(ExternalReference(counter));
+ if (value == 1) {
+ inc(operand);
+ } else {
+ add(operand, Immediate(value));
+ }
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Operand operand = Operand::StaticVariable(ExternalReference(counter));
+ if (value == 1) {
+ dec(operand);
+ } else {
+ sub(operand, Immediate(value));
+ }
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ IncrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ DecrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
+ if (emit_debug_code()) Check(cc, reason);
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ Factory* factory = isolate()->factory();
+ Label ok;
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(factory->fixed_array_map()));
+ j(equal, &ok);
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(factory->fixed_double_array_map()));
+ j(equal, &ok);
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(factory->fixed_cow_array_map()));
+ j(equal, &ok);
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ bind(&ok);
+ }
+}
+
+
+void MacroAssembler::Check(Condition cc, BailoutReason reason) {
+ Label L;
+ j(cc, &L);
+ Abort(reason);
+ // will not return here
+ bind(&L);
+}
+
+
+void MacroAssembler::CheckStackAlignment() {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ Label alignment_as_expected;
+ test(esp, Immediate(frame_alignment_mask));
+ j(zero, &alignment_as_expected);
+ // Abort if stack is not aligned.
+ int3();
+ bind(&alignment_as_expected);
+ }
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
+#endif
+
+ push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 1);
+ } else {
+ CallRuntime(Runtime::kAbort, 1);
+ }
+ // will not return here
+ int3();
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ mov(dst, FieldOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ sub(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ mov(scratch, object);
+ SmiUntag(scratch);
+ jmp(&smi_hash_calculated, Label::kNear);
+ bind(&not_smi);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ FCmp();
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache, Label::kNear);
+
+ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ // Check if the entry is the smi we are looking for.
+ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ Label* failure) {
+ if (!scratch.is(instance_type)) {
+ mov(scratch, instance_type);
+ }
+ and_(scratch,
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
+ j(not_equal, failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that both objects are not smis.
+ STATIC_ASSERT(kSmiTag == 0);
+ mov(scratch1, object1);
+ and_(scratch1, object2);
+ JumpIfSmi(scratch1, failure);
+
+ // Load instance type for both strings.
+ mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
+ mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
+ movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ASCII strings.
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ // Interleave bits from both instance types and compare them in one check.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ and_(scratch1, kFlatAsciiStringMask);
+ and_(scratch2, kFlatAsciiStringMask);
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
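+  // scratch1 = scratch1 + (scratch2 << 3): both masked types in one word.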
+ cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+ j(not_equal, failure);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ j(zero, &succeed);
+ cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
+ j(not_equal, not_unique_name, distance);
+
+ bind(&succeed);
+}
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object, Label::kNear);
+ Abort(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmp(value, Immediate(encoding_mask));
+ pop(value);
+ Check(equal, kUnexpectedStringType);
+
+  // The index is assumed to come in untagged; tag it to compare against the
+  // string length without using a temp register. It is restored at the end
+  // of this function.
+ SmiTag(index);
+ Check(no_overflow, kIndexIsTooLarge);
+
+ cmp(index, FieldOperand(string, String::kLengthOffset));
+ Check(less, kIndexIsTooLarge);
+
+ cmp(index, Immediate(Smi::FromInt(0)));
+ Check(greater_equal, kIndexIsNegative);
+
+ // Restore the index
+ SmiUntag(index);
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ if (frame_alignment != 0) {
+ // Make stack end at alignment and make room for num_arguments words
+ // and the original value of esp.
+ mov(scratch, esp);
+ sub(esp, Immediate((num_arguments + 1) * kPointerSize));
+ ASSERT(IsPowerOf2(frame_alignment));
+ and_(esp, -frame_alignment);
+ mov(Operand(esp, num_arguments * kPointerSize), scratch);
+ } else {
+ sub(esp, Immediate(num_arguments * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ // Trashing eax is ok as it will be the return value.
+ mov(eax, Immediate(function));
+ CallCFunction(eax, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_arguments) {
+ ASSERT(has_frame());
+ // Check stack alignment.
+ if (emit_debug_code()) {
+ CheckStackAlignment();
+ }
+
+ call(function);
+ if (OS::ActivationFrameAlignment() != 0) {
+ mov(esp, Operand(esp, num_arguments * kPointerSize));
+ } else {
+ add(esp, Immediate(num_arguments * kPointerSize));
+ }
+}
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
+CodePatcher::CodePatcher(byte* address, int size)
+ : address_(address),
+ size_(size),
+ masm_(NULL, address, size + Assembler::kGap) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+ // bytes of instructions without failing with buffer size constraints.
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ CPU::FlushICache(address_, size_);
+
+ // Check that the code was patched as expected.
+ ASSERT(masm_.pc_ == address_ + size_);
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
+ if (scratch.is(object)) {
+ and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ } else {
+ mov(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, object);
+ }
+ if (mask < (1 << kBitsPerByte)) {
+ test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+ static_cast<uint8_t>(mask));
+ } else {
+ test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::CheckPageFlagForMap(
+ Handle<Map> map,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
+ Page* page = Page::FromAddress(map->address());
+ ExternalReference reference(ExternalReference::page_flags(page));
+ // The inlined static address check of the page's flags relies
+ // on maps never being compacted.
+ ASSERT(!isolate()->heap()->mark_compact_collector()->
+ IsOnEvacuationCandidate(*map));
+ if (mask < (1 << kBitsPerByte)) {
+ test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
+ } else {
+ test(Operand::StaticVariable(reference), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, map);
+ mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ and_(scratch, Immediate(Map::Deprecated::kMask));
+ j(not_zero, if_deprecated);
+ }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_near) {
+ HasColor(object, scratch0, scratch1,
+ on_black, on_black_near,
+ 1, 0); // kBlackBitPattern.
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
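+// Each object has two consecutive mark bits in its page's marking bitmap.
+// HasColor tests them against (first_bit, second_bit), handling the case
+// where the bit pair straddles a bitmap cell boundary.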
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* has_color,
+ Label::Distance has_color_distance,
+ int first_bit,
+ int second_bit) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ Label other_color, word_boundary;
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
+ add(mask_scratch, mask_scratch); // Shift left 1 by adding.
+ j(zero, &word_boundary, Label::kNear);
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+ jmp(&other_color, Label::kNear);
+
+ bind(&word_boundary);
+ test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+
+ j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+ bind(&other_color);
+}
+
+
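+// Computes the address of the marking bitmap cell covering addr_reg on its
+// page (bitmap_reg) and a one-bit mask for the object's first mark bit
+// (mask_reg). Clobbers ecx.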
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+ mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ and_(bitmap_reg, addr_reg);
+ mov(ecx, addr_reg);
+ int shift =
+ Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+ shr(ecx, shift);
+ and_(ecx,
+ (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
+
+ add(bitmap_reg, ecx);
+ mov(ecx, addr_reg);
+ shr(ecx, kPointerSizeLog2);
+ and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
+ mov(mask_reg, Immediate(1));
+ shl_cl(mask_reg);
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* value_is_white_and_not_data,
+ Label::Distance distance) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(not_zero, &done, Label::kNear);
+
+ if (emit_debug_code()) {
+ // Check for impossible bit pattern.
+ Label ok;
+ push(mask_scratch);
+    // Shift left by one, via add. The shift may overflow, which makes the
+    // check conservative.
+ add(mask_scratch, mask_scratch);
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ pop(mask_scratch);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = ecx; // Holds map while checking type.
+ Register length = ecx; // Holds length of object after checking type.
+ Label not_heap_number;
+ Label is_data_object;
+
+ // Check for heap-number
+ mov(map, FieldOperand(value, HeapObject::kMapOffset));
+ cmp(map, isolate()->factory()->heap_number_map());
+ j(not_equal, &not_heap_number, Label::kNear);
+ mov(length, Immediate(HeapNumber::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_heap_number);
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = ecx;
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
+ j(not_zero, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ Label not_external;
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ test_b(instance_type, kExternalStringTag);
+ j(zero, &not_external, Label::kNear);
+ mov(length, Immediate(ExternalString::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_external);
+ // Sequential string, either ASCII or UC16.
+ ASSERT(kOneByteStringTag == 0x04);
+ and_(length, Immediate(kStringEncodingMask));
+ xor_(length, Immediate(kStringEncodingMask));
+ add(length, Immediate(0x04));
+ // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+ // by 2. If we multiply the string length as smi by this, it still
+ // won't overflow a 32-bit value.
+ ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
+ ASSERT(SeqOneByteString::kMaxSize <=
+ static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
+ imul(length, FieldOperand(value, String::kLengthOffset));
+ shr(length, 2 + kSmiTagSize + kSmiShiftSize);
+ add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, Immediate(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+ and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
+ length);
+ if (emit_debug_code()) {
+ mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
+ Check(less_equal, kLiveBytesCountOverflowChunkSize);
+ }
+
+ bind(&done);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ mov(dst, FieldOperand(map, Map::kBitField3Offset));
+ and_(dst, Immediate(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
+}
+
+
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Label next, start;
+ mov(ecx, eax);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+
+ EnumLength(edx, ebx);
+ cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
+ j(equal, call_runtime);
+
+ jmp(&start);
+
+ bind(&next);
+ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLength(edx, ebx);
+ cmp(edx, Immediate(Smi::FromInt(0)));
+ j(not_equal, call_runtime);
+
+ bind(&start);
+
+  // Check that there are no elements. Register ecx contains the current JS
+ // object we've reached through the prototype chain.
+ Label no_elements;
+ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ cmp(ecx, isolate()->factory()->empty_fixed_array());
+ j(equal, &no_elements);
+
+  // Second chance: the object may be using the empty slow element dictionary.
+ cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
+ j(not_equal, call_runtime);
+
+ bind(&no_elements);
+ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ cmp(ecx, isolate()->factory()->null_value());
+ j(not_equal, &next);
+}
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
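+  // An AllocationMemento, if present, sits directly after the JSArray in
+  // memory. The candidate address can only hold a memento if it lies inside
+  // the used part of new space, between new_space_start and the current top.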
+ lea(scratch_reg, Operand(receiver_reg,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ cmp(scratch_reg, Immediate(new_space_start));
+ j(less, no_memento_found);
+ cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
+ j(greater, no_memento_found);
+ cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
+ Immediate(isolate()->factory()->allocation_memento_map()));
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+  // Start at the object itself; |current| walks up the prototype chain.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ mov(current, FieldOperand(current, Map::kPrototypeOffset));
+ cmp(current, Immediate(factory->null_value()));
+ j(not_equal, &loop_again);
+}
+
+
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(eax));
+ ASSERT(!dividend.is(edx));
+ MultiplierAndShift ms(divisor);
+ mov(eax, Immediate(ms.multiplier()));
+ imul(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
+ if (ms.shift() > 0) sar(edx, ms.shift());
+ mov(eax, dividend);
+ shr(eax, 31);
+ add(edx, eax);
+}
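+
+// A scalar reference sketch of the multiply-and-shift sequence emitted above
+// (illustrative only; MultiplierAndShift supplies a pair (m, s) chosen so the
+// result matches C-style truncating division for all 32-bit dividends):
+//
+//   int32_t TruncatingDivRef(int32_t dividend, int32_t m, int s,
+//                            int32_t divisor) {
+//     int64_t product = static_cast<int64_t>(m) * dividend;   // imul
+//     int32_t hi = static_cast<int32_t>(product >> 32);       // edx
+//     if (divisor > 0 && m < 0) hi += dividend;
+//     if (divisor < 0 && m > 0) hi -= dividend;
+//     hi >>= s;                                               // sar edx, s
+//     return hi + (static_cast<uint32_t>(dividend) >> 31);    // round to zero
+//   }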
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/macro-assembler-x87.h b/chromium/v8/src/x87/macro-assembler-x87.h
new file mode 100644
index 00000000000..84141e6bc36
--- /dev/null
+++ b/chromium/v8/src/x87/macro-assembler-x87.h
@@ -0,0 +1,1090 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_MACRO_ASSEMBLER_X87_H_
+#define V8_X87_MACRO_ASSEMBLER_X87_H_
+
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Convenience for platform-independent signatures. We do not normally
+// distinguish memory operands from other operands on ia32.
+typedef Operand MemOperand;
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
+
+
+enum RegisterValueType {
+ REGISTER_VALUE_IS_SMI,
+ REGISTER_VALUE_IS_INT32
+};
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller to never invoke such functions on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+ void Load(Register dst, const Operand& src, Representation r);
+ void Store(Register src, const Operand& dst, Representation r);
+
+ // Operations on roots in the root-array.
+ void LoadRoot(Register destination, Heap::RootListIndex index);
+ void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
+ void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
+ // These methods can only be used with constant roots (i.e. non-writable
+ // and not in new space).
+ void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ void CheckPageFlagForMap(
+ Handle<Map> map,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, zero, branch, distance);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, not_zero, branch, distance);
+ }
+
+ // Check if an object has a given incremental marking color. Also uses ecx!
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ Label::Distance has_color_distance,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_distance = Label::kFar);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Label* object_is_white_and_not_data,
+ Label::Distance distance);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // Operand(reg, off).
+ void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ remembered_set_action,
+ smi_check,
+ pointers_to_here_check_for_value);
+ }
+
+ // Notify the garbage collector that we wrote a pointer into a fixed array.
+ // |array| is the array being stored into, |value| is the
+ // object being stored. |index| is the array index represented as a
+  // Smi. All registers are clobbered by the operation. RecordWriteArray
+  // filters out smis, so it does not update the write barrier if the
+  // value is a smi.
+ void RecordWriteArray(
+ Register array,
+ Register value,
+ Register index,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // For page containing |object| mark region covering |address|
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. The address and value registers are clobbered by the
+ // operation. RecordWrite filters out smis so it does not update the
+ // write barrier if the value is a smi.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // For page containing |object| mark the region covering the object's map
+ // dirty. |object| is the object being stored into, |map| is the Map object
+ // that was stored.
+ void RecordWriteForMap(
+ Register object,
+ Handle<Map> map,
+ Register scratch1,
+ Register scratch2);
+
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void DebugBreak();
+
+ // Generates function and stub prologue code.
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
+
+ // Enter specific kind of exit frame. Expects the number of
+ // arguments in register eax and sets up the number of arguments in
+ // register edi and the pointer to the first argument in register
+ // esi.
+ void EnterExitFrame();
+
+ void EnterApiExitFrame(int argc);
+
+ // Leave the current exit frame. Expects the return value in
+ // register eax:edx (untouched) and the pointer to the first
+ // argument in register esi.
+ void LeaveExitFrame();
+
+ // Leave the current exit frame. Expects the return value in
+ // register eax (untouched).
+ void LeaveApiExitFrame(bool restore_context);
+
+ // Find the function context up the context chain.
+ void LoadContext(Register dst, int context_chain_length);
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ // Load the global function with the given index.
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same.
+ void LoadGlobalFunctionInitialMap(Register function, Register map);
+
+ // Push and pop the registers that can hold pointers.
+ void PushSafepointRegisters() { pushad(); }
+ void PopSafepointRegisters() { popad(); }
+ // Store the value in register/immediate src in the safepoint
+ // register stack slot for register dst.
+ void StoreToSafepointRegisterSlot(Register dst, Register src);
+ void StoreToSafepointRegisterSlot(Register dst, Immediate src);
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void CmpHeapObject(Register reg, Handle<HeapObject> object);
+ void PushHeapObject(Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ Move(result, Immediate(object));
+ }
+ }
+
+ void CmpObject(Register reg, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ CmpHeapObject(reg, Handle<HeapObject>::cast(object));
+ } else {
+ cmp(reg, Immediate(object));
+ }
+ }
+
+ // ---------------------------------------------------------------------------
+ // JavaScript invokes
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
+ }
+
+ void InvokeCode(const Operand& code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+ // Store the code object for the given builtin in the target register.
+ void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ // Expression support
+ // Support for constant splitting.
+ bool IsUnsafeImmediate(const Immediate& x);
+ void SafeMove(Register dst, const Immediate& x);
+ void SafePush(const Immediate& x);
+
+ // Compare object type for heap object.
+ // Incoming register is heap_object and outgoing register is map.
+ void CmpObjectType(Register heap_object, InstanceType type, Register map);
+
+ // Compare instance type for map.
+ void CmpInstanceType(Register map, InstanceType type);
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by key in
+ // the FastDoubleElements array elements, otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register maybe_number,
+ Register elements,
+ Register key,
+ Register scratch,
+ Label* fail,
+ int offset = 0);
+
+ // Compare an object's map with the specified map.
+ void CompareMap(Register obj, Handle<Map> map);
+
+  // Check if the map of an object is equal to a specified map and branch to
+  // the label if not. Skip the smi check if not required (object is known to
+  // be a heap object).
+ void CheckMap(Register obj,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Register unused,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Check if the object in register heap_object is a string. Afterwards the
+ // register map contains the object map and the register instance_type
+ // contains the instance_type. The registers map and instance_type can be the
+ // same in which case it contains the instance type afterwards. Either of the
+ // registers map and instance_type can be the same as heap_object.
+ Condition IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type);
+
+ // Check if the object in register heap_object is a name. Afterwards the
+ // register map contains the object map and the register instance_type
+ // contains the instance_type. The registers map and instance_type can be the
+ // same in which case it contains the instance type afterwards. Either of the
+ // registers map and instance_type can be the same as heap_object.
+ Condition IsObjectNameType(Register heap_object,
+ Register map,
+ Register instance_type);
+
+ // Check if a heap object's type is in the JSObject range, not including
+ // JSFunction. The object's map will be loaded in the map register.
+ // Any or all of the three registers may be the same.
+ // The contents of the scratch register will always be overwritten.
+ void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ // The contents of the scratch register will be overwritten.
+ void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
+
+ // FCmp is similar to integer cmp, but requires unsigned
+  // jcc instructions (je, ja, jae, jb, jbe, and jz).
+ void FCmp();
+
+ void ClampUint8(Register reg);
+
+ void SlowTruncateToI(Register result_reg, Register input_reg,
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag);
+
+ void TruncateHeapNumberToI(Register result_reg, Register input_reg);
+ void TruncateX87TOSToI(Register result_reg);
+
+ void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+
+ void TaggedToI(Register result_reg, Register input_reg,
+ MinusZeroMode minus_zero_mode, Label* lost_precision);
+
+ // Smi tagging support.
+ void SmiTag(Register reg) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ add(reg, reg);
+ }
+ void SmiUntag(Register reg) {
+ sar(reg, kSmiTagSize);
+ }
+
+ // Modifies the register even if it does not contain a Smi!
+ void SmiUntag(Register reg, Label* is_smi) {
+ STATIC_ASSERT(kSmiTagSize == 1);
+ sar(reg, kSmiTagSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ j(not_carry, is_smi);
+ }
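+
+  // A minimal sketch of the smi encoding the helpers above assume (31-bit
+  // payload, tag bit 0 in the least significant bit; illustrative, not V8's
+  // real Smi class):
+  //
+  //   int32_t SmiTagRef(int32_t v) { return v + v; }    // v << 1
+  //   int32_t SmiUntagRef(int32_t t) { return t >> 1; } // sar; tag -> carry
+  //   bool IsSmiRef(int32_t t) { return (t & 1) == 0; }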
+
+ void LoadUint32NoSSE2(Register src);
+
+  // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, distance);
+ }
+ // Jump if the operand is a smi.
+ inline void JumpIfSmi(Operand value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, distance);
+ }
+  // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(not_zero, not_smi_label, distance);
+ }
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const int shift = Field::kShift;
+ static const int mask = Field::kMask >> Field::kShift;
+ if (shift != 0) {
+ sar(reg, shift);
+ }
+ and_(reg, Immediate(mask));
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register reg) {
+ static const int shift = Field::kShift;
+ static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
+ STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ if (shift < kSmiTagSize) {
+ shl(reg, kSmiTagSize - shift);
+ } else if (shift > kSmiTagSize) {
+ sar(reg, shift - kSmiTagSize);
+ }
+ and_(reg, Immediate(mask));
+ }
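+
+  // In scalar form, DecodeField computes (reg >> Field::kShift) &
+  // (Field::kMask >> Field::kShift). DecodeFieldToSmi folds the subsequent
+  // SmiTag (a left shift by kSmiTagSize) into the same shift, so a single
+  // shift and mask suffice.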
+
+ // Abort execution if argument is not a number, enabled via --debug-code.
+ void AssertNumber(Register object);
+
+ // Abort execution if argument is not a smi, enabled via --debug-code.
+ void AssertSmi(Register object);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link it into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ void PopTryHandler();
+
+  // Throw to the top handler in the try handler chain.
+ void Throw(Register value);
+
+ // Throw past all JS frames to the top JS entry frame.
+ void ThrowUncatchable(Register value);
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, but the scratch register is clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss);
+
+ void GetNumberHash(Register r0, Register scratch);
+
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result);
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space or old pointer space. If the given space
+ // is exhausted control continues at the gc_required label. The allocated
+ // object is returned in result and end of the new object is returned in
+ // result_end. The register scratch can be passed as no_reg in which case
+ // an additional object reference will be added to the reloc info. The
+ // returned pointers in result and result_end have not yet been tagged as
+  // heap objects. If the flags include RESULT_CONTAINS_TOP, the content of
+  // result is known to be the allocation top on entry (it could be result_end
+  // from a previous call), and scratch should be no_reg as it is never used.
+ void Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ RegisterValueType element_count_type,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. Make sure that no pointers are left to the
+  // object(s) no longer allocated, as they would be invalid when allocation is
+  // undone.
+ void UndoAllocationInNewSpace(Register object);
+
+ // Allocate a heap number in new space with undefined value. The
+ // register scratch2 can be passed as no_reg; the others must be
+ // valid registers. Returns tagged pointer in result register, or
+ // jumps to gc_required if new space is full.
+ void AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocate a sequential string. All the header fields of the string object
+ // are initialized.
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ int length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocate a raw cons string object. Only the map field of the result is
+ // initialized.
+ void AllocateTwoByteConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocate a raw sliced string object. Only the map field of the result is
+ // initialized.
+ void AllocateTwoByteSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Copy memory, byte-by-byte, from source to destination. Not optimized for
+ // long or aligned copies.
+ // The contents of index and scratch are destroyed.
+ void CopyBytes(Register source,
+ Register destination,
+ Register length,
+ Register scratch);
+
+  // Initialize fields with filler values. Fields from |start_offset| up to,
+  // but not including, |end_offset| are overwritten with the value in
+  // |filler|. At the end of the loop, |start_offset| holds the value of
+  // |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+ // Check a boolean-bit of a Smi field.
+ void BooleanBitTest(Register object, int field_offset, int bit_index);
+
+ // Check if result is zero and op is negative.
+ void NegativeZeroTest(Register result, Register op, Label* then_label);
+
+ // Check if result is zero and any of op1 and op2 are negative.
+ // Register scratch is destroyed, and it must be different from op2.
+ void NegativeZeroTest(Register result, Register op1, Register op2,
+ Register scratch, Label* then_label);
+
+ // Try to get function prototype of a function and puts the value in
+ // the result register. Checks that the function really is a
+ // function and jumps to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ bool miss_on_bound_function = false);
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // ---------------------------------------------------------------------------
+ // Runtime calls
+
+ // Call a code stub. Generate the code if necessary.
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // Tail call a code stub (jump). Generate the code if necessary.
+ void TailCallStub(CodeStub* stub);
+
+ // Return from a code stub after popping its arguments.
+ void StubReturn(int argc);
+
+ // Call a runtime routine.
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs);
+ }
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
+
+ // Convenience function: call an external reference.
+ void CallExternalReference(ExternalReference ref, int num_arguments);
+
+ // Tail call of a runtime routine (jump).
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, arguments must be stored in esp[0], esp[4],
+ // etc., not pushed. The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_arguments, Register scratch);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+
+ // Prepares stack to put arguments (aligns and so on). Reserves
+ // space for return value if needed (assumes the return value is a handle).
+ // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
+ // etc. Saves context (esi). If space was reserved for return value then
+ // stores the pointer to the reserved slot into esi.
+ void PrepareCallApiFunction(int argc);
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Clobbers ebx, edi and
+ // caller-save registers. Restores context. On return removes
+ // stack_space * kPointerSize (GCed).
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand);
+
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& ext);
+
+ // ---------------------------------------------------------------------------
+ // Utilities
+
+ void Ret();
+
+ // Return and drop arguments from stack, where the number of arguments
+ // may be bigger than 2^16 - 1. Requires a scratch register.
+ void Ret(int bytes_dropped, Register scratch);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the esp register.
+ void Drop(int element_count);
+
+ void Call(Label* target) { call(target); }
+ void Push(Register src) { push(src); }
+ void Pop(Register dst) { pop(dst); }
+
+ // Emit call to the code we are currently generating.
+ void CallSelf() {
+ Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+ call(self, RelocInfo::CODE_TARGET);
+ }
+
+ // Move if the registers are not identical.
+ void Move(Register target, Register source);
+
+ // Move a constant into a destination using the most efficient encoding.
+ void Move(Register dst, const Immediate& x);
+ void Move(const Operand& dst, const Immediate& x);
+
+ // Push a handle value.
+ void Push(Handle<Object> handle) { push(Immediate(handle)); }
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
+
+ // Insert code to verify that the x87 stack has the specified depth (0-7)
+ void VerifyX87StackDepth(uint32_t depth);
+
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in edx, and eax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value);
+ void IncrementCounter(StatsCounter* counter, int value);
+ void DecrementCounter(StatsCounter* counter, int value);
+ void IncrementCounter(Condition cc, StatsCounter* counter, int value);
+ void DecrementCounter(Condition cc, StatsCounter* counter, int value);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, BailoutReason reason);
+
+ void AssertFastElements(Register elements);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, BailoutReason reason);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Check that the stack is aligned.
+ void CheckStackAlignment();
+
+ // Verify restrictions about code generated in stubs.
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() { return generating_stub_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
+
+ // ---------------------------------------------------------------------------
+ // String utilities.
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the register object is found in the cache, the generated code falls through
+  // with the result in the result register. The object and the result register
+  // can be the same. If the number is not found in the cache, the code jumps to
+  // the label not_found, leaving only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
+ // Check whether the instance type represents a flat ASCII string. Jump to the
+  // label if not. If the instance type can be scratched, specify the same
+  // register for both instance type and scratch.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
+ Register scratch,
+ Label* on_not_flat_ascii_string);
+
+ // Checks if both objects are sequential ASCII strings, and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* on_not_flat_ascii_strings);
+
+  // Checks if the given register or operand is a unique name.
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
+ Label::Distance distance = Label::kFar) {
+ JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+ }
+
+ void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
+
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
+ static int SafepointRegisterStackIndex(Register reg) {
+ return SafepointRegisterStackIndex(reg.code());
+ }
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Expects object in eax and returns map with validated enum cache
+ // in eax. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+  // If an allocation memento is present, the condition code is set to equal.
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ private:
+ bool generating_stub_;
+ bool has_frame_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // Helper functions for generating invokes.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ const Operand& code_operand,
+ Label* done,
+ bool* definitely_mismatches,
+ InvokeFlag flag,
+ Label::Distance done_distance,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ void EnterExitFramePrologue();
+ void EnterExitFrameEpilogue(int argc);
+
+ void LeaveExitFrameEpilogue(bool restore_context);
+
+ // Allocation support helpers.
+ void LoadAllocationTopHelper(Register result,
+ Register scratch,
+ AllocationFlags flags);
+
+ void UpdateAllocationTopHelper(Register result_end,
+ Register scratch,
+ AllocationFlags flags);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+  // Helper for finding the mark bits for an address. Afterwards, the
+  // bitmap register points at the word with the mark bits, and the mask
+  // register holds the position of the first bit. Uses ecx as scratch and
+  // leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
+
+ // Compute memory operands for safepoint stack slots.
+ Operand SafepointRegisterSlot(Register reg);
+ static int SafepointRegisterStackIndex(int reg_code);
+
+ // Needs access to SafepointRegisterStackIndex for compiled frame
+ // traversal.
+ friend class StandardFrame;
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to
+// emit relocation information. If any of these constraints are violated, it
+// causes an assertion to fail.
+class CodePatcher {
+ public:
+ CodePatcher(byte* address, int size);
+ virtual ~CodePatcher();
+
+ // Macro assembler to emit code.
+ MacroAssembler* masm() { return &masm_; }
+
+ private:
+ byte* address_; // The address of the code being patched.
+ int size_; // Number of bytes of the expected patch size.
+ MacroAssembler masm_; // Macro assembler used to generate the code.
+};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate an Operand for loading a field from an object.
+inline Operand FieldOperand(Register object, int offset) {
+ return Operand(object, offset - kHeapObjectTag);
+}
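+
+// (Heap object pointers carry kHeapObjectTag == 1 in the low bit, so field N
+// of an object lives at tagged_pointer + N - 1; FieldOperand folds the
+// untagging into the addressing mode.)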
+
+
+// Generate an Operand for loading an indexed field from an object.
+inline Operand FieldOperand(Register object,
+ Register index,
+ ScaleFactor scale,
+ int offset) {
+ return Operand(object, index, scale, offset - kHeapObjectTag);
+}
+
+
+inline Operand FixedArrayElementOperand(Register array,
+ Register index_as_smi,
+ int additional_offset = 0) {
+ int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
+ return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
+}
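+
+// The times_half_pointer_size scale above works because index_as_smi is the
+// element index shifted left by the one-bit smi tag: (index << 1) scaled by
+// half a pointer equals index * kPointerSize, so no explicit SmiUntag is
+// needed.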
+
+
+inline Operand ContextOperand(Register context, int index) {
+ return Operand(context, Context::SlotOffset(index));
+}
+
+
+inline Operand GlobalObjectOperand() {
+ return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
+}
+
+
+// Generates an Operand for saving parameters after PrepareCallApiFunction.
+Operand ApiParameterOperand(int index);
+
+
+#ifdef GENERATED_CODE_COVERAGE
+extern void LogGeneratedCodeCoverage(const char* file_line);
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) { \
+ byte* ia32_coverage_function = \
+ reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
+ masm->pushfd(); \
+ masm->pushad(); \
+ masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
+ masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
+ masm->pop(eax); \
+ masm->popad(); \
+ masm->popfd(); \
+ } \
+ masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+
+} } // namespace v8::internal
+
+#endif // V8_X87_MACRO_ASSEMBLER_X87_H_
diff --git a/chromium/v8/src/x87/regexp-macro-assembler-x87.cc b/chromium/v8/src/x87/regexp-macro-assembler-x87.cc
new file mode 100644
index 00000000000..c5ea9815703
--- /dev/null
+++ b/chromium/v8/src/x87/regexp-macro-assembler-x87.cc
@@ -0,0 +1,1309 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/cpu-profiler.h"
+#include "src/unicode.h"
+#include "src/log.h"
+#include "src/regexp-stack.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/x87/regexp-macro-assembler-x87.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - edx : Current character. Must be loaded using LoadCurrentCharacter
+ * before using any of the dispatch methods. Temporarily stores the
+ * index of capture start after a matching pass for a global regexp.
+ * - edi : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - esi : end of input (points to byte after last character in input).
+ * - ebp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - esp : Points to tip of C stack.
+ * - ecx : Points to tip of backtrack stack
+ *
+ * The registers eax and ebx are free to use for computations.
+ *
+ * Each call to a public method should retain this convention.
+ * The stack will have the following structure:
+ * - Isolate* isolate (address of the current isolate)
+ * - direct_call (if 1, direct call from JavaScript code, if 0
+ * call through the runtime system)
+ * - stack_area_base (high end of the memory area to use as
+ * backtracking stack)
+ * - capture array size (may fit multiple sets of matches)
+ * - int* capture_array (int[num_saved_registers_], for output).
+ * - end of input (address of end of string)
+ * - start of input (address of first character in string)
+ * - start index (character index of start)
+ * - String* input_string (location of a handle containing the string)
+ * --- frame alignment (if applicable) ---
+ * - return address
+ * ebp-> - old ebp
+ * - backup of caller esi
+ * - backup of caller edi
+ * - backup of caller ebx
+ * - success counter (only for global regexps to count matches).
+ * - Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a non-position.
+ * - register 0 ebp[-4] (only positions must be stored in the first
+ * - register 1 ebp[-8] num_saved_registers_ registers)
+ * - ...
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code, by calling the code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * bool at_start,
+ * byte* stack_area_base,
+ * bool direct_call)
+ */
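+
+// A minimal sketch of invoking the generated code through the signature
+// documented above (illustrative only; V8 actually enters this code via
+// NativeRegExpMacroAssembler rather than a raw function pointer):
+//
+//   typedef int (*RegExpCodeEntry)(String* input_string, int start_index,
+//                                  Address start, Address end,
+//                                  int* capture_output_array, bool at_start,
+//                                  byte* stack_area_base, bool direct_call);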
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerX87::RegExpMacroAssemblerX87(
+ Mode mode,
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_() {
+ ASSERT_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ __ bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerX87::~RegExpMacroAssemblerX87() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerX87::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerX87::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ add(edi, Immediate(by * char_size()));
+ }
+}
+
+
+void RegExpMacroAssemblerX87::AdvanceRegister(int reg, int by) {
+ ASSERT(reg >= 0);
+ ASSERT(reg < num_registers_);
+ if (by != 0) {
+ __ add(register_location(reg), Immediate(by));
+ }
+}
+
+
+void RegExpMacroAssemblerX87::Backtrack() {
+ CheckPreemption();
+ // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ Pop(ebx);
+ __ add(ebx, Immediate(masm_->CodeObject()));
+ __ jmp(ebx);
+}
+
+
+void RegExpMacroAssemblerX87::Bind(Label* label) {
+ __ bind(label);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacter(uint32_t c, Label* on_equal) {
+ __ cmp(current_character(), c);
+ BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ __ cmp(current_character(), limit);
+ BranchOrBacktrack(greater, on_greater);
+}
+
+
+void RegExpMacroAssemblerX87::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the string at all?
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ BranchOrBacktrack(not_equal, &not_at_start);
+ // If we did, are we still at the start of the input?
+ __ lea(eax, Operand(esi, edi, times_1, 0));
+ __ cmp(eax, Operand(ebp, kInputStart));
+ BranchOrBacktrack(equal, on_at_start);
+ __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the string at all?
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ BranchOrBacktrack(not_equal, on_not_at_start);
+ // If we did, are we still at the start of the input?
+ __ lea(eax, Operand(esi, edi, times_1, 0));
+ __ cmp(eax, Operand(ebp, kInputStart));
+ BranchOrBacktrack(not_equal, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterLT(uc16 limit, Label* on_less) {
+ __ cmp(current_character(), limit);
+ BranchOrBacktrack(less, on_less);
+}
+
+
+void RegExpMacroAssemblerX87::CheckGreedyLoop(Label* on_equal) {
+ Label fallthrough;
+ __ cmp(edi, Operand(backtrack_stackpointer(), 0));
+ __ j(not_equal, &fallthrough);
+ __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
+ BranchOrBacktrack(no_condition, on_equal);
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ __ mov(edx, register_location(start_reg)); // Index of start of capture
+ __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
+ __ sub(ebx, edx); // Length of capture.
+
+ // The length of a capture should not be negative. This can only happen
+ // if the end of the capture is unrecorded, or at a point earlier than
+ // the start of the capture.
+ BranchOrBacktrack(less, on_no_match);
+
+ // If length is zero, either the capture is empty or it is completely
+ // uncaptured. In either case succeed immediately.
+ __ j(equal, &fallthrough);
+
+ // Check that there are sufficient characters left in the input.
+ __ mov(eax, edi);
+ __ add(eax, ebx);
+ BranchOrBacktrack(greater, on_no_match);
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_increment;
+ // Save register contents to make the registers available below.
+ __ push(edi);
+ __ push(backtrack_stackpointer());
+ // After this, the eax, ecx, and edi registers are available.
+
+ __ add(edx, esi); // Start of capture
+ __ add(edi, esi); // Start of text to match against capture.
+ __ add(ebx, edi); // End of text to match against capture.
+
+ Label loop;
+ __ bind(&loop);
+ __ movzx_b(eax, Operand(edi, 0));
+ __ cmpb_al(Operand(edx, 0));
+ __ j(equal, &loop_increment);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ or_(eax, 0x20); // Convert match character to lower-case.
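+    // (Upper- and lower-case ASCII letters differ only in bit 0x20, so
+    // OR-ing in 0x20 maps 'A'..'Z' onto 'a'..'z'; the range check below
+    // filters out non-letters that also have that bit set.)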
+ __ lea(ecx, Operand(eax, -'a'));
+ __ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter?
+ Label convert_capture;
+ __ j(below_equal, &convert_capture); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ sub(ecx, Immediate(224 - 'a'));
+ __ cmp(ecx, Immediate(254 - 224));
+ __ j(above, &fail); // Weren't Latin-1 letters.
+ __ cmp(ecx, Immediate(247 - 224)); // Check for 247.
+ __ j(equal, &fail);
+ __ bind(&convert_capture);
+ // Also convert capture character.
+ __ movzx_b(ecx, Operand(edx, 0));
+ __ or_(ecx, 0x20);
+
+ __ cmp(eax, ecx);
+ __ j(not_equal, &fail);
+
+ __ bind(&loop_increment);
+ // Increment pointers into match and capture strings.
+ __ add(edx, Immediate(1));
+ __ add(edi, Immediate(1));
+ // Compare to end of match, and loop if not done.
+ __ cmp(edi, ebx);
+ __ j(below, &loop);
+ __ jmp(&success);
+
+ __ bind(&fail);
+ // Restore original values before failing.
+ __ pop(backtrack_stackpointer());
+ __ pop(edi);
+ BranchOrBacktrack(no_condition, on_no_match);
+
+ __ bind(&success);
+ // Restore original value before continuing.
+ __ pop(backtrack_stackpointer());
+ // Drop original value of character position.
+ __ add(esp, Immediate(kPointerSize));
+ // Compute new value of character position after the matched part.
+ __ sub(edi, esi);
+ } else {
+ ASSERT(mode_ == UC16);
+ // Save registers before calling C function.
+ __ push(esi);
+ __ push(edi);
+ __ push(backtrack_stackpointer());
+ __ push(ebx);
+
+ static const int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, ecx);
+ // Put arguments into allocated stack area, last argument highest on stack.
+ // Parameters are
+ // Address byte_offset1 - Address captured substring's start.
+ // Address byte_offset2 - Address of current character position.
+ // size_t byte_length - length of capture in bytes(!)
+ // Isolate* isolate
+
+ // Set isolate.
+ __ mov(Operand(esp, 3 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ // Set byte_length.
+ __ mov(Operand(esp, 2 * kPointerSize), ebx);
+ // Set byte_offset2.
+ // Found by adding negative string-end offset of current position (edi)
+ // to end of string.
+ __ add(edi, esi);
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ // Set byte_offset1.
+ // Start of capture, where edx already holds string-end negative offset.
+ __ add(edx, esi);
+ __ mov(Operand(esp, 0 * kPointerSize), edx);
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference compare =
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ __ CallCFunction(compare, argument_count);
+ }
+ // Pop original values before reacting on result value.
+ __ pop(ebx);
+ __ pop(backtrack_stackpointer());
+ __ pop(edi);
+ __ pop(esi);
+
+ // Check if function returned non-zero for success or zero for failure.
+ __ or_(eax, eax);
+ BranchOrBacktrack(zero, on_no_match);
+ // On success, increment position by length of capture.
+ __ add(edi, ebx);
+ }
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ Label success;
+ Label fail;
+
+ // Find length of back-referenced capture.
+ __ mov(edx, register_location(start_reg));
+ __ mov(eax, register_location(start_reg + 1));
+ __ sub(eax, edx); // Length to check.
+ // Fail on partial or illegal capture (start of capture after end of capture).
+ BranchOrBacktrack(less, on_no_match);
+ // Succeed on empty capture (including no capture)
+ __ j(equal, &fallthrough);
+
+ // Check that there are sufficient characters left in the input.
+ __ mov(ebx, edi);
+ __ add(ebx, eax);
+ BranchOrBacktrack(greater, on_no_match);
+
+ // Save register to make it available below.
+ __ push(backtrack_stackpointer());
+
+ // Compute pointers to match string and capture string
+ __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
+ __ add(edx, esi); // Start of capture.
+ __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == ASCII) {
+ __ movzx_b(eax, Operand(edx, 0));
+ __ cmpb_al(Operand(ebx, 0));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ movzx_w(eax, Operand(edx, 0));
+ __ cmpw_ax(Operand(ebx, 0));
+ }
+ __ j(not_equal, &fail);
+ // Increment pointers into capture and match string.
+ __ add(edx, Immediate(char_size()));
+ __ add(ebx, Immediate(char_size()));
+ // Check if we have reached end of match area.
+ __ cmp(ebx, ecx);
+ __ j(below, &loop);
+ __ jmp(&success);
+
+ __ bind(&fail);
+ // Restore backtrack stackpointer.
+ __ pop(backtrack_stackpointer());
+ BranchOrBacktrack(no_condition, on_no_match);
+
+ __ bind(&success);
+ // Move current character position to position after match.
+ __ mov(edi, ecx);
+ __ sub(edi, esi);
+ // Restore backtrack stackpointer.
+ __ pop(backtrack_stackpointer());
+
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ __ cmp(current_character(), c);
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ if (c == 0) {
+ __ test(current_character(), Immediate(mask));
+ } else {
+ __ mov(eax, mask);
+ __ and_(eax, current_character());
+ __ cmp(eax, c);
+ }
+ BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
+ if (c == 0) {
+ __ test(current_character(), Immediate(mask));
+ } else {
+ __ mov(eax, mask);
+ __ and_(eax, current_character());
+ __ cmp(eax, c);
+ }
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
+ __ lea(eax, Operand(current_character(), -minus));
+ if (c == 0) {
+ __ test(eax, Immediate(mask));
+ } else {
+ __ and_(eax, mask);
+ __ cmp(eax, c);
+ }
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_in_range) {
+ __ lea(eax, Operand(current_character(), -from));
+ __ cmp(eax, to - from);
+ BranchOrBacktrack(below_equal, on_in_range);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterNotInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_not_in_range) {
+ __ lea(eax, Operand(current_character(), -from));
+ __ cmp(eax, to - from);
+ BranchOrBacktrack(above, on_not_in_range);
+}
+
+
+void RegExpMacroAssemblerX87::CheckBitInTable(
+ Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ mov(eax, Immediate(table));
+ Register index = current_character();
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+ __ mov(ebx, kTableSize - 1);
+ __ and_(ebx, current_character());
+ index = ebx;
+ }
+ __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
+ BranchOrBacktrack(not_equal, on_bit_set);
+}
+
+
+bool RegExpMacroAssemblerX87::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check.
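+  // For example, the 'd' case below tests '0' <= c <= '9' with the single
+  // unsigned compare unsigned(c - '0') <= '9' - '0'; any c below '0' wraps
+  // around to a large unsigned value and fails the same test.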
+ switch (type) {
+ case 's':
+ // Match space-characters
+ if (mode_ == ASCII) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ __ cmp(current_character(), ' ');
+ __ j(equal, &success, Label::kNear);
+ // Check range 0x09..0x0d
+ __ lea(eax, Operand(current_character(), -'\t'));
+ __ cmp(eax, '\r' - '\t');
+ __ j(below_equal, &success, Label::kNear);
+ // \u00a0 (NBSP).
+ __ cmp(eax, 0x00a0 - '\t');
+ BranchOrBacktrack(not_equal, on_no_match);
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9')
+ __ lea(eax, Operand(current_character(), -'0'));
+ __ cmp(eax, '9' - '0');
+ BranchOrBacktrack(above, on_no_match);
+ return true;
+ case 'D':
+      // Match characters that are not ASCII digits ('0'..'9').
+ __ lea(eax, Operand(current_character(), -'0'));
+ __ cmp(eax, '9' - '0');
+ BranchOrBacktrack(below_equal, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ mov(eax, current_character());
+ __ xor_(eax, Immediate(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
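+      // (0x0a ^ 0x01 == 0x0b and 0x0d ^ 0x01 == 0x0c, so the xor folds both
+      // newline characters into one contiguous range.)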
+ __ sub(eax, Immediate(0x0b));
+ __ cmp(eax, 0x0c - 0x0b);
+ BranchOrBacktrack(below_equal, on_no_match);
+ if (mode_ == UC16) {
+        // Compare original value to 0x2028 and 0x2029, using the already
+        // computed (current_char ^ 0x01) - 0x0b. I.e., check for
+        // 0x201d (0x2028 - 0x0b) or 0x201e (0x2029 - 0x0b).
+ __ sub(eax, Immediate(0x2028 - 0x0b));
+ __ cmp(eax, 0x2029 - 0x2028);
+ BranchOrBacktrack(below_equal, on_no_match);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Immediate('z'));
+ BranchOrBacktrack(above, on_no_match);
+ }
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ ExternalReference word_map = ExternalReference::re_word_character_map();
+ __ test_b(current_character(),
+ Operand::StaticArray(current_character(), times_1, word_map));
+ BranchOrBacktrack(zero, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Immediate('z'));
+ __ j(above, &done);
+ }
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ ExternalReference word_map = ExternalReference::re_word_character_map();
+ __ test_b(current_character(),
+ Operand::StaticArray(current_character(), times_1, word_map));
+ BranchOrBacktrack(not_zero, on_no_match);
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ // Non-standard classes (with no syntactic shorthand) used internally.
+ case '*':
+ // Match any character.
+ return true;
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
+ // The opposite of '.'.
+ __ mov(eax, current_character());
+ __ xor_(eax, Immediate(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ sub(eax, Immediate(0x0b));
+ __ cmp(eax, 0x0c - 0x0b);
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(above, on_no_match);
+ } else {
+ Label done;
+ BranchOrBacktrack(below_equal, &done);
+ ASSERT_EQ(UC16, mode_);
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01) - 0x0b. I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e (0x2029 - 0x0b).
+ __ sub(eax, Immediate(0x2028 - 0x0b));
+ __ cmp(eax, 1);
+ BranchOrBacktrack(above, on_no_match);
+ __ bind(&done);
+ }
+ return true;
+ }
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerX87::Fail() {
+ STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
+ if (!global()) {
+ __ Move(eax, Immediate(FAILURE));
+ }
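+  // For a global regexp, eax is instead loaded with the number of
+  // successful captures at the exit label below.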
+ __ jmp(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
+ Label return_eax;
+ // Finalize code - write the entry point code now we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
+ __ push(ebp);
+ __ mov(ebp, esp);
+ // Save callee-save registers. Order here should correspond to order of
+ // kBackup_ebx etc.
+ __ push(esi);
+ __ push(edi);
+ __ push(ebx); // Callee-save on MacOS.
+ __ push(Immediate(0)); // Number of successful matches in a global regexp.
+ __ push(Immediate(0)); // Make room for "input start - 1" constant.
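+  // The slots just pushed line up with the kBackup_esi, kBackup_edi,
+  // kBackup_ebx, kSuccessfulCaptures and kInputStartMinusOne offsets
+  // declared in the header.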
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ mov(ecx, esp);
+ __ sub(ecx, Operand::StaticVariable(stack_limit));
+ // Handle it if the stack pointer is already below the stack limit.
+ __ j(below_equal, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmp(ecx, num_registers_ * kPointerSize);
+ __ j(above_equal, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(eax, EXCEPTION);
+ __ jmp(&return_eax);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(ebx);
+ __ or_(eax, eax);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ j(not_zero, &return_eax);
+
+ __ bind(&stack_ok);
+ // Load start index for later use.
+ __ mov(ebx, Operand(ebp, kStartIndex));
+
+ // Allocate space on stack for registers.
+ __ sub(esp, Immediate(num_registers_ * kPointerSize));
+ // Load string length.
+ __ mov(esi, Operand(ebp, kInputEnd));
+ // Load input position.
+ __ mov(edi, Operand(ebp, kInputStart));
+ // Set up edi to be negative offset from string end.
+ __ sub(edi, esi);
+
+ // Set eax to address of char before start of the string.
+ // (effectively string position -1).
+ __ neg(ebx);
+ if (mode_ == UC16) {
+ __ lea(eax, Operand(edi, ebx, times_2, -char_size()));
+ } else {
+ __ lea(eax, Operand(edi, ebx, times_1, -char_size()));
+ }
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ mov(Operand(ebp, kInputStartMinusOne), eax);
+
+#if V8_OS_WIN
+ // Ensure that we write to each stack page, in order. Skipping a page
+ // on Windows can cause segmentation faults. Assuming page size is 4k.
+ const int kPageSize = 4096;
+ const int kRegistersPerPage = kPageSize / kPointerSize;
+ for (int i = num_saved_registers_ + kRegistersPerPage - 1;
+ i < num_registers_;
+ i += kRegistersPerPage) {
+ __ mov(register_location(i), eax); // One write every page.
+ }
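+  // With 4-byte pointers this writes one slot out of every 1024 registers,
+  // committing each 4k page in order.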
+#endif // V8_OS_WIN
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ __ j(not_equal, &load_char_start_regexp, Label::kNear);
+ __ mov(current_character(), '\n');
+ __ jmp(&start_regexp, Label::kNear);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1
+ // Fill in stack push order, to avoid accessing across an unwritten
+ // page (a problem on Windows).
+ if (num_saved_registers_ > 8) {
+ __ mov(ecx, kRegisterZero);
+ Label init_loop;
+ __ bind(&init_loop);
+ __ mov(Operand(ebp, ecx, times_1, 0), eax);
+ __ sub(ecx, Immediate(kPointerSize));
+ __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
+ __ j(greater, &init_loop);
+ } else { // Unroll the loop.
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ mov(register_location(i), eax);
+ }
+ }
+ }
+
+ // Initialize backtrack stack pointer.
+ __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+
+ __ jmp(&start_label_);
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+      // Copy captures to output.
+ __ mov(ebx, Operand(ebp, kRegisterOutput));
+ __ mov(ecx, Operand(ebp, kInputEnd));
+ __ mov(edx, Operand(ebp, kStartIndex));
+ __ sub(ecx, Operand(ebp, kInputStart));
+ if (mode_ == UC16) {
+ __ lea(ecx, Operand(ecx, edx, times_2, 0));
+ } else {
+ __ add(ecx, edx);
+ }
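+      // ecx is now the byte length of the match area plus the scaled start
+      // index, so adding it to an end-relative capture value gives a byte
+      // index from the start of the subject string.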
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ mov(eax, register_location(i));
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in edx for the zero-length check later.
+ __ mov(edx, eax);
+ }
+ // Convert to index from start of string, not end.
+ __ add(eax, ecx);
+ if (mode_ == UC16) {
+ __ sar(eax, 1); // Convert byte index to character index.
+ }
+ __ mov(Operand(ebx, i * kPointerSize), eax);
+ }
+ }
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ // Increment success counter.
+ __ inc(Operand(ebp, kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ mov(ecx, Operand(ebp, kNumOutputRegisters));
+ __ sub(ecx, Immediate(num_saved_registers_));
+ // Check whether we have enough room for another set of capture results.
+ __ cmp(ecx, Immediate(num_saved_registers_));
+ __ j(less, &exit_label_);
+
+ __ mov(Operand(ebp, kNumOutputRegisters), ecx);
+ // Advance the location for output.
+ __ add(Operand(ebp, kRegisterOutput),
+ Immediate(num_saved_registers_ * kPointerSize));
+
+ // Prepare eax to initialize registers with its value in the next run.
+ __ mov(eax, Operand(ebp, kInputStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // edx: capture start index
+ __ cmp(edi, edx);
+ // Not a zero-length match, restart.
+ __ j(not_equal, &load_char_start_regexp);
+ // edi (offset from the end) is zero if we already reached the end.
+ __ test(edi, edi);
+ __ j(zero, &exit_label_, Label::kNear);
+ // Advance current position after a zero-length match.
+ if (mode_ == UC16) {
+ __ add(edi, Immediate(2));
+ } else {
+ __ inc(edi);
+ }
+ }
+
+ __ jmp(&load_char_start_regexp);
+ } else {
+ __ mov(eax, Immediate(SUCCESS));
+ }
+ }
+
+ __ bind(&exit_label_);
+ if (global()) {
+ // Return the number of successful captures.
+ __ mov(eax, Operand(ebp, kSuccessfulCaptures));
+ }
+
+ __ bind(&return_eax);
+ // Skip esp past regexp registers.
+ __ lea(esp, Operand(ebp, kBackup_ebx));
+ // Restore callee-save registers.
+ __ pop(ebx);
+ __ pop(edi);
+ __ pop(esi);
+ // Exit function frame, restore previous one.
+ __ pop(ebp);
+ __ ret(0);
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+
+ __ push(backtrack_stackpointer());
+ __ push(edi);
+
+ CallCheckStackGuardState(ebx);
+ __ or_(eax, eax);
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ j(not_zero, &return_eax);
+
+ __ pop(edi);
+ __ pop(backtrack_stackpointer());
+ // String might have moved: Reload esi from frame.
+ __ mov(esi, Operand(ebp, kInputEnd));
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+
+ Label grow_failed;
+ // Save registers before calling C function
+ __ push(esi);
+ __ push(edi);
+
+ // Call GrowStack(backtrack_stackpointer())
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, ebx);
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ __ lea(eax, Operand(ebp, kStackHighEnd));
+ __ mov(Operand(esp, 1 * kPointerSize), eax);
+ __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+  // If the call returned NULL, we have failed to grow the stack, and
+  // must exit with a stack-overflow exception.
+ __ or_(eax, eax);
+ __ j(equal, &exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ mov(backtrack_stackpointer(), eax);
+ // Restore saved registers and continue.
+ __ pop(edi);
+ __ pop(esi);
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+    // Reached if any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ mov(eax, EXCEPTION);
+ __ jmp(&return_eax);
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code =
+ isolate()->factory()->NewCode(code_desc,
+ Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerX87::GoTo(Label* to) {
+ BranchOrBacktrack(no_condition, to);
+}
+
+
+void RegExpMacroAssemblerX87::IfRegisterGE(int reg,
+ int comparand,
+ Label* if_ge) {
+ __ cmp(register_location(reg), Immediate(comparand));
+ BranchOrBacktrack(greater_equal, if_ge);
+}
+
+
+void RegExpMacroAssemblerX87::IfRegisterLT(int reg,
+ int comparand,
+ Label* if_lt) {
+ __ cmp(register_location(reg), Immediate(comparand));
+ BranchOrBacktrack(less, if_lt);
+}
+
+
+void RegExpMacroAssemblerX87::IfRegisterEqPos(int reg,
+ Label* if_eq) {
+ __ cmp(edi, register_location(reg));
+ BranchOrBacktrack(equal, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerX87::Implementation() {
+ return kX87Implementation;
+}
+
+
+void RegExpMacroAssemblerX87::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerX87::PopCurrentPosition() {
+ Pop(edi);
+}
+
+
+void RegExpMacroAssemblerX87::PopRegister(int register_index) {
+ Pop(eax);
+ __ mov(register_location(register_index), eax);
+}
+
+
+void RegExpMacroAssemblerX87::PushBacktrack(Label* label) {
+ Push(Immediate::CodeRelativeOffset(label));
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerX87::PushCurrentPosition() {
+ Push(edi);
+}
+
+
+void RegExpMacroAssemblerX87::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ mov(eax, register_location(register_index));
+ Push(eax);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerX87::ReadCurrentPositionFromRegister(int reg) {
+ __ mov(edi, register_location(reg));
+}
+
+
+void RegExpMacroAssemblerX87::ReadStackPointerFromRegister(int reg) {
+ __ mov(backtrack_stackpointer(), register_location(reg));
+ __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+}
+
+void RegExpMacroAssemblerX87::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ cmp(edi, -by * char_size());
+ __ j(greater_equal, &after_position, Label::kNear);
+ __ mov(edi, -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerX87::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ __ mov(register_location(register_index), Immediate(to));
+}
+
+
+bool RegExpMacroAssemblerX87::Succeed() {
+ __ jmp(&success_label_);
+ return global();
+}
+
+
+void RegExpMacroAssemblerX87::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ mov(register_location(reg), edi);
+ } else {
+ __ lea(eax, Operand(edi, cp_offset * char_size()));
+ __ mov(register_location(reg), eax);
+ }
+}
+
+
+void RegExpMacroAssemblerX87::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ mov(register_location(reg), eax);
+ }
+}
+
+
+void RegExpMacroAssemblerX87::WriteStackPointerToRegister(int reg) {
+ __ mov(eax, backtrack_stackpointer());
+ __ sub(eax, Operand(ebp, kStackHighEnd));
+ __ mov(register_location(reg), eax);
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerX87::CallCheckStackGuardState(Register scratch) {
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, scratch);
+ // RegExp code frame pointer.
+ __ mov(Operand(esp, 2 * kPointerSize), ebp);
+ // Code* of self.
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
+ // Next address on the stack (will be address of return address).
+ __ lea(eax, Operand(esp, -kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+ ExternalReference check_stack_guard =
+ ExternalReference::re_check_stack_guard_state(isolate());
+ __ CallCFunction(check_stack_guard, num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
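+// For example, frame_entry<int>(re_frame, kStartIndex) reads the start index
+// from the frame, and the same expression can be assigned to, which is how
+// CheckStackGuardState below patches the frame in place.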
+
+
+int RegExpMacroAssemblerX87::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+  // If it is not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+ // If this is a direct call from JavaScript retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles(isolate);
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
+ // Current string.
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ Object* result = isolate->stack_guard()->HandleInterrupts();
+
+ if (*code_handle != re_code) { // Return address no longer valid
+ int delta = code_handle->address() - re_code->address();
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ Handle<String> subject_tmp = subject;
+ int slice_offset = 0;
+
+ // Extract the underlying string and the slice offset.
+ if (StringShape(*subject_tmp).IsCons()) {
+ subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+ } else if (StringShape(*subject_tmp).IsSliced()) {
+ SlicedString* slice = SlicedString::cast(*subject_tmp);
+ subject_tmp = Handle<String>(slice->parent());
+ slice_offset = slice->offset();
+ }
+
+ // String might have changed.
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+    // If we changed between an ASCII and a UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject_tmp).IsSequential() ||
+ StringShape(*subject_tmp).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+ // Find the current start address of the same character at the current string
+ // position.
+ int start_index = frame_entry<int>(re_frame, kStartIndex);
+ const byte* new_address = StringCharacterPosition(*subject_tmp,
+ start_index + slice_offset);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
+ int byte_length = static_cast<int>(end_address - start_address);
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+ frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address, but
+    // it will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ }
+
+ return 0;
+}
+
+
+Operand RegExpMacroAssemblerX87::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return Operand(ebp, kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerX87::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ __ cmp(edi, -cp_offset * char_size());
+ BranchOrBacktrack(greater_equal, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerX87::BranchOrBacktrack(Condition condition,
+ Label* to) {
+ if (condition < 0) { // No condition
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == NULL) {
+ __ j(condition, &backtrack_label_);
+ return;
+ }
+ __ j(condition, to);
+}
+
+
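+// The Safe* helpers below keep the backtrack stack GC-safe: SafeCall pushes
+// a code-relative offset instead of an absolute return address, and
+// SafeReturn adds the code object base back in before jumping.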
+void RegExpMacroAssemblerX87::SafeCall(Label* to) {
+ Label return_to;
+ __ push(Immediate::CodeRelativeOffset(&return_to));
+ __ jmp(to);
+ __ bind(&return_to);
+}
+
+
+void RegExpMacroAssemblerX87::SafeReturn() {
+ __ pop(ebx);
+ __ add(ebx, Immediate(masm_->CodeObject()));
+ __ jmp(ebx);
+}
+
+
+void RegExpMacroAssemblerX87::SafeCallTarget(Label* name) {
+ __ bind(name);
+}
+
+
+void RegExpMacroAssemblerX87::Push(Register source) {
+ ASSERT(!source.is(backtrack_stackpointer()));
+ // Notice: This updates flags, unlike normal Push.
+ __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ mov(Operand(backtrack_stackpointer(), 0), source);
+}
+
+
+void RegExpMacroAssemblerX87::Push(Immediate value) {
+ // Notice: This updates flags, unlike normal Push.
+ __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ mov(Operand(backtrack_stackpointer(), 0), value);
+}
+
+
+void RegExpMacroAssemblerX87::Pop(Register target) {
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ mov(target, Operand(backtrack_stackpointer(), 0));
+ // Notice: This updates flags, unlike normal Pop.
+ __ add(backtrack_stackpointer(), Immediate(kPointerSize));
+}
+
+
+void RegExpMacroAssemblerX87::CheckPreemption() {
+ // Check for preemption.
+ Label no_preempt;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above, &no_preempt);
+
+ SafeCall(&check_preempt_label_);
+
+ __ bind(&no_preempt);
+}
+
+
+void RegExpMacroAssemblerX87::CheckStackLimit() {
+ Label no_stack_overflow;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(isolate());
+ __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+ __ j(above, &no_stack_overflow);
+
+ SafeCall(&stack_overflow_label_);
+
+ __ bind(&no_stack_overflow);
+}
+
+
+void RegExpMacroAssemblerX87::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ if (mode_ == ASCII) {
+ if (characters == 4) {
+ __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
+ } else if (characters == 2) {
+ __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
+ } else {
+ ASSERT(characters == 1);
+ __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ if (characters == 2) {
+ __ mov(current_character(),
+ Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
+ } else {
+ ASSERT(characters == 1);
+ __ movzx_w(current_character(),
+ Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
+ }
+ }
+}
+
+
+#undef __
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/x87/regexp-macro-assembler-x87.h b/chromium/v8/src/x87/regexp-macro-assembler-x87.h
new file mode 100644
index 00000000000..e4cae628b3c
--- /dev/null
+++ b/chromium/v8/src/x87/regexp-macro-assembler-x87.h
@@ -0,0 +1,200 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
+#define V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
+
+#include "src/x87/assembler-x87.h"
+#include "src/x87/assembler-x87-inl.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerX87(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerX87();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from,
+ uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from,
+ uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
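+  // Returns 0 to continue execution, RETRY to restart matching through the
+  // runtime, or EXCEPTION if an exception is now pending.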
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+
+ private:
+ // Offsets from ebp of function parameters and stored registers.
+ static const int kFramePointer = 0;
+ // Above the frame pointer - function parameters and return address.
+ static const int kReturn_eip = kFramePointer + kPointerSize;
+ static const int kFrameAlign = kReturn_eip + kPointerSize;
+ // Parameters.
+ static const int kInputString = kFrameAlign;
+ static const int kStartIndex = kInputString + kPointerSize;
+ static const int kInputStart = kStartIndex + kPointerSize;
+ static const int kInputEnd = kInputStart + kPointerSize;
+ static const int kRegisterOutput = kInputEnd + kPointerSize;
+  // For the case of a global regular expression, we have room to store at
+  // least one set of capture results. For the case of a non-global regexp,
+  // we ignore this value.
+ static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
+ // Below the frame pointer - local stack variables.
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kBackup_esi = kFramePointer - kPointerSize;
+ static const int kBackup_edi = kBackup_esi - kPointerSize;
+ static const int kBackup_ebx = kBackup_edi - kPointerSize;
+ static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
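+  // The resulting ebp-relative layout, one pointer-sized slot each:
+  //   ebp+8 .. ebp+40 : parameters (kInputString .. kIsolate)
+  //   ebp+4           : return address; ebp+0: saved ebp
+  //   ebp-4 .. ebp-12 : saved esi, edi, ebx
+  //   ebp-16, ebp-20  : successful captures, input start minus one
+  //   ebp-24 and down : the regexp registers.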
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // The ebp-relative location of a regexp register.
+ Operand register_location(int register_index);
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return edx; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return ecx; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+ inline void SafeCall(Label* to);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer (ecx) by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
+ // by a word size and stores the value there.
+ inline void Push(Immediate value);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // (ecx) and increments it by a word size.
+ inline void Pop(Register target);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
diff --git a/chromium/v8/src/x87/simulator-x87.cc b/chromium/v8/src/x87/simulator-x87.cc
new file mode 100644
index 00000000000..20edae83a2a
--- /dev/null
+++ b/chromium/v8/src/x87/simulator-x87.cc
@@ -0,0 +1,6 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Since there is no simulator for the ia32 architecture, this file is empty.
diff --git a/chromium/v8/src/x87/simulator-x87.h b/chromium/v8/src/x87/simulator-x87.h
new file mode 100644
index 00000000000..a780e839d2d
--- /dev/null
+++ b/chromium/v8/src/x87/simulator-x87.h
@@ -0,0 +1,48 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_SIMULATOR_X87_H_
+#define V8_X87_SIMULATOR_X87_H_
+
+#include "src/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+// Since there is no simulator for the ia32 architecture, the only thing we
+// can do is to call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ (entry(p0, p1, p2, p3, p4))
+
+
+typedef int (*regexp_matcher)(String*, int, const byte*,
+ const byte*, int*, int, Address, int, Isolate*);
+
+// Call the generated regexp code directly. The code at the entry address
+// should expect nine int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
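+// These appear to map one-to-one onto the generated code's stack parameters
+// (kInputString through kIsolate in regexp-macro-assembler-x87.h): subject,
+// start index, input start/end, output register array and its length,
+// backtrack stack high end, direct-call flag, and the isolate.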
+
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ia32 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X87_SIMULATOR_X87_H_
diff --git a/chromium/v8/src/x87/stub-cache-x87.cc b/chromium/v8/src/x87/stub-cache-x87.cc
new file mode 100644
index 00000000000..f480b5100e4
--- /dev/null
+++ b/chromium/v8/src/x87/stub-cache-x87.cc
@@ -0,0 +1,1493 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/ic-inl.h"
+#include "src/codegen.h"
+#include "src/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register receiver,
+ // Number of the cache entry pointer-size scaled.
+ Register offset,
+ Register extra) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ Label miss;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ lea(offset, Operand(offset, offset, times_2, 0));
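+  // The lea computes offset + offset * 2 in a single instruction without
+  // touching the flags.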
+
+ if (extra.is_valid()) {
+ // Get the code entry from the cache.
+ __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
+
+ __ bind(&miss);
+ } else {
+ // Save the offset on the stack.
+ __ push(offset);
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Restore offset register.
+ __ mov(offset, Operand(esp, 0));
+
+ // Get the code entry from the cache.
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Restore offset and re-load code entry from cache.
+ __ pop(offset);
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Jump to the first instruction in the code stub.
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
+
+ // Pop at miss.
+ __ bind(&miss);
+ __ pop(offset);
+ }
+}
+
+
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1);
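+  // Both counters are bumped up front; the miss counter is decremented again
+  // on the successful path at the end, so only real misses stay counted.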
+
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
+ kInterceptorOrAccessCheckNeededMask);
+ __ j(not_zero, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Check that the properties array is a dictionary.
+ __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->hash_table_map()));
+ __ j(not_equal, miss_label);
+
+ Label done;
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ scratch1);
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra,
+ Register extra2,
+ Register extra3) {
+ Label miss;
+
+ // Assert that code is valid. The multiplying code relies on the entry size
+ // being 12.
+ ASSERT(sizeof(Entry) == 12);
+
+ // Assert the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Assert that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+ ASSERT(!extra.is(receiver));
+ ASSERT(!extra.is(name));
+ ASSERT(!extra.is(scratch));
+
+ // Assert scratch and extra registers are valid, and extra2/3 are unused.
+ ASSERT(!scratch.is(no_reg));
+ ASSERT(extra2.is(no_reg));
+ ASSERT(extra3.is(no_reg));
+
+ Register offset = scratch;
+ scratch = no_reg;
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+  // We mask out the last two bits because they are not part of the hash and
+  // they are always 01 for maps. The same masking is applied in the two
+  // 'and' instructions below.
+ __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+ // ProbeTable expects the offset to be pointer scaled, which it is, because
+ // the heap object tag size is 2 and the pointer size log 2 is also 2.
+ ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
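+  // With both equal to 2, an entry number n is represented as n << 2 here,
+  // and ProbeTable's multiply-by-3 turns that into n * 12, the byte offset
+  // of the 12-byte entry.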
+
+ // Probe the primary table.
+ ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+ __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+ __ sub(offset, name);
+ __ add(offset, Immediate(flags));
+ __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+
+ // Probe the secondary table.
+ ProbeTable(
+ isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ __ LoadGlobalFunction(index, prototype);
+ __ LoadGlobalFunctionInitialMap(prototype, prototype);
+ // Load the prototype from the initial map.
+ __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(masm->isolate()->native_context()->get(index)));
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ mov(scratch, Operand(esi, offset));
+ __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
+ __ j(not_equal, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss_label);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, miss_label);
+
+ // Load length directly from the JS array.
+ __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
+ __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ mov(eax, scratch1);
+ __ ret(0);
+}
+
+
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!representation.IsDouble());
+ int offset = index * kPointerSize;
+ if (!inobject) {
+ // Calculate the offset into the properties array.
+ offset = offset + FixedArray::kHeaderSize;
+ __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ src = dst;
+ }
+ __ mov(dst, FieldOperand(src, offset));
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
+ __ push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ mov(scratch, Immediate(interceptor));
+ __ push(scratch);
+ __ push(receiver);
+ __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that will be removed
+// when api call ICs are generated in hydrogen.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
+ // Copy return value.
+ __ pop(scratch_in);
+ // receiver
+ __ push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ push(arg);
+ }
+ __ push(scratch_in);
+ // Stack now matches JSFunction abi.
+ ASSERT(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = eax;
+ Register call_data = ebx;
+ Register holder = ecx;
+ Register api_function_address = edx;
+ Register scratch = edi; // scratch_in is no longer valid.
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadHeapObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadHeapObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ mov(scratch, api_call_info);
+ __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
+ } else {
+ __ mov(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ mov(api_function_address, Immediate(function_address));
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ mov(this->name(), Immediate(name));
+ }
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<PropertyCell> cell =
+ JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
+ if (masm->serializer_enabled()) {
+ __ mov(scratch, Immediate(cell));
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(the_hole));
+ } else {
+ __ cmp(Operand::ForCell(cell), Immediate(the_hole));
+ }
+ __ j(not_equal, miss);
+}
+
+
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// the store is successful.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ CmpObject(value_reg, constant);
+ __ j(not_equal, miss_label);
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
+
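+    // Smi case: untag, run the 32-bit integer through the x87 FPU (fild_s
+    // loads it from memory onto the FPU stack, converting it to double),
+    // then restore the tagged value.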
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK);
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ pop(scratch1); // Return address.
+ __ push(receiver_reg);
+ __ push(Immediate(transition));
+ __ push(value_reg);
+ __ push(scratch1);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ mov(scratch1, Immediate(transition));
+ __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ // TODO(verwaest): Share this code as a code stub.
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ if (representation.IsDouble()) {
+ __ mov(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(receiver_reg, offset), value_reg);
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (representation.IsDouble()) {
+ __ mov(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(scratch1, offset), value_reg);
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+}
+
+
+// Both name_reg and receiver_reg are preserved on jumps to miss_label,
+// but may be destroyed if the store is successful.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ FieldIndex index = lookup->GetFieldIndex();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ // Load the double storage.
+ if (index.is_inobject()) {
+ __ mov(scratch1, FieldOperand(receiver_reg, index.offset()));
+ } else {
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ mov(scratch1, FieldOperand(scratch1, index.offset()));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK);
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ bind(&do_store);
+ __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+ return;
+ }
+
+ ASSERT(!representation.IsDouble());
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index.is_inobject()) {
+ // Set the property straight into the object.
+ __ mov(FieldOperand(receiver_reg, index.offset()), value_reg);
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ index.offset(),
+ name_reg,
+ scratch1,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ // Get the properties array (optimistically).
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ mov(FieldOperand(scratch1, index.offset()), value_reg);
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ index.offset(),
+ name_reg,
+ receiver_reg,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+}
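
The double path above converts a smi to a raw IEEE double before storing it. As a reading aid, here is a minimal C++ sketch of that conversion under an assumed one-bit smi tagging scheme (the constant and function names are illustrative stand-ins, not V8 definitions):

#include <cstdint>

// Assumed, simplified tagging scheme for illustration only: smis carry a
// zero tag bit and the integer payload in the remaining bits.
static const int kSmiTagSizeSketch = 1;

double LoadStoredNumber(intptr_t tagged, const double* heap_number_value) {
  if ((tagged & 1) == 0) {
    // Smi path: untag, then convert to double (mirrors SmiUntag + fild_s).
    return static_cast<double>(tagged >> kSmiTagSizeSketch);
  }
  // Heap-object path: read the HeapNumber payload (mirrors the fld_d of
  // HeapNumber::kValueOffset above).
  return *heap_number_value;
}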
+
+
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Handle<Name> name,
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
+
+  // Make sure the scratch registers don't overlap the object and holder.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+  if (type->IsConstant()) {
+    current = Handle<JSObject>::cast(type->AsConstant()->Value());
+  }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+  // Traverse the prototype chain, checking the maps for fast and global
+  // objects and doing negative lookups for normal (dictionary-mode) objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
+
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ bool in_new_space = heap()->InNewSpace(*prototype);
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
+ }
+
+ if (in_new_space) {
+ // Save the map in scratch1 for later.
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (in_new_space) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ mov(reg, prototype);
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
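
Stripped of the global-proxy and dictionary-mode special cases, the emitted check sequence walks the chain as in this hedged C++ sketch (Map and Object are simplified stand-ins; returning NULL models the jump to the miss label):

#include <cstddef>

struct Map;                    // stand-in for v8::internal::Map
struct Object {
  const Map* map;              // hidden class of this object
  Object* prototype;           // next object on the prototype chain
};

// Returns the holder when every map on the chain matches the sequence
// baked into the stub; NULL models the jump to the 'miss' label.
Object* CheckChain(Object* receiver, const Map* const* expected_maps,
                   int depth, Object* holder) {
  Object* current = receiver;
  for (int i = 0; i < depth; ++i) {
    if (current->map != expected_maps[i]) return NULL;  // miss
    current = current->prototype;
  }
  return holder;  // register containing the holder, as above
}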
+
+
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ jmp(&success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ jmp(&success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> callback) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ Register dictionary = scratch1();
+ bool must_preserve_dictionary_reg = reg.is(dictionary);
+
+ // Load the properties dictionary.
+ if (must_preserve_dictionary_reg) {
+ __ push(dictionary);
+ }
+ __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done, pop_and_miss;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &pop_and_miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&pop_and_miss);
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ jmp(&miss);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch2 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch2();
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(scratch3(),
+ Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ cmp(scratch3(), callback);
+ __ j(not_equal, &miss);
+ }
+
+ HandlerFrontendFooter(name, &miss);
+ return reg;
+}
+
+
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ FieldIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
+ } else {
+ KeyedLoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
+ }
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
+ // Insert additional parameters into the stack frame above return address.
+ ASSERT(!scratch3().is(reg));
+ __ pop(scratch3()); // Get return address to place it below.
+
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ __ push(receiver()); // receiver
+ // Push data from ExecutableAccessorInfo.
+ if (isolate()->heap()->InNewSpace(callback->data())) {
+ ASSERT(!scratch2().is(reg));
+ __ mov(scratch2(), Immediate(callback));
+ __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ push(Immediate(Handle<Object>(callback->data(), isolate())));
+ }
+ __ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
+ // ReturnValue default value
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ push(Immediate(reinterpret_cast<int>(isolate())));
+ __ push(reg); // holder
+
+ // Save a pointer to where we pushed the arguments. This will be
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
+ __ push(esp);
+
+ __ push(name()); // name
+
+ __ push(scratch3()); // Restore return address.
+
+  // ABI for CallApiGetter.
+ Register getter_address = edx;
+ Address function_address = v8::ToCData<Address>(callback->getter());
+ __ mov(getter_address, Immediate(function_address));
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
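
For orientation, the pushes above leave the stack laid out roughly as follows on this 32-bit target (4-byte slots; the index constants are the ones asserted by the STATIC_ASSERTs above):

// esp[0]  : return address (pushed last, from scratch3())
// esp[4]  : name
// esp[8]  : pointer to esp[12], passed as the arguments pointer
// esp[12] : holder                     (kHolderIndex == 0)
// esp[16] : isolate                    (kIsolateIndex == 1)
// esp[20] : ReturnValue default value  (kReturnValueDefaultValueIndex == 2)
// esp[24] : ReturnValue                (kReturnValueOffset == 3)
// esp[28] : data                       (kDataIndex == 4)
// esp[32] : receiver (this)            (kThisIndex == 5)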
+
+
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ LoadObject(eax, value);
+ __ ret(0);
+}
+
+
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<Object> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // So far the most popular follow-ups for interceptor loads are FIELD
+  // and CALLBACKS, so only those are inlined; other cases may be added
+  // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsFound() && lookup->IsCacheable()) {
+ if (lookup->IsField()) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+    // Preserve the receiver register explicitly whenever it is different from
+    // the holder and it is needed should the interceptor return without any
+    // result. The CALLBACKS case needs the receiver to be passed into C++
+    // code; the FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+
+ if (must_preserve_receiver_reg) {
+ __ push(receiver());
+ }
+ __ push(holder_reg);
+ __ push(this->name());
+
+      // Invoke an interceptor. Note: the map checks from the receiver to
+      // the interceptor's holder have been compiled before (see the caller
+      // of this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ cmp(eax, factory()->no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
+
+      // Clobber registers when generating debug code to provoke errors.
+ __ bind(&interceptor_failed);
+ if (FLAG_debug_code) {
+ __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
+ }
+
+ __ pop(this->name());
+ __ pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ pop(receiver());
+ }
+
+ // Leave the internal frame.
+ }
+
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ __ pop(scratch2()); // save old return address
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
+ __ push(scratch2()); // restore old return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
+ isolate());
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
+ }
+}
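
The fast path above boils down to a sentinel check between two calls; a hedged sketch with placeholder callees (the real code compares eax against the no_interceptor_result_sentinel root and tail-jumps to compiled handlers):

typedef int Value;

// Placeholder sentinel and callees, for illustration only.
static const Value kNoInterceptorResult = -1;
Value CallInterceptorOnly() { return kNoInterceptorResult; }
Value LoadPostInterceptor() { return 42; }

Value LoadWithInterceptor() {
  Value result = CallInterceptorOnly();
  if (result != kNoInterceptorResult) return result;  // interceptor hit
  // Interceptor declined: fall through to the inlined follow-up load.
  return LoadPostInterceptor();
}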
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(holder_reg);
+ __ Push(callback);
+ __ Push(name);
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ push(receiver);
+ __ push(value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(eax);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
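
Note the semantics encoded by the push(value())/pop(eax) pair: a store through a setter must yield the stored value, not the setter's return value. A minimal sketch, with Value and CallSetter as placeholders:

typedef int Value;

Value CallSetter(Value receiver, Value v) { return 0; }  // placeholder

// The stored value is preserved across the call and returned, discarding
// whatever the setter itself returns.
Value StoreViaSetter(Value receiver, Value value) {
  Value saved = value;
  CallSetter(receiver, value);
  return saved;
}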
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name) {
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(this->name());
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss, Label::kNear);
+ __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ __ cmp(scratch1(), receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i));
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
+
+ // Return undefined if maps of the full prototype chain are still the
+ // same and no global property with this name contains a value.
+ __ mov(eax, isolate()->factory()->undefined_value());
+ __ ret(0);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
+ return registers;
+}
+
+
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
+ return registers;
+}
+
+
+Register StoreStubCompiler::value() {
+ return eax;
+}
+
+
+Register* StoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
+ return registers;
+}
+
+
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
+ return registers;
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<HeapType> type,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete) {
+ Label miss;
+
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
+ // Get the value from the cell.
+ if (masm()->serializer_enabled()) {
+ __ mov(eax, Immediate(cell));
+ __ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
+ } else {
+ __ mov(eax, Operand::ForCell(cell));
+ }
+
+  // Check for a deleted property if the property can actually be deleted.
+ if (!is_dont_delete) {
+ __ cmp(eax, factory()->the_hole_value());
+ __ j(equal, &miss);
+ } else if (FLAG_debug_code) {
+ __ cmp(eax, factory()->the_hole_value());
+ __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1);
+ // The code above already loads the result into the return register.
+ __ ret(0);
+
+ HandlerFrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+ TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ cmp(this->name(), Immediate(name));
+ __ j(not_equal, &miss);
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ Register map_reg = scratch1();
+ __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ cmp(map_reg, map);
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ j(equal, handlers->at(current));
+ }
+ }
+ ASSERT(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
+}
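
The compiled IC is effectively a linear scan over the known, non-deprecated maps. A simplified C++ model (Handler and Miss are stand-ins for the compiled handler code and the miss builtin):

#include <cstddef>
#include <utility>
#include <vector>

struct Map { bool deprecated; };
typedef void (*Handler)();

void Miss() {}  // stand-in for the MissBuiltin tail call

// Linear map dispatch, as compiled above: compare the receiver's map
// against each non-deprecated candidate and jump to its handler.
void Dispatch(const Map* receiver_map,
              const std::vector<std::pair<const Map*, Handler> >& cases) {
  for (std::size_t i = 0; i < cases.size(); ++i) {
    if (cases[i].first->deprecated) continue;  // filtered at compile time
    if (cases[i].first == receiver_map) return cases[i].second();
  }
  Miss();  // no map matched
}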
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, miss;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+ __ JumpIfNotSmi(ecx, &miss);
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+
+ // Push receiver on the stack to free up a register for the dictionary
+ // probing.
+ __ push(edx);
+ __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
+ // Pop receiver before returning.
+ __ pop(edx);
+ __ ret(0);
+
+ __ bind(&slow);
+ __ pop(edx);
+
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ __ bind(&miss);
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/chromium/v8/src/zone-allocator.h b/chromium/v8/src/zone-allocator.h
new file mode 100644
index 00000000000..fd07ce22ef7
--- /dev/null
+++ b/chromium/v8/src/zone-allocator.h
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_ALLOCATOR_H_
+#define V8_ZONE_ALLOCATOR_H_
+
+#include <limits>
+
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+template<typename T>
+class zone_allocator {
+ public:
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ template<class O> struct rebind {
+ typedef zone_allocator<O> other;
+ };
+
+ explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
+  zone_allocator(const zone_allocator& other) throw()
+      : zone_(other.zone_) {}
+ template<typename U> zone_allocator(const zone_allocator<U>& other) throw()
+ : zone_(other.zone_) {}
+ template<typename U> friend class zone_allocator;
+
+ pointer address(reference x) const {return &x;}
+ const_pointer address(const_reference x) const {return &x;}
+
+ pointer allocate(size_type n, const void* hint = 0) {
+ return static_cast<pointer>(zone_->NewArray<value_type>(
+ static_cast<int>(n)));
+ }
+ void deallocate(pointer p, size_type) { /* noop for Zones */ }
+
+ size_type max_size() const throw() {
+ return std::numeric_limits<int>::max() / sizeof(value_type);
+ }
+ void construct(pointer p, const T& val) {
+ new(static_cast<void*>(p)) T(val);
+ }
+ void destroy(pointer p) { p->~T(); }
+
+  bool operator==(zone_allocator const& other) const {
+    return zone_ == other.zone_;
+  }
+  bool operator!=(zone_allocator const& other) const {
+    return zone_ != other.zone_;
+  }
+
+ private:
+ zone_allocator();
+ Zone* zone_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ZONE_ALLOCATOR_H_
diff --git a/chromium/v8/src/zone-containers.h b/chromium/v8/src/zone-containers.h
new file mode 100644
index 00000000000..3b08b471138
--- /dev/null
+++ b/chromium/v8/src/zone-containers.h
@@ -0,0 +1,23 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_CONTAINERS_H_
+#define V8_ZONE_CONTAINERS_H_
+
+#include <vector>
+#include <set>
+
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+typedef zone_allocator<int> ZoneIntAllocator;
+typedef std::vector<int, ZoneIntAllocator> IntVector;
+typedef IntVector::iterator IntVectorIter;
+typedef IntVector::reverse_iterator IntVectorRIter;
+
+} } // namespace v8::internal
+
+#endif // V8_ZONE_CONTAINERS_H_
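
A minimal usage sketch for these typedefs; the Zone pointer is assumed to come from surrounding V8 code, since Zone construction itself varies across versions and is not shown:

#include "src/zone-containers.h"

void FillVector(v8::internal::Zone* zone) {
  v8::internal::ZoneIntAllocator alloc(zone);
  v8::internal::IntVector ints(alloc);  // std::vector backed by the zone
  ints.push_back(1);
  ints.push_back(2);
  // No explicit free: the memory lives until the zone is deleted.
}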
diff --git a/chromium/v8/src/zone-inl.h b/chromium/v8/src/zone-inl.h
index f257382a2db..6c5aecd4d2a 100644
--- a/chromium/v8/src/zone-inl.h
+++ b/chromium/v8/src/zone-inl.h
@@ -1,44 +1,29 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ZONE_INL_H_
#define V8_ZONE_INL_H_
-#include "zone.h"
+#include "src/zone.h"
-#include "counters.h"
-#include "isolate.h"
-#include "utils.h"
-#include "v8-counters.h"
+#ifdef V8_USE_ADDRESS_SANITIZER
+ #include <sanitizer/asan_interface.h>
+#else
+ #define ASAN_UNPOISON_MEMORY_REGION(start, size) ((void) 0)
+#endif
+
+#include "src/counters.h"
+#include "src/isolate.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+static const int kASanRedzoneBytes = 24; // Must be a multiple of 8.
+
+
inline void* Zone::New(int size) {
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
@@ -54,12 +39,25 @@ inline void* Zone::New(int size) {
// Check if the requested size is available without expanding.
Address result = position_;
- if (size > limit_ - position_) {
- result = NewExpand(size);
+ int size_with_redzone =
+#ifdef V8_USE_ADDRESS_SANITIZER
+ size + kASanRedzoneBytes;
+#else
+ size;
+#endif
+
+ if (size_with_redzone > limit_ - position_) {
+ result = NewExpand(size_with_redzone);
} else {
- position_ += size;
+ position_ += size_with_redzone;
}
+#ifdef V8_USE_ADDRESS_SANITIZER
+ Address redzone_position = result + size;
+ ASSERT(redzone_position + kASanRedzoneBytes == position_);
+ ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
+#endif
+
// Check that the result has the proper alignment and return it.
ASSERT(IsAddressAligned(result, kAlignment, 0));
allocation_size_ += size;
@@ -69,6 +67,7 @@ inline void* Zone::New(int size) {
template <typename T>
T* Zone::NewArray(int length) {
+ CHECK(std::numeric_limits<int>::max() / static_cast<int>(sizeof(T)) > length);
return static_cast<T*>(New(length * sizeof(T)));
}
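
Under ASan the per-allocation overhead above is just alignment plus a fixed redzone; a standalone arithmetic sketch (constants mirror the definitions above, renamed to mark them as illustrative):

static const int kAlignmentSketch = 8;          // ASan forces 8-byte alignment
static const int kASanRedzoneBytesSketch = 24;  // multiple of 8, as above

static int RoundUpSketch(int x, int multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

// Bytes consumed from the zone per allocation when ASan is enabled:
// the aligned payload plus the poisoned redzone that follows it.
int BytesConsumed(int requested) {
  return RoundUpSketch(requested, kAlignmentSketch) + kASanRedzoneBytesSketch;
}
// e.g. BytesConsumed(10) == 16 + 24 == 40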
diff --git a/chromium/v8/src/zone.cc b/chromium/v8/src/zone.cc
index 417f895e5ae..d4fa42fd411 100644
--- a/chromium/v8/src/zone.cc
+++ b/chromium/v8/src/zone.cc
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <string.h>
-#include "v8.h"
-#include "zone-inl.h"
+#include "src/v8.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -104,6 +81,8 @@ void Zone::DeleteAll() {
} else {
int size = current->size();
#ifdef DEBUG
+ // Un-poison first so the zapping doesn't trigger ASan complaints.
+ ASAN_UNPOISON_MEMORY_REGION(current, size);
// Zap the entire current segment (including the header).
memset(current, kZapDeadByte, size);
#endif
@@ -120,6 +99,8 @@ void Zone::DeleteAll() {
Address start = keep->start();
position_ = RoundUp(start, kAlignment);
limit_ = keep->end();
+ // Un-poison so we can re-use the segment later.
+ ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
#ifdef DEBUG
// Zap the contents of the kept segment (but not the header).
memset(start, kZapDeadByte, keep->capacity());
@@ -143,6 +124,8 @@ void Zone::DeleteKeptSegment() {
if (segment_head_ != NULL) {
int size = segment_head_->size();
#ifdef DEBUG
+ // Un-poison first so the zapping doesn't trigger ASan complaints.
+ ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
// Zap the entire kept segment (including the header).
memset(segment_head_, kZapDeadByte, size);
#endif
diff --git a/chromium/v8/src/zone.h b/chromium/v8/src/zone.h
index bd7cc39b0c4..d31d6425d4a 100644
--- a/chromium/v8/src/zone.h
+++ b/chromium/v8/src/zone.h
@@ -1,39 +1,16 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ZONE_H_
#define V8_ZONE_H_
-#include "allocation.h"
-#include "checks.h"
-#include "hashmap.h"
-#include "globals.h"
-#include "list.h"
-#include "splay-tree.h"
+#include "src/allocation.h"
+#include "src/checks.h"
+#include "src/hashmap.h"
+#include "src/globals.h"
+#include "src/list.h"
+#include "src/splay-tree.h"
namespace v8 {
namespace internal {
@@ -89,8 +66,13 @@ class Zone {
// All pointers returned from New() have this alignment. In addition, if the
// object being allocated has a size that is divisible by 8 then its alignment
- // will be 8.
+ // will be 8. ASan requires 8-byte alignment.
+#ifdef V8_USE_ADDRESS_SANITIZER
+ static const int kAlignment = 8;
+ STATIC_ASSERT(kPointerSize <= 8);
+#else
static const int kAlignment = kPointerSize;
+#endif
// Never allocate segments smaller than this size in bytes.
static const int kMinimumSegmentSize = 8 * KB;